From 62f5c5330f4bdbd0676ff6fafb7f7b61f13237d4 Mon Sep 17 00:00:00 2001 From: kvos Date: Mon, 21 May 2018 10:08:38 +1000 Subject: [PATCH] updated master added download_images and read_images scripts --- download_images.py | 390 ++++++++++++++ functions/NeuralNet_classif_nopan.pkl | Bin 0 -> 51273 bytes functions/data_analysis.py | 432 +++++++++++++++ functions/sds.py | 746 ++++++++++++++++++++------ functions/utils.py | 49 ++ read_images.py | 589 ++++++++++++++++++++ sl_comparison_NARRA.py | 382 ------------- 7 files changed, 2032 insertions(+), 556 deletions(-) create mode 100644 download_images.py create mode 100644 functions/NeuralNet_classif_nopan.pkl create mode 100644 functions/data_analysis.py create mode 100644 read_images.py delete mode 100644 sl_comparison_NARRA.py diff --git a/download_images.py b/download_images.py new file mode 100644 index 0000000..a44fca8 --- /dev/null +++ b/download_images.py @@ -0,0 +1,390 @@ +#==========================================================# +#==========================================================# +# Download L5, L7, L8, S2 images of a given area +#==========================================================# +#==========================================================# + + + +#==========================================================# +# Initial settings +#==========================================================# +import os +import numpy as np +import matplotlib.pyplot as plt +import pdb +import ee + +# other modules +from osgeo import gdal, ogr, osr +from urllib.request import urlretrieve +import zipfile +from datetime import datetime +import pytz +import pickle + +# import own modules +import functions.utils as utils +import functions.sds as sds + +np.seterr(all='ignore') # raise/ignore divisions by 0 and nans +ee.Initialize() + +#==========================================================# +# Location +#==========================================================# + +## location (Narrabeen-Collaroy beach) +#polygon = [[[151.301454, -33.700754], +# [151.311453, -33.702075], +# [151.307237, -33.739761], +# [151.294220, -33.736329], +# [151.301454, -33.700754]]]; + +# location (Tairua beach) +sitename = 'TAIRUA' +polygon = [[[175.835574, -36.982022], + [175.888220, -36.980680], + [175.893527, -37.029610], + [175.833444, -37.031767], + [175.835574, -36.982022]]]; + +# initialise metadata dictionnary (stores timestamps and georefencing accuracy of each image) +metadata = dict([]) + +# create directories +try: + os.makedirs(os.path.join(os.getcwd(), 'data',sitename)) +except: + print('directory already exists') + + +#%% +#==========================================================# +#==========================================================# +# L5 +#==========================================================# +#==========================================================# + + + +# define filenames for images +suffix = '.tif' +filepath = os.path.join(os.getcwd(), 'data', sitename, 'L5', '30m') +try: + os.makedirs(filepath) +except: + print('directory already exists') + +#==========================================================# +# Select L5 collection +#==========================================================# + +satname = 'L5' +input_col = ee.ImageCollection('LANDSAT/LT05/C01/T1_TOA') + +# filter by location +flt_col = input_col.filterBounds(ee.Geometry.Polygon(polygon)) +n_img = flt_col.size().getInfo() +print('Number of images covering ' + sitename, n_img) +im_all = flt_col.getInfo().get('features') + + 
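As an illustrative aside (not part of the patch): the collection above is filtered by location only, so every L5 scene intersecting the polygon will be downloaded. If only a period of interest were needed, the collection could additionally be narrowed by date before the loop, assuming the standard ee.ImageCollection.filterDate() call; the date strings below are placeholder values, not taken from the original script.

    flt_col = flt_col.filterDate('1987-01-01', '2011-12-31')  # hypothetical date range
    n_img = flt_col.size().getInfo()                           # recount after filtering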
+#==========================================================# +# Main loop trough images +#==========================================================# + +timestamps = [] +acc_georef = [] +all_names = [] +for i in range(n_img): + + # find each image in ee database + im = ee.Image(im_all[i].get('id')) + + im_dic = im.getInfo() + im_bands = im_dic.get('bands') + t = im_dic['properties']['system:time_start'] + im_timestamp = datetime.fromtimestamp(t/1000, tz=pytz.utc) + timestamps.append(im_timestamp) + im_date = im_timestamp.strftime('%Y-%m-%d-%H-%M-%S') + im_epsg = int(im_dic['bands'][0]['crs'][5:]) + try: + acc_georef.append(im_dic['properties']['GEOMETRIC_RMSE_MODEL']) + except: + acc_georef.append(12) + print('No geometric rmse model property') + + # delete dimensions key from dictionnary, otherwise the entire image is extracted + for j in range(len(im_bands)): del im_bands[j]['dimensions'] + + # bands for L5 + ms_bands = [im_bands[0], im_bands[1], im_bands[2], im_bands[3], im_bands[4], im_bands[7]] + + # filenames + filename = im_date + '_' + satname + '_' + sitename + suffix + + print(i) + if any(filename in _ for _ in all_names): + filename = im_date + '_' + satname + '_' + sitename + '_dup' + suffix + all_names.append(filename) + + local_data = sds.download_tif(im, polygon, ms_bands, filepath) + os.rename(local_data, os.path.join(filepath, filename)) + +# sort timestamps and georef accuracy (dowloaded images are sorted by date in directory) +timestamps_sorted = sorted(timestamps) +idx_sorted = sorted(range(len(timestamps)), key=timestamps.__getitem__) +acc_georef_sorted = [acc_georef[j] for j in idx_sorted] + +metadata[satname] = {'dates':timestamps_sorted, 'acc_georef':acc_georef_sorted, 'epsg':im_epsg} + +#%% +#==========================================================# +#==========================================================# +# L7&L8 +#==========================================================# +#==========================================================# + + + +# define filenames for images +suffix = '.tif' +filepath = os.path.join(os.getcwd(), 'data', sitename, 'L7&L8') +filepath_pan = os.path.join(filepath, 'pan') +filepath_ms = os.path.join(filepath, 'ms') +try: + os.makedirs(filepath_pan) + os.makedirs(filepath_ms) +except: + print('directory already exists') + +#==========================================================# +# Select L7 collection +#==========================================================# + +satname = 'L7' +input_col = ee.ImageCollection('LANDSAT/LE07/C01/T1_RT_TOA') + +# filter by location +flt_col = input_col.filterBounds(ee.Geometry.Polygon(polygon)) +n_img = flt_col.size().getInfo() +print('Number of images covering ' + sitename, n_img) +im_all = flt_col.getInfo().get('features') + +#==========================================================# +# Main loop trough images +#==========================================================# + +timestamps = [] +acc_georef = [] +all_names = [] +for i in range(n_img): + + # find each image in ee database + im = ee.Image(im_all[i].get('id')) + + im_dic = im.getInfo() + im_bands = im_dic.get('bands') + t = im_dic['properties']['system:time_start'] + im_timestamp = datetime.fromtimestamp(t/1000, tz=pytz.utc) + timestamps.append(im_timestamp) + im_date = im_timestamp.strftime('%Y-%m-%d-%H-%M-%S') + im_epsg = int(im_dic['bands'][0]['crs'][5:]) + try: + acc_georef.append(im_dic['properties']['GEOMETRIC_RMSE_MODEL']) + except: + acc_georef.append(12) + print('No geometric rmse model property') + + # delete dimensions key 
from the dictionary, otherwise the entire image is extracted
+    for j in range(len(im_bands)): del im_bands[j]['dimensions']
+
+    # bands for L7
+    pan_band = [im_bands[8]]
+    ms_bands = [im_bands[0], im_bands[1], im_bands[2], im_bands[3], im_bands[4], im_bands[9]]
+
+    # filenames
+    filename_pan = im_date + '_' + satname + '_' + sitename + '_pan' + suffix
+    filename_ms = im_date + '_' + satname + '_' + sitename + '_ms' + suffix
+
+    print(i)
+    if any(filename_pan in _ for _ in all_names):
+        filename_pan = im_date + '_' + satname + '_' + sitename + '_pan' + '_dup' + suffix
+        filename_ms = im_date + '_' + satname + '_' + sitename + '_ms' + '_dup' + suffix
+    all_names.append(filename_pan)
+
+    local_data_pan = sds.download_tif(im, polygon, pan_band, filepath_pan)
+    os.rename(local_data_pan, os.path.join(filepath_pan, filename_pan))
+    local_data_ms = sds.download_tif(im, polygon, ms_bands, filepath_ms)
+    os.rename(local_data_ms, os.path.join(filepath_ms, filename_ms))
+
+#==========================================================#
+# Select L8 collection
+#==========================================================#
+
+satname = 'L8'
+input_col = ee.ImageCollection('LANDSAT/LC08/C01/T1_RT_TOA')
+
+# filter by location
+flt_col = input_col.filterBounds(ee.Geometry.Polygon(polygon))
+n_img = flt_col.size().getInfo()
+print('Number of images covering ' + sitename, n_img)
+im_all = flt_col.getInfo().get('features')
+
+#==========================================================#
+# Main loop through images
+#==========================================================#
+
+for i in range(n_img):
+
+    # find each image in ee database
+    im = ee.Image(im_all[i].get('id'))
+
+    im_dic = im.getInfo()
+    im_bands = im_dic.get('bands')
+    t = im_dic['properties']['system:time_start']
+    im_timestamp = datetime.fromtimestamp(t/1000, tz=pytz.utc)
+    timestamps.append(im_timestamp)
+    im_date = im_timestamp.strftime('%Y-%m-%d-%H-%M-%S')
+    im_epsg = int(im_dic['bands'][0]['crs'][5:])
+    try:
+        acc_georef.append(im_dic['properties']['GEOMETRIC_RMSE_MODEL'])
+    except:
+        acc_georef.append(12)
+        print('No geometric rmse model property')
+
+    # delete dimensions key from the dictionary, otherwise the entire image is extracted
+    for j in range(len(im_bands)): del im_bands[j]['dimensions']
+
+    # bands for L8
+    pan_band = [im_bands[7]]
+    ms_bands = [im_bands[1], im_bands[2], im_bands[3], im_bands[4], im_bands[5], im_bands[11]]
+
+    # filenames
+    filename_pan = im_date + '_' + satname + '_' + sitename + '_pan' + suffix
+    filename_ms = im_date + '_' + satname + '_' + sitename + '_ms' + suffix
+
+    print(i)
+    if any(filename_pan in _ for _ in all_names):
+        filename_pan = im_date + '_' + satname + '_' + sitename + '_pan' + '_dup' + suffix
+        filename_ms = im_date + '_' + satname + '_' + sitename + '_ms' + '_dup' + suffix
+    all_names.append(filename_pan)
+
+    local_data_pan = sds.download_tif(im, polygon, pan_band, filepath_pan)
+    os.rename(local_data_pan, os.path.join(filepath_pan, filename_pan))
+    local_data_ms = sds.download_tif(im, polygon, ms_bands, filepath_ms)
+    os.rename(local_data_ms, os.path.join(filepath_ms, filename_ms))
+
+
+# sort timestamps and georef accuracy (downloaded images are sorted by date in directory)
+timestamps_sorted = sorted(timestamps)
+idx_sorted = sorted(range(len(timestamps)), key=timestamps.__getitem__)
+acc_georef_sorted = [acc_georef[j] for j in idx_sorted]
+
+metadata[satname] = {'dates':timestamps_sorted, 'acc_georef':acc_georef_sorted, 'epsg':im_epsg}
+
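A related aside, again not part of the patch: the timestamp, projection and geometric accuracy are read from im.getInfo() in the same way in each Landsat loop above. A minimal sketch of a shared helper, assuming the same dictionary layout returned by the Earth Engine API (the function name is hypothetical):

    def get_landsat_metadata(im_dic):
        # acquisition time: 'system:time_start' is in milliseconds since epoch, UTC
        t = im_dic['properties']['system:time_start']
        im_timestamp = datetime.fromtimestamp(t/1000, tz=pytz.utc)
        # EPSG code of the first band, e.g. 'EPSG:32760' -> 32760
        im_epsg = int(im_dic['bands'][0]['crs'][5:])
        # geometric accuracy, defaulting to 12 m when GEOMETRIC_RMSE_MODEL is missing
        acc = im_dic['properties'].get('GEOMETRIC_RMSE_MODEL', 12)
        return im_timestamp, im_epsg, acc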
+
+#%%
+#==========================================================#
+#==========================================================#
+# S2
+#==========================================================#
+#==========================================================#
+
+
+
+# define filenames for images
+suffix = '.tif'
+filepath = os.path.join(os.getcwd(), 'data', sitename, 'S2')
+try:
+    os.makedirs(os.path.join(filepath, '10m'))
+    os.makedirs(os.path.join(filepath, '20m'))
+    os.makedirs(os.path.join(filepath, '60m'))
+except:
+    print('directory already exists')
+
+#==========================================================#
+# Select S2 collection
+#==========================================================#
+
+satname = 'S2'
+input_col = ee.ImageCollection('COPERNICUS/S2')
+
+# filter by location
+flt_col = input_col.filterBounds(ee.Geometry.Polygon(polygon))
+n_img = flt_col.size().getInfo()
+print('Number of images covering ' + sitename, n_img)
+im_all = flt_col.getInfo().get('features')
+
+#==========================================================#
+# Main loop through images
+#==========================================================#
+
+timestamps = []
+acc_georef = []
+all_names = []
+for i in range(n_img):
+
+    # find each image in ee database
+    im = ee.Image(im_all[i].get('id'))
+
+    im_dic = im.getInfo()
+    im_bands = im_dic.get('bands')
+    t = im_dic['properties']['system:time_start']
+    im_timestamp = datetime.fromtimestamp(t/1000, tz=pytz.utc)
+    im_date = im_timestamp.strftime('%Y-%m-%d-%H-%M-%S')
+    timestamps.append(im_timestamp)
+    im_epsg = int(im_dic['bands'][0]['crs'][5:])
+    try:
+        if im_dic['properties']['GEOMETRIC_QUALITY_FLAG'] == 'PASSED':
+            acc_georef.append(1)
+        else:
+            acc_georef.append(0)
+    except:
+        acc_georef.append(0)
+
+    # delete dimensions key from the dictionary, otherwise the entire image is extracted
+    for j in range(len(im_bands)): del im_bands[j]['dimensions']
+
+    # bands for S2
+    bands10 = [im_bands[1], im_bands[2], im_bands[3], im_bands[7]]
+    bands20 = [im_bands[11]]
+    bands60 = [im_bands[15]]
+
+    # filenames
+    filename10 = im_date + '_' + satname + '_' + sitename + '_' + '10m' + suffix
+    filename20 = im_date + '_' + satname + '_' + sitename + '_' + '20m' + suffix
+    filename60 = im_date + '_' + satname + '_' + sitename + '_' + '60m' + suffix
+
+    print(i)
+    if any(filename10 in _ for _ in all_names):
+        filename10 = im_date + '_' + satname + '_' + sitename + '_' + '10m' + '_dup' + suffix
+        filename20 = im_date + '_' + satname + '_' + sitename + '_' + '20m' + '_dup' + suffix
+        filename60 = im_date + '_' + satname + '_' + sitename + '_' + '60m' + '_dup' + suffix
+    all_names.append(filename10)
+
+    local_data = sds.download_tif(im, polygon, bands10, filepath)
+    os.rename(local_data, os.path.join(filepath, '10m', filename10))
+
+    local_data = sds.download_tif(im, polygon, bands20, filepath)
+    os.rename(local_data, os.path.join(filepath, '20m', filename20))
+
+    local_data = sds.download_tif(im, polygon, bands60, filepath)
+    os.rename(local_data, os.path.join(filepath, '60m', filename60))
+
+# sort timestamps and georef accuracy (downloaded images are sorted by date in directory)
+timestamps_sorted = sorted(timestamps)
+idx_sorted = sorted(range(len(timestamps)), key=timestamps.__getitem__)
+acc_georef_sorted = [acc_georef[j] for j in idx_sorted]
+
+metadata[satname] = {'dates':timestamps_sorted, 'acc_georef':acc_georef_sorted, 'epsg':im_epsg}
+
+
+
+#%% save metadata
+
+filepath = os.path.join(os.getcwd(), 'data', sitename)
+with open(os.path.join(filepath, sitename + '_metadata'
+ '.pkl'), 'wb') as f: + pickle.dump(metadata, f) + + \ No newline at end of file diff --git a/functions/NeuralNet_classif_nopan.pkl b/functions/NeuralNet_classif_nopan.pkl new file mode 100644 index 0000000000000000000000000000000000000000..2a780fd26ef332495acca950f26af43bd93f111c GIT binary patch literal 51273 zcmeFaRajNQ8~+Oe5+X>5q#!6I-HI|OASuE|6s1#8YHdP6N@5eD5`q#6qI5S%4Bg!! z-6bKQr2j4a`aI7$=jz-X*Tveb&pR_~)&y(bnfdPRfoE*@#M0CdZOvzG>VP)1)VDUZ ze`Va^=wyo4w>3o@o7&o=ZLCRdtEkCX8rs=em|2*jNe~>a7YME^a)MKf z1P2Gl(AeI>(a_$)#u~xX!p8!%sigyguXPgpr=5+ZqbV98IO2vThE~W)ErLIBEN#sV z5kh%DFE+WvD8qY{kddLivAMpTg|jI_bR5CK-UcDoI*pBRlvxYwNBU?(dsBo&iwMh% zZLIC=4Xy2wQ(Bb&8${p2+QJ?ol?Q4@x4YJp_q7PIA#81)VxP+%IV}&yv6tB6C><-q zXZjZQ*ldz1Hyqv9(#FmXAwRNN+C0)fx;%4q4|59>6H{yb;{y7VQ#*u0*~AB-JWAir z+`-Ju(iEZc(;~$tjyAM5v9Z#}<`J7^YE>;fEVH+TR=uOcBTl8|4}JC zHAEl9LE9te{EzczV`XY>?_h<{%7f*WKS%3PW~@!^u(`Hzw9~ixX9zk!Ei!D9*qXI; z!rs`%))rf42)(}+)t?&~T3VPKRg}IN`cJK4Ym7l26i56yB9H3J$kg6YUl3vZ_l^G4 z;t>@>nB>9g&$dAt-^)i&Yies}VTn!Ryu6g0)9Xorz^htkuz|6KV`OTnZ)9O@h{hHW z8e#rdTeC%*+M;cYO|cEh`VrqzI1-gVp>O^(FbTqfZCe*y5mrtuVr*j8rr6^D69QpV z#yKLkHg^9Y7nO03nueXNA==JV-^Rh-*1;aREF9 zAu^mtZQ%A@!7EpU#Sz(~?8&(zxBij6+qNfs5cz-P@39AWEyNja0|HNK#PG|u@f}QYCBa9qiP)^&?)S13m z(K>!ou704zY^)}}qjDzy^y7E?9d#~6@4N6_fvi^Ow_C!N2V{wRvGw_VAAi`X=X_$^ zgJZWmE-#dBD^TOsiF%PKR}m!P65yQSUNe?3Kk-)J(oj|B*r_La_x9Om9ADpkpSj57 zt4V6wA&LG>eL8xN#p2b`P~E~gZMgxjBb4Y;x5N!3Qzpo zI(U~?r$oFa?wmI4HtQV5ku>tYa=CUgvm|dS^qodk@?yI0SNf#njO}@4p7Es5Z^e0& zKmUllcg{I%lZnmVb#;=(O}&RXA+x2$=LJcFY)h~2XaD@}@18wmuMjt3>7I`^Cxe3E zgD*I~eL1g0#hraZUT=HOKLW!~YB>mVaCHL3PBkulYn+)jekJo>t0C*aZ9igHPiC2> z>rBT=_ApdT7hZjm+r|PmIwA>F>|)O#?8KXnS!3~1GBg8 zJ#7?a-%K$rC!?2?RnRPRBrZ`ky&ZYlP}}Hb{?z$j4XtE1X3}Mzo2v#cBmG(5f+%hI zHotX?jNLJ%@?O0=@qUZnjzvt~tB7;(vaCEAb@)Q3#rSvYy|G)l54r1pDIT122Dg^; zzPOdoHlV|28jJG`?!Gi5>0`;x+Q+VJE8gLx5YfzH|99Qv;{OSeE&bjMN^4PWc4 z3oHCjeafiQJI+3)>Pir{-Z^|Kq#!X`Kxj=>yTNdd`;Iuz12X+6MTe0Tafd^y_y>vP zvNQq}@grqDPp@Q-&}W!${?b;nJVY^(9O_Kq*!*1Y{p~LH>kRd-VLge8gvrfo5`K>Y zSu?DRIueXRgK(~uj0qAI5qWoSaaC~)1m*hsr>!1niq=Gk6_+mE2p9=}V7l?!>$Yg_ z0sGeEy9k!}37v!ngDEmb_Dq={cM9XOC5$aRV}46TzYt_?w@WYXq-NO+?_9rFy+rzE z@@8x8nUpy4T3=g;aTL&N#& z%TY34$;uj^8ZKf8?=0b(rHfxqwxrf68B6|Nk2jI2B*SH?H}a6fGm1K#M91sH1H9RB z^NcdVhY?|=bK{lwdnoR_4LNsP>va3XeIAp`&G9%(L3Mmik5#o}>H3s39c+A+c8hbK zY|E?%=&FaX+`|haEL``W;HfEZzjac;zOM9+x^YZiiqL&gQ>WY;M2k!6nrB|-gP4nV zm7YysebeKzw*WfI8`_(2E+zU?%{kp3`6(&0TYIy~rH)=%Hi`mwL85-}Y_~+*(dNK0!T{h5+G69$~nQ*O5n=ZNqm zP>CKFyH5+Szln>pS1P(BFYPbnoHwCKhd*b02ZiR~y?*lT+|20?so0zsNnbw^4zhl* zkj)*pR!)@bt<$z+$EnIt4{;?{y&~`)-Yw5LbdRpPrBq$^jr=Q3(euRM&Zk60lv1(& z&+N9T7K096Jv}$-f=iJbvX(e8!FhF@lTj}&vhd5`V*1*Wy~EqHO`B;yr{g~}JRX#Y zn2DF}UmDKP8aar5D4BKt`ujbCJzwoS+h^Ce4GKz$^yn==RWL zunc8PU77QbdztsAH!fL+QdB>x*J{<=PjI7|JzPJxh9m6Hm2L z(Nhu1XWTgN^Ndr4>WXSy%cK?S`E>fmr@|JS?2Km?JI-bL>zsQ3&NBBRF8bChFa9zT zi?t=`&r?iMrg(X+uj@JW1{)y@Lw-P5o%I)L(Oa(v6qZDPFxlTDdZ<0SIhJc(fi#8e zol6fF;v!NJ{CZdrBNV@OSIyo3HpdPhRkr_7qnU@UiKxb9huJUtQf+eEcKcUGZfZ7I zcBVyGmwqW5p%uO)ZFo6;k~!1=vFnwe2)zQ8pZb}hjh*{wMg7^lk7aC}((yGTyQcbL zN{v4{@z!;97#ih;Hv{G9+8PD!cLgPVelMuWcsj;7_)y?1(OpFR4vVINIP(?DMdz64 z8|iG{S|~(gc&r?$_sve)-}R#4JI8&CuCi1g(-GqKj=Ov+d*}yCyEnt^z0Xc7HLCQF zt`eHC(VkR8PO#&5zWdU*)1fDG^6tZ*MN8Kk>N~m=ekbx0QMV>+EZN6hQc%Y`#&>X?nHI@rEckFa8tav7^TQf zt}-Qj1=p`bz@~<{%qh@A(b5}zJJpyC9roTV?;TV3nwnv(-m@FfeWPMoPpRZ3p3d!psT2XXnb3F*h;Qsbj#BI}zSmK`nQz|iU zN-v(DWtJq3a>yvD@(vpAS1Sy@MTPlVf9J z{SEzpyT1B}+EFTYHfHu%e-cqY8Y3Iqn3`dupmY%p&Z9x-T~|c&pJQZVWfKf?@4rUL zyT7S!Y6KO48Q!%^H9XxQuzt5rW9tnA4YOgsC>&W&>JlKw`NGbGO_S@b%mM_%^={ z;=&VU104nMhqO2+nqdTlyGqRGZxsS`eG=8UKovCE!9D!2)dW-E_tuuh5_p$T>bJbt 
z9)Hz*;2mSO96<4o<^(Y|e+;Mbhf*oc;KWj!{k?%cKqSF=KLsZnBnsszxLm9QxFzBa z3VWSkdEN4qY^Nqa@BSse$_Gt_n!QZ;PGNOWnW0}xssBV?W~5_;XV*B~XxF#i!u0}% zGCXAoP6;_P3r{YpdJJYZZ&&+_Msx$HO@V-mC6c*>cOs-a!5ExZy815eJ7S}6}#cS`b0_Wn@istt7pse&Z z-9y*5T**kjbhA*Dh_*U7;QYUm7B> z2Hp4q?F@vc_H`5m?w)sy-kt9RG7IGK=dMEFx}b!6DLD_!h4XSoYEXz?RgSlbRjP&? z8y_l09o2**@5?wTnwS-?jMb0RCVzrt0Os)6mbI>0V0cx5xn61+GOlRI~ZGm2G8o6 zphm(NZgXiC&}eqPjB~LWP@6~J{^i{bHE$>fEz8$JnLulvyTlM|KjMEOQ-3JTfD<>h zt2YDitZXss>Q#W~jA+%54_ScHlzLxXS4P}5q|EVDLLqQ8&)?m+I1UsP=R`|zzQKE9 zyc`9ub3v9dv+(_cR)Bua=t_4Pf*Q2EL(_#$`0!@gUMF)tSohcJoU^Hd_Y>~VP2Nn% z@z1d!635Adw(1s`d@+ZYg|lETO(8=lO+m` zodB(*cP?L!#}zerTFC5cY`7r>UnM<&RLVQ4X~``-6j4lvysB-yAMh5>=M2fvEU ziEzG=UMdMrfcnp5jVTx+A@#MBjJu_&LN3IfD)0F>ax0re@r#;6Kug%=h;N^Z!OA+j zD8;F&OwFZT`sK%O;hu(|^JMlobYlD^r%T!hMwP9|c*6&P2C`Eb^?L}6^t?n5Pu2pY zmvQ_YS2xH)mvL}xlmQ8sJ)hy%V*ppQkRzp{83xjGq;(NI7AUN}*6~7W0K&p{hv#j@ zaESS=M%w*8SU$c}F!N**7|@PC>i^aQ6WZ+G&M1EXW6Fg>%dRb;^rPg3`fIJg!=Ya7 zFx&@TLpOk z;JU1P)91pGu3m-Up<40l>AY1c`HjH+xh0Aq&qebVuf z{&+ixdwd4hn7bXI6YF2->Qun@dLNWkq9?(b1-%FXyDD(~_uUcj4|h!fHFQs3+GmAf%F;YF5IB^uq}1l`J==}UW;Rj zhkkAwkj>tCK2*>L&&OrZ-W$mS79>s!SswP;`*Ku+&Sk++jB};ti@+cly?{36RhWPW zg@z%VH{-y%^ryUTm13w$aFWjirwUTq2Gi(d_P|ivcTT+AU*NRUjtkH7IBYnhFZ0_k zy-+cbN91aFBfN3y-bCX-0T_ILUQ+0q9Auyjr)egf1<6Bpqr!RN;JQ)Z4@OV{pw@fe zn)}0GdH1Qi+sWv>MBGV((4DWD4JogFsWIn4bz)ahw%r#36obLsWxsl0U6D0^Nb7r` zqgLe9MuhRo?MK3rQHVuWp`5C-?hOYp7wlQh5^_koXh{K zel6S4j*)KPr4ba*&kn!DZ-i(1hXyZly(>K7f+yj8UNT=bnSQInt^g|Kt-k+Sn+6H( zh$pmt=knO+cotdxCP0h8NZ>y69K_kPc2?vq0lhI#?3~Yb0dj_`?VbJO;H48f2*<1! z2+(W2?yKkq8m&F-c1&+!2b=QK3f~^^j?3Xmr^N&`ilAFs#|ss)3{st0?{9}Ed>Fhx zkVJvE4$-65!xfPCR6sfi9RWE>-?h5GO~MPJiVI3b-C&JvU(x7g9q2(nai?dP0JB~b zX7|4Lg5-qz+}eD4f)7n^q}4EYf>QNWhc>)wfT|V>wo9%7!JmL;=L3oS7kWF3^%wd9 zu{I3L6phQW(j~dNSuUN^BkO*yQltsgJ@h5Y*+qcVsorey&N4s{@L_O~sUK*4Jk9^? zQ$JXy6JnO;>H*Tko_t2${Q#XeEmPsTo3B!Uh0RG~tu+HecOs4C!%tvC zLvq+wArChF))Vgi4FS9P1y)9#3J{E^_uOu05q2p0w7IJ;h$X9a4M}KX-{WuHS)Csb zz_9t8aRJ70P?Stb%~jd~d^H3%P00FS&JQl#0oV;YMjuBvcr^jexis~6SEjS&)YuZ! 
zx3hqJ-9iA0un3-L)^ruQQVY&XC#fuVei6RWVOrVV9tQbYY9_R|D*-2&jOu1+GxQmA z_YR`3L&Fu$-9rSuS}e?0y_ua!I}eHcJg!Q0IH` z)6(jy8O^o=X*vRJ-NI7PpD$itb3!9aWX3yG_AR@Fv$kqpw^uH3J51cYm;M^6pzb)j zld|T=2M9_5i>gvRV>~?*+2CYdfc;x`A>teR6tiCkX%U zQLEfj4a^#|?KvDf0q5YJslqk{gj?FZ3dy~YRZzZ{pbvg)m*l-`&|wN}9B zZU0Lq!wKM1P=r3I|0v*fLNf~a6hp*Okl-V37YM!19uXnm4KT~TFZX_o0G^MwU+*dP z0bS3ykIIjYB;>?y;MBh?1#Hz3?M4(GfKRk0)K0$=_#Yq~FC$zfE}D+%FjI^Jje|26jl%H z3ib6AcfWyqic1AkM&n>~R4@FMYc^;=zw!U2Q~{s!hpBn%?KiTQs>}ja4lZd;cu#iXLnZ4f;F$D?D8=0emCZWOF zNxlT-F`&ja^@2-#0DML0gpEX7VGVIeA6&=*O>th&nk>5jATH^Ag}xzHi*l09WSNAe z<2ppFEwyl4ww^noEdi8r^}moP?GkD^?VociIv>smm{_N?wm?RgSjoiDPDrmZd}e4U z2Lwxc*RiZE!PTU-1;Q^G01XV!@+eHgCF{AMn==N0#=&HXdbtxy1rQi>YR8H?b%u?c zKR*s%&nU@b&UJ$giFPG{dIT^Ao^C+V^n=T3=c_|^`{09?WVHd?FK|Qu39{CV+0IlSyw!8mMnPk6R-%PCfM1hRbC!+GT50#8pJXj@@C zGs|9Jn9rD1=l)#QNomjvfqX{PYPnXukldy}o8;LD^h=I zXBYesZ}_uVaS+%(@8;-MT7`yjI|+K&_ZtaHg!-M2vBc8FPvNA(GqA(N`ds+aM7Y_K zSYvdt5E`sz>?e^9fpGgwle=|O(6(ao-UWwrAYf|XWiK=cdhQrK6Oru(exu2!W2L?V z@qr=PU2EkeC8BB1;-EMeV?cFWH`y+&O4^+CnTm+nFb?voj7rL`a~RSSp4Jpy9dLTg}!{&`#m_9&4s5HmNIDwMdrTbe7`gZQ&bzhm^Lbbh0dsvE6o@1 zE$x~3q1YfCXEh%Q7w-YLj7@1yadyFvu!GC0y%;vBGS|HNRS1fxa(=j*OpCBS)BHBx zJ^>03-&5nu6@kyrw<`I3OTar1k%-ZeFVNeuL+e{;JunKCXPfe?hc353d3;tMF#W(W zA<`0=U9257#;^QVxI6_m)gAXJ-o;aT}EN0up?2$+6eH=FUl1T=miZ)ChwjP8D$ImZxomY zjscln9Q}%s3SiB2a(=E-SR}+amNZa8Rf6Mlq36j<6)-HJ9c`Le53P}u9x0Je{5TPQ z{m)j*0OoCn*gMxLXc3{J`z|3H(APQ}**W%t7IBB`6({q+Oai5ZqhCKL&Xj9>$T0?M z);)koD|!v`Yv6+?3VJ4& z`XKEF;x)hA05VodS8upX09@ul%b(dp;Kz~?RdiD+czm(r5?ZwuUcV}HrO>S!yw)|| zFwRsIWAj)4KxmY(=^O^@l{?-vgea zd0dQdbX%SHS?J_`ZhudPu|K`P%BWU>GLuOS38zWO9=ZKWV7VX6xA!&Kyy*o(BkS{< zJsm(yJH@_BZx;BA1~c!MH^Hh-qj6uBe7IYDLAf@o7EV7A7dS{9g`e(m-?-}60eZ4h zeBR7ez~^3XL+bc9rA%L_Jb7yx4*mNH-sQ55!@YRAg1+nd0GEaD*4{h>-%JMEL`ZvJ z{|(kHhI1WYKY^l)FVGE6Pj*=n>yN{0?n%K$vT;Ho%Vm|0IgN0ajM?4h>nQfUzs7UO z(h1L|@vv(oM#8Ltw^^vr>5w!UlJhZ7RJW7#wc;=tsI$&Ldg5@(sIcsI=EO zq;a+r{2*I@^W$M7BqiJwpUuL}9gX-}IwcSb*Y!seOu6eIH&teh;vkkyfy(;rW&=3rX*S*!I_X+ffoTc*e?*`*>Dz4r-b9n?q%LU3W zrhsvCiP#onB6w6tzN2E13`Mo0p;)+1=Z{yD!1$ZQb`Db=K>4$JS{n%`Dr29pJ&1KcIv^;rNRPUTym^K zPsYHCAPFM2k_g0dY${@ctFq}1f9_6fItrvMy~N#3ECFHRIbj@gX`maezi{c+K;|bu zpS{4)F(?&osT$@v4gf^ETQl>UJj-y@YCb z7TW`(mY$r6IFAQoWh#~fS}Wmae39XJtiPM4<5OtlR1Vebf+G=2J%4M}5A4@SpI?2v0bDDo%yei~vkyEztvEFm z0E26LN%(%zAkTM%tH`NOglMO1Y58F@zr!s<&J==bz$Q!9!>(EnXB2ystHs;1q{bH7 z85kB}VV+pCUi&0Sm4&z#Q@cy^e#G5Yo+{fcWVa^1USkp_OooRu7DlK}u9?A|&4jCk80YV;+n;Ya+P&ptsN;SL}ZZYdmmDwzUqyjUUPfJr!UwaU; zy`F_y>GQo`_imM++e{KV?3jS~`(hDi;%5O_UEJA2^QzH{~oHXlh*LOdD6@XTFxSKNW!%-G!Xjfa`ju>tF`W**^%N8tued>f$jnTyK9mYZFQ~McWx-?M{%#`!@+6n0L7@D)c zECzg96*JK?^}yqT#XB9>Vu2slB^tqxD&d@T{xp5j3>?2MmU^DH9x^ZQK5RJE49qt3 znBS95z_myXZ_cE8@Pr!)rZ%5~=TW5R%nzzz$5h*GhLvt;9+keXPt7Xez!rULmSGrf zUJG>ke4-ef(@w!#r8R*UYc(m{vtoc3P8hXXV=u6hymmI?R0^E)YY=|-sTe$ds93_z z=qs4USWSBwb2-~Hfaz=e*=qXbQ&r`;_CDl6Ha_jvAsGxQSXUe7u~Jom(YqxKZ^ zIu+=8dAiA_y|{oIq?-eNn-mDR_fra7`f_-6!J!mdQwOLz^)-QxT<;Fv z+E4J+YpN*==3@B!ZNdk&0)G(tV5UHHw;!;*a0}>dZ36GV6z#9oBm!`j|JjoKFsOY~ z;371L%6paCdYQy%1|(H<{p6?o26i%(<23m5ppMYzYj<3V@;^{kmG8i5K=q)J#LcfW z(|b5m+U-$_khjBwff<<^u+*<=-=gv!6yB70Zi1cnGtxyAT^@D;CGOS33-WzHdZBUf z(#IA!#76HsXuAsPR@)^XicG`r!4G9K1hYWx7cHey&H?OuE`P|VcmSre_Q-!1n*#*R zdzL@R*MP$y_t4^Q3f!MP+;nxEf}-Zi*3BVPP+-NpLXxK+=v4n`>0lcIM{9`pT@kH+ zt|6Kz<71Eq|Fwp=+IPOXVYt&cL30Q8jYfw>jjaMKYyq)7Xy>7 zcbAq6dO!@`h3lG!jiBfp9<7+i7&Lq^cv#Mv3Fw-I3i#%#U;)3AM{nZ@;8vyeG<%j0 zaz)NizoDFiNxMJvT&CVZR)RuT^!zw%zV7vM&t?Yrv=Qtn^o$Ggyw~8(^spB^hr;pr z@M2sf?RC_iLdHSSv&CI5k-K9sTq=D)#%u!iM%^DgbQlG{y`HgCT`Gp7&!0u8(aeI+ z*%l1c8B6d*C@yd6Y!9?JklzTF?gO>u_;Y6{`XN#70SD)P4`^n1#)CIE0WT!A$4I;^ 
zhO)mU`IiN{VBA?6=Dy%AKwBx>X^Z*-rAU`SM*2AX-66QW&@u~t$SHK<{91vG78wX> z(-eGvy}$F;(hOME)OT+0n}mb&@6sbMEpXIh_VbukJv~L>GrIu4NXA~oz{<(w&UQ#k0DZ)pj9|OS6UXC+yNSV-0l+msDuN_v9~!x zOF+ll7XIIdZBQPaZ?e-E0flFDNuQS2z_*j;k2+_vAc?9`%$RU1bd&axn-z%`shd6^ zxPJdc_QS$p5)qOsdGwR7>6cS(F7WzJE1-ONz(rQHMu;?<2%M8}HD>O$9m~A%z z?y;=MJO}Ept|!){jlgK}azQ@A z7HGxU>dY;&2m-tb9ldmlKvhJiQFL4-L{cU@OtkXA8wYBkSbFSOu=N|)#%2NB5q$HK z_4G2_v<=o#F&+X0=pdFsmLAZ!r^lty&;~H+U)U`tDj;1g=Og5KHzcocut@N#gw(fi z+#Yt%!KtDE+TI`SP;r&8qUCxbh`Mvj+In^xNZ8N&;Xkf{{D-oNNh_6LZc+Jj)9(y0 zu>Vf|#1n5Iv5V&<^L-fLlZIZPvz>yBTk6+1EwbR|W7||d(`n#FVwypMAuAfL`Mj+d z^(F^@1UKATV^8S!hIEvirFtIIkB&5*k!g^6&t|8dYYjS1MN_{>$OTvZ=YN>uN5V=~ zO8Tg^I+)lrwWs&E2XKs8Jzs06fd+*Mojcfe$@$CxRM&Kz^{nw$JU2fT)O{U zSS{p!+@B{NH3&gFkyi(Hj$r89$Vymv7N}&avD==^gFeH~BOJk>V4Y2ySov}zT(;rd`)9& zL_E3RMnh=zuQxTI|F#K>WK99AdL6{P$X-+^L3C-2dw(|n_R?CVMoP7~1L-x74(w3N z#A-e#B4iepz1z>M82t*P4bR-CJ{*TsQEs)|GSgs+C=0>nRXS^VtUJKdv<|VB^ zzFe2U6(I$$r}+=9<3-fIvN8{ER0ztd z+n!5q$`yW4VyRRWVj>v$DB+^sWt7;QHEYOo?pg4}s+alr(+mHl+ulQ=nESlkYsx)=<$~^xuGoG^xLx*q7np`6@bA0-+gjh!_e@t>`uLA zFep`fa4kY96x^#j&2sStghZ)4yxSM@0NFLl>1(_>K=LZD^N>8iuI--CxLDKz$8SvS5)yZS&)09bVg_2l!7I9cv+8>6T&}Z?$Jz@xrC6i5SyuvxxNFZ$ zDaC;wk!!n(jG>|#p_h}#=GR59{~VLvxS*JunxgL%mG2|qPf;HoSv3O#X_PPlkmz^hW7fI6!C>e>S-TO51!hCHB0+GYh2k#^0t)j0suZ z{cNU8aaFYOtw_x8rnt;l_m4kryUznt`rEHQ+@6C%uMIMV_PgN2)@oV>pLkeHdfwJ{ zun9h&GJ3#|^+}$}D2{ZWE&_>6&U8Bu>)`>BWu5z0H}re1<)|$m29-}Zs*9xOK$CM{ zck>y#z$ry?DvRrrka{ldtW?4*FgTSZ5YCM=hCE1{gBE#B{z?(`z@IcA)ikpT6tZyc zcDHuJQq6}6K)C@{y!@0%$j}ekH#?ccF^w?6*xgNkB@gby=j}pQXtMk?XjZw0HU(HbGi2}oN}Jaj^*9-h2T?AeIa z0fxoFHX*4VQ0f=#@zd=ajINKq!N$-DDV8pZo)j(wl6m$86%*M2c$_R!8?S&o&2+zt zEDK?uru+rfmu3b1Gapxds_jIHf1dyDLB1}kalSog?)8w!#i27eyk{q2c*-8{9g9J* zaK9}_8f(~7%P{##wW9G-m*@@Ese?YJ(aOz7n(07TErmU zR8W!$TX<#fvcJu<+g-2n;pY*4yql}rd3jJEo|9GAGok|eF5OWeHR=Pt_ov{7LLszU z+vQg0>4P0FlcRaKoC08z2JkKHAQfampBAJu+;0}?B3=3gV*p|f#azGwI-yst?0 zB%*f^JPtcbfT29P_h~`Ho!^S;ioI9LjZvEVviS#FOQQ1`UF(aek+-Fn2UgIcQLuS~h6C9_ZPW9R;B z^QLh~l<{_g3_G{p8f?%yXPO6J`YuZ%pGzT?_K3!)`vfH7#@#EJiGsqCpAKA=X5dXL z%9!4=c@TXqi8pO^7GA;!-{&@m!NCe?@-@bBxOMuvwhVT!!Nc+=uZnrs;6~I;$d~CJ zfca(DU0>S|sqDs`?*!CC{Vt}^dPZHiek!c}6s{(e*9mx{g>ln z)m!0w`9?LEOiUqZ3#i zKMgp_oo+q#dIeA3ojRku1p!4^^%jlKIQZQn!Xosf7QEg{jY)jo4u-@R0!YLwpjop_ zRkN`th&q*r1Hj`jGEBjYrW(BAh^l50G`34w#+@8>owL(c6+HA=Lu(w5o?{4FDf%*qS9kymoaN<4j zGQss>5Y$=Pf_YK|!MmiL9Lh>~L*pwChk7Mkyj8?8C)xz}*-qSZ(dYy9TuCjnD;1z% zes5;5qzO8{rkHUqE{Am@pU>U`9bh=fZY8&*6I|yo(%4%V1L@bLX1Uj=!9X`HWtM+9 zl%U|WYVGI){g^`4cDxcO%N_hN4OGID6`q%g-qyppea$p=&IS;~wL&v>B?nUPv2}No z)x)T>c=)xT4mw@dqrB7F1y(8o4CY(XL91wq&LLSh2;&YwPb{W_d(sY~^F^J2I-rKh zv8oGv4-e;irqK?le~vDs^3(u?W;0fbd=xZ&yysZFUInjx^heEU4})7VZF-6$;h>2& zddp(G0n|^ROK|84A!ldvn%wDcVE9Ey^&V~qC^%2m(ah5XL}Y9vQ*yH5g!Q+|-Jn{4 zKdhKq%TNOu*}JJ02|K~UE^}<-UI$PyNOjsKXo5!bD}A0rb?^@P1&{dQBzQ%xrGSD7 zLcda%^v1Gi*xkDD;pJWn=r3@i4%Knw9oR3MezT|8{8K39Jt(%in92!k{f% zVR_z8Xi78sm5`$mhNcj;&aD}K-k%Cj;Z=q_ub((}{`FlEgMaoQ ztZLBkzk1M(RGS@CLnIvNcPJGeBwF0-`6}uDs<>0t${*kll8#|yKvGPdl zM20cvKSXuLHfYmh{qM0x(azY=(h#etGcgyJA$t2=%TtYi9U!I{#0-TzGC!jDbq^+z zrN#hajzKI?$m3(m5+oep;@1w4Cm6&Mg;*Wa=S|T}0_>FlvBn@aD8%-d#t~gIo7Bq( z2!cV-D8%lVQd^m54Th!z#2$k50Een5{^RN9n;aO zPUF5qUx2*FAQ31e@|b#m=D#?!i~&d#28l)?AC9SMV6RqoyAMDkgC?xHeD!WnKu;IhfbPSS#LNbpjkD|x~S>+3WWMPnO6q0jHmui}QELgEL7lY)X zko;pRd$wn$tI`7?1sJ3dg%llA$1^AUDAlmE7=wI8AtlFDx|=~iks3=&F-RE-DLwy2)OVKj@>tr2LE2GB$1zPRDl{kUdJd3I z4AO-{x{v8;Zx+F}dsy0oL3&Y0-!UCcba9@7<TAS)>3`!Nkn`Y|nBU)s9XfepKXL4Kf+&10%5d*&|p4J_TlAloS9=P^}$NsY<+;Rujl z7-R>9>>ktD;$Y@{Z!F!zAp0of;F!vHx$fd$!_q?x@*9Q5!AhWy4S}x<#g;`K0U8&J 
zFz6H5GYXCOFSFdcbLX?08~_^s$V_l#KKU>6fJV@xvDa8L;gOl>$V~h%^9`bw&vv|6 zGs%(p)RCF=U*`2vd$SKISo7&4Gue@u9D6p^I*T3U=v&+9TUcQ=?T)5@B>MWsHV)SI z=rdS5RwVLA(QJg3=pTK5fIgZ6OCSG9`N*JeV{30=^+z9%ro^&;3C{oL>lprM_>IjC z?d&a#{~g1Qp0TMJ7HU+L1Xlm?-EBY+<%z30f!Rq1-t?bY=SfGnO_Xq!( zKSt9Y1*1E|I=CVL9_g?tMXTe@>hrQSBLU{uR}p!IO+KJ)J1b)&&S)Jxv_t_qAwp^|Ht-c9xV8;?axFwSJA;t z^HRU_cp86DtV*4H-pbUgu_Wd8=I&*am9J9$b0QfP;mf%e%_jQ48am1|=Ja(0GT0yu z?v3bt3UcsnY3tV$ePSSW_w?b~5(fkkmbY&i%}JSHNM;jm{*qEBUP~xXkj@1sNPWHz z;=)3jj?Cw{tn7`|;AeNZ?#S8HfMKXQ>yhe9oS{YkaaZUP}$4IbsIPUUs~AWYF>#YU8R# zWchRLZ>NQ#dV~`ECrOA+=>QIMd@~9ifP$XsB-UI4Sdfi#^JhB&z?aUF)AFK)mtS5M{&4vsRFgiF#Kw6-=rXC$k6b)9 zh-2KEF;-$y&c8X#6`@jGzH3cUgg(y%hRMxJs%2Q<*#aV`Q~DL<%(6k8)9ScjD1hJd zjW-P#9wYNqc}N0I0GzE!O=A8Y<5=x820c;>+y=Cbj1y9(vBV$5CQPLS_I~WEH1rAY z-iY`=WW9GhSK<3VE}M+(k}adimi2NSE0nS#rLwXK*&>mVy|N0C5!rj@9Fk2&NJdsE zviB&ye(&Go{ds(T-{1M?oO7T1zR&eI_v60q`?{Xbb6p>=6P`tCJs!uBWd0GYs4_=( zUHsB$m|d`Rk^@`h^)S>9OlNY5R#rfr^SO`C0wY2yhNQDWd+xN-C8Wa+9UKszq z04voADYYT1;9SEYe_g(mYdgK- zd-gRSgz?2|28{?^{`|5>o&4K#b;oXs7?1pVxNJQI3dCnB1vLq=l^4z9HQf4Dq<_XF zlDg9?KVSMaAjr*#CFWc@Cs_3b1bu2AmQ2%Q7g*NH&NJ|1LlsYgczE0@UWE)#+_624 z^(7x0ryRb6N6{bH-&y*KYWWo4v~c6)J@pY@-^VGi^T>nmK5p?SW)h-SY#v&?$gih5 zNpThlvn>q3J^T(I3m?4RX`n-V{@e@A99u-kS?y9xfA2x97h$%nIRR2+KbSX^NPxXA zxT<~UG2uzfx{la6va^_a@x(pm18$5z&`_UCf}rH**vJ8MCKpB|IYsBFZ-)*JvHly7 z4@H}vj^3I*&w``|bXVbVaA1YbA4kHnhf%@9&$q4d@DPV5;~r`(j0hg<4ELXMQe^1E z)Z9FFOGHqKs-#!H7IbPiE-(cQLgdJ<#D0!Dx*N&3Vsbf3$mLq#qia2kNMd*d*ZmRm z>gJZ$di!O5SI)9=#~d{iAiXI)jEcSjz!rGbj^xKNl8!(Eg)8gJf{Z4)TeJOYg)84w9^?a$^8)R;P7fbSy3 zHt^DYr7vv!18IzNx&GRlz$@+U+yDFOBEH;U6^9{^F-+;Ek6@NS`VZ=JS9+ zWWDEcjbE^iQBv4Ywe}xLy&IWKnDARQ`FV{Ipq-pCtd?NJs!jGE>MUHu*x4hwVrpkf zElZTr9`90OJg;tkY(jEjsyMvbB+{^YV|+b1V6eR6gQ4;AN-HJi-8-8ayOD?<>j!cU zI@pWz+sas1*_=gs|M6s8Po0K%||jjgM%K z@Si0*!pGPo;&XF#2oWE0ZL4dvXR*@xfXCeTc(6`)t|5o!8{+w$ZcXLM+?bNatfYU= z4G1>iytH^K2hNlDXZ5wRB4UnBkC%8jG4=CBfn$p^D6Hn$6ZRbg+Y`27sRSk@*s`y# zYJe08l9^Oo$;XLIuOEe}2iBnXxwp30|La!v(acUfE(0_TUDB-36Xxy&1d0m3+OJHQ1#x} zEb6?_=FJ^PgQzkolZBt?Rehd-u(1Y03^}gKGln`tRvj;r0vZ0o0uI0sc<%vE&?f40qcyp#?=Ni4&Poi|Gaxu%bb2mNgJEHy^SA9ai*a$&z5 z##DaUlOSE|XC(@1h>(xuu3H1^{20ylkJm@14&cJiQIo0Xe<0Axt>8xN+ftY1C=qSZ zZS>t6<=#)}r!jHY;jDi(DUlax4|r=Ivtn^; zI<(&!DBzTh`J2`yI?N9X+u>m(#6F1Lxoqf3fHb7uoV#gm4$A{(_6Hp2Fhg&vDpkYV z)t{E`KlcgY#=1(}^*nb@A<&}wICakgXlPYp`^Lx+4YZ>mMTZWndP=7uEMWmA>4TLM zqHAdO2t&+u1zxt1SK|H35e2 z(yS-&1;4_X$=D0yE+~K96rVyC$$&l5c}!em(gs~epM9Mq>9HgYS|%4Eek|+gDfO|8 ztdMrGu3znS3hZ3CP_OoG)0B-iJGS@AESX#DHyYH_E~Rqi01WNSl+GzIAU+y@ zje|xAk^Qj9<9ZrSDbK8%kCIqM&_EUP_#A~^w96InL1| zW7oQ=Ynv|DwDf+ua5KoLTZ?i+3;Ob)e*Sg z$+w3yV?w$pkBO-%FCY#liGrUE>Vb=+%2{k_8cmusB&bp(Kv?{bHjiCs5&DnMLtNa> zU=mdvV*v~M@J2W~B!~PD_z_9oi!R%OHT&HNc*PHBp~l+CgYg^_#uyw$ zgETrzxIb#O6QAY4bT9Qwo~!u>@9&@HqGIBP8ghw(;8GGK)9TdY9#tBw(kfHJe@Y+y zNzhZbhPQw^Zr)5+WXvv*?J9-TB#ikj^Xi$x{hS-$#L{&!tDdmbz##qvDpNLKVr?co` znK@i_W&vU$k5YM)4CZk~8sv_A>c^fwNRLJbk+vJSZF?8gd3wY<7%EM>6CzjwD$;Ve=$-IcE0cOJ76W8Q5pUq%td^ckhI+hAbUo+v_1 zhcs1-B%=v<$e{dTHS(@ba-%4be1&!zEjYh8>f7)go!)Bp$M5ljS_j=7{c_1lpU`i) zL*a&z#RO&ejE%|zHtz-i%vI9^5(=x^}2e9Zd-7BT`d$%Pfl$8cfA8G zy+6phce3tOJ;~y9AKeD}*4m>QkzMc||4{8QPLI(N$8&u5{0m${w;2MEVW7Y`7q=** zODPu47_VL0N4>m-nqPlAM8#zPnKlbLqoLeGGx0uABE0j(W;{v@XopuR9S!d>Tp9ni zSWrNMu_>#k?OfG|S;@^B6=^a|vp09X@9Yj*&AOD^(m;ZUm=#De-nK(6*z3ADb&0UC zh`&_sRB;vWaV;|*dGy%&+@3kwyAF}UtMV3#kHB5!i38P1*z8ILpB>9gLhJ@(UQh;& z7s~WhYUNhc0;*z0%`x*#qm-WX8oE3bFBV6`S9I{e3SFk~7>Qv1TjFG6LU>VZ2i_%X zgf1Gap{DsieDrDF3iY)TJ$qiXDYiahs>HHMjQuGeZyr5f2Os|}i@{UG*t4C9tT!68 
z*v%{SY5&~#MYs=%ofk!q(Wric_~Y~s;OAmfGxtSS}4C32;>_%by%+_8U1nD_(Lnmi!jhuKY%(!sy*?rpGJ$c4XuVwTYygR#?bpX`jSjm z?RnQ9_(+OVbWyC)t;((DZRbT(X`zTry-}isCD8HL?L_TdVB!bPtMbZ;A3*LxTO-dR z8s9LHUVHjC>QLm)kaF=KtX@x(TMOSpuV^(_3Y#CHl}+Y-yWW=QW%apce+hi#;uGa6 zf!qTi{n}6K@DVQ;>I{LbyG z?TQyg3-V;mjvo9(Yt}zUJuzBEH?AuY_V+wNo93c9a<@O0IaW2Maf!~Lg;&JCn#Qd| z+g9si2@O1qDpYwi^Pdgu39=8f+Yn(?1qE|+4;N6|aQX^2mn{gp^E^zp7opY~v9ewS2 zhBcWv1z7UpOLIR37vC3@|NZ3pUr_H+<85e}hCoNHT^+px$hp3Bm@Ykr#{U)QYmJ$O zv#fi?Jnlc?$`jS-O1(W)T`0Q#wZ$)Jk9H}$9D53r8u=A2UP*)zwDgd>TwuK-6o2R2 zUvFY;*tk?ik@FVXc7yGSk5)f0tqVEFy0IT?FsXZ^tuSu=1^64@`XdV$qRHzyG|GxOZH zdnMFlFSgq44q@)oms=386V$5Ts_gdr1CJTq z?O#&#p;{j<_^+MoM+Jab3%^wRgC5mdd1MK@A>8PwUVC>G}S zK-P@4T}EXSov+qkwG!Gw-K%SjrXph z#6n>j`t>s?Z|jr1LOwV2(|KXB8ndtH^BXeNiH_6Yf7g7%Pl*_-P}=CRq0>6?%N6}P zUo?&~yb%BFSp7gWP6(*!SXaRH&`y3|q8}2XSARxdltJGrwQ-Y37hkl%G9P3 zV4p=;2k=@tfTG#JlU?eov=F1mgl&<=M z61s|h|4o^;I(MqnFEVtAWO5nU(X5(xPG;vbj7tBAA+fOh2!%Chj($8I!fwteL#nX~>I!o0oXMP}J0{Z) zzIy(E1D*53z8{T%%h=FDBxebAie`RN6jy=9hQaqPrd1dlkmOq(T7>)bQS8*=Ly$_) z^6GlgEXr$pboaCP7sy~uG7L`LE9KkYD*dS2AjTVV&+TUK$6zoxXEl< zb%)!a_o>b>klQC|nQWZIXLhnS{Qa$@M1IPKn%{UDl5?>S37%@GALe^Hzwa2Wc_FJa z{&xeum+Ny+HEzItU=Z`q7P z{tA_&3i(g?daqfVdKC}3a1tdPYwbh#&yX2;OOJt>!0q?F9Y>Jv%Q7lo@VVIk)o=Ev z#k-LEY+vk7@I2bzsc2W7y^f-d`IBrCpTU0U&A{ToGRoNXkdyHdF>-M!nD5oVI$B5b z=~UuXFA45*L$gch0^Dh6&!P#eM002GjOf}tVL6xO(dq?nVOh!OpqrBAP|xf0Q00&W zA^3gz=}V4jv@p-D>1MVM1j-Mn|8yLLv)kv%jC&T*DH8(o^z)R+nGKVVRRV74S@VZE z($DJ9;EcrTPRBkWRaWNR8LMejq3&)IquV#M{Hi~tsVf7jmK(rfdhQRLC-ZF4HrPf* zpR{!%Pw%6-Et)towlS1#D>=h*`nt%kz#B%}9wQ|l4ZkPw+#OJjf4fgfg#^Tu$2)f} zR_sHe^VLsPTb*#w^KDH|FQ9DQ#DZbrwt@(`=zYpY`yDhac23TEXb131>y)3BZlY20 z>o!@%bLjZx8|d9)k8(m5!Ics9eblEzsN7w+9u8a)!;Dz265Lp|T0_I4aA0`DSJtr) zK%rhLxmi&HH8X@AVe*z@lCw;XZy8rn%fE#b?>^xpt_L#@*1GzjXjkLeRi_{5C){7M z(_M3b^feEM6Rx8I-*q^gf6c**8tbeG;UDNhM&7eZk9KrVIVf7?(sy)kos%{ae^{u2 zT%ez(eHG1M5U?q<^8qb-X9H=Y4Op~e5*`RXgf~1oc63I|==UX_&4KDe^h)IQNQ<3W z)XC_fqOr&xm=_T=SsN{&Zyq`SyPUpN?zR(?LMOS6A~g7lp)E(~myNGlSuyM9y@6^ZXTX7rjy*V?kaed)Uf>XR8)WI9hH z=Q%gf3FiZpNBK3?8itS2DqFCJbgx2lKwIX(A3}^;AzAC^w>jwjv!~?u*94lLg(IQw zYti5JN%z{G9Kl$=ka$wd25MSfGVRDsjrk@9$h5w{kJ_or4fyb^0LKfXLEJ?u#9@Q8 zRi0oSoghrzS~xmH|HCM_{2wq;=zoEU|Dit%|KDKZ8O5ig^diS-{lTSIYb(Tv3JDF- zi>(28C?A{=B(#ZYo-gYA^HmtzdaT`9-Ej%&H0a4N{jdd;HOfL}eVfqMQyXic^&2Sq zl$<0gW>D|Yl0l&xGicVhi2I1)2rRQ7+G;wl!(f=C`R`>@Se{s+I{k(UA!Zx7iF~BP zUWR*h8J^^@d?FNBJfX}c;>uujMnn6Rxc21P59g^r*65j6w6K>lVUx)!mu!025d60h zX|(zyC@p@pY@Ng|>XJobeAYk+oA2;5C4DY}FjBgwXc(-4b1f0q&H_H>oAzWl{uu{$ z!QDlAMVuB93X5p&^dm=v3obWnN__=Kru(%N<(IH<)hT@qM;_$T>YI*!gB?g#UmfRl z*@R(XH#SY43)l_k#>_7jyokH}$B2dIO&Bv_7Ov$X#F#h)G;b~7Ayo4ttp2()D8m9D=&IMUaK_HC0OMp>b*?XP}7FYmjX^i%6q ztGm->%;9DF1lD86_%YtknFY(EY}R z4*R*;va%D#fT+daV0(IL0La*uvQ_GTqqw#1n48+CklID-5XE!f(Ur>OriIrF=$(4! zP$i{hR7~ox`Jmt>tkh79B`NzdGElMK<;1)R<5x6u!bM52=qF0`S;IE4G5@Pbrq~(v zQyyx(8e;)ju^7uG;ajMzcy7v{N^xwIyqnVAS`t}&>=FI4cpAc!ibUtWp2DI?*L_6T zIIz#+`JXSwFe7{d+x9PoHXzE~FzLMh07(5M|F*+Ig=Eb=LJJ6n&~!#RmtR%`sJM7t zY(M}Rc5MMxWpmt6^50UY)bLzDhR+c7w>JTil0Ay13`uyft#v=5fFF}!vmU^|C5ZT! 
zNb7Y`FM`?N$deFaBCH`JkN#ZuNcD>1k$mJ;8_`9X+l1qmv!bhoY$}`<_!Yj)`EbpT zA9H6h{2lKmfCRmNbuL4u7;P-vPY{~jL0{i38m-B?gyk;b8mB0DknGEKO8m z#h0-TF%&aDH;0ZIotu`g+IRVV7!omw-hIy0_%JofHR-^tQRHs@`g zkkySJ_1x3{VBYA8r|l3SQZG-kD6M=Ei_>QdFS*N$*p=Wq6x}-4o=kDDBf{N?80`&>nhC;#KaXIe; z>q~EZ%)Z={G$>9H(#eh={K;gt%xX$}(tGdC^z-M~1kWRO|h8ntHo z5vg47R*j1h{1ZG}(=Sx(t;524I4SdJD%e=`NbyPI zBl+t=61+D~W4A-}13up&#j*viy|{Lt9C^N@(L2TZ9R}(=)k0_I(YvOS%Jr2_rP`_C z8KE5)%R&OLq$hA5l_qHxq}CaGh+j&(e@pCDoJf*Qc7xSHXt}mRo~_qA9+A4kee-}2 z63H)KVF91d@L{?KWm}rQ7m(I7c1P~KCm`|HVlLgWH25f2_Q9%m3&j7Ksb@`=K+F!p zUPS&6yuUKV)y0ruyQk^Re3nEpgt6!FcCrYfd*m{2@oX2?KIq<0pl(Lj-{kajR0?C) z1*F49f?UvVX_dRAKS`0dJ2Z#w*3_5+iOf&EDPAo4YbaOLSxzJ+Vz(-Hg-sXoqC#U(hF#Xzcv53!c^Pk8-JmvCN-eCvX4a zM=s_z4c!RchG))a@QdRPL7OSxzKlv76Z@)6=gTXE1n;ih7kEpA@V#B;-I>`0EUL}~ zDHOzRkh{63ycR~hyu)H_-KT+{g0Y1x^BAQaoOOKAB#3E2^76z0AA;}g;Qk@^7)nEi ze~e{J0+!=Ua79Er)&ET(WNm=>?S;zo?uyrz9t{y zETiJ~RC*mg>1;Uo8slM%e$MJcoR6v^x#A{#1)N1ALcHbYrJ5@)Icgc`Ox-PRef8e) z>3Lp^E8)IBR$adIQx+y#t}Q4dB`9 z99VtuL?6qObdgHW02$e>dAiOF7&qP&DQ(q>n}Gh{H^7~ilg;yYce^J zX2rl?Bo(W0bNwvGv^gHaU*>4nNymeYkcXh5ZM?{aK@x9-<~&&Q^cg5J?4v@n8}Z#; z!WdQU#P$9megt7@{iSq?7`f6bT5k1k9+oQ9`A?OJV4pPKh--#lLWJ&*+<0-aUX>2Y znBrgyz=^)}Q92h^^vtsVkL1bvA=em`^M2#RxlV=-oZW{zN}snbQ(VNHHJe1^Te(gW zE0aFo>L5h4AmO7d{|rQ%$RE9<{9Sz1*fgS_R#9xxr7(y5RqwO>+0Q>B#LfBiSPVFcR0@c}# zHKq4bV#bj-l1BcNmkgbF^`)bYN_gib6~`EyB~NKC8L&mV7P}F;(tne`Suxd1#MXDb zDOx}J*ZykkYvC^z!eoK7Lf8kFRR2WHi-;!gtu)z+zi`#^zIb8UD!7dCOc&QfBkg&D zcJ=LYS!?B;MUJzM4YhH?n+Ip7gqVemCQ_iWr=L`AjR~IyrdAz$_h}<`;hK@?#FCc2_QB{96&t zU@Rfh&}-f}<1+_IA7mN=9|&S`{zoqj3waS2f0tJ=_x}LHGEtKM^f7QA$FX_GC}4YP z|Gb1dq>&D}x9_>%Fe3>cPFu|VIsn0R%E7TjAxv5@Tj>ph2*MHPSBa-pf`*tDp1VKr z3+=jv1eYu-prZ5|eu`_(D3!q(DVsf46qn5Snb=+z6~pD})cK!)>?9A#L*`CE!(xZX ztAvEezrmWJUH@q~Z~UOkVdlgsVnv_O^Vk}0KQ3NbX(LBSU)mQx(WSvYETqA=GDb`? 
z!ZDvR4j)-v46NK0AVvBogNTr+LEw07d^<9W74i8g^3}Z}ob$i`LJxz$a1}H_*+xp`YW^!!UP(+O)s}U+F5K2aX3l8BMp3%Q@ex|mVkVY8zl4RZKK zId^}G8~JbhkJ@3`_)47EL5TPJTy`;R?)Dm=@);)h@ALg{<#?S4FP5#wbXi;EB9gBM z?lWZ9u)CDk^0oT#*jjJNelpTmz>*0cPuATR!qVNx@8Eoykk@l_DW_!SSowWQMdfnsWLL`A_c~X|5fP!S3fZSntPl8-*Rn(IvK`VWn{ItyHPQb2K%;B7T)TofarbXE*D36or9`0sD&|>R32M4bl zMKA(=>-tvrJ^4VL=7L-+1!Pgc$0^g35^FN|``EUu(Xj zw^|w7$dA5v?!FR|_=)0I#xVubco?c`t+R|;{D-6L?&5sqq~!x=ssD8M|Ac6kKDm18 zEc2iH|HV<3Jt_GAaFk6S$g2jpzJ&}wKZ=}-?_l!Q#kz_0T)^2PSKG?2f!-~LNEQ<} zT<48U!Y}q2z|+FMonD;{#VJ(ZVJ;Z@(!W+MhG)QSZ!_&U_iRWjo2ED#2ymlhou70Ub+d&jWw_K9kk@kXvatCG>i7lI=JvnVP7&WS4hg&A0HrOXmfHyj48}62x!k`qGer|jP^{$7EjMS;nCGa?a zaVQuzX>wz0MV~?yP5R8Up$Mp%3t!kCcmcHKMZfQ&A zP+c|))()&Kwz60tp?V-K?z$~xPE6sdNJ^ph>*7(*zXW*Soj>@SE&@pT%tA(Y{eeU{ zz;V_l23RQeXDL)-AOT+x^|{E3d!hOzG&;fs%vUK_aAmKc;T8M3ZG#S|`Z{DP>A2#E zOzKGPkG}+u!KzW`5HnnnTD?M%&I{Zp0`^andHE1hG1vCuv#R{%9~uaQL@e;$>DuI( zvH<(6>AqWX4c#k)T(0CEkYn{@zN;f1e*Oz`eDGNj-regSdL0rEuQp1F z3UcjX%WyQ3Ncasjg^1Kt_$GmVsIAOMh==@50?O}y%_0Do)bff{=oLKMR5?~#uYfbQ zyR(HIL2%}>T}ssZd|+w8S%(jn!y}&>BQ&T24q5ujY+ZFvxTOw%oaEJUR6TFd+mji< z^3eWQEPfu;OKdt(3%-DV4Vyz(X^WunN&w$r5y>%l4tK6@M9A{;kGy` zkk`=};yfAnZe(9AfEA3<$oTjTu)~ICI@1MtIu+?4mYGmc*(yxNb;ZGhbIX6aJNx8$ zd|u#@`+LCe`zaxE%&s^>zq5KC9nrXM`3G+!Z}5W5^>N3$=hbj-X~=kjvM(g=eJY3> zPlx$;Gylr${BiEYEpK`leQ-=`OllXSUP2&=&|Px~fY%1hf5hWsAiIc_Un8m%F5#cx zNw-~a^o?$!4@v!CGdL>UHlhHm_d-igZyMsLNWMEO+1cTi{#k^J&KCd)mE2>8ZC`j@ zj2`qxY{;?F%8JLFNrKCYEL0AY_R!KDa^q7^7A)RBciHG!7MzuQokt}X0UbS||GY*5 zp}|OZ`nFOSMA6(BeKq$IZr^9Ce3_OG5udyk7oWccia7(R&pj7#$4+^kD(`c^LcK_J zsE7a_ofVRFi*beM7bU`}{?)M4a69-HM>5z}x4n0`9RttaGF&}ah=P4f>Iq_@mmr%w zWzgB30>viubF*gZxC{Yvi&%SK7#?$D6>-i5#>Wf~Wo>T)=h|){Y1)?D5m~IU|KTgR z2GdMU{vkNgJEMy%>7h8c*IV(;x9T8loJ-1>Sw?=^NTD`QGa8aA8|1go*@Lv54TXk4 zIT$|eizdES2+}Dj@xBaRATS#6Mt~*}T3x>!YN#rKcaOPhFI5Jp@E%fgTyh1IHY459 zr7Q@byb`zkJ{9WE$}kkX^N=SpMSEm4qTu#ezgzLoS3u_d!S>%K20gt+4Ma_$Fru6A zo~XYDPIB6iwuxgv8Rq!R|3x$CT8e5d-qZtjAy)P7eHR>_b2z?*ag@`q`R_dpK zxQfYA=BF4q^Titx@_YdEL&2-E_f&8a&W%C$oZ{g14*k!KykH>K^Jr@j&xAui-hV7o zs*wJ9I!k>c1}Yrx)4yeY3#YQ9ZmH%z0|TiPR=(IEd8c@bh$4+NxWzwdcK9&_SAY5D z#|v&{@S!XCmzaAA5M{Y9OXAbZPyYGPF{1GjloAH(uPnR)Po!4SCIv3?^u8OS#pxLp2LK(ml6!)-%d+~v(H zBx|w3knP9;E50?bt=`^kBp!krzJ1}#${Ur)cg`ltD%I@zc7@_mJ{WvoVS!8gR5augXQ@AVYUm(kwd! 
zOsaMRyESsaP*>jf+oiXFf0rYxVk`%u$nU%%vvoY#^I`BE$QFHs18jz+Tx-`XaEsfpLz^xbY~NJ$xnaM3C|IV0G4)P$7_zsVEhE{oNr7I%;{x(Y^RNapLDu~c<*{n^bF64 zwRJnJZtt~Kwe%ncl6FBQg(c2936ZYGa=>7e`CRN#FbH3Zr)5V{J`1GLth4yMP z=#Rb*+v~}O53ADY{E6<+&&<|%YsyR>T@&)}`kN2FOEHwSOd+`9RWs)cUKpJF^hd$Q z4F#E$$?)IboaA49F`0EBPJu{2{%FOAnNTGRpQba!P4V*p#PRxD>a$b%5 ze6Pi0aB8zR!n~9Z{pqvN+}oUuh;_lS{=BF#wxkYyd>FI`0;Suz$M6Q zaq`{wSS=Ln*>Jqy9(RWhN8nbBECkGRT7iGf(Z2s61%?t|j~QgU;_lh*E@;K%gI5y7 zTC&{(iDF0o>67trSW0SAi!OqQJW2PCm%%{)$09@KdO9Qyq_h`sgupX?qbzmHN_esM zt!$nk7ry6@o_Fr90-vTj(Q`q;xWvx_gJZ>Ya5HsCrzN#ho+W?Q)K(}39(Jvp#c#KP znkIL=5`8iBdRE_(B&Y=<7k_~ldKHl8^jBXkKNkcvmANf#Lp*8LlebKJLT4ZB(j6IVMgs=SZE33}RTF6{{8va;?6 z#?60$g!EHWBP7q{t@ZU^>~cN><+isimQN#rMR2)LWN`o%ql+%`6mM?Ra z-_T;nMCAg2Ng}H-IWrvc?7i-a+I<6p(fyy_ZoGtr=M)8x{l9>%VD(=up$_=jRA{%% zIs*2C8vlmn2%ywQUhXu#3(hb)80RMK0KqNF{2zNNL599yiSBk5;J(nO?>y~*gX*2@ z?bM|pR_&#xx?2L18lHqpX$`QahCTJ8dxm>tEAn2R|HTP*tMcY+XePwdl!Z?RxdKD| zi=-oztgNG%rY5t9mq#e=ZyM+YZ% z#f6u3uG`lkJL0pR=>(U2$wJlCosa1t>Dsi|+2e%^C}B!8RIP;QmriG-{o3IhHu=cx zkAgg(u|NsgnPj+p=dvKBVj8IF7|IIC-jpwvYVf`~;tqKxR-G|CC*yl#GCLsQHtu4- zti=4gkMcX-N4`c69^x8xRW3j27X`Lx#v2=-S|IiVK9yhUjNCZikC>|8Ryd5mv*2|3 zJGd%8k!ku^8j_{hF3cvsfotsXUuuY|foo&CvoT5>NBA4p?XeyM#4)0HSqhxAhvu8y5l60f}!KG%e}$b5PwPs0>o%Q}~O!Vz2_wrp+%lk2(9 z9Ck8+(P*tutf&L<1x;{IO)zLt=i~OXd^LGoz@`GOrW|LSu+B!>=I8IOXUJ9^K3|CmIu84{mZL| z=^3c$d5iq@-Ib4@OwyyJZUCF5E|NRk7vQmZv9G*B9nARM8BkLw05q^2k|9Wj) z4lhOmd>jfY;me0xW_M$Qz@R1XWpR)e&faI}kVM7v1YXSM&N6xlK1HJ|GrHQqRKsXn z7^sEgc=Y|>ixYjqI>cicX>SXW@@FGLSx@!>yNw=)c~_x0fBfQam1H36;5aKa9SJD} zcfMQ}j0cZ79R+tO8hF!PR;gA09)yIKznMI8!HLB@YUN6*0@<5|Qz`LPz(mLsOQz%} zuU;~-7V|U$Of0MV=ULO>sjYJbCyx@;YJN&B-1P}phM;zQWQ@~aEh>sNWyaszo8;9X@DBQHD#ikr7aQ!2_tv{IUaZ%rJ^^Z6*Av-vF z+hsKe!d>^1O@eb^!hrb)Nn<7a;uGTf%yku~v(K0H=tUgRW$=E8i?4%MPpil!mF;n% zT2oV`yOB7usf{ryvqm_$JSM?E7zuq(2#4QXBEp?Xg($_8*Ko&(=lVeSTW~BikIuhU z3U}$N6%(Emz+~9fyID^HVLED1K4d!v7F2~j$G;~4$u@Z{yJ;32c+YpKi@$|i*Q@px zqbfl1M)|9JYIj_+Oy1aZS{0PA&pThyvxn!lwc(BZIiOUG(~xT`1y}#TnwX;;NMuXM zcj@^E7HZY~iHa3aZCv<k8j9oHXl;ttP5%_S?m0+NT1yiRF);bhjb-@Lh5DtD+>o1o>|4yDHtF^|08 z$mc7`eCfIw2M!J~r=9oT!^(DX68Ulu#AE~~b6?JbUW1#W_+1h3kNvdl+Qbu}An=dq zAEpB)ajxSxiODdLGxyr4KNPy-*Ih_cvmvEy{U}u`6aGwkYw3pI$+I?HxDs{b2B#af zulpAI!TT$}S{KdQ;1X{q!{9(6wDzBhvovZt`NxXRla%cpti0Xp(tVN*Q<4QSWjn4g zwm@u7I(P>s(P|yX=xPlKUnB43CbvLI=U<$vLI5E+nF{>k)N$Ud6+pfp^8IUa zE)=HzwG`m@f{5f>N@TJIII3l<+bI%uFseztKsp})Lr#wtzC=pGyw%jPlAtlJG{gEu z*FZY_&Lp}wm30R)$3AKu$z{Sa&ly>bXBQ!rqN(u4$-Y6Tb8@poF&1=*YeR`+Twr|k z-NSy8eR+E1P8+vOIwSKaDqm@(S@o-D6*$ zLs_%dhZ?19sOsLR{F`@_jP%1EFVz3s8cF^L14O; zYIT*d6ao`Ah}Kq1p^X2%Tx}C0ZqS0tkoHd!G;&}~M3=H)W8>zWRnT?ZaFSbVnaXK;oNUY*2K)jM+GWIq;7!xNw;Z}6L%pg$yMFeV!&r9pgmqw%S- zaG0Zh-*y`D0dycXybyQd*NOS9zVYrkcnxWaJZldDqt6CXyzk;bjHkS~Ot}gM+^09( zSAubL#Iyb%^*#W8Wx}mCwp*b8B#Bmu<~5`m-S>4gDTFI4R=L+T-b2I|y?d=gDV)yO zp%UwP1uqHqhq;?lA(zEM%ivEWXpWGe?Q1r;<9a(T?@m21x0)EYz$=C7efEm16FYKm zrNK-w|1vIH;4n?kCIQ68IUnAw;>OJoMQdaCba77hG(3rPnQ-Mpd!zv_N1lgbN&9F*sSRtX8|tmH$@nJrGUGAdHD6-EN~C+>$#jtA-`s|E#@Kg@PzlFU@%Pg6wbVS zAJFDd42FIa+|N$*!lThm)TO%->Rx|JQ+U?}i))enT`wBoJ(J<(m4YPLY^*g?(u>87 zCeHCOl3T(ow`_@6MkVlg;uQWhM#2&g{m}qrJB0p`wtFJi0+JVP(Fv1im@b&)aG!qz zsxzeO50*J`kKNM!nstJqF?3k+xqT2!8Bi{c3aEn7PPC)*ISbsEfaFIpor&Q8jCbMf zJ9D6wV$8DLdbmV!5_U_I+PJsaxhhmR~WXQNo zZTInlJY0LYxc4Q}9*zc0AM3W`;dIWi@6B0x;#8WVs+#}Cftw}8Pa0ZjTmkVSX3Lie z;=NuuJSOkpUWSqQ<+wncfz5b%v;hN7`^$`?RPj5wCUeLBd2%5L?8V*%0i^$pk)5VoXbMNXV?rlzEvx`>t>6UGHCa-E+>}>z+UMKKpr|eYSNH z7OJS0!qqGZH@C!Z*JS^Q51$S^@msA71o!f2dWSnoy$oXbb_)|{C7Bi1?Za@WX~xiz z6p!nIzqntD=i}+Q^GdArDbVh0`Ep^uAJo10OU6AT5x^*4WhLc>ikPa6R92bLJgw&2 
z(H4kIN{YAIpF&`rDZl+H?HM9!SZ;gfuOyV{&gkX^+=fV!Qk$NF3uG%BJT=<$KsmPU z=hD?c{EUi8tEW#!%JLV@;wLGH6u;XpM!17AbV9%;q5!tkdb-EiRfv@n6QwCSFJM}G zZh_%$HNsp=u5RSG0Q#i^59H%qiBuKtta!b67<`zzK@p}(lsh@U<2Mi`@;ZXMJhD1* zu=D(k(iS&T`UUl*{(}#Y629(Ie9;MQhXbB8I;F#_TcydoDi>!t6R873wDH+dc4M@U z9W<$FqD!T`Nt?>{h&D9k;(LMg{QRK+#2md>wC6({BKKdm&}}Biqn2HFJchmyLU#K% z%&@vb zhkZ6gV7q3?;^PwxMDAd5)Tnw6K%tTAM&@bF-aE@aJf4C(W-PuFbami-m>w;4C=-is zI-wO^joC3R<>meOLUk%&R3!C9VaIax>8c_Tyx7 z*@Ne0RtB6(cqpIQ2P1yvmj1AmA09s8sq5LK1dBPB)xBJ?C|4Tn(|hBCyf9JgsEfWB zl^8M*72StWKEY*)b8%ptOrR_l-A>TBYv_$eh2r+nb(zN}KcJPNktIX-DoHNLJj>_y z3;2u0E97%}BfYsnR`!cBI@6+u-tY5=Op)(WgP<&7=5Ks@Vh4yrf~L7{BQZE2b2D(C ztuE1+AI@#dleZ)GKP*`WP1i`S| zUf9697G3>87G{kBAXfz0pBd06uAUyc-|d(H_x*Y!xk>SmG;P=~yjBRq&BAeQOi>W( zDsSW-jzm_*IIo>zA{0D_l9USb@U5hJjzzKp-&X7Wj%0?yewR^QSyLEzBW#d=n~{*> zIef%}?lm%QSbbtLb4FD~vy_K#0`mK32DX*OV&ZX4eC~iZj@7=Epp5u}m+d!bw8{Uc zFim&;YH1`cghkjou#@wOhMC8grcDW9$`1SYPT}~p#k}E=#xU~xgd(Xs+>k!7py7MO zfv~4KctdP72I4kV%slbS;!*B`r$k$>5?c@Q&OFHOLJ!m6;_t1GNvfMeL^4H-K@p-T z!K$7N#=g3;FCriD?yTp1hs8*6b!y+v67|JQM#-&vDY2;CI>H!le+{uFzg@Xa-yr|_ zHUa+NCn)I`x1*AdMS^>WewRidnV;UeMp||w*%jv)a{VFmz}|b~YY*lk+%c!6NwOV$ zMzu`mQnT@oRw@3Em`MDWnD|ePGU7sryH|A?o*>}(Rl0Z$Kh;T2jlf!=Sdu15U9FP8~39c^6R>s zzM+2Ti+$5%vGg87-!#Ne&Dx>Q{!qVJ(=$+;?x24;mxxQ@8TsKZm2l3utwW)zObF)l zeht);Ax_;59;|$6NpMasef_HKM^MO4#LCVm!&q~kU-vK>Or_9CQHxZgaeR*tJ5(TY=^8!uN?hsa-T+~E1c5Pf8J{G8lv}?y|oOV;NGHyjhSL5w7U&iw+X+-e)}xX6K3hyNL#%A zA+`d#-m=pl^4IXb+i$nHX&W~BGz2@`8^@ltiLw3%?nI7nNKEo`UBXFfjyj-1g&45z zu}o9jM?4P^vEQK=h6IQF=%PL3q)3KmS#7BV2UtH*r`5hdGgqeI z!fFuSG6PjYuVA-etJ49#H(u9>Z&+8Bu2U&izl?!3W9JNaIt_7u`;&UlUBkqVN5``EUk zvn`}4xy7lH#s!jG^@m=u(mY(av&j6Iz8VuNCUpx%?zo}##Es4-6x%~@D!9vd6JBGF z-$~kDBW5Gm(!D8k2(wKG6e_$-iG86Ow0Fr#!vte;rR-Z{J|?l6hw*VBHC$B!_&&Aq|98K%Yi#1NfG^q{kOH_Nza8Z@?0!#Ar-ES z#51$@!()45l;8t8iMKmakB!Rof%^o%?2nS4C}mSOkyzS594>cYdXeK!+^GBF_F$MG zSZen4x2u>E*NyV3KWDd#@7hNF=S8d%p}DmC#KDu+#Iv%s#QJl#L|BYUfOd*9dR`eb z`#2ea+Aj97UWFwTg{49_JbR7yH^*-e?eZj4HD$D#dYuTnxV;Lu+&u}giHpGkYM#VI zOPYYkxGPxW;&-!)hGG*9U8OMTDXh(>Jwx|pVCluyC2O)jSNGA#w6Ar7-BGjfQeFI&hL_S;g=Ta`$3D-j%uwjiQxm~}z|6bQn8s$egN zA+hOM)ngacGlUk$h94qR4~R3D!__m^Gm!FI=S=8O6*7cRnjY{7gooPiSDGgy0X4pt zqN1-sl@aoAQ)?FXuvcaOPz}OInwsIum#ZQ9$4^85Q3q5c&uXYIcf%ogw};4F0rsC$ zijd2y#m#zwdnJ$Z5xA8_I$|ap?CnfFQjM0_pTfYPaLfRn+#Qk1My|wA!_%Oe6*t17 zb;GUJ_dLW0{n{g+?>r%fd6XH=O&gIJ%UJvK%P4wWC_f9em!Ms?V_Cnc5}p2KoQ-jo zpegbeJKZvZHm)!aRs8;TmJttWFZ*63>0~ zvgHWf#Lc}OW?Y0YYnrlSk|V*@v%o$4nU@It)ie`azMpU#qwv)?@#rcld2@_QKGDOQ8wu=1R5mz&NPUy#q#Yq} ztvpb~+KJ$#29Ba zeAtQ9LLOaIX5`HcOIxFswPT_)Bm%?dU29FqZ*9hSC^^2HnX8w} zEuh-+c;qXJYHW;1Ki(_e57qgwH*xuu7?u?hw%Xr9zK9xh`THXIe7L5;YkwyJ`W;lM zp7fcp_T7|jV^9lwftm|D=gZ)idEG4|Jq;@l`T0sDilDSHcF6eE5XPS$IJ)6aE_iqy z&qN-F zy8qWHb6g)IP0P&OLP6H^k_W$CiTYRBv-yHr#MsqmG{&fjAe3qa@mY=F)}w&gw_K3O=C0e`={=O|ZbaK;*Pr06S-34UxH@B!iuqaDI_r)ys0>^@5PP-;pUU5^ z*4@fRz^0=F?LsCxmQ;o}$2a1EKc5Q=LkXsi?0@YW?+TG34_cusu4Gd87l+@&8Z7u| zh#%a!Ny5&uFg~lj0ffu;&@}luqzT3x*(&=1Tcz1|&Dk#_VDl)ImsAn5#x4#ya1B6s zRr&P6!F23rUdRc`{fNPxRU6l?Ct}0>)@9b{G`#02RN>dj$GzsF(4P%IVS2@g$zPxX zw%!a+7-k0Gy`7$AR+^P82Bzv9Y^EfBOZjnx$?y`iIti7b9d3l{s!bW6S_k&Av()gd zPhzh_UG*2y8Z0F_b2;DtfJRNHcDmGJEbC3guIdeAQ2%NF^!;4CGYw|;yVMAEUBXT_ zpA}5cv__b&a3gA%ws+d_1j4?1_4>-t!YN;WOl{SLur}Tl*?;36F!ZZ=Q@(>zhaVDQ2fIhk(vFKK3l`T)+L{xtHck#kWYOY-Z zqzb&eZyZr%G6y4iPkkrpQfP(nB~#6IgGyrh1=p5ILWKYG9L1hg;#XsDmI_S)(Zb5! 
zmZ(xeP_A*u3+IL--?ruH)|y;M2Qb{abT1pR>J*aMFB8GZSfDH2+zq-?S%w=P0|>t; zI`u;+9$a<11V%g3kf+q0wAHi-8%|A~WYn*Pq*FrmNPjdQc<*?YQXU6kR^|X7|8j6I zTMx2vwPD@ig`w)z1Z41b)k!y(p<>PbnM?I3u4LU&?spyrNs)o>=}r5+WXeQQc?=IQzt%h5$*`=HAm0)w)c%jIZoSa*Q`&*dLkyL*PaJ3n(keG!@jCX`j z!1BJpT9r0Ee1AQ#keJOuGb2TFfX*U3OL^5EQdXkpx#2V8jy~|zOZ7H}8ldaBf$>w4 z4eT#d@pIdRq5W7#US&x-=HYhuNOTvGviv%vVY6s($D;t$%B zgl{r2H^UAL!GibH&9go^aP_!l{P1`YdH;vA_mVhC1Do6Eo7DD_erFHdw-LEY7Bltg z0=5g2T2?<4y;KQ@*v%O?+xP^W@(<-S8B523d!yu{$j4yI_YYgNjK)qDwv5TLGJMqW zb_vtSLfvK6)L*M*xKvu{A;p-F21EKgru*g)QzY+TF;NP(-j~uJTWY``^tm(aQ7we% zw?AW$8~_LP`_X=Z0%#iAcu%qv{RgbzAFv|v-(bbvQu2SH%3`ggUTWVQS(g`B{bfH0 ziW;xY{N_uLeDk|=g)#nb?d;lWhN@;k8T67}YG%p3Cwd)I{TPOtpwGV&?%XhhgZf;X zTIUvEmLla-;n;$a+3>yGm$oJ+YK*^bYoY z-G06P)mVw6O|_Y>LE1Lv=FaslG>l&1{B*YqKXQETe#@BpYgceUl%n>_}ER@Ui3vq@T!r|I;&sKbx~@FFopq2?6V}kdI)NI9`88xCbrjgFLjk z{eSI|Yt4f@2Me)~^QE=w8-eVbbvl;nMy+_kd0X(>bg+=$`-ZHOBZHn#-+4;1F| A2LJ#7 literal 0 HcmV?d00001 diff --git a/functions/data_analysis.py b/functions/data_analysis.py new file mode 100644 index 0000000..9244311 --- /dev/null +++ b/functions/data_analysis.py @@ -0,0 +1,432 @@ +"""This module contains all the functions needed for data analysis """ + +# Initial settings +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.patches as mpatches +from matplotlib import gridspec +import pdb +import ee + +# other modules +from osgeo import gdal, ogr, osr +import scipy.interpolate as interpolate +import scipy.stats as sstats + +# image processing modules +import skimage.filters as filters +import skimage.exposure as exposure +import skimage.transform as transform +import sklearn.decomposition as decomposition +import skimage.measure as measure +import skimage.morphology as morphology + +# machine learning modules +from sklearn.cluster import KMeans +from sklearn.neural_network import MLPClassifier +from sklearn.externals import joblib + +import time + +# import own modules +import functions.utils as utils + +def get_tide(dates_sds, dates_tide, tide_level): + + tide = [] + for i in range(len(dates_sds)): + dates_diff = np.abs(np.array([ (dates_sds[i] - _).total_seconds() for _ in dates_tide])) + if np.min(dates_diff) <= 1800: # half-an-hour + idx_closest = np.argmin(dates_diff) + tide.append(tide_level[idx_closest]) + else: + tide.append(np.nan) + tide = np.array(tide) + + return tide + +def remove_duplicates(output, satname): + " removes duplicates from output structure, keep the one with less cloud cover or best georeferencing " + dates = output['dates'] + dates_str = [_.strftime('%Y%m%d') for _ in dates] + dupl = utils.duplicates_dict(dates_str) + if dupl: + output_nodup = dict([]) + idx_remove = [] + if satname == 'L8' or satname == 'L5': + for k,v in dupl.items(): + + idx1 = v[0] + idx2 = v[1] + + c1 = output['metadata']['cloud_cover'][idx1] + c2 = output['metadata']['cloud_cover'][idx2] + g1 = output['metadata']['acc_georef'][idx1] + g2 = output['metadata']['acc_georef'][idx2] + + if c1 < c2 - 0.01: + idx_remove.append(idx2) + elif g1 < g2 - 0.1: + idx_remove.append(idx2) + else: + idx_remove.append(idx1) + + else: + for k,v in dupl.items(): + + idx1 = v[0] + idx2 = v[1] + + c1 = output['metadata']['cloud_cover'][idx1] + c2 = output['metadata']['cloud_cover'][idx2] + + if c1 < c2 - 0.01: + idx_remove.append(idx2) + else: + idx_remove.append(idx1) + + idx_remove = sorted(idx_remove) + idx_all = np.linspace(0, len(dates_str)-1, len(dates_str)) + idx_keep = list(np.where(~np.isin(idx_all,idx_remove))[0]) + + output_nodup['dates'] = [output['dates'][k] 
for k in idx_keep] + output_nodup['shorelines'] = [output['shorelines'][k] for k in idx_keep] + output_nodup['metadata'] = dict([]) + for key in list(output['metadata'].keys()): + output_nodup['metadata'][key] = [output['metadata'][key][k] for k in idx_keep] + print(satname + ' : ' + str(len(idx_remove)) + ' duplicates') + return output_nodup + + else: + print(satname + ' : ' + 'no duplicates') + return output + + +def merge(output): + " merges data from the different satellites " + + # stack all list together under one key + output_all = {'dates':[], 'shorelines':[], + 'metadata':{'filenames':[], 'satname':[], 'cloud_cover':[], 'acc_georef':[]}} + for satname in list(output.keys()): + output_all['dates'] = output_all['dates'] + output[satname]['dates'] + output_all['shorelines'] = output_all['shorelines'] + output[satname]['shorelines'] + for key in list(output[satname]['metadata'].keys()): + output_all['metadata'][key] = output_all['metadata'][key] + output[satname]['metadata'][key] + + output_all_sorted = {'dates':[], 'shorelines':[], + 'metadata':{'filenames':[], 'satname':[], 'cloud_cover':[], 'acc_georef':[]}} + # sort the dates + idx_sorted = sorted(range(len(output_all['dates'])), key=output_all['dates'].__getitem__) + output_all_sorted['dates'] = [output_all['dates'][i] for i in idx_sorted] + output_all_sorted['shorelines'] = [output_all['shorelines'][i] for i in idx_sorted] + for key in list(output_all['metadata'].keys()): + output_all_sorted['metadata'][key] = [output_all['metadata'][key][i] for i in idx_sorted] + + return output_all_sorted + +def create_transects(x0, y0, orientation, chainage_length): + " creates shore-normal transects " + + transects = [] + + for k in range(len(x0)): + + # orientation of cross-shore profile + phi = (90 - orientation[k])*np.pi/180 + + # create a vector using the chainage length + x = np.linspace(0,chainage_length,chainage_length+1) + y = np.zeros(len(x)) + coords = np.zeros((len(x),2)) + coords[:,0] = x + coords[:,1] = y + + # translate and rotate the vector using the origin and orientation + tf = transform.EuclideanTransform(rotation=phi, translation=(x0[k],y0[k])) + coords_tf = tf(coords) + + transects.append(coords_tf) + + return transects + +def calculate_chainage(sds, transects, orientation, along_dist): + " intersect SDS with transect and compute chainage position " + + chainage_mtx = np.zeros((len(sds),len(transects),6)) + + for i in range(len(sds)): + + sl = sds[i] + + for j in range(len(transects)): + + # compute rotation matrix + X0 = transects[j][0,0] + Y0 = transects[j][0,1] + phi = (90 - orientation[j])*np.pi/180 + Mrot = np.array([[np.cos(phi), np.sin(phi)],[-np.sin(phi), np.cos(phi)]]) + + # calculate point to line distance between shoreline points and profile + p1 = np.array([X0,Y0]) + p2 = transects[j][-1,:] + p3 = sl + d = np.abs(np.cross(p2-p1,p3-p1)/np.linalg.norm(p2-p1)) + idx_close = utils.find_indices(d, lambda e: e <= along_dist) + + # check if there are SDS points around the profile or not + if not idx_close: + chainage_mtx[i,j,:] = np.tile(np.nan,(1,6)) + + else: + # change of base to shore-normal coordinate system + xy_close = np.array([sl[idx_close,0],sl[idx_close,1]]) - np.tile(np.array([[X0],[Y0]]), (1,len(sl[idx_close]))) + xy_rot = np.matmul(Mrot, xy_close) + + # put nan values if the chainage is negative (MAKE SURE TO PICK ORIGIN CORRECTLY) + if np.any(xy_rot[0,:] < 0): + xy_rot[0,np.where(xy_rot[0,:] < 0)] = np.nan + + # compute mean, median max and std of chainage position + n_points = len(xy_rot[0,:]) + 
mean_cross = np.nanmean(xy_rot[0,:]) + median_cross = np.nanmedian(xy_rot[0,:]) + max_cross = np.nanmax(xy_rot[0,:]) + min_cross = np.nanmin(xy_rot[0,:]) + std_cross = np.nanstd(xy_rot[0,:]) + + if std_cross > 10: # if large std, take the most seaward point + mean_cross = max_cross + median_cross = max_cross + min_cross = max_cross + + # store the statistics + chainage_mtx[i,j,:] = np.array([mean_cross, median_cross, max_cross, + min_cross, n_points, std_cross]) + + # format into dictionnary + chainage = dict([]) + chainage['mean'] = chainage_mtx[:,:,0] + chainage['median'] = chainage_mtx[:,:,1] + chainage['max'] = chainage_mtx[:,:,2] + chainage['min'] = chainage_mtx[:,:,3] + chainage['npoints'] = chainage_mtx[:,:,4] + chainage['std'] = chainage_mtx[:,:,5] + + return chainage + +def compare_sds(dates_sds, chain_sds, topo_profiles, mod=0, mindays=5): + """ + Compare sds with groundtruth data from topographic surveys / argus shorelines + + KV WRL 2018 + + Arguments: + ----------- + dates_sds: list + list of dates corresponding to each row in chain_sds + chain_sds: np.ndarray + array with time series of chainage for each transect (each transect is one column) + topo_profiles: dict + dict containing the dates and chainage of the groundtruth + mod: 0 or 1 + 0 for linear interpolation between 2 closest surveys, 1 for only nearest neighbour + min_days: int + minimum number of days for which the data can be compared + + Returns: ----------- + stats: dict + contains all the statistics of the comparison + + """ + + # create 3 figures + fig1 = plt.figure() + gs1 = gridspec.GridSpec(chain_sds.shape[1], 1) + fig2 = plt.figure() + gs2 = gridspec.GridSpec(2, chain_sds.shape[1]) + fig3 = plt.figure() + gs3 = gridspec.GridSpec(2,1) + + dates_sds_num = np.array([_.toordinal() for _ in dates_sds]) + stats = dict([]) + data_fin = dict([]) + + # for each transect compare and plot the data + for i in range(chain_sds.shape[1]): + + pfname = list(topo_profiles.keys())[i] + stats[pfname] = dict([]) + data_fin[pfname] = dict([]) + + dates_sur = topo_profiles[pfname]['dates'] + chain_sur = topo_profiles[pfname]['chainage'] + + # convert to datenum + dates_sur_num = np.array([_.toordinal() for _ in dates_sur]) + + chain_sur_interp = [] + diff_days = [] + + for j, satdate in enumerate(dates_sds_num): + + temp_diff = satdate - dates_sur_num + + if mod==0: + # select measurement before and after sat image date and interpolate + + ind_before = np.where(temp_diff == temp_diff[temp_diff > 0][-1])[0] + if ind_before == len(temp_diff)-1: + chain_sur_interp.append(np.nan) + diff_days.append(np.abs(satdate-dates_sur_num[ind_before])[0]) + continue + ind_after = np.where(temp_diff == temp_diff[temp_diff < 0][0])[0] + tempx = np.zeros(2) + tempx[0] = dates_sur_num[ind_before] + tempx[1] = dates_sur_num[ind_after] + tempy = np.zeros(2) + tempy[0] = chain_sur[ind_before] + tempy[1] = chain_sur[ind_after] + diff_days.append(np.abs(np.max([satdate-tempx[0], satdate-tempx[1]]))) + # interpolate + f = interpolate.interp1d(tempx, tempy) + chain_sur_interp.append(f(satdate)) + + elif mod==1: + # select the closest measurement + + idx_closest = utils.find_indices(np.abs(temp_diff), lambda e: e == np.min(np.abs(temp_diff)))[0] + diff_days.append(np.abs(satdate-dates_sur_num[idx_closest])) + if diff_days[j] > mindays: + chain_sur_interp.append(np.nan) + else: + chain_sur_interp.append(chain_sur[idx_closest]) + + chain_sur_interp = np.array(chain_sur_interp) + + # remove nan values + idx_sur_nan = ~np.isnan(chain_sur_interp) + idx_sat_nan = 
~np.isnan(chain_sds[:,i]) + idx_nan = np.logical_and(idx_sur_nan, idx_sat_nan) + + # groundtruth and sds + chain_sur_fin = chain_sur_interp[idx_nan] + chain_sds_fin = chain_sds[idx_nan,i] + dates_fin = [k for (k, v) in zip(dates_sds, idx_nan) if v] + + # calculate statistics + slope, intercept, rvalue, pvalue, std_err = sstats.linregress(chain_sur_fin, chain_sds_fin) + R2 = rvalue**2 + correlation = np.corrcoef(chain_sur_fin, chain_sds_fin)[0,1] + diff_chain = chain_sur_fin - chain_sds_fin + + rmse = np.sqrt(np.nanmean((diff_chain)**2)) + mean = np.nanmean(diff_chain) + std = np.nanstd(diff_chain) + q90 = np.percentile(np.abs(diff_chain), 90) + + # store data + stats[pfname]['rmse'] = rmse + stats[pfname]['mean'] = mean + stats[pfname]['std'] = std + stats[pfname]['q90'] = q90 + stats[pfname]['diffdays'] = diff_days + stats[pfname]['corr'] = correlation + stats[pfname]['linfit'] = {'slope':slope, 'intercept':intercept, 'R2':R2, 'pvalue':pvalue} + + data_fin[pfname]['dates'] = dates_fin + data_fin[pfname]['sds'] = chain_sds_fin + data_fin[pfname]['survey'] = chain_sur_fin + + # make time-series plot + plt.figure(fig1.number) + fig1.add_subplot(gs1[i,0]) + plt.plot(dates_sur, chain_sur, 'o-', color='C1', markersize=4, label='survey all') + plt.plot(dates_fin, chain_sur_fin, 'o', color=[0.3, 0.3, 0.3], markersize=2, label='survey interp') + plt.plot(dates_fin, chain_sds_fin, 'o--', color='b', markersize=4, label='SDS') + plt.title(pfname, fontweight='bold') +# plt.xlim([dates_sds[0], dates_sds[-1]]) + plt.ylabel('chainage [m]') + + # make scatter plot + plt.figure(fig2.number) + fig2.add_subplot(gs2[0,i]) + plt.axis('equal') + plt.plot(chain_sur_fin, chain_sds_fin, 'ko', markersize=4, markerfacecolor='w', alpha=0.7) + xmax = np.max([np.nanmax(chain_sds_fin),np.nanmax(chain_sur_fin)]) + xmin = np.min([np.nanmin(chain_sds_fin),np.nanmin(chain_sur_fin)]) + ymax = np.max([np.nanmax(chain_sds_fin),np.nanmax(chain_sur_fin)]) + ymin = np.min([np.nanmin(chain_sds_fin),np.nanmin(chain_sur_fin)]) + plt.plot([xmin, xmax], [ymin, ymax], 'k--') + plt.plot([xmin, xmax], [xmin*slope + intercept, xmax*slope + intercept], 'b:') + str_corr = ' y = %.2f x + %.2f\n R2 = %.2f' % (slope, intercept, R2) + plt.text(xmin, ymax-5, str_corr, bbox=dict(facecolor=[0.7,0.7,0.7], alpha=0.5), horizontalalignment='left') + plt.xlabel('chainage survey [m]') + plt.ylabel('chainage satellite [m]') + plt.title(pfname, fontweight='bold') + + fig2.add_subplot(gs2[1,i]) + binwidth = 3 + bins = np.arange(min(diff_chain), max(diff_chain) + binwidth, binwidth) + density = plt.hist(diff_chain, bins=bins, density=True, color=[0.8, 0.8, 0.8], edgecolor='k') + plt.xlim([-50, 50]) + plt.xlabel('error [m]') + str_stats = ' rmse = %.1f\n mean = %.1f\n std = %.1f\n q90 = %.1f' % (rmse, mean, std, q90) + plt.text(15, np.max(density[0])-0.015, str_stats, bbox=dict(facecolor=[0.8,0.8,0.8], alpha=0.3), horizontalalignment='left', fontsize=10) + + fig1.set_size_inches(19.2, 9.28) + fig1.set_tight_layout(True) + fig2.set_size_inches(19.2, 9.28) + fig2.set_tight_layout(True) + + # all transects together + chain_sds_all = [] + chain_sur_all = [] + for i in range(chain_sds.shape[1]): + pfname = list(topo_profiles.keys())[i] + chain_sds_all = np.append(chain_sds_all,data_fin[pfname]['sds']) + chain_sur_all = np.append(chain_sur_all,data_fin[pfname]['survey']) + + # calculate statistics + slope, intercept, rvalue, pvalue, std_err = sstats.linregress(chain_sur_all, chain_sds_all) + R2 = rvalue**2 + correlation = np.corrcoef(chain_sur_all, 
chain_sds_all)[0,1] + diff_chain_all = chain_sur_all - chain_sds_all + + rmse = np.sqrt(np.nanmean((diff_chain_all)**2)) + mean = np.nanmean(diff_chain_all) + std = np.nanstd(diff_chain_all) + q90 = np.percentile(np.abs(diff_chain_all), 90) + + stats['all'] = {'rmse':rmse,'mean':mean,'std':std,'q90':q90, 'corr':correlation, + 'linfit':{'slope':slope, 'intercept':intercept, 'R2':R2, 'pvalue':pvalue}} + + # make plot + plt.figure(fig3.number) + fig3.add_subplot(gs3[0,0]) + plt.axis('equal') + plt.plot(chain_sur_all, chain_sds_all, 'ko', markersize=4, markerfacecolor='w', alpha=0.7) + xmax = np.max([np.nanmax(chain_sds_all),np.nanmax(chain_sur_all)]) + xmin = np.min([np.nanmin(chain_sds_all),np.nanmin(chain_sur_all)]) + ymax = np.max([np.nanmax(chain_sds_all),np.nanmax(chain_sur_all)]) + ymin = np.min([np.nanmin(chain_sds_all),np.nanmin(chain_sur_all)]) + plt.plot([xmin, xmax], [ymin, ymax], 'k--') + plt.plot([xmin, xmax], [xmin*slope + intercept, xmax*slope + intercept], 'b:') + str_corr = ' y = %.2f x + %.2f\n R2 = %.2f' % (slope, intercept, R2) + plt.text(xmin, ymax-5, str_corr, bbox=dict(facecolor=[0.7,0.7,0.7], alpha=0.5), horizontalalignment='left') + plt.xlabel('chainage survey [m]') + plt.ylabel('chainage satellite [m]') + plt.title(pfname, fontweight='bold') + + fig3.add_subplot(gs3[1,0]) + binwidth = 3 + bins = np.arange(min(diff_chain_all), max(diff_chain_all) + binwidth, binwidth) + density = plt.hist(diff_chain_all, bins=bins, density=True, color=[0.8, 0.8, 0.8], edgecolor='k') + plt.xlim([-50, 50]) + plt.xlabel('error [m]') + str_stats = ' rmse = %.1f\n mean = %.1f\n std = %.1f\n q90 = %.1f' % (rmse, mean, std, q90) + plt.text(15, np.max(density[0])-0.015, str_stats, bbox=dict(facecolor=[0.8,0.8,0.8], alpha=0.3), horizontalalignment='left', fontsize=10) + fig3.set_size_inches(9.2, 9.28) + fig3.set_tight_layout(True) + + return stats \ No newline at end of file diff --git a/functions/sds.py b/functions/sds.py index ebcee1b..301753a 100644 --- a/functions/sds.py +++ b/functions/sds.py @@ -5,11 +5,13 @@ Created on Thu Mar 1 11:20:35 2018 @author: z5030440 """ -"""This script contains the functions needed for satellite derived shoreline (SDS) extraction""" +"""This module contains all the functions needed for extracting satellite derived shoreline (SDS) """ # Initial settings import numpy as np import matplotlib.pyplot as plt +import matplotlib.patches as mpatches +from matplotlib import gridspec import pdb import ee @@ -18,6 +20,7 @@ from osgeo import gdal, ogr, osr import tempfile from urllib.request import urlretrieve import zipfile +import scipy.interpolate as interpolate # image processing modules import skimage.filters as filters @@ -39,7 +42,7 @@ from functions.utils import * # Download from ee server function -def download_tif(image, polygon, bandsId): +def download_tif(image, polygon, bandsId, filepath): """downloads tif image (region and bands) from the ee server and stores it in a temp file""" url = ee.data.makeDownloadUrl(ee.data.getDownloadId({ 'image': image.serialize(), @@ -50,40 +53,7 @@ def download_tif(image, polygon, bandsId): })) local_zip, headers = urlretrieve(url) with zipfile.ZipFile(local_zip) as local_zipfile: - return local_zipfile.extract('data.tif', tempfile.mkdtemp()) - -def load_image(image, polygon, bandsId): - """ - Loads an ee.Image() as a np.array. e.Image() is retrieved from the EE database. 
- The geographic area and bands to select can be specified - - KV WRL 2018 - - Arguments: - ----------- - image: ee.Image() - image objec from the EE database - polygon: list - coordinates of the points creating a polygon. Each point is a list with 2 values - bandsId: list - bands to select, each band is a dictionnary in the list containing the following keys: - crs, crs_transform, data_type and id. NOTE: you have to remove the key dimensions, otherwise - the entire image is retrieved. - - Returns: - ----------- - image_array : np.ndarray - An array containing the image (2D if one band, otherwise 3D) - georef : np.ndarray - 6 element vector containing the crs_parameters - [X_ul_corner Xscale Xshear Y_ul_corner Yshear Yscale] - """ - - local_tif_filename = download_tif(image, polygon, bandsId) - dataset = gdal.Open(local_tif_filename, gdal.GA_ReadOnly) - georef = np.array(dataset.GetGeoTransform()) - bands = [dataset.GetRasterBand(i + 1).ReadAsArray() for i in range(dataset.RasterCount)] - return np.stack(bands, 2), georef + return local_zipfile.extract('data.tif', filepath) def create_cloud_mask(im_qa, satname, plot_bool): """ @@ -109,8 +79,10 @@ def create_cloud_mask(im_qa, satname, plot_bool): # convert QA bits if satname == 'L8': cloud_values = [2800, 2804, 2808, 2812, 6896, 6900, 6904, 6908] - elif satname == 'L7': + elif satname == 'L7' or satname == 'L5' or satname == 'L4': cloud_values = [752, 756, 760, 764] + elif satname == 'S2': + cloud_values = [1024, 2048] # 1024 = dense cloud, 2048 = cirrus clouds cloud_mask = np.isin(im_qa, cloud_values) # remove isolated cloud pixels (there are some in the swash and they cause problems) @@ -127,109 +99,6 @@ def create_cloud_mask(im_qa, satname, plot_bool): return cloud_mask -def read_eeimage(im, polygon, sat_name, plot_bool): - """ - Read an ee.Image() object and returns the panchromatic band, multispectral bands (B, G, R, NIR, SWIR) - and a cloud mask. 
All outputs are at 15m resolution (bilinear interpolation for the multispectral bands) - - KV WRL 2018 - - Arguments: - ----------- - im: ee.Image() - Image to read from the Google Earth Engine database - plot_bool: boolean - True if plot is wanted - - Returns: - ----------- - im_pan: np.ndarray (2D) - The panchromatic band (15m) - im_ms: np.ndarray (3D) - The multispectral bands interpolated at 15m - im_cloud: np.ndarray (2D) - The cloud mask at 15m - crs_params: list - EPSG code and affine transformation parameters - """ - - im_dic = im.getInfo() - # save metadata - im_meta = im_dic.get('properties') - meta = {'timestamp':im_meta['system:time_start'], - 'date_acquired':im_meta['DATE_ACQUIRED'], - 'geom_rmse_model':im_meta['GEOMETRIC_RMSE_MODEL'], - 'gcp_model':im_meta['GROUND_CONTROL_POINTS_MODEL'], - 'quality':im_meta['IMAGE_QUALITY_OLI'], - 'sun_azimuth':im_meta['SUN_AZIMUTH'], - 'sun_elevation':im_meta['SUN_ELEVATION']} - - im_bands = im_dic.get('bands') - - # delete dimensions key from dictionnary, otherwise the entire image is extracted - for i in range(len(im_bands)): del im_bands[i]['dimensions'] - - # load panchromatic band - pan_band = [im_bands[7]] - im_pan, crs_pan = load_image(im, polygon, pan_band) - im_pan = im_pan[:,:,0] - - # load the multispectral bands (B2,B3,B4,B5,B6) = (blue,green,red,nir,swir1) - ms_bands = [im_bands[1], im_bands[2], im_bands[3], im_bands[4], im_bands[5]] - im_ms_30m, crs_ms = load_image(im, polygon, ms_bands) - - # create cloud mask - qa_band = [im_bands[11]] - im_qa, crs_qa = load_image(im, polygon, qa_band) - im_qa = im_qa[:,:,0] - im_cloud = create_cloud_mask(im_qa, sat_name, plot_bool) - im_cloud = transform.resize(im_cloud, (im_pan.shape[0], im_pan.shape[1]), - order=0, preserve_range=True, mode='constant').astype('bool_') - - # resize the image using bilinear interpolation (order 1) - im_ms = transform.resize(im_ms_30m,(im_pan.shape[0], im_pan.shape[1]), - order=1, preserve_range=True, mode='constant') - - # check if -inf values (means out of image) and add to cloud mask - im_inf = np.isin(im_ms[:,:,0], -np.inf) - im_nan = np.isnan(im_ms[:,:,0]) - im_cloud = np.logical_or(np.logical_or(im_cloud, im_inf), im_nan) - - # get the crs parameters for the image at 15m and 30m resolution - crs = {'crs_15m':crs_pan, 'crs_30m':crs_ms, 'epsg_code':int(pan_band[0]['crs'][5:])} - - if plot_bool: - - # if there are -inf in the image, set them to 0 before plotting - if sum(sum(np.isin(im_ms_30m[:,:,0], -np.inf).astype(int))) > 0: - idx = np.isin(im_ms_30m[:,:,0], -np.inf) - im_ms_30m[idx,0] = 0; im_ms_30m[idx,1] = 0; im_ms_30m[idx,2] = 0; - im_ms_30m[idx,3] = 0; im_ms_30m[idx,4] = 0 - - plt.figure() - - plt.subplot(221) - plt.imshow(im_pan, cmap='gray') - plt.title('PANCHROMATIC') - - plt.subplot(222) - plt.imshow(im_ms_30m[:,:,[2,1,0]]) - plt.title('RGB') - - - plt.subplot(223) - plt.imshow(im_ms_30m[:,:,3], cmap='gray') - plt.title('NIR') - - plt.subplot(224) - plt.imshow(im_ms_30m[:,:,4], cmap='gray') - plt.title('SWIR') - - plt.show() - - return im_pan, im_ms, im_cloud, crs, meta - - def rescale_image_intensity(im, cloud_mask, prob_high, plot_bool): """ Rescales the intensity of an image (multispectral or single band) by applying @@ -395,9 +264,28 @@ def pansharpen(im_ms, im_pan, cloud_mask, plot_bool): # apply PCA to RGB bands pca = decomposition.PCA() vec_pcs = pca.fit_transform(vec) + # replace 1st PC with pan band (after matching histograms) vec_pan = im_pan.reshape(im_pan.shape[0] * im_pan.shape[1]) vec_pan = vec_pan[~vec_mask] + +# plt.figure() +# 
ax1 = plt.subplot(131) +# plt.imshow(im_pan, cmap='gray') +# plt.title('Pan band') +# plt.subplot(132, sharex=ax1, sharey=ax1) +# plt.imshow(vec_pcs[:,0].reshape(im_pan.shape[0],im_pan.shape[1]), cmap='gray') +# plt.title('PC1') +# plt.subplot(133, sharex=ax1, sharey=ax1) +# plt.imshow(hist_match(vec_pan, vec_pcs[:,0]).reshape(im_pan.shape[0],im_pan.shape[1]), cmap='gray') +# plt.title('Pan band histmatched') +# +# plt.figure() +# plt.hist(hist_match(vec_pan, vec_pcs[:,0]), bins=300) +# plt.hist(vec_pcs[:,0], bins=300, alpha=0.5) +# plt.hist(vec_pan, bins=300, alpha=0.5) +# plt.draw() + vec_pcs[:,0] = hist_match(vec_pan, vec_pcs[:,0]) vec_ms_ps = pca.inverse_transform(vec_pcs) @@ -407,13 +295,14 @@ def pansharpen(im_ms, im_pan, cloud_mask, plot_bool): im_ms_ps = vec_ms_ps_full.reshape(im_ms.shape[0], im_ms.shape[1], im_ms.shape[2]) if plot_bool: + plt.figure() ax1 = plt.subplot(121) - plt.imshow(rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 100, False)) + plt.imshow(rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9, False)) plt.axis('off') plt.title('Original') ax2 = plt.subplot(122, sharex=ax1, sharey=ax1) - plt.imshow(rescale_image_intensity(im_ms_ps[:,:,[2,1,0]], cloud_mask, 100, False)) + plt.imshow(rescale_image_intensity(im_ms_ps[:,:,[2,1,0]], cloud_mask, 99.9, False)) plt.axis('off') plt.title('Pansharpened') plt.show() @@ -458,9 +347,10 @@ def nd_index(im1, im2, cloud_mask, plot_bool): return im_nd -def find_wl_contours(im_ndwi, cloud_mask, min_contour_points, plot_bool): +def find_wl_contours(im_ndwi, cloud_mask, plot_bool): """ - Computes normalised difference index on 2 images (2D), given a cloud mask (2D) + Finds the water line by thresholding the Normalized Difference Water Index and applying the Marching + Squares Algorithm KV WRL 2018 @@ -470,8 +360,6 @@ def find_wl_contours(im_ndwi, cloud_mask, min_contour_points, plot_bool): Image (2D) with the NDWI (water index) cloud_mask: np.ndarray 2D cloud mask with True where cloud pixels are - min_contour_points: int - minimum number of points in each contour line plot_bool: boolean True if plot is wanted @@ -489,17 +377,19 @@ def find_wl_contours(im_ndwi, cloud_mask, min_contour_points, plot_bool): t_otsu = filters.threshold_otsu(vec) # use Marching Squares algorithm to detect contours on ndwi image contours = measure.find_contours(im_ndwi, t_otsu) - # filter water lines - contours_wl = [] - for i, contour in enumerate(contours): - # remove contour points that are around clouds (nan values) - if np.any(np.isnan(contour)): - index_nan = np.where(np.isnan(contour))[0] - contour = np.delete(contour, index_nan, axis=0) - # remove contours that have only few points (less than min_contour_points) - if contour.shape[0] > min_contour_points: - contours_wl.append(contour) - + + # remove contour points that are nans + contours_nonans = [] + for k in range(len(contours)): + if np.any(np.isnan(contours[k])): + index_nan = np.where(np.isnan(contours[k]))[0] + contours_temp = np.delete(contours[k], index_nan, axis=0) + if len(contours_temp) > 1: + contours_nonans.append(contours_temp) + else: + contours_nonans.append(contours[k]) + contours = contours_nonans + if plot_bool: # plot otsu's histogram segmentation plt.figure() @@ -512,12 +402,12 @@ def find_wl_contours(im_ndwi, cloud_mask, min_contour_points, plot_bool): plt.figure() plt.imshow(im_ndwi, cmap='seismic') plt.colorbar() - for i,contour in enumerate(contours_wl): plt.plot(contour[:, 1], contour[:, 0], linewidth=3, color='k') + for i,contour in enumerate(contours): 
plt.plot(contour[:, 1], contour[:, 0], linewidth=3, color='k') plt.axis('image') plt.title('Detected water lines') plt.show() - return contours_wl + return contours def convert_pix2world(points, crs_vec): """ @@ -563,6 +453,49 @@ def convert_pix2world(points, crs_vec): return points_converted +def convert_world2pix(points, crs_vec): + """ + Converts world projected coordinates (X,Y) to image coordinates (row,column) + performing an affine transformation + + KV WRL 2018 + + Arguments: + ----------- + points: np.ndarray or list of np.ndarray + array with 2 columns (X and Y world coordinates) + crs_vec: np.ndarray + vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale] + + Returns: ----------- + points_converted: np.ndarray or list of np.ndarray + converted coordinates, first column with row and second column with column + + """ + + # make affine transformation matrix + aff_mat = np.array([[crs_vec[1], crs_vec[2], crs_vec[0]], + [crs_vec[4], crs_vec[5], crs_vec[3]], + [0, 0, 1]]) + # create affine transformation + tform = transform.AffineTransform(aff_mat) + + if type(points) is list: + points_converted = [] + # iterate over the list and convert each array separately + for i, arr in enumerate(points): + points_converted.append(tform.inverse(arr)) + + elif type(points) is np.ndarray: + points_converted = tform.inverse(points) + + else: + raise TypeError('invalid input type') + + return points_converted + + def convert_epsg(points, epsg_in, epsg_out): """ Converts from one spatial reference to another using the epsg codes @@ -676,7 +609,7 @@ def classify_sand_unsupervised(im_ms_ps, im_pan, cloud_mask, wl_pix, buffer_size # im_sand = morphology.binary_dilation(im_sand, morphology.disk(1)) if plot_bool: - im = np.copy(rescale_image_intensity(im_ms_ps[:,:,[2,1,0]], cloud_mask, 100, False)) + im = np.copy(rescale_image_intensity(im_ms_ps[:,:,[2,1,0]], cloud_mask, 99.9, False)) im[im_sand,0] = 0 im[im_sand,1] = 0 im[im_sand,2] = 1 @@ -688,7 +621,7 @@ return im_sand -def classify_image_NN(im_ms_ps, im_pan, cloud_mask, plot_bool): +def classify_image_NN(im_ms_ps, im_pan, cloud_mask, min_beach_size, plot_bool): """ Classifies every pixel in the image in one of 4 classes: - sand --> label = 1 @@ -714,8 +647,10 @@ True if plot is wanted Returns: ----------- - im_labels: np.ndarray - 2D binary image containing True where sand pixels are located + im_classif: np.ndarray + 2D image containing labels + im_labels: np.ndarray of booleans + 3D image containing a boolean image for each class (im_classif == label) """ @@ -743,17 +678,117 @@ vec_classif = np.zeros((cloud_mask.shape[0]*cloud_mask.shape[1])) vec_classif[~vec_mask] = labels im_classif = vec_classif.reshape((im_ms_ps.shape[0], im_ms_ps.shape[1])) -# im_classif = morphology.remove_small_objects(im_classif, min_size=min_beach_size, connectivity=2) - + + # labels + im_sand = im_classif == 1 + im_sand = morphology.remove_small_objects(im_sand, min_size=min_beach_size, connectivity=2) + im_swash = im_classif == 2 + im_water = im_classif == 3 + im_labels = np.stack((im_sand,im_swash,im_water), axis=-1) + if plot_bool: - im_display = rescale_image_intensity(im_ms_ps[:,:,[2,1,0]], cloud_mask, 100, False) + # display on top of pansharpened RGB + im_display = rescale_image_intensity(im_ms_ps[:,:,[2,1,0]], cloud_mask, 99.9, False) im = np.copy(im_display) - colours = 
np.array([[1,0,0],[1,1,0],[0,1,1],[0,0,1]]) - for k in range(4): - im[im_classif == k,0] = colours[k,0] - im[im_classif == k,1] = colours[k,1] - im[im_classif == k,2] = colours[k,2] - + # define colours for plot + colours = np.array([[1,128/255,0/255],[204/255,1,1],[0,0,204/255]]) + for k in range(0,im_labels.shape[2]): + im[im_labels[:,:,k],0] = colours[k,0] + im[im_labels[:,:,k],1] = colours[k,1] + im[im_labels[:,:,k],2] = colours[k,2] + + plt.figure() + ax1 = plt.subplot(121) + plt.imshow(im_display) + plt.axis('off') + plt.title('Image') + ax2 = plt.subplot(122, sharex=ax1, sharey=ax1) + plt.imshow(im) + plt.axis('off') + plt.title('NN classifier') + mng = plt.get_current_fig_manager() + mng.window.showMaximized() + plt.tight_layout() + plt.draw() + + return im_classif, im_labels + +def classify_image_NN_nopan(im_ms_ps, cloud_mask, min_beach_size, plot_bool): + """ + Classifies every pixel in the image in one of 4 classes: + - sand --> label = 1 + - whitewater (breaking waves and swash) --> label = 2 + - water --> label = 3 + - other (vegetation, buildings, rocks...) --> label = 0 + + The classifier is a Neural Network, trained with 7000 pixels for the class SAND and 1500 pixels for + each of the other classes. This is because the class of interest for my application is SAND and I + wanted to minimize the classification error for that class. + + KV WRL 2018 + + Arguments: + ----------- + im_ms_ps: np.ndarray + Pansharpened RGB + downsampled NIR and SWIR + cloud_mask: np.ndarray + 2D cloud mask with True where cloud pixels are + min_beach_size: int + minimum number of connected sand pixels kept when cleaning the sand mask + plot_bool: boolean + True if plot is wanted + + Returns: ----------- + im_classif: np.ndarray + 2D image containing labels + im_labels: np.ndarray of booleans + 3D image containing a boolean image for each class (im_classif == label) + + """ + + # load classifier + clf = joblib.load('functions/NeuralNet_classif_nopan.pkl') + + # calculate features + n_features = 9 + im_features = np.zeros((im_ms_ps.shape[0], im_ms_ps.shape[1], n_features)) + im_features[:,:,[0,1,2,3,4]] = im_ms_ps + im_features[:,:,5] = nd_index(im_ms_ps[:,:,3], im_ms_ps[:,:,1], cloud_mask, False) # ND(NIR-G) + im_features[:,:,6] = nd_index(im_ms_ps[:,:,3], im_ms_ps[:,:,2], cloud_mask, False) # ND(NIR-R) + im_features[:,:,7] = nd_index(im_ms_ps[:,:,0], im_ms_ps[:,:,2], cloud_mask, False) # ND(B-R) + im_features[:,:,8] = nd_index(im_ms_ps[:,:,4], im_ms_ps[:,:,1], cloud_mask, False) # ND(SWIR-G) + # remove NaNs and clouds + vec_features = im_features.reshape((im_ms_ps.shape[0] * im_ms_ps.shape[1], n_features)) + vec_cloud = cloud_mask.reshape(cloud_mask.shape[0]*cloud_mask.shape[1]) + vec_nan = np.any(np.isnan(vec_features), axis=1) + vec_mask = np.logical_or(vec_cloud, vec_nan) + vec_features = vec_features[~vec_mask, :] + # predict with NN classifier + labels = clf.predict(vec_features) + + # recompose image + vec_classif = np.zeros((cloud_mask.shape[0]*cloud_mask.shape[1])) + vec_classif[~vec_mask] = labels + im_classif = vec_classif.reshape((im_ms_ps.shape[0], im_ms_ps.shape[1])) + + # labels + im_sand = im_classif == 1 + im_sand = morphology.remove_small_objects(im_sand, min_size=min_beach_size, connectivity=2) + im_swash = im_classif == 2 + im_water = im_classif == 3 + im_labels = np.stack((im_sand,im_swash,im_water), axis=-1) + + if plot_bool: + # display on top of pansharpened RGB + im_display = rescale_image_intensity(im_ms_ps[:,:,[2,1,0]], cloud_mask, 99.9, False) + im = np.copy(im_display) + # define colours for plot + colours = 
np.array([[1,128/255,0/255],[204/255,1,1],[0,0,204/255]]) + for k in range(0,im_labels.shape[2]): + im[im_labels[:,:,k],0] = colours[k,0] + im[im_labels[:,:,k],1] = colours[k,1] + im[im_labels[:,:,k],2] = colours[k,2] + plt.figure() ax1 = plt.subplot(121) plt.imshow(im_display) @@ -762,8 +797,371 @@ def classify_image_NN(im_ms_ps, im_pan, cloud_mask, plot_bool): ax2 = plt.subplot(122, sharex=ax1, sharey=ax1) plt.imshow(im) plt.axis('off') - plt.title('NN') + plt.title('NN classifier') + mng = plt.get_current_fig_manager() + mng.window.showMaximized() + plt.tight_layout() + plt.draw() + + return im_classif, im_labels + +def find_wl_contours2(im_ms_ps, im_labels, cloud_mask, buffer_size, plot_bool): + """ + New method for extracting shorelines (more robust) + + KV WRL 2018 + + Arguments: + ----------- + im_ms_ps: np.ndarray + Pansharpened RGB + downsampled NIR and SWIR + im_labels: np.ndarray + 3D image containing a boolean image for each class in the order (sand, swash, water) + cloud_mask: np.ndarray + 2D cloud mask with True where cloud pixels are + buffer_size: int + size of the buffer around the sandy beach + plot_bool: boolean + True if plot is wanted + + Returns: ----------- + contours_wi: list of np.arrays + contains the (row,column) coordinates of the contour lines extracted with the Water Index + contours_mwi: list of np.arrays + contains the (row,column) coordinates of the contour lines extracted with the Modified Water Index + + """ + + nrows = cloud_mask.shape[0] + ncols = cloud_mask.shape[1] + im_display = rescale_image_intensity(im_ms_ps[:,:,[2,1,0]], cloud_mask, 99.9, False) + + # calculate Normalized Difference Modified Water Index (SWIR - G) + im_mwi = nd_index(im_ms_ps[:,:,4], im_ms_ps[:,:,1], cloud_mask, False) + # calculate Normalized Difference Modified Water Index (NIR - G) + im_wi = nd_index(im_ms_ps[:,:,3], im_ms_ps[:,:,1], cloud_mask, False) + # stack indices together + im_ind = np.stack((im_wi, im_mwi), axis=-1) + vec_ind = im_ind.reshape(nrows*ncols,2) + + # process labels + vec_sand = im_labels[:,:,0].reshape(ncols*nrows) + vec_swash = im_labels[:,:,1].reshape(ncols*nrows) + vec_water = im_labels[:,:,2].reshape(ncols*nrows) + + # create a buffer around the sandy beach + se = morphology.disk(buffer_size) + im_buffer = morphology.binary_dilation(im_labels[:,:,0], se) + vec_buffer = im_buffer.reshape(nrows*ncols) + + # select water/sand/swash pixels that are within the buffer + int_water = vec_ind[np.logical_and(vec_buffer,vec_water),:] + int_sand = vec_ind[np.logical_and(vec_buffer,vec_sand),:] + int_swash = vec_ind[np.logical_and(vec_buffer,vec_swash),:] + + # threshold the sand/water intensities + int_all = np.append(int_water,int_sand, axis=0) + t_mwi = filters.threshold_otsu(int_all[:,0]) + t_wi = filters.threshold_otsu(int_all[:,1]) + + # find contour with MS algorithm + im_wi_buffer = np.copy(im_wi) + im_wi_buffer[~im_buffer] = np.nan + im_mwi_buffer = np.copy(im_mwi) + im_mwi_buffer[~im_buffer] = np.nan + contours_wi = measure.find_contours(im_wi_buffer, t_wi) + contours_mwi = measure.find_contours(im_mwi, t_mwi) # WARNING (on entire image) + + # remove contour points that are nans (around clouds) + + contours = contours_wi + contours_nonans = [] + for k in range(len(contours)): + if np.any(np.isnan(contours[k])): + index_nan = np.where(np.isnan(contours[k]))[0] + contours_temp = np.delete(contours[k], index_nan, axis=0) + if len(contours_temp) > 1: + contours_nonans.append(contours_temp) + else: + contours_nonans.append(contours[k]) + contours_wi = 
contours_nonans + + contours = contours_mwi + contours_nonans = [] + for k in range(len(contours)): + if np.any(np.isnan(contours[k])): + index_nan = np.where(np.isnan(contours[k]))[0] + contours_temp = np.delete(contours[k], index_nan, axis=0) + if len(contours_temp) > 1: + contours_nonans.append(contours_temp) + else: + contours_nonans.append(contours[k]) + contours_mwi = contours_nonans + + if plot_bool: + + im = np.copy(im_display) + # define colours for plot + colours = np.array([[1,128/255,0/255],[204/255,1,1],[0,0,204/255]]) + for k in range(0,im_labels.shape[2]): + im[im_labels[:,:,k],0] = colours[k,0] + im[im_labels[:,:,k],1] = colours[k,1] + im[im_labels[:,:,k],2] = colours[k,2] + + fig = plt.figure() + gs = gridspec.GridSpec(3, 3, height_ratios=[1, 1, 3]) + + ax1 = fig.add_subplot(gs[0,:]) + vals = plt.hist(int_water[:,0], bins=100, label='water') + plt.hist(int_sand[:,0], bins=100, alpha=0.5, label='sand') + plt.hist(int_swash[:,0], bins=100, alpha=0.5, label='swash') + plt.plot([t_wi, t_wi], [0, np.max(vals[0])], 'r-') + plt.legend() + plt.title('Water Index NIR-G') + + ax2 = fig.add_subplot(gs[1,:], sharex=ax1) + vals = plt.hist(int_water[:,1], bins=100, label='water') + plt.hist(int_sand[:,1], bins=100, alpha=0.5, label='sand') + plt.hist(int_swash[:,1], bins=100, alpha=0.5, label='swash') + plt.plot([t_mwi, t_mwi], [0, np.max(vals[0])], 'r-') + plt.legend() + plt.title('Modified Water Index SWIR-G') + + ax3 = fig.add_subplot(gs[2,0]) + plt.imshow(im) + for i,contour in enumerate(contours_mwi): plt.plot(contour[:, 1], contour[:, 0], linewidth=3, color='k') + for i,contour in enumerate(contours_wi): plt.plot(contour[:, 1], contour[:, 0], linestyle='--', linewidth=1, color='w') + plt.grid(False) + plt.xticks([]) + plt.yticks([]) + + ax4 = fig.add_subplot(gs[2,1], sharex=ax3, sharey=ax3) + plt.imshow(im_display) + for i,contour in enumerate(contours_mwi): plt.plot(contour[:, 1], contour[:, 0], linewidth=3, color='k') + for i,contour in enumerate(contours_wi): plt.plot(contour[:, 1], contour[:, 0], linestyle='--', linewidth=1, color='w') + plt.grid(False) + plt.xticks([]) + plt.yticks([]) + + ax5 = fig.add_subplot(gs[2,2], sharex=ax3, sharey=ax3) + plt.imshow(im_mwi, cmap='seismic') + for i,contour in enumerate(contours_mwi): plt.plot(contour[:, 1], contour[:, 0], linewidth=3, color='k') + for i,contour in enumerate(contours_wi): plt.plot(contour[:, 1], contour[:, 0], linestyle='--', linewidth=1, color='w') + plt.grid(False) + plt.xticks([]) + plt.yticks([]) + +# plt.gcf().set_size_inches(17.99,7.55) + mng = plt.get_current_fig_manager() + mng.window.showMaximized() + plt.gcf().set_tight_layout(True) plt.draw() + + return contours_wi, contours_mwi + +def compare_sds(dates_sds, chain_sds, topo_profiles, mod=0, mindays=5): + """ + Compare sds with groundtruth data from topographic surveys / argus shorelines - return im_classif + KV WRL 2018 + + Arguments: + ----------- + dates_sds: list + list of dates corresponding to each row in chain_sds + chain_sds: np.ndarray + array with time series of chainage for each transect (each transect is one column) + topo_profiles: dict + dict containing the dates and chainage of the groundtruth + mod: 0 or 1 + 0 for linear interpolation between 2 closest surveys, 1 for only nearest neighbour + min_days: int + minimum number of days for which the data can be compared + + Returns: ----------- + stats: dict + contains all the statistics of the comparison + """ + + # create 3 figures + fig1 = plt.figure() + gs1 = gridspec.GridSpec(chain_sds.shape[1], 
1) + fig2 = plt.figure() + gs2 = gridspec.GridSpec(2, chain_sds.shape[1]) + fig3 = plt.figure() + gs3 = gridspec.GridSpec(2,1) + + dates_sds_num = np.array([_.toordinal() for _ in dates_sds]) + stats = dict([]) + data_fin = dict([]) + + # for each transect compare and plot the data + for i in range(chain_sds.shape[1]): + + pfname = list(topo_profiles.keys())[i] + stats[pfname] = dict([]) + data_fin[pfname] = dict([]) + + dates_sur = topo_profiles[pfname]['dates'] + chain_sur = topo_profiles[pfname]['chainage'] + + # convert to datenum + dates_sur_num = np.array([_.toordinal() for _ in dates_sur]) + + chain_sur_interp = [] + diff_days = [] + + for j, satdate in enumerate(dates_sds_num): + + temp_diff = satdate - dates_sur_num + + if mod==0: + # select measurement before and after sat image date and interpolate + + ind_before = np.where(temp_diff == temp_diff[temp_diff > 0][-1])[0] + if ind_before == len(temp_diff)-1: + chain_sur_interp.append(np.nan) + diff_days.append(np.abs(satdate-dates_sur_num[ind_before])[0]) + continue + ind_after = np.where(temp_diff == temp_diff[temp_diff < 0][0])[0] + tempx = np.zeros(2) + tempx[0] = dates_sur_num[ind_before] + tempx[1] = dates_sur_num[ind_after] + tempy = np.zeros(2) + tempy[0] = chain_sur[ind_before] + tempy[1] = chain_sur[ind_after] + diff_days.append(np.abs(np.max([satdate-tempx[0], satdate-tempx[1]]))) + # interpolate + f = interpolate.interp1d(tempx, tempy) + chain_sur_interp.append(f(satdate)) + + elif mod==1: + # select the closest measurement + + idx_closest = find_indices(np.abs(temp_diff), lambda e: e == np.min(np.abs(temp_diff)))[0] + diff_days.append(np.abs(satdate-dates_sur_num[idx_closest])) + if diff_days[j] > mindays: + chain_sur_interp.append(np.nan) + else: + chain_sur_interp.append(chain_sur[idx_closest]) + + chain_sur_interp = np.array(chain_sur_interp) + + # remove nan values + idx_sur_nan = ~np.isnan(chain_sur_interp) + idx_sat_nan = ~np.isnan(chain_sds[:,i]) + idx_nan = np.logical_and(idx_sur_nan, idx_sat_nan) + + # groundtruth and sds + chain_sur_fin = chain_sur_interp[idx_nan] + chain_sds_fin = chain_sds[idx_nan,i] + dates_fin = [k for (k, v) in zip(dates_sds, idx_nan) if v] + diff_chain = chain_sur_fin - chain_sds_fin + + # calculate statistics + rmse = np.sqrt(np.nanmean((diff_chain)**2)) + mean = np.nanmean(diff_chain) + std = np.nanstd(diff_chain) + q90 = np.percentile(np.abs(diff_chain), 90) + + # store data + stats[pfname]['rmse'] = rmse + stats[pfname]['mean'] = mean + stats[pfname]['std'] = std + stats[pfname]['q90'] = q90 + stats[pfname]['diffdays'] = diff_days + + data_fin[pfname]['dates'] = dates_fin + data_fin[pfname]['sds'] = chain_sds_fin + data_fin[pfname]['survey'] = chain_sur_fin + + # make time-series plot + plt.figure(fig1.number) + ax = fig1.add_subplot(gs1[i,0]) + plt.plot(dates_sur, chain_sur, 'o-', color='C1', markersize=4, label='survey all') + plt.plot(dates_fin, chain_sur_fin, 'o', color=[0.3, 0.3, 0.3], markersize=2, label='survey interp') + plt.plot(dates_fin, chain_sds_fin, 'o--', color='b', markersize=4, label='SDS') + plt.title(pfname, fontweight='bold') + plt.xlim([dates_sds[0], dates_sds[-1]]) + plt.ylabel('chainage [m]') + + # make scatter plot + plt.figure(fig2.number) + ax1 = fig2.add_subplot(gs2[0,i]) + plt.axis('equal') + plt.plot(chain_sur_fin, chain_sds_fin, 'ko', markersize=4, markerfacecolor='w', alpha=0.7) + xmax = np.max([np.nanmax(chain_sds_fin),np.nanmax(chain_sur_fin)]) + xmin = np.min([np.nanmin(chain_sds_fin),np.nanmin(chain_sur_fin)]) + ymax = 
np.max([np.nanmax(chain_sds_fin),np.nanmax(chain_sur_fin)]) + ymin = np.min([np.nanmin(chain_sds_fin),np.nanmin(chain_sur_fin)]) + plt.plot([xmin, xmax], [ymin, ymax], 'r--') + correlation = np.corrcoef(chain_sur_fin, chain_sds_fin)[0,1] + str_corr = 'r = %.2f' % (correlation) + plt.text(xmin, ymax, str_corr, bbox=dict(facecolor=[0.7,0.7,0.7], alpha=0.5), horizontalalignment='left') + plt.xlabel('chainage survey [m]') + plt.ylabel('chainage satellite [m]') + plt.title(pfname, fontweight='bold') + + ax2 = fig2.add_subplot(gs2[1,i]) + binwidth = 3 + bins = np.arange(min(diff_chain), max(diff_chain) + binwidth, binwidth) + density = plt.hist(diff_chain, bins=bins, density=True, color=[0.8, 0.8, 0.8], edgecolor='k') + plt.xlim([-50, 50]) + plt.xlabel('error [m]') + str_stats = ' rmse = %.1f\n mean = %.1f\n std = %.1f\n q90 = %.1f' % (rmse, mean, std, q90) + plt.text(15, np.max(density[0])-0.015, str_stats, bbox=dict(facecolor=[0.8,0.8,0.8], alpha=0.5), horizontalalignment='left', fontsize=10) + + fig1.set_size_inches(19.2, 9.28) + fig1.set_tight_layout(True) + fig2.set_size_inches(19.2, 9.28) + fig2.set_tight_layout(True) + + # plot all the data together + chain_sds_all = [] + chain_sur_all = [] + for i in range(chain_sds.shape[1]): + pfname = list(topo_profiles.keys())[i] + chain_sds_all = np.append(chain_sds_all,data_fin[pfname]['sds']) + chain_sur_all = np.append(chain_sur_all,data_fin[pfname]['survey']) + + diff_chain_all = chain_sur_all - chain_sds_all + + # calculate statistics + rmse = np.sqrt(np.nanmean((diff_chain_all)**2)) + mean = np.nanmean(diff_chain_all) + std = np.nanstd(diff_chain_all) + q90 = np.percentile(np.abs(diff_chain_all), 90) + + stats['all'] = {'rmse':rmse,'mean':mean,'std':std,'q90':q90} + + # make plot with all datapoints (from all the transects) + plt.figure(fig3.number) + ax1 = fig3.add_subplot(gs3[0,0]) + plt.axis('equal') + plt.plot(chain_sur_all, chain_sds_all, 'ko', markersize=4, markerfacecolor='w', alpha=0.7) + xmax = np.max([np.nanmax(chain_sds_all),np.nanmax(chain_sur_all)]) + xmin = np.min([np.nanmin(chain_sds_all),np.nanmin(chain_sur_all)]) + ymax = np.max([np.nanmax(chain_sds_all),np.nanmax(chain_sur_all)]) + ymin = np.min([np.nanmin(chain_sds_all),np.nanmin(chain_sur_all)]) + plt.plot([xmin, xmax], [ymin, ymax], 'r--') + correlation = np.corrcoef(chain_sur_all, chain_sds_all)[0,1] + str_corr = 'r = %.2f' % (correlation) + plt.text(xmin, ymax, str_corr, bbox=dict(facecolor=[0.7,0.7,0.7], alpha=0.5), horizontalalignment='left') + plt.xlabel('chainage survey [m]') + plt.ylabel('chainage satellite [m]') + plt.title(pfname, fontweight='bold') + + ax2 = fig3.add_subplot(gs3[1,0]) + binwidth = 3 + bins = np.arange(min(diff_chain_all), max(diff_chain_all) + binwidth, binwidth) + density = plt.hist(diff_chain_all, bins=bins, density=True, color=[0.8, 0.8, 0.8], edgecolor='k') + plt.xlim([-50, 50]) + plt.xlabel('error [m]') + str_stats = ' rmse = %.1f\n mean = %.1f\n std = %.1f\n q90 = %.1f' % (rmse, mean, std, q90) + plt.text(15, np.max(density[0])-0.015, str_stats, bbox=dict(facecolor=[0.8,0.8,0.8], alpha=0.5), horizontalalignment='left', fontsize=10) + fig3.set_size_inches(9.2, 9.28) + fig3.set_tight_layout(True) + + return stats + \ No newline at end of file diff --git a/functions/utils.py b/functions/utils.py index 019c0d4..efd804e 100644 --- a/functions/utils.py +++ b/functions/utils.py @@ -8,7 +8,9 @@ Contains all the utilities, convenience functions and small functions that do si """ import matplotlib.pyplot as plt +from datetime import datetime, 
timedelta import numpy as np +import scipy.io as sio import pdb @@ -59,3 +61,50 @@ def find_indices(lst, condition): def reject_outliers(data, m=2): "rejects outliers in a numpy array" return data[abs(data - np.mean(data)) < m * np.std(data)] + +def duplicates_dict(lst): + "return duplicates and indices" + # nested function + def duplicates(lst, item): + return [i for i, x in enumerate(lst) if x == item] + + return dict((x, duplicates(lst, x)) for x in set(lst) if lst.count(x) > 1) + +def datenum2datetime(datenum): + "convert datenum to datetime" + #takes in datenum and outputs python datetime + time = [datetime.fromordinal(int(dn)) + timedelta(days=float(dn)%1) - timedelta(days = 366) for dn in datenum] + return time + +def loadmat(filename): + ''' + this function should be called instead of direct spio.loadmat + as it cures the problem of not properly recovering python dictionaries + from mat files. It calls the function check keys to cure all entries + which are still mat-objects + ''' + data = sio.loadmat(filename, struct_as_record=False, squeeze_me=True) + return _check_keys(data) + +def _check_keys(dict): + ''' + checks if entries in dictionary are mat-objects. If yes + todict is called to change them to nested dictionaries + ''' + for key in dict: + if isinstance(dict[key], sio.matlab.mio5_params.mat_struct): + dict[key] = _todict(dict[key]) + return dict + +def _todict(matobj): + ''' + A recursive function which constructs from matobjects nested dictionaries + ''' + dict = {} + for strg in matobj._fieldnames: + elem = matobj.__dict__[strg] + if isinstance(elem, sio.matlab.mio5_params.mat_struct): + dict[strg] = _todict(elem) + else: + dict[strg] = elem + return dict \ No newline at end of file diff --git a/read_images.py b/read_images.py new file mode 100644 index 0000000..8f289aa --- /dev/null +++ b/read_images.py @@ -0,0 +1,589 @@ +#==========================================================# +#==========================================================# +# Extract shorelines from Landsat images +#==========================================================# +#==========================================================# + + +#==========================================================# +# Initial settings +#==========================================================# + +import os +import numpy as np +import matplotlib.pyplot as plt +import ee +import pdb + +# other modules +from osgeo import gdal, ogr, osr +import pickle +import matplotlib.cm as cm +from pylab import ginput +from shapely.geometry import LineString + +# image processing modules +import skimage.filters as filters +import skimage.exposure as exposure +import skimage.transform as transform +import sklearn.decomposition as decomposition +import skimage.measure as measure +import skimage.morphology as morphology + +# machine learning modules +from sklearn.model_selection import train_test_split +from sklearn.neural_network import MLPClassifier +from sklearn.preprocessing import StandardScaler, Normalizer +from sklearn.externals import joblib + +# import own modules +import functions.utils as utils +import functions.sds as sds + +# some other settings +np.seterr(all='ignore') # raise/ignore divisions by 0 and nans +plt.rcParams['axes.grid'] = True +plt.rcParams['figure.max_open_warning'] = 100 +ee.Initialize() + +#==========================================================# +# Parameters +#==========================================================# + +sitename = 'NARRA' + +cloud_thresh = 0.7 # threshold for cloud cover 
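+# note on the settings above and below (descriptive comments only, based on how this script uses them
+# further down): cloud_thresh is compared against the fraction of masked (cloud/nan) pixels in each
+# scene, so any image whose cloud cover exceeds it is skipped; buffer_size and min_beach_size are
+# expressed in pixels, so their on-ground footprint depends on the pixel size of the bands being
+# processed (e.g. the 10 m Sentinel-2 bands read below)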
+plot_bool = False # if you want the plots +output_epsg = 28356 # GDA94 / MGA Zone 56 +buffer_size = 7 # radius (in pixels) of disk for buffer (pixel classification) +min_beach_size = 20 # number of pixels in a beach (pixel classification) +dist_ref = 100 # maximum distance from reference point +min_length_wl = 200 # minimum length of shoreline LineString to be kept +manual_bool = True # to manually check images + + +output = dict([]) + +#==========================================================# +# Metadata +#==========================================================# + +filepath = os.path.join(os.getcwd(), 'data', sitename) +with open(os.path.join(filepath, sitename + '_metadata' + '.pkl'), 'rb') as f: + metadata = pickle.load(f) + + +#%% +#==========================================================# +# Read S2 images +#==========================================================# + +satname = 'S2' +dates = metadata[satname]['dates'] +input_epsg = 32756 # metadata[satname]['epsg'] + +# path to images +filepath10 = os.path.join(os.getcwd(), 'data', sitename, satname, '10m') +filenames10 = os.listdir(filepath10) +filepath20 = os.path.join(os.getcwd(), 'data', sitename, satname, '20m') +filenames20 = os.listdir(filepath20) +filepath60 = os.path.join(os.getcwd(), 'data', sitename, satname, '60m') +filenames60 = os.listdir(filepath60) +if (not len(filenames10) == len(filenames20)) or (not len(filenames20) == len(filenames60)): + raise 'error: not the same amount of files for 10, 20 and 60 m' +N = len(filenames10) + +# initialise variables +cloud_cover_ts = [] +acc_georef_ts = [] +date_acquired_ts = [] +filename_ts = [] +satname_ts = [] +timestamp = [] +shorelines = [] +idx_skipped = [] + +spacing = '==========================================================' +msg = ' %s\n %s\n %s' % (spacing, satname, spacing) +print(msg) + +for i in range(N): + + # read 10m bands + fn = os.path.join(filepath10, filenames10[i]) + data = gdal.Open(fn, gdal.GA_ReadOnly) + georef = np.array(data.GetGeoTransform()) + bands = [data.GetRasterBand(k + 1).ReadAsArray() for k in range(data.RasterCount)] + im10 = np.stack(bands, 2) + im10 = im10/10000 # TOA scaled to 10000 + + # if image is only zeros, skip it + if sum(sum(sum(im10))) < 1: + print('skip ' + str(i) + ' - no data') + idx_skipped.append(i) + continue + + nrows = im10.shape[0] + ncols = im10.shape[1] + + # read 20m band (SWIR1) + fn = os.path.join(filepath20, filenames20[i]) + data = gdal.Open(fn, gdal.GA_ReadOnly) + bands = [data.GetRasterBand(k + 1).ReadAsArray() for k in range(data.RasterCount)] + im20 = np.stack(bands, 2) + im20 = im20[:,:,0] + im20 = im20/10000 # TOA scaled to 10000 + im_swir = transform.resize(im20, (nrows, ncols), order=1, preserve_range=True, mode='constant') + im_swir = np.expand_dims(im_swir, axis=2) + + # append down-sampled swir band to the 10m bands + im_ms = np.append(im10, im_swir, axis=2) + + # read 60m band (QA) + fn = os.path.join(filepath60, filenames60[i]) + data = gdal.Open(fn, gdal.GA_ReadOnly) + bands = [data.GetRasterBand(k + 1).ReadAsArray() for k in range(data.RasterCount)] + im60 = np.stack(bands, 2) + im_qa = im60[:,:,0] + cloud_mask = sds.create_cloud_mask(im_qa, satname, plot_bool) + cloud_mask = transform.resize(cloud_mask,(nrows, ncols), order=0, preserve_range=True, mode='constant') + # check if -inf or nan values on any band and add to cloud mask + for k in range(im_ms.shape[2]): + im_inf = np.isin(im_ms[:,:,k], -np.inf) + im_nan = np.isnan(im_ms[:,:,k]) + cloud_mask = np.logical_or(np.logical_or(cloud_mask, 
im_inf), im_nan) + + # calculate cloud cover and if above threshold, skip it + cloud_cover = sum(sum(cloud_mask.astype(int)))/(cloud_mask.shape[0]*cloud_mask.shape[1]) + if cloud_cover > cloud_thresh: + print('skip ' + str(i) + ' - cloudy (' + str(np.round(cloud_cover*100).astype(int)) + '%)') + idx_skipped.append(i) + continue + + # rescale image intensity for display purposes + im_display = sds.rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9, False) + + # classify image in 4 classes (sand, whitewater, water, other) with NN classifier + im_classif, im_labels = sds.classify_image_NN_nopan(im_ms, cloud_mask, min_beach_size, plot_bool) + + # if there aren't any sandy pixels + if sum(sum(im_labels[:,:,0])) == 0 : + # use global threshold + im_ndwi = sds.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask, plot_bool) + contours = sds.find_wl_contours(im_ndwi, cloud_mask, plot_bool) + else: + # use specific threhsold + contours_wi, contours_mwi = sds.find_wl_contours2(im_ms, im_labels, cloud_mask, buffer_size, plot_bool) + + # convert from pixels to world coordinates + wl_coords = sds.convert_pix2world(contours_mwi, georef) + # convert to output epsg spatial reference + wl = sds.convert_epsg(wl_coords, input_epsg, output_epsg) + + # remove contour lines that have a perimeter < min_length_wl + wl_good = [] + for l, wls in enumerate(wl): + coords = [(wls[k,0], wls[k,1]) for k in range(len(wls))] + a = LineString(coords) # shapely LineString structure + if a.length >= min_length_wl: + wl_good.append(wls) + + # format points and only select the ones close to the refpoints + x_points = np.array([]) + y_points = np.array([]) + for k in range(len(wl_good)): + x_points = np.append(x_points,wl_good[k][:,0]) + y_points = np.append(y_points,wl_good[k][:,1]) + wl_good = np.transpose(np.array([x_points,y_points])) + temp = np.zeros((len(wl_good))).astype(bool) + for k in range(len(refpoints)): + temp = np.logical_or(np.linalg.norm(wl_good - refpoints[k,[0,1]], axis=1) < dist_ref, temp) + wl_final = wl_good[temp] + + + # plot output + plt.figure() + im = np.copy(im_display) + colours = np.array([[1,128/255,0/255],[204/255,1,1],[0,0,204/255]]) + for k in range(0,im_labels.shape[2]): + im[im_labels[:,:,k],0] = colours[k,0] + im[im_labels[:,:,k],1] = colours[k,1] + im[im_labels[:,:,k],2] = colours[k,2] + plt.imshow(im) + for k,contour in enumerate(contours_mwi): plt.plot(contour[:, 1], contour[:, 0], linewidth=2, color='k', linestyle='--') + plt.title(satname + ' ' + metadata[satname]['dates'][i].strftime('%Y-%m-%d') + ' acc : ' + str(metadata[satname]['acc_georef'][i]) + ' m' ) + plt.draw() + + pt_in = np.array(ginput(n=1, timeout=1000)) + plt.close() + + # if image is rejected, skip it + if pt_in[0][1] > nrows/2: + print('skip ' + str(i) + ' - rejected') + idx_skipped.append(i) + continue + + # if accepted, store the data + cloud_cover_ts.append(cloud_cover) + acc_georef_ts.append(metadata[satname]['acc_georef'][i]) + + filename_ts.append(filenames10[i]) + satname_ts.append(satname) + date_acquired_ts.append(filenames10[i][:10]) + + timestamp.append(metadata[satname]['dates'][i]) + shorelines.append(wl_final) + +# store in output structure +output[satname] = {'dates':timestamp, 'shorelines':shorelines, 'idx_skipped':idx_skipped, + 'metadata':{'filenames':filename_ts, 'satname':satname_ts, 'cloud_cover':cloud_cover_ts, + 'acc_georef':acc_georef_ts}} +del idx_skipped + + +#%% +#==========================================================# +# Read L7&L8 images 
+#==========================================================# + +satname = 'L8' +dates = metadata[satname]['dates'] +input_epsg = 32656 # metadata[satname]['epsg'] + +# path to images +filepath_pan = os.path.join(os.getcwd(), 'data', sitename, 'L7&L8', 'pan') +filepath_ms = os.path.join(os.getcwd(), 'data', sitename, 'L7&L8', 'ms') +filenames_pan = os.listdir(filepath_pan) +filenames_ms = os.listdir(filepath_ms) +if (not len(filenames_pan) == len(filenames_ms)): + raise 'error: not the same amount of files for pan and ms' +N = len(filenames_pan) + +# initialise variables +cloud_cover_ts = [] +acc_georef_ts = [] +date_acquired_ts = [] +filename_ts = [] +satname_ts = [] +timestamp = [] +shorelines = [] +idx_skipped = [] + + +spacing = '==========================================================' +msg = ' %s\n %s\n %s' % (spacing, satname, spacing) +print(msg) + +for i in range(N): + + # get satellite name + sat = filenames_pan[i][20:22] + + # read pan image + fn_pan = os.path.join(filepath_pan, filenames_pan[i]) + data = gdal.Open(fn_pan, gdal.GA_ReadOnly) + georef = np.array(data.GetGeoTransform()) + bands = [data.GetRasterBand(k + 1).ReadAsArray() for k in range(data.RasterCount)] + im_pan = np.stack(bands, 2)[:,:,0] + nrows = im_pan.shape[0] + ncols = im_pan.shape[1] + + # read ms image + fn_ms = os.path.join(filepath_ms, filenames_ms[i]) + data = gdal.Open(fn_ms, gdal.GA_ReadOnly) + bands = [data.GetRasterBand(k + 1).ReadAsArray() for k in range(data.RasterCount)] + im_ms = np.stack(bands, 2) + + # cloud mask + im_qa = im_ms[:,:,5] + cloud_mask = sds.create_cloud_mask(im_qa, sat, plot_bool) + cloud_mask = transform.resize(cloud_mask, (nrows, ncols), order=0, preserve_range=True, mode='constant').astype('bool_') + # resize the image using bilinear interpolation (order 1) + im_ms = im_ms[:,:,:5] + im_ms = transform.resize(im_ms,(nrows, ncols), order=1, preserve_range=True, mode='constant') + + # check if -inf or nan values on any band and add to cloud mask + for k in range(im_ms.shape[2]+1): + if k == 5: + im_inf = np.isin(im_pan, -np.inf) + im_nan = np.isnan(im_pan) + else: + im_inf = np.isin(im_ms[:,:,k], -np.inf) + im_nan = np.isnan(im_ms[:,:,k]) + cloud_mask = np.logical_or(np.logical_or(cloud_mask, im_inf), im_nan) + + # calculate cloud cover and skip image if above threshold + cloud_cover = sum(sum(cloud_mask.astype(int)))/(cloud_mask.shape[0]*cloud_mask.shape[1]) + if cloud_cover > cloud_thresh: + print('skip ' + str(i) + ' - cloudy (' + str(np.round(cloud_cover*100).astype(int)) + '%)') + idx_skipped.append(i) + continue + + # Pansharpen image (different for L8 and L7) + if sat == 'L7': + # pansharpen (Green, Red, NIR) and downsample Blue and SWIR1 + im_ms_ps = sds.pansharpen(im_ms[:,:,[1,2,3]], im_pan, cloud_mask, plot_bool) + im_ms_ps = np.append(im_ms[:,:,[0]], im_ms_ps, axis=2) + im_ms_ps = np.append(im_ms_ps, im_ms[:,:,[4]], axis=2) + im_display = sds.rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9, False) + elif sat == 'L8': + # pansharpen RGB image and downsample NIR and SWIR1 + im_ms_ps = sds.pansharpen(im_ms[:,:,[0,1,2]], im_pan, cloud_mask, plot_bool) + im_ms_ps = np.append(im_ms_ps, im_ms[:,:,[3,4]], axis=2) + im_display = sds.rescale_image_intensity(im_ms_ps[:,:,[2,1,0]], cloud_mask, 99.9, False) + + # classify image in 4 classes (sand, whitewater, water, other) with NN classifier + im_classif, im_labels = sds.classify_image_NN(im_ms_ps, im_pan, cloud_mask, min_beach_size, plot_bool) + + # if there aren't any sandy pixels + if sum(sum(im_labels[:,:,0])) == 0 
: + # use global threshold + im_ndwi = sds.nd_index(im_ms_ps[:,:,4], im_ms_ps[:,:,1], cloud_mask, plot_bool) + contours = sds.find_wl_contours(im_ndwi, cloud_mask, plot_bool) + else: + # use specific threhsold + contours_wi, contours_mwi = sds.find_wl_contours2(im_ms_ps, im_labels, cloud_mask, buffer_size, plot_bool) + + # convert from pixels to world coordinates + wl_coords = sds.convert_pix2world(contours_mwi, georef) + # convert to output epsg spatial reference + wl = sds.convert_epsg(wl_coords, input_epsg, output_epsg) + + # remove contour lines that have a perimeter < min_length_wl + wl_good = [] + for l, wls in enumerate(wl): + coords = [(wls[k,0], wls[k,1]) for k in range(len(wls))] + a = LineString(coords) # shapely LineString structure + if a.length >= min_length_wl: + wl_good.append(wls) + + # format points and only select the ones close to the refpoints + x_points = np.array([]) + y_points = np.array([]) + for k in range(len(wl_good)): + x_points = np.append(x_points,wl_good[k][:,0]) + y_points = np.append(y_points,wl_good[k][:,1]) + wl_good = np.transpose(np.array([x_points,y_points])) + temp = np.zeros((len(wl_good))).astype(bool) + for k in range(len(refpoints)): + temp = np.logical_or(np.linalg.norm(wl_good - refpoints[k,[0,1]], axis=1) < dist_ref, temp) + wl_final = wl_good[temp] + + # plot output + plt.figure() + plt.subplot(121) + im = np.copy(im_display) + colours = np.array([[1,128/255,0/255],[204/255,1,1],[0,0,204/255]]) + for k in range(0,im_labels.shape[2]): + im[im_labels[:,:,k],0] = colours[k,0] + im[im_labels[:,:,k],1] = colours[k,1] + im[im_labels[:,:,k],2] = colours[k,2] + plt.imshow(im) + for k,contour in enumerate(contours_mwi): plt.plot(contour[:, 1], contour[:, 0], linewidth=2, color='k', linestyle='--') + plt.title(sat + ' ' + metadata[satname]['dates'][i].strftime('%Y-%m-%d') + ' acc : ' + str(metadata[satname]['acc_georef'][i]) + ' m' ) + + pt_in = np.array(ginput(n=1, timeout=1000)) + plt.close() + + # if image is rejected, skip it + if pt_in[0][1] > nrows/2: + print('skip ' + str(i) + ' - rejected') + idx_skipped.append(i) + continue + + # if accepted, store the data + cloud_cover_ts.append(cloud_cover) + acc_georef_ts.append(metadata[satname]['acc_georef'][i]) + + filename_ts.append(filenames_pan[i]) + satname_ts.append(sat) + date_acquired_ts.append(filenames_pan[i][:10]) + + timestamp.append(metadata[satname]['dates'][i]) + shorelines.append(wl_final) + +# store in output structure +output[satname] = {'dates':timestamp, 'shorelines':shorelines, 'idx_skipped':idx_skipped, + 'metadata':{'filenames':filename_ts, 'satname':satname_ts, 'cloud_cover':cloud_cover_ts, + 'acc_georef':acc_georef_ts}} + +del idx_skipped + + + +#%% +#==========================================================# +# Read L5 images +#==========================================================# + +satname = 'L5' +dates = metadata[satname]['dates'] +input_epsg = 32656 # metadata[satname]['epsg'] + +# path to images +filepath_img = os.path.join(os.getcwd(), 'data', sitename, satname, '30m') +filenames = os.listdir(filepath_img) +N = len(filenames) + +# initialise variables +cloud_cover_ts = [] +acc_georef_ts = [] +date_acquired_ts = [] +filename_ts = [] +satname_ts = [] +timestamp = [] +shorelines = [] +idx_skipped = [] + + +spacing = '==========================================================' +msg = ' %s\n %s\n %s' % (spacing, satname, spacing) +print(msg) + +for i in range(N): + + # read ms image + fn = os.path.join(filepath_img, filenames[i]) + data = gdal.Open(fn, gdal.GA_ReadOnly) 
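+ # note (standard GDAL behaviour, added for clarity): GetGeoTransform() on the next line returns the
+ # 6-element affine vector [top-left X, pixel width, row rotation, top-left Y, column rotation,
+ # pixel height (negative for north-up images)]; this is the georef vector passed later to
+ # sds.convert_pix2world, and it is rescaled further down from the native 30 m grid to the
+ # resampled 15 m grid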
+ georef = np.array(data.GetGeoTransform()) + bands = [data.GetRasterBand(k + 1).ReadAsArray() for k in range(data.RasterCount)] + im_ms = np.stack(bands, 2) + + # down-sample to half hte original pixel size + nrows = im_ms.shape[0]*2 + ncols = im_ms.shape[1]*2 + + # cloud mask + im_qa = im_ms[:,:,5] + im_ms = im_ms[:,:,:-1] + cloud_mask = sds.create_cloud_mask(im_qa, satname, plot_bool) + cloud_mask = transform.resize(cloud_mask, (nrows, ncols), order=0, preserve_range=True, mode='constant').astype('bool_') + + # resize the image using bilinear interpolation (order 1) + im_ms = transform.resize(im_ms,(nrows, ncols), order=1, preserve_range=True, mode='constant') + + # adjust georef vector (scale becomes 15m and origin is adjusted to the center of new corner pixel) + georef[1] = 15 + georef[5] = -15 + georef[0] = georef[0] + 7.5 + georef[3] = georef[3] - 7.5 + + # check if -inf or nan values on any band and add to cloud mask + for k in range(im_ms.shape[2]): + im_inf = np.isin(im_ms[:,:,k], -np.inf) + im_nan = np.isnan(im_ms[:,:,k]) + cloud_mask = np.logical_or(np.logical_or(cloud_mask, im_inf), im_nan) + + # calculate cloud cover and skip image if above threshold + cloud_cover = sum(sum(cloud_mask.astype(int)))/(cloud_mask.shape[0]*cloud_mask.shape[1]) + if cloud_cover > cloud_thresh: + print('skip ' + str(i) + ' - cloudy (' + str(np.round(cloud_cover*100).astype(int)) + '%)') + idx_skipped.append(i) + continue + + # rescale image intensity for display purposes + im_display = sds.rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9, False) + + # classify image in 4 classes (sand, whitewater, water, other) with NN classifier + im_classif, im_labels = sds.classify_image_NN_nopan(im_ms, cloud_mask, min_beach_size, plot_bool) + + # if there aren't any sandy pixels + if sum(sum(im_labels[:,:,0])) == 0 : + # use global threshold + im_ndwi = sds.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask, plot_bool) + contours = sds.find_wl_contours(im_ndwi, cloud_mask, plot_bool) + else: + # use specific threhsold + contours_wi, contours_mwi = sds.find_wl_contours2(im_ms, im_labels, cloud_mask, buffer_size, plot_bool) + + # convert from pixels to world coordinates + wl_coords = sds.convert_pix2world(contours_mwi, georef) + # convert to output epsg spatial reference + wl = sds.convert_epsg(wl_coords, input_epsg, output_epsg) + + # remove contour lines that have a perimeter < min_length_wl + wl_good = [] + for l, wls in enumerate(wl): + coords = [(wls[k,0], wls[k,1]) for k in range(len(wls))] + a = LineString(coords) # shapely LineString structure + if a.length >= min_length_wl: + wl_good.append(wls) + + # format points and only select the ones close to the refpoints + x_points = np.array([]) + y_points = np.array([]) + for k in range(len(wl_good)): + x_points = np.append(x_points,wl_good[k][:,0]) + y_points = np.append(y_points,wl_good[k][:,1]) + wl_good = np.transpose(np.array([x_points,y_points])) + temp = np.zeros((len(wl_good))).astype(bool) + for k in range(len(refpoints)): + temp = np.logical_or(np.linalg.norm(wl_good - refpoints[k,[0,1]], axis=1) < dist_ref, temp) + wl_final = wl_good[temp] + + # plot output + plt.figure() + plt.subplot(121) + im = np.copy(im_display) + colours = np.array([[1,128/255,0/255],[204/255,1,1],[0,0,204/255]]) + for k in range(0,im_labels.shape[2]): + im[im_labels[:,:,k],0] = colours[k,0] + im[im_labels[:,:,k],1] = colours[k,1] + im[im_labels[:,:,k],2] = colours[k,2] + plt.imshow(im) + for k,contour in enumerate(contours_mwi): plt.plot(contour[:, 1], contour[:, 0], 
linewidth=2, color='k', linestyle='--') + plt.title(satname + ' ' + metadata[satname]['dates'][i].strftime('%Y-%m-%d') + ' acc : ' + str(metadata[satname]['acc_georef'][i]) + ' m' ) + plt.subplot(122) + plt.axis('equal') + plt.axis('off') + plt.plot(refpoints[:,0], refpoints[:,1], 'k.') + plt.plot(wl_final[:,0], wl_final[:,1], 'r.') + mng = plt.get_current_fig_manager() + mng.window.showMaximized() + plt.tight_layout() + plt.draw() + + pt_in = np.array(ginput(n=1, timeout=1000)) + plt.close() + + # if image is rejected, skip it + if pt_in[0][1] > nrows/2: + print('skip ' + str(i) + ' - rejected') + idx_skipped.append(i) + continue + + # if accepted, store the data + cloud_cover_ts.append(cloud_cover) + acc_georef_ts.append(metadata[satname]['acc_georef'][i]) + + filename_ts.append(filenames[i]) + satname_ts.append(satname) + date_acquired_ts.append(filenames[i][:10]) + + timestamp.append(metadata[satname]['dates'][i]) + shorelines.append(wl_final) + +# store in output structure +output[satname] = {'dates':timestamp, 'shorelines':shorelines, 'idx_skipped':idx_skipped, + 'metadata':{'filenames':filename_ts, 'satname':satname_ts, 'cloud_cover':cloud_cover_ts, + 'acc_georef':acc_georef_ts}} + +del idx_skipped + +#==========================================================# +#==========================================================# +#==========================================================# +#==========================================================# + +#%% +# save output +with open(os.path.join(filepath, sitename + '_output' + '.pkl'), 'wb') as f: + pickle.dump(output, f) + +# save idx_skipped +#idx_skipped = dict([]) +#for satname in list(output.keys()): +# idx_skipped[satname] = output[satname]['idx_skipped'] +#with open(os.path.join(filepath, sitename + '_idxskipped' + '.pkl'), 'wb') as f: +# pickle.dump(idx_skipped, f) + \ No newline at end of file diff --git a/sl_comparison_NARRA.py b/sl_comparison_NARRA.py deleted file mode 100644 index ac9a3b0..0000000 --- a/sl_comparison_NARRA.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -#==========================================================# -# Compare Narrabeen SDS with 3D quadbike surveys -#==========================================================# - -# Initial settings - -import os -import numpy as np -import matplotlib.pyplot as plt -import pdb -import ee - -import matplotlib.dates as mdates -import matplotlib.cm as cm -from datetime import datetime, timedelta -import pickle -import pytz -import scipy.io as sio -import scipy.interpolate as interpolate -import statsmodels.api as sm -import skimage.measure as measure - -# my functions -import functions.utils as utils - -# some settings -np.seterr(all='ignore') # raise/ignore divisions by 0 and nans -plt.rcParams['axes.grid'] = True -plt.rcParams['figure.max_open_warning'] = 100 - -au_tz = pytz.timezone('Australia/Sydney') - -# load quadbike dates and convert from datenum to datetime -filename = 'data\quadbike\survey_dates.mat' -filepath = os.path.join(os.getcwd(), filename) -dates_quad = sio.loadmat(filepath)['dates'] # matrix containing year, month, day -dates_quad = [datetime(dates_quad[i,0], dates_quad[i,1], dates_quad[i,2], - tzinfo=au_tz) for i in range(dates_quad.shape[0])] - -# load timestamps from satellite images -satname = 'L8' -sitename = 'NARRA' -filepath = os.path.join(os.getcwd(), 'data', satname, sitename) -with open(os.path.join(filepath, sitename + '_output_new' + '.pkl'), 'rb') as f: - output = pickle.load(f) - -dates_l8 = output['t'] -# convert to AEST 
-dates_l8 = [_.astimezone(au_tz) for _ in dates_l8]
-# remove duplicates
-dates_l8_str = [_.strftime('%Y%m%d') for _ in dates_l8]
-dupl = utils.duplicates_dict(dates_l8_str)
-idx_remove = []
-for k,v in dupl.items():
-
-    idx1 = v[0]
-    idx2 = v[1]
-
-    c1 = output['cloud_cover'][idx1]
-    c2 = output['cloud_cover'][idx2]
-    g1 = output['acc_georef'][idx1]
-    g2 = output['acc_georef'][idx2]
-
-    # keep the duplicate with the lower cloud cover (or the better georeferencing accuracy if cloud covers are similar)
-    if c1 < c2 - 0.01:
-        idx_remove.append(idx2)
-    elif g1 < g2 - 0.1:
-        idx_remove.append(idx2)
-    else:
-        idx_remove.append(idx1)
-idx_remove = sorted(idx_remove)
-idx_all = np.arange(len(output['t']))
-idx_keep = list(np.where(~np.isin(idx_all,idx_remove))[0])
-output['t'] = [output['t'][k] for k in idx_keep]
-output['shorelines'] = [output['shorelines'][k] for k in idx_keep]
-output['cloud_cover'] = [output['cloud_cover'][k] for k in idx_keep]
-output['acc_georef'] = [output['acc_georef'][k] for k in idx_keep]
-# convert to AEST
-dates_l8 = output['t']
-dates_l8 = [_.astimezone(au_tz) for _ in dates_l8]
-
-# load wave data (already AEST)
-filename = 'data/wave/SydneyProcessed.mat'
-filepath = os.path.join(os.getcwd(), filename)
-wave_data = sio.loadmat(filepath)
-idx = utils.find_indices(wave_data['dates'][:,0], lambda e: e >= dates_l8[0].year and e <= dates_l8[-1].year)
-hsig = np.array([wave_data['Hsig'][i][0] for i in idx])
-wdir = np.array([wave_data['Wdir'][i][0] for i in idx])
-dates_wave = [datetime(wave_data['dates'][i,0], wave_data['dates'][i,1],
-                       wave_data['dates'][i,2], wave_data['dates'][i,3],
-                       wave_data['dates'][i,4], wave_data['dates'][i,5],
-                       tzinfo=au_tz) for i in idx]
-
-# load tide data (already AEST)
-filename = 'SydTideData.mat'
-filepath = os.path.join(os.getcwd(), 'data', 'tide', filename)
-tide_data = sio.loadmat(filepath)
-idx = utils.find_indices(tide_data['dates'][:,0], lambda e: e >= dates_l8[0].year and e <= dates_l8[-1].year)
-tide = np.array([tide_data['tide'][i][0] for i in idx])
-dates_tide = [datetime(tide_data['dates'][i,0], tide_data['dates'][i,1],
-                       tide_data['dates'][i,2], tide_data['dates'][i,3],
-                       tide_data['dates'][i,4], tide_data['dates'][i,5],
-                       tzinfo=au_tz) for i in idx]
-
-#%% make a plot of all the dates with wave data
-orange = [255/255,140/255,0]
-blue = [0,191/255,255/255]
-f = plt.figure()
-months = mdates.MonthLocator()
-month_fmt = mdates.DateFormatter('%b %Y')
-days = mdates.DayLocator()
-years = [2013,2014,2015,2016]
-for k in range(len(years)):
-    sel_year = years[k]
-    ax = plt.subplot(4,1,k+1)
-    idx_year = utils.find_indices(dates_wave, lambda e: e.year == sel_year)
-    plt.plot([dates_wave[i] for i in idx_year], [hsig[i] for i in idx_year], 'k-', linewidth=0.5)
-    hsigmax = np.nanmax([hsig[i] for i in idx_year])
-    cbool = True
-    for j in range(len(dates_quad)):
-        if dates_quad[j].year == sel_year:
-            if cbool:
-                plt.plot([dates_quad[j], dates_quad[j]], [0, hsigmax], color=orange, label='survey')
-                cbool = False
-            else:
-                plt.plot([dates_quad[j], dates_quad[j]], [0, hsigmax], color=orange)
-    cbool = True
-    for j in range(len(dates_l8)):
-        if dates_l8[j].year == sel_year:
-            if cbool:
-                plt.plot([dates_l8[j], dates_l8[j]], [0, hsigmax], color=blue, label='landsat8')
-                cbool = False
-            else:
-                plt.plot([dates_l8[j], dates_l8[j]], [0, hsigmax], color=blue)
-    if k == 3:
-        plt.legend()
-    plt.xlim((datetime(sel_year,1,1, tzinfo=au_tz), datetime(sel_year,12,31, tzinfo=au_tz)))
-    plt.ylim((0, hsigmax))
-    plt.ylabel('Hs [m]')
-    ax.xaxis.set_major_locator(months)
-    ax.xaxis.set_major_formatter(month_fmt)
-f.subplots_adjust(hspace=0.2)
-plt.draw()
-
-#%% calculate difference between dates (quad and sat)
-diff_days = [ [(x - _).days for _ in dates_quad] for x in dates_l8]
-max_diff = 14
-idx_closest = [utils.find_indices(_, lambda e: abs(e) <= max_diff) for _ in diff_days]
-# store in dates_diff dictionary
-dates_diff = []
-cloud_cover = []
-for i in range(len(idx_closest)):
-    if not idx_closest[i]:
-        continue
-    elif len(idx_closest[i]) > 1:
-        idx_best = np.argmin(np.abs([diff_days[i][_] for _ in idx_closest[i]]))
-        dates_temp = [dates_quad[_] for _ in idx_closest[i]]
-        days_temp = [diff_days[i][_] for _ in idx_closest[i]]
-        dates_diff.append({"date sat": dates_l8[i],
-                           "date quad": dates_temp[idx_best],
-                           "days diff": days_temp[idx_best]})
-    else:
-        dates_diff.append({"date sat": dates_l8[i],
-                           "date quad": dates_quad[idx_closest[i][0]],
-                           "days diff": diff_days[i][idx_closest[i][0]]
-                           })
-    # store cloud data
-    cloud_cover.append(output['cloud_cover'][i])
-
-# store wave data
-wave_hsig = []
-for i in range(len(dates_diff)):
-    wave_hsig.append(hsig[np.argmin(np.abs(np.array([(dates_diff[i]['date sat'] - _).total_seconds() for _ in dates_wave])))])
-
-# make a plot
-plt.figure()
-counter = 0
-for i in range(len(dates_diff)):
-    counter = counter + 1
-    if dates_diff[i]['date quad'] > dates_diff[i]['date sat']:
-        date_min = dates_diff[i]['date sat']
-        date_max = dates_diff[i]['date quad']
-    else:
-        date_min = dates_diff[i]['date quad']
-        date_max = dates_diff[i]['date sat']
-    idx_t = utils.find_indices(dates_wave, lambda e: e >= date_min and e <= date_max)
-    hsigmax = np.nanmax([hsig[i] for i in idx_t])
-    hsigmin = np.nanmin([hsig[i] for i in idx_t])
-    if counter > 9:
-        counter = 1
-        plt.figure()
-    ax = plt.subplot(3,3,counter)
-    plt.plot([dates_wave[i] for i in idx_t], [hsig[i] for i in idx_t], 'k-', linewidth=1.5)
-    plt.plot([dates_diff[i]['date quad'], dates_diff[i]['date quad']], [0, 4.5], color=orange, label='survey')
-    plt.plot([dates_diff[i]['date sat'], dates_diff[i]['date sat']], [0, 4.5], color=blue, label='landsat8')
-    plt.ylabel('Hs [m]')
-    ax.xaxis.set_major_locator(mdates.DayLocator(tz=au_tz))
-    ax.xaxis.set_minor_locator(mdates.HourLocator(tz=au_tz))
-    ax.xaxis.set_major_formatter(mdates.DateFormatter('%d'))
-    plt.title(dates_diff[i]['date sat'].strftime('%b %Y') + ' (' + str(abs(dates_diff[i]['days diff'])) + ' days)')
-    plt.draw()
-    plt.gcf().subplots_adjust(hspace=0.5)
-
-# mean day difference
-print(np.mean([ np.abs(_['days diff']) for _ in dates_diff]))
-
-#%% Compare shorelines in elevation
-
-dist_buffer = 50 # buffer of points selected for interpolation
-
-# load quadbike .mat files
-foldername = 'data/quadbike/surveys3D'
-folderpath = os.path.join(os.getcwd(), foldername)
-filenames = os.listdir(folderpath)
-
-# get the satellite shorelines
-sl = output['shorelines']
-
-# get dates from filenames
-dates_quad = [datetime(int(_[6:10]), int(_[11:13]), int(_[14:16]), tzinfo=au_tz) for _ in filenames]
-
-zav = []
-ztide = []
-sl_gt = []
-for i in range(len(dates_diff)):
-
-    sl_smooth = sl[i]
-
-    # select closest 3D survey and load .mat file
-    idx_closest = np.argmin(np.abs(np.array([(dates_diff[i]['date sat'] - _).days for _ in dates_quad])))
-    survey3d = sio.loadmat(os.path.join(folderpath, filenames[idx_closest]))
-    # reshape to a vector
-    xs = survey3d['x'].reshape(survey3d['x'].shape[0] * survey3d['x'].shape[1])
-    ys = survey3d['y'].reshape(survey3d['y'].shape[0] * survey3d['y'].shape[1])
-    zs = survey3d['z'].reshape(survey3d['z'].shape[0] * survey3d['z'].shape[1])
-    # remove nan values
-    idx_nan = np.isnan(zs)
-    xs = xs[~idx_nan]
-    ys = ys[~idx_nan]
-    zs = zs[~idx_nan]
-
-    # find water level at the time the image was acquired
-    idx_closest = np.argmin(np.abs(np.array([(dates_diff[i]['date sat'] - _).total_seconds() for _ in dates_tide])))
-    tide_level = tide[idx_closest]
-    ztide.append(tide_level)
-
-    # find contour corresponding to the water level on 3D surface (if below minimum, add 0.05m increments)
-    if tide_level < np.nanmin(survey3d['z']):
-        tide_level = np.nanmin(survey3d['z'])
-        sl_tide = measure.find_contours(survey3d['z'], tide_level)
-        sl_tide = sl_tide[np.argmax(np.array([len(_) for _ in sl_tide]))]
-        count = 0
-        while len(sl_tide) < 900:
-            count = count + 1
-            tide_level = tide_level + 0.05
-            sl_tide = measure.find_contours(survey3d['z'], tide_level)
-            sl_tide = sl_tide[np.argmax(np.array([len(_) for _ in sl_tide]))]
-            print('added ' + str(0.05*count) + ' m - contour with ' + str(len(sl_tide)) + ' points')
-    else:
-        sl_tide = measure.find_contours(survey3d['z'], tide_level)
-        sl_tide = sl_tide[np.argmax(np.array([len(_) for _ in sl_tide]))]
-    # remove nans
-    if np.any(np.isnan(sl_tide)):
-        index_nan = np.where(np.isnan(sl_tide))[0]
-        sl_tide = np.delete(sl_tide, index_nan, axis=0)
-    # get x,y coordinates
-    xtide = [survey3d['x'][int(np.round(sl_tide[m,0])), int(np.round(sl_tide[m,1]))] for m in range(sl_tide.shape[0])]
-    ytide = [survey3d['y'][int(np.round(sl_tide[m,0])), int(np.round(sl_tide[m,1]))] for m in range(sl_tide.shape[0])]
-    sl_gt.append(np.transpose(np.array([np.array(xtide), np.array(ytide)])))
-    # interpolate SDS on 3D surface to get elevation (point by point)
-    zq = []
-    for j in range(sl_smooth.shape[0]):
-        xq = sl_smooth[j,0]
-        yq = sl_smooth[j,1]
-        dist_q = np.linalg.norm(np.transpose(np.array([[xq - _ for _ in xs],[yq - _ for _ in ys]])), axis=1)
-        idx_buffer = dist_q <= dist_buffer
-        if sum(idx_buffer) > 0:
-            tck = interpolate.bisplrep(xs[idx_buffer], ys[idx_buffer], zs[idx_buffer])
-            zq.append(interpolate.bisplev(xq, yq, tck))
-
-    zq = np.array(zq)
-    plt.figure()
-    plt.hist(zq, bins=100)
-    plt.draw()
-#    plt.figure()
-#    plt.axis('equal')
-#    plt.scatter(xs, ys, s=10, c=zs, marker='o', cmap=cm.get_cmap('jet'),
-#                label='quad data')
-#    plt.plot(xs[idx_buffer], ys[idx_buffer], 'ko')
-#    plt.plot(xq,yq,'ro')
-#    plt.draw()
-
-    # store the alongshore median elevation
-    zav.append(np.median(utils.reject_outliers(zq, m=2)))
-
-    # make plot
-    red = [255/255, 0, 0]
-    gray = [0.75, 0.75, 0.75]
-    plt.figure()
-    plt.subplot(121)
-    plt.axis('equal')
-    plt.scatter(xs, ys, s=10, c=zs, marker='o', cmap=cm.get_cmap('jet'),
-                label='3D survey')
-    plt.plot(xtide, ytide, '--', color=gray, linewidth=2.5, label='tide level contour')
-    plt.plot(sl_smooth[:,0], sl_smooth[:,1], '-', color=red, linewidth=2.5, label='SDS')
-#    plt.plot(sl[i][idx_beach,0], sl[i][idx_beach,1], 'w-', linewidth=2)
-    plt.xlabel('Eastings [m]')
-    plt.ylabel('Northings [m]')
-    plt.title('Shoreline comparison')
-    plt.colorbar(label='mAHD')
-    plt.legend()
-    plt.ylim((6266100, 6267000))
-    plt.subplot(122)
-    plt.plot(np.linspace(0,1,len(zq)), zq, 'ko-', markersize=5)
-    plt.plot([0, 1], [zav[i], zav[i]], 'r-', label='median')
-    plt.plot([0, 1], [ztide[i], ztide[i]], 'g--', label='measured tide')
-    plt.xlabel('alongshore distance (normalised)')
-    plt.ylabel('Elevation [mAHD]')
-    plt.title('Alongshore SDS elevation')
-    plt.legend()
-    mng = plt.get_current_fig_manager()
-    mng.window.showMaximized()
-    plt.tight_layout()
-    plt.draw()
-
-    print(i)
-
-#%% Calculate some error statistics
-zav = np.array(zav)
-ztide = np.array(ztide)
-
-f = plt.figure()
-plt.subplot(3,1,1)
-plt.bar(np.linspace(1,len(zav),len(zav)), zav-ztide)
-plt.ylabel('Error in z [m]')
-plt.title('Elevation error')
-plt.xticks([])
-plt.draw()
-
-plt.subplot(3,1,2)
-plt.bar(np.linspace(1,len(zav),len(zav)), wave_hsig, color=orange)
-plt.ylabel('Hsig [m]')
-plt.xticks([])
-plt.draw()
-
-plt.subplot(3,1,3)
-plt.bar(np.linspace(1,len(zav),len(zav)), np.array(cloud_cover)*100, color='g')
-plt.ylabel('Cloud cover %')
-plt.xlabel('comparison #')
-plt.grid(False)
-plt.grid(axis='y')
-f.subplots_adjust(hspace=0)
-plt.draw()
-
-# RMSE between the SDS elevation and the measured tide level
-print(np.sqrt(np.mean((zav - ztide)**2)))
-
-#%% plot to show LOWESS smoothing
-#i = 0
-#idx_beach = [np.min(np.linalg.norm(sl[i][k,:] - narrabeach, axis=1)) < dist_thresh for k in range(sl[i].shape[0])]
-#x = sl[i][idx_beach,0]
-#y = sl[i][idx_beach,1]
-#sl_smooth = lowess(x,y, frac=1./10, it = 10)
-#
-#plt.figure()
-#plt.axis('equal')
-#plt.scatter
-#plt.plot(x,y,'bo', linewidth=2, label='original SDS')
-#plt.plot(sl_smooth[:,1], sl_smooth[:,0], 'ro', linewidth=2, label='smoothed SDS')
-#plt.legend()
-#plt.xlabel('Eastings [m]')
-#plt.ylabel('Northings [m]')
-#plt.title('Local weighted scatterplot smoothing (LOWESS)')
-#plt.draw()
-
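
Note: the last statistic printed in the removed comparison script above is a plain root-mean-square error between the alongshore-median SDS elevation and the tide level measured at the image acquisition time. The snippet below is a minimal, self-contained sketch of that calculation for reference; the array values are placeholders standing in for the zav and ztide lists built in the comparison loop, not real survey data.

import numpy as np

# placeholders for the alongshore-median SDS elevations (zav) and the
# measured tide levels (ztide) collected for each satellite/survey pair
zav = np.array([0.71, 0.58, 0.83, 0.47, 0.66])
ztide = np.array([0.62, 0.55, 0.74, 0.41, 0.60])

err = zav - ztide                   # elevation error of each comparison
bias = np.mean(err)                 # systematic offset
mae = np.mean(np.abs(err))          # mean absolute error
rmse = np.sqrt(np.mean(err**2))     # same statistic as printed above
print('bias = %.2f m, MAE = %.2f m, RMSE = %.2f m' % (bias, mae, rmse))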