diff --git a/Analysis/Code/P1_NARCliM_First_Pass_variab_deviation_plots.py b/Analysis/Code/P1_NARCliM_First_Pass_variab_deviation_plots.py
index 8545d61..6952ff0 100644
--- a/Analysis/Code/P1_NARCliM_First_Pass_variab_deviation_plots.py
+++ b/Analysis/Code/P1_NARCliM_First_Pass_variab_deviation_plots.py
@@ -22,16 +22,18 @@ matplotlib.style.use('ggplot')
 #plt.rcParams.update(plt.rcParamsDefault)
 #
 # Set working direcotry (where postprocessed NARClIM data is located)
-os.chdir('C:/Users/z5025317/WRL_Postdoc/Projects/Paper#1/')
+os.chdir('C:/Users/z5025317/OneDrive - UNSW/WRL_Postdoc_Manual_Backup/WRL_Postdoc/Projects/Paper#1/')
 #
 #####################################----------------------------------
 #set input parameters
 Base_period_start = '1986-01-01'
 Base_period_end = '2005-01-01' #use last day that's not included in period as < is used for subsetting
-Estuary = 'Terrigal' # 'Belongil'
+Estuary = 'Nadgee' # 'Belongil'
 Clim_var_type = "*" # '*' will create pdf for all variables in folder
-Clim_var_type = "wssmean*" # '*' will create pdf for all variables in folder
+Clim_var_type = "tasmean*" # '*' will create pdf for all variables in folder
 Present_Day_Clim_Var = 'Wind' #MaxT, MinT, Rainfall, ET
+present_day_plot = 'yes'
+Version = "V1"
 #####################################----------------------------------
 #set directory path for output files
@@ -117,6 +119,7 @@ fig = plt.figure(figsize=(14,8))
 delta_all_df = pd.DataFrame()
 i=1
 for temp in times:
+    temp = 'annual'
     #subset the ensemble dataframe for the period used:
     if temp == 'annual':
         Ensemble_Delta_df = Ensemble_Delta_full_df.iloc[:,range(0,2)]
@@ -193,19 +196,53 @@ for temp in times:
     for tick in ax.get_xticklabels():
         tick.set_rotation(0)
     fig.tight_layout()
+    fig.patch.set_alpha(0)
     #reset i to i+1 for next step
     if temp == 'MAM':
         i=i+2
     else:
         i=i+1
     print(i)
+    plt.show()
-out_file_name = Estuary + '_' + Clim_var_type + '_CC_prio_plot.png'
+out_file_name = Estuary + '_' + Clim_var_type + '_CC_prio_plot' + Version + '.png'
 out_path = output_directory + '/' + out_file_name
 fig.savefig(out_path)
+if present_day_plot == 'yes':
+    #print present day climate data
+    fig = plt.figure(figsize=(5,4))
+    ax = fig.add_subplot(1, 1, 1)
+    if temp == 'annual':
+        xmin = int(min(Plot_in_df.min(axis=1))-minplotDelta)
+        xmax = int(max(Plot_in_df.max(axis=1))+maxplotDelta)
+    else:
+        xmin = int(min(Plot_in_df.min(axis=1))-minplotDelta)
+        xmax = int(max(Plot_in_df.max(axis=1))+maxplotDelta)
+
+    Present_Day_ref_df.plot(legend=False, ax=ax)
+    z = plt.axhline(float(Present_Day_Mean-2*Present_Day_SD), linestyle='-', color='black', alpha=.5)
+    z.set_zorder(-1)
+    z = plt.axhline(float(Present_Day_Mean+2*Present_Day_SD), linestyle='-', color='black', alpha=.5)
+    z.set_zorder(-1)
+    z = plt.axhline(float(Present_Day_Mean-Present_Day_SD), linestyle='--', color='black', alpha=.5)
+    z.set_zorder(-1)
+    z = plt.axhline(float(Present_Day_Mean+Present_Day_SD), linestyle='--', color='black', alpha=.5)
+    z.set_zorder(-1)
+    z = plt.axhline(float(Present_Day_Mean), linestyle='--', color='red', alpha=.5)
+    z.set_zorder(-1)
+    #fig.patch.set_facecolor('deepskyblue')
+    fig.patch.set_alpha(0)
+    plt.ylim(13, xmax)
+    plt.show()
+    out_file_name = 'C:/Users/z5025317/OneDrive - UNSW/WRL_Postdoc_Manual_Backup/WRL_Postdoc/Projects/OEH_Coastal_Node_Deliverables/Technical_Report_1/Figures/tasmean_present_day_manual_backgroundTP.png'
+    out_path = output_directory + '/' + out_file_name
+    fig.savefig(out_file_name)
+
+
+# use transparent=True if you want the whole figure with a transparent background
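
Side note on the present-day block added above: it plots the present-day reference series with horizontal lines at the mean and at plus/minus one and two standard deviations, keeps the figure patch transparent, and the closing comment points at savefig's transparent=True option. A minimal standalone sketch of the same idea, using placeholder data and an illustrative output file name rather than the script's own variables:

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# placeholder present-day series (hypothetical data, stands in for Present_Day_ref_df)
series = pd.Series(np.random.normal(20.0, 1.5, 365))
mean, sd = series.mean(), series.std()

fig, ax = plt.subplots(figsize=(5, 4))
series.plot(ax=ax, legend=False)
# reference lines: solid for +/- 2 SD, dashed for +/- 1 SD, dashed red for the mean
for offset, style in [(2 * sd, '-'), (sd, '--')]:
    ax.axhline(mean - offset, linestyle=style, color='black', alpha=0.5, zorder=-1)
    ax.axhline(mean + offset, linestyle=style, color='black', alpha=0.5, zorder=-1)
ax.axhline(mean, linestyle='--', color='red', alpha=0.5, zorder=-1)

fig.patch.set_alpha(0)  # transparent figure patch only
fig.savefig('present_day_sketch.png', transparent=True)  # transparent=True also clears the axes background
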
diff --git a/Analysis/Code/P1_NARCliM_NC_to_CSV_CCRC_SS.py b/Analysis/Code/P1_NARCliM_NC_to_CSV_CCRC_SS.py
index 493b555..aa5c268 100644
--- a/Analysis/Code/P1_NARCliM_NC_to_CSV_CCRC_SS.py
+++ b/Analysis/Code/P1_NARCliM_NC_to_CSV_CCRC_SS.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 from netCDF4 import *
 import numpy as np
+from numpy import *
 import os
 import pandas as pd
 import glob
@@ -33,22 +34,16 @@ if __name__ == "__main__":
     Timestep = args.timestep
     print("Extracting all NARCLIM time series for variable: ", Clim_var_type, " for lat lon: ", mylat, mylon, "domain", NC_Domain, "timestep ", Timestep)
 #set directory path for output files
-output_directory = '/srv/ccrc/data02/z5025317/NARCliM_out'
+output_directory = '/srv/ccrc/data02/z5025317/NARCliM_out/' + str(abs(round(mylat,3))) + '_' + str(round(mylon, 3)) + '/'
 #output_directory = 'J:\Project wrl2016032\NARCLIM_Raw_Data\Extracted'
 if not os.path.exists(output_directory):
     os.makedirs(output_directory)
     print("output directory folder didn't exist and was generated here:")
     print(output_directory)
 #
-time.sleep(10)
-#manual input via script
-#mylat= -33.9608578
-#mylon= 151.1339882
-#Clim_var_type = 'pr1Hmaxtstep'
-#NC_Domain = 'd02'
-#
+#time.sleep(10)
 #set up the loop variables for interrogating the entire NARCLIM raw data
-NC_Periods = ('1950-2009','2020-2039','2060-2079')
+NC_Periods = ('1990-2009','2020-2039','2060-2079')
 #
 #Define empty pandas data frames
 Full_df = pd.DataFrame()
@@ -61,16 +56,29 @@ for NC_Period in NC_Periods:
     Period_short = NC_Period[:4]
     GCMs = os.listdir('./'+ NC_Period)
     for GCM in GCMs:
+        print GCM
         Warf_runs = os.listdir('./' + NC_Period + '/' + GCM + '/')
         for Warf_run in Warf_runs:
             Current_input_dir = './' + NC_Period + '/' + GCM + '/' + Warf_run + '/' + NC_Domain + '/'
             print Current_input_dir
             Climvar_ptrn = '*' + Timestep + '_*' + Clim_var_type + '.nc'
             Climvar_NCs = glob.glob(Current_input_dir + Climvar_ptrn)
+            print "test"
+            print Climvar_NCs[1]
             #Climvar_NCs = Climvar_NCs[0:2]
             #print(Climvar_NCs)
             for netcdf in Climvar_NCs:
+                print "test2"
                 f=Dataset(netcdf)
+                # This section print on the screen information contained in the headings of the file
+                print '---------------------------------------------------------'
+                print f.ncattrs()
+                print f.title
+                print f.variables
+                print
+                for varname in f.variables:
+                    print varname,' -> ',shape(f.variables[varname])
+                print '---------------------------------------------------------'
                 # Based on the desired inputs, this finds the nearest grid centerpoint index (x,y) in the *.nc file
                 dist_x=np.abs(f.variables['lon'][:,:]-float(mylon))
                 dist_y=np.abs(f.variables['lat'][:,:]-float(mylat))
@@ -95,6 +103,7 @@ for NC_Period in NC_Periods:
                 df1=pd.DataFrame(d, index=timestamp_dates)
                 f.close()
                 print 'closing '+ os.path.basename(os.path.normpath(netcdf)) + ' moving to next netcdf file'
+                #print f
                 print '---------------------------------------------------------'
                 #append in time direction each new time series to the data frame
                 MultiNC_df = MultiNC_df.append(df1)
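
Side note on the nearest-cell lookup that the hunk above adds diagnostics around: NARCliM files carry 2-D 'lat'/'lon' arrays, and the script locates the grid cell closest to the requested point from absolute lat/lon differences. A rough standalone sketch of that lookup (the function name and the simplified combined-distance metric are illustrative, not the script's exact code):

from netCDF4 import Dataset
import numpy as np

def nearest_grid_index(nc_path, target_lat, target_lon):
    # open the file and compute an L1 distance to every cell of the 2-D lat/lon grid
    f = Dataset(nc_path)
    dist = (np.abs(f.variables['lat'][:, :] - float(target_lat))
            + np.abs(f.variables['lon'][:, :] - float(target_lon)))
    # index of the closest cell, unravelled back to (y, x)
    y, x = np.unravel_index(np.argmin(dist), dist.shape)
    f.close()
    return y, x
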
diff --git a/Analysis/Code/P1_NARCliM_NC_to_CSV_CCRC_SS_BASH_script_readme.txt b/Analysis/Code/P1_NARCliM_NC_to_CSV_CCRC_SS_BASH_script_readme.txt
index 0f54731..d58d6e2 100644
--- a/Analysis/Code/P1_NARCliM_NC_to_CSV_CCRC_SS_BASH_script_readme.txt
+++ b/Analysis/Code/P1_NARCliM_NC_to_CSV_CCRC_SS_BASH_script_readme.txt
@@ -61,8 +61,8 @@ Present_Day_Clim_Var = 'MaxT' #MaxT, MinT, Rainfall,   (the name for present da
 ##PROBLEM: Without changing anything, the P1_NARCliM_NC_to_CSV_CCRC_SS.py stopped working properly on the CCRC storm servers. It's not giving an error but loading the nc files with Dataset(nc) just takes unlimited time. It used to take only a few seconds. NOT solved yet as of 7th of May 2018.
-
-
+running a simple netcdf info script
+python /srv/ccrc/data02/z5025317/Code_execution/P1_Basic_NETCDF_Interrogation.py
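
Side note on the interrogation script referenced above: P1_Basic_NETCDF_Interrogation.py itself is not part of this patch, but a minimal stand-in that times the Dataset() open and lists variables (useful when chasing the slow-loading problem) might look like the following; names and output format are illustrative only:

import sys
import time
from netCDF4 import Dataset
from numpy import shape

nc_path = sys.argv[1]
t0 = time.time()
f = Dataset(nc_path)                  # time the open call that appears to hang on the storm servers
print('opened in %.1f s' % (time.time() - t0))
print(f.ncattrs())                    # global attributes
for varname in f.variables:
    print('%s -> %s' % (varname, str(shape(f.variables[varname]))))
f.close()
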
diff --git a/Analysis/Code/P1_NARCliM_plots_Windows.py b/Analysis/Code/P1_NARCliM_plots_Windows.py
index 9f3cbe4..78414f0 100644
--- a/Analysis/Code/P1_NARCliM_plots_Windows.py
+++ b/Analysis/Code/P1_NARCliM_plots_Windows.py
@@ -20,16 +20,16 @@ from ggplot import *
 matplotlib.style.use('ggplot')
 #
 # Set working direcotry (where postprocessed NARClIM data is located)
-os.chdir('C:/Users/z5025317/WRL_Postdoc/Projects/Paper#1/')
+os.chdir('C:/Users/z5025317/OneDrive - UNSW/WRL_Postdoc_Manual_Backup/WRL_Postdoc/Projects/Paper#1/')
 #
 #####################################----------------------------------
 #set input parameters
 Base_period_start = '1990-01-01'
 Base_period_end = '2080-01-01' #use last day that's not included in period as < is used for subsetting
-Estuary = 'Terrigal' # 'Belongil'
-Clim_var_type = "wssmean*" # '*' will create pdf for all variables in folder "pracc*|tasmax*"
-subset_ensemble = 'yes' # is yes, only the model with the lowest, median and max difference between present day and far future are selected
-plot_pdf = 'no'
+Estuary = 'Nadgee' # 'Belongil'
+Clim_var_type = "tasmean*" # '*' will create pdf for all variables in folder "pracc*|tasmax*"
+plot_pdf = 'yes'
+delta_csv = 'no'
 #####################################----------------------------------
 #
 #set directory path for output files
@@ -63,7 +63,6 @@ for clim_var_csv_path in Clim_Var_CSVs:
     if Clim_var_type == 'evspsblmean' or Clim_var_type == 'potevpmean':
         Full_df = Full_df.iloc[:,0:(Ncols_df-1)]*60*60*24
     Fdf_1900_2080 = Full_df
-
     #Subset the data to the minimum base period and above (used to set the lenght of the present day climate period)
     #Fdf_1900_2080 = Full_df.loc[(Full_df.index >= Base_period_start) & (Full_df.index < Base_period_end)] # not necessary if not using reanalysis models for base period
@@ -88,52 +87,52 @@ for clim_var_csv_path in Clim_Var_CSVs:
     print('-------------------------------------------')
     print('mean of all models for climate variable: ' + Clim_var_type)
     Fdf_1900_2080_means = Fdf_1900_2080.mean()
-    Fdf_1900_2080_means.plot(kind='bar').figure
+    #Fdf_1900_2080_means.columns = ['Mean']
+    #Fdf_1900_2080_means.plot(kind='bar').figure
     print('-------------------------------------------')
-    if subset_ensemble == 'yes':
-        #Select the 3 most representative models (min med and max difference betwen far future and present)
-        Fdf_1900_2080_sorted = Fdf_1900_2080.reindex_axis(sorted(Fdf_1900_2080.columns), axis=1)
-        Fdf_1900_2080_sorted_means = pd.DataFrame(Fdf_1900_2080_sorted.mean())
-        df = Fdf_1900_2080_sorted_means
-        #add a simple increasing integer index
-        df = df.reset_index()
-        df= df[df.index % 3 != 1]
-        df['C'] = df[0].diff()
-        df = df.reset_index()
-        df= df[df.index % 2 != 0]
-        #get max difference model (difference between far future and prsent day)
-        a = df[df.index == df['C'].argmax(skipna=True)]
-        Max_dif_mod_name = a.iloc[0]['index']
-        #get min difference model
-        a = df[df.index == df['C'].argmin(skipna=True)]
-        Min_dif_mod_name = a.iloc[0]['index']
-        #get the model which difference is closest to the median difference
-        df['D'] = abs(df['C']- df['C'].median())
-        a = df[df.index == df['D'].argmin(skipna=True)]
-        Med_dif_mod_name = a.iloc[0]['index']
-        #data frame with min med and max difference model
-        df2 = Fdf_1900_2080.filter(regex= Min_dif_mod_name[:-5] + '|' + Med_dif_mod_name[:-5] + '|' + Max_dif_mod_name[:-5] )
-        dfall = df2.reindex_axis(sorted(df2.columns), axis=1)
-        #data frame with individual models
-        dfmin = Fdf_1900_2080.filter(regex= Min_dif_mod_name[:-5])
-        dfmax = Fdf_1900_2080.filter(regex= Max_dif_mod_name[:-5])
-        dfmed = Fdf_1900_2080.filter(regex= Max_dif_mod_name[:-5])
-        # use only the 3 representative models for the analysis
-        Fdf_1900_2080_all_mods = Fdf_1900_2080
-        #create a dataframe that has 1 column for each of the three representative models
+    #Select the 3 most representative models (min med and max difference betwen far future and present)
+    Fdf_1900_2080_sorted = Fdf_1900_2080.reindex_axis(sorted(Fdf_1900_2080.columns), axis=1)
+    Fdf_1900_2080_sorted_means = pd.DataFrame(Fdf_1900_2080_sorted.mean())
+    df = Fdf_1900_2080_sorted_means
+    #add a simple increasing integer index
+    df = df.reset_index()
+    df= df[df.index % 3 != 1]
+    df['C'] = df[0].diff()
+    df = df.reset_index()
+    df= df[df.index % 2 != 0]
+    #get max difference model (difference between far future and prsent day)
+    a = df[df.index == df['C'].argmax(skipna=True)]
+    Max_dif_mod_name = a.iloc[0]['index']
+    #get min difference model
+    a = df[df.index == df['C'].argmin(skipna=True)]
+    Min_dif_mod_name = a.iloc[0]['index']
+    #get the model which difference is closest to the median difference
+    df['D'] = abs(df['C']- df['C'].median())
+    a = df[df.index == df['D'].argmin(skipna=True)]
+    Med_dif_mod_name = a.iloc[0]['index']
+    #data frame with min med and max difference model
+    df2 = Fdf_1900_2080.filter(regex= Min_dif_mod_name[:-5] + '|' + Med_dif_mod_name[:-5] + '|' + Max_dif_mod_name[:-5] )
+    dfall = df2.reindex_axis(sorted(df2.columns), axis=1)
+    #data frame with individual models
+    dfmin = Fdf_1900_2080.filter(regex= Min_dif_mod_name[:-5])
+    dfmax = Fdf_1900_2080.filter(regex= Max_dif_mod_name[:-5])
+    dfmed = Fdf_1900_2080.filter(regex= Max_dif_mod_name[:-5])
+    # use only the 3 representative models for the analysis
+    Fdf_1900_2080_all_mods = Fdf_1900_2080
+    #create a dataframe that has 1 column for each of the three representative models
 #    Full_df.loc[(Full_df.index > '1990-01-01') & (Full_df.index < '2009-01-01'), 'period']= '1990-2009'
 #    Full_df.loc[(Full_df.index > '2020-01-01') & (Full_df.index < '2039-01-01'), 'period']= '2020-2039'
 #    Full_df.loc[(Full_df.index > '2060-01-01') & (Full_df.index < '2079-01-01'), 'period']= '2060-2079'
-        dfa = Fdf_1900_2080_annual.iloc[:,[0]]
-        dfa1 = Fdf_1900_2080_annual.iloc[:,[0,3,6]].loc[(Fdf_1900_2080_annual.index >= '1990') & (Fdf_1900_2080_annual.index <= '2009')]
-        dfa1.columns = [Min_dif_mod_name[:-5], Med_dif_mod_name[:-5], Max_dif_mod_name[:-5]]
-        dfa2 = Fdf_1900_2080_annual.iloc[:,[1,4,7]].loc[(Fdf_1900_2080_annual.index >= '2020') & (Fdf_1900_2080_annual.index <= '2039')]
-        dfa2.columns = [Min_dif_mod_name[:-5], Med_dif_mod_name[:-5], Max_dif_mod_name[:-5]]
-        dfa3 = Fdf_1900_2080_annual.iloc[:,[2,5,8]].loc[(Fdf_1900_2080_annual.index >= '2060') & (Fdf_1900_2080_annual.index <= '2079')]
-        dfa3.columns = [Min_dif_mod_name[:-5], Med_dif_mod_name[:-5], Max_dif_mod_name[:-5]]
-        dfall_annual = dfa1.append(dfa2).append(dfa3)
-
+    dfa = Fdf_1900_2080_annual.iloc[:,[0]]
+    dfa1 = Fdf_1900_2080_annual.iloc[:,[0,3,6]].loc[(Fdf_1900_2080_annual.index >= '1990') & (Fdf_1900_2080_annual.index <= '2009')]
+    dfa1.columns = [Min_dif_mod_name[:-5], Med_dif_mod_name[:-5], Max_dif_mod_name[:-5]]
+    dfa2 = Fdf_1900_2080_annual.iloc[:,[1,4,7]].loc[(Fdf_1900_2080_annual.index >= '2020') & (Fdf_1900_2080_annual.index <= '2039')]
+    dfa2.columns = [Min_dif_mod_name[:-5], Med_dif_mod_name[:-5], Max_dif_mod_name[:-5]]
+    dfa3 = Fdf_1900_2080_annual.iloc[:,[2,5,8]].loc[(Fdf_1900_2080_annual.index >= '2060') & (Fdf_1900_2080_annual.index <= '2079')]
+    dfa3.columns = [Min_dif_mod_name[:-5], Med_dif_mod_name[:-5], Max_dif_mod_name[:-5]]
+    dfall_annual = dfa1.append(dfa2).append(dfa3)
+
     #Create Deltas of average change for annual and seasonal basis
     times = ['annual', 'DJF', 'MAM', 'JJA','SON']
     delta_all_df = pd.DataFrame()
@@ -183,9 +182,10 @@ for clim_var_csv_path in Clim_Var_CSVs:
         #append df to overall df
         delta_all_df = pd.concat([delta_all_df, delta_df], axis=1)
-    out_file_name = Estuary + '_' + Clim_var_type + '_NARCliM_ensemble_changes.csv'
-    out_path = output_directory + '/' + out_file_name
-    delta_all_df.to_csv(out_path)
+    if delta_csv == 'yes':
+        out_file_name = Estuary + '_' + Clim_var_type + '_NARCliM_ensemble_changes.csv'
+        out_path = output_directory + '/' + out_file_name
+        delta_all_df.to_csv(out_path)
     #create a dataframe that has a single column for present day, near and far future for the (3 selected models)
     len(Fdf_1900_2080.columns)
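
Side note on the representative-model selection that the hunks above un-indent (it now always runs instead of being gated by subset_ensemble): the idea is to compute, per model, the change between the far-future and present-day means and keep the models with the smallest, largest, and most median-like change. A compact pandas sketch of that selection, using a hypothetical helper rather than the script's index manipulation:

import pandas as pd

def representative_models(deltas):
    # deltas: pd.Series of per-model change (far-future mean minus present-day mean),
    # indexed by model name
    min_mod = deltas.idxmin()                            # smallest change
    max_mod = deltas.idxmax()                            # largest change
    med_mod = (deltas - deltas.median()).abs().idxmin()  # closest to the median change
    return min_mod, med_mod, max_mod
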
@@ -203,23 +203,57 @@ for clim_var_csv_path in Clim_Var_CSVs:
     #output some summary plot into pdf
     if plot_pdf == 'yes':
+        plotcolours36 = ['darkolivegreen','turquoise', 'lightgreen', 'darkgreen', 'lightpink','slateblue', 'slategray', 'orange', 'tomato', 'peru', 'navy', 'teal',
+                         'darkolivegreen','turquoise', 'lightgreen', 'darkgreen', 'lightpink','slateblue', 'slategray', 'orange', 'tomato', 'peru', 'navy', 'teal',
+                         'darkolivegreen','turquoise', 'lightgreen', 'darkgreen', 'lightpink','slateblue', 'slategray', 'orange', 'tomato', 'peru', 'navy', 'teal']
+        plotcolours36b = ['tomato', 'royalblue', 'mediumpurple' , 'tomato', 'royalblue', 'mediumpurple' , 'tomato', 'royalblue', 'mediumpurple' , 'tomato', 'royalblue', 'mediumpurple' ,
+                          'tomato', 'royalblue', 'mediumpurple' , 'tomato', 'royalblue', 'mediumpurple' , 'tomato', 'royalblue', 'mediumpurple' , 'tomato', 'royalblue', 'mediumpurple' ,
+                          'tomato', 'royalblue', 'mediumpurple' , 'tomato', 'royalblue', 'mediumpurple' , 'tomato', 'royalblue', 'mediumpurple' , 'tomato', 'royalblue', 'mediumpurple' ]
+        plotcolours12 = ['darkolivegreen','turquoise', 'lightgreen', 'darkgreen', 'lightpink','slateblue', 'slategray', 'orange', 'tomato', 'peru', 'navy', 'teal']
+        plotcolours15 = ['darkolivegreen','turquoise', 'lightgreen', 'darkgreen', 'lightpink','slateblue', 'slategray', 'orange', 'tomato', 'peru', 'navy', 'teal', 'lightgreen','lightpink','slateblue']
+
+        #plt.cm.Paired(np.arange(len(Fdf_1900_2080_means)))
         #write the key plots to a single pdf document
-        pdf_out_file_name = Clim_var_type + '_start_' + Base_period_start + '_NARCliM_summary_3.pdf'
+        pdf_out_file_name = Clim_var_type + '_start_' + Base_period_start + '_NARCliM_summary_9.pdf'
         pdf_out_path = output_directory +'/' + pdf_out_file_name
         #open pdf and add the plots
         with PdfPages(pdf_out_path) as pdf:
             #barplot of model means
             plt.title(Clim_var_type + ' - model means - full period')
             ymin = min(Fdf_1900_2080_means)
-            ymax = max(Fdf_1900_2080_means)
-            Fdf_1900_2080_means.plot(kind='bar', ylim=(ymin,ymax))
+            ymax = max(Fdf_1900_2080_means) + 0.008 *min(Fdf_1900_2080_means)
+            Fdf_1900_2080_means.plot(kind='bar', ylim=(ymin,ymax), color=plotcolours36)
             pdf.savefig(bbox_inches='tight', pad_inches=0.4)
             plt.close()
+            #
+            plt.title(Clim_var_type + ' - model deltas - far-present')
+            neardeltadf=delta_all_df['far']
+            ymin = 0 #min(neardeltadf) - 0.008 *min(neardeltadf)
+            ymax = max(neardeltadf) + 0.008 * max(neardeltadf)
+            neardeltadf.plot(kind='bar', color=plotcolours15, ylim=(ymin,ymax))
+            #fig.patch.set_alpha(0)
+            pdf.savefig(bbox_inches='tight', ylim=(ymin,ymax), pad_inches=0.4)
+            plt.close()
+            #
+            plt.title(Clim_var_type + ' - model deltas - near-present')
+            neardeltadf=delta_all_df['near']
+            #ymin = 0 #min(neardeltadf) - 0.008 *min(neardeltadf)
+            #ymax = max(neardeltadf) + 0.008 *max(neardeltadf)
+            neardeltadf.plot(kind='bar', color=plotcolours15, ylim=(ymin,ymax))
+            pdf.savefig(bbox_inches='tight', ylim=(ymin,ymax), pad_inches=0.4)
+            plt.close()
             #full period density comparison
             plt.title(Clim_var_type + ' - density comparison - full period - all models')
             Summarized_df.plot.kde()
             pdf.savefig(bbox_inches='tight', pad_inches=0.4)
             plt.close()
+            #full period density comparison
+            plt.title(Clim_var_type + ' - density comparison - full period - max delta model')
+            xmin = float(max(np.nanpercentile(Fdf_1900_2080.filter(regex= Max_dif_mod_name[:-5]),50) - 4 * np.std(Fdf_1900_2080.filter(regex= Max_dif_mod_name[:-5]))))
+            xmax = float(max(np.nanpercentile(Fdf_1900_2080.filter(regex= Max_dif_mod_name[:-5]),50) + 4 * np.std(Fdf_1900_2080.filter(regex= Max_dif_mod_name[:-5]))))
+            Fdf_1900_2080.filter(regex= Max_dif_mod_name[:-5]).plot.kde(xlim=(xmin,xmax))
+            pdf.savefig(bbox_inches='tight', pad_inches=0.4)
+            plt.close()
             #annual box
             plt.title(Clim_var_type + ' - Annual means/sums for max diff model')
             Fdf_1900_2080_annual.boxplot(rot=90)
@@ -256,16 +290,30 @@ for clim_var_csv_path in Clim_Var_CSVs:
             pdf.savefig(bbox_inches='tight', pad_inches=0.4)
             plt.close()
             # time series plot annual ALL models
+            plt.title(Clim_var_type + ' - Time series - all models')
+            Mod_order = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,19,20,21,16,17,18,22,23,24,31,32,33,25,26,27,28,29,30,34,35,36]
+            test = Fdf_1900_2080_annual
+            Mod_Names = test.columns
+            New_Mod_Name = []
+            for i in range(0,len(Mod_Names)):
+                New_Mod_Name.append(str(Mod_order[i]+10) + '_' + Mod_Names[i])
+            test.columns = New_Mod_Name
+            test_sorted = test.reindex_axis(sorted(test.columns), axis=1)
+            colnamest = test.columns
+            test_sorted.columns = [w[3:-5] for w in colnamest]
+            test_sorted.plot(legend=False, color = plotcolours36)
+            pdf.savefig(bbox_inches='tight', pad_inches=0.4)
+            plt.close()
+            # time series plot annual ALL models
             plt.title(Clim_var_type + ' - Time series - representative models')
             dfall_annual.plot(legend=False)
             pdf.savefig(bbox_inches='tight', pad_inches=0.4)
             plt.close()
-
             # seasonal mean boxplots
             ymin = min(Fdf_Seas_means[Fdf_Seas_means.index.quarter==1].mean())
             ymax = max(Fdf_Seas_means[Fdf_Seas_means.index.quarter==1].mean())
             plt.title(Clim_var_type + ' - DJF Summer means/sums')
-            Fdf_Seas_means[Fdf_Seas_means.index.quarter==1].mean().plot(kind='bar', ylim=(ymin,ymax))
+            pd.DataFrame(Fdf_Seas_means[Fdf_Seas_means.index.quarter==1].mean()).plot(kind='bar', ylim=(ymin,ymax))
             pdf.savefig(bbox_inches='tight', pad_inches=0.4)
             plt.close()
             plt.title(Clim_var_type + ' - DJF Summer means/sums')
@@ -275,7 +323,7 @@ for clim_var_csv_path in Clim_Var_CSVs:
             ymin = min(Fdf_Seas_means[Fdf_Seas_means.index.quarter==2].mean())
             ymax = max(Fdf_Seas_means[Fdf_Seas_means.index.quarter==2].mean())
             plt.title(Clim_var_type + ' - MAM Autumn means/sums')
-            Fdf_Seas_means[Fdf_Seas_means.index.quarter==2].mean().plot(kind='bar', ylim=(ymin,ymax))
+            pd.DataFrame(Fdf_Seas_means[Fdf_Seas_means.index.quarter==2].mean()).plot(kind='bar', ylim=(ymin,ymax))
             pdf.savefig(bbox_inches='tight', pad_inches=0.4)
             plt.close()
             plt.title(Clim_var_type + ' - MAM Autumn means/sums')
@@ -285,7 +333,7 @@ for clim_var_csv_path in Clim_Var_CSVs:
             ymin = min(Fdf_Seas_means[Fdf_Seas_means.index.quarter==3].mean())
             ymax = max(Fdf_Seas_means[Fdf_Seas_means.index.quarter==3].mean())
             plt.title(Clim_var_type + ' - JJA Winter means/sums')
-            Fdf_Seas_means[Fdf_Seas_means.index.quarter==3].mean().plot(kind='bar', ylim=(ymin,ymax))
+            pd.DataFrame(Fdf_Seas_means[Fdf_Seas_means.index.quarter==3].mean()).plot(kind='bar', ylim=(ymin,ymax))
             pdf.savefig(bbox_inches='tight', pad_inches=0.4)
             plt.close()
             plt.title(Clim_var_type + ' - JJA Winter means/sums')
@@ -295,7 +343,7 @@ for clim_var_csv_path in Clim_Var_CSVs:
             ymin = min(Fdf_Seas_means[Fdf_Seas_means.index.quarter==4].mean())
             ymax = max(Fdf_Seas_means[Fdf_Seas_means.index.quarter==4].mean())
             plt.title(Clim_var_type + ' - SON Spring means/sums')
-            Fdf_Seas_means[Fdf_Seas_means.index.quarter==4].mean().plot(kind='bar', ylim=(ymin,ymax))
+            pd.DataFrame(Fdf_Seas_means[Fdf_Seas_means.index.quarter==4].mean()).plot(kind='bar', ylim=(ymin,ymax))
             pdf.savefig(bbox_inches='tight', pad_inches=0.4)
             plt.close()
             plt.title(Clim_var_type + ' - SON Spring means/sums')
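
Side note on the seasonal bar plots changed in the last hunks: the .mean() Series is now wrapped in pd.DataFrame() before plotting, and index.quarter selects the season (the script treats quarter 1 as DJF, 2 as MAM, 3 as JJA, 4 as SON on its seasonally resampled index). A small sketch of the same pattern, with a hypothetical helper name and arguments:

import pandas as pd

def seasonal_mean_barplot(seas_means, quarter, title):
    # seas_means: DataFrame of seasonal means/sums with a DatetimeIndex resampled so that
    # index.quarter maps 1->DJF, 2->MAM, 3->JJA, 4->SON (as assumed in the script)
    vals = seas_means[seas_means.index.quarter == quarter].mean()
    ymin, ymax = vals.min(), vals.max()
    # wrapping the Series in a DataFrame keeps the bar-plot call uniform across variables
    return pd.DataFrame(vals).plot(kind='bar', ylim=(ymin, ymax), title=title, legend=False)
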