#a new script for SILO and backup for the other paper#1 codes

Development1
tinoheimhuber 7 years ago
parent 42ab0af07a
commit d56262cd14

@@ -15,9 +15,9 @@
# 'wss1Hmaxtstep' Max. 1-hour time-window moving-averaged surface wind speed, units: m s-1 (maximum of 1-hour moving-averaged values computed from 60.0-second point values)
# 'wssmax' Surface wind speed standard_name: air_velocity units: m s-1 height: 10 m
# 'wssmean' Surface wind speed standard_name: air_velocity units: m s-1
# 'sstmean' Sea surface temperature, daily mean
Clim_Var <- 'wssmax'
Clim_Var <- 'sstmean'
Datatype <- 'T_GCMS' #T_GCMS for GCM forcing, T_NNRP for reanalysis (only 1950-2009)
Biasboolean <- 'False' #use bias corrected data?
@@ -43,6 +43,10 @@ for (i in 1:(length(Location.df$Name))){
# }
latitude=round(as.numeric(Location.df$Lat[i]),3)
longitude=round(as.numeric(Location.df$Long[i]),3)
if (Clim_Var == 'sstmean'){
  latitude=round(as.numeric(Location.df$Ocean_Lat[i]),3)
  longitude=round(as.numeric(Location.df$Ocean_Long[i]),3)
}
text <- c(paste("latitude=",latitude,"", sep=""), paste("longitude=",longitude,"", sep=""),
paste("name='",name,"'", sep=""),
"python /srv/ccrc/data02/z5025317/Code_execution/\\

@@ -169,7 +169,8 @@ if Bias_Correction_BOOL == 'True':
Period_short = NC_Period[:4]
Current_input_dir = './' + GCM + '/' + Warf_run + '/' + NC_Period + '/' + NC_Domain + '/'
print Current_input_dir
Climvar_ptrn = '*' + Timestep + '_*' + Clim_var_type + '_bc.nc'
Clim_var_type = Clim_var_type + '_bc.nc'
Climvar_ptrn = '*' + Timestep + '_*' + Clim_var_type
Climvar_NCs = glob.glob(Current_input_dir + Climvar_ptrn)
print Climvar_NCs[1]
print Climvar_NCs[2]
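# For reference (illustrative filename, not from the repo): with Timestep='DAY'
# and Clim_var_type='pracc_bc.nc', the pattern '*DAY_*pracc_bc.nc' built above
# would match a file such as 'CCRC_NARCliM_DAY_1990-2009_pracc_bc.nc'.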
@@ -178,14 +179,14 @@ if Bias_Correction_BOOL == 'True':
#print netcdf
f=Dataset(netcdf)
# This section prints the information contained in the file header to the screen
# print '---------------------------------------------------------'
# print f.ncattrs()
# print f.title
# print f.variables
# print
# for varname in f.variables:
# print varname,' -> ',np.shape(f.variables[varname])
# print '---------------------------------------------------------'
print '---------------------------------------------------------'
print f.ncattrs()
print f.title
print f.variables
print
for varname in f.variables:
    print varname,' -> ',np.shape(f.variables[varname])
print '---------------------------------------------------------'
# Based on the desired inputs, this finds the nearest grid centerpoint index (x,y) in the *.nc file
dist_x=np.abs(f.variables['lon'][:,:]-float(mylon))
dist_y=np.abs(f.variables['lat'][:,:]-float(mylat))
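# Sketch of the assumed completion (the hunk ends before the index is picked):
# combine the two distance fields and take the grid cell with the smallest
# Euclidean distance; 'dist', 'index_y' and 'index_x' are illustrative names.
dist = np.sqrt(dist_x**2 + dist_y**2)
index_y, index_x = np.unravel_index(dist.argmin(), dist.shape)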

@@ -24,6 +24,9 @@ matplotlib.style.use('ggplot')
# Set working directory (where the postprocessed NARCliM data is located)
os.chdir('C:/Users/z5025317/OneDrive - UNSW/WRL_Postdoc_Manual_Backup/WRL_Postdoc/Projects/Paper#1/Analysis/Code')
import climdata_fcts as fct
import silo as sil
#==========================================================#
# Set working directory (where the postprocessed NARCliM data is located)
@@ -34,18 +37,19 @@ os.chdir('C:/Users/z5025317/OneDrive - UNSW/WRL_Postdoc_Manual_Backup/WRL_Postdo
#==========================================================#
#set input parameters
Base_period_start = '1990-01-01'
Case_Study_Name = 'CASESTUDY2'
Base_period_end = '2080-01-01' #end date is exclusive: give the first day after the period, since < is used for subsetting
Estuary = 'HUNTER' # 'Belongil'
Clim_var_type = "pracc" # '*' will create pdf for all variables in folder "pracc*|tasmax*"
Clim_var_type = "sstmean" # '*' will create pdf for all variables in folder "pracc*|tasmax*"
plot_pdf = 'yes'
delta_csv = 'yes'
Stats = 'dailymax'
Stats = 'maxdaily'
Version = 'V4'
#==========================================================#
#==========================================================#
#set directory path for output files
output_directory = 'Output/Case_Study_1/'+ Estuary
output_directory = 'Output/' + Case_Study_Name + '/' + Estuary
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
    os.makedirs(output_directory)
@@ -55,7 +59,7 @@ if not os.path.exists(output_directory):
#==========================================================#
#==========================================================#
Estuary_Folder = glob.glob('./Data/NARCLIM_Site_CSVs/' + Estuary + '*' )
Estuary_Folder = glob.glob('./Data/NARCLIM_Site_CSVs/'+ Case_Study_Name + '/' + Estuary + '*' )
Clim_Var_CSVs = glob.glob(Estuary_Folder[0] + '/' + Clim_var_type + '*')
#==========================================================#
@@ -77,7 +81,7 @@ Ncols_df = len(Full_df)
#==========================================================#
#subtract a constant from all values to convert from Kelvin to Celsius (temperature)
if Clim_var_type == 'tasmean' or Clim_var_type == 'tasmax':
if Clim_var_type == 'tasmean' or Clim_var_type == 'tasmax' or Clim_var_type == 'sstmean':
    Full_df = Full_df.iloc[:,0:(Ncols_df-1)] - 273.15
#convert rates from per-second to per-day totals (60*60*24 s)
if Clim_var_type == 'evspsblmean' or Clim_var_type == 'potevpmean':
    Full_df = Full_df.iloc[:,0:(Ncols_df-1)]*60*60*24
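# Worked example of the conversions above (values illustrative):
# 60*60*24 = 86400 s/day, so a flux of 3.5e-5 kg m-2 s-1 becomes
# 3.5e-5 * 86400 ≈ 3.02 mm/day (1 kg m-2 of water = 1 mm depth),
# and a temperature of 293.15 K becomes 293.15 - 273.15 = 20.0 °C.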
@@ -135,53 +139,7 @@ dfall_annual = dfa1.append(dfa2).append(dfa3)
#==========================================================#
#Create Deltas of average change for annual and seasonal basis
#==========================================================#
times = ['annual', 'DJF', 'MAM', 'JJA', 'SON']
delta_all_df = pd.DataFrame()
for temp in times:
    if temp == 'annual':
        Mean_df = Fdf_1900_2080_annual.mean()
        Column_names = ['near', 'far']
    if temp == 'DJF':
        Mean_df = Fdf_Seas_means[Fdf_Seas_means.index.quarter==1].mean()
        Column_names = ['DJF_near', 'DJF_far']
    if temp == 'MAM':
        Mean_df = Fdf_Seas_means[Fdf_Seas_means.index.quarter==2].mean()
        Column_names = ['MAM_near', 'MAM_far']
    if temp == 'JJA':
        Mean_df = Fdf_Seas_means[Fdf_Seas_means.index.quarter==3].mean()
        Column_names = ['JJA_near', 'JJA_far']
    if temp == 'SON':
        Mean_df = Fdf_Seas_means[Fdf_Seas_means.index.quarter==4].mean()
        Column_names = ['SON_near', 'SON_far']
    # strip the 5-character period suffix off each column name to get the unique models
    models = list(Fdf_1900_2080_means.index)
    newmodel = []
    for each in models:
        newmodel.append(each[:-5])
    unique_models = set(newmodel)
    # calculate the diff for each unique model
    delta_NF_ensemble = []
    delta_FF_ensemble = []
    for unique_model in unique_models:
        dfdiff = Mean_df.filter(regex=unique_model)
        delta_NF = dfdiff[1] - dfdiff[0]
        delta_NF_ensemble.append(delta_NF)
        delta_FF = dfdiff[2] - dfdiff[1]
        delta_FF_ensemble.append(delta_FF)
    delta_df1 = pd.DataFrame(delta_NF_ensemble, index=unique_models)
    delta_df2 = pd.DataFrame(delta_FF_ensemble, index=unique_models)
    delta_df = pd.concat([delta_df1, delta_df2], axis=1)
    # rename columns
    delta_df.columns = Column_names
    # add rows with the median and the 10th and 90th percentiles
    delta_df.loc['10th'] = pd.Series({Column_names[0]: np.percentile(delta_df[Column_names[0]], 10), Column_names[1]: np.percentile(delta_df[Column_names[1]], 10)})
    delta_df.loc['median'] = pd.Series({Column_names[0]: np.percentile(delta_df[Column_names[0]], 50), Column_names[1]: np.percentile(delta_df[Column_names[1]], 50)})
    delta_df.loc['90th'] = pd.Series({Column_names[0]: np.percentile(delta_df[Column_names[0]], 90), Column_names[1]: np.percentile(delta_df[Column_names[1]], 90)})
    # append df to overall df
    delta_all_df = pd.concat([delta_all_df, delta_df], axis=1)
delta_all_df = fct.calculate_deltas_NF_FF2(Fdf_1900_2080_annual, Fdf_Seas_means)
#==========================================================#

@@ -0,0 +1,115 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 18:03:25 2018
@author: z5025317
"""
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from matplotlib.backends.backend_pdf import PdfPages
from ggplot import *
matplotlib.style.use('ggplot')
import csv
# import own modules
# Set working directory (where the postprocessed NARCliM data is located)
os.chdir('C:/Users/z5025317/OneDrive - UNSW/WRL_Postdoc_Manual_Backup/WRL_Postdoc/Projects/Paper#1/Analysis/Code')
import climdata_fcts as fct
import silo as sil
#==========================================================#
# Set working directory (where the postprocessed NARCliM data is located)
os.chdir('C:/Users/z5025317/OneDrive - UNSW/WRL_Postdoc_Manual_Backup/WRL_Postdoc/Projects/Paper#1/')
#==========================================================#
#==========================================================#
#set input parameters
Case_Study_Name = 'CASESTUDY2'
Casestudy2_csv_path = "Data/NARCLIM_Site_CSVs/CASESTUDY2/CASESTDUY2_NARCLIM_Point_Sites.csv"
Silo_variables = ['daily_rain', "max_temp", "min_temp", 'et_short_crop']
#==========================================================#
#==========================================================#
#set directory path for output files
output_directory = 'Data/SILO/' + Case_Study_Name + '/'
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
    os.makedirs(output_directory)
    print('-------------------------------------------')
    print("output directory folder didn't exist and was generated")
    print('-------------------------------------------')
#==========================================================#
#read the CSV to extract the lat/long and case study sites
with open(Casestudy2_csv_path, mode='r') as infile:
    reader = csv.reader(infile)
    next(reader, None)
    with open('coors_new.csv', mode='w') as outfile:
        writer = csv.writer(outfile)
        mydict = dict((rows[0], [rows[1], rows[2]]) for rows in reader)
for Estuary in mydict:
    print Estuary
    print str(mydict[Estuary][0])
    #==========================================================#
    #set directory path for output files
    output_directory_internal = output_directory + Estuary + '/'
    #output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
    if not os.path.exists(output_directory_internal):
        os.makedirs(output_directory_internal)
        print('-------------------------------------------')
        print("output directory folder didn't exist and was generated")
        print('-------------------------------------------')
    #==========================================================#
    output_csv = output_directory_internal + 'SILO_climdata_' + Estuary + '_' + mydict[Estuary][0] + '_' + mydict[Estuary][1] + '.csv'
    silo_df = sil.pointdata(Silo_variables, 'Okl9EDxgS2uzjLWtVNIBM5YqwvVcCxOmpd3nCzJh', '19900101', '20100101',
                            None, mydict[Estuary][0], mydict[Estuary][1], True, output_csv)
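# For reference, the assumed layout of the point-sites CSV, inferred from the
# dict comprehension above (values illustrative):
#   Estuary,Lat,Long
#   HUNTER,-32.91,151.80
# so mydict maps each site name to [lat, long] as strings,
# e.g. mydict['HUNTER'] -> ['-32.91', '151.80']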

@@ -40,6 +40,8 @@ def datenum2datetime(datenum):
    return time
def select_min_med_max_dif_model(NARCLIM_df):
    #Select the 3 most representative models (min, med and max difference between far future and present)
    Fdf_1900_2080_sorted = NARCLIM_df.reindex_axis(sorted(NARCLIM_df.columns), axis=1)
@@ -69,3 +71,56 @@ def select_min_med_max_dif_model(NARCLIM_df):
    dfmax = NARCLIM_df.filter(regex=Max_dif_mod_name[:-5])
    dfmed = NARCLIM_df.filter(regex=Med_dif_mod_name[:-5])
    return dfall, dfmin, dfmed, dfmax, Min_dif_mod_name, Med_dif_mod_name, Max_dif_mod_name
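# Usage sketch (illustrative, not in the original file):
#   dfall, dfmin, dfmed, dfmax, min_name, med_name, max_name = \
#       select_min_med_max_dif_model(NARCLIM_df)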
def calculate_deltas_NF_FF2(Annual_df, Seasonal_df):
    """Calculates the "deltas" between near future and present day for annual or seasonal climate data in pandas TS format."""
    times = ['annual', 'DJF', 'MAM', 'JJA', 'SON']
    delta_all_df = pd.DataFrame()
    for temp in times:
        if temp == 'annual':
            Mean_df = Annual_df.mean()
            Column_names = ['near', 'far']
        if temp == 'DJF':
            Mean_df = Seasonal_df[Seasonal_df.index.quarter==1].mean()
            Column_names = ['DJF_near', 'DJF_far']
        if temp == 'MAM':
            Mean_df = Seasonal_df[Seasonal_df.index.quarter==2].mean()
            Column_names = ['MAM_near', 'MAM_far']
        if temp == 'JJA':
            Mean_df = Seasonal_df[Seasonal_df.index.quarter==3].mean()
            Column_names = ['JJA_near', 'JJA_far']
        if temp == 'SON':
            Mean_df = Seasonal_df[Seasonal_df.index.quarter==4].mean()
            Column_names = ['SON_near', 'SON_far']
        # strip the 5-character period suffix off each column name to get the unique models
        models = list(Seasonal_df.mean().index)
        newmodel = []
        for each in models:
            newmodel.append(each[:-5])
        unique_models = set(newmodel)
        # calculate the diff for each unique model
        delta_NF_ensemble = []
        delta_FF_ensemble = []
        for unique_model in unique_models:
            dfdiff = Mean_df.filter(regex=unique_model)
            delta_NF = dfdiff[1] - dfdiff[0]
            delta_NF_ensemble.append(delta_NF)
            delta_FF = dfdiff[2] - dfdiff[1]
            delta_FF_ensemble.append(delta_FF)
        delta_df1 = pd.DataFrame(delta_NF_ensemble, index=unique_models)
        delta_df2 = pd.DataFrame(delta_FF_ensemble, index=unique_models)
        delta_df = pd.concat([delta_df1, delta_df2], axis=1)
        # rename columns
        delta_df.columns = Column_names
        # add rows with the median and the 10th and 90th percentiles
        delta_df.loc['10th'] = pd.Series({Column_names[0]: np.percentile(delta_df[Column_names[0]], 10), Column_names[1]: np.percentile(delta_df[Column_names[1]], 10)})
        delta_df.loc['median'] = pd.Series({Column_names[0]: np.percentile(delta_df[Column_names[0]], 50), Column_names[1]: np.percentile(delta_df[Column_names[1]], 50)})
        delta_df.loc['90th'] = pd.Series({Column_names[0]: np.percentile(delta_df[Column_names[0]], 90), Column_names[1]: np.percentile(delta_df[Column_names[1]], 90)})
        # append df to overall df
        delta_all_df = pd.concat([delta_all_df, delta_df], axis=1)
    return delta_all_df
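# Usage sketch (illustrative, not part of the module). The column-name
# convention is inferred from the [:-5] slicing above: the last 5 characters
# of each column name encode the period, with three columns per model in
# chronological order (present, near future, far future), e.g. '_1990',
# '_2030', '_2060':
#   idx = pd.date_range('1990-01-01', '2079-12-31', freq='M')
#   cols = ['CCCMA3.1_R1_1990', 'CCCMA3.1_R1_2030', 'CCCMA3.1_R1_2060']
#   toy = pd.DataFrame(np.random.rand(len(idx), 3), index=idx, columns=cols)
#   deltas = calculate_deltas_NF_FF2(toy.resample('A').mean(), toy.resample('Q-NOV').mean())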