Updated documentation

Added more examples
Added more tests
master
Per.Andreas.Brodtkorb 14 years ago
parent e1ed205bd2
commit 83bda6890a

@ -130,8 +130,8 @@ class CovData1D(WafoData):
CovData
"""
def __init__(self,*args,**kwds):
super(CovData1D, self).__init__(*args,**kwds)
def __init__(self, *args, **kwds):
super(CovData1D, self).__init__(*args, **kwds)
self.name = 'WAFO Covariance Object'
self.type = 'time'
@ -143,7 +143,7 @@ class CovData1D(WafoData):
self.norm = 0
somekeys = ['phi', 'name', 'h', 'tr', 'lagtype', 'v', 'type', 'norm']
self.__dict__.update(sub_dict_select(kwds,somekeys))
self.__dict__.update(sub_dict_select(kwds, somekeys))
self.setlabels()
def setlabels(self):
@ -153,10 +153,10 @@ class CovData1D(WafoData):
'''
N = len(self.type)
if N==0:
if N == 0:
raise ValueError('Object does not appear to be initialized, it is empty!')
labels = ['','ACF','']
labels = ['', 'ACF', '']
if self.lagtype.startswith('t'):
labels[0] = 'Lag [s]'
@ -252,36 +252,36 @@ class CovData1D(WafoData):
if rate is None:
rate = 1 ##interpolation rate
else:
rate = 2**nextpow2(rate) ##make sure rate is a power of 2
rate = 2 ** nextpow2(rate) ##make sure rate is a power of 2
## add a nugget effect to ensure that round off errors
## do not result in negative spectral estimates
ACF[0] = ACF[0] +nugget
ACF[0] = ACF[0] + nugget
n = ACF.size
# embedding a circulant vector and Fourier transform
if fast:
nfft = 2**nextpow2(2*n-2)
nfft = 2 ** nextpow2(2 * n - 2)
else:
nfft = 2*n-2
nfft = 2 * n - 2
nf = nfft/2 ## number of frequencies
ACF = r_[ACF,zeros(nfft-2*n+2),ACF[n-1:0:-1]]
nf = nfft / 2 ## number of frequencies
ACF = r_[ACF, zeros(nfft - 2 * n + 2), ACF[n - 1:0:-1]]
Rper = (fft(ACF,nfft).real).clip(0) ## periodogram
Rper = (fft(ACF, nfft).real).clip(0) ## periodogram
RperMax = Rper.max()
Rper = where(Rper<trunc*RperMax,0,Rper)
Rper = where(Rper < trunc * RperMax, 0, Rper)
pi = pi
S = abs(Rper[0:(nf+1)])*dT/pi
w = linspace(0,pi/dT,nf+1)
S = abs(Rper[0:(nf + 1)]) * dT / pi
w = linspace(0, pi / dT, nf + 1)
So = _wafospec.SpecData1D(S, w, type=spectype, freqtype=ftype)
So.tr = self.tr
So.h = self.h
So.norm = self.norm
if rate > 1:
So.args = linspace(0, pi/dT, nf*rate)
if method=='stineman':
So.args = linspace(0, pi / dT, nf * rate)
if method == 'stineman':
So.data = stineman_interp(So.args, w, S)
else:
intfun = interpolate.interp1d(w, S, kind=method)
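For readers following the tospecdata hunk above: the spectral density is obtained by adding a small nugget to the ACF, embedding it in a circulant vector of length nfft, and keeping the clipped real part of its FFT. A minimal standalone sketch of that idea with plain NumPy; the helper name acf2spec and the exponential ACF are illustrative, not part of WAFO:

import numpy as np

def acf2spec(acf, dt=1.0, nugget=1e-12, trunc=1e-5):
    # Sketch of the circulant-embedding step in CovData1D.tospecdata (illustrative).
    acf = np.asarray(acf, dtype=float).copy()
    acf[0] += nugget                                # guard against round-off negatives
    n = acf.size
    nfft = int(2 ** np.ceil(np.log2(2 * n - 2)))    # next power of two, like nextpow2
    nf = nfft // 2                                  # number of positive frequencies
    circ = np.r_[acf, np.zeros(nfft - 2 * n + 2), acf[n - 1:0:-1]]
    rper = np.fft.fft(circ, nfft).real.clip(0)      # periodogram, negatives clipped
    rper[rper < trunc * rper.max()] = 0.0           # drop tiny spectral leakage
    spec = rper[:nf + 1] * dt / np.pi               # one-sided spectral density
    w = np.linspace(0.0, np.pi / dt, nf + 1)        # angular frequencies up to Nyquist
    return w, spec

# exponential ACF r(k) = exp(-0.5*k): the resulting spectrum is smooth and nonnegative
w, S = acf2spec(np.exp(-0.5 * np.arange(32)))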
@ -300,11 +300,11 @@ class CovData1D(WafoData):
[s] if lagtype=='t'
[m] otherwise
'''
dt1 = self.args[1]-self.args[0]
n = size(self.args)-1
t = self.args[-1]-self.args[0]
dt = t/n
if abs(dt-dt1) > 1e-10:
dt1 = self.args[1] - self.args[0]
n = size(self.args) - 1
t = self.args[-1] - self.args[0]
dt = t / n
if abs(dt - dt1) > 1e-10:
warnings.warn('Data is not uniformly sampled!')
return dt
@ -385,7 +385,7 @@ class CovData1D(WafoData):
dT = self.sampling_period()
x = zeros((ns, cases+1))
x = zeros((ns, cases + 1))
if derivative:
xder = x.copy()
@ -399,22 +399,22 @@ class CovData1D(WafoData):
## Covariance matrix
floatinfo = finfo(float)
if (abs(ACF[-1]) > floatinfo.eps): ## assuming ACF(n+1)==0
m2 = 2*n-1
nfft = 2**nextpow2(max(m2, 2*ns))
ACF = r_[ACF, zeros((nfft-m2,1)), ACF[-1:0:-1,:]]
m2 = 2 * n - 1
nfft = 2 ** nextpow2(max(m2, 2 * ns))
ACF = r_[ACF, zeros((nfft - m2, 1)), ACF[-1:0:-1, :]]
#disp('Warning: I am now assuming that ACF(k)=0 ')
#disp('for k>MAXLAG.')
else: # # ACF(n)==0
m2 = 2*n-2
nfft = 2**nextpow2(max(m2, 2*ns))
ACF = r_[ACF, zeros((nfft-m2, 1)), ACF[n-1:1:-1, :]]
m2 = 2 * n - 2
nfft = 2 ** nextpow2(max(m2, 2 * ns))
ACF = r_[ACF, zeros((nfft - m2, 1)), ACF[n - 1:1:-1, :]]
##m2=2*n-2
S = fft(ACF,nfft,axis=0).real ## periodogram
S = fft(ACF, nfft, axis=0).real ## periodogram
I = S.argmax()
k = flatnonzero(S<0)
if k.size>0:
k = flatnonzero(S < 0)
if k.size > 0:
#disp('Warning: Not able to construct a nonnegative circulant ')
#disp('vector from the ACF. Apply the parzen windowfunction ')
#disp('to the ACF in order to avoid this.')
@ -425,57 +425,57 @@ class CovData1D(WafoData):
S[k] = 0.
ix = flatnonzero(k>2*I)
if ix.size>0:
ix = flatnonzero(k > 2 * I)
if ix.size > 0:
## # truncating all oscillating values above 2 times the peak
## # frequency to zero to ensure that
## # that high frequency noise is not added to
## # the simulated timeseries.
ix0 = k[ix[0]]
S[ix0:-ix0] =0.0
S[ix0:-ix0] = 0.0
trunc = 1e-5
maxS = S[I]
k = flatnonzero(S[I:-I]<maxS*trunc)
if k.size>0:
S[k+I]=0.
k = flatnonzero(S[I:-I] < maxS * trunc)
if k.size > 0:
S[k + I] = 0.
## truncating small values to zero to ensure that
## that high frequency noise is not added to
## the simulated timeseries
cases1 = floor(cases/2)
cases2 = ceil(cases/2)
cases1 = floor(cases / 2)
cases2 = ceil(cases / 2)
# Generate standard normal random numbers for the simulations
#randn = np.random.randn
epsi = randn(nfft,cases2)+1j*randn(nfft,cases2)
Ssqr = sqrt(S/(nfft)) # #sqrt(S(wn)*dw )
ephat = epsi*Ssqr #[:,np.newaxis]
y = fft(ephat,nfft,axis=0)
x[:, 1:cases+1] = hstack((y[2:ns+2, 0:cases2].real, y[2:ns+2, 0:cases1].imag))
epsi = randn(nfft, cases2) + 1j * randn(nfft, cases2)
Ssqr = sqrt(S / (nfft)) # #sqrt(S(wn)*dw )
ephat = epsi * Ssqr #[:,np.newaxis]
y = fft(ephat, nfft, axis=0)
x[:, 1:cases + 1] = hstack((y[2:ns + 2, 0:cases2].real, y[2:ns + 2, 0:cases1].imag))
x[:, 0] = linspace(0,(ns-1)*dT,ns) ##(0:dT:(dT*(np-1)))'
x[:, 0] = linspace(0, (ns - 1) * dT, ns) ##(0:dT:(dT*(np-1)))'
if derivative:
Ssqr = Ssqr*r_[0:(nfft/2+1), -(nfft/2-1):0]*2*pi/nfft/dT
ephat = epsi*Ssqr #[:,newaxis]
y = fft(ephat,nfft,axis=0)
xder[:, 1:(cases+1)] = hstack((y[2:ns+2, 0:cases2].imag -y[2:ns+2, 0:cases1].real))
xder[:, 0] = x[:,0]
Ssqr = Ssqr * r_[0:(nfft / 2 + 1), -(nfft / 2 - 1):0] * 2 * pi / nfft / dT
ephat = epsi * Ssqr #[:,newaxis]
y = fft(ephat, nfft, axis=0)
xder[:, 1:(cases + 1)] = hstack((y[2:ns + 2, 0:cases2].imag - y[2:ns + 2, 0:cases1].real))
xder[:, 0] = x[:, 0]
if self.tr is not None:
print(' Transforming data.')
g = self.tr
if derivative:
for ix in range(cases):
tmp = g.gauss2dat(x[:,ix+1], xder[:,ix+1])
x[:,ix+1] = tmp[0]
xder[:,ix+1] = tmp[1]
tmp = g.gauss2dat(x[:, ix + 1], xder[:, ix + 1])
x[:, ix + 1] = tmp[0]
xder[:, ix + 1] = tmp[1]
else:
for ix in range(cases):
x[:, ix+1] = g.gauss2dat(x[:, ix+1])
x[:, ix + 1] = g.gauss2dat(x[:, ix + 1])
if derivative:
return x, xder
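The sim hunk above uses the same circulant embedding for simulation: the FFT of the embedded ACF gives nonnegative weights, complex white noise is scaled by sqrt(S/nfft), and the real and imaginary parts of its FFT are independent Gaussian realizations with the target covariance. A compact sketch under those assumptions; sim_gaussian_from_acf is a hypothetical helper, not WAFO's API:

import numpy as np

def sim_gaussian_from_acf(acf, ns, cases=1, seed=None):
    # Sketch of the FFT (circulant embedding) simulation used in CovData1D.sim.
    rng = np.random.default_rng(seed)
    acf = np.asarray(acf, dtype=float)
    n = acf.size
    nfft = int(2 ** np.ceil(np.log2(max(2 * n - 1, 2 * ns))))
    # symmetric circulant embedding; assumes the ACF is negligible at the last lag
    circ = np.r_[acf, np.zeros(nfft - 2 * n + 1), acf[-1:0:-1]]
    lam = np.fft.fft(circ).real.clip(0)             # eigenvalues of the circulant matrix
    cases2 = -(-cases // 2)                         # ceil(cases / 2)
    eps = rng.standard_normal((nfft, cases2)) + 1j * rng.standard_normal((nfft, cases2))
    y = np.fft.fft(eps * np.sqrt(lam / nfft)[:, None], axis=0)
    # real and imaginary parts give independent realizations with the target ACF
    return np.hstack((y.real, y.imag))[:ns, :cases]

paths = sim_gaussian_from_acf(np.exp(-0.1 * np.arange(50)), ns=200, cases=3, seed=0)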
@ -574,7 +574,7 @@ class CovData1D(WafoData):
warnings.warn(txt)
return self.sim(ns=N, cases=cases), zeros(Ns), zeros(Ns)
indg = where(1-isnan(x))[0] #indices to the known observations
indg = where(1 - isnan(x))[0] #indices to the known observations
#initializing variables
mu1o = zeros(Ns, 1)
@ -588,121 +588,121 @@ class CovData1D(WafoData):
if method.startswith('dec1'):
# only correct for variables having the Markov property
# but still seems to give a reasonable answer. Slow procedure.
Sigma = sptoeplitz(hstack((acf, zeros(N-n))))
Sigma = sptoeplitz(hstack((acf, zeros(N - n))))
#Soo=Sigma(~inds,~inds); # covariance between known observations
#S11=Sigma(inds,inds); # covariance between unknown observations
#S1o=Sigma(inds,~inds);# covariance between known and unknown observations
#tmp=S1o*pinv(full(Soo));
#tmp=S1o/Soo; # this is time consuming if Soo large
tmp = 2*Sigma[inds, indg]/(Sigma[indg, indg] + Sigma[indg, indg].T )
tmp = 2 * Sigma[inds, indg] / (Sigma[indg, indg] + Sigma[indg, indg].T)
if compute_sigma:
#standard deviation of the expected surface
#mu1o_std=sqrt(diag(S11-tmp*S1o'));
mu1o_std = sqrt(diag(Sigma[inds, inds]-tmp*Sigma[indg, inds]))
mu1o_std = sqrt(diag(Sigma[inds, inds] - tmp * Sigma[indg, inds]))
#expected surface conditioned on the known observations from x
mu1o = tmp*x[indg]
mu1o = tmp * x[indg]
#expected surface conditioned on the known observations from xs
mu1os = tmp*(xs[indg,:])
mu1os = tmp * (xs[indg, :])
# sampled surface conditioned on the known observations
sample = mu1o + xs[inds,:] - mu1os
sample = mu1o + xs[inds, :] - mu1os
elif method.startswith('dec2'):
# only correct for variables having the Markov property
# but still seems to give a reasonable answer
# approximating the expected surfaces conditioned on
# the known observations from x and xs by only using the closest points
Sigma = sptoeplitz(hstack((acf,zeros(n))))
n2 = int(floor(n/2))
idx = r_[0:2*n] + max(0,inds[0]-n2) # indices to the points used
tmpinds = zeros(N,dtype=bool)
Sigma = sptoeplitz(hstack((acf, zeros(n))))
n2 = int(floor(n / 2))
idx = r_[0:2 * n] + max(0, inds[0] - n2) # indices to the points used
tmpinds = zeros(N, dtype=bool)
tmpinds[inds] = True # temporary storage of indices to missing points
tinds = where(tmpinds[idx])[0] # indices to the points used
tindg = where(1-tmpinds[idx])[0]
tindg = where(1 - tmpinds[idx])[0]
ns = len(tinds); # number of missing data in the interval
nprev = 0; # number of previously simulated points
xsinds = xs[inds,:]
while ns>0:
tmp=2*Sigma[tinds, tindg]/(Sigma[tindg, tindg]+Sigma[tindg, tindg].T)
xsinds = xs[inds, :]
while ns > 0:
tmp = 2 * Sigma[tinds, tindg] / (Sigma[tindg, tindg] + Sigma[tindg, tindg].T)
if compute_sigma:
#standard deviation of the expected surface
#mu1o_std=sqrt(diag(S11-tmp*S1o'));
ix = slice(nprev+1,nprev+ns+1)
ix = slice(nprev + 1, nprev + ns + 1)
mu1o_std[ix] = max(mu1o_std[ix],
sqrt(diag(Sigma[tinds, tinds]-tmp*Sigma[tindg,tinds])))
sqrt(diag(Sigma[tinds, tinds] - tmp * Sigma[tindg, tinds])))
#end
#expected surface conditioned on the closest known observations
# from x and xs2
mu1o[(nprev+1):(nprev+ns+1)] = tmp*x[idx[tindg]]
mu1os[(nprev+1):(nprev+ns+1),:] = tmp*xs[idx[tindg],:]
mu1o[(nprev + 1):(nprev + ns + 1)] = tmp * x[idx[tindg]]
mu1os[(nprev + 1):(nprev + ns + 1), :] = tmp * xs[idx[tindg], :]
if idx[-1]==N-1:#
ns =0 # no more points to simulate
if idx[-1] == N - 1:#
ns = 0 # no more points to simulate
else:
# updating by putting expected surface into x
x[idx[tinds]] = mu1o[(nprev+1):(nprev+ns+1)]
xs[idx[tinds]] = mu1os[(nprev+1):(nprev+ns+1)]
x[idx[tinds]] = mu1o[(nprev + 1):(nprev + ns + 1)]
xs[idx[tinds]] = mu1os[(nprev + 1):(nprev + ns + 1)]
nw = sum(tmpinds[idx[-n2:]])# # data which we want to simulate once
tmpinds[idx[:-n2]] = False # removing indices to data ..
# which has been simulated
nprev = nprev+ns-nw # update # points simulated so far
nprev = nprev + ns - nw # update # points simulated so far
if (nw==0) and (nprev<Ns):
idx= r_[0:2*n]+(inds[nprev+1]-n2) # move to the next missing data
if (nw == 0) and (nprev < Ns):
idx = r_[0:2 * n] + (inds[nprev + 1] - n2) # move to the next missing data
else:
idx = idx+n
idx = idx + n
#end
tmp = N-idx[-1]
if tmp<0: # checking if tmp exceeds the limits
idx = idx+tmp
tmp = N - idx[-1]
if tmp < 0: # checking if tmp exceeds the limits
idx = idx + tmp
#end
# find new interval with missing data
tinds = where(tmpinds[idx])[0]
tindg = where(1-tmpinds[idx])[0]
tindg = where(1 - tmpinds[idx])[0]
ns = len(tinds);# # missing data
#end
#end
# sampled surface conditioned on the known observations
sample = mu1o+(xsinds-mu1os)
sample = mu1o + (xsinds - mu1os)
elif method.startswith('dec3'):
# this is not correct for even for variables having the
# Markov property but still seems to give a reasonable answer
# a quasi approach approximating the expected surfaces conditioned on
# the known observations from x and xs with a spline
mu1o = interp1(indg, x[indg],inds,'spline')
mu1os = interp1(indg, xs[indg,:],inds,'spline')
mu1o = interp1(indg, x[indg], inds, 'spline')
mu1os = interp1(indg, xs[indg, :], inds, 'spline')
# sampled surface conditioned on the known observations
sample = mu1o + (xs[inds,:]-mu1os)
sample = mu1o + (xs[inds, :] - mu1os)
elif method.startswith('exac') or method.startswith('pseu'):
# exact but slow. It also may not return any result
Sigma = sptoeplitz(hstack((acf,zeros(N-n))))
Sigma = sptoeplitz(hstack((acf, zeros(N - n))))
#Soo=Sigma(~inds,~inds); # covariance between known observations
#S11=Sigma(inds,inds); # covariance between unknown observations
#S1o=Sigma(inds,~inds);# covariance between known and unknown observations
#tmp=S1o/Soo; # this is time consuming if Soo large
if method[0]=='e': #exact
tmp = 2*Sigma[inds,indg]/(Sigma[indg,indg]+Sigma[indg,indg].T);
if method[0] == 'e': #exact
tmp = 2 * Sigma[inds, indg] / (Sigma[indg, indg] + Sigma[indg, indg].T);
else: # approximate the inverse with pseudo inverse
tmp = dot(Sigma[inds, indg],pinv(Sigma[indg,indg]))
tmp = dot(Sigma[inds, indg], pinv(Sigma[indg, indg]))
#end
#expected surface conditioned on the known observations from x
mu1o = dot(tmp,x[indg])
mu1o = dot(tmp, x[indg])
# Covariance conditioned on the known observations
Sigma1o = Sigma[inds,inds] - tmp*Sigma[indg,inds]
Sigma1o = Sigma[inds, inds] - tmp * Sigma[indg, inds]
#sample conditioned on the known observations from x
sample = random.multivariate_normal(mu1o, Sigma1o, cases)
#rndnormnd(mu1o,Sigma1o,cases )
if compute_sigma:
#standard deviation of the expected surface
mu1o_std=sqrt(diag(Sigma1o));
mu1o_std = sqrt(diag(Sigma1o));
#end
elif method.startswith('appr'):
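The 'exac'/'pseu' branch above conditions the missing values on the observed ones using the full Toeplitz covariance: conditional mean Sigma_mk * inv(Sigma_kk) * x_k and conditional covariance Sigma_mm - Sigma_mk * inv(Sigma_kk) * Sigma_km. A dense NumPy sketch of that step using solve instead of the symmetrized division; cond_sim_missing and the AR(1)-style ACF are illustrative only:

import numpy as np
from scipy.linalg import toeplitz

def cond_sim_missing(x, c, missing, cases=1, seed=None):
    # Exact conditional simulation of missing values in a Gaussian series (sketch).
    rng = np.random.default_rng(seed)
    x = np.asarray(x, dtype=float)
    Sigma = toeplitz(c)                              # full covariance from the ACF
    miss = np.asarray(missing)
    known = np.setdiff1d(np.arange(x.size), miss)
    Skm = Sigma[np.ix_(known, miss)]
    w = np.linalg.solve(Sigma[np.ix_(known, known)], Skm).T   # Sigma_mk * inv(Sigma_kk)
    mu = w @ x[known]                                # conditional mean of the gaps
    cov = Sigma[np.ix_(miss, miss)] - w @ Skm        # conditional covariance
    return rng.multivariate_normal(mu, cov, size=cases), mu

c = 0.8 ** np.arange(30)                             # AR(1)-type ACF, positive definite
x = np.random.default_rng(1).standard_normal(30)     # stand-in data, illustrative only
samples, mu = cond_sim_missing(x, c, missing=[10, 11, 12], cases=5)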
@ -714,58 +714,58 @@ class CovData1D(WafoData):
# approximately the same bandstructure as the inverse of the
# covariance matrix
Nsig = 2*n;
Nsig = 2 * n;
Sigma = sptoeplitz(hstack((ACF,zeros(Nsig-n))))
n2 = floor(Nsig/4)
idx = r_[0:Nsig]+max(0,inds[0]-n2) # indices to the points used
tmpinds = zeros(N,dtype=bool)
Sigma = sptoeplitz(hstack((ACF, zeros(Nsig - n))))
n2 = floor(Nsig / 4)
idx = r_[0:Nsig] + max(0, inds[0] - n2) # indices to the points used
tmpinds = zeros(N, dtype=bool)
tmpinds[inds] = True # temporary storage of indices to missing points
tinds = where(tmpinds[idx])[0] # indices to the points used
tindg = where(1-tmpinds[idx])[0]
tindg = where(1 - tmpinds[idx])[0]
ns = len(tinds) # number of missing data in the interval
nprev = 0 # number of previously simulated points
x2 = x
while ns>0:
while ns > 0:
#make sure MATLAB uses a symmetric matrix solver
tmp = 2*Sigma[tinds,tindg]/(Sigma[tindg,tindg]+Sigma[tindg,tindg].T)
Sigma1o = Sigma[tinds,tinds] - tmp*Sigma[tindg,tinds]
tmp = 2 * Sigma[tinds, tindg] / (Sigma[tindg, tindg] + Sigma[tindg, tindg].T)
Sigma1o = Sigma[tinds, tinds] - tmp * Sigma[tindg, tinds]
if compute_sigma:
#standard deviation of the expected surface
#mu1o_std=sqrt(diag(S11-tmp*S1o'));
mu1o_std[(nprev+1):(nprev+ns+1)] = max( mu1o_std[(nprev+1):(nprev+ns)] ,
mu1o_std[(nprev + 1):(nprev + ns + 1)] = max(mu1o_std[(nprev + 1):(nprev + ns)] ,
sqrt(diag(Sigma1o)))
#end
#expected surface conditioned on the closest known observations from x
mu1o[(nprev+1):(nprev+ns+1)] = tmp*x2[idx[tindg]]
mu1o[(nprev + 1):(nprev + ns + 1)] = tmp * x2[idx[tindg]]
#sample conditioned on the known observations from x
sample[(nprev+1):(nprev+ns+1),:] = rndnormnd(tmp*x[idx[tindg]],Sigma1o, cases)
if idx[-1] == N-1:
sample[(nprev + 1):(nprev + ns + 1), :] = rndnormnd(tmp * x[idx[tindg]], Sigma1o, cases)
if idx[-1] == N - 1:
ns = 0 # no more points to simulate
else:
# updating
x2[idx[tinds]] = mu1o[(nprev+1):(nprev+ns+1)] #expected surface
x[idx[tinds]] = sample[(nprev+1):(nprev+ns+1)]#sampled surface
nw = sum(tmpinds[idx[-n2::]]==True)# # data we want to simulate once more
x2[idx[tinds]] = mu1o[(nprev + 1):(nprev + ns + 1)] #expected surface
x[idx[tinds]] = sample[(nprev + 1):(nprev + ns + 1)]#sampled surface
nw = sum(tmpinds[idx[-n2::]] == True)# # data we want to simulate once more
tmpinds[idx[:-n2]] = False # removing indices to data ..
# which has been simulated
nprev = nprev+ns-nw # update # points simulated so far
nprev = nprev + ns - nw # update # points simulated so far
if (nw==0) and (nprev<Ns):
idx = r_[0:Nsig]+(inds[nprev+1]-n2) # move to the next missing data
if (nw == 0) and (nprev < Ns):
idx = r_[0:Nsig] + (inds[nprev + 1] - n2) # move to the next missing data
else:
idx = idx+n
idx = idx + n
#end
tmp = N-idx[-1]
if tmp<0: # checking if tmp exceeds the limits
tmp = N - idx[-1]
if tmp < 0: # checking if tmp exceeds the limits
idx = idx + tmp
#end
# find new interval with missing data
tinds = where(tmpinds[idx])[0]
tindg = where(1-tmpinds[idx])[0]
tindg = where(1 - tmpinds[idx])[0]
ns = len(tinds);# # missing data in the interval
#end
#end
@ -787,17 +787,17 @@ class CovData1D(WafoData):
def sptoeplitz(x):
k = where(x.ravel())[0]
n = len(x)
if len(k)>0.3*n:
if len(k) > 0.3 * n:
return toeplitz(x)
else:
spdiags = sparse.dia_matrix
data = x[k].reshape(-1,1).repeat(n,axis=-1)
data = x[k].reshape(-1, 1).repeat(n, axis= -1)
offsets = k
y = spdiags((data, offsets), shape=(n,n))
if k[0]==0:
y = spdiags((data, offsets), shape=(n, n))
if k[0] == 0:
offsets = k[1::]
data = data[1::,:]
return y + spdiags((data, -offsets), shape=(n,n))
data = data[1::, :]
return y + spdiags((data, -offsets), shape=(n, n))
def test_covdata():
import wafo.data
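sptoeplitz above builds a symmetric Toeplitz covariance matrix sparsely from the nonzero ACF lags and falls back to a dense matrix when more than 30% of the lags are nonzero. A small equivalent sketch with scipy.sparse.diags, assuming a short ACF; sparse_toeplitz is not WAFO's helper:

import numpy as np
from scipy import sparse
from scipy.linalg import toeplitz

def sparse_toeplitz(c):
    # Symmetric Toeplitz matrix with first column c, stored sparsely when c is mostly zero.
    c = np.asarray(c, dtype=float)
    n = c.size
    k = np.flatnonzero(c)                            # lags with nonzero covariance
    if k.size > 0.3 * n:                             # dense is cheaper when c is not sparse
        return toeplitz(c)
    offsets = np.r_[k, -k[k > 0]]                    # bands above and below the diagonal
    diags = [np.full(n - abs(o), c[abs(o)]) for o in offsets]
    return sparse.diags(diags, offsets, shape=(n, n))

acf = np.r_[1.0, 0.5, 0.2, np.zeros(97)]             # covariance that dies out after two lags
Sigma = sparse_toeplitz(acf)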

File diff suppressed because it is too large.

@ -6,9 +6,8 @@ from numpy import pi, sqrt, ones, zeros #@UnresolvedImport
from scipy import integrate as intg
import scipy.special.orthogonal as ort
from scipy import special as sp
import matplotlib
import pylab as plb
matplotlib.interactive(True)
_POINTS_AND_WEIGHTS = {}
def humps(x=None):
@ -344,7 +343,7 @@ def h_roots(n, method='newton'):
>>> import numpy as np
>>> [x,w] = h_roots(10)
>>> np.sum(x*w)
-5.2516042729766621e-019
-5.2516042729766621e-19
See also
--------
@ -453,7 +452,7 @@ def j_roots(n, alpha, beta, method='newton'):
--------
>>> [x,w]= j_roots(10,0,0)
>>> sum(x*w)
2.7755575615628914e-016
2.7755575615628914e-16
See also
--------
@ -554,7 +553,7 @@ def la_roots(n, alpha=0, method='newton'):
>>> import numpy as np
>>> [x,w] = h_roots(10)
>>> np.sum(x*w)
-5.2516042729766621e-019
-5.2516042729766621e-19
See also
--------
@ -939,7 +938,7 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
integration of x**2 from 0 to 2 and from 1 to 4
>>> from scitools import numpyutils as npt
scitools.easyviz backend is gnuplot
scitools.easyviz backend is matplotlib
>>> A = [0, 1]; B = [2,4]
>>> fun = npt.wrap2callable('x**2')
>>> [val1,err1] = gaussq(fun,A,B)
@ -1165,22 +1164,22 @@ def quadgr(fun,a,b,abseps=1e-5):
>>> import numpy as np
>>> Q, err = quadgr(np.log,0,1)
>>> quadgr(np.exp,0,9999*1j*np.pi)
(-2.0000000000122617, 2.1933275196062141e-009)
(-2.0000000000122617, 2.1933275196062141e-09)
>>> quadgr(lambda x: np.sqrt(4-x**2),0,2,1e-12)
(3.1415926535897811, 1.5809575870662229e-013)
(3.1415926535897811, 1.5809575870662229e-13)
>>> quadgr(lambda x: x**-0.75,0,1)
(4.0000000000000266, 5.6843418860808015e-014)
(4.0000000000000266, 5.6843418860808015e-14)
>>> quadgr(lambda x: 1./np.sqrt(1-x**2),-1,1)
(3.141596056985029, 6.2146261559092864e-006)
(3.141596056985029, 6.2146261559092864e-06)
>>> quadgr(lambda x: np.exp(-x**2),-np.inf,np.inf,1e-9) #% sqrt(pi)
(1.7724538509055152, 1.9722334876348668e-011)
(1.7724538509055152, 1.9722334876348668e-11)
>>> quadgr(lambda x: np.cos(x)*np.exp(-x),0,np.inf,1e-9)
(0.50000000000000044, 7.3296813063450372e-011)
(0.50000000000000044, 7.3296813063450372e-11)
See also
--------
@ -1418,7 +1417,7 @@ def qdemo(f,a,b):
#[x, w]=qrule(n,1)
#x = (b-a)/2*x + (a+b)/2 % Transform base points X.
#w = (b-a)/2*w % Adjust weigths.
#q = sum(feval(f,x).*w)
#q = sum(feval(f,x)*w)
qg[k] = q
eg[k] = abs(q - true_val)
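Several doctest outputs in this quadrature file were only updated to the shorter float format (e-019 becomes e-19 on newer Pythons). For reference, the symmetry check in the h_roots doctest can be reproduced with NumPy's built-in Gauss-Hermite rule; this uses numpy.polynomial.hermite.hermgauss, not WAFO's h_roots:

import numpy as np
from numpy.polynomial.hermite import hermgauss

# 10-point Gauss-Hermite rule: the nodes are symmetric about zero, so sum(x*w) is ~0
# (matching the h_roots doctest), and integrating x**2 * exp(-x**2) gives sqrt(pi)/2.
x, w = hermgauss(10)
print(np.sum(x * w))                                 # ~ -5e-19, round-off level
print(np.sum(w * x ** 2), np.sqrt(np.pi) / 2)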

@ -694,8 +694,9 @@ class TimeSeries(WafoData):
Examples
--------
>>> import wafo.data
>>> import wafo.objects as wo
>>> x = wafo.data.sea()
>>> ts = wafo.objects.mat2timeseries(x)
>>> ts = wo.mat2timeseries(x)
>>> rf = ts.tocovdata(lag=150)
>>> h = rf.plot()
@ -771,8 +772,9 @@ class TimeSeries(WafoData):
Example:
--------
>>> import wafo.data
>>> import wafo.objects as wo
>>> x = wafo.data.sea()
>>> ts = mat2timeseries(x)
>>> ts = wo.mat2timeseries(x)
>>> acf = ts.tocovdata(150)
>>> h = acf.plot()
'''
@ -814,6 +816,7 @@ class TimeSeries(WafoData):
acf = _wafocov.CovData1D(R[lags], t)
acf.stdev = sqrt(r_[ 0, 1 , 1 + 2 * cumsum(R[1:] ** 2)] / Ncens)
acf.children = [WafoData(-2. * acf.stdev[lags], t), WafoData(2. * acf.stdev[lags], t)]
acf.plot_args_children = ['r:']
acf.norm = norm
return acf
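The hunk above attaches +-2*stdev reference curves (plotted with 'r:') around the estimated ACF, with the standard error built from a cumulative sum of squared correlations. A rough standalone sketch of that kind of band using a Bartlett-type large-lag approximation; acf_with_bands is illustrative and not the exact WAFO estimator:

import numpy as np

def acf_with_bands(x, maxlag):
    # Sample autocorrelation plus an approximate large-lag standard error (sketch).
    x = np.asarray(x, dtype=float) - np.mean(x)
    n = x.size
    r = np.array([np.dot(x[:n - k], x[k:]) / n for k in range(maxlag + 1)])
    rho = r / r[0]                                   # normalized autocorrelation
    # Bartlett-style approximation; +-2*stdev gives bands like the ones added above
    stdev = np.sqrt(np.r_[0.0, 1.0, 1.0 + 2.0 * np.cumsum(rho[1:-1] ** 2)] / n)
    return rho, stdev

rho, s = acf_with_bands(np.random.default_rng(0).standard_normal(2000), maxlag=150)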

@ -8,4 +8,3 @@ Spectrum package in WAFO Toolbox.
from wafo.spectrum.core import SpecData1D
import wafo.spectrum.models
import wafo.spectrum.dispersion_relation
#from wafo.data_structures import SpecData1D

@ -1,9 +1,9 @@
from __future__ import division
from scipy.misc.ppimport import ppimport
from wafo.misc import meshgrid
from wafo.objects import mat2timeseries, TimeSeries
import warnings
import numpy as np
from numpy import (pi, inf, meshgrid, zeros, ones, where, nonzero, #@UnresolvedImport
from numpy import (pi, inf, zeros, ones, where, nonzero, #@UnresolvedImport
flatnonzero, ceil, sqrt, exp, log, arctan2, #@UnresolvedImport
tanh, cosh, sinh, random, atleast_1d, maximum, #@UnresolvedImport
minimum, diff, isnan, any, r_, conj, mod, #@UnresolvedImport
@ -945,7 +945,8 @@ class SpecData1D(WafoData):
... res = fun(x2[:,1::],axis=0)
... m = res.mean()
... sa = res.std()
... assert(np.abs(m-trueval)<sa)
... trueval, m, sa
... np.abs(m-trueval)<sa
waveplot(x1,'r',x2,'g',1,1)
@ -1316,8 +1317,6 @@ class SpecData1D(WafoData):
if verbose:
print('2nd order frequency Limits = %g,%g' % (f_limit_lo, f_limit_up))
## if nargout>3,
## %compute the sum and frequency effects separately
## [svec, dvec] = disufq((amp.'),w,kw,min(h,10^30),g,nmin,nmax)
@ -1402,7 +1401,10 @@ class SpecData1D(WafoData):
See also
---------
hermitetr, ochitr, lc2tr, dat2tr
transform.TrHermite
transform.TrOchi
objects.LevelCrossings.trdata
objects.TimeSeries.trdata
References:
-----------
@ -1429,7 +1431,7 @@ class SpecData1D(WafoData):
w = ravel(self.args)
S = ravel(self.data)
if self.freqtype in ['f', 'w']:
vari = 't'
#vari = 't'
if self.freqtype == 'f':
w = 2. * pi * w
S = S / (2. * pi)
@ -1514,7 +1516,7 @@ class SpecData1D(WafoData):
## skew = sum((6*C2+8*E2).*E)/sa^3 % skewness
## kurt = 3+48*sum((C2+E2).*E2)/sa^4 % kurtosis
return output
def testgaussian(self, ns,test0=None, cases=100, method='nonlinear',**opt):
def testgaussian(self, ns,test0=None, cases=100, method='nonlinear',verbose=False,**opt):
'''
TESTGAUSSIAN Test if a stochastic process is Gaussian.
@ -1607,6 +1609,7 @@ class SpecData1D(WafoData):
#xs = cov2sdat(R,[ns Nstep]);
#[g, tmp] = dat2tr(xs,method, **opt);
#test1 = [test1; tmp(:)]
if verbose:
print('finished %d of %d ' % (ix+1,rep) )
if rep>1:
@ -1704,6 +1707,14 @@ class SpecData1D(WafoData):
def nyquist_freq(self):
"""
Return Nyquist frequency
Example
-------
>>> import wafo.spectrum.models as sm
>>> Sj = sm.Jonswap(Hm0=5)
>>> S = Sj.tospecdata() #Make spectrum ob
>>> S.nyquist_freq()
3.0
"""
return self.args[-1]
@ -1722,8 +1733,11 @@ class SpecData1D(WafoData):
Example
-------
S = jonswap
dt = spec2dt(S)
>>> import wafo.spectrum.models as sm
>>> Sj = sm.Jonswap(Hm0=5)
>>> S = Sj.tospecdata() #Make spectrum ob
>>> S.sampling_period()
1.0471975511965976
See also
'''
@ -1880,9 +1894,16 @@ class SpecData1D(WafoData):
Example:
-------
S = jonswap
[Sn,mn4] = specnorm(S)
mts = spec2mom(S,2) % Should be equal to one!
>>> import wafo.spectrum.models as sm
>>> Sj = sm.Jonswap(Hm0=5)
>>> S = Sj.tospecdata() #Make spectrum ob
>>> S.moment(2)
([1.5614600345079888, 0.95567089481941048], ['m0', 'm0tt'])
>>> Sn = S.copy(); Sn.normalize()
Now the moments should be one
>>> Sn.moment(2)
([1.0000000000000004, 0.99999999999999967], ['m0', 'm0tt'])
'''
mom, unused_mtext = self.moment(nr=4, even=True)
m0 = mom[0]
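The new spectrum examples above (nyquist_freq, sampling_period, moment/normalize) all follow from the frequency grid: the Nyquist frequency is the last grid point, the sampling period is pi divided by it, and the spectral moments are integrals of w**n * S(w). A toy sketch of those relations; the grid and spectrum below are made up, not a Jonswap:

import numpy as np

w = np.linspace(0, 3.0, 257)               # angular frequency grid; its Nyquist frequency is w[-1]
spec = np.exp(-((w - 0.8) ** 2) / 0.1)     # toy one-sided spectrum (not a Jonswap)

nyquist = w[-1]                            # nyquist_freq(): the last point of the grid
dt = np.pi / nyquist                       # sampling_period(): pi / w_max, ~1.047 when w_max = 3
dw = w[1] - w[0]
m0 = np.sum(spec) * dw                     # zeroth spectral moment = variance of the process
m2 = np.sum(w ** 2 * spec) * dw            # second moment ('m0tt' in the moment() output above)
# normalize() rescales the spectrum and the frequency axis so that both moments become one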
@ -2018,7 +2039,6 @@ class SpecData1D(WafoData):
Examples:
---------
>>> import numpy as np
>>> import wafo.spectrum.models as sm
>>> Sj = sm.Jonswap(Hm0=5)
>>> S = Sj.tospecdata() #Make spectrum ob

@ -53,8 +53,6 @@ from numpy import (inf, atleast_1d, newaxis, any, minimum, maximum, array, #@Unr
isfinite, mod, expm1, tanh, cosh, finfo, ones, ones_like, isnan, #@UnresolvedImport
zeros_like, flatnonzero, sinc, hstack, vstack, real, flipud, clip) #@UnresolvedImport
from dispersion_relation import w2k
#ppimport.enable()
#_wafospectrum = ppimport.ppimport('wafo.spectrum')
from wafo.spectrum import SpecData1D
sech = lambda x: 1.0 / cosh(x)
@ -638,8 +636,6 @@ class Jonswap(ModelSpectrum):
def phi1(wi, h, g=9.81):
''' Factor transforming spectra to finite water depth spectra.
CALL: tr = phi1(w,h)
Input
-----
w : arraylike
@ -743,7 +739,8 @@ class Tmaspec(Jonswap):
phi1,
Torsethaugen
References:
References
----------
Buows, E., Gunther, H., Rosenthal, W., and Vincent, C.L. (1985)
'Similarity of the wind wave spectrum in finite depth water: 1 spectral form.'
J. Geophys. Res., Vol 90, No. C1, pp 975-986
@ -1361,8 +1358,8 @@ class Spreading(object):
(Hasselman: spa ~= spb) (cos-2s) [6.97 9.77 4.06 -2.3 0 1.05 3 ]
(Banner : spa ~= spb) (sech2) [2.61 2.28 1.3 -1.3 0.56 0.95 1.6]
Examples :
Examples
--------
>>> import pylab as plb
>>> D = Spreading('cos2s',s_a=10.0)
@ -1385,9 +1382,12 @@ class Spreading(object):
>>> plb.close('all')
See also mkdspec, plotspec, spec2spec
See also
--------
mkdspec, plotspec, spec2spec
References
---------
Krogstad, H.E. and Barstow, S.F. (1999)
"Directional Distributions in Ocean Wave Spectra"
Proceedings of the 9th ISOPE Conference, Vol III, pp. 79-86

@ -159,7 +159,7 @@ class TrData(WafoData, TrCommon):
>>> g.sigma
5
>>> g.dat2gauss(1,2,3)
[array([ 0.]), array([ 0.4]), array([ 0.6])]
Check that the departure from a Gaussian model is zero
>>> g.dist2gauss() < 1e-16

@ -3,6 +3,7 @@ Transform Gaussian models
-------------------------
TrHermite
TrOchi
TrLinear
'''
#-------------------------------------------------------------------------------
# Name: transform.models
@ -84,7 +85,10 @@ class TrHermite(TrCommon):
See also
--------
spec2skew, ochitr, lc2tr, dat2tr
SpecData1d.stats_nl
wafo.transform.TrOchi
wafo.objects.LevelCrossings.trdata
wafo.objects.TimeSeries.trdata
References
----------
@ -306,6 +310,11 @@ class TrLinear(TrCommon):
See also
--------
TrOchi
TrHermite
SpecData1D.stats_nl
LevelCrossings.trdata
TimeSeries.trdata
spec2skew, ochitr, lc2tr, dat2tr
"""

@ -0,0 +1,47 @@
from wafo.transform.models import TrHermite, TrOchi, TrLinear
def test_trhermite():
'''
>>> std = 7./4
>>> g = TrHermite(sigma=std, ysigma=std)
>>> g.dist2gauss()
3.9858776379926808
>>> g.mean
0.0
>>> g.sigma
1.75
>>> g.dat2gauss([0,1,2,3])
array([ 0.04654321, 1.03176393, 1.98871279, 2.91930895])
'''
def test_trochi():
'''
>>> std = 7./4
>>> g = TrOchi(sigma=std, ysigma=std)
>>> g.dist2gauss()
5.9322684525265501
>>> g.mean
0.0
>>> g.sigma
1.75
>>> g.dat2gauss([0,1,2,3])
array([ 6.21927960e-04, 9.90237621e-01, 1.96075606e+00,
2.91254576e+00])
'''
def test_trlinear():
'''
>>> std = 7./4
>>> g = TrLinear(sigma=std, ysigma=std)
>>> g.dist2gauss()
0.0
>>> g.mean
0.0
>>> g.sigma
1.75
>>> g.dat2gauss([0,1,2,3])
array([ 0., 1., 2., 3.])
'''
if __name__=='__main__':
import doctest
doctest.testmod()

@ -0,0 +1,32 @@
from wafo.transform import TrData
def test_trdata():
'''
Construct a linear transformation model
>>> import numpy as np
>>> sigma = 5; mean = 1
>>> u = np.linspace(-5,5); x = sigma*u+mean; y = u
>>> g = TrData(y,x)
>>> g.mean
array([ 1.])
>>> g.sigma
array([ 5.])
>>> g = TrData(y,x,mean=1,sigma=5)
>>> g.mean
1
>>> g.sigma
5
>>> g.dat2gauss(1,2,3)
[array([ 0.]), array([ 0.4]), array([ 0.6])]
>>> g.dat2gauss([0,1,2,3])
array([-0.2, 0. , 0.2, 0.4])
Check that the departure from a Gaussian model is zero
>>> g.dist2gauss() < 1e-16
True
'''
if __name__=='__main__':
import doctest
doctest.testmod()
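The TrData doctest above exercises a linear transformation u = (x - mean) / sigma, so the expected numbers can be checked with plain NumPy. A short sketch; reading the extra dat2gauss arguments as derivatives scaled by the slope 1/sigma is inferred from the printed values, not from WAFO documentation:

import numpy as np

sigma, mean = 5.0, 1.0
u = (np.array([0, 1, 2, 3]) - mean) / sigma    # [-0.2, 0., 0.2, 0.4], as in g.dat2gauss([0,1,2,3])
du = np.array([2.0, 3.0]) / sigma              # [0.4, 0.6]: the trailing outputs of g.dat2gauss(1,2,3)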