pep8 + added some updates from scipy.stats to wafo.stats

master
Per.Andreas.Brodtkorb 10 years ago
parent d308357c5b
commit 6aec932677

@@ -19,7 +19,7 @@ import warnings
import numpy as np
from numpy import (zeros, ones, sqrt, inf, where, nan,
atleast_1d, hstack, r_, linspace, flatnonzero, size,
- isnan, finfo, diag, ceil, floor, random, pi)
+ isnan, finfo, diag, ceil, random, pi)
from numpy.fft import fft
from numpy.random import randn
import scipy.interpolate as interpolate
@@ -33,13 +33,13 @@ import wafo.spectrum as _wafospec
from scipy.sparse.linalg.dsolve.linsolve import spsolve
from scipy.sparse.base import issparse
from scipy.signal.windows import parzen
- #_wafospec = JITImport('wafo.spectrum')
+ # _wafospec = JITImport('wafo.spectrum')
__all__ = ['CovData1D']
def _set_seed(iseed):
- if iseed != None:
+ if iseed is not None:
try:
random.set_state(iseed)
except:
@@ -385,7 +385,7 @@ class CovData1D(PlotData):
m2 = 2 * n - 1
nfft = 2 ** nextpow2(max(m2, 2 * ns))
acf = r_[acf, zeros((nfft - m2, 1)), acf[-1:0:-1, :]]
- #warnings,warn('I am now assuming that ACF(k)=0 for k>MAXLAG.')
+ # warnings,warn('I am now assuming that ACF(k)=0 for k>MAXLAG.')
else:  # ACF(n)==0
m2 = 2 * n - 2
nfft = 2 ** nextpow2(max(m2, 2 * ns))
@@ -397,10 +397,10 @@ class CovData1D(PlotData):
I = S.argmax()
k = flatnonzero(S < 0)
if k.size > 0:
- #disp('Warning: Not able to construct a nonnegative circulant ')
- #disp('vector from the ACF. Apply the parzen windowfunction ')
- #disp('to the ACF in order to avoid this.')
- #disp('The returned result is now only an approximation.')
+ _msg = '''
+ Not able to construct a nonnegative circulant vector from ACF.
+ Apply parzen windowfunction to the ACF in order to avoid this.
+ The returned result is now only an approximation.'''
# truncating negative values to zero to ensure that
# that this noise is not added to the simulated timeseries
@@ -409,10 +409,10 @@ class CovData1D(PlotData):
ix = flatnonzero(k > 2 * I)
if ix.size > 0:
# truncating all oscillating values above 2 times the peak
# frequency to zero to ensure that
# that high frequency noise is not added to
# the simulated timeseries.
ix0 = k[ix[0]]
S[ix0:-ix0] = 0.0
@@ -429,7 +429,7 @@ class CovData1D(PlotData):
cases2 = int(ceil(cases / 2))
# Generate standard normal random numbers for the simulations
- #randn = np.random.randn
+ # randn = np.random.randn
epsi = randn(nfft, cases2) + 1j * randn(nfft, cases2)
Ssqr = sqrt(S / (nfft))  # sqrt(S(wn)*dw )
ephat = epsi * Ssqr  # [:,np.newaxis]
@@ -573,7 +573,7 @@ class CovData1D(PlotData):
num_x = len(x)
num_acf = len(acf)
- if not i_unknown is None:
+ if i_unknown is not None:
x[i_unknown] = nan
i_unknown = flatnonzero(isnan(x))
num_unknown = len(i_unknown)
@@ -625,7 +625,8 @@ class CovData1D(PlotData):
Sigma = toeplitz(hstack((acf, zeros(Nsig - num_acf))))
overlap = int(Nsig / 4)
# indices to the points used
- idx = r_[0:Nsig] + max(0, min(i_unknown[0] - overlap, num_x - Nsig))
+ idx = r_[0:Nsig] + max(0, min(i_unknown[0] - overlap,
+ num_x - Nsig))
mask_unknown = zeros(num_x, dtype=bool)
# temporary storage of indices to missing points
mask_unknown[i_unknown] = True
@@ -644,7 +645,7 @@ class CovData1D(PlotData):
S1o_Sooinv = spsolve(Soo + Soo.T, 2 * So1).T
else:
Sooinv_So1, _res, _rank, _s = lstsq(Soo + Soo.T, 2 * So1,
cond=1e-4)
S1o_Sooinv = Sooinv_So1.T
Sigma1o = S11 - S1o_Sooinv.dot(So1)
if (diag(Sigma1o) < 0).any():
@@ -665,10 +666,10 @@ class CovData1D(PlotData):
else:
x2[idx[t_unknown]] = mu1o[ix]  # expected surface
x[idx[t_unknown]] = sample[ix]  # sampled surface
# removing indices to data which has been simulated
mask_unknown[idx[:-overlap]] = False
# data we want to simulate once more
- nw = sum(mask_unknown[idx[-overlap:]] == True)
+ nw = sum(mask_unknown[idx[-overlap:]] is True)
num_restored += ns - nw  # update # points simulated so far
idx = self._update_window(idx, i_unknown, num_x, num_acf,
@@ -716,10 +717,11 @@ def main():
inds = np.hstack((21 + np.arange(20),
1000 + np.arange(20),
1024 * 4 - 21 + np.arange(20)))
- sample, mu1o, mu1o_std = R.simcond(x[:, 1], method='approx', i_unknown=inds)
+ sample, mu1o, mu1o_std = R.simcond(x[:, 1], method='approx',
+ i_unknown=inds)
import matplotlib.pyplot as plt
- #inds = np.atleast_2d(inds).reshape((-1,1))
+ # inds = np.atleast_2d(inds).reshape((-1,1))
plt.plot(x[:, 1], 'k.', label='observed values')
plt.plot(inds, mu1o, '*', label='mu1o')
plt.plot(inds, sample.ravel(), 'r+', label='samples')
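The conditional-simulation example above stops after the plot calls; if run interactively it can be finished with a legend and an explicit show. These two standard matplotlib calls are a suggested completion, not part of the commit:

    plt.legend(loc='best')
    plt.show()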

@@ -1,6 +1,7 @@
- from numpy import pi, r_, minimum, maximum, atleast_1d, atleast_2d, mod, ones, floor, \
- random, eye, nonzero, where, repeat, sqrt, exp, inf, diag, zeros, sin, arcsin, nan #@UnresolvedImport
- from numpy import triu #@UnresolvedImport
+ from numpy import (pi, r_, minimum, maximum, atleast_1d, atleast_2d, mod, ones,
+ floor, random, eye, nonzero, where, repeat, sqrt, exp, inf,
+ diag, zeros, sin, arcsin, nan)  # @UnresolvedImport
+ from numpy import triu  # @UnresolvedImport
from scipy.special import ndtr as cdfnorm, ndtri as invnorm
from scipy.special import erfc
from wafo import mvn
@@ -10,10 +11,13 @@ import wafo.rindmod as rindmod
import warnings
from wafo.misc import common_shape
- __all__ = ['Rind', 'rindmod', 'mvnprdmod', 'mvn', 'cdflomax' , 'prbnormtndpc',
- 'prbnormndpc', 'prbnormnd', 'cdfnorm2d', 'prbnorm2d','cdfnorm','invnorm',
- 'test_docstring']
+ __all__ = ['Rind', 'rindmod', 'mvnprdmod', 'mvn', 'cdflomax', 'prbnormtndpc',
+ 'prbnormndpc', 'prbnormnd', 'cdfnorm2d', 'prbnorm2d', 'cdfnorm',
+ 'invnorm', 'test_docstring']
class Rind(object):
'''
RIND Computes multivariate normal expectations
@@ -24,7 +28,8 @@ class Rind(object):
m : array-like, size Ntdc
expectation of X=[Xt,Xd,Xc]
Blo, Bup : array-like, shape Mb x Nb
- Lower and upper barriers used to compute the integration limits, Hlo and Hup, respectively.
+ Lower and upper barriers used to compute the integration limits,
+ Hlo and Hup, respectively.
indI : array-like, length Ni
vector of indices to the different barriers in the indicator function.
(NB! restriction indI(1)=-1, indI(NI)=Nt+Nd, Ni = Nb+1)
@@ -50,7 +55,7 @@ class Rind(object):
"Jacobian" = |X(Nt+1)*...*X(Nt+Nd)|=|Xd(1)*Xd(2)..Xd(Nd)|
"condition" = Xc=xc(:,ix), ix=1,...,Nx.
X = [Xt, Xd, Xc], a stochastic vector of Multivariate Gaussian
- variables where Xt,Xd and Xc have the length Nt,Nd and Nc, respectively.
+ variables where Xt,Xd and Xc have the length Nt,Nd and Nc, respectively
(Recommended limitations Nx,Nt<=100, Nd<=6 and Nc<=10)
Multivariate probability is computed if Nd = 0.
@@ -126,10 +131,10 @@ class Rind(object):
Per A. Brodtkorb (2006)
"Evaluating Nearly Singular Multinormal Expectations with Application to
Wave Distributions",
- Methodology And Computing In Applied Probability, Volume 8, Number 1, pp. 65-91(27)
+ Methodology And Computing In Applied Probability, Volume 8, Number 1,
+ pp. 65-91(27)
'''
def __init__(self, **kwds):
'''
Parameters
@@ -149,25 +154,28 @@ class Rind(object):
scales the conditinal probability density, i.e.,
f_{Xc} = exp(-0.5*Xc*inv(Sxc)*Xc + XcScale) (default XcScale=0)
abseps, releps : real scalars, optional
- absolute and relative error tolerance. (default abseps=0, releps=1e-3)
+ absolute and relative error tolerance.
+ (default abseps=0, releps=1e-3)
coveps : real scalar, optional
error tolerance in Cholesky factorization (default 1e-13)
maxpts, minpts : scalar integers, optional
- maximum and minimum number of function values allowed. The parameter,
- maxpts, can be used to limit the time. A sensible strategy is to start
- with MAXPTS = 1000*N, and then increase MAXPTS if ERROR is too large.
+ maximum and minimum number of function values allowed. The
+ parameter, maxpts, can be used to limit the time. A sensible
+ strategy is to start with MAXPTS = 1000*N, and then increase MAXPTS
+ if ERROR is too large.
(Only for METHOD~=0) (default maxpts=40000, minpts=0)
seed : scalar integer, optional
seed to the random generator used in the integrations
(Only for METHOD~=0)(default floor(rand*1e9))
nit : scalar integer, optional
- maximum number of Xt variables to integrate. This parameter can be used
- to limit the time. If NIT is less than the rank of the covariance matrix,
- the returned result is a upper bound for the true value of the integral.
- (default 1000)
+ maximum number of Xt variables to integrate. This parameter can be
+ used to limit the time. If NIT is less than the rank of the
+ covariance matrix, the returned result is a upper bound for the
+ true value of the integral. (default 1000)
xcutoff : real scalar, optional
cut off value where the marginal normal distribution is truncated.
- (Depends on requested accuracy. A value between 4 and 5 is reasonable.)
+ (Depends on requested accuracy. A value between 4 and 5 is
+ reasonable.)
xsplit : real scalar
parameter controlling performance of quadrature integration:
if Hup>=xCutOff AND Hlo<-XSPLIT OR
@@ -187,8 +195,8 @@ class Rind(object):
XCUTOFF, MAXPTS and QUADNO will be set according to
INITOPTIONS.
nc1c2 : scalar integer, optional
- number of times to use the regression equation to restrict integration
- area. Nc1c2 = 1,2 is recommended. (default 2)
+ number of times to use the regression equation to restrict
+ integration area. Nc1c2 = 1,2 is recommended. (default 2)
(note: works only for method >0)
'''
self.method = 3
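A minimal sketch of how the options documented above are meant to be supplied, assuming the keyword arguments are simply forwarded to the instance as the docstring describes; the values shown are the documented defaults and are illustrative only:

    rind = Rind(method=3, nc1c2=2, abseps=0, releps=1e-3, nit=1000)
    # any subset of the documented options may be given; the rest keep defaults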
@@ -263,7 +271,7 @@ class Rind(object):
self.releps = min(self.abseps, 1.0e-2)
- if self.method == 0 :
+ if self.method == 0:
# This gives approximately the same accuracy as when using
# RINDDND and RINDNIT
# xCutOff= MIN(MAX(xCutOff+0.5d0,4.d0),5.d0)
@@ -278,17 +286,17 @@ class Rind(object):
self.nc1c2 = max(1, self.nc1c2)
xcut = abs(invnorm(trunc_error / (self.nc1c2 * 2)))
self.xcutoff = max(min(xcut, 8.5), 1.2)
- #self.abseps = max(self.abseps- truncError,0);
- #self.releps = max(self.releps- truncError,0);
+ # self.abseps = max(self.abseps- truncError,0);
+ # self.releps = max(self.releps- truncError,0);
if self.method > 0:
names = ['method', 'xcscale', 'abseps', 'releps', 'coveps',
'maxpts', 'minpts', 'nit', 'xcutoff', 'nc1c2', 'quadno',
'xsplit']
constants = [getattr(self, name) for name in names]
constants[0] = mod(constants[0], 10)
- rindmod.set_constants(*constants) #@UndefinedVariable
+ rindmod.set_constants(*constants)  # @UndefinedVariable
def __call__(self, cov, m, ab, bb, indI=None, xc=None, nt=None, **kwds):
if any(kwds):
@@ -317,7 +325,7 @@ class Rind(object):
Ex, indI = atleast_1d(m, indI)
if self.seed is None:
- seed = int(floor(random.rand(1) * 1e10)) #@UndefinedVariable
+ seed = int(floor(random.rand(1) * 1e10))  # @UndefinedVariable
else:
seed = int(self.seed)
@@ -336,7 +344,9 @@ class Rind(object):
Bup[0, ind] = minimum(Bup[0, ind], infinity * dev[indI[ind + 1]])
Blo[0, ind] = maximum(Blo[0, ind], -infinity * dev[indI[ind + 1]])
ind2 = indI + 1
- return rindmod.rind(BIG, Ex, xc, nt, ind2, Blo, Bup, infin, seed) #@UndefinedVariable
+ return rindmod.rind(BIG, Ex, xc, nt, ind2, Blo, Bup, infin, seed)  # @UndefinedVariable @IgnorePep8
def test_rind():
''' Small test function
@@ -346,17 +356,17 @@ def test_rind():
Bup = -1.2
indI = [-1, n - 1]  # Barriers
# A = np.repeat(Blo, n)
# B = np.repeat(Bup, n)  # Integration limits
m = zeros(n)
rho = 0.3
Sc = (ones((n, n)) - eye(n)) * rho + eye(n)
rind = Rind()
E0 = rind(Sc, m, Blo, Bup, indI)  # exact prob. 0.001946 A)
print(E0)
A = repeat(Blo, n)
B = repeat(Bup, n)  # Integration limits
- E1 = rind(triu(Sc), m, A, B) #same as E0
+ _E1 = rind(triu(Sc), m, A, B)  # same as E0
xc = zeros((0, 1))
infinity = 37
@@ -365,7 +375,8 @@ def test_rind():
Bup, Blo = atleast_2d(Bup, Blo)
Bup[0, ind] = minimum(Bup[0, ind], infinity * dev[indI[ind + 1]])
Blo[0, ind] = maximum(Blo[0, ind], -infinity * dev[indI[ind + 1]])
- E3 = rind(Sc, m, Blo, Bup, indI, xc, nt=1)
+ _E3 = rind(Sc, m, Blo, Bup, indI, xc, nt=1)
def cdflomax(x, alpha, m0):
'''
@@ -423,9 +434,10 @@ def cdflomax(x, alpha, m0):
c2 = alpha * c1
return cdfnorm(c1) - alpha * exp(-x ** 2 / 2 / m0) * cdfnorm(c2)
def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
'''
- Return Multivariate normal or T probability with product correlation structure.
+ Return Multivariate normal or T probability with product correlation.
Parameters
----------
@@ -435,13 +447,14 @@ def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
where -1 < rho[i] < 1
a,b : array-like
vector of lower and upper integration limits, respectively.
- Note: any values greater the 37 in magnitude, are considered as infinite values.
+ Note: any values greater the 37 in magnitude, are considered as
+ infinite values.
D : array-like
vector of means (default zeros(size(rho)))
df = Degrees of freedom, NDF<=0 gives normal probabilities (default)
abseps = absolute error tolerance. (default 1e-4)
IERC = 1 if strict error control based on fourth derivative
- 0 if intuitive error control based on halving the intervals (default)
+ 0 if error control based on halving the intervals (default)
HNC = start interval width of simpson rule (default 0.24)
Returns
@@ -453,7 +466,7 @@ def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
1, if N > 1000 or N < 1.
2, IF any abs(rho)>=1
4, if ANY(b(I)<=A(i))
- 5, if number of terms computed exceeds maximum number of evaluation points
+ 5, if number of terms exceeds maximum number of evaluation points
6, if fault accurs in normal subroutines
7, if subintervals are too narrow or too many
8, if bounds exceeds abseps
@@ -465,12 +478,12 @@ def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
Example:
--------
>>> import wafo.gaussian as wg
- >>> rho2 = np.random.rand(2);
- >>> a2 = np.zeros(2);
- >>> b2 = np.repeat(np.inf,2);
+ >>> rho2 = np.random.rand(2)
+ >>> a2 = np.zeros(2)
+ >>> b2 = np.repeat(np.inf,2)
>>> [val2,err2, ift2] = wg.prbnormtndpc(rho2,a2,b2)
>>> g2 = lambda x : 0.25+np.arcsin(x[0]*x[1])/(2*pi)
- >>> E2 = g2(rho2) #% exact value
+ >>> E2 = g2(rho2)  # exact value
>>> np.abs(E2-val2)<err2
True
@@ -500,9 +513,12 @@ def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
# Make sure integration limits are finite
A = np.clip(a - D, -100, 100)
B = np.clip(b - D, -100, 100)
- return mvnprdmod.prbnormtndpc(rho, A, B, df, abseps, IERC, HNC) #@UndefinedVariable
- def prbnormndpc(rho, a, b, abserr=1e-4, relerr=1e-4, usesimpson=True, usebreakpoints=False):
+ return mvnprdmod.prbnormtndpc(rho, A, B, df, abseps, IERC, HNC)  # @UndefinedVariable @IgnorePep8
+ def prbnormndpc(rho, a, b, abserr=1e-4, relerr=1e-4, usesimpson=True,
+ usebreakpoints=False):
'''
Return Multivariate Normal probabilities with product correlation
@@ -527,9 +543,9 @@ def prbnormndpc(rho, a, b, abserr=1e-4, relerr=1e-4, usesimpson=True, usebreakpo
Example:
-------
>>> import wafo.gaussian as wg
- >>> rho2 = np.random.rand(2);
- >>> a2 = np.zeros(2);
- >>> b2 = np.repeat(np.inf,2);
+ >>> rho2 = np.random.rand(2)
+ >>> a2 = np.zeros(2)
+ >>> b2 = np.repeat(np.inf,2)
>>> [val2,err2, ift2] = wg.prbnormndpc(rho2,a2,b2)
>>> g2 = lambda x : 0.25+np.arcsin(x[0]*x[1])/(2*pi)
>>> E2 = g2(rho2) #% exact value
@@ -561,38 +577,43 @@ def prbnormndpc(rho, a, b, abserr=1e-4, relerr=1e-4, usesimpson=True, usebreakpo
'''
# Call fortran implementation
- val, err, ier = mvnprdmod.prbnormndpc(rho, a, b, abserr, relerr, usebreakpoints, usesimpson); #@UndefinedVariable
+ val, err, ier = mvnprdmod.prbnormndpc(rho, a, b, abserr, relerr, usebreakpoints, usesimpson)  # @UndefinedVariable @IgnorePep8
if ier > 0:
- warnings.warn('Abnormal termination ier = %d\n\n%s' % (ier, _ERRORMESSAGE[ier]))
+ warnings.warn('Abnormal termination ier = %d\n\n%s' %
+ (ier, _ERRORMESSAGE[ier]))
return val, err, ier
_ERRORMESSAGE = {}
_ERRORMESSAGE[0] = ''
_ERRORMESSAGE[1] = '''
Maximum number of subdivisions allowed has been achieved. one can allow
more subdivisions by increasing the value of limit (and taking the
according dimension adjustments into account). however, if this yields
no improvement it is advised to analyze the integrand in order to
determine the integration difficulties. if the position of a local
difficulty can be determined (i.e. singularity discontinuity within
the interval), it should be supplied to the routine as an element of
- the vector points. If necessary an appropriate special-purpose integrator
- must be used, which is designed for handling the type of difficulty involved.
+ the vector points. If necessary an appropriate special-purpose
+ integrator must be used, which is designed for handling the type of
+ difficulty involved.
'''
_ERRORMESSAGE[2] = '''
the occurrence of roundoff error is detected, which prevents the requested
tolerance from being achieved. The error may be under-estimated.'''
_ERRORMESSAGE[3] = '''
- Extremely bad integrand behaviour occurs at some points of the integration interval.'''
+ Extremely bad integrand behaviour occurs at some points of the integration
+ interval.'''
_ERRORMESSAGE[4] = '''
- The algorithm does not converge. Roundoff error is detected in the extrapolation table.
- It is presumed that the requested tolerance cannot be achieved, and that
- the returned result is the best which can be obtained.'''
+ The algorithm does not converge. Roundoff error is detected in the
+ extrapolation table. It is presumed that the requested tolerance cannot be
+ achieved, and that the returned result is the best which can be obtained.
+ '''
_ERRORMESSAGE[5] = '''
The integral is probably divergent, or slowly convergent.
- It must be noted that divergence can occur with any other value of ier>0.'''
+ It must be noted that divergence can occur with any other value of ier>0.
+ '''
_ERRORMESSAGE[6] = '''the input is invalid because:
1) npts2 < 2
2) break points are specified outside the integration range
@@ -600,31 +621,30 @@ _ERRORMESSAGE[6] = '''the input is invalid because:
4) limit < npts2.'''
def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
'''
Multivariate Normal probability by Genz' algorithm.
Parameters
CORREL = Positive semidefinite correlation matrix
A = vector of lower integration limits.
B = vector of upper integration limits.
ABSEPS = absolute error tolerance.
RELEPS = relative error tolerance.
MAXPTS = maximum number of function values allowed. This
- parameter can be used to limit the time. A sensible
- strategy is to start with MAXPTS = 1000*N, and then
- increase MAXPTS if ERROR is too large.
+ parameter can be used to limit the time. A sensible strategy is to
+ start with MAXPTS = 1000*N, and then increase MAXPTS if ERROR is too
+ large.
METHOD = integer defining the integration method
- -1 KRBVRC randomized Korobov rules for the first 20
- variables, randomized Richtmeyer rules for the rest,
- NMAX = 500
- 0 KRBVRC, NMAX = 100 (default)
- 1 SADAPT Subregion Adaptive integration method, NMAX = 20
- 2 KROBOV Randomized KOROBOV rules, NMAX = 100
- 3 RCRUDE Crude Monte-Carlo Algorithm with simple
- antithetic variates and weighted results on restart
- 4 SPHMVN Monte-Carlo algorithm by Deak (1980), NMAX = 100
+ -1 KRBVRC randomized Korobov rules for the first 20 variables,
+ randomized Richtmeyer rules for the rest, NMAX = 500
+ 0 KRBVRC, NMAX = 100 (default)
+ 1 SADAPT Subregion Adaptive integration method, NMAX = 20
+ 2 KROBOV Randomized KOROBOV rules, NMAX = 100
+ 3 RCRUDE Crude Monte-Carlo Algorithm with simple
+ antithetic variates and weighted results on restart
+ 4 SPHMVN Monte-Carlo algorithm by Deak (1980), NMAX = 100
Returns
-------
VALUE REAL estimated value for the integral
@@ -667,7 +687,6 @@ def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
prbnormndpc, Rind
'''
m, n = correl.shape
Na = len(a)
Nb = len(b)
@@ -677,7 +696,7 @@ def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
if maxpts is None:
maxpts = 1000 * n
- maxpts = max(round(maxpts), 10 * n);
+ maxpts = max(round(maxpts), 10 * n)
# % array of correlation coefficients; the correlation
# % coefficient in row I column J of the correlation matrix
@@ -692,16 +711,16 @@ def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
A = np.clip(a, -100, 100)
B = np.clip(b, -100, 100)
ix = np.where(np.triu(np.ones((m, m)), 1) != 0)
- L = correl[ix].ravel() #% return only off diagonal elements
+ L = correl[ix].ravel()  # % return only off diagonal elements
infinity = 37
infin = np.repeat(2, n) - (B > infinity) - 2 * (A < -infinity)
- err, val, inform = mvn.mvndst(A, B, infin, L, maxpts, abseps, releps) #@UndefinedVariable
+ err, val, inform = mvn.mvndst(A, B, infin, L, maxpts, abseps, releps)  # @UndefinedVariable @IgnorePep8
return val, err, inform
- #CALL the mexroutine
+ # CALL the mexroutine
# t0 = clock;
# if ((method==0) && (n<=100)),
# %NMAX = 100
@@ -715,25 +734,29 @@ def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
# exTime = etime(clock,t0);
# '
- #% gauss legendre points and weights, n = 6
- _W6 = [ 0.1713244923791705e+00, 0.3607615730481384e+00, 0.4679139345726904e+00]
- _X6 = [-0.9324695142031522e+00, -0.6612093864662647e+00, -0.2386191860831970e+00]
- #% gauss legendre points and weights, n = 12
- _W12 = [ 0.4717533638651177e-01, 0.1069393259953183e+00, 0.1600783285433464e+00,
- 0.2031674267230659e+00, 0.2334925365383547e+00, 0.2491470458134029e+00]
- _X12 = [ -0.9815606342467191e+00, -0.9041172563704750e+00, -0.7699026741943050e+00,
- - 0.5873179542866171e+00, -0.3678314989981802e+00, -0.1252334085114692e+00]
- #% gauss legendre points and weights, n = 20
- _W20 = [ 0.1761400713915212e-01, 0.4060142980038694e-01,
- 0.6267204833410906e-01, 0.8327674157670475e-01,
- 0.1019301198172404e+00, 0.1181945319615184e+00,
- 0.1316886384491766e+00, 0.1420961093183821e+00,
- 0.1491729864726037e+00, 0.1527533871307259e+00]
- _X20 = [ -0.9931285991850949e+00, -0.9639719272779138e+00,
- - 0.9122344282513259e+00, -0.8391169718222188e+00,
- - 0.7463319064601508e+00, -0.6360536807265150e+00,
- - 0.5108670019508271e+00, -0.3737060887154196e+00,
- - 0.2277858511416451e+00, -0.7652652113349733e-01]
+ # gauss legendre points and weights, n = 6
+ _W6 = [0.1713244923791705e+00, 0.3607615730481384e+00, 0.4679139345726904e+00]
+ _X6 = [-0.9324695142031522e+00, -
+ 0.6612093864662647e+00, -0.2386191860831970e+00]
+ # gauss legendre points and weights, n = 12
+ _W12 = [0.4717533638651177e-01, 0.1069393259953183e+00, 0.1600783285433464e+00,
+ 0.2031674267230659e+00, 0.2334925365383547e+00, 0.2491470458134029e+00]
+ _X12 = [-0.9815606342467191e+00, -0.9041172563704750e+00,
+ -0.7699026741943050e+00,
+ - 0.5873179542866171e+00, -0.3678314989981802e+00,
+ -0.1252334085114692e+00]
+ # gauss legendre points and weights, n = 20
+ _W20 = [0.1761400713915212e-01, 0.4060142980038694e-01,
+ 0.6267204833410906e-01, 0.8327674157670475e-01,
+ 0.1019301198172404e+00, 0.1181945319615184e+00,
+ 0.1316886384491766e+00, 0.1420961093183821e+00,
+ 0.1491729864726037e+00, 0.1527533871307259e+00]
+ _X20 = [-0.9931285991850949e+00, -0.9639719272779138e+00,
+ - 0.9122344282513259e+00, -0.8391169718222188e+00,
+ - 0.7463319064601508e+00, -0.6360536807265150e+00,
+ - 0.5108670019508271e+00, -0.3737060887154196e+00,
+ - 0.2277858511416451e+00, -0.7652652113349733e-01]
def cdfnorm2d(b1, b2, r):
'''
@@ -798,8 +821,8 @@ def cdfnorm2d(b1, b2, r):
bvn = where(abs(r) > 1, nan, 0.0)
- two = 2.e0;
- twopi = 6.283185307179586e0;
+ two = 2.e0
+ twopi = 6.283185307179586e0
hk = h * k
@@ -813,90 +836,97 @@ def cdfnorm2d(b1, b2, r):
for i in range(10):
for sign in - 1, 1:
sn = sin(asr[k1] * (sign * _X20[i] + 1) / 2)
- bvn[k01] = bvn[k01] + _W20[i] * exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn));
+ bvn[k01] = bvn[k01] + _W20[i] * \
+ exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn))
k1, = nonzero((0.3 <= r[k0]) & (r[k0] < 0.75))
if len(k1) > 0:
- k01 = k0[k1];
+ k01 = k0[k1]
for i in range(6):
for sign in - 1, 1:
- sn = sin(asr[k1] * (sign * _X12[i] + 1) / 2);
- bvn[k01] = bvn[k01] + _W12[i] * exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn));
+ sn = sin(asr[k1] * (sign * _X12[i] + 1) / 2)
+ bvn[k01] = bvn[k01] + _W12[i] * \
+ exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn))
- k1, = nonzero(r[k0] < 0.3);
+ k1, = nonzero(r[k0] < 0.3)
if len(k1) > 0:
k01 = k0[k1]
for i in range(3):
for sign in - 1, 1:
sn = sin(asr[k1] * (sign * _X6[i] + 1) / 2)
- bvn[k01] = bvn[k01] + _W6[i] * exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn))
+ bvn[k01] = bvn[k01] + _W6[i] * \
+ exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn))
bvn[k0] *= asr / (two * twopi)
bvn[k0] += fi(-h[k0]) * fi(-k[k0])
- k1, = nonzero((0.925 <= abs(r)) & (abs(r) <= 1));
+ k1, = nonzero((0.925 <= abs(r)) & (abs(r) <= 1))
if len(k1) > 0:
- k2, = nonzero(r[k1] < 0);
+ k2, = nonzero(r[k1] < 0)
if len(k2) > 0:
- k12 = k1[k2];
- k[k12] = -k[k12];
- hk[k12] = -hk[k12];
+ k12 = k1[k2]
+ k[k12] = -k[k12]
+ hk[k12] = -hk[k12]
- k3, = nonzero(abs(r[k1]) < 1);
+ k3, = nonzero(abs(r[k1]) < 1)
if len(k3) > 0:
- k13 = k1[k3];
- a2 = (1 - r[k13]) * (1 + r[k13]);
+ k13 = k1[k3]
+ a2 = (1 - r[k13]) * (1 + r[k13])
a = sqrt(a2)
- b = abs(h[k13] - k[k13]);
- bs = b * b;
- c = (4.e0 - hk[k13]) / 8.e0;
- d = (12.e0 - hk[k13]) / 16.e0;
- asr = -(bs / a2 + hk[k13]) / 2.e0;
- k4, = nonzero(asr > -100.e0);
+ b = abs(h[k13] - k[k13])
+ bs = b * b
+ c = (4.e0 - hk[k13]) / 8.e0
+ d = (12.e0 - hk[k13]) / 16.e0
+ asr = -(bs / a2 + hk[k13]) / 2.e0
+ k4, = nonzero(asr > -100.e0)
if len(k4) > 0:
- bvn[k13[k4]] = a[k4] * exp(asr[k4]) * (1 - c[k4] *
- (bs[k4] - a2[k4]) * (1 - d[k4] * bs[k4] / 5) / 3
- + c[k4] * d[k4] * a2[k4] ** 2 / 5);
+ bvn[k13[k4]] = (a[k4] * exp(asr[k4]) *
+ (1 - c[k4] * (bs[k4] - a2[k4]) *
+ (1 - d[k4] * bs[k4] / 5) / 3 +
+ c[k4] * d[k4] * a2[k4] ** 2 / 5))
- k5, = nonzero(hk[k13] < 100.e0);
+ k5, = nonzero(hk[k13] < 100.e0)
if len(k5) > 0:
- #% b = sqrt(bs);
- k135 = k13[k5];
- bvn[k135] = bvn[k135] - exp(-hk[k135] / 2) * sqrt(twopi) * fi(-b[k5] / a[k5]) * b[k5] * (1 - c[k5] * bs[k5] * (1 - d[k5] * bs[k5] / 5) / 3)
+ # b = sqrt(bs);
+ k135 = k13[k5]
+ bvn[k135] = bvn[k135] - exp(-hk[k135] / 2) * sqrt(twopi) * fi(-b[k5] / a[k5]) * \
+ b[k5] * (1 - c[k5] * bs[k5] * (1 - d[k5] * bs[k5] / 5) / 3)
a /= two
for i in range(10):
for sign in - 1, 1:
- xs = (a * (sign * _X20[i] + 1)) ** 2;
- rs = sqrt(1 - xs);
- asr = -(bs / xs + hk[k13]) / 2;
- k6, = nonzero(asr > -100.e0) ;
+ xs = (a * (sign * _X20[i] + 1)) ** 2
+ rs = sqrt(1 - xs)
+ asr = -(bs / xs + hk[k13]) / 2
+ k6, = nonzero(asr > -100.e0)
if len(k6) > 0:
k136 = k13[k6]
bvn[k136] += (a[k6] * _W20[i] * exp(asr[k6]) *
- (exp(-hk[k136] * (1 - rs[k6]) / (2 * (1 + rs[k6]))) / rs[k6] -
- (1 + c[k6] * xs[k6] * (1 + d[k6] * xs[k6]))))
+ (exp(-hk[k136] * (1 - rs[k6]) /
+ (2 * (1 + rs[k6]))) / rs[k6] -
+ (1 + c[k6] * xs[k6] *
+ (1 + d[k6] * xs[k6]))))
- bvn[k3] = -bvn[k3] / twopi;
+ bvn[k3] = -bvn[k3] / twopi
- k7, = nonzero(r[k1] > 0);
+ k7, = nonzero(r[k1] > 0)
if len(k7):
k17 = k1[k7]
- bvn[k17] += fi(-np.maximum(h[k17], k[k17]));
+ bvn[k17] += fi(-np.maximum(h[k17], k[k17]))
- k8, = nonzero(r[k1] < 0);
+ k8, = nonzero(r[k1] < 0)
if len(k8) > 0:
- k18 = k1[k8];
- bvn[k18] = -bvn[k18] + np.maximum(0, fi(-h[k18]) - fi(-k[k18]));
+ k18 = k1[k8]
+ bvn[k18] = -bvn[k18] + np.maximum(0, fi(-h[k18]) - fi(-k[k18]))
bvn.shape = cshape
return bvn
def fi(x):
return 0.5 * (erfc((-x) / sqrt(2)))
def prbnorm2d(a, b, r):
'''
Returns Bivariate Normal probability
@@ -932,7 +962,7 @@ def prbnorm2d(a, b, r):
infinity = 37
lower = np.asarray(a)
upper = np.asarray(b)
- if np.all((lower <= -infinity) & (infinity<=upper)):
+ if np.all((lower <= -infinity) & (infinity <= upper)):
return 1.0
if (lower >= upper).any():
return 0.0
@@ -941,37 +971,43 @@ def prbnorm2d(a, b, r):
if np.all(infin == 2):
return (bvd(lower[0], lower[1], correl)
- bvd(upper[0], lower[1], correl)
- bvd(lower[0], upper[1], correl)
+ bvd(upper[0], upper[1], correl))
elif (infin[0] == 2 and infin[1] == 1):
- return bvd(lower[0], lower[1], correl) - bvd(upper[0], lower[1], correl)
- elif (infin[0] == 1 and infin[1] == 2) :
- return bvd(lower[0], lower[1], correl) - bvd(lower[0], upper[1], correl)
- elif (infin[0] == 2 and infin[1] == 0) :
- return bvd(-upper[0], -upper[1], correl) - bvd(-lower[0], -upper[1], correl)
- elif (infin[0] == 0 and infin[1] == 2):
- return bvd(-upper[0], -upper[1], correl) - bvd(-upper[0], -lower[1], correl)
- elif (infin[0] == 1 and infin[1] == 0) :
+ return (bvd(lower[0], lower[1], correl) -
+ bvd(upper[0], lower[1], correl))
+ elif (infin[0] == 1 and infin[1] == 2):
+ return (bvd(lower[0], lower[1], correl) -
+ bvd(lower[0], upper[1], correl))
+ elif (infin[0] == 2 and infin[1] == 0):
+ return (bvd(-upper[0], -upper[1], correl) -
+ bvd(-lower[0], -upper[1], correl))
+ elif (infin[0] == 0 and infin[1] == 2):
+ return (bvd(-upper[0], -upper[1], correl) -
+ bvd(-upper[0], -lower[1], correl))
+ elif (infin[0] == 1 and infin[1] == 0):
return bvd(lower[0], -upper[1], -correl)
- elif (infin[0] == 0 and infin[1] == 1) :
+ elif (infin[0] == 0 and infin[1] == 1):
return bvd(-upper[0], lower[1], -correl)
elif (infin[0] == 1 and infin[1] == 1):
return bvd(lower[0], lower[1], correl)
- elif (infin[0] == 0 and infin[1] == 0) :
+ elif (infin[0] == 0 and infin[1] == 0):
return bvd(-upper[0], -upper[1], correl)
return 1
def bvd(lo, up, r):
return cdfnorm2d(-lo, -up, r)
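As a quick check of the rectangle decomposition in prbnorm2d above: with zero correlation the bivariate probability factors into one-dimensional normal probabilities. A doctest-style sketch in the same spirit as the other examples in this file, assuming the usual wafo.gaussian import works in the test environment:

    >>> import numpy as np
    >>> import wafo.gaussian as wg
    >>> from scipy.special import ndtr
    >>> a, b = np.array([-1.0, -0.5]), np.array([1.0, 2.0])
    >>> val = wg.prbnorm2d(a, b, 0.0)
    >>> expected = (ndtr(b[0]) - ndtr(a[0])) * (ndtr(b[1]) - ndtr(a[1]))
    >>> bool(np.abs(val - expected) < 1e-6)
    True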
def test_docstrings():
import doctest
doctest.testmod()
if __name__ == '__main__':
test_docstrings()
- #if __name__ == '__main__':
+ # if __name__ == '__main__':
# if False: #True: #
# test_rind()
# else:

@@ -70,7 +70,7 @@ def rotation_matrix(heading, pitch, roll):
deg2rad = np.pi / 180
H = heading * deg2rad
P = pitch * deg2rad
R = roll * deg2rad  # Convert to radians
data.put(0, cos(H) * cos(P))
data.put(1, cos(H) * sin(P) * sin(R) - sin(H) * cos(R))
@@ -167,7 +167,7 @@ def spaceline(start_point, stop_point, num=10):
e1, e2 = np.atleast_1d(start_point, stop_point)
e2m1 = e2 - e1
length = np.sqrt((e2m1 ** 2).sum())
- #length = sqrt((E2[0]-E1(1))^2 + (E2(2)-E1(2))^2 + (E2(3)-E1(3))^2);
+ # length = sqrt((E2[0]-E1(1))^2 + (E2(2)-E1(2))^2 + (E2(3)-E1(3))^2)
C = e2m1 / length
delta = length / float(num - 1)
return np.array([e1 + n * delta * C for n in range(num)])
@@ -358,8 +358,9 @@ def sub2index(shape, *subscripts, **kwds):
ndx = 0
s0 = np.shape(subscripts[0])
for i, subscript in enumerate(subscripts):
- np.testing.assert_equal(s0, np.shape(subscript),
- 'The subscripts vectors must all be of the same shape.')
+ np.testing.assert_equal(
+ s0, np.shape(subscript),
+ 'The subscripts vectors must all be of the same shape.')
if (np.any(subscript < 0)) or (np.any(s[i] <= subscript)):
raise IndexError('Out of range subscript.')
@@ -399,7 +400,7 @@ class JITImport(object):
except:
if self._module is None:
self._module = __import__(self._module_name, None, None, ['*'])
- #assert(isinstance(self._module, types.ModuleType), 'module')
+ # assert(isinstance(self._module, types.ModuleType), 'module')
return getattr(self._module, attr)
else:
raise
@@ -634,7 +635,7 @@ def _findcross(xn):
for ix in iz.tolist():
xn[ix] = xn[ix - 1]
- #% indices to local level crossings ( without turningpoints)
+ # indices to local level crossings ( without turningpoints)
ind, = (xn[:n - 1] * xn[1:] < 0).nonzero()
return ind
@@ -708,7 +709,8 @@ def findcross(x, v=0.0, kind=None):
# make sure the first is a level v down-crossing if wdef=='tw'
# or make sure the first is a level v up-crossing if
# wdef=='cw'
- xor = lambda a, b: a ^ b
+ def xor(a, b):
+ return a ^ b
first_is_down_crossing = int(xn[ind[0]] > xn[ind[0] + 1])
if xor(first_is_down_crossing, kind in ('dw', 'tw')):
ind = ind[1::]
@@ -979,7 +981,7 @@ def findrfc(tp, h=0.0, method='clib'):
ix += 1
ind[ix] = (Tstart + 2 * i + 1)
ix += 1
- #iy = i
+ # iy = i
continue
# goto L180
@@ -1119,7 +1121,7 @@ def mctp2rfc(fmM, fMm=None):
m0 = max(0, f_min[0] - np.sum(f_rfc[N - k + 1:N, 0]))
M0 = max(0, f_max[N - 1 - k] - np.sum(f_rfc[N - 1 - k, 1:k]))
f_rfc[N - 1 - k, 0] = min(m0, M0)
- #% n_loops_left=N-k+1
+ # n_loops_left=N-k+1
# end
for k in range(1, N):
@@ -1202,21 +1204,27 @@ def rfcfilter(x, h, method=0):
j = 0
t0 = 0
y0 = y[t0]
z0 = 0
+ def aleb(a, b):
+ return a <= b
+ def altb(a, b):
+ return a < b
if method == 0:
- cmpfun1 = lambda a, b: a <= b
- cmpfun2 = lambda a, b: a < b
+ cmpfun1 = aleb
+ cmpfun2 = altb
else:
- cmpfun1 = lambda a, b: a < b
- cmpfun2 = lambda a, b: a <= b
+ cmpfun1 = altb
+ cmpfun2 = aleb
# The rainflow filter
for tim1, yi in enumerate(y[1::]):
fpi = y0 + h
fmi = y0 - h
ti = tim1 + 1
- #yi = y[ti]
+ # yi = y[ti]
if z0 == 0:
if cmpfun1(yi, fmi):
@@ -1255,7 +1263,7 @@ def rfcfilter(x, h, method=0):
t0, y0, z0 = t1, y1, z1
# end
- #% Update y if last y0 is greater than (or equal) threshold
+ # Update y if last y0 is greater than (or equal) threshold
if cmpfun1(h, abs(y0 - y[t[j]])):
j += 1
t[j] = t0
@@ -1349,7 +1357,8 @@ def findtp(x, h=0.0, kind=None):
ind = ind[ind1]
if kind in ('mw', 'Mw'):
- xor = lambda a, b: a ^ b
+ def xor(a, b):
+ return a ^ b
# make sure that the first is a Max if wdef == 'Mw'
# or make sure that the first is a min if wdef == 'mw'
first_is_max = (x[ind[0]] > x[ind[1]])
@@ -1605,15 +1614,15 @@ def findoutliers(x, zcrit=0.0, dcrit=None, ddcrit=None, verbose=False):
print('Found %d spurious negative jumps of D^2x' % tmp.size)
if zcrit >= 0.0:
- #% finding consecutive values less than zcrit apart.
+ # finding consecutive values less than zcrit apart.
indzeros = (abs(dxn) <= zcrit)
indz, = nonzero(indzeros)
if indz.size > 0:
indz = indz + 1
- #%finding the beginning and end of consecutive equal values
+ # finding the beginning and end of consecutive equal values
indtr, = nonzero((diff(indzeros)))
indtr = indtr + 1
- #%indices to consecutive equal points
+ # indices to consecutive equal points
# removing the point before + all equal points + the point after
if True:
ind = hstack((ind, indtr - 1, indz, indtr, indtr + 1))
@@ -1789,7 +1798,7 @@ def stirlerr(n):
<http://lists.gnu.org/archive/html/octave-maintainers/2011-09/pdfK0uKOST642.pdf>
'''
S0 = 0.083333333333333333333  # /* 1/12 */
S1 = 0.00277777777777777777778  # /* 1/360 */
S2 = 0.00079365079365079365079365  # /* 1/1260 */
S3 = 0.000595238095238095238095238  # /* 1/1680 */
@@ -1821,7 +1830,7 @@ def stirlerr(n):
def getshipchar(value=None, property="max_deadweight",  # @ReservedAssignment
- **kwds):  # @IgnorePep8
+ ** kwds):  # @IgnorePep8
'''
Return ship characteristics from value of one ship-property
@@ -1909,7 +1918,7 @@ def getshipchar(value=None, property="max_deadweight",  # @ReservedAssignment
draught = round(0.80 * max_deadweight ** 0.24 * 10) / 10
draught_err = draught * 0.22
- #S = round(2/3*(L)**0.525)
+ # S = round(2/3*(L)**0.525)
speed = round(1.14 * max_deadweight ** 0.21 * 10) / 10
speed_err = speed * 0.10
@@ -2171,7 +2180,7 @@ def hyp2f1(a, b, c, z, rho=0.5):
e8 = gammaln(c - a - b)
e9 = gammaln(a + b - c)
_cmab = c - a - b
- #~(np.round(cmab) == cmab & cmab <= 0)
+ # ~(np.round(cmab) == cmab & cmab <= 0)
if abs(z) <= rho:
h = hyp2f1_taylor(a, b, c, z, 1e-15)
elif abs(1 - z) <= rho:  # % Require that |arg(1-z)|<pi
@@ -2253,7 +2262,7 @@ def hygfz(A, B, C, Z):
PI = 3.141592653589793
EL = .5772156649015329
if (L0 or L1):
# 'The hypergeometric series is divergent'
return np.inf
NM = 0
@@ -2869,7 +2878,7 @@ def trangood(x, f, min_n=None, min_x=None, max_x=None, max_n=inf):
numpy.interp
"""
xo, fo = atleast_1d(x, f)
- #n = xo.size
+ # n = xo.size
if (xo.ndim != 1):
raise ValueError('x must be a vector.')
if (fo.ndim != 1):
@@ -2901,14 +2910,14 @@ def trangood(x, f, min_n=None, min_x=None, max_x=None, max_n=inf):
x0 = xo[0]
L = float(xn - x0)
if ((nf < min_n) or (max_n < nf) or any(abs(ddx) > 10 * _EPS * (L))):
- # % pab 07.01.2001: Always choose the stepsize df so that
- # % it is an exactly representable number.
- # % This is important when calculating numerical derivatives and is
- # % accomplished by the following.
+ # pab 07.01.2001: Always choose the stepsize df so that
+ # it is an exactly representable number.
+ # This is important when calculating numerical derivatives and is
+ # accomplished by the following.
dx = L / (min(min_n, max_n) - 1)
dx = (dx + 2.) - 2.
xi = arange(x0, xn + dx / 2., dx)
- #% New call pab 11.11.2000: This is much quicker
+ # New call pab 11.11.2000: This is much quicker
fo = interp(xi, xo, fo)
xo = xi
@@ -2989,7 +2998,7 @@ def tranproc(x, f, x0, *xi):
xo, fo = trangood(xo, fo, min_x=min(x0), max_x=max(x0), max_n=nmax)
n = f.shape[0]
- #y = x0.copy()
+ # y = x0.copy()
xu = (n - 1) * (x0 - xo[0]) / (xo[-1] - xo[0])
fi = asarray(floor(xu), dtype=int)
@@ -3294,7 +3303,7 @@ def fourier(data, t=None, T=None, m=None, n=None, method='trapz'):
# Compute M-1 more coefficients
tmp = 2 * pi * t / T
- #% tmp = 2*pi*(0:N-1).'/(N-1);
+ # tmp = 2*pi*(0:N-1).'/(N-1);
for i in range(1, m):
a[i] = intfun(x * cos(i * tmp), t, axis=-1)
b[i] = intfun(x * sin(i * tmp), t, axis=-1)
@@ -3363,24 +3372,24 @@ def test_docstrings():
def test_hyp2f1():
# 1/(1-x) = F(1,1,1,x) = F(1,b,b,x) = F(a,1,a,x)
# (1+x)^n = F(-n,b,b,-x)
# atan(x) = x*F(.5,1,1.5,-x^2)
# asin(x) = x*F(.5,.5,1.5,x^2)
# log(x) = x*F(1,1,2,-x)
# log(1+x)-log(1-x) = 2*x*F(.5,1,1.5,x^2)
x = linspace(0., .7, 20)
y = hyp2f1_taylor(-1, -4, 1, .9)
_y2 = hygfz(-1, -4, 1, .9)
_y3 = hygfz(5, -300, 10, 0.5)
_y4 = hyp2f1_taylor(5, -300, 10, 0.5)
- #y = hyp2f1(0.1, 0.2, 0.3, 0.5)
- #y = hyp2f1(1, 1.5, 3, -4 +3j)
- #y = hyp2f1(5, 7.5, 2.5, 5)
+ # y = hyp2f1(0.1, 0.2, 0.3, 0.5)
+ # y = hyp2f1(1, 1.5, 3, -4 +3j)
+ # y = hyp2f1(5, 7.5, 2.5, 5)
# fun = lambda x : 1./(1-x)
# x = .99
# y = hyp2f1(1,1,1,x)
# print(y-fun(x))
#
plt = plotbackend
plt.interactive(False)
plt.semilogy(x, np.abs(y - 1. / (1 - x)) + 1e-20, 'r')
@@ -3389,4 +3398,4 @@ def test_hyp2f1():
if __name__ == "__main__":
test_docstrings()
- #test_hyp2f1()
+ # test_hyp2f1()

@@ -1822,7 +1822,7 @@ class TimeSeries(PlotData):
if pdef[2] in ('u', 'd'):
t1 = ecross(ti, x, index[(start + dist):nn:step], vh)
- else:  # % min, Max, trough, crest or all crossings wanted
+ else:  # min, Max, trough, crest or all crossings wanted
t1 = x[index[(start + dist):nn:step]]
T = t1 - t0
@@ -1918,7 +1918,7 @@ class TimeSeries(PlotData):
expect = 1  # reconstruct by expectation? 1=yes 0=no
tol = 0.001  # absolute tolerance of e(g_new-g_old)
- cmvmax = 100; # if number of consecutive missing values (cmv) are longer they
+ cmvmax = 100  # if number of consecutive missing values (cmv) are longer they
# are not used in estimation of g, due to the fact that the
# conditional expectation approaches zero as the length to
# the closest known points increases, see below in the for loop
@@ -1977,7 +1977,7 @@ class TimeSeries(PlotData):
indNaN = np.sort(indNaN)
# initial reconstruction attempt
- # xn(indg,2)=detrendma(xn(indg,2),1500);
+ # xn(indg,2) = detrendma(xn(indg,2),1500);
# [g, test, cmax, irr, g2] = dat2tr(xn(indg,:),def,opt);
# xnt=xn;
# xnt(indg,:)=dat2gaus(xn(indg,:),g);
@@ -2014,7 +2014,7 @@ class TimeSeries(PlotData):
# # [g0 test0 cmax irr g2] = dat2tr(xs,def,opt);
# # [test0 ind0]=sort(test0);
# # end
#
# if 1, #test>test0(end-5),
# # 95# sure the data comes from a non-Gaussian process
# def = olddef; #Non Gaussian process
@@ -2208,7 +2208,6 @@ class TimeSeries(PlotData):
for ix in xrange(nsub):
if nsub > 1:
subplot(nsub, 1, ix)
h_scale = array([tn[ind[0]], tn[ind[-1]]])
ind2 = where((h_scale[0] <= tn2) & (tn2 <= h_scale[1]))[0]
plot(tn[ind] * dT, xn[ind], sym1)
@@ -2216,12 +2215,9 @@ class TimeSeries(PlotData):
plot(tn2[ind2] * dT, xn2[ind2], sym2)
plot(h_scale * dT, [0, 0], 'k-')
#plotbackend.axis([h_scale*dT, v_scale])
for iy in [-2, 2]:
plot(h_scale * dT, iy * sigma * ones(2), ':')
ind = ind + Ns
- # end
plotbackend.xlabel(XlblTxt)
return figs
@@ -2274,7 +2270,6 @@ class TimeSeries(PlotData):
wave_idx = wave_idx[wave_idx > -1]
else:
Nwp[0] = wave_idx[-1] - wave_idx[0] + 1
- # end
Nsub = min(6, Nsub)
Nfig = int(ceil(Nsub / 6))
@@ -2298,7 +2293,6 @@ class TimeSeries(PlotData):
plotbackend.ylabel(
'Wave %d - %d' % (wave_idx[ix],
wave_idx[ix] + Nwp[ix] - 1))
plotbackend.xlabel('Time [sec]')
# wafostamp
return figs

@@ -1,7 +1,7 @@
"""
Extended functions to operate on polynomials
"""
- #-------------------------------------------------------------------------
+ # -------------------------------------------------------------------------
# Name:     polynomial
# Purpose:  Functions to operate on polynomials.
#
@@ -15,8 +15,8 @@
# Created:   30.12.2008
# Copyright: (c) pab 2008
# Licence:   LGPL
- #-------------------------------------------------------------------------
+ # -------------------------------------------------------------------------
- #!/usr/bin/env python
+ # !/usr/bin/env python
from plotbackend import plotbackend as plt
import numpy as np

@ -298,7 +298,8 @@ class arcsine_gen(rv_continuous):
The probability density function for `arcsine` is:: The probability density function for `arcsine` is::
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x))) arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for 0 < x < 1.
for ``0 < x < 1``.
%(example)s %(example)s
@ -1101,13 +1102,12 @@ class exponpow_gen(rv_continuous):
""" """
def _pdf(self, x, b): def _pdf(self, x, b):
xbm1 = x**(b-1.0) return exp(self._logpdf(x, b))
xb = xbm1 * x
return exp(1)*b*xbm1 * exp(xb - exp(xb))
def _logpdf(self, x, b): def _logpdf(self, x, b):
xb = x ** (b - 1.0) * x xb = x**b
return 1 + log(b) + special.xlogy(b - 1.0, x) + xb - exp(xb) f = 1 + log(b) + special.xlogy(b - 1.0, x) + xb - exp(xb)
return f
def _cdf(self, x, b): def _cdf(self, x, b):
return -expm1(-expm1(x ** b)) return -expm1(-expm1(x ** b))
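A hedged sketch of the exponpow refactor above: _pdf now delegates to exp(_logpdf), and the old closed-form density agrees with the new log-based one. Standalone functions are used for illustration only; just numpy and scipy.special are assumed:

    # exponpow: exp(logpdf) reproduces the original closed-form pdf.
    import numpy as np
    from scipy.special import xlogy

    def exponpow_logpdf(x, b):
        xb = x ** b
        return 1.0 + np.log(b) + xlogy(b - 1.0, x) + xb - np.exp(xb)

    x, b = np.linspace(0.1, 2.0, 5), 1.5
    old_pdf = np.exp(1) * b * x ** (b - 1.0) * np.exp(x ** b - np.exp(x ** b))
    assert np.allclose(np.exp(exponpow_logpdf(x, b)), old_pdf)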
@ -1294,13 +1294,13 @@ class f_gen(rv_continuous):
f = f_gen(a=0.0, name='f') f = f_gen(a=0.0, name='f')
# Folded Normal ## Folded Normal
# abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S) ## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
# ##
# note: regress docs have scale parameter correct, but first parameter ## note: regress docs have scale parameter correct, but first parameter
# he gives is a shape parameter A = c * scale ## he gives is a shape parameter A = c * scale
# Half-normal is folded normal with shape-parameter c=0. ## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous): class foldnorm_gen(rv_continuous):
"""A folded normal continuous random variable. """A folded normal continuous random variable.
@ -1527,8 +1527,8 @@ class genpareto_gen(rv_continuous):
genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c) genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
for ``c >= 0`` ``x >= 0``, and defined for ``x >= 0`` if ``c >=0``, and for
for ``c < 0`` ``0 <= x <= -1/c`` ``0 <= x <= -1/c`` if ``c < 0``.
For ``c == 0``, `genpareto` reduces to the exponential For ``c == 0``, `genpareto` reduces to the exponential
distribution, `expon`:: distribution, `expon`::
@ -1691,8 +1691,9 @@ class genpareto_gen(rv_continuous):
for ki, cnk in zip(k, comb(n, k)): for ki, cnk in zip(k, comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki) val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return where(c * n < 1, val * (-1.0 / c) ** n, inf) return where(c * n < 1, val * (-1.0 / c) ** n, inf)
munp = lambda c: __munp(n, c) return _lazywhere(c != 0, (c,),
return _lazywhere(c != 0, (c,), munp, gam(n + 1)) lambda c: __munp(n, c),
gam(n + 1))
def _entropy(self, c): def _entropy(self, c):
return 1. + c return 1. + c
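A rough sketch of the _lazywhere pattern used in _munp above. scipy's helper is private, so a stand-in with the same behaviour is shown; the fill value gamma(n + 1) is the c = 0 (exponential) limit of the moment:

    import numpy as np

    def lazywhere(cond, arrays, f, fillvalue):
        # Evaluate f only where cond is True; use fillvalue elsewhere.
        cond = np.asarray(cond)
        out = np.full(cond.shape, fillvalue, dtype=float)
        masked = [np.asarray(a)[cond] for a in arrays]
        out[cond] = f(*masked)
        return out

    c = np.array([0.0, 0.2, 0.4])
    # First moment of genpareto: 1/(1 - c) for c < 1, and 1 (= gamma(2)) at c = 0.
    print(lazywhere(c != 0, (c,), lambda c: 1.0 / (1.0 - c), 1.0))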
@ -2956,8 +2957,8 @@ class loglaplace_gen(rv_continuous):
----- -----
The probability density function for `loglaplace` is:: The probability density function for `loglaplace` is::
loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1 loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
= c / 2 * x**(-c-1), for x >= 1 = c / 2 * x**(-c-1), for x >= 1
for ``c > 0``. for ``c > 0``.
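A small sketch evaluating the piecewise density quoted above (plain numpy, for illustration only):

    import numpy as np

    def loglaplace_pdf(x, c):
        x = np.asarray(x, dtype=float)
        return np.where(x < 1.0, 0.5 * c * x ** (c - 1.0), 0.5 * c * x ** (-c - 1.0))

    # The two branches meet at x = 1, where both equal c/2.
    print(loglaplace_pdf(np.array([0.5, 1.0, 2.0]), c=2.0))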
@ -3544,16 +3545,16 @@ class lomax_gen(rv_continuous):
return log(c) - (c + 1) * log1p(x) return log(c) - (c + 1) * log1p(x)
def _cdf(self, x, c): def _cdf(self, x, c):
return 1.0-1.0/(1.0+x)**c return -expm1(-c*log1p(x))
def _sf(self, x, c): def _sf(self, x, c):
return 1.0/(1.0+x)**c return exp(-c*log1p(x))
def _logsf(self, x, c): def _logsf(self, x, c):
return -c * log1p(x) return -c * log1p(x)
def _ppf(self, q, c): def _ppf(self, q, c):
return pow(1.0-q, -1.0/c)-1 return expm1(-log1p(-q)/c)
def _stats(self, c): def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk') mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
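Why the log1p/expm1 forms above are preferred, as a hedged numerical sketch (numpy only): near x = 0 the naive CDF suffers catastrophic cancellation, while the rewritten form keeps full precision.

    import numpy as np

    c, x = 2.0, 1e-12
    naive_cdf = 1.0 - 1.0 / (1.0 + x) ** c    # typically loses several significant digits
    stable_cdf = -np.expm1(-c * np.log1p(x))  # form now used in lomax._cdf
    print(naive_cdf)
    print(stable_cdf)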
@ -3877,6 +3878,9 @@ class rayleigh_gen(rv_continuous):
def _ppf(self, q): def _ppf(self, q):
return sqrt(-2 * log1p(-q)) return sqrt(-2 * log1p(-q))
def _isf(self, q):
return sqrt(-2 * log(q))
def _stats(self): def _stats(self):
val = 4 - pi val = 4 - pi
return (np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5, return (np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5,
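A quick sketch of the _isf addition above: for the standard Rayleigh law sf(x) = exp(-x**2/2), so the inverse survival function is sqrt(-2*log(q)) (numpy only; not part of the commit):

    import numpy as np

    q = np.array([0.5, 0.1, 0.01])
    x = np.sqrt(-2.0 * np.log(q))                   # isf(q)
    assert np.allclose(np.exp(-x ** 2 / 2.0), q)    # sf(isf(q)) == q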
@ -161,7 +161,7 @@ class nbinom_gen(rv_discrete):
def _logpmf(self, x, n, p): def _logpmf(self, x, n, p):
coeff = gamln(n + x) - gamln(x + 1) - gamln(n) coeff = gamln(n + x) - gamln(x + 1) - gamln(n)
return coeff + n * log(p) + x * log1p(-p) return coeff + special.xlogy(n, p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p): def _cdf(self, x, n, p):
k = floor(x) k = floor(x)
@ -217,7 +217,7 @@ class geom_gen(rv_discrete):
return np.power(1-p, k-1) * p return np.power(1-p, k-1) * p
def _logpmf(self, k, p): def _logpmf(self, k, p):
return (k - 1) * log1p(-p) + log(p) return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p): def _cdf(self, x, p):
k = floor(x) k = floor(x)
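The xlogy/xlog1py rewrites above avoid nan from 0 * log(0) at the parameter and support boundaries; a short sketch (scipy.special assumed):

    import numpy as np
    from scipy.special import xlogy, xlog1py

    print(0.0 * np.log(0.0))     # nan (0 * -inf), plus a runtime warning
    print(xlogy(0.0, 0.0))       # 0.0
    print(xlog1py(0.0, -1.0))    # 0.0, so geom._logpmf stays finite at k = 1, p = 1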
@ -3,9 +3,12 @@ Created on 17. juli 2010
@author: pab @author: pab
''' '''
import numpy as np #@UnusedImport import numpy as np # @UnusedImport
from numpy import pi, inf #@UnusedImport from numpy import pi, inf # @UnusedImport
from wafo.gaussian import Rind, prbnormtndpc, prbnormndpc, prbnormnd, cdfnorm2d, prbnorm2d #@UnusedImport # @UnusedImport
from wafo.gaussian import (Rind, prbnormtndpc, prbnormndpc, prbnormnd,
cdfnorm2d, prbnorm2d)
def test_rind(): def test_rind():
''' '''
@ -42,7 +45,7 @@ def test_rind():
Compute expectation E( X1^{+}*X2^{+} ) with random Compute expectation E( X1^{+}*X2^{+} ) with random
correlation coefficient,Cov(X1,X2) = rho2. correlation coefficient,Cov(X1,X2) = rho2.
>>> m2 = [0, 0]; >>> m2 = [0, 0]
>>> rho2 = 0.3 #np.random.rand(1) >>> rho2 = 0.3 #np.random.rand(1)
>>> Sc2 = [[1, rho2], [rho2 ,1]] >>> Sc2 = [[1, rho2], [rho2 ,1]]
>>> Blo2 = 0; Bup2 = np.inf; indI2 = [-1, 1] >>> Blo2 = 0; Bup2 = np.inf; indI2 = [-1, 1]
@ -66,11 +69,13 @@ def test_rind():
array([ 0.00013838]) array([ 0.00013838])
array([ 1.00000000e-10]) array([ 1.00000000e-10])
''' '''
def test_prbnormtndpc(): def test_prbnormtndpc():
''' '''
>>> rho2 = np.random.rand(2); >>> rho2 = np.random.rand(2)
>>> a2 = np.zeros(2); >>> a2 = np.zeros(2)
>>> b2 = np.repeat(np.inf,2); >>> b2 = np.repeat(np.inf,2)
>>> [val2,err2, ift2] = prbnormtndpc(rho2,a2,b2) >>> [val2,err2, ift2] = prbnormtndpc(rho2,a2,b2)
>>> g2 = lambda x : 0.25+np.arcsin(x[0]*x[1])/(2*pi) >>> g2 = lambda x : 0.25+np.arcsin(x[0]*x[1])/(2*pi)
>>> E2 = g2(rho2) #% exact value >>> E2 = g2(rho2) #% exact value
@ -86,11 +91,13 @@ def test_prbnormtndpc():
>>> np.abs(E3-val3)<err3 >>> np.abs(E3-val3)<err3
True True
''' '''
def test_prbnormndpc(): def test_prbnormndpc():
''' '''
>>> rho2 = np.random.rand(2); >>> rho2 = np.random.rand(2)
>>> a2 = np.zeros(2); >>> a2 = np.zeros(2);
>>> b2 = np.repeat(np.inf,2); >>> b2 = np.repeat(np.inf,2)
>>> [val2,err2, ift2] = prbnormndpc(rho2,a2,b2) >>> [val2,err2, ift2] = prbnormndpc(rho2,a2,b2)
>>> g2 = lambda x : 0.25+np.arcsin(x[0]*x[1])/(2*pi) >>> g2 = lambda x : 0.25+np.arcsin(x[0]*x[1])/(2*pi)
>>> E2 = g2(rho2) #% exact value >>> E2 = g2(rho2) #% exact value
@ -107,6 +114,7 @@ def test_prbnormndpc():
True True
''' '''
def test_prbnormnd(): def test_prbnormnd():
''' '''
>>> import numpy as np >>> import numpy as np
@ -123,6 +131,8 @@ def test_prbnormnd():
>>> 'val = %2.5f' % val >>> 'val = %2.5f' % val
'val = 0.00195' 'val = 0.00195'
''' '''
def test_cdfnorm2d(): def test_cdfnorm2d():
''' '''
>>> x = np.linspace(-3,3,3) >>> x = np.linspace(-3,3,3)
@ -134,6 +144,7 @@ def test_cdfnorm2d():
[ 1.34987703e-03, 4.99795143e-01, 9.97324055e-01]]) [ 1.34987703e-03, 4.99795143e-01, 9.97324055e-01]])
''' '''
def test_prbnorm2d(): def test_prbnorm2d():
''' '''
>>> a = [-1, -2] >>> a = [-1, -2]
@ -42,64 +42,64 @@ def test_detrendma():
-1.27193089e-01, -1.71915213e-01, -1.85125121e-01, -1.27193089e-01, -1.71915213e-01, -1.85125121e-01,
-1.59745361e-01, -1.03571981e-01, -3.62676515e-02, -1.59745361e-01, -1.03571981e-01, -3.62676515e-02,
1.82219951e-02, 4.09039083e-02, 2.50630186e-02, 1.82219951e-02, 4.09039083e-02, 2.50630186e-02,
-2.11478040e-02, -7.78521440e-02, -1.21116040e-01, -2.11478040e-02, -7.78521440e-02, -1.21116040e-01,
-1.32178923e-01, -1.04689244e-01, -4.71541301e-02, -1.32178923e-01, -1.04689244e-01, -4.71541301e-02,
2.03417510e-02, 7.38826137e-02, 8.95349902e-02, 2.03417510e-02, 7.38826137e-02, 8.95349902e-02,
6.68738432e-02, 1.46828486e-02, -4.68648556e-02, 6.68738432e-02, 1.46828486e-02, -4.68648556e-02,
-9.39871606e-02, -1.08465407e-01, -8.46710629e-02, -9.39871606e-02, -1.08465407e-01, -8.46710629e-02,
-3.17365657e-02, 2.99669288e-02, 7.66864134e-02, -3.17365657e-02, 2.99669288e-02, 7.66864134e-02,
9.04482283e-02, 6.59902473e-02, 1.27914062e-02, 9.04482283e-02, 6.59902473e-02, 1.27914062e-02,
-4.85841870e-02, -9.44185349e-02, -1.06987444e-01, -4.85841870e-02, -9.44185349e-02, -1.06987444e-01,
-8.13964951e-02, -2.74687460e-02, 3.40438793e-02, -8.13964951e-02, -2.74687460e-02, 3.40438793e-02,
7.94643163e-02, 9.13222681e-02, 6.50922520e-02, 7.94643163e-02, 9.13222681e-02, 6.50922520e-02,
1.09390148e-02, -5.02028639e-02, -9.47031411e-02, 1.09390148e-02, -5.02028639e-02, -9.47031411e-02,
-1.05349757e-01, -7.79872833e-02, -2.31196073e-02, -1.05349757e-01, -7.79872833e-02, -2.31196073e-02,
3.81412653e-02, 8.22178144e-02, 9.21605209e-02, 3.81412653e-02, 8.22178144e-02, 9.21605209e-02,
6.41850565e-02, 9.13184690e-03, -5.17149253e-02, 6.41850565e-02, 9.13184690e-03, -5.17149253e-02,
-9.48363260e-02, -1.03549587e-01, -7.44424124e-02, -9.48363260e-02, -1.03549587e-01, -7.44424124e-02,
-1.86890490e-02, 4.22594607e-02, 8.49486437e-02, -1.86890490e-02, 4.22594607e-02, 8.49486437e-02,
9.29666543e-02, 6.32740911e-02, 7.37625254e-03, 9.29666543e-02, 6.32740911e-02, 7.37625254e-03,
-5.31142920e-02, -9.48133620e-02, -1.01584110e-01, -5.31142920e-02, -9.48133620e-02, -1.01584110e-01,
-7.07607748e-02, -1.41768231e-02, 4.63990484e-02, -7.07607748e-02, -1.41768231e-02, 4.63990484e-02,
8.76587937e-02, 9.37446001e-02, 6.23650231e-02, 8.76587937e-02, 9.37446001e-02, 6.23650231e-02,
5.67876495e-03, -5.43947621e-02, -9.46294406e-02, 5.67876495e-03, -5.43947621e-02, -9.46294406e-02,
-9.94504301e-02, -6.69411601e-02, -9.58252265e-03, -9.94504301e-02, -6.69411601e-02, -9.58252265e-03,
5.05608316e-02, 9.03505172e-02, 9.44985623e-02, 5.05608316e-02, 9.03505172e-02, 9.44985623e-02,
6.14637631e-02, 4.04610591e-03, -5.55500040e-02, 6.14637631e-02, 4.04610591e-03, -5.55500040e-02,
-9.42796647e-02, -9.71455674e-02, -6.29822440e-02, -9.42796647e-02, -9.71455674e-02, -6.29822440e-02,
-4.90556961e-03, 5.47458452e-02, 9.30263409e-02, -4.90556961e-03, 5.47458452e-02, 9.30263409e-02,
9.52330253e-02, 6.05764719e-02, 2.48519180e-03, 9.52330253e-02, 6.05764719e-02, 2.48519180e-03,
-5.65735506e-02, -9.37590405e-02, -9.46664506e-02, -5.65735506e-02, -9.37590405e-02, -9.46664506e-02,
-5.88825766e-02, -1.45202622e-04, 5.89553685e-02, -5.88825766e-02, -1.45202622e-04, 5.89553685e-02,
9.56890756e-02, 9.59527629e-02, 5.97095676e-02, 9.56890756e-02, 9.59527629e-02, 5.97095676e-02,
1.00314001e-03, -5.74587921e-02, -9.30624694e-02, 1.00314001e-03, -5.74587921e-02, -9.30624694e-02,
-9.20099048e-02, -5.46405701e-02, 4.69953603e-03, -9.20099048e-02, -5.46405701e-02, 4.69953603e-03,
6.31909369e-02, 9.83418277e-02, 9.66628470e-02, 6.31909369e-02, 9.83418277e-02, 9.66628470e-02,
5.88697331e-02, -3.92724035e-04, -5.81989687e-02, 5.88697331e-02, -3.92724035e-04, -5.81989687e-02,
-9.21847386e-02, -8.91726414e-02, -5.02544862e-02, -9.21847386e-02, -8.91726414e-02, -5.02544862e-02,
9.62981387e-03, 6.74543554e-02, 1.00988010e-01, 9.62981387e-03, 6.74543554e-02, 1.00988010e-01,
9.73686580e-02, 5.80639242e-02, -1.69485946e-03, 9.73686580e-02, 5.80639242e-02, -1.69485946e-03,
-5.87871620e-02, -9.11205115e-02, -8.61512458e-02, -5.87871620e-02, -9.11205115e-02, -8.61512458e-02,
-4.57224228e-02, 1.46470222e-02, 7.17477118e-02, -4.57224228e-02, 1.46470222e-02, 7.17477118e-02,
1.03631355e-01, 9.80758938e-02, 5.72993780e-02, 1.03631355e-01, 9.80758938e-02, 5.72993780e-02,
-2.89550192e-03, -5.92162868e-02, -8.98643173e-02, -2.89550192e-03, -5.92162868e-02, -8.98643173e-02,
-8.29421650e-02, -4.10422999e-02, 1.97527907e-02, -8.29421650e-02, -4.10422999e-02, 1.97527907e-02,
7.60733908e-02, 1.06275925e-01, 9.87905812e-02, 7.60733908e-02, 1.06275925e-01, 9.87905812e-02,
5.65836223e-02, -3.98665495e-03, -5.94790815e-02, 5.65836223e-02, -3.98665495e-03, -5.94790815e-02,
-8.84105398e-02, -7.95416952e-02, -3.62118451e-02, -8.84105398e-02, -7.95416952e-02, -3.62118451e-02,
2.49490024e-02, 8.04340881e-02, 1.08926128e-01, 2.49490024e-02, 8.04340881e-02, 1.08926128e-01,
9.95190863e-02, 5.59244846e-02, -4.96008086e-03, 9.95190863e-02, 5.59244846e-02, -4.96008086e-03,
-5.95680980e-02, -8.67534061e-02, -7.59459673e-02, -5.95680980e-02, -8.67534061e-02, -7.59459673e-02,
-3.12285782e-02, 3.02378095e-02, 8.48328258e-02, -3.12285782e-02, 3.02378095e-02, 8.48328258e-02,
1.11586726e-01, 1.00268126e-01, 5.53301029e-02, 1.11586726e-01, 1.00268126e-01, 5.53301029e-02,
-5.80729079e-03, -5.94756912e-02, -8.48869734e-02, -5.80729079e-03, -5.94756912e-02, -8.48869734e-02,
-7.21509330e-02, -2.60897955e-02, 3.56216499e-02, -7.21509330e-02, -2.60897955e-02, 3.56216499e-02,
8.92729678e-02, 1.14262857e-01, 1.01044781e-01, 8.92729678e-02, 1.14262857e-01, 1.01044781e-01,
5.48089359e-02, -6.51953427e-03, -5.91940075e-02, 5.48089359e-02, -6.51953427e-03, -5.91940075e-02,
-8.28051165e-02, -6.81523491e-02, -2.07925530e-02, -8.28051165e-02, -6.81523491e-02, -2.07925530e-02,
4.11032641e-02, 9.37582360e-02, 1.16960041e-01, 4.11032641e-02, 9.37582360e-02, 1.16960041e-01,
1.13824241e-01, 7.82451609e-02, 2.87461256e-02, 1.13824241e-01, 7.82451609e-02, 2.87461256e-02,
-1.07566250e-02, -2.01779675e-02, 8.98967999e-03, -1.07566250e-02, -2.01779675e-02, 8.98967999e-03,
7.03952281e-02, 1.45278564e-01, 2.09706186e-01, 7.03952281e-02, 1.45278564e-01, 2.09706186e-01,
2.43802139e-01, 2.39414013e-01, 2.03257341e-01, 2.43802139e-01, 2.39414013e-01, 2.03257341e-01,
1.54325635e-01, 1.16564992e-01, 1.09638547e-01, 1.54325635e-01, 1.16564992e-01, 1.09638547e-01,
@ -107,46 +107,46 @@ def test_detrendma():
3.44164010e-01, 3.77073744e-01 3.44164010e-01, 3.77073744e-01
])) ]))
assert_array_almost_equal(tr, array([ assert_array_almost_equal(tr, array([
1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152,
1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152,
1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152,
1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152,
1.11599212, 1.12125245, 1.12643866, 1.13166607, 1.13704477, 1.11599212, 1.12125245, 1.12643866, 1.13166607, 1.13704477,
1.14263723, 1.14843422, 1.15435845, 1.16029443, 1.16613308, 1.14263723, 1.14843422, 1.15435845, 1.16029443, 1.16613308,
1.17181383, 1.17734804, 1.18281471, 1.18833001, 1.19400259, 1.17181383, 1.17734804, 1.18281471, 1.18833001, 1.19400259,
1.19989168, 1.20598434, 1.21220048, 1.21842384, 1.22454684, 1.19989168, 1.20598434, 1.21220048, 1.21842384, 1.22454684,
1.23051218, 1.23633498, 1.24209697, 1.24791509, 1.25389641, 1.23051218, 1.23633498, 1.24209697, 1.24791509, 1.25389641,
1.26009689, 1.26649987, 1.27302256, 1.27954802, 1.28597031, 1.26009689, 1.26649987, 1.27302256, 1.27954802, 1.28597031,
1.29223546, 1.29836228, 1.30443522, 1.31057183, 1.31687751, 1.29223546, 1.29836228, 1.30443522, 1.31057183, 1.31687751,
1.32340488, 1.3301336, 1.33697825, 1.34382132, 1.35055864, 1.32340488, 1.3301336, 1.33697825, 1.34382132, 1.35055864,
1.35713958, 1.36358668, 1.36998697, 1.37645853, 1.38310497, 1.35713958, 1.36358668, 1.36998697, 1.37645853, 1.38310497,
1.38997553, 1.39704621, 1.40422902, 1.41140604, 1.41847493, 1.38997553, 1.39704621, 1.40422902, 1.41140604, 1.41847493,
1.4253885, 1.43217295, 1.43891784, 1.44574164, 1.45274607, 1.4253885, 1.43217295, 1.43891784, 1.44574164, 1.45274607,
1.45997696, 1.46740665, 1.47494469, 1.48247285, 1.48989073, 1.45997696, 1.46740665, 1.47494469, 1.48247285, 1.48989073,
1.49715462, 1.50429437, 1.51140198, 1.51859618, 1.52597672, 1.49715462, 1.50429437, 1.51140198, 1.51859618, 1.52597672,
1.53358594, 1.54139257, 1.5493038, 1.55720119, 1.56498641, 1.53358594, 1.54139257, 1.5493038, 1.55720119, 1.56498641,
1.57261924, 1.58013316, 1.58762252, 1.5952062, 1.60298187, 1.57261924, 1.58013316, 1.58762252, 1.5952062, 1.60298187,
1.61098836, 1.6191908, 1.62749412, 1.63577979, 1.64395163, 1.61098836, 1.6191908, 1.62749412, 1.63577979, 1.64395163,
1.65197298, 1.65988092, 1.66777202, 1.67576523, 1.68395602, 1.65197298, 1.65988092, 1.66777202, 1.67576523, 1.68395602,
1.69237968, 1.70099778, 1.70971307, 1.71840707, 1.72698583, 1.69237968, 1.70099778, 1.70971307, 1.71840707, 1.72698583,
1.73541631, 1.74373911, 1.75205298, 1.76047677, 1.76910369, 1.73541631, 1.74373911, 1.75205298, 1.76047677, 1.76910369,
1.77796544, 1.78702008, 1.79616827, 1.80529169, 1.81429875, 1.77796544, 1.78702008, 1.79616827, 1.80529169, 1.81429875,
1.82316, 1.83191959, 1.84067831, 1.84955481, 1.85863994, 1.82316, 1.83191959, 1.84067831, 1.84955481, 1.85863994,
1.86796178, 1.87747491, 1.88707803, 1.89665308, 1.9061109, 1.86796178, 1.87747491, 1.88707803, 1.89665308, 1.9061109,
1.91542572, 1.92464514, 1.9338719, 1.94322436, 1.9527909, 1.91542572, 1.92464514, 1.9338719, 1.94322436, 1.9527909,
1.96259596, 1.97259069, 1.9826719, 1.99272195, 2.00265419, 1.96259596, 1.97259069, 1.9826719, 1.99272195, 2.00265419,
2.01244653, 2.02215, 2.0318692, 2.04172204, 2.05179437, 2.01244653, 2.02215, 2.0318692, 2.04172204, 2.05179437,
2.06210696, 2.07260759, 2.08319129, 2.09374092, 2.10417247, 2.06210696, 2.07260759, 2.08319129, 2.09374092, 2.10417247,
2.11446752, 2.12468051, 2.13491776, 2.14529665, 2.1559004, 2.11446752, 2.12468051, 2.13491776, 2.14529665, 2.1559004,
2.16674609, 2.17777817, 2.18889002, 2.19996511, 2.21092214, 2.16674609, 2.17777817, 2.18889002, 2.19996511, 2.21092214,
2.22174641, 2.23249567, 2.24327791, 2.25420982, 2.26537192, 2.22174641, 2.23249567, 2.24327791, 2.25420982, 2.26537192,
2.2767776, 2.28836802, 2.30003501, 2.3116628, 2.32317284, 2.2767776, 2.28836802, 2.30003501, 2.3116628, 2.32317284,
2.33455419, 2.34586786, 2.35722337, 2.36873665, 2.38048542, 2.33455419, 2.34586786, 2.35722337, 2.36873665, 2.38048542,
2.39247934, 2.4046564, 2.41690694, 2.42911606, 2.44120808, 2.39247934, 2.4046564, 2.41690694, 2.42911606, 2.44120808,
2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808,
2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808,
2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808,
2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808])) 2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808]))
def test_findcross_and_ecross(): def test_findcross_and_ecross():
@ -159,8 +159,9 @@ def test_findcross_and_ecross():
assert_array_equal(ind, np.array([9, 25, 80, 97, 151, 168, 223, 239])) assert_array_equal(ind, np.array([9, 25, 80, 97, 151, 168, 223, 239]))
t0 = ecross(t, x, ind, 0.75) t0 = ecross(t, x, ind, 0.75)
assert_array_almost_equal(t0, np.array([0.84910514, 2.2933879, 7.13205663, assert_array_almost_equal(t0, np.array([0.84910514, 2.2933879, 7.13205663,
8.57630119, 13.41484739, 14.85909194, 8.57630119, 13.41484739,
19.69776067, 21.14204343])) 14.85909194,
19.69776067, 21.14204343]))
def test_findextrema(): def test_findextrema():
@ -292,14 +293,14 @@ def test_findoutliers():
def test_hygfz(): def test_hygfz():
#y = hyp2f1_taylor(-1, -4, 1, .9) # y = hyp2f1_taylor(-1, -4, 1, .9)
assert_equal(4.6, hygfz(-1, -4, 1, .9)) assert_equal(4.6, hygfz(-1, -4, 1, .9))
assert_almost_equal(1.0464328112173522, hygfz(0.1, 0.2, 0.3, 0.5)) assert_almost_equal(1.0464328112173522, hygfz(0.1, 0.2, 0.3, 0.5))
assert_almost_equal(1.2027034401166194, hygfz(0.1, 0.2, 0.3, 0.95)) assert_almost_equal(1.2027034401166194, hygfz(0.1, 0.2, 0.3, 0.95))
#assert_equal(1.661006238211309e-07, hygfz(5, -300, 10, 0.5)) # assert_equal(1.661006238211309e-07, hygfz(5, -300, 10, 0.5))
#assert_equal(0.118311386286, hygfz(0.5, -99.0, 1.5, 0.5625)) # assert_equal(0.118311386286, hygfz(0.5, -99.0, 1.5, 0.5625))
#assert_equal(0.0965606007742, hygfz(0.5, -149.0, 1.5, 0.5625)) # assert_equal(0.0965606007742, hygfz(0.5, -149.0, 1.5, 0.5625))
#assert_equal(0.49234384000963544 + 0.60513406166123973j, # assert_equal(0.49234384000963544 + 0.60513406166123973j,
# hygfz(1, 1, 4, 3 + 4j)) # hygfz(1, 1, 4, 3 + 4j))
@ -337,8 +338,8 @@ def test_argsreduce():
def test_stirlerr(): def test_stirlerr():
assert_array_almost_equal(stirlerr(range(5)), assert_array_almost_equal(stirlerr(range(5)),
np.array([np.inf, 0.08106147, 0.0413407, 0.02767793, np.array([np.inf, 0.08106147, 0.0413407,
0.02079067])) 0.02767793, 0.02079067]))
def test_getshipchar(): def test_getshipchar():
@ -362,7 +363,8 @@ def test_getshipchar():
def test_betaloge(): def test_betaloge():
assert_array_almost_equal(betaloge(3, arange(4)), assert_array_almost_equal(betaloge(3, arange(4)),
np.array([np.inf, -1.09861229, -2.48490665, -3.40119738])) np.array([np.inf, -1.09861229, -2.48490665,
-3.40119738]))
def test_gravity(): def test_gravity():
@ -4,8 +4,9 @@ Created on 5. aug. 2010
@author: pab @author: pab
""" """
import wafo.data import wafo.data # @UnusedImport
import numpy as np import numpy as np # @UnusedImport
def test_timeseries(): def test_timeseries():
''' '''
@ -29,6 +30,8 @@ def test_timeseries():
array([ 0.22368637, 0.20838473, 0.17110733, 0.12237803, 0.07024054, array([ 0.22368637, 0.20838473, 0.17110733, 0.12237803, 0.07024054,
0.02064859, -0.02218831, -0.0555993 , -0.07859847, -0.09166187]) 0.02064859, -0.02218831, -0.0555993 , -0.07859847, -0.09166187])
''' '''
def test_timeseries_trdata(): def test_timeseries_trdata():
''' '''
>>> import wafo.spectrum.models as sm >>> import wafo.spectrum.models as sm
@ -53,6 +56,6 @@ def test_timeseries_trdata():
1.0 1.0
''' '''
if __name__=='__main__': if __name__ == '__main__':
import doctest import doctest
doctest.testmod() doctest.testmod()
@ -1,7 +1,6 @@
''' '''
''' '''
from __future__ import division from __future__ import division
#import numpy as np
from numpy import trapz, sqrt, linspace # @UnresolvedImport from numpy import trapz, sqrt, linspace # @UnresolvedImport
from wafo.containers import PlotData from wafo.containers import PlotData
@ -188,7 +187,7 @@ class TrData(PlotData, TrCommon):
self.sigma = kwds.get('sigma', None) self.sigma = kwds.get('sigma', None)
if self.mean is None: if self.mean is None:
#self.mean = np.mean(self.args) # # self.mean = np.mean(self.args)
self.mean = self.gauss2dat(self.ymean) self.mean = self.gauss2dat(self.ymean)
if self.sigma is None: if self.sigma is None:
yp = self.ymean + self.ysigma yp = self.ymean + self.ysigma
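A hedged sketch of what the sigma default appears to do here (an assumption drawn from the surrounding lines, not a verbatim copy of the class): ymean +/- ysigma is mapped through gauss2dat and half the spread is taken as sigma.

    import numpy as np

    def estimate_sigma(gauss2dat, ymean=0.0, ysigma=1.0):
        # Hypothetical helper mirroring the default-sigma logic sketched above.
        yp, ym = ymean + ysigma, ymean - ysigma
        return (gauss2dat(yp) - gauss2dat(ym)) / 2.0

    # For a linear transform x = 2 + 0.5 * y this recovers sigma = 0.5 exactly.
    print(estimate_sigma(lambda y: 2.0 + 0.5 * y))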
@ -207,9 +206,11 @@ class TrData(PlotData, TrCommon):
def _dat2gauss(self, x, *xi): def _dat2gauss(self, x, *xi):
return tranproc(self.args, self.data, x, *xi) return tranproc(self.args, self.data, x, *xi)
class EstimateTransform(object): class EstimateTransform(object):
pass pass
def main(): def main():
pass pass