pep8 + added some updates from scipy.stats to wafo.stats

master
Per.Andreas.Brodtkorb 10 years ago
parent d308357c5b
commit 6aec932677

@ -19,7 +19,7 @@ import warnings
import numpy as np
from numpy import (zeros, ones, sqrt, inf, where, nan,
atleast_1d, hstack, r_, linspace, flatnonzero, size,
isnan, finfo, diag, ceil, floor, random, pi)
isnan, finfo, diag, ceil, random, pi)
from numpy.fft import fft
from numpy.random import randn
import scipy.interpolate as interpolate
@ -33,13 +33,13 @@ import wafo.spectrum as _wafospec
from scipy.sparse.linalg.dsolve.linsolve import spsolve
from scipy.sparse.base import issparse
from scipy.signal.windows import parzen
#_wafospec = JITImport('wafo.spectrum')
# _wafospec = JITImport('wafo.spectrum')
__all__ = ['CovData1D']
def _set_seed(iseed):
if iseed != None:
if iseed is not None:
try:
random.set_state(iseed)
except:
@ -385,7 +385,7 @@ class CovData1D(PlotData):
m2 = 2 * n - 1
nfft = 2 ** nextpow2(max(m2, 2 * ns))
acf = r_[acf, zeros((nfft - m2, 1)), acf[-1:0:-1, :]]
#warnings,warn('I am now assuming that ACF(k)=0 for k>MAXLAG.')
# warnings,warn('I am now assuming that ACF(k)=0 for k>MAXLAG.')
else: # ACF(n)==0
m2 = 2 * n - 2
nfft = 2 ** nextpow2(max(m2, 2 * ns))
@ -397,10 +397,10 @@ class CovData1D(PlotData):
I = S.argmax()
k = flatnonzero(S < 0)
if k.size > 0:
#disp('Warning: Not able to construct a nonnegative circulant ')
#disp('vector from the ACF. Apply the parzen windowfunction ')
#disp('to the ACF in order to avoid this.')
#disp('The returned result is now only an approximation.')
_msg = '''
Not able to construct a nonnegative circulant vector from the ACF.
Apply the parzen window function to the ACF in order to avoid this.
The returned result is now only an approximation.'''
# truncating negative values to zero to ensure that
# this noise is not added to the simulated timeseries
@ -409,10 +409,10 @@ class CovData1D(PlotData):
ix = flatnonzero(k > 2 * I)
if ix.size > 0:
# truncating all oscillating values above 2 times the peak
# frequency to zero to ensure that
# that high frequency noise is not added to
# the simulated timeseries.
# truncating all oscillating values above 2 times the peak
# frequency to zero to ensure that high frequency noise is not
# added to the simulated timeseries.
ix0 = k[ix[0]]
S[ix0:-ix0] = 0.0
@ -429,7 +429,7 @@ class CovData1D(PlotData):
cases2 = int(ceil(cases / 2))
# Generate standard normal random numbers for the simulations
#randn = np.random.randn
# randn = np.random.randn
epsi = randn(nfft, cases2) + 1j * randn(nfft, cases2)
Ssqr = sqrt(S / (nfft)) # sqrt(S(wn)*dw )
ephat = epsi * Ssqr # [:,np.newaxis]
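The three lines above are the core of the FFT-based (circulant-embedding) simulation: the ACF is embedded in a circulant vector, its FFT gives the nonnegative weights S, and complex white noise scaled by sqrt(S/nfft) is transformed back into Gaussian samples with the prescribed covariance. A minimal standalone sketch of the same idea (toy exponential ACF; all names below are illustrative only, not part of CovData1D):

import numpy as np

def sim_from_acf(acf, ns, cases=1, rng=np.random.default_rng(0)):
    # acf: one-sided autocovariance r[0], ..., r[n-1]; ns: length of each realisation
    n = len(acf)
    m2 = 2 * n - 2
    nfft = 2 ** int(np.ceil(np.log2(max(m2, 2 * ns))))
    # circulant embedding: [r0, r1, ..., r_{n-1}, 0, ..., 0, r_{n-1}, ..., r1]
    circ = np.r_[acf, np.zeros(nfft - m2 - 1), acf[-1:0:-1]]
    S = np.fft.fft(circ).real
    S[S < 0] = 0.0   # truncate negative values; result is then an approximation
    eps = rng.standard_normal((nfft, cases)) + 1j * rng.standard_normal((nfft, cases))
    ephat = eps * np.sqrt(S / nfft)[:, None]
    return np.fft.fft(ephat, axis=0).real[:ns]

r = np.exp(-0.1 * np.arange(64))       # toy exponential ACF, purely illustrative
x = sim_from_acf(r, ns=200, cases=5)   # x.shape == (200, 5)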
@ -573,7 +573,7 @@ class CovData1D(PlotData):
num_x = len(x)
num_acf = len(acf)
if not i_unknown is None:
if i_unknown is not None:
x[i_unknown] = nan
i_unknown = flatnonzero(isnan(x))
num_unknown = len(i_unknown)
@ -625,7 +625,8 @@ class CovData1D(PlotData):
Sigma = toeplitz(hstack((acf, zeros(Nsig - num_acf))))
overlap = int(Nsig / 4)
# indices to the points used
idx = r_[0:Nsig] + max(0, min(i_unknown[0] - overlap, num_x - Nsig))
idx = r_[0:Nsig] + max(0, min(i_unknown[0] - overlap,
num_x - Nsig))
mask_unknown = zeros(num_x, dtype=bool)
# temporary storage of indices to missing points
mask_unknown[i_unknown] = True
@ -668,7 +669,7 @@ class CovData1D(PlotData):
# removing indices to data which has been simulated
mask_unknown[idx[:-overlap]] = False
# data we want to simulate once more
nw = sum(mask_unknown[idx[-overlap:]] == True)
nw = sum(mask_unknown[idx[-overlap:]])
num_restored += ns - nw # update # points simulated so far
idx = self._update_window(idx, i_unknown, num_x, num_acf,
@ -716,10 +717,11 @@ def main():
inds = np.hstack((21 + np.arange(20),
1000 + np.arange(20),
1024 * 4 - 21 + np.arange(20)))
sample, mu1o, mu1o_std = R.simcond(x[:, 1], method='approx', i_unknown=inds)
sample, mu1o, mu1o_std = R.simcond(x[:, 1], method='approx',
i_unknown=inds)
import matplotlib.pyplot as plt
#inds = np.atleast_2d(inds).reshape((-1,1))
# inds = np.atleast_2d(inds).reshape((-1,1))
plt.plot(x[:, 1], 'k.', label='observed values')
plt.plot(inds, mu1o, '*', label='mu1o')
plt.plot(inds, sample.ravel(), 'r+', label='samples')

@ -1,6 +1,7 @@
from numpy import pi, r_, minimum, maximum, atleast_1d, atleast_2d, mod, ones, floor, \
random, eye, nonzero, where, repeat, sqrt, exp, inf, diag, zeros, sin, arcsin, nan #@UnresolvedImport
from numpy import triu #@UnresolvedImport
from numpy import (pi, r_, minimum, maximum, atleast_1d, atleast_2d, mod, ones,
floor, random, eye, nonzero, where, repeat, sqrt, exp, inf,
diag, zeros, sin, arcsin, nan) # @UnresolvedImport
from numpy import triu # @UnresolvedImport
from scipy.special import ndtr as cdfnorm, ndtri as invnorm
from scipy.special import erfc
from wafo import mvn
@ -10,10 +11,13 @@ import wafo.rindmod as rindmod
import warnings
from wafo.misc import common_shape
__all__ = ['Rind', 'rindmod', 'mvnprdmod', 'mvn', 'cdflomax' , 'prbnormtndpc',
'prbnormndpc', 'prbnormnd', 'cdfnorm2d', 'prbnorm2d','cdfnorm','invnorm',
'test_docstring']
__all__ = ['Rind', 'rindmod', 'mvnprdmod', 'mvn', 'cdflomax', 'prbnormtndpc',
'prbnormndpc', 'prbnormnd', 'cdfnorm2d', 'prbnorm2d', 'cdfnorm',
'invnorm', 'test_docstring']
class Rind(object):
'''
RIND Computes multivariate normal expectations
@ -24,7 +28,8 @@ class Rind(object):
m : array-like, size Ntdc
expectation of X=[Xt,Xd,Xc]
Blo, Bup : array-like, shape Mb x Nb
Lower and upper barriers used to compute the integration limits, Hlo and Hup, respectively.
Lower and upper barriers used to compute the integration limits,
Hlo and Hup, respectively.
indI : array-like, length Ni
vector of indices to the different barriers in the indicator function.
(NB! restriction indI(1)=-1, indI(NI)=Nt+Nd, Ni = Nb+1)
@ -50,7 +55,7 @@ class Rind(object):
"Jacobian" = |X(Nt+1)*...*X(Nt+Nd)|=|Xd(1)*Xd(2)..Xd(Nd)|
"condition" = Xc=xc(:,ix), ix=1,...,Nx.
X = [Xt, Xd, Xc], a stochastic vector of Multivariate Gaussian
variables where Xt,Xd and Xc have the length Nt,Nd and Nc, respectively.
variables where Xt,Xd and Xc have the length Nt,Nd and Nc, respectively
(Recommended limitations Nx,Nt<=100, Nd<=6 and Nc<=10)
Multivariate probability is computed if Nd = 0.
@ -126,10 +131,10 @@ class Rind(object):
Per A. Brodtkorb (2006)
"Evaluating Nearly Singular Multinormal Expectations with Application to
Wave Distributions",
Methodology And Computing In Applied Probability, Volume 8, Number 1, pp. 65-91(27)
Methodology And Computing In Applied Probability, Volume 8, Number 1,
pp. 65-91(27)
'''
def __init__(self, **kwds):
'''
Parameters
@ -149,25 +154,28 @@ class Rind(object):
scales the conditional probability density, i.e.,
f_{Xc} = exp(-0.5*Xc*inv(Sxc)*Xc + XcScale) (default XcScale=0)
abseps, releps : real scalars, optional
absolute and relative error tolerance. (default abseps=0, releps=1e-3)
absolute and relative error tolerance.
(default abseps=0, releps=1e-3)
coveps : real scalar, optional
error tolerance in Cholesky factorization (default 1e-13)
maxpts, minpts : scalar integers, optional
maximum and minimum number of function values allowed. The parameter,
maxpts, can be used to limit the time. A sensible strategy is to start
with MAXPTS = 1000*N, and then increase MAXPTS if ERROR is too large.
maximum and minimum number of function values allowed. The
parameter, maxpts, can be used to limit the time. A sensible
strategy is to start with MAXPTS = 1000*N, and then increase MAXPTS
if ERROR is too large.
(Only for METHOD~=0) (default maxpts=40000, minpts=0)
seed : scalar integer, optional
seed to the random generator used in the integrations
(Only for METHOD~=0)(default floor(rand*1e9))
nit : scalar integer, optional
maximum number of Xt variables to integrate. This parameter can be used
to limit the time. If NIT is less than the rank of the covariance matrix,
the returned result is a upper bound for the true value of the integral.
(default 1000)
maximum number of Xt variables to integrate. This parameter can be
used to limit the time. If NIT is less than the rank of the
covariance matrix, the returned result is an upper bound for the
true value of the integral. (default 1000)
xcutoff : real scalar, optional
cut off value where the marginal normal distribution is truncated.
(Depends on requested accuracy. A value between 4 and 5 is reasonable.)
(Depends on requested accuracy. A value between 4 and 5 is
reasonable.)
xsplit : real scalar
parameter controlling performance of quadrature integration:
if Hup>=xCutOff AND Hlo<-XSPLIT OR
@ -187,8 +195,8 @@ class Rind(object):
XCUTOFF, MAXPTS and QUADNO will be set according to
INITOPTIONS.
nc1c2 : scalar integer, optional
number of times to use the regression equation to restrict integration
area. Nc1c2 = 1,2 is recommended. (default 2)
number of times to use the regression equation to restrict
integration area. Nc1c2 = 1,2 is recommended. (default 2)
(note: works only for method >0)
'''
self.method = 3
@ -263,7 +271,7 @@ class Rind(object):
self.releps = min(self.abseps, 1.0e-2)
if self.method == 0 :
if self.method == 0:
# This gives approximately the same accuracy as when using
# RINDDND and RINDNIT
# xCutOff= MIN(MAX(xCutOff+0.5d0,4.d0),5.d0)
@ -278,8 +286,8 @@ class Rind(object):
self.nc1c2 = max(1, self.nc1c2)
xcut = abs(invnorm(trunc_error / (self.nc1c2 * 2)))
self.xcutoff = max(min(xcut, 8.5), 1.2)
#self.abseps = max(self.abseps- truncError,0);
#self.releps = max(self.releps- truncError,0);
# self.abseps = max(self.abseps- truncError,0);
# self.releps = max(self.releps- truncError,0);
if self.method > 0:
names = ['method', 'xcscale', 'abseps', 'releps', 'coveps',
@ -288,7 +296,7 @@ class Rind(object):
constants = [getattr(self, name) for name in names]
constants[0] = mod(constants[0], 10)
rindmod.set_constants(*constants) #@UndefinedVariable
rindmod.set_constants(*constants) # @UndefinedVariable
def __call__(self, cov, m, ab, bb, indI=None, xc=None, nt=None, **kwds):
if any(kwds):
@ -317,7 +325,7 @@ class Rind(object):
Ex, indI = atleast_1d(m, indI)
if self.seed is None:
seed = int(floor(random.rand(1) * 1e10)) #@UndefinedVariable
seed = int(floor(random.rand(1) * 1e10)) # @UndefinedVariable
else:
seed = int(self.seed)
@ -336,7 +344,9 @@ class Rind(object):
Bup[0, ind] = minimum(Bup[0, ind], infinity * dev[indI[ind + 1]])
Blo[0, ind] = maximum(Blo[0, ind], -infinity * dev[indI[ind + 1]])
ind2 = indI + 1
return rindmod.rind(BIG, Ex, xc, nt, ind2, Blo, Bup, infin, seed) #@UndefinedVariable
return rindmod.rind(BIG, Ex, xc, nt, ind2, Blo, Bup, infin, seed) # @UndefinedVariable @IgnorePep8
def test_rind():
''' Small test function
@ -356,7 +366,7 @@ def test_rind():
A = repeat(Blo, n)
B = repeat(Bup, n) # Integration limits
E1 = rind(triu(Sc), m, A, B) #same as E0
_E1 = rind(triu(Sc), m, A, B) # same as E0
xc = zeros((0, 1))
infinity = 37
@ -365,7 +375,8 @@ def test_rind():
Bup, Blo = atleast_2d(Bup, Blo)
Bup[0, ind] = minimum(Bup[0, ind], infinity * dev[indI[ind + 1]])
Blo[0, ind] = maximum(Blo[0, ind], -infinity * dev[indI[ind + 1]])
E3 = rind(Sc, m, Blo, Bup, indI, xc, nt=1)
_E3 = rind(Sc, m, Blo, Bup, indI, xc, nt=1)
def cdflomax(x, alpha, m0):
'''
@ -423,9 +434,10 @@ def cdflomax(x, alpha, m0):
c2 = alpha * c1
return cdfnorm(c1) - alpha * exp(-x ** 2 / 2 / m0) * cdfnorm(c2)
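The returned expression is the CDF of local maxima evaluated through the normal CDF. A small standalone sketch (the definition of c1 is not shown in this hunk, so the line below is an assumed reconstruction of the standardised level):

import numpy as np
from scipy.special import ndtr as cdfnorm   # same alias used in wafo.gaussian

def cdflomax_sketch(x, alpha, m0):
    # hypothetical reconstruction of the standardised level used by cdflomax
    c1 = x / (np.sqrt(m0) * np.sqrt(1.0 - alpha ** 2))
    c2 = alpha * c1
    return cdfnorm(c1) - alpha * np.exp(-x ** 2 / 2 / m0) * cdfnorm(c2)

x = np.linspace(-3, 3, 7)
print(cdflomax_sketch(x, alpha=0.5, m0=1.0))   # increases monotonically from ~0 to ~1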
def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
'''
Return Multivariate normal or T probability with product correlation structure.
Return Multivariate normal or T probability with product correlation.
Parameters
----------
@ -435,13 +447,14 @@ def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
where -1 < rho[i] < 1
a,b : array-like
vector of lower and upper integration limits, respectively.
Note: any values greater the 37 in magnitude, are considered as infinite values.
Note: any values greater than 37 in magnitude are considered as
infinite values.
D : array-like
vector of means (default zeros(size(rho)))
df = Degrees of freedom, NDF<=0 gives normal probabilities (default)
abseps = absolute error tolerance. (default 1e-4)
IERC = 1 if strict error control based on fourth derivative
0 if intuitive error control based on halving the intervals (default)
0 if error control based on halving the intervals (default)
HNC = start interval width of simpson rule (default 0.24)
Returns
@ -453,7 +466,7 @@ def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
1, if N > 1000 or N < 1.
2, IF any abs(rho)>=1
4, if ANY(b(I)<=A(i))
5, if number of terms computed exceeds maximum number of evaluation points
5, if number of terms exceeds maximum number of evaluation points
6, if fault occurs in normal subroutines
7, if subintervals are too narrow or too many
8, if bounds exceeds abseps
@ -465,12 +478,12 @@ def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
Example:
--------
>>> import wafo.gaussian as wg
>>> rho2 = np.random.rand(2);
>>> a2 = np.zeros(2);
>>> b2 = np.repeat(np.inf,2);
>>> rho2 = np.random.rand(2)
>>> a2 = np.zeros(2)
>>> b2 = np.repeat(np.inf,2)
>>> [val2,err2, ift2] = wg.prbnormtndpc(rho2,a2,b2)
>>> g2 = lambda x : 0.25+np.arcsin(x[0]*x[1])/(2*pi)
>>> E2 = g2(rho2) #% exact value
>>> E2 = g2(rho2) # exact value
>>> np.abs(E2-val2)<err2
True
@ -500,9 +513,12 @@ def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
# Make sure integration limits are finite
A = np.clip(a - D, -100, 100)
B = np.clip(b - D, -100, 100)
return mvnprdmod.prbnormtndpc(rho, A, B, df, abseps, IERC, HNC) #@UndefinedVariable
def prbnormndpc(rho, a, b, abserr=1e-4, relerr=1e-4, usesimpson=True, usebreakpoints=False):
return mvnprdmod.prbnormtndpc(rho, A, B, df, abseps, IERC, HNC) # @UndefinedVariable @IgnorePep8
def prbnormndpc(rho, a, b, abserr=1e-4, relerr=1e-4, usesimpson=True,
usebreakpoints=False):
'''
Return Multivariate Normal probabilities with product correlation
@ -527,9 +543,9 @@ def prbnormndpc(rho, a, b, abserr=1e-4, relerr=1e-4, usesimpson=True, usebreakpo
Example:
-------
>>> import wafo.gaussian as wg
>>> rho2 = np.random.rand(2);
>>> a2 = np.zeros(2);
>>> b2 = np.repeat(np.inf,2);
>>> rho2 = np.random.rand(2)
>>> a2 = np.zeros(2)
>>> b2 = np.repeat(np.inf,2)
>>> [val2,err2, ift2] = wg.prbnormndpc(rho2,a2,b2)
>>> g2 = lambda x : 0.25+np.arcsin(x[0]*x[1])/(2*pi)
>>> E2 = g2(rho2) #% exact value
@ -561,10 +577,11 @@ def prbnormndpc(rho, a, b, abserr=1e-4, relerr=1e-4, usesimpson=True, usebreakpo
'''
# Call fortran implementation
val, err, ier = mvnprdmod.prbnormndpc(rho, a, b, abserr, relerr, usebreakpoints, usesimpson); #@UndefinedVariable
val, err, ier = mvnprdmod.prbnormndpc(rho, a, b, abserr, relerr, usebreakpoints, usesimpson) # @UndefinedVariable @IgnorePep8
if ier > 0:
warnings.warn('Abnormal termination ier = %d\n\n%s' % (ier, _ERRORMESSAGE[ier]))
warnings.warn('Abnormal termination ier = %d\n\n%s' %
(ier, _ERRORMESSAGE[ier]))
return val, err, ier
_ERRORMESSAGE = {}
@ -577,22 +594,26 @@ _ERRORMESSAGE[1] = '''
determine the integration difficulties. if the position of a local
difficulty can be determined (i.e. singularity discontinuity within
the interval), it should be supplied to the routine as an element of
the vector points. If necessary an appropriate special-purpose integrator
must be used, which is designed for handling the type of difficulty involved.
the vector points. If necessary an appropriate special-purpose
integrator must be used, which is designed for handling the type of
difficulty involved.
'''
_ERRORMESSAGE[2] = '''
the occurrence of roundoff error is detected, which prevents the requested
tolerance from being achieved. The error may be under-estimated.'''
_ERRORMESSAGE[3] = '''
Extremely bad integrand behaviour occurs at some points of the integration interval.'''
Extremely bad integrand behaviour occurs at some points of the integration
interval.'''
_ERRORMESSAGE[4] = '''
The algorithm does not converge. Roundoff error is detected in the extrapolation table.
It is presumed that the requested tolerance cannot be achieved, and that
the returned result is the best which can be obtained.'''
The algorithm does not converge. Roundoff error is detected in the
extrapolation table. It is presumed that the requested tolerance cannot be
achieved, and that the returned result is the best which can be obtained.
'''
_ERRORMESSAGE[5] = '''
The integral is probably divergent, or slowly convergent.
It must be noted that divergence can occur with any other value of ier>0.'''
It must be noted that divergence can occur with any other value of ier>0.
'''
_ERRORMESSAGE[6] = '''the input is invalid because:
1) npts2 < 2
2) break points are specified outside the integration range
@ -612,13 +633,12 @@ def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
ABSEPS = absolute error tolerance.
RELEPS = relative error tolerance.
MAXPTS = maximum number of function values allowed. This
parameter can be used to limit the time. A sensible
strategy is to start with MAXPTS = 1000*N, and then
increase MAXPTS if ERROR is too large.
parameter can be used to limit the time. A sensible strategy is to
start with MAXPTS = 1000*N, and then increase MAXPTS if ERROR is too
large.
METHOD = integer defining the integration method
-1 KRBVRC randomized Korobov rules for the first 20
variables, randomized Richtmeyer rules for the rest,
NMAX = 500
-1 KRBVRC randomized Korobov rules for the first 20 variables,
randomized Richtmeyer rules for the rest, NMAX = 500
0 KRBVRC, NMAX = 100 (default)
1 SADAPT Subregion Adaptive integration method, NMAX = 20
2 KROBOV Randomized KOROBOV rules, NMAX = 100
@ -667,7 +687,6 @@ def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
prbnormndpc, Rind
'''
m, n = correl.shape
Na = len(a)
Nb = len(b)
@ -677,7 +696,7 @@ def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
if maxpts is None:
maxpts = 1000 * n
maxpts = max(round(maxpts), 10 * n);
maxpts = max(round(maxpts), 10 * n)
# % array of correlation coefficients; the correlation
# % coefficient in row I column J of the correlation matrix
@ -692,16 +711,16 @@ def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
A = np.clip(a, -100, 100)
B = np.clip(b, -100, 100)
ix = np.where(np.triu(np.ones((m, m)), 1) != 0)
L = correl[ix].ravel() #% return only off diagonal elements
L = correl[ix].ravel() # % return only off diagonal elements
infinity = 37
infin = np.repeat(2, n) - (B > infinity) - 2 * (A < -infinity)
err, val, inform = mvn.mvndst(A, B, infin, L, maxpts, abseps, releps) #@UndefinedVariable
err, val, inform = mvn.mvndst(A, B, infin, L, maxpts, abseps, releps) # @UndefinedVariable @IgnorePep8
return val, err, inform
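The `infin` vector built above encodes, per dimension, which integration limits are effectively infinite, using the usual MVNDST convention (2 = both limits finite, 1 = only the lower limit finite, 0 = only the upper limit finite, -1 = neither). A small sketch of how the arithmetic produces those codes:

import numpy as np

infinity = 37
a = np.array([-50.0, -1.0, -1.0, -50.0])   # lower limits (illustrative)
b = np.array([50.0, 50.0, 1.0, 1.0])       # upper limits (illustrative)
A = np.clip(a, -100, 100)
B = np.clip(b, -100, 100)
# 2: both limits finite, 1: only lower finite, 0: only upper finite, -1: neither
infin = np.repeat(2, len(a)) - (B > infinity) - 2 * (A < -infinity)
print(infin)   # [-1  1  2  0]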
#CALL the mexroutine
# CALL the mexroutine
# t0 = clock;
# if ((method==0) && (n<=100)),
# %NMAX = 100
@ -715,26 +734,30 @@ def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
# exTime = etime(clock,t0);
# '
#% gauss legendre points and weights, n = 6
_W6 = [ 0.1713244923791705e+00, 0.3607615730481384e+00, 0.4679139345726904e+00]
_X6 = [-0.9324695142031522e+00, -0.6612093864662647e+00, -0.2386191860831970e+00]
#% gauss legendre points and weights, n = 12
_W12 = [ 0.4717533638651177e-01, 0.1069393259953183e+00, 0.1600783285433464e+00,
# gauss legendre points and weights, n = 6
_W6 = [0.1713244923791705e+00, 0.3607615730481384e+00, 0.4679139345726904e+00]
_X6 = [-0.9324695142031522e+00, -0.6612093864662647e+00,
-0.2386191860831970e+00]
# gauss legendre points and weights, n = 12
_W12 = [0.4717533638651177e-01, 0.1069393259953183e+00, 0.1600783285433464e+00,
0.2031674267230659e+00, 0.2334925365383547e+00, 0.2491470458134029e+00]
_X12 = [ -0.9815606342467191e+00, -0.9041172563704750e+00, -0.7699026741943050e+00,
- 0.5873179542866171e+00, -0.3678314989981802e+00, -0.1252334085114692e+00]
#% gauss legendre points and weights, n = 20
_W20 = [ 0.1761400713915212e-01, 0.4060142980038694e-01,
_X12 = [-0.9815606342467191e+00, -0.9041172563704750e+00,
-0.7699026741943050e+00, -0.5873179542866171e+00,
-0.3678314989981802e+00, -0.1252334085114692e+00]
# gauss legendre points and weights, n = 20
_W20 = [0.1761400713915212e-01, 0.4060142980038694e-01,
0.6267204833410906e-01, 0.8327674157670475e-01,
0.1019301198172404e+00, 0.1181945319615184e+00,
0.1316886384491766e+00, 0.1420961093183821e+00,
0.1491729864726037e+00, 0.1527533871307259e+00]
_X20 = [ -0.9931285991850949e+00, -0.9639719272779138e+00,
_X20 = [-0.9931285991850949e+00, -0.9639719272779138e+00,
- 0.9122344282513259e+00, -0.8391169718222188e+00,
- 0.7463319064601508e+00, -0.6360536807265150e+00,
- 0.5108670019508271e+00, -0.3737060887154196e+00,
- 0.2277858511416451e+00, -0.7652652113349733e-01]
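The `_W6/_X6`, `_W12/_X12` and `_W20/_X20` tables above are Gauss-Legendre weights and the negative-half nodes; they can be regenerated for checking with numpy (a sketch, not part of the module):

import numpy as np

for n in (6, 12, 20):
    x, w = np.polynomial.legendre.leggauss(n)
    neg = x < 0
    # only the negative nodes and their weights are tabulated above; the
    # positive half is recovered by the `for sign in -1, 1` loops in cdfnorm2d
    print(n, x[neg], w[neg])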
def cdfnorm2d(b1, b2, r):
'''
Returns Bivariate Normal cumulative distribution function
@ -798,8 +821,8 @@ def cdfnorm2d(b1, b2, r):
bvn = where(abs(r) > 1, nan, 0.0)
two = 2.e0;
twopi = 6.283185307179586e0;
two = 2.e0
twopi = 6.283185307179586e0
hk = h * k
@ -813,90 +836,97 @@ def cdfnorm2d(b1, b2, r):
for i in range(10):
for sign in - 1, 1:
sn = sin(asr[k1] * (sign * _X20[i] + 1) / 2)
bvn[k01] = bvn[k01] + _W20[i] * exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn));
bvn[k01] = bvn[k01] + _W20[i] * \
exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn))
k1, = nonzero((0.3 <= r[k0]) & (r[k0] < 0.75))
if len(k1) > 0:
k01 = k0[k1];
k01 = k0[k1]
for i in range(6):
for sign in - 1, 1:
sn = sin(asr[k1] * (sign * _X12[i] + 1) / 2);
bvn[k01] = bvn[k01] + _W12[i] * exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn));
sn = sin(asr[k1] * (sign * _X12[i] + 1) / 2)
bvn[k01] = bvn[k01] + _W12[i] * \
exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn))
k1, = nonzero(r[k0] < 0.3);
k1, = nonzero(r[k0] < 0.3)
if len(k1) > 0:
k01 = k0[k1]
for i in range(3):
for sign in - 1, 1:
sn = sin(asr[k1] * (sign * _X6[i] + 1) / 2)
bvn[k01] = bvn[k01] + _W6[i] * exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn))
bvn[k01] = bvn[k01] + _W6[i] * \
exp((sn * hk[k01] - hs[k1]) / (1 - sn * sn))
bvn[k0] *= asr / (two * twopi)
bvn[k0] += fi(-h[k0]) * fi(-k[k0])
k1, = nonzero((0.925 <= abs(r)) & (abs(r) <= 1));
k1, = nonzero((0.925 <= abs(r)) & (abs(r) <= 1))
if len(k1) > 0:
k2, = nonzero(r[k1] < 0);
k2, = nonzero(r[k1] < 0)
if len(k2) > 0:
k12 = k1[k2];
k[k12] = -k[k12];
hk[k12] = -hk[k12];
k12 = k1[k2]
k[k12] = -k[k12]
hk[k12] = -hk[k12]
k3, = nonzero(abs(r[k1]) < 1);
k3, = nonzero(abs(r[k1]) < 1)
if len(k3) > 0:
k13 = k1[k3];
a2 = (1 - r[k13]) * (1 + r[k13]);
k13 = k1[k3]
a2 = (1 - r[k13]) * (1 + r[k13])
a = sqrt(a2)
b = abs(h[k13] - k[k13]);
bs = b * b;
c = (4.e0 - hk[k13]) / 8.e0;
d = (12.e0 - hk[k13]) / 16.e0;
asr = -(bs / a2 + hk[k13]) / 2.e0;
k4, = nonzero(asr > -100.e0);
b = abs(h[k13] - k[k13])
bs = b * b
c = (4.e0 - hk[k13]) / 8.e0
d = (12.e0 - hk[k13]) / 16.e0
asr = -(bs / a2 + hk[k13]) / 2.e0
k4, = nonzero(asr > -100.e0)
if len(k4) > 0:
bvn[k13[k4]] = a[k4] * exp(asr[k4]) * (1 - c[k4] *
(bs[k4] - a2[k4]) * (1 - d[k4] * bs[k4] / 5) / 3
+ c[k4] * d[k4] * a2[k4] ** 2 / 5);
bvn[k13[k4]] = (a[k4] * exp(asr[k4]) *
(1 - c[k4] * (bs[k4] - a2[k4]) *
(1 - d[k4] * bs[k4] / 5) / 3 +
c[k4] * d[k4] * a2[k4] ** 2 / 5))
k5, = nonzero(hk[k13] < 100.e0);
k5, = nonzero(hk[k13] < 100.e0)
if len(k5) > 0:
#% b = sqrt(bs);
k135 = k13[k5];
bvn[k135] = bvn[k135] - exp(-hk[k135] / 2) * sqrt(twopi) * fi(-b[k5] / a[k5]) * b[k5] * (1 - c[k5] * bs[k5] * (1 - d[k5] * bs[k5] / 5) / 3)
# b = sqrt(bs);
k135 = k13[k5]
bvn[k135] = bvn[k135] - exp(-hk[k135] / 2) * sqrt(twopi) * \
fi(-b[k5] / a[k5]) * b[k5] * \
(1 - c[k5] * bs[k5] * (1 - d[k5] * bs[k5] / 5) / 3)
a /= two
for i in range(10):
for sign in - 1, 1:
xs = (a * (sign * _X20[i] + 1)) ** 2;
rs = sqrt(1 - xs);
asr = -(bs / xs + hk[k13]) / 2;
k6, = nonzero(asr > -100.e0) ;
xs = (a * (sign * _X20[i] + 1)) ** 2
rs = sqrt(1 - xs)
asr = -(bs / xs + hk[k13]) / 2
k6, = nonzero(asr > -100.e0)
if len(k6) > 0:
k136 = k13[k6]
bvn[k136] += (a[k6] * _W20[i] * exp(asr[k6]) *
(exp(-hk[k136] * (1 - rs[k6]) / (2 * (1 + rs[k6]))) / rs[k6] -
(1 + c[k6] * xs[k6] * (1 + d[k6] * xs[k6]))))
(exp(-hk[k136] * (1 - rs[k6]) /
(2 * (1 + rs[k6]))) / rs[k6] -
(1 + c[k6] * xs[k6] *
(1 + d[k6] * xs[k6]))))
bvn[k3] = -bvn[k3] / twopi;
bvn[k3] = -bvn[k3] / twopi
k7, = nonzero(r[k1] > 0);
k7, = nonzero(r[k1] > 0)
if len(k7):
k17 = k1[k7]
bvn[k17] += fi(-np.maximum(h[k17], k[k17]));
bvn[k17] += fi(-np.maximum(h[k17], k[k17]))
k8, = nonzero(r[k1] < 0);
k8, = nonzero(r[k1] < 0)
if len(k8) > 0:
k18 = k1[k8];
bvn[k18] = -bvn[k18] + np.maximum(0, fi(-h[k18]) - fi(-k[k18]));
k18 = k1[k8]
bvn[k18] = -bvn[k18] + np.maximum(0, fi(-h[k18]) - fi(-k[k18]))
bvn.shape = cshape
return bvn
def fi(x):
return 0.5 * (erfc((-x) / sqrt(2)))
def prbnorm2d(a, b, r):
'''
Returns Bivariate Normal probability
@ -932,7 +962,7 @@ def prbnorm2d(a, b, r):
infinity = 37
lower = np.asarray(a)
upper = np.asarray(b)
if np.all((lower <= -infinity) & (infinity<=upper)):
if np.all((lower <= -infinity) & (infinity <= upper)):
return 1.0
if (lower >= upper).any():
return 0.0
@ -945,33 +975,39 @@ def prbnorm2d(a, b, r):
- bvd(lower[0], upper[1], correl)
+ bvd(upper[0], upper[1], correl))
elif (infin[0] == 2 and infin[1] == 1):
return bvd(lower[0], lower[1], correl) - bvd(upper[0], lower[1], correl)
elif (infin[0] == 1 and infin[1] == 2) :
return bvd(lower[0], lower[1], correl) - bvd(lower[0], upper[1], correl)
elif (infin[0] == 2 and infin[1] == 0) :
return bvd(-upper[0], -upper[1], correl) - bvd(-lower[0], -upper[1], correl)
return (bvd(lower[0], lower[1], correl) -
bvd(upper[0], lower[1], correl))
elif (infin[0] == 1 and infin[1] == 2):
return (bvd(lower[0], lower[1], correl) -
bvd(lower[0], upper[1], correl))
elif (infin[0] == 2 and infin[1] == 0):
return (bvd(-upper[0], -upper[1], correl) -
bvd(-lower[0], -upper[1], correl))
elif (infin[0] == 0 and infin[1] == 2):
return bvd(-upper[0], -upper[1], correl) - bvd(-upper[0], -lower[1], correl)
elif (infin[0] == 1 and infin[1] == 0) :
return (bvd(-upper[0], -upper[1], correl) -
bvd(-upper[0], -lower[1], correl))
elif (infin[0] == 1 and infin[1] == 0):
return bvd(lower[0], -upper[1], -correl)
elif (infin[0] == 0 and infin[1] == 1) :
elif (infin[0] == 0 and infin[1] == 1):
return bvd(-upper[0], lower[1], -correl)
elif (infin[0] == 1 and infin[1] == 1):
return bvd(lower[0], lower[1], correl)
elif (infin[0] == 0 and infin[1] == 0) :
elif (infin[0] == 0 and infin[1] == 0):
return bvd(-upper[0], -upper[1], correl)
return 1
def bvd(lo, up, r):
return cdfnorm2d(-lo, -up, r)
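For the all-finite case, prbnorm2d is inclusion-exclusion over the bivariate CDF (written above in the complementary `bvd` form). An independent cross-check with scipy, using illustrative limits (a sketch, not part of the module):

import numpy as np
from scipy.stats import multivariate_normal

a = np.array([-1.0, -2.0])   # lower limits (illustrative values)
b = np.array([1.0, 1.0])     # upper limits
r = 0.3                      # correlation coefficient
bvn = multivariate_normal(mean=[0.0, 0.0], cov=[[1.0, r], [r, 1.0]])
F = bvn.cdf
# inclusion-exclusion over the bivariate CDF; equivalent to the
# infin == [2, 2] branch above, which uses the complementary bvd form
p = F([b[0], b[1]]) - F([a[0], b[1]]) - F([b[0], a[1]]) + F([a[0], a[1]])
print(p)   # should agree with prbnorm2d(a, b, r) to the integration tolerance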
def test_docstrings():
import doctest
doctest.testmod()
if __name__ == '__main__':
test_docstrings()
#if __name__ == '__main__':
# if __name__ == '__main__':
# if False: #True: #
# test_rind()
# else:

@ -167,7 +167,7 @@ def spaceline(start_point, stop_point, num=10):
e1, e2 = np.atleast_1d(start_point, stop_point)
e2m1 = e2 - e1
length = np.sqrt((e2m1 ** 2).sum())
#length = sqrt((E2[0]-E1(1))^2 + (E2(2)-E1(2))^2 + (E2(3)-E1(3))^2);
# length = sqrt((E2[0]-E1(1))^2 + (E2(2)-E1(2))^2 + (E2(3)-E1(3))^2)
C = e2m1 / length
delta = length / float(num - 1)
return np.array([e1 + n * delta * C for n in range(num)])
@ -358,7 +358,8 @@ def sub2index(shape, *subscripts, **kwds):
ndx = 0
s0 = np.shape(subscripts[0])
for i, subscript in enumerate(subscripts):
np.testing.assert_equal(s0, np.shape(subscript),
np.testing.assert_equal(
s0, np.shape(subscript),
'The subscripts vectors must all be of the same shape.')
if (np.any(subscript < 0)) or (np.any(s[i] <= subscript)):
raise IndexError('Out of range subscript.')
@ -399,7 +400,7 @@ class JITImport(object):
except:
if self._module is None:
self._module = __import__(self._module_name, None, None, ['*'])
#assert(isinstance(self._module, types.ModuleType), 'module')
# assert(isinstance(self._module, types.ModuleType), 'module')
return getattr(self._module, attr)
else:
raise
@ -634,7 +635,7 @@ def _findcross(xn):
for ix in iz.tolist():
xn[ix] = xn[ix - 1]
#% indices to local level crossings ( without turningpoints)
# indices to local level crossings ( without turningpoints)
ind, = (xn[:n - 1] * xn[1:] < 0).nonzero()
return ind
@ -708,7 +709,8 @@ def findcross(x, v=0.0, kind=None):
# make sure the first is a level v down-crossing if wdef=='tw'
# or make sure the first is a level v up-crossing if
# wdef=='cw'
xor = lambda a, b: a ^ b
def xor(a, b):
return a ^ b
first_is_down_crossing = int(xn[ind[0]] > xn[ind[0] + 1])
if xor(first_is_down_crossing, kind in ('dw', 'tw')):
ind = ind[1::]
@ -979,7 +981,7 @@ def findrfc(tp, h=0.0, method='clib'):
ix += 1
ind[ix] = (Tstart + 2 * i + 1)
ix += 1
#iy = i
# iy = i
continue
# goto L180
@ -1119,7 +1121,7 @@ def mctp2rfc(fmM, fMm=None):
m0 = max(0, f_min[0] - np.sum(f_rfc[N - k + 1:N, 0]))
M0 = max(0, f_max[N - 1 - k] - np.sum(f_rfc[N - 1 - k, 1:k]))
f_rfc[N - 1 - k, 0] = min(m0, M0)
#% n_loops_left=N-k+1
# n_loops_left=N-k+1
# end
for k in range(1, N):
@ -1202,21 +1204,27 @@ def rfcfilter(x, h, method=0):
j = 0
t0 = 0
y0 = y[t0]
z0 = 0
def aleb(a, b):
return a <= b
def altb(a, b):
return a < b
if method == 0:
cmpfun1 = lambda a, b: a <= b
cmpfun2 = lambda a, b: a < b
cmpfun1 = aleb
cmpfun2 = altb
else:
cmpfun1 = lambda a, b: a < b
cmpfun2 = lambda a, b: a <= b
cmpfun1 = altb
cmpfun2 = aleb
# The rainflow filter
for tim1, yi in enumerate(y[1::]):
fpi = y0 + h
fmi = y0 - h
ti = tim1 + 1
#yi = y[ti]
# yi = y[ti]
if z0 == 0:
if cmpfun1(yi, fmi):
@ -1255,7 +1263,7 @@ def rfcfilter(x, h, method=0):
t0, y0, z0 = t1, y1, z1
# end
#% Update y if last y0 is greater than (or equal) threshold
# Update y if last y0 is greater than (or equal) threshold
if cmpfun1(h, abs(y0 - y[t[j]])):
j += 1
t[j] = t0
@ -1349,7 +1357,8 @@ def findtp(x, h=0.0, kind=None):
ind = ind[ind1]
if kind in ('mw', 'Mw'):
xor = lambda a, b: a ^ b
def xor(a, b):
return a ^ b
# make sure that the first is a Max if wdef == 'Mw'
# or make sure that the first is a min if wdef == 'mw'
first_is_max = (x[ind[0]] > x[ind[1]])
@ -1605,15 +1614,15 @@ def findoutliers(x, zcrit=0.0, dcrit=None, ddcrit=None, verbose=False):
print('Found %d spurious negative jumps of D^2x' % tmp.size)
if zcrit >= 0.0:
#% finding consecutive values less than zcrit apart.
# finding consecutive values less than zcrit apart.
indzeros = (abs(dxn) <= zcrit)
indz, = nonzero(indzeros)
if indz.size > 0:
indz = indz + 1
#%finding the beginning and end of consecutive equal values
# finding the beginning and end of consecutive equal values
indtr, = nonzero((diff(indzeros)))
indtr = indtr + 1
#%indices to consecutive equal points
# indices to consecutive equal points
# removing the point before + all equal points + the point after
if True:
ind = hstack((ind, indtr - 1, indz, indtr, indtr + 1))
@ -1821,7 +1830,7 @@ def stirlerr(n):
def getshipchar(value=None, property="max_deadweight", # @ReservedAssignment
**kwds): # @IgnorePep8
** kwds): # @IgnorePep8
'''
Return ship characteristics from value of one ship-property
@ -1909,7 +1918,7 @@ def getshipchar(value=None, property="max_deadweight", # @ReservedAssignment
draught = round(0.80 * max_deadweight ** 0.24 * 10) / 10
draught_err = draught * 0.22
#S = round(2/3*(L)**0.525)
# S = round(2/3*(L)**0.525)
speed = round(1.14 * max_deadweight ** 0.21 * 10) / 10
speed_err = speed * 0.10
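A quick worked example of the two regression formulas above, with the same rounding to one decimal as in the code:

max_deadweight = 10000.0
draught = round(0.80 * max_deadweight ** 0.24 * 10) / 10   # -> 7.3
speed = round(1.14 * max_deadweight ** 0.21 * 10) / 10     # -> 7.9
print(draught, draught * 0.22)   # estimate and its ~22 % relative uncertainty
print(speed, speed * 0.10)       # estimate and its ~10 % relative uncertainty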
@ -2171,7 +2180,7 @@ def hyp2f1(a, b, c, z, rho=0.5):
e8 = gammaln(c - a - b)
e9 = gammaln(a + b - c)
_cmab = c - a - b
#~(np.round(cmab) == cmab & cmab <= 0)
# ~(np.round(cmab) == cmab & cmab <= 0)
if abs(z) <= rho:
h = hyp2f1_taylor(a, b, c, z, 1e-15)
elif abs(1 - z) <= rho: # % Require that |arg(1-z)|<pi
@ -2869,7 +2878,7 @@ def trangood(x, f, min_n=None, min_x=None, max_x=None, max_n=inf):
numpy.interp
"""
xo, fo = atleast_1d(x, f)
#n = xo.size
# n = xo.size
if (xo.ndim != 1):
raise ValueError('x must be a vector.')
if (fo.ndim != 1):
@ -2901,14 +2910,14 @@ def trangood(x, f, min_n=None, min_x=None, max_x=None, max_n=inf):
x0 = xo[0]
L = float(xn - x0)
if ((nf < min_n) or (max_n < nf) or any(abs(ddx) > 10 * _EPS * (L))):
# % pab 07.01.2001: Always choose the stepsize df so that
# % it is an exactly representable number.
# % This is important when calculating numerical derivatives and is
# % accomplished by the following.
# pab 07.01.2001: Always choose the stepsize df so that
# it is an exactly representable number.
# This is important when calculating numerical derivatives and is
# accomplished by the following.
dx = L / (min(min_n, max_n) - 1)
dx = (dx + 2.) - 2.
xi = arange(x0, xn + dx / 2., dx)
#% New call pab 11.11.2000: This is much quicker
# New call pab 11.11.2000: This is much quicker
fo = interp(xi, xo, fo)
xo = xi
@ -2989,7 +2998,7 @@ def tranproc(x, f, x0, *xi):
xo, fo = trangood(xo, fo, min_x=min(x0), max_x=max(x0), max_n=nmax)
n = f.shape[0]
#y = x0.copy()
# y = x0.copy()
xu = (n - 1) * (x0 - xo[0]) / (xo[-1] - xo[0])
fi = asarray(floor(xu), dtype=int)
@ -3294,7 +3303,7 @@ def fourier(data, t=None, T=None, m=None, n=None, method='trapz'):
# Compute M-1 more coefficients
tmp = 2 * pi * t / T
#% tmp = 2*pi*(0:N-1).'/(N-1);
# tmp = 2*pi*(0:N-1).'/(N-1);
for i in range(1, m):
a[i] = intfun(x * cos(i * tmp), t, axis=-1)
b[i] = intfun(x * sin(i * tmp), t, axis=-1)
@ -3363,24 +3372,24 @@ def test_docstrings():
def test_hyp2f1():
# 1/(1-x) = F(1,1,1,x) = F(1,b,b,x) = F(a,1,a,x)
# (1+x)^n = F(-n,b,b,-x)
# atan(x) = x*F(.5,1,1.5,-x^2)
# asin(x) = x*F(.5,.5,1.5,x^2)
# log(x) = x*F(1,1,2,-x)
# log(1+x)-log(1-x) = 2*x*F(.5,1,1.5,x^2)
# (1+x)^n = F(-n,b,b,-x)
# atan(x) = x*F(.5,1,1.5,-x^2)
# asin(x) = x*F(.5,.5,1.5,x^2)
# log(x) = x*F(1,1,2,-x)
# log(1+x)-log(1-x) = 2*x*F(.5,1,1.5,x^2)
x = linspace(0., .7, 20)
y = hyp2f1_taylor(-1, -4, 1, .9)
_y2 = hygfz(-1, -4, 1, .9)
_y3 = hygfz(5, -300, 10, 0.5)
_y4 = hyp2f1_taylor(5, -300, 10, 0.5)
#y = hyp2f1(0.1, 0.2, 0.3, 0.5)
#y = hyp2f1(1, 1.5, 3, -4 +3j)
#y = hyp2f1(5, 7.5, 2.5, 5)
# fun = lambda x : 1./(1-x)
# x = .99
# y = hyp2f1(1,1,1,x)
# print(y-fun(x))
#
# y = hyp2f1(0.1, 0.2, 0.3, 0.5)
# y = hyp2f1(1, 1.5, 3, -4 +3j)
# y = hyp2f1(5, 7.5, 2.5, 5)
# fun = lambda x : 1./(1-x)
# x = .99
# y = hyp2f1(1,1,1,x)
# print(y-fun(x))
#
plt = plotbackend
plt.interactive(False)
plt.semilogy(x, np.abs(y - 1. / (1 - x)) + 1e-20, 'r')
@ -3389,4 +3398,4 @@ def test_hyp2f1():
if __name__ == "__main__":
test_docstrings()
#test_hyp2f1()
# test_hyp2f1()
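The identities listed at the top of test_hyp2f1 can be spot-checked against scipy.special.hyp2f1; note that the `log(x)` line in those comments actually corresponds to log(1+x) = x*F(1,1,2,-x). A short sketch:

import numpy as np
from scipy.special import hyp2f1

x = 0.4
print(np.isclose(1.0 / (1.0 - x), hyp2f1(1, 1, 1, x)))              # 1/(1-x)
print(np.isclose(np.arctan(x), x * hyp2f1(0.5, 1, 1.5, -x ** 2)))   # atan(x)
print(np.isclose(np.arcsin(x), x * hyp2f1(0.5, 0.5, 1.5, x ** 2)))  # asin(x)
print(np.isclose(np.log1p(x), x * hyp2f1(1, 1, 2, -x)))             # log(1+x)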

@ -1822,7 +1822,7 @@ class TimeSeries(PlotData):
if pdef[2] in ('u', 'd'):
t1 = ecross(ti, x, index[(start + dist):nn:step], vh)
else: # % min, Max, trough, crest or all crossings wanted
else: # min, Max, trough, crest or all crossings wanted
t1 = x[index[(start + dist):nn:step]]
T = t1 - t0
@ -1918,7 +1918,7 @@ class TimeSeries(PlotData):
expect = 1 # reconstruct by expectation? 1=yes 0=no
tol = 0.001 # absolute tolerance of e(g_new-g_old)
cmvmax = 100; # if number of consecutive missing values (cmv) are longer they
cmvmax = 100  # if consecutive missing values (cmv) run longer than this, they
# are not used in estimation of g, due to the fact that the
# conditional expectation approaches zero as the length to
# the closest known points increases, see below in the for loop
@ -1977,7 +1977,7 @@ class TimeSeries(PlotData):
indNaN = np.sort(indNaN)
# initial reconstruction attempt
# xn(indg,2)=detrendma(xn(indg,2),1500);
# xn(indg,2) = detrendma(xn(indg,2),1500);
# [g, test, cmax, irr, g2] = dat2tr(xn(indg,:),def,opt);
# xnt=xn;
# xnt(indg,:)=dat2gaus(xn(indg,:),g);
@ -2014,7 +2014,7 @@ class TimeSeries(PlotData):
# # [g0 test0 cmax irr g2] = dat2tr(xs,def,opt);
# # [test0 ind0]=sort(test0);
# # end
#
# if 1, #test>test0(end-5),
# # 95# sure the data comes from a non-Gaussian process
# def = olddef; #Non Gaussian process
@ -2208,7 +2208,6 @@ class TimeSeries(PlotData):
for ix in xrange(nsub):
if nsub > 1:
subplot(nsub, 1, ix)
h_scale = array([tn[ind[0]], tn[ind[-1]]])
ind2 = where((h_scale[0] <= tn2) & (tn2 <= h_scale[1]))[0]
plot(tn[ind] * dT, xn[ind], sym1)
@ -2216,12 +2215,9 @@ class TimeSeries(PlotData):
plot(tn2[ind2] * dT, xn2[ind2], sym2)
plot(h_scale * dT, [0, 0], 'k-')
#plotbackend.axis([h_scale*dT, v_scale])
for iy in [-2, 2]:
plot(h_scale * dT, iy * sigma * ones(2), ':')
ind = ind + Ns
# end
plotbackend.xlabel(XlblTxt)
return figs
@ -2274,7 +2270,6 @@ class TimeSeries(PlotData):
wave_idx = wave_idx[wave_idx > -1]
else:
Nwp[0] = wave_idx[-1] - wave_idx[0] + 1
# end
Nsub = min(6, Nsub)
Nfig = int(ceil(Nsub / 6))
@ -2298,7 +2293,6 @@ class TimeSeries(PlotData):
plotbackend.ylabel(
'Wave %d - %d' % (wave_idx[ix],
wave_idx[ix] + Nwp[ix] - 1))
plotbackend.xlabel('Time [sec]')
# wafostamp
return figs

@ -1,7 +1,7 @@
"""
Extended functions to operate on polynomials
"""
#-------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Name: polynomial
# Purpose: Functions to operate on polynomials.
#
@ -15,8 +15,8 @@
# Created: 30.12.2008
# Copyright: (c) pab 2008
# Licence: LGPL
#-------------------------------------------------------------------------
#!/usr/bin/env python
# -------------------------------------------------------------------------
# !/usr/bin/env python
from plotbackend import plotbackend as plt
import numpy as np

@ -298,7 +298,8 @@ class arcsine_gen(rv_continuous):
The probability density function for `arcsine` is::
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for 0 < x < 1.
for ``0 < x < 1``.
%(example)s
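A quick numerical check of the density quoted above (standalone, not part of the distribution class):

import numpy as np
from scipy import stats
from scipy.integrate import quad

def arcsine_pdf(x):
    return 1.0 / (np.pi * np.sqrt(x * (1.0 - x)))

total, _ = quad(arcsine_pdf, 0.0, 1.0)
print(np.isclose(total, 1.0))                                # integrates to one
print(np.isclose(arcsine_pdf(0.3), stats.arcsine.pdf(0.3)))  # matches scipy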
@ -1101,13 +1102,12 @@ class exponpow_gen(rv_continuous):
"""
def _pdf(self, x, b):
xbm1 = x**(b-1.0)
xb = xbm1 * x
return exp(1)*b*xbm1 * exp(xb - exp(xb))
return exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x ** (b - 1.0) * x
return 1 + log(b) + special.xlogy(b - 1.0, x) + xb - exp(xb)
xb = x**b
f = 1 + log(b) + special.xlogy(b - 1.0, x) + xb - exp(xb)
return f
def _cdf(self, x, b):
return -expm1(-expm1(x ** b))
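The rewrite routes `_pdf` through `_logpdf`; the two forms are algebraically identical, since exp(1 + log(b) + (b-1)*log(x) + x**b - exp(x**b)) equals exp(1)*b*x**(b-1)*exp(x**b - exp(x**b)). A throwaway consistency check (not part of the module):

import numpy as np
from scipy import special

def logpdf_new(x, b):
    xb = x ** b
    return 1 + np.log(b) + special.xlogy(b - 1.0, x) + xb - np.exp(xb)

def pdf_old(x, b):
    xbm1 = x ** (b - 1.0)
    xb = xbm1 * x
    return np.e * b * xbm1 * np.exp(xb - np.exp(xb))

x = np.linspace(0.01, 3.0, 50)
print(np.allclose(np.exp(logpdf_new(x, 2.5)), pdf_old(x, 2.5)))   # True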
@ -1294,13 +1294,13 @@ class f_gen(rv_continuous):
f = f_gen(a=0.0, name='f')
# Folded Normal
# abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
#
# note: regress docs have scale parameter correct, but first parameter
# he gives is a shape parameter A = c * scale
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: regress docs have scale parameter correct, but first parameter
## he gives is a shape parameter A = c * scale
# Half-normal is folded normal with shape-parameter c=0.
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
"""A folded normal continuous random variable.
@ -1527,8 +1527,8 @@ class genpareto_gen(rv_continuous):
genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
for ``c >= 0`` ``x >= 0``, and
for ``c < 0`` ``0 <= x <= -1/c``
defined for ``x >= 0`` if ``c >=0``, and for
``0 <= x <= -1/c`` if ``c < 0``.
For ``c == 0``, `genpareto` reduces to the exponential
distribution, `expon`::
@ -1691,8 +1691,9 @@ class genpareto_gen(rv_continuous):
for ki, cnk in zip(k, comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return where(c * n < 1, val * (-1.0 / c) ** n, inf)
munp = lambda c: __munp(n, c)
return _lazywhere(c != 0, (c,), munp, gam(n + 1))
return _lazywhere(c != 0, (c,),
lambda c: __munp(n, c),
gam(n + 1))
def _entropy(self, c):
return 1. + c
@ -3544,16 +3545,16 @@ class lomax_gen(rv_continuous):
return log(c) - (c + 1) * log1p(x)
def _cdf(self, x, c):
return 1.0-1.0/(1.0+x)**c
return -expm1(-c*log1p(x))
def _sf(self, x, c):
return 1.0/(1.0+x)**c
return exp(-c*log1p(x))
def _logsf(self, x, c):
return -c * log1p(x)
def _ppf(self, q, c):
return pow(1.0-q, -1.0/c)-1
return expm1(-log1p(-q)/c)
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
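The expm1/log1p forms introduced above are mathematically identical to the old expressions but avoid catastrophic cancellation for small x (CDF) and small q (PPF). A standalone illustration:

import numpy as np

c, x = 2.0, 1e-12
cdf_naive = 1.0 - 1.0 / (1.0 + x) ** c      # cancellation leaves only a few digits
cdf_stable = -np.expm1(-c * np.log1p(x))    # full precision, ~2e-12
print(cdf_naive, cdf_stable)

q = 1e-12
ppf_naive = (1.0 - q) ** (-1.0 / c) - 1.0   # cancellation again
ppf_stable = np.expm1(-np.log1p(-q) / c)    # full precision, ~5e-13
print(ppf_naive, ppf_stable)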
@ -3877,6 +3878,9 @@ class rayleigh_gen(rv_continuous):
def _ppf(self, q):
return sqrt(-2 * log1p(-q))
def _isf(self, q):
return sqrt(-2 * log(q))
def _stats(self):
val = 4 - pi
return (np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5,

@ -161,7 +161,7 @@ class nbinom_gen(rv_discrete):
def _logpmf(self, x, n, p):
coeff = gamln(n + x) - gamln(x + 1) - gamln(n)
return coeff + n * log(p) + x * log1p(-p)
return coeff + special.xlogy(n, p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
@ -217,7 +217,7 @@ class geom_gen(rv_discrete):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return (k - 1) * log1p(-p) + log(p)
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
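Switching to `special.xlogy`/`special.xlog1py` matters at the parameter boundaries: both are defined to return 0 when the first argument is 0, so the log-PMF stays finite where a naive `x*log(p)` would give nan. A minimal demonstration (standalone):

import numpy as np
from scipy import special

p, x = 1.0, 0.0   # boundary case: success probability exactly one, zero failures
with np.errstate(divide='ignore', invalid='ignore'):
    print(x * np.log1p(-p))        # 0 * -inf -> nan
print(special.xlog1py(x, -p))      # defined as 0.0
print(special.xlogy(0.0, 0.0))     # likewise 0.0 instead of nan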

@ -3,9 +3,12 @@ Created on 17. juli 2010
@author: pab
'''
import numpy as np #@UnusedImport
from numpy import pi, inf #@UnusedImport
from wafo.gaussian import Rind, prbnormtndpc, prbnormndpc, prbnormnd, cdfnorm2d, prbnorm2d #@UnusedImport
import numpy as np # @UnusedImport
from numpy import pi, inf # @UnusedImport
# @UnusedImport
from wafo.gaussian import (Rind, prbnormtndpc, prbnormndpc, prbnormnd,
cdfnorm2d, prbnorm2d)
def test_rind():
'''
@ -42,7 +45,7 @@ def test_rind():
Compute expectation E( X1^{+}*X2^{+} ) with random
correlation coefficient,Cov(X1,X2) = rho2.
>>> m2 = [0, 0];
>>> m2 = [0, 0]
>>> rho2 = 0.3 #np.random.rand(1)
>>> Sc2 = [[1, rho2], [rho2 ,1]]
>>> Blo2 = 0; Bup2 = np.inf; indI2 = [-1, 1]
@ -66,11 +69,13 @@ def test_rind():
array([ 0.00013838])
array([ 1.00000000e-10])
'''
def test_prbnormtndpc():
'''
>>> rho2 = np.random.rand(2);
>>> a2 = np.zeros(2);
>>> b2 = np.repeat(np.inf,2);
>>> rho2 = np.random.rand(2)
>>> a2 = np.zeros(2)
>>> b2 = np.repeat(np.inf,2)
>>> [val2,err2, ift2] = prbnormtndpc(rho2,a2,b2)
>>> g2 = lambda x : 0.25+np.arcsin(x[0]*x[1])/(2*pi)
>>> E2 = g2(rho2) #% exact value
@ -86,11 +91,13 @@ def test_prbnormtndpc():
>>> np.abs(E3-val3)<err3
True
'''
def test_prbnormndpc():
'''
>>> rho2 = np.random.rand(2);
>>> rho2 = np.random.rand(2)
>>> a2 = np.zeros(2);
>>> b2 = np.repeat(np.inf,2);
>>> b2 = np.repeat(np.inf,2)
>>> [val2,err2, ift2] = prbnormndpc(rho2,a2,b2)
>>> g2 = lambda x : 0.25+np.arcsin(x[0]*x[1])/(2*pi)
>>> E2 = g2(rho2) #% exact value
@ -107,6 +114,7 @@ def test_prbnormndpc():
True
'''
def test_prbnormnd():
'''
>>> import numpy as np
@ -123,6 +131,8 @@ def test_prbnormnd():
>>> 'val = %2.5f' % val
'val = 0.00195'
'''
def test_cdfnorm2d():
'''
>>> x = np.linspace(-3,3,3)
@ -134,6 +144,7 @@ def test_cdfnorm2d():
[ 1.34987703e-03, 4.99795143e-01, 9.97324055e-01]])
'''
def test_prbnorm2d():
'''
>>> a = [-1, -2]

@ -159,7 +159,8 @@ def test_findcross_and_ecross():
assert_array_equal(ind, np.array([9, 25, 80, 97, 151, 168, 223, 239]))
t0 = ecross(t, x, ind, 0.75)
assert_array_almost_equal(t0, np.array([0.84910514, 2.2933879, 7.13205663,
8.57630119, 13.41484739, 14.85909194,
8.57630119, 13.41484739,
14.85909194,
19.69776067, 21.14204343]))
@ -292,14 +293,14 @@ def test_findoutliers():
def test_hygfz():
#y = hyp2f1_taylor(-1, -4, 1, .9)
# y = hyp2f1_taylor(-1, -4, 1, .9)
assert_equal(4.6, hygfz(-1, -4, 1, .9))
assert_almost_equal(1.0464328112173522, hygfz(0.1, 0.2, 0.3, 0.5))
assert_almost_equal(1.2027034401166194, hygfz(0.1, 0.2, 0.3, 0.95))
#assert_equal(1.661006238211309e-07, hygfz(5, -300, 10, 0.5))
#assert_equal(0.118311386286, hygfz(0.5, -99.0, 1.5, 0.5625))
#assert_equal(0.0965606007742, hygfz(0.5, -149.0, 1.5, 0.5625))
#assert_equal(0.49234384000963544 + 0.60513406166123973j,
# assert_equal(1.661006238211309e-07, hygfz(5, -300, 10, 0.5))
# assert_equal(0.118311386286, hygfz(0.5, -99.0, 1.5, 0.5625))
# assert_equal(0.0965606007742, hygfz(0.5, -149.0, 1.5, 0.5625))
# assert_equal(0.49234384000963544 + 0.60513406166123973j,
# hygfz(1, 1, 4, 3 + 4j))
@ -337,8 +338,8 @@ def test_argsreduce():
def test_stirlerr():
assert_array_almost_equal(stirlerr(range(5)),
np.array([np.inf, 0.08106147, 0.0413407, 0.02767793,
0.02079067]))
np.array([np.inf, 0.08106147, 0.0413407,
0.02767793, 0.02079067]))
def test_getshipchar():
@ -362,7 +363,8 @@ def test_getshipchar():
def test_betaloge():
assert_array_almost_equal(betaloge(3, arange(4)),
np.array([np.inf, -1.09861229, -2.48490665, -3.40119738]))
np.array([np.inf, -1.09861229, -2.48490665,
-3.40119738]))
def test_gravity():

@ -4,8 +4,9 @@ Created on 5. aug. 2010
@author: pab
"""
import wafo.data
import numpy as np
import wafo.data # @UnusedImport
import numpy as np # @UnusedImport
def test_timeseries():
'''
@ -29,6 +30,8 @@ def test_timeseries():
array([ 0.22368637, 0.20838473, 0.17110733, 0.12237803, 0.07024054,
0.02064859, -0.02218831, -0.0555993 , -0.07859847, -0.09166187])
'''
def test_timeseries_trdata():
'''
>>> import wafo.spectrum.models as sm
@ -53,6 +56,6 @@ def test_timeseries_trdata():
1.0
'''
if __name__=='__main__':
if __name__ == '__main__':
import doctest
doctest.testmod()

@ -1,7 +1,6 @@
'''
'''
from __future__ import division
#import numpy as np
from numpy import trapz, sqrt, linspace # @UnresolvedImport
from wafo.containers import PlotData
@ -188,7 +187,7 @@ class TrData(PlotData, TrCommon):
self.sigma = kwds.get('sigma', None)
if self.mean is None:
#self.mean = np.mean(self.args) #
# self.mean = np.mean(self.args)
self.mean = self.gauss2dat(self.ymean)
if self.sigma is None:
yp = self.ymean + self.ysigma
@ -207,9 +206,11 @@ class TrData(PlotData, TrCommon):
def _dat2gauss(self, x, *xi):
return tranproc(self.args, self.data, x, *xi)
class EstimateTransform(object):
pass
def main():
pass
