master
Per A Brodtkorb 8 years ago
parent f7754c2402
commit 5a7d739721

File diff suppressed because it is too large

@@ -1,6 +1,6 @@
-'''
+"""
 Module extending the bitoperator capabilites of numpy
-'''
+"""
 from numpy import (bitwise_and, bitwise_or,
                    bitwise_not, binary_repr,  # @UnusedImport

@@ -1,8 +1,8 @@
-'''
+"""
 Created on 10. mai 2014
 @author: pab
-'''
+"""
 import numpy as np
 from numpy.fft import fft
 from wafo.misc import nextpow2
@@ -13,7 +13,7 @@ import warnings
 def sampling_period(t_vec):
-    '''
+    """
     Returns sampling interval
     Returns
@@ -24,7 +24,7 @@ def sampling_period(t_vec):
         [m] otherwise
     See also
-    '''
+    """
     dt1 = t_vec[1] - t_vec[0]
     n = len(t_vec) - 1
     t = t_vec[-1] - t_vec[0]
@@ -35,7 +35,7 @@ def sampling_period(t_vec):
 class CovarianceEstimator(object):
-    '''
+    """
     Class for estimating AutoCovariance from timeseries
     Parameters
@@ -61,7 +61,7 @@ class CovarianceEstimator(object):
         True if normalize output to one
     dt : scalar
         time-step between data points (default see sampling_period).
-    '''
+    """
     def __init__(self, lag=None, tr=None, detrend=None, window='boxcar',
                  flag='biased', norm=False, dt=None):
         self.lag = lag
@@ -84,24 +84,24 @@ class CovarianceEstimator(object):
         return lag
     def tocovdata(self, timeseries):
-        '''
+        """
         Return auto covariance function from data.
         Return
         -------
-        R : CovData1D object
+        acf : CovData1D object
             with attributes:
             data : ACF vector length L+1
             args : time lags length L+1
             sigma : estimated large lag standard deviation of the estimate
                     assuming x is a Gaussian process:
-                    if R(k)=0 for all lags k>q then an approximation
+                    if acf[k]=0 for all lags k>q then an approximation
                     of the variance for large samples due to Bartlett
-                    var(R(k))=1/N*(R(0)^2+2*R(1)^2+2*R(2)^2+ ..+2*R(q)^2)
+                    var(acf[k])=1/N*(acf[0]**2+2*acf[1]**2+2*acf[2]**2+ ..+2*acf[q]**2)
                     for k>q and where N=length(x). Special case is
-                    white noise where it equals R(0)^2/N for k>0
+                    white noise where it equals acf[0]**2/N for k>0
             norm : bool
-                If false indicating that R is not normalized
+                If false indicating that auto_cov is not normalized
         Example:
         --------
@@ -112,7 +112,7 @@ class CovarianceEstimator(object):
         >>> acf = ts.tocovdata(150)
         h = acf.plot()
-        '''
+        """
         lag = self.lag
         window = self.window
         detrend = self.detrend
@@ -142,30 +142,30 @@ class CovarianceEstimator(object):
             x = detrend(x)
         nfft = 2 ** nextpow2(n)
-        Rper = abs(fft(x, nfft)) ** 2 / Ncens  # Raw periodogram
-        R = np.real(fft(Rper)) / nfft  # ifft = fft/nfft since Rper is real!
+        raw_periodogram = abs(fft(x, nfft)) ** 2 / Ncens
+        auto_cov = np.real(fft(raw_periodogram)) / nfft  # ifft = fft/nfft since raw_periodogram is real!
         if self.flag.startswith('unbiased'):
             # unbiased result, i.e. divide by n-abs(lag)
-            R = R[:Ncens] * Ncens / np.arange(Ncens, 1, -1)
+            auto_cov = auto_cov[:Ncens] * Ncens / np.arange(Ncens, 1, -1)
         if self.norm:
-            R = R / R[0]
+            auto_cov = auto_cov / auto_cov[0]
         if lag is None:
-            lag = self._estimate_lag(R, Ncens)
+            lag = self._estimate_lag(auto_cov, Ncens)
         lag = min(lag, n - 2)
         if isinstance(window, str) or type(window) is tuple:
             win = get_window(window, 2 * lag - 1)
         else:
             win = np.asarray(window)
-        R[:lag] = R[:lag] * win[lag - 1::]
-        R[lag] = 0
+        auto_cov[:lag] = auto_cov[:lag] * win[lag - 1::]
+        auto_cov[lag] = 0
         lags = slice(0, lag + 1)
         t = np.linspace(0, lag * dt, lag + 1)
-        acf = CovData1D(R[lags], t)
-        acf.sigma = np.sqrt(np.r_[0, R[0] ** 2,
-                            R[0] ** 2 + 2 * np.cumsum(R[1:] ** 2)] / Ncens)
+        acf = CovData1D(auto_cov[lags], t)
+        acf.sigma = np.sqrt(np.r_[0, auto_cov[0] ** 2,
+                            auto_cov[0] ** 2 + 2 * np.cumsum(auto_cov[1:] ** 2)] / Ncens)
         acf.children = [PlotData(-2. * acf.sigma[lags], t),
                         PlotData(2. * acf.sigma[lags], t)]
         acf.plot_args_children = ['r:']

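Editor's aside (not part of the commit): the Bartlett large-lag variance quoted in the tocovdata docstring above can be evaluated directly with NumPy. The helper name bartlett_sigma and the toy inputs below are hypothetical; only the formula comes from the docstring and the code in the hunk.

    import numpy as np

    def bartlett_sigma(acf, n):
        # Large-lag std of an ACF estimate, assuming acf[k] = 0 for k > q:
        # var(acf[k]) ~ (acf[0]**2 + 2*acf[1]**2 + ... + 2*acf[q]**2) / n
        var = np.r_[0, acf[0] ** 2, acf[0] ** 2 + 2 * np.cumsum(acf[1:] ** 2)] / n
        return np.sqrt(var)

    acf = np.array([1.0, 0.6, 0.3, 0.1, 0.0])   # toy autocovariance sequence
    print(bartlett_sigma(acf, n=500))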
@@ -8,7 +8,7 @@ __all__ = ['dct', 'idct', 'dctn', 'idctn', 'dst', 'idst', 'dstn', 'idstn']
 def dct(x, type=2, n=None, axis=-1, norm='ortho'):  # @ReservedAssignment
-    '''
+    """
     Return the Discrete Cosine Transform of arbitrary type sequence x.
     Parameters
@@ -108,7 +108,7 @@ def dct(x, type=2, n=None, axis=-1, norm='ortho'):  # @ReservedAssignment
     'A Fast Cosine Transform in One and Two Dimensions', by J. Makhoul, `IEEE
     Transactions on acoustics, speech and signal processing` vol. 28(1),
     pp. 27-34, http://dx.doi.org/10.1109/TASSP.1980.1163351 (1980).
-    '''
+    """
     return _dct(x, type, n, axis, norm)
@@ -117,7 +117,7 @@ def dst(x, type=2, n=None, axis=-1, norm='ortho'):  # @ReservedAssignment
 def idct(x, type=2, n=None, axis=-1, norm='ortho'):  # @ReservedAssignment
-    '''
+    """
     Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.
     Parameters
@@ -152,7 +152,7 @@ def idct(x, type=2, n=None, axis=-1, norm='ortho'):  # @ReservedAssignment
     IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of type 3,
     and IDCT of type 3 is the DCT of type 2. For the definition of these types,
     see `dct`.
-    '''
+    """
     return _idct(x, type, n, axis, norm)
@@ -201,7 +201,7 @@ def _raw_dctn(y, type, shape, axes, norm, fun):  # @ReservedAssignment
 def dctn(x, type=2, shape=None, axes=None,  # @ReservedAssignment
          norm='ortho'):
-    '''
+    """
     Return the N-D Discrete Cosine Transform of array x.
     Parameters
@@ -271,7 +271,7 @@ def dctn(x, type=2, shape=None, axes=None,  # @ReservedAssignment
     See also
     --------
     idctn, dct, idct
-    '''
+    """
     y = np.atleast_1d(x)
     return _raw_dctn(y, type, shape, axes, norm, dct)
@@ -284,14 +284,14 @@ def dstn(x, type=2, shape=None, axes=None,  # @ReservedAssignment
 def idctn(x, type=2, shape=None, axes=None,  # @ReservedAssignment
           norm='ortho'):
-    '''Return inverse N-D Discrete Cosine Transform of array x.
+    """Return inverse N-D Discrete Cosine Transform of array x.
     For description of parameters see `dctn`.
     See Also
     --------
     dctn : for detailed information.
-    '''
+    """
     y = np.atleast_1d(x)
     return _raw_dctn(y, type, shape, axes, norm, idct)
@@ -317,7 +317,7 @@ def no_leading_ones(x):
 def shiftdim(x, n=None):
-    '''
+    """
     Shift dimensions
     Parameters
@@ -342,7 +342,7 @@ def shiftdim(x, n=None):
     See also
     --------
     reshape, squeeze
-    '''
+    """
     if n is None:
         return x.reshape(no_leading_ones(x.shape))
     elif n >= 0:
@@ -383,7 +383,7 @@ def example_dct2(debug=True):
     plt.set_cmap('jet')
     J = dctn(rgb)
     irgb = idctn(J)
-    out['diff_rgb_irgb'] = np.abs(rgb-irgb).max()
+    out['diff_rgb_irgb'] = np.abs(rgb - irgb).max()
     plt.imshow(np.log(np.abs(J)))
     # plt.colorbar() #colormap(jet), colorbar
     # plt.show('hold')

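Editor's aside (not part of the commit): these wrappers forward to scipy.fftpack with norm='ortho' as the default, so a quick round-trip sanity check can be done against SciPy directly; the array x below is just an illustrative input.

    import numpy as np
    from scipy.fftpack import dct, idct

    x = np.arange(8, dtype=float)
    y = dct(x, type=2, norm='ortho')         # orthonormal DCT-II
    x_back = idct(y, type=2, norm='ortho')   # DCT-III, the inverse of DCT-II
    print(np.allclose(x, x_back))            # True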
@@ -319,7 +319,7 @@ gh = wtm.TrHermite(mean=me, sigma=sa, skew=sk, kurt=ku, ysigma=sa)
 ysim_t = St.sim(ns=240, dt=0.5)
 xsim_t = ysim_t.copy()
-xsim_t[:,1] = gh.gauss2dat(ysim_t[:,1])
+xsim_t[:, 1] = gh.gauss2dat(ysim_t[:, 1])
 ts_y = wo.mat2timeseries(ysim_t)
 ts_x = wo.mat2timeseries(xsim_t)

@@ -70,7 +70,7 @@ plt.semilogx(T, sT)
 plt.hold(True)
 # ws.edf(Hs).plot()
 Nmax = len(Hs)
-N = np.r_[1:Nmax+1]
+N = np.r_[1:Nmax + 1]
 plt.plot(Nmax/N, sorted(Hs, reverse=True), '.')
 plt.title('Return values in the Gumbel model')
@@ -107,7 +107,7 @@ XI = np.r_[1:len(xn):0.25] - .99
 N = len(XI)
 N = N - np.mod(N, 4*60*5)
-YI = si.interp1d(xn[:, 0],xn[:, 1], kind='linear')(XI)
+YI = si.interp1d(xn[:, 0], xn[:, 1], kind='linear')(XI)
 YI = YI.reshape(4*60*5, N/(4*60*5))  # Each column holds 5 minutes of
                                      # interpolated data.
 Y5 = (YI - YI.mean(axis=0)) / YI.std(axis=0)

@@ -28,7 +28,8 @@ def plot_varying_symbols(x, y, color='red', size=5):
 def damage_vs_S(S, beta, K):
     """
-    calculate the damage 1/N for a given stress S
+    Calculate the damage 1/N for a given stress S
     Parameters
     ----------
     S : Stress [Pa]

@@ -32,7 +32,7 @@ __all__ = ['Rind', 'rindmod', 'mvnprdmod', 'mvn', 'cdflomax', 'prbnormtndpc',
 class Rind(object):
-    '''
+    """
     RIND Computes multivariate normal expectations
     Parameters
@@ -152,10 +152,10 @@ class Rind(object):
     Wave Distributions",
     Methodology And Computing In Applied Probability, Volume 8, Number 1,
     pp. 65-91(27)
-    '''
+    """
     def __init__(self, **kwds):
-        '''
+        """
         Parameters
         ----------
         method : integer, optional
@@ -217,7 +217,7 @@ class Rind(object):
             number of times to use the regression equation to restrict
             integration area. Nc1c2 = 1,2 is recommended. (default 2)
             (note: works only for method >0)
-        '''
+        """
         self.method = 3
         self.xcscale = 0
         self.abseps = 0
@@ -238,7 +238,7 @@ class Rind(object):
         self.set_constants()
     def initialize(self, speed=None):
-        '''
+        """
         Initializes member variables according to speed.
         Parameter
@@ -259,7 +259,7 @@ class Rind(object):
         maxpts : Maximum number of function values allowed.
         quadno : Quadrature formulae used in integration of Xd(i)
                  implicitly determining # nodes
-        '''
+        """
         if speed is None:
             return
         self.speed = min(max(speed, 1), 13)
@@ -324,22 +324,22 @@ class Rind(object):
         if xc is None:
             xc = zeros((0, 1))
-        BIG, Blo, Bup, xc = atleast_2d(cov, ab, bb, xc)
-        Blo = Blo.copy()
-        Bup = Bup.copy()
-        Ntdc = BIG.shape[0]
+        big, b_lo, b_up, xc = atleast_2d(cov, ab, bb, xc)
+        b_lo = b_lo.copy()
+        b_up = b_up.copy()
+        Ntdc = big.shape[0]
         Nc = xc.shape[0]
         if nt is None:
             nt = Ntdc - Nc
-        unused_Mb, Nb = Blo.shape
+        unused_Mb, Nb = b_lo.shape
         Nd = Ntdc - nt - Nc
         Ntd = nt + Nd
         if indI is None:
             if Nb != Ntd:
-                raise ValueError('Inconsistent size of Blo and Bup')
+                raise ValueError('Inconsistent size of b_lo and b_up')
             indI = r_[-1:Ntd]
         Ex, indI = atleast_1d(m, indI)
@@ -354,22 +354,22 @@ class Rind(object):
         # if INFIN(I) = 1, Ith limits are [Hlo(I), infinity);
         # if INFIN(I) = 2, Ith limits are [Hlo(I), Hup(I)].
         infinity = 37
-        dev = sqrt(diag(BIG))  # std
+        dev = sqrt(diag(big))  # std
         ind = nonzero(indI[1:] > -1)[0]
         infin = repeat(2, len(indI) - 1)
-        infin[ind] = (2 - (Bup[0, ind] > infinity * dev[indI[ind + 1]]) -
-                      2 * (Blo[0, ind] < -infinity * dev[indI[ind + 1]]))
-        Bup[0, ind] = minimum(Bup[0, ind], infinity * dev[indI[ind + 1]])
-        Blo[0, ind] = maximum(Blo[0, ind], -infinity * dev[indI[ind + 1]])
+        infin[ind] = (2 - (b_up[0, ind] > infinity * dev[indI[ind + 1]]) -
+                      2 * (b_lo[0, ind] < -infinity * dev[indI[ind + 1]]))
+        b_up[0, ind] = minimum(b_up[0, ind], infinity * dev[indI[ind + 1]])
+        b_lo[0, ind] = maximum(b_lo[0, ind], -infinity * dev[indI[ind + 1]])
         ind2 = indI + 1
-        return rindmod.rind(BIG, Ex, xc, nt, ind2, Blo, Bup, infin, seed)
+        return rindmod.rind(big, Ex, xc, nt, ind2, b_lo, b_up, infin, seed)
 def test_rind():
-    ''' Small test function
-    '''
+    """ Small test function
+    """
     n = 5
     Blo = -inf
     Bup = -1.2
@@ -398,7 +398,7 @@ def test_rind():
 def cdflomax(x, alpha, m0):
-    '''
+    """
     Return CDF for local maxima for a zero-mean Gaussian process
     Parameters
@@ -449,14 +449,14 @@ def cdflomax(x, alpha, m0):
     See also
     --------
     spec2mom, spec2bw
-    '''
+    """
     c1 = 1.0 / (sqrt(1 - alpha ** 2)) * x / sqrt(m0)
     c2 = alpha * c1
     return cdfnorm(c1) - alpha * exp(-x ** 2 / 2 / m0) * cdfnorm(c2)
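Editor's aside (not part of the commit): the closed-form expression in cdflomax above can be evaluated with scipy.stats.norm in place of wafo's cdfnorm; the function name and sample arguments below are illustrative only (alpha is the irregularity factor, m0 the zeroth spectral moment).

    import numpy as np
    from scipy.stats import norm

    def cdflomax_sketch(x, alpha, m0):
        # CDF of local maxima of a zero-mean Gaussian process (same formula as above)
        c1 = x / (np.sqrt(1.0 - alpha ** 2) * np.sqrt(m0))
        c2 = alpha * c1
        return norm.cdf(c1) - alpha * np.exp(-x ** 2 / (2.0 * m0)) * norm.cdf(c2)

    print(cdflomax_sketch(x=1.0, alpha=0.5, m0=1.0))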
-def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
-    '''
+def prbnormtndpc(rho, a, b, d=None, df=0, abseps=1e-4, ierc=0, hnc=0.24):
+    """
     Return Multivariate normal or T probability with product correlation.
     Parameters
@@ -469,13 +469,13 @@ def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
         vector of lower and upper integration limits, respectively.
         Note: any values greater the 37 in magnitude, are considered as
         infinite values.
-    D : array-like
+    d : array-like
         vector of means (default zeros(size(rho)))
     df = Degrees of freedom, NDF<=0 gives normal probabilities (default)
     abseps = absolute error tolerance. (default 1e-4)
-    IERC = 1 if strict error control based on fourth derivative
+    ierc = 1 if strict error control based on fourth derivative
            0 if error control based on halving the intervals (default)
-    HNC = start interval width of simpson rule (default 0.24)
+    hnc = start interval width of simpson rule (default 0.24)
     Returns
     -------
@@ -529,20 +529,20 @@ def prbnormtndpc(rho, a, b, D=None, df=0, abseps=1e-4, IERC=0, HNC=0.24):
     Charles Dunnett (1989)
     "Multivariate normal probability integrals with product correlation
     structure", Applied statistics, Vol 38,No3, (Algorithm AS 251)
-    '''
-    if D is None:
-        D = zeros(len(rho))
+    """
+    if d is None:
+        d = zeros(len(rho))
     # Make sure integration limits are finite
-    A = np.clip(a - D, -100, 100)
-    B = np.clip(b - D, -100, 100)
-    return mvnprdmod.prbnormtndpc(rho, A, B, df, abseps, IERC, HNC)
+    aa = np.clip(a - d, -100, 100)
+    bb = np.clip(b - d, -100, 100)
+    return mvnprdmod.prbnormtndpc(rho, aa, bb, df, abseps, ierc, hnc)
 def prbnormndpc(rho, a, b, abserr=1e-4, relerr=1e-4, usesimpson=True,
                 usebreakpoints=False):
-    '''
+    """
     Return Multivariate Normal probabilities with product correlation
     Parameters
@@ -600,7 +600,7 @@ def prbnormndpc(rho, a, b, abserr=1e-4, relerr=1e-4, usesimpson=True,
     Dr.Ing thesis, Norwegian University of Science and Technolgy, NTNU,
     Trondheim, Norway.
-    '''
+    """
     # Call fortran implementation
     val, err, ier = mvnprdmod.prbnormndpc(rho, a, b, abserr, relerr,
                                           usebreakpoints, usesimpson)
@@ -612,7 +612,7 @@ def prbnormndpc(rho, a, b, abserr=1e-4, relerr=1e-4, usesimpson=True,
 _ERRORMESSAGE = {}
 _ERRORMESSAGE[0] = ''
-_ERRORMESSAGE[1] = '''
+_ERRORMESSAGE[1] = """
     Maximum number of subdivisions allowed has been achieved. one can allow
     more subdivisions by increasing the value of limit (and taking the
     according dimension adjustments into account). however, if this yields
@@ -623,32 +623,32 @@ _ERRORMESSAGE[1] = '''
     the vector points. If necessary an appropriate special-purpose
     integrator must be used, which is designed for handling the type of
     difficulty involved.
-    '''
-_ERRORMESSAGE[2] = '''
+    """
+_ERRORMESSAGE[2] = """
     the occurrence of roundoff error is detected, which prevents the requested
-    tolerance from being achieved. The error may be under-estimated.'''
-_ERRORMESSAGE[3] = '''
+    tolerance from being achieved. The error may be under-estimated."""
+_ERRORMESSAGE[3] = """
     Extremely bad integrand behaviour occurs at some points of the integration
-    interval.'''
-_ERRORMESSAGE[4] = '''
+    interval."""
+_ERRORMESSAGE[4] = """
     The algorithm does not converge. Roundoff error is detected in the
     extrapolation table. It is presumed that the requested tolerance cannot be
     achieved, and that the returned result is the best which can be obtained.
-    '''
-_ERRORMESSAGE[5] = '''
+    """
+_ERRORMESSAGE[5] = """
     The integral is probably divergent, or slowly convergent.
     It must be noted that divergence can occur with any other value of ier>0.
-    '''
-_ERRORMESSAGE[6] = '''the input is invalid because:
+    """
+_ERRORMESSAGE[6] = """the input is invalid because:
     1) npts2 < 2
     2) break points are specified outside the integration range
     3) (epsabs<=0 and epsrel<max(50*rel.mach.acc.,0.5d-28))
-    4) limit < npts2.'''
+    4) limit < npts2."""
 def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
-    '''
+    """
     Multivariate Normal probability by Genz' algorithm.
@@ -713,7 +713,7 @@ def prbnormnd(correl, a, b, abseps=1e-4, releps=1e-3, maxpts=None, method=0):
     See also
     --------
     prbnormndpc, Rind
-    '''
+    """
     m, n = correl.shape
     Na = len(a)
@@ -787,7 +787,7 @@ _X20 = [-0.9931285991850949e+00, -0.9639719272779138e+00,
 def cdfnorm2d(b1, b2, r):
-    '''
+    """
     Returnc Bivariate Normal cumulative distribution function
     Parameters
@@ -831,7 +831,7 @@ def cdfnorm2d(b1, b2, r):
     Drezner, z and g.o. Wesolowsky, (1989),
     "On the computation of the bivariate normal integral",
     Journal of statist. comput. simul. 35, pp. 101-107,
-    '''
+    """
     # Translated into Python
     # Per A. Brodtkorb
     #
@@ -956,7 +956,7 @@ def fi(x):
 def prbnorm2d(a, b, r):
-    '''
+    """
     Returns Bivariate Normal probability
     Parameters
@@ -986,7 +986,7 @@ def prbnorm2d(a, b, r):
     cdfnorm2d,
     cdfnorm,
     prbnormndpc
-    '''
+    """
     infinity = 37
     lower = np.asarray(a)
     upper = np.asarray(b)

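Editor's aside (not part of the commit): prbnorm2d computes P(a < X < b) for a standard bivariate normal with correlation r. A rough cross-check, assuming a reasonably recent SciPy, is inclusion-exclusion on the joint CDF; the helper below is hypothetical and is not how the module itself does it (it uses cdfnorm2d).

    import numpy as np
    from scipy.stats import multivariate_normal

    def prbnorm2d_check(a, b, r):
        # P(a[0] < X1 < b[0], a[1] < X2 < b[1]) by inclusion-exclusion on the CDF
        mvn = multivariate_normal(mean=[0.0, 0.0], cov=[[1.0, r], [r, 1.0]])
        return (mvn.cdf([b[0], b[1]]) - mvn.cdf([a[0], b[1]])
                - mvn.cdf([b[0], a[1]]) + mvn.cdf([a[0], a[1]]))

    print(prbnorm2d_check(a=[-1.0, -1.0], b=[1.0, 1.0], r=0.5))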
@@ -1,8 +1,8 @@
-'''
+"""
 Created on 6. okt. 2016
 @author: pab
-'''
+"""
 from __future__ import absolute_import, division
 from numba import guvectorize, jit, float64, int64, int32, int8, void
 import numpy as np
@@ -66,8 +66,8 @@ def _findcross(ind, y):
 def findcross(xn):
-    '''Return indices to zero up and downcrossings of a vector
-    '''
+    """Return indices to zero up and downcrossings of a vector
+    """
     ind = np.empty(len(xn), dtype=np.int64)
     m = _findcross(ind, xn)
     return ind[:m]
@@ -150,9 +150,9 @@ _findrfc_lt = _make_findrfc(a_lt_b, a_le_b)
 def _findrfc(ind, y, h):
     n = len(y)
     t_start = 0
-    NC = n // 2 - 1
+    nc = n // 2 - 1
     ix = 0
-    for i in range(NC):
+    for i in range(nc):
         Tmi = t_start + 2 * i
         Tpl = t_start + 2 * i + 2
         xminus = y[2 * i]
@@ -174,7 +174,7 @@ def _findrfc(ind, y, h):
             # goto L180 continue
         else:
             j = i + 1
-            while (j < NC):
+            while (j < nc):
                 if (y[2 * j + 1] >= y[2 * i + 1]):
                     break  # goto L170
                 if((y[2 * j + 2] <= xplus)):

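Editor's aside (not part of the commit): findcross above returns indices of zero up- and down-crossings via a numba kernel. A naive pure-NumPy equivalent, ignoring the special handling of exact zeros and plateaus, looks like this; the function name and test signal are hypothetical.

    import numpy as np

    def findcross_naive(x):
        # indices i where x changes sign between x[i] and x[i + 1]
        s = np.sign(x)
        return np.where(s[:-1] * s[1:] < 0)[0]

    t = np.linspace(0, 4 * np.pi, 200)
    print(findcross_naive(np.sin(t)))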
@@ -57,7 +57,7 @@ def sech(x):
 def _gengamspec(wn, N=5, M=4):
-    ''' Return Generalized gamma spectrum in dimensionless form
+    """ Return Generalized gamma spectrum in dimensionless form
     Parameters
     ----------
@@ -106,7 +106,7 @@ def _gengamspec(wn, N=5, M=4):
     Torsethaugen, K. (2004)
     "Simplified Double Peak Spectral Model for Ocean Waves"
     In Proc. 14th ISOPE
-    '''
+    """
     w = atleast_1d(wn)
     S = zeros_like(w)
@@ -132,7 +132,7 @@ class ModelSpectrum(object):
         self.type = 'ModelSpectrum'
     def tospecdata(self, w=None, wc=None, nw=257):
-        '''
+        """
         Return SpecData1D object from ModelSpectrum
         Parameter
@@ -148,7 +148,7 @@ class ModelSpectrum(object):
         -------
         S : SpecData1D object
             member attributes of model spectrum are copied to S.workspace
-        '''
+        """
         if w is None:
             if wc is None:
@@ -165,8 +165,8 @@ class ModelSpectrum(object):
         return S
     def chk_seastate(self):
-        ''' Check if seastate is valid
-        '''
+        """ Check if seastate is valid
+        """
         if self.Hm0 < 0:
             raise ValueError('Hm0 can not be negative!')
@@ -185,7 +185,7 @@ class ModelSpectrum(object):
 class Bretschneider(ModelSpectrum):
-    '''
+    """
     Bretschneider spectral density model
     Member variables
@@ -237,7 +237,7 @@ class Bretschneider(ModelSpectrum):
     --------
     Jonswap,
     Torsethaugen
-    '''
+    """
     def __init__(self, Hm0=7.0, Tp=11.0, N=5, M=4, chk_seastate=True, **kwds):
         self.type = 'Bretschneider'
@@ -249,8 +249,8 @@ class Bretschneider(ModelSpectrum):
         self.chk_seastate()
     def __call__(self, wi):
-        ''' Return Bretschnieder spectrum
-        '''
+        """ Return Bretschnieder spectrum
+        """
         w = atleast_1d(wi)
         if self.Hm0 > 0:
             wp = 2 * pi / self.Tp
@@ -262,7 +262,7 @@ class Bretschneider(ModelSpectrum):
 def jonswap_peakfact(Hm0, Tp):
-    ''' Jonswap peakedness factor, gamma, given Hm0 and Tp
+    """ Jonswap peakedness factor, gamma, given Hm0 and Tp
     Parameters
     ----------
@@ -313,7 +313,7 @@ def jonswap_peakfact(Hm0, Tp):
     See also
     --------
     jonswap
-    '''
+    """
     Hm0, Tp = atleast_1d(Hm0, Tp)
     x = Tp / sqrt(Hm0)
@@ -332,7 +332,7 @@ def jonswap_peakfact(Hm0, Tp):
 def jonswap_seastate(u10, fetch=150000., method='lewis', g=9.81,
                      output='dict'):
-    '''
+    """
     Return Jonswap seastate from windspeed and fetch
     Parameters
@@ -400,7 +400,7 @@ def jonswap_seastate(u10, fetch=150000., method='lewis', g=9.81,
     A parametric wave prediction model.
     J. phys. oceanogr. Vol 6, pp 200-228
-    '''
+    """
     # The following formulas are from Lewis and Allos 1990:
     zeta = g * fetch / (u10 ** 2)  # dimensionless fetch, Table 1
@@ -440,7 +440,7 @@ def jonswap_seastate(u10, fetch=150000., method='lewis', g=9.81,
 class Jonswap(ModelSpectrum):
-    '''
+    """
     Jonswap spectral density model
     Member variables
@@ -520,7 +520,7 @@ class Jonswap(ModelSpectrum):
     Measurements of Wind-Wave Growth and Swell Decay during the Joint
     North Sea Project (JONSWAP).
     Ergansungsheft, Reihe A(8), Nr. 12, Deutschen Hydrografischen Zeitschrift.
-    '''
+    """
     def __init__(self, Hm0=7.0, Tp=11.0, gamma=None, sigmaA=0.07, sigmaB=0.09,
                  Ag=None, N=5, M=4, method='integration', wnc=6.0,
@@ -552,17 +552,17 @@ class Jonswap(ModelSpectrum):
         gam = self.gamma
         outsideJonswapRange = Tp > 5 * sqrt(Hm0) or Tp < 3.6 * sqrt(Hm0)
         if outsideJonswapRange:
-            txt0 = '''
+            txt0 = """
             Hm0=%g,Tp=%g is outside the JONSWAP range.
             The validity of the spectral density is questionable.
-            ''' % (Hm0, Tp)
+            """ % (Hm0, Tp)
             warnings.warn(txt0)
         if gam < 1 or 7 < gam:
-            txt = '''
+            txt = """
             The peakedness factor, gamma, is possibly too large.
             The validity of the spectral density is questionable.
-            '''
+            """
             warnings.warn(txt)
     def _localspec(self, wn):
@@ -617,8 +617,8 @@ class Jonswap(ModelSpectrum):
         self.Ag = 1.0 / area
     def _pre_calculate_ag(self):
-        ''' PRECALCULATEAG Precalculate normalization.
-        '''
+        """ PRECALCULATEAG Precalculate normalization.
+        """
         if self.gamma == 1:
             self.Ag = 1.0
             self.method = 'parametric'
@@ -631,8 +631,8 @@ class Jonswap(ModelSpectrum):
             norm_ag()
     def peak_e_factor(self, wn):
-        ''' PEAKENHANCEMENTFACTOR
-        '''
+        """ PEAKENHANCEMENTFACTOR
+        """
         w = maximum(atleast_1d(wn), 0.0)
         sab = where(w > 1, self.sigmaB, self.sigmaA)
@@ -641,8 +641,8 @@ class Jonswap(ModelSpectrum):
         return Gf
     def __call__(self, wi):
-        ''' JONSWAP spectral density
-        '''
+        """ JONSWAP spectral density
+        """
         w = atleast_1d(wi)
         if (self.Hm0 > 0.0):
@@ -660,7 +660,7 @@ class Jonswap(ModelSpectrum):
 def phi1(wi, h, g=9.81):
-    ''' Factor transforming spectra to finite water depth spectra.
+    """ Factor transforming spectra to finite water depth spectra.
     Input
     -----
@@ -692,7 +692,7 @@ def phi1(wi, h, g=9.81):
     1 spectral form.'
     J. Geophys. Res., Vol 90, No. C1, pp 975-986
-    '''
+    """
     w = atleast_1d(wi)
     if h == inf:  # % special case infinite water depth
         return ones_like(w)
@@ -709,7 +709,7 @@ def phi1(wi, h, g=9.81):
 class Tmaspec(Jonswap):
-    ''' JONSWAP spectrum for finite water depth
+    """ JONSWAP spectrum for finite water depth
     Member variables
     ----------------
@@ -783,7 +783,7 @@ class Tmaspec(Jonswap):
     Ergansungsheft, Reihe A(8), Nr. 12, deutschen Hydrografischen
     Zeitschrift.
-    '''
+    """
     def __init__(self, Hm0=7.0, Tp=11.0, gamma=None, sigmaA=0.07, sigmaB=0.09,
                  Ag=None, N=5, M=4, method='integration', wnc=6.0,
@@ -808,7 +808,7 @@ class Tmaspec(Jonswap):
 class Torsethaugen(ModelSpectrum):
-    '''
+    """
     Torsethaugen double peaked (swell + wind) spectrum model
     Member variables
@@ -881,7 +881,7 @@ class Torsethaugen(ModelSpectrum):
     'A two peak wave spectral model.'
     In proceedings OMAE, Glasgow
-    '''
+    """
     def __init__(self, Hm0=7, Tp=11, method='integration', wnc=6, gravity=9.81,
                  chk_seastate=True, **kwds):
@@ -899,26 +899,26 @@ class Torsethaugen(ModelSpectrum):
         self._init_spec()
     def __call__(self, w):
-        ''' TORSETHAUGEN spectral density
-        '''
+        """ TORSETHAUGEN spectral density
+        """
         return self.wind(w) + self.swell(w)
     def _chk_extra_param(self):
         Hm0 = self.Hm0
         Tp = self.Tp
         if Hm0 > 11 or Hm0 > max((Tp / 3.6) ** 2, (Tp - 2) * 12 / 11):
-            txt0 = '''Hm0 is outside the valid range.
-            The validity of the spectral density is questionable'''
+            txt0 = """Hm0 is outside the valid range.
+            The validity of the spectral density is questionable"""
             warnings.warn(txt0)
         if Tp > 20 or Tp < 3:
-            txt1 = '''Tp is outside the valid range.
-            The validity of the spectral density is questionable'''
+            txt1 = """Tp is outside the valid range.
+            The validity of the spectral density is questionable"""
             warnings.warn(txt1)
     def _init_spec(self):
-        ''' Initialize swell and wind part of Torsethaugen spectrum
-        '''
+        """ Initialize swell and wind part of Torsethaugen spectrum
+        """
         monitor = 0
         Hm0 = self.Hm0
         Tp = self.Tp
@@ -1050,7 +1050,7 @@ class Torsethaugen(ModelSpectrum):
 class McCormick(Bretschneider):
-    ''' McCormick spectral density model
+    """ McCormick spectral density model
     Member variables
     ----------------
@@ -1095,7 +1095,7 @@ class McCormick(Bretschneider):
     M.E. McCormick (1999)
     "Application of the Generic Spectral Formula to Fetch-Limited Seas"
     Marine Technology Society, Vol 33, No. 3, pp 27-32
-    '''
+    """
     def __init__(self, Hm0=7, Tp=11, Tz=None, M=None, chk_seastate=True):
         self.type = 'McCormick'
@@ -1122,7 +1122,7 @@ class McCormick(Bretschneider):
 class OchiHubble(ModelSpectrum):
-    ''' OchiHubble bimodal spectral density model.
+    """ OchiHubble bimodal spectral density model.
     Member variables
     ----------------
@@ -1171,7 +1171,7 @@ class OchiHubble(ModelSpectrum):
     'On six-parameter wave spectra.'
     In Proc. 15th Conf. Coastal Engng., Vol.1, pp301-328
-    '''
+    """
     def __init__(self, Hm0=7, par=1, chk_seastate=True):
         self.type = 'Ochi Hubble'
@@ -1253,7 +1253,7 @@ class OchiHubble(ModelSpectrum):
 class Wallop(Bretschneider):
-    '''Wallop spectral density model.
+    """Wallop spectral density model.
     Member variables
     ----------------
@@ -1304,7 +1304,7 @@ class Wallop(Bretschneider):
     Huang, N.E., Long, S.R., Tung, C.C, Yuen, Y. and Bilven, L.F. (1981)
     "A unified two parameter wave spectral model for a generous sea state"
     J. Fluid Mechanics, Vol.112, pp 203-224
-    '''
+    """
     def __init__(self, Hm0=7, Tp=11, N=None, chk_seastate=True):
         self.type = 'Wallop'
@@ -1325,7 +1325,7 @@ class Wallop(Bretschneider):
 class Spreading(object):
-    '''
+    """
     Directional spreading function.
     Parameters
@@ -1488,7 +1488,7 @@ class Spreading(object):
     NB! The generally strong frequency dependence in directional spread
     makes it questionable to run load tests of ships and structures with a
     directional spread independent of frequency (Krogstad and Barstow, 1999).
-    '''
+    """
     # Parameterization of B
     #  def = 2 Donelan et al freq. parametrization for 'sech2'
     #  def = 3 Banner freq. parametrization for 'sech2'
@@ -1560,10 +1560,10 @@ class Spreading(object):
         return atleast_1d(self.theta0).flatten()
     def chk_input(self, theta, w=1, wc=1):
-        ''' CHK_INPUT
+        """ CHK_INPUT
         CALL [s_par,TH,phi0,Nt] = inputchk(theta,w,wc)
-        '''
+        """
         wn = atleast_1d(w / wc)
         theta = theta.ravel()
@@ -1579,7 +1579,7 @@ class Spreading(object):
         return s, TH, phi0, Nt
     def cos2s(self, theta, w=1, wc=1):  # [D, phi0] =
-        ''' COS2S spreading function
+        """ COS2S spreading function
         cos2s(theta,w) = N(S)*[cos((theta-theta0)/2)]^(2*S)  (0 < S)
@@ -1599,7 +1599,7 @@ class Spreading(object):
             The principal direction of D is always along the x-axis.
         phi0 : real scalar
             Parameter defining the actual principal direction of D.
-        '''
+        """
         S, TH, phi0 = self.chk_input(theta, w, wc)[:3]
         gammaln = sp.gammaln
@@ -1609,7 +1609,7 @@ class Spreading(object):
         return D, phi0
     def poisson(self, theta, w=1, wc=1):  # [D,phi0] =
-        ''' POISSON spreading function
+        """ POISSON spreading function
         poisson(theta,w) = N(X)/(1-2*X*cos(theta-theta0)+X^2)  (0 < X < 1)
@@ -1629,14 +1629,14 @@ class Spreading(object):
             The principal direction of D is always along the x-axis.
         phi0 : real scalar
             Parameter defining the actual principal direction of D.
-        '''
+        """
         X, TH, phi0 = self.chk_input(theta, w, wc)[:3]
         D = (1 - X ** 2.) / (1. - (2. * cos(TH) - X) * X) / (2. * pi)
         return D, phi0
     def wrap_norm(self, theta, w=1, wc=1):
-        ''' Wrapped Normal spreading function
+        """ Wrapped Normal spreading function
         wnormal(theta,w) = N(D1)*[1 +
                 2*sum exp(-(n*D1)^2/2)*cos(n*(theta-theta0))]  (0 < D1)
@@ -1657,7 +1657,7 @@ class Spreading(object):
             The principal direction of D is always along the x-axis.
         phi0 : real scalar
             Parameter defining the actual principal direction of D.
-        '''
+        """
         par, TH, phi0, Nt = self.chk_input(theta, w, wc)
@@ -1678,7 +1678,7 @@ class Spreading(object):
         return D, phi0
     def sech2(self, theta, w=1, wc=1):
-        '''SECH2 directonal spreading function
+        """SECH2 directonal spreading function
         sech2(theta,w) = N(B)*0.5*B*sech(B*(theta-theta0))^2  (0 < B)
@@ -1698,7 +1698,7 @@ class Spreading(object):
             The principal direction of D is always along the x-axis.
         phi0 : real scalar
             Parameter defining the actual principal direction of D.
-        '''
+        """
         B, TH, phi0 = self.chk_input(theta, w, wc)[:3]
         NB = tanh(pi * B)  # % Normalization factor.
@@ -1708,7 +1708,7 @@ class Spreading(object):
         return D, phi0
     def mises(self, theta, w=1, wc=1):
-        '''Mises spreading function
+        """Mises spreading function
         mises(theta,w) = N(K)*exp(K*cos(theta-theta0))  (0 < K)
@@ -1728,7 +1728,7 @@ class Spreading(object):
             The principal direction of D is always along the x-axis.
         phi0 : real scalar
             Parameter defining the actual principal direction of D.
-        '''
+        """
         K, TH, phi0 = self.chk_input(theta, w, wc)[:3]
@@ -1736,7 +1736,7 @@ class Spreading(object):
         return D, phi0
     def box(self, theta, w=1, wc=1):
-        ''' Box car spreading function
+        """ Box car spreading function
         box(theta,w) = N(A)*I( -A < theta-theta0 < A)  (0 < A < pi)
@@ -1756,7 +1756,7 @@ class Spreading(object):
             The principal direction of D is always along the x-axis.
         phi0 : real scalar
             Parameter defining the actual principal direction of D.
-        '''
+        """
         A, TH, phi0 = self.chk_input(theta, w, wc)[:3]
         D = ((-A <= TH) & (TH <= A)) / (2. * A)
@@ -1765,7 +1765,7 @@ class Spreading(object):
     # Local sub functions
     def fourier2distpar(self, r1):
-        ''' Fourier coefficients to distribution parameter
+        """ Fourier coefficients to distribution parameter
         Parameters
         ----------
@@ -1790,14 +1790,14 @@ class Spreading(object):
         Poisson spreading  : R1 = X
         sech-2 spreading   : R1 = pi/(2*B*sinh(pi/(2*B))
         Wrapped Normal     : R1 = exp(-D1^2/2)
-        '''
+        """
         fourierfun = self._fourierdispatch.get(self.type[0])
         return fourierfun(r1)
     @staticmethod
     def fourier2x(r1):
-        ''' Returns the solution of r1 = x.
-        '''
+        """ Returns the solution of r1 = x.
+        """
         X = r1
         if any(X >= 1):
             raise ValueError('POISSON spreading: X value must be less than 1')
@@ -1805,8 +1805,8 @@ class Spreading(object):
     @staticmethod
     def fourier2a(r1):
-        ''' Returns the solution of R1 = sin(A)/A.
-        '''
+        """ Returns the solution of R1 = sin(A)/A.
+        """
         A0 = flipud(linspace(0, pi + 0.1, 1025))
         funA = interp1d(sinc(A0 / pi), A0)
         A0 = funA(r1.ravel())
@@ -1837,9 +1837,9 @@ class Spreading(object):
     @staticmethod
     def fourier2k(r1):
-        '''
+        """
         Returns the solution of R1 = besseli(1,K)/besseli(0,K),
-        '''
+        """
         def fun0(x):
             return sp.ive(1, x) / sp.ive(0, x)
@@ -1858,8 +1858,8 @@ class Spreading(object):
         return K
     def fourier2b(self, r1):
-        ''' Returns the solution of R1 = pi/(2*B*sinh(pi/(2*B)).
-        '''
+        """ Returns the solution of R1 = pi/(2*B*sinh(pi/(2*B)).
+        """
         B0 = hstack((linspace(_EPS, 5, 513), linspace(5.0001, 100)))
         funB = interp1d(self._r1ofsech2(B0), B0)
@@ -1879,8 +1879,8 @@ class Spreading(object):
         return B
     def fourier2d(self, r1):
-        ''' Returns the solution of R1 = exp(-D**2/2).
-        '''
+        """ Returns the solution of R1 = exp(-D**2/2).
+        """
         r = clip(r1, 0., 1.0)
         return where(r <= 0, inf, sqrt(-2.0 * log(r)))
@@ -1936,7 +1936,7 @@ class Spreading(object):
         return atleast_1d(self.s_a)
     def spread_parameter_s(self, wn):
-        ''' Return spread parameter, S, equivalent for the COS2S function
+        """ Return spread parameter, S, equivalent for the COS2S function
         Parameters
         ----------
@@ -1947,7 +1947,7 @@ class Spreading(object):
         -------
         S : ndarray
             spread parameter of COS2S functions
-        '''
+        """
         spread = dict(b=self._banner_spread,
                       d=self._donelan_spread,
@@ -1972,14 +1972,14 @@ class Spreading(object):
     @staticmethod
     def _donelan(wn):
-        ''' High frequency decay of B of sech2 paramater
-        '''
+        """ High frequency decay of B of sech2 paramater
+        """
         return 10.0 ** (-0.4 + 0.8393 * exp(-0.567 * log(wn ** 2)))
     @staticmethod
     def _r1ofsech2(B):
-        ''' R1OFSECH2   Computes R1 = pi./(2*B.*sinh(pi./(2*B)))
-        '''
+        """ R1OFSECH2   Computes R1 = pi./(2*B.*sinh(pi./(2*B)))
+        """
         realmax = finfo(float).max
         tiny = 1. / realmax
         x = clip(2. * B, tiny, realmax)
@@ -1988,7 +1988,7 @@ class Spreading(object):
                 -2. * xk / (exp(xk) * expm1(-2. * xk)))
     def tospecdata2d(self, specdata=None, theta=None, wc=0.52, nt=51):
-        '''
+        """
         MKDSPEC Make a directional spectrum
         frequency spectrum times spreading function
@@ -2020,7 +2020,7 @@ class Spreading(object):
         h = SD.plot()
         See also  spreading, rotspec, jonswap, torsethaugen
-        '''
+        """
         if specdata is None:
             specdata = Jonswap().tospecdata()

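Editor's aside (not part of the commit): a minimal usage sketch of the classes touched in this file, based only on the signatures visible above (Jonswap(Hm0, Tp), ModelSpectrum.tospecdata(), Spreading.tospecdata2d(specdata)); the 'cos2s' spreading type is the usual choice in the wafo tutorial, so treat the exact constructor argument as an assumption.

    import wafo.spectrum.models as wsm

    S = wsm.Jonswap(Hm0=7, Tp=11)     # one-dimensional JONSWAP model spectrum
    Sj = S.tospecdata()               # evaluate it on a default frequency grid

    D = wsm.Spreading('cos2s')        # cos-2s directional spreading function
    SD = D.tospecdata2d(Sj)           # directional spectrum = S(w) * D(theta, w)
    # SD.plot()                       # uncomment to inspect the result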
@@ -19,9 +19,9 @@ arr = asarray
 def now():
-    '''
+    """
     Return current date and time as a string
-    '''
+    """
     return strftime("%a, %d %b %Y %H:%M:%S", gmtime())
@@ -66,7 +66,7 @@ def _invnorm(q):
 def edf(x, method=2):
-    '''
+    """
     Returns Empirical Distribution Function (EDF).
     Parameters
@@ -87,7 +87,7 @@ def edf(x, method=2):
     >>> h = F.plot()
     See also edf, pdfplot, cumtrapz
-    '''
+    """
     z = atleast_1d(x)
     z.sort()
@@ -105,7 +105,7 @@ def edf(x, method=2):
 def edfcnd(x, c=None, method=2):
-    '''
+    """
     Returns empirical Distribution Function CoNDitioned that X>=c (EDFCND).
     Parameters
@@ -128,7 +128,7 @@ def edfcnd(x, c=None, method=2):
     >>> h = F.plot()
     See also edf, pdfplot, cumtrapz
-    '''
+    """
     z = atleast_1d(x)
     if c is None:
         c = floor(min(z.min(), 0))
@@ -146,7 +146,7 @@ def reslife(data, u=None, umin=None, umax=None, nu=None, nmin=3, alpha=0.05,
 def reslife(data, u=None, umin=None, umax=None, nu=None, nmin=3, alpha=0.05,
             plotflag=False):
-    '''
+    """
     Return Mean Residual Life, i.e., mean excesses vs thresholds
     Parameters
@@ -196,7 +196,7 @@ def reslife(data, u=None, umin=None, umax=None, nu=None, nmin=3, alpha=0.05,
     ---------
     genpareto
     fitgenparrange, disprsnidx
-    '''
+    """
     if u is None:
         sd = np.sort(data)
         n = len(data)
@@ -252,7 +252,7 @@ def reslife(data, u=None, umin=None, umax=None, nu=None, nmin=3, alpha=0.05,
 def dispersion_idx(
         data, t=None, u=None, umin=None, umax=None, nu=None, nmin=10, tb=1,
         alpha=0.05, plotflag=False):
-    '''Return Dispersion Index vs threshold
+    """Return Dispersion Index vs threshold
     Parameters
     ----------
@@ -329,7 +329,7 @@ def dispersion_idx(
     Cunnane, C. (1979) Note on the poisson assumption in
     partial duration series model. Water Resource Research, 15\bold{(2)}
     :489--494.}
-    '''
+    """
     n = len(data)
     if t is None:
@@ -407,7 +407,7 @@ def dispersion_idx(
 def decluster(data, t=None, thresh=None, tmin=1):
-    '''
+    """
     Return declustered peaks over threshold values
     Parameters
@@ -442,7 +442,7 @@ def decluster(data, t=None, thresh=None, tmin=1):
     See also
     --------
     fitgenpar, findpot, extremalidx
-    '''
+    """
     if t is None:
         t = np.arange(len(data))
     i = findpot(data, t, thresh, tmin)
@@ -450,7 +450,7 @@ def decluster(data, t=None, thresh=None, tmin=1):
 def findpot(data, t=None, thresh=None, tmin=1):
-    '''
+    """
     Retrun indices to Peaks over threshold values
     Parameters
@@ -490,7 +490,7 @@ def findpot(data, t=None, thresh=None, tmin=1):
     See also
     --------
     fitgenpar, decluster, extremalidx
-    '''
+    """
     Data = arr(data)
     if t is None:
         ti = np.arange(len(Data))
@@ -538,36 +538,35 @@ def findpot(data, t=None, thresh=None, tmin=1):
     return Ie
-def _find_ok_peaks(Ye, Te, Tmin):
-    '''
-    Return indices to the largest maxima that are at least Tmin
-    distance apart.
-    '''
-    Ny = len(Ye)
-    I = np.argsort(-Ye)  # sort in descending order
-    Te1 = Te[I]
-    oOrder = zeros(Ny, dtype=int)
-    oOrder[I] = range(Ny)  # indices to the variables original location
-    isTooClose = zeros(Ny, dtype=bool)
-    pool = zeros((Ny, 2))
-    T_range = np.hstack([-Tmin, Tmin])
-    K = 0
-    for i, ti in enumerate(Te1):
-        isTooClose[i] = np.any((pool[:K, 0] <= ti) & (ti <= pool[:K, 1]))
-        if not isTooClose[i]:
-            pool[K] = ti + T_range
-            K += 1
-    iOK, = where(1 - isTooClose[oOrder])
-    return iOK
+def _find_ok_peaks(y, t, t_min):
+    """
+    Return indices to the largest maxima that are at least t_min distance apart.
+    """
+    num_y = len(y)
+    i = np.argsort(-y)  # sort in descending order
+    tis = t[i]
+    o_order = zeros(num_y, dtype=int)
+    o_order[i] = range(num_y)  # indices to the variables original location
+    is_too_close = zeros(num_y, dtype=bool)
+    pool = zeros((num_y, 2))
+    t_range = np.hstack([-t_min, t_min])
+    k = 0
+    for i, ti in enumerate(tis):
+        is_too_close[i] = np.any((pool[:k, 0] <= ti) & (ti <= pool[:k, 1]))
+        if not is_too_close[i]:
+            pool[k] = ti + t_range
+            k += 1
+    i_ok, = where(1 - is_too_close[o_order])
+    return i_ok
 def declustering_time(t):
-    '''
+    """
     Returns minimum distance between clusters.
     Parameters
@@ -589,7 +588,7 @@ def declustering_time(t):
     >>> tc = declustering_time(Ie)
     >>> tc
     21
-    '''
+    """
     t0 = arr(t)
     nt = len(t0)
     if nt < 2:
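Editor's aside (not part of the commit): the rewritten _find_ok_peaks implements a greedy "keep the largest peaks that are more than t_min apart" rule. A compact sketch of the same idea, with hypothetical names and toy data:

    import numpy as np

    def largest_separated_peaks(y, t, t_min):
        # visit peaks from highest to lowest, keep those more than t_min away
        # from every peak kept so far (same rule as _find_ok_peaks above)
        kept, kept_times = [], []
        for i in np.argsort(-np.asarray(y)):
            if all(abs(t[i] - tk) > t_min for tk in kept_times):
                kept.append(i)
                kept_times.append(t[i])
        return np.sort(kept)

    y = np.array([1.0, 5.0, 2.0, 4.0, 3.0])
    t = np.array([0, 1, 2, 3, 10])
    print(largest_separated_peaks(y, t, t_min=3))   # -> [1 4]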
@@ -606,7 +605,7 @@ def declustering_time(t):
 def interexceedance_times(t):
-    '''
+    """
     Returns interexceedance times of data
     Parameters
@@ -624,12 +623,12 @@ def interexceedance_times(t):
     >>> interexceedance_times(t)
     array([1, 3, 5])
-    '''
+    """
     return np.diff(np.sort(t))
 def extremal_idx(ti):
-    '''
+    """
     Returns Extremal Index measuring the dependence of data
     Parameters
@@ -671,7 +670,7 @@ def extremal_idx(ti):
     Journal of the Royal Statistical society: Series B
     (Statistical Methodology) 54 (2), 545-556
     doi:10.1111/1467-9868.00401
-    '''
+    """
     t = arr(ti)
     tmax = t.max()
     if tmax <= 1:
@@ -693,7 +692,7 @@ def _logitinv(x):
 class RegLogit(object):
-    '''
+    """
     REGLOGIT Fit ordinal logistic regression model.
     CALL model = reglogit (options)
@@ -756,7 +755,7 @@ class RegLogit(object):
     b21.compare(b2)
     See also regglm, reglm, regnonlm
-    '''
+    """
     #% Original for MATLAB written by Gordon K Smyth <gks@maths.uq.oz.au>,
     #% U of Queensland, Australia, on Nov 19, 1990.  Last revision Aug 3,
@@ -840,7 +839,7 @@ class RegLogit(object):
         return y, X
     def fit(self, y, X=None, theta0=None, beta0=None):
-        '''
+        """
         Member variables
         .df      : degrees of freedom for error.
         .params  : estimated model parameters
@@ -858,7 +857,7 @@ class RegLogit(object):
         .d2L     : Hessian matrix (double derivative of log-likelihood)
         .dL      : First derivative of loglikelihood w.r.t. THETA and BETA.
-        '''
+        """
         self.family = 'multinomial'
         self.link = 'logit'
         y, X = self.check_xy(y, X)
@@ -1016,7 +1015,7 @@ class RegLogit(object):
             self.summary()
     def compare(self, object2):
-        ''' Compare small LOGIT versus large one
+        """ Compare small LOGIT versus large one
         CALL [pvalue] = compare(object2)
@@ -1026,7 +1025,7 @@ class RegLogit(object):
         model, and the residuals from the larger model.
         See also fitls
-        '''
+        """
         try:
             if self.numvar > object2.numvar:
@@ -1156,7 +1155,7 @@ class RegLogit(object):
     #end % summary
     def predict(self, Xnew=None, alpha=0.05, fulloutput=False):
-        '''LOGIT/PREDICT Predict from a fitted LOGIT object
+        """LOGIT/PREDICT Predict from a fitted LOGIT object
         CALL [y,ylo,yup] = predict(Xnew,options)
@@ -1167,7 +1166,7 @@ class RegLogit(object):
         options = options struct defining the calculation
             .alpha : confidence coefficient (default 0.05)
             .size  : size if binomial family (default 1).
-        '''
+        """
         [_mx, nx] = self.X.shape
         if Xnew is None:
@@ -1233,10 +1232,10 @@ class RegLogit(object):
         return y
     def loglike(self, beta, y, x, z, z1, numout=3):
-        '''
+        """
         [dev, dl, d2l, p] = loglike( y ,x,beta,z,z1)
         Calculates likelihood for the ordinal logistic regression model.
-        '''
+        """
         # Author: Gordon K. Smyth <gks@maths.uq.oz.au>
         zx = np.hstack((z, x))
         z1x = np.hstack((z1, x))
@@ -1248,10 +1247,10 @@ class RegLogit(object):
         p = g - g1
         dev = -2 * np.log(p).sum()
-        '''[dl, d2l] = derivatives of loglike(beta, y, x, z, z1)
+        """[dl, d2l] = derivatives of loglike(beta, y, x, z, z1)
         % Called by logistic_regression.  Calculates derivates of the
         % log-likelihood for ordinal logistic regression model.
-        '''
+        """
         # Author: Gordon K. Smyth <gks@maths.uq.oz.au>
         # Description: Derivates of log-likelihood in logistic regression

@@ -114,7 +114,7 @@ class TestQuadgr(unittest.TestCase):
     def test_integrand6(self):
         tol = 1e-9
-        Q, err = quadgr(lambda x: np.cos(x)*np.exp(-x), 0, np.inf, tol)
+        Q, err = quadgr(lambda x: np.cos(x) * np.exp(-x), 0, np.inf, tol)
         assert_array_almost_equal(Q, 0.5)
         self.assert_(err < tol)
         # (0.50000000000000044, 7.3296813063450372e-11)

@@ -7,8 +7,7 @@ Created on 5. aug. 2010
 import unittest
 from numpy.testing import TestCase, assert_array_almost_equal
-import wafo.data  # @UnusedImport
-import numpy as np  # @UnusedImport
+import wafo.data
 import wafo.objects as wo
 import wafo.spectrum.models as sm
 import wafo.transform.models as tm
