Cleanup of code

Branch: master
Author: Per A Brodtkorb, 9 years ago
Parent: e6ab39cbe7
Commit: 9522c6c24f

@ -14,7 +14,7 @@ note : Memorandum string.
date : Date and time of creation or change. date : Date and time of creation or change.
''' '''
from __future__ import division from __future__ import division, absolute_import
import warnings import warnings
import numpy as np import numpy as np
from numpy import (zeros, ones, sqrt, inf, where, nan, from numpy import (zeros, ones, sqrt, inf, where, nan,
@ -27,9 +27,9 @@ from scipy.linalg import toeplitz, lstsq
from scipy import sparse from scipy import sparse
from pylab import stineman_interp from pylab import stineman_interp
from wafo.containers import PlotData from ..containers import PlotData
from wafo.misc import sub_dict_select, nextpow2 # , JITImport from ..misc import sub_dict_select, nextpow2 # , JITImport
import wafo.spectrum as _wafospec from .. import spectrum as _wafospec
from scipy.sparse.linalg.dsolve.linsolve import spsolve from scipy.sparse.linalg.dsolve.linsolve import spsolve
from scipy.sparse.base import issparse from scipy.sparse.base import issparse
from scipy.signal.windows import parzen from scipy.signal.windows import parzen
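The import hunks above swap absolute `from wafo....` imports for relative ones, so the subpackage no longer hard-codes the top-level package name. A minimal, self-contained illustration of why the two forms resolve to the same module, using a throwaway package written to a temporary directory (every name below is made up for the demo, and the toy nextpow2 only mimics the real helper):

import importlib
import os
import sys
import tempfile

root = tempfile.mkdtemp()
sub = os.path.join(root, "mypkg", "sub")
os.makedirs(sub)
open(os.path.join(root, "mypkg", "__init__.py"), "w").close()
with open(os.path.join(root, "mypkg", "misc.py"), "w") as fh:
    fh.write("import math\n\ndef nextpow2(n):\n    return int(math.ceil(math.log2(n)))\n")
open(os.path.join(sub, "__init__.py"), "w").close()
with open(os.path.join(sub, "core.py"), "w") as fh:
    fh.write("from ..misc import nextpow2\n")      # relative form, as in the diff above

sys.path.insert(0, root)
core = importlib.import_module("mypkg.sub.core")
print(core.nextpow2(1000))                          # 10

The relative form keeps resolving if the package is vendored or installed under another name, which is the usual motivation for this kind of cleanup.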

@ -355,8 +355,8 @@ class Rind(object):
dev = sqrt(diag(BIG)) # std dev = sqrt(diag(BIG)) # std
ind = nonzero(indI[1:] > -1)[0] ind = nonzero(indI[1:] > -1)[0]
infin = repeat(2, len(indI) - 1) infin = repeat(2, len(indI) - 1)
infin[ind] = (2 - (Bup[0, ind] > infinity * dev[indI[ind + 1]]) infin[ind] = (2 - (Bup[0, ind] > infinity * dev[indI[ind + 1]]) -
- 2 * (Blo[0, ind] < -infinity * dev[indI[ind + 1]])) 2 * (Blo[0, ind] < -infinity * dev[indI[ind + 1]]))
Bup[0, ind] = minimum(Bup[0, ind], infinity * dev[indI[ind + 1]]) Bup[0, ind] = minimum(Bup[0, ind], infinity * dev[indI[ind + 1]])
Blo[0, ind] = maximum(Blo[0, ind], -infinity * dev[indI[ind + 1]]) Blo[0, ind] = maximum(Blo[0, ind], -infinity * dev[indI[ind + 1]])
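The `infin` vector assembled here encodes which integration limits are effectively infinite, following the MVNDST-style convention used by wafo's mvn backend: 2 means both limits finite, 1 only the lower limit finite, 0 only the upper limit finite, -1 both infinite. A standalone version of the same arithmetic (the cutoff of 37 standard deviations mirrors the `infinity` constant above; treat that value as an assumption):

import numpy as np

def infin_codes(blo, bup, infinity=37.0, dev=1.0):
    # 2: finite lower and upper, 1: lower only, 0: upper only, -1: neither
    blo = np.asarray(blo, dtype=float)
    bup = np.asarray(bup, dtype=float)
    return (2 - (bup > infinity * dev).astype(int)
            - 2 * (blo < -infinity * dev).astype(int))

print(infin_codes([-1.0, -100.0, -1.0, -100.0],
                  [1.0, 1.0, 100.0, 100.0]))        # [ 2  0  1 -1]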
@ -992,10 +992,10 @@ def prbnorm2d(a, b, r):
infin = np.repeat(2, 2) - (upper > infinity) - 2 * (lower < -infinity) infin = np.repeat(2, 2) - (upper > infinity) - 2 * (lower < -infinity)
if np.all(infin == 2): if np.all(infin == 2):
return (bvd(lower[0], lower[1], correl) return (bvd(lower[0], lower[1], correl) -
- bvd(upper[0], lower[1], correl) bvd(upper[0], lower[1], correl) -
- bvd(lower[0], upper[1], correl) bvd(lower[0], upper[1], correl) +
+ bvd(upper[0], upper[1], correl)) bvd(upper[0], upper[1], correl))
elif (infin[0] == 2 and infin[1] == 1): elif (infin[0] == 2 and infin[1] == 1):
return (bvd(lower[0], lower[1], correl) - return (bvd(lower[0], lower[1], correl) -
bvd(upper[0], lower[1], correl)) bvd(upper[0], lower[1], correl))
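prbnorm2d assembles the rectangle probability P(a < X < b) for a standard bivariate normal by inclusion-exclusion over the four corners. The same identity written against scipy.stats.multivariate_normal makes a convenient cross-check; this is a sketch of the identity in CDF form, not of the module's bvd helper:

from scipy.stats import multivariate_normal

def rect_prob_2d(a, b, r):
    # P(a[0] < X0 < b[0], a[1] < X1 < b[1]) for zero means, unit variances
    # and correlation r, via inclusion-exclusion of the joint CDF.
    F = multivariate_normal(mean=[0.0, 0.0], cov=[[1.0, r], [r, 1.0]]).cdf
    return (F([b[0], b[1]]) - F([a[0], b[1]])
            - F([b[0], a[1]]) + F([a[0], a[1]]))

print(rect_prob_2d([-1.0, -1.0], [1.0, 1.0], 0.0))  # ~0.4660 = (2*Phi(1) - 1)**2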

@ -266,8 +266,8 @@ def romberg(fun, a, b, releps=1e-3, abseps=1e-3):
fp[i] = 4 * fp[i - 1] fp[i] = 4 * fp[i - 1]
# Richardson extrapolation # Richardson extrapolation
for k in range(i): for k in range(i):
rom[two, k + 1] = rom[two, k] + \ rom[two, k + 1] = (rom[two, k] +
(rom[two, k] - rom[one, k]) / (fp[k] - 1) (rom[two, k] - rom[one, k]) / (fp[k] - 1))
Ih1 = Ih2 Ih1 = Ih2
Ih2 = Ih4 Ih2 = Ih4
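The Richardson update being re-wrapped here is the standard Romberg step: each column of the table cancels one more power of h**2 from the composite trapezoid estimates. A compact, self-contained version of the whole scheme for reference (a sketch, not wafo.integrate.romberg itself):

import numpy as np

def romberg_sketch(fun, a, b, n_levels=8):
    # R[k, 0] are composite trapezoid values on 2**k panels; the extrapolation is
    # R[k, j] = R[k, j-1] + (R[k, j-1] - R[k-1, j-1]) / (4**j - 1).
    R = np.zeros((n_levels, n_levels))
    h = b - a
    R[0, 0] = 0.5 * h * (fun(a) + fun(b))
    for k in range(1, n_levels):
        h /= 2.0
        x = a + h * np.arange(1, 2 ** k, 2)          # only the new midpoints
        R[k, 0] = 0.5 * R[k - 1, 0] + h * np.sum(fun(x))
        for j in range(1, k + 1):
            R[k, j] = R[k, j - 1] + (R[k, j - 1] - R[k - 1, j - 1]) / (4 ** j - 1)
    return R[n_levels - 1, n_levels - 1]

print(romberg_sketch(np.exp, 0.0, 1.0))              # ~ e - 1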
@ -1119,6 +1119,8 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
''' '''
# Author: jonas.lundgren@saabgroup.com, 2009. license BSD # Author: jonas.lundgren@saabgroup.com, 2009. license BSD
# Order limits (required if infinite limits) # Order limits (required if infinite limits)
a = np.asarray(a)
b = np.asarray(b)
if a == b: if a == b:
Q = b - a Q = b - a
err = b - a err = b - a
@ -1138,17 +1140,17 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
# Change of variable # Change of variable
if np.isfinite(a) & np.isinf(b): if np.isfinite(a) & np.isinf(b):
# a to inf # a to inf
fun1 = lambda t: fun(a + t / (1 - t)) / (1 - t) ** 2 [Q, err] = quadgr(lambda t: fun(a + t / (1 - t)) / (1 - t) ** 2,
[Q, err] = quadgr(fun1, 0, 1, abseps) 0, 1, abseps)
elif np.isinf(a) & np.isfinite(b): elif np.isinf(a) & np.isfinite(b):
# -inf to b # -inf to b
fun2 = lambda t: fun(b + t / (1 + t)) / (1 + t) ** 2 [Q, err] = quadgr(lambda t: fun(b + t / (1 + t)) / (1 + t) ** 2,
[Q, err] = quadgr(fun2, -1, 0, abseps) -1, 0, abseps)
else: # -inf to inf else: # -inf to inf
fun1 = lambda t: fun(t / (1 - t)) / (1 - t) ** 2 [Q1, err1] = quadgr(lambda t: fun(t / (1 - t)) / (1 - t) ** 2,
fun2 = lambda t: fun(t / (1 + t)) / (1 + t) ** 2 0, 1, abseps / 2)
[Q1, err1] = quadgr(fun1, 0, 1, abseps / 2) [Q2, err2] = quadgr(lambda t: fun(t / (1 + t)) / (1 + t) ** 2,
[Q2, err2] = quadgr(fun2, -1, 0, abseps / 2) -1, 0, abseps / 2)
Q = Q1 + Q2 Q = Q1 + Q2
err = err1 + err2 err = err1 + err2
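The rewritten branches inline the change of variable that maps an infinite limit onto a finite one: for [a, inf) the substitution x = a + t/(1 - t) sends t in (0, 1) to x in (a, inf) with dx = dt/(1 - t)**2, which is exactly the extra factor in the transformed integrand (the other two branches are analogous). A quick, independent check of that substitution with scipy.integrate.quad:

import numpy as np
from scipy.integrate import quad

fun = lambda x: np.exp(-x)
a = 0.0
val_direct, _ = quad(fun, a, np.inf)
# Same integral after x = a + t/(1 - t), dx = dt/(1 - t)**2, t in (0, 1):
val_mapped, _ = quad(lambda t: fun(a + t / (1 - t)) / (1 - t) ** 2, 0.0, 1.0)
print(val_direct, val_mapped)                        # both ~1.0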
@ -1170,9 +1172,9 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
dtype = np.result_type(fun(a), fun(b)) dtype = np.result_type(fun(a), fun(b))
# Initiate vectors # Initiate vectors
Q0 = zeros(max_iter, dtype=dtype) # Quadrature Q0 = zeros(max_iter, dtype=dtype) # Quadrature
Q1 = zeros(max_iter, dtype=dtype) # First Richardson extrapolation Q1 = zeros(max_iter, dtype=dtype) # First Richardson extrapolation
Q2 = zeros(max_iter, dtype=dtype) # Second Richardson extrapolation Q2 = zeros(max_iter, dtype=dtype) # Second Richardson extrapolation
# One interval # One interval
hh = (b - a) / 2 # Half interval length hh = (b - a) / 2 # Half interval length
@ -1187,8 +1189,8 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
hh = hh / 2 hh = hh / 2
x = np.hstack([x + a, x + b]) / 2 x = np.hstack([x + a, x + b]) / 2
# Quadrature # Quadrature
Q0[k] = hh * \ Q0[k] = hh * np.sum(wq * np.sum(np.reshape(fun(x), (-1, nq)), axis=0),
np.sum(wq * np.sum(np.reshape(fun(x), (-1, nq)), axis=0), axis=0) axis=0)
# Richardson extrapolation # Richardson extrapolation
if k >= 5: if k >= 5:
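The statement being reflowed above evaluates one composite Gauss-Legendre pass: fun is called once on all subinterval nodes, the result is folded back to a (subintervals, nq) array and weight-summed. The same pattern in isolation, built on numpy's leggauss nodes (a sketch in the same spirit, not quadgr's internals):

import numpy as np

def composite_gauss(fun, a, b, n_sub=8, nq=12):
    # Evaluate fun on all subinterval nodes at once, reshape back to
    # (n_sub, nq), apply the Gauss-Legendre weights along the node axis.
    xq, wq = np.polynomial.legendre.leggauss(nq)     # nodes/weights on [-1, 1]
    edges = np.linspace(a, b, n_sub + 1)
    mid = 0.5 * (edges[:-1] + edges[1:])[:, None]
    hh = 0.5 * (edges[1] - edges[0])                 # half subinterval length
    x = mid + hh * xq[None, :]
    return hh * np.sum(np.reshape(fun(x.ravel()), (-1, nq)) * wq, axis=1).sum()

print(composite_gauss(np.sin, 0.0, np.pi))           # ~2.0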
@ -1424,207 +1426,7 @@ def test_docstrings():
doctest.testmod() doctest.testmod()
# def levin_integrate():
# ''' An oscillatory integral
# Sheehan Olver, December 2010
#
#
# (Chebfun example quad/LevinIntegrate.m)
#
# This example computes the highly oscillatory integral of
#
# f * exp( 1i * w * g ),
#
# over (0,1) using the Levin method [1]. This method computes the integral
# by rewriting it as an ODE
#
# u' + 1i * w * g' u = f,
#
# so that the indefinite integral of f * exp( 1i * w * g ) is
#
# u * exp( 1i * w * g ).
#
#
#
# We use as an example
#
# f = 1 / ( x + 2 );
# g = cos( x - 2 );
# w = 100000;
#
# #
# References:
#
# [1] Levin, D., Procedures for computing one and two-dimensional integrals
# of functions with rapid irregular oscillations, Maths Comp., 38 (1982) 531--538
# '''
# exp = np.exp
# domain=[0, 1]
# x = Chebfun.identity(domain=domain)
# f = 1./(x+2)
# g = np.cos(x-2)
# D = np.diff(domain)
#
#
# # Here is are plots of this integrand, with w = 100, in complex space
# w = 100;
# line_opts = dict(line_width=1.6)
# font_opts = dict(font_size= 14)
# #
#
# intg = f*exp(1j*w*g)
# xs, ys, xi, yi, d = intg.plot_data(1000)
# #intg.plot(with_interpolation_points=True)
# #xi = np.linspace(0, 1, 1024)
# # plt.plot(xs, ys) # , **line_opts)
# # plt.plot(xi, yi, 'r.')
# # #axis equal
# # plt.title('Complex plot of integrand') #,**font_opts)
# # plt.show('hold')
# ##
# # and of just the real part
# # intgr = np.real(intg)
# # xs, ys, xi, yi, d = intgr.plot_data(1000)
# #intgr.plot()
# # plt.plot(xs, np.real(ys)) # , **line_opts)
# # plt.plot(xi, np.real(yi), 'r.')
# #axis equal
# # plt.title('Real part of integrand') #,**font_opts)
# # plt.show('hold')
#
# ##
# # The Levin method will be accurate for large and small w, and the time
# # taken is independent of w. Here we take a reasonably large value of w.
# w = 1000;
# intg = f*exp(1j*w*g)
# val0 = np.sum(intg)
# # val1 = sum(intg)
# print(val0)
# ##
# # Start timing
# #tic
#
# ##
# # Construct the operator L
# L = D + 1j*w*np.diag(g.differentiate())
#
# ##
# # From asymptotic analysis, we know that there exists a solution to the
# # equation which is non-oscillatory, though we do not know what initial
# # condition it satisfies. Thus we find a particular solution to this
# # equation with no boundary conditions.
#
# u = L / f
#
# ##
# # Because L is a differential operator with derivative order 1, \ expects
# # it to be given a boundary condition, which is why the warning message is
# # displayed. However, this doesn't cause any problems: though there are,
# # in fact, a family of solutions to the ODE without boundary conditions
# # due to the kernel
# #
# # exp(- 1i * w * g),
# #
# # it does not actually matter which particular solution is computed.
# # Non-uniqueness is also not an issue: \ in matlab is least squares, hence
# # does not require uniqueness. The existence of a non-oscillatory solution
# # ensures that \ converges to a u with length independent of w.
# #
# # One could prevent the warning by applying a boundary condition consistent
# # with the rest of the system, that is
# # L.lbc = {L(1,:),f(0)};
#
# ##
# # Now we evaluate the antiderivative at the endpoints to obtain the
# # integral.
#
# u(1)*exp(1j*w*g(1)) - u(0)*exp(1j*w*g(0))
#
# #toc
#
#
# ##
# # Here is a way to compute the integral using Clenshaw--Curtis quadrature.
# # As w becomes large, this takes an increasingly long time as the
# # oscillations must be resolved.
#
# #tic
# sum( f*exp(1j*w*g) )
# #toc
# aLevinTQ[omega_,a_,b_,f_,g_,nu_,wprec_,prm_,test_,basis_,np_]:=
#
# Module[{r,betam,A,AA,BB,S,F,w=N[omega, wprec]},
# M=Length[nu]-1;
# PB[k_,t_]:=If[basis==1,t^k,ChebyshevT[k,t]];
#
# ff[t_]:=((b-a)/2)*f[(b-a)*t/2+(a+b)/2];
#
# gg[t_]:=g[(b-a)*t/2+(a+b)/2];
# dgg[t_]:=Derivative[1][gg][t];
#
# If[test==0, betam=Min[Abs[dgg[-1]*w], Abs[dgg[1]*w]];
# While[prm*M/betam >=1, betam=2*betam]];
# If[test>0,x[k_]:=N[Cos[k*Pi/M], wprec],x[k_]:=
# Which[k<prm*M, N[-1+k/betam, wprec], k==Ceiling[prm*M],0,
# k>prm*M, N[1-(M-k)/betam, wprec]]];
#
# Psi[k_,t_]:=Derivative[0,1][PB][k,t]+I*w*dgg[t]*PB[k,t];
#
# ne[j_]:=nu[[j+1]]; S[-1]=0; S[j_]:=Sum[ne[i],{i,0,j}];
# nn=S[M]-1;
# A=ConstantArray[0,{nn+1,nn+1}];
# F=ConstantArray[0,nn+1]; r=0;
# While[r<M+1, Do[Do[ AA[j,k]=
# Limit[Derivative[0,Mod[j-S[r-1],ne[r]]][Psi][k,t],t->x[r]],
# {k,0,S[M]-1}],{j,S[r-1],S[r]-1}];
#
# Do[BB[j]=Limit[Derivative[Mod[j-S[r-1],ne[r]]][ff][t],
# t->x[r]],{j,S[r-1],S[r]-1}];
# Do[F[[j]]=BB[j-1],{j,S[r-1]+1,S[r]}];
# Do[Do[A[[j,k]]=AA[j-1,k-1],{k,1,S[M]}],{j,S[r-1]+1,S[r]}];
# r=r+1;]; (*sv=SingularValueList[N[A,wprec]];
# con=sv[[1]]/sv[[-1]]; Print["cond2(A)= ",N[con,3]];*)
# LS=Block[{MaxExtraPrecision=0},LeastSquares[N[A, wprec],F]];
# vvv[t_]:=Sum[LS[[k+1]]*PB[k,t], {k,0,nn}];
# NR=vvv[1]*Exp[I*w*gg[1]]-vvv[-1]*Exp[I*w*gg[-1]];
# Print["n=", np+ii+2s-2, ", Result= ", N[NR, wprec/2+5]];
# If[ii==0,PR=NR];];
# (* End of subroutine aLevinTQ /A.I. Hascelik, July 2013/ *)
#
# def main_levin():
# a=1; b=2;
# omega=100;
# prm=1/2;
# f[t_]=Exp[4t]
# g[t_]=t+Exp[4t]*Gamma[t]
#
# dg[t_]:=Derivative[1][g][t];
#
# prec=16
# wprec=2*prec
# delta = min(abs(omega*dg(a)), abs(omega*dg(b)))
# alpha = min(abs(omega*g(a)), abs(omega*g(b)))
# s=1; #(*if s>1, the integral is computed by Q_s^L*)
# test= 1 if delta<10 or alpha <=10 or s>1 else 0
#
# m = 1 if s>1 else np.floor(prec/max(np.log10(beta+1),1)+2)
# nc = 2*m+1 #(*or np=2m, number of collocation points*)
# basis=1; # (*take basis=0 for the Chebysev basis*)
# for ii in range(0, 2, 2):
# nu = np.ones((nc+ii,)) # ConstantArray[1,nc+ii];
# nu[0] = s
# nu[-1] = s
# #nu[[1]]=s;
# #nu[[-1]]=s;
# aLevinTQ[omega,a,b,f,g,nu,wprec,prm,test,basis,nc],
# #{ii,0,2,2}];
# Print["Error= ",Abs[NR-PR]];
if __name__ == '__main__': if __name__ == '__main__':
# levin_integrate()
test_docstrings() test_docstrings()
# qdemo(np.exp, 0, 3, plot_error=True) # qdemo(np.exp, 0, 3, plot_error=True)
# plt.show('hold') # plt.show('hold')
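The large block removed above was a commented-out prototype of the Levin method for highly oscillatory integrals of the form int f(x)*exp(1j*w*g(x)) dx: solve u' + 1j*w*g'(x)*u = f by collocation, then the integral is u(b)*exp(1j*w*g(b)) - u(a)*exp(1j*w*g(a)). A small working sketch of that idea on the example quoted in the comments (f = 1/(x+2), g = cos(x-2)); it uses a plain monomial basis and least squares, so it is a rough illustration rather than the Chebfun/Mathematica routines the comments transcribe:

import numpy as np
from scipy.integrate import quad

def levin_sketch(f, g, dg, a, b, w, n=12):
    # Chebyshev collocation points on [a, b], monomial basis in a scaled variable
    x = 0.5 * (a + b) + 0.5 * (b - a) * np.cos(np.pi * np.arange(n) / (n - 1))
    s = 2.0 * (x - a) / (b - a) - 1.0
    k = np.arange(n)
    P = s[:, None] ** k                               # basis values
    dP = k * s[:, None] ** np.clip(k - 1, 0, None) * (2.0 / (b - a))
    A = dP + 1j * w * dg(x)[:, None] * P              # discretised u' + i*w*g'*u
    c = np.linalg.lstsq(A, f(x).astype(complex), rcond=None)[0]
    u = lambda si: np.polyval(c[::-1], si)            # u in the scaled variable
    return u(1.0) * np.exp(1j * w * g(b)) - u(-1.0) * np.exp(1j * w * g(a))

f = lambda x: 1.0 / (x + 2)
g = lambda x: np.cos(x - 2)
dg = lambda x: -np.sin(x - 2)
w = 100.0
I_levin = levin_sketch(f, g, dg, 0.0, 1.0, w)
I_ref = (quad(lambda x: np.real(f(x) * np.exp(1j * w * g(x))), 0, 1, limit=200)[0]
         + 1j * quad(lambda x: np.imag(f(x) * np.exp(1j * w * g(x))), 0, 1, limit=200)[0])
print(abs(I_levin - I_ref))                           # small

Unlike the brute-force quad reference, the cost of the collocation step does not grow with w, which is the point the removed comments were making.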

@ -7,4 +7,4 @@ Spectrum package in WAFO Toolbox.
from __future__ import absolute_import from __future__ import absolute_import
from .core import * from .core import *
from . import models from . import models
from wafo.wave_theory import dispersion_relation from ..wave_theory import dispersion_relation

File diff suppressed because it is too large.

@ -1,3 +1,4 @@
#!/usr/bin/env python
""" """
Models module Models module
------------- -------------
@ -26,17 +27,7 @@ Spreading - Directional spreading function.
""" """
# Name: models from __future__ import absolute_import, division
# Purpose: Interface to various spectrum models
#
# Author: pab
#
# Created: 29.08.2008
# Copyright: (c) pab 2008
# Licence: <your licence>
#!/usr/bin/env python
from __future__ import division
import warnings import warnings
from scipy.interpolate import interp1d from scipy.interpolate import interp1d
@ -50,17 +41,20 @@ from numpy import (inf, atleast_1d, newaxis, any, minimum, maximum, array,
cos, abs, sinh, isfinite, mod, expm1, tanh, cosh, finfo, cos, abs, sinh, isfinite, mod, expm1, tanh, cosh, finfo,
ones, ones_like, isnan, zeros_like, flatnonzero, sinc, ones, ones_like, isnan, zeros_like, flatnonzero, sinc,
hstack, vstack, real, flipud, clip) hstack, vstack, real, flipud, clip)
from wafo.wave_theory.dispersion_relation import w2k, k2w # @UnusedImport from ..wave_theory.dispersion_relation import w2k, k2w # @UnusedImport
from wafo.spectrum import SpecData1D, SpecData2D from .core import SpecData1D, SpecData2D
sech = lambda x: 1.0 / cosh(x)
eps = finfo(float).eps
__all__ = ['Bretschneider', 'Jonswap', 'Torsethaugen', 'Wallop', 'McCormick', __all__ = ['Bretschneider', 'Jonswap', 'Torsethaugen', 'Wallop', 'McCormick',
'OchiHubble', 'Tmaspec', 'jonswap_peakfact', 'jonswap_seastate', 'OchiHubble', 'Tmaspec', 'jonswap_peakfact', 'jonswap_seastate',
'spreading', 'w2k', 'k2w', 'phi1'] 'spreading', 'w2k', 'k2w', 'phi1']
_EPS = finfo(float).eps
def sech(x):
return 1.0 / cosh(x)
def _gengamspec(wn, N=5, M=4): def _gengamspec(wn, N=5, M=4):
''' Return Generalized gamma spectrum in dimensionless form ''' Return Generalized gamma spectrum in dimensionless form
@ -409,7 +403,7 @@ def jonswap_seastate(u10, fetch=150000., method='lewis', g=9.81,
# The following formulas are from Lewis and Allos 1990: # The following formulas are from Lewis and Allos 1990:
zeta = g * fetch / (u10 ** 2) # dimensionless fetch, Table 1 zeta = g * fetch / (u10 ** 2) # dimensionless fetch, Table 1
#zeta = min(zeta, 2.414655013429281e+004) # zeta = min(zeta, 2.414655013429281e+004)
if method.startswith('h'): if method.startswith('h'):
if method[-1] == '3': # Hasselman et.al (1973) if method[-1] == '3': # Hasselman et.al (1973)
A = 0.076 * zeta ** (-0.22) A = 0.076 * zeta ** (-0.22)
@ -543,7 +537,7 @@ class Jonswap(ModelSpectrum):
self.method = method self.method = method
self.wnc = wnc self.wnc = wnc
if self.gamma == None or not isfinite(self.gamma) or self.gamma < 1: if self.gamma is None or not isfinite(self.gamma) or self.gamma < 1:
self.gamma = jonswap_peakfact(Hm0, Tp) self.gamma = jonswap_peakfact(Hm0, Tp)
self._preCalculateAg() self._preCalculateAg()
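When gamma is not supplied, or is not a finite value of at least 1, Jonswap falls back to jonswap_peakfact(Hm0, Tp). A sketch of the kind of Torsethaugen-style fit such a routine applies; the constants below are quoted from memory and should be treated as an assumption, and the clip to [1, 7] is part of the sketch:

import numpy as np

def peak_factor_sketch(Hm0, Tp):
    # gamma = exp(3.484*(1 - 0.1975*D*x**4)) with x = Tp/sqrt(Hm0),
    # D = 0.036 - 0.0056*x; constants assumed, result clipped to [1, 7].
    x = Tp / np.sqrt(Hm0)
    D = 0.036 - 0.0056 * x
    gam = np.exp(3.484 * (1.0 - 0.1975 * D * x ** 4))
    return np.clip(gam, 1.0, 7.0)

print(peak_factor_sketch(7.0, 11.0))   # roughly 2.4 for this sea state, if the constants hold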
@ -580,7 +574,7 @@ class Jonswap(ModelSpectrum):
if self.gamma == 1: if self.gamma == 1:
self.Ag = 1.0 self.Ag = 1.0
self.method = 'parametric' self.method = 'parametric'
elif self.Ag != None: elif self.Ag is not None:
self.method = 'custom' self.method = 'custom'
if self.Ag <= 0: if self.Ag <= 0:
raise ValueError('Ag must be larger than 0!') raise ValueError('Ag must be larger than 0!')
@ -615,7 +609,7 @@ class Jonswap(ModelSpectrum):
# elseif N == 5 && M == 4, # elseif N == 5 && M == 4,
# options.Ag = (1+1.0*log(gammai).**1.16)/gammai # options.Ag = (1+1.0*log(gammai).**1.16)/gammai
# options.Ag = (1-0.287*log(gammai)) # options.Ag = (1-0.287*log(gammai))
### options.normalizeMethod = 'Three' # options.normalizeMethod = 'Three'
# elseif N == 4 && M == 4, # elseif N == 4 && M == 4,
# options.Ag = (1+1.1*log(gammai).**1.19)/gammai # options.Ag = (1+1.1*log(gammai).**1.19)/gammai
else: else:
@ -624,7 +618,7 @@ class Jonswap(ModelSpectrum):
if self.sigmaA != 0.07 or self.sigmaB != 0.09: if self.sigmaA != 0.07 or self.sigmaB != 0.09:
warnings.warn('Use integration to calculate Ag when ' + warnings.warn('Use integration to calculate Ag when ' +
'sigmaA~=0.07 or sigmaB~=0.09') 'sigmaA!=0.07 or sigmaB!=0.09')
def peak_e_factor(self, wn): def peak_e_factor(self, wn):
''' PEAKENHANCEMENTFACTOR ''' PEAKENHANCEMENTFACTOR
@ -790,9 +784,9 @@ class Tmaspec(Jonswap):
self.type = 'TMA' self.type = 'TMA'
def phi(self, w, h=None, g=None): def phi(self, w, h=None, g=None):
if h == None: if h is None:
h = self.h h = self.h
if g == None: if g is None:
g = self.g g = self.g
return phi1(w, h, g) return phi1(w, h, g)
@ -1005,8 +999,8 @@ class Torsethaugen(ModelSpectrum):
C = (Nw - 1) / Mw C = (Nw - 1) / Mw
B = Nw / Mw B = Nw / Mw
G0w = B ** C * Mw / sp.gamma(C) # normalizing factor G0w = B ** C * Mw / sp.gamma(C) # normalizing factor
#G0w = exp(C*log(B)+log(Mw)-gammaln(C)) # G0w = exp(C*log(B)+log(Mw)-gammaln(C))
#G0w = Mw/((B)**(-C)*gamma(C)) # G0w = Mw/((B)**(-C)*gamma(C))
if Hpw > 0: if Hpw > 0:
Tpw = (16 * S0 * (1 - exp(-Hm0 / S1)) * (0.4) ** Tpw = (16 * S0 * (1 - exp(-Hm0 / S1)) * (0.4) **
@ -1014,7 +1008,7 @@ class Torsethaugen(ModelSpectrum):
else: else:
Tpw = inf Tpw = inf
#Tpw = max(Tpw,2.5) # Tpw = max(Tpw,2.5)
gammaw = 1 gammaw = 1
if monitor: if monitor:
if Rpw > 0.1: if Rpw > 0.1:
@ -1095,14 +1089,14 @@ class McCormick(Bretschneider):
self.type = 'McCormick' self.type = 'McCormick'
self.Hm0 = Hm0 self.Hm0 = Hm0
self.Tp = Tp self.Tp = Tp
if Tz == None: if Tz is None:
Tz = 0.8143 * Tp Tz = 0.8143 * Tp
self.Tz = Tz self.Tz = Tz
if chk_seastate: if chk_seastate:
self.chk_seastate() self.chk_seastate()
if M == None and self.Hm0 > 0: if M is None and self.Hm0 > 0:
self._TpdTz = Tp / Tz self._TpdTz = Tp / Tz
M = 1.0 / optimize.fminbound(self._localoptfun, 0.01, 5) M = 1.0 / optimize.fminbound(self._localoptfun, 0.01, 5)
self.M = M self.M = M
@ -1410,7 +1404,7 @@ class Spreading(object):
5 to 30 being a function of dimensionless wind speed. 5 to 30 being a function of dimensionless wind speed.
However, Goda and Suzuki (1975) proposed SP = 10 for wind waves, SP = 25 However, Goda and Suzuki (1975) proposed SP = 10 for wind waves, SP = 25
for swell with short decay distance and SP = 75 for long decay distance. for swell with short decay distance and SP = 75 for long decay distance.
Compared to experiments Krogstad et al. (1998) found that m_a = 5 +/- eps Compared to experiments Krogstad et al. (1998) found that m_a = 5 +/- _EPS
and that -1< m_b < -3.5. and that -1< m_b < -3.5.
Values given in the litterature: [s_a s_b m_a m_b wn_lo wn_c wn_up] Values given in the litterature: [s_a s_b m_a m_b wn_lo wn_c wn_up]
(Mitsuyasu: s_a == s_b) (cos-2s) [15 15 5 -2.5 0 1 3 ] (Mitsuyasu: s_a == s_b) (cos-2s) [15 15 5 -2.5 0 1 3 ]
@ -1510,7 +1504,7 @@ class Spreading(object):
if not self.method[0] in methods: if not self.method[0] in methods:
raise ValueError('Unknown method') raise ValueError('Unknown method')
self.method = methods[self.method[0]] self.method = methods[self.method[0]]
elif self.method == None: elif self.method is None:
pass pass
else: else:
if method < 0 or 3 < method: if method < 0 or 3 < method:
@ -1561,7 +1555,7 @@ class Spreading(object):
'The length of theta0 must equal to 1 or the length of w') 'The length of theta0 must equal to 1 or the length of w')
else: else:
TH = mod(theta - th0 + pi, 2 * pi) - pi # make sure -pi<=TH<pi TH = mod(theta - th0 + pi, 2 * pi) - pi # make sure -pi<=TH<pi
if self.method != None: # frequency dependent spreading if self.method is not None: # frequency dependent spreading
TH = TH[:, newaxis] TH = TH[:, newaxis]
# Get spreading parameter # Get spreading parameter
@ -1574,7 +1568,7 @@ class Spreading(object):
r1 = abs(s / (s + 1)) r1 = abs(s / (s + 1))
# Find distribution parameter from first Fourier coefficient. # Find distribution parameter from first Fourier coefficient.
s_par = self.fourier2distpar(r1) s_par = self.fourier2distpar(r1)
if self.method != None: if self.method is not None:
s_par = s_par[newaxis, :] s_par = s_par[newaxis, :]
return s_par, TH, phi0, Nt return s_par, TH, phi0, Nt
@ -1823,7 +1817,7 @@ class Spreading(object):
A[ix] = Ai + 0.5 * (da[ix] - Ai) * (Ai <= 0.0) A[ix] = Ai + 0.5 * (da[ix] - Ai) * (Ai <= 0.0)
ix = flatnonzero( ix = flatnonzero(
(abs(da) > sqrt(eps) * abs(A)) * (abs(da) > sqrt(eps))) (abs(da) > sqrt(_EPS) * abs(A)) * (abs(da) > sqrt(_EPS)))
if ix.size == 0: if ix.size == 0:
if any(A > pi): if any(A > pi):
raise ValueError( raise ValueError(
@ -1837,8 +1831,10 @@ class Spreading(object):
''' '''
Returns the solution of R1 = besseli(1,K)/besseli(0,K), Returns the solution of R1 = besseli(1,K)/besseli(0,K),
''' '''
def fun0(x):
return sp.ive(1, x) / sp.ive(0, x)
K0 = hstack((linspace(0, 10, 513), linspace(10.00001, 100))) K0 = hstack((linspace(0, 10, 513), linspace(10.00001, 100)))
fun0 = lambda x: sp.ive(1, x) / sp.ive(0, x)
funK = interp1d(fun0(K0), K0) funK = interp1d(fun0(K0), K0)
K0 = funK(r1.ravel()) K0 = funK(r1.ravel())
k1 = flatnonzero(isnan(K0)) k1 = flatnonzero(isnan(K0))
@ -1848,15 +1844,14 @@ class Spreading(object):
ix0 = flatnonzero(r1 != 0.0) ix0 = flatnonzero(r1 != 0.0)
K = zeros_like(r1) K = zeros_like(r1)
fun = lambda x: fun0(x) - r1[ix]
for ix in ix0: for ix in ix0:
K[ix] = optimize.fsolve(fun, K0[ix]) K[ix] = optimize.fsolve(lambda x: fun0(x) - r1[ix], K0[ix])
return K return K
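fourier2k inverts r1 = I1(K)/I0(K), the first Fourier coefficient of the von Mises (Poisson) spreading, by interpolating a precomputed table and polishing with fsolve; the exponentially scaled sp.ive avoids overflow for large K. A stripped-down version of the same inversion using a bracketing root-finder instead (a stand-in for illustration, not the class method):

from scipy import optimize
from scipy import special as sp

def r1_of_kappa(k):
    # I1(k)/I0(k); ive is the exponentially scaled Bessel, so large k is safe
    return sp.ive(1, k) / sp.ive(0, k)

def kappa_of_r1(r1):
    # the map is monotone in k, so simple bracketing is enough here
    return optimize.brentq(lambda k: r1_of_kappa(k) - r1, 1e-12, 1e4)

kappa = kappa_of_r1(0.7)
print(kappa, r1_of_kappa(kappa))       # the second number round-trips to 0.7

The sech-2 branch just below (fourier2b) follows the same pattern, with r1 = pi/(2*B*sinh(pi/(2*B))) as the forward map.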
def fourier2b(self, r1): def fourier2b(self, r1):
''' Returns the solution of R1 = pi/(2*B*sinh(pi/(2*B)). ''' Returns the solution of R1 = pi/(2*B*sinh(pi/(2*B)).
''' '''
B0 = hstack((linspace(eps, 5, 513), linspace(5.0001, 100))) B0 = hstack((linspace(_EPS, 5, 513), linspace(5.0001, 100)))
funB = interp1d(self._r1ofsech2(B0), B0) funB = interp1d(self._r1ofsech2(B0), B0)
B0 = funB(r1.ravel()) B0 = funB(r1.ravel())
@ -1867,7 +1862,9 @@ class Spreading(object):
ix0 = flatnonzero(r1 != 0.0) ix0 = flatnonzero(r1 != 0.0)
B = zeros_like(r1) B = zeros_like(r1)
fun = lambda x: 0.5 * pi / (sinh(.5 * pi / x)) - x * r1[ix]
def fun(x):
return 0.5 * pi / (sinh(.5 * pi / x)) - x * r1[ix]
for ix in ix0: for ix in ix0:
B[ix] = abs(optimize.fsolve(fun, B0[ix])) B[ix] = abs(optimize.fsolve(fun, B0[ix]))
return B return B
@ -1890,7 +1887,7 @@ class Spreading(object):
S : ndarray S : ndarray
spread parameter of COS2S functions spread parameter of COS2S functions
''' '''
if self.method == None: if self.method is None:
# no frequency dependent spreading, # no frequency dependent spreading,
# but possible frequency dependent direction # but possible frequency dependent direction
s = atleast_1d(self.s_a) s = atleast_1d(self.s_a)
@ -1923,12 +1920,12 @@ class Spreading(object):
# Banner parametrization for B in SECH-2 # Banner parametrization for B in SECH-2
s3m = spb * (wn_up) ** mb s3m = spb * (wn_up) ** mb
s3p = self._donelan(wn_up) s3p = self._donelan(wn_up)
# % Scale so that parametrization will be continous # Scale so that parametrization will be continous
scale = s3m / s3p scale = s3m / s3p
s[k] = scale * self.donelan(wn[k]) s[k] = scale * self.donelan(wn[k])
r1 = self.r1ofsech2(s) r1 = self.r1ofsech2(s)
#% Convert to S-paramater in COS-2S distribution # Convert to S-paramater in COS-2S distribution
s = r1 / (1. - r1) s = r1 / (1. - r1)
else: else:
s[k] = 0.0 s[k] = 0.0
@ -1994,7 +1991,7 @@ class Spreading(object):
theta = np.linspace(-pi, pi, nt) theta = np.linspace(-pi, pi, nt)
else: else:
L = abs(theta[-1] - theta[0]) L = abs(theta[-1] - theta[0])
if abs(L - pi) > eps: if abs(L - pi) > _EPS:
raise ValueError('theta must cover all angles -pi -> pi') raise ValueError('theta must cover all angles -pi -> pi')
nt = len(theta) nt = len(theta)
@ -2015,7 +2012,7 @@ class Spreading(object):
Snew.h = specdata.h Snew.h = specdata.h
Snew.phi = phi0 Snew.phi = phi0
Snew.norm = specdata.norm Snew.norm = specdata.norm
#Snew.note = specdata.note + ', spreading: %s' % self.type # Snew.note = specdata.note + ', spreading: %s' % self.type
return Snew return Snew
@ -2036,11 +2033,9 @@ def _test_some_spectra():
plb.show() plb.show()
plb.close('all') plb.close('all')
#import pylab as plb
#w = plb.linspace(0,3)
w, th = plb.ogrid[0:4, 0:6] w, th = plb.ogrid[0:4, 0:6]
k, k2 = w2k(w, th) k, k2 = w2k(w, th)
#k1, k12 = w2k(w, th, h=20)
plb.plot(w, k, w, k2) plb.plot(w, k, w, k2)
plb.show() plb.show()
@ -2080,8 +2075,8 @@ def _test_spreading():
pi = plb.pi pi = plb.pi
w = plb.linspace(0, 3, 257) w = plb.linspace(0, 3, 257)
theta = plb.linspace(-pi, pi, 129) theta = plb.linspace(-pi, pi, 129)
theta0 = lambda w: w * plb.pi / 6.0
D2 = Spreading('cos2s', theta0=theta0) D2 = Spreading('cos2s', theta0=lambda w: w * plb.pi / 6.0)
d1 = D2(theta, w)[0] d1 = D2(theta, w)[0]
_t = plb.contour(d1.squeeze()) _t = plb.contour(d1.squeeze())
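The test above drives Spreading('cos2s') with a frequency-dependent main direction. For reference, the underlying cos-2s density and its normalisation can be checked in a few lines; the constant is written via the Legendre duplication formula, so treat this as a sketch of the textbook form rather than of wafo's own tabulation:

import numpy as np
from scipy.special import gamma as gammafn

def cos2s(theta, s, theta0=0.0):
    # D(theta) = C(s)*cos((theta - theta0)/2)**(2*s), normalised over (-pi, pi)
    C = gammafn(s + 1) / (2 * np.sqrt(np.pi) * gammafn(s + 0.5))
    return C * np.cos(0.5 * (theta - theta0)) ** (2 * s)

theta = np.linspace(-np.pi, np.pi, 2001)
print(np.trapz(cos2s(theta, s=15.0), theta))   # ~1.0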

@ -3,7 +3,8 @@ import wafo.transform.models as wtm
import wafo.objects as wo import wafo.objects as wo
from wafo.spectrum import SpecData1D from wafo.spectrum import SpecData1D
import numpy as np import numpy as np
from numpy.testing import assert_array_almost_equal from numpy import NAN
from numpy.testing import assert_array_almost_equal, assert_array_equal
import unittest import unittest
@ -12,213 +13,184 @@ def slow(f):
return f return f
class TestSpectrum(unittest.TestCase): class TestSpectrumHs7(unittest.TestCase):
def setUp(self):
self.Sj = sm.Jonswap(Hm0=7.0, Tp=11)
self.S = self.Sj.tospecdata()
@slow
def test_tocovmatrix(self): def test_tocovmatrix(self):
Sj = sm.Jonswap() acfmat = self.S.tocov_matrix(nr=3, nt=256, dt=0.1)
S = Sj.tospecdata()
acfmat = S.tocov_matrix(nr=3, nt=256, dt=0.1)
vals = acfmat[:2, :] vals = acfmat[:2, :]
true_vals = np.array([[3.06073383, 0.0000000, -1.67748256, 0.], true_vals = np.array([[3.06073383, 0.0000000, -1.67748256, 0.],
[3.05235423, -0.1674357, -1.66811444, [3.05235423, -0.1674357, -1.66811444,
0.18693242]]) 0.18693242]])
self.assertTrue((np.abs(vals - true_vals) < 1e-7).all()) assert_array_almost_equal(vals, true_vals)
def test_tocovdata(self):
def test_tocovdata():
Sj = sm.Jonswap() Nt = len(self.S.data) - 1
S = Sj.tospecdata() acf = self.S.tocovdata(nr=0, nt=Nt)
Nt = len(S.data) - 1 vals = acf.data[:5]
acf = S.tocovdata(nr=0, nt=Nt)
vals = acf.data[:5] true_vals = np.array(
[3.06090339, 2.22658399, 0.45307391, -1.17495501, -2.05649042])
true_vals = np.array( assert_array_almost_equal(vals, true_vals)
[3.06090339, 2.22658399, 0.45307391, -1.17495501, -2.05649042]) assert((np.abs(vals - true_vals) < 1e-6).all())
assert((np.abs(vals - true_vals) < 1e-6).all())
def test_to_t_pdf(self):
f = self.S.to_t_pdf(pdef='Tc', paramt=(0, 10, 51), speed=7, seed=100)
def test_to_t_pdf(): vals = ['%2.3f' % val for val in f.data[:10]]
Sj = sm.Jonswap() truevals = ['0.000', '0.014', '0.027', '0.040',
S = Sj.tospecdata() '0.050', '0.059', '0.067', '0.073', '0.077', '0.082']
f = S.to_t_pdf(pdef='Tc', paramt=(0, 10, 51), speed=7, seed=100) for t, v in zip(truevals, vals):
vals = ['%2.3f' % val for val in f.data[:10]] assert(t == v)
truevals = ['0.000', '0.014', '0.027', '0.040',
'0.050', '0.059', '0.067', '0.073', '0.077', '0.082'] # estimated error bounds
for t, v in zip(truevals, vals): vals = ['%2.4f' % val for val in f.err[:10]]
assert(t == v) truevals = ['0.0000', '0.0003', '0.0003', '0.0004',
'0.0006', '0.0008', '0.0016', '0.0019', '0.0020', '0.0021']
# estimated error bounds for t, v in zip(truevals, vals):
vals = ['%2.4f' % val for val in f.err[:10]] assert(t == v)
truevals = ['0.0000', '0.0003', '0.0003', '0.0004',
'0.0006', '0.0008', '0.0016', '0.0019', '0.0020', '0.0021'] @slow
for t, v in zip(truevals, vals): def test_sim(self):
assert(t == v) S = self.S
import scipy.stats as st
@slow x2 = S.sim(20000, 20)
def test_sim(): truth1 = [0, np.sqrt(S.moment(1)[0]), 0., 0.]
Sj = sm.Jonswap() funs = [np.mean, np.std, st.skew, st.kurtosis]
S = Sj.tospecdata() for fun, trueval in zip(funs, truth1):
#ns = 100 res = fun(x2[:, 1::], axis=0)
#dt = .2 m = res.mean()
#x1 = S.sim(ns, dt=dt) sa = res.std()
assert(np.abs(m - trueval) < sa)
import scipy.stats as st
x2 = S.sim(20000, 20) @slow
truth1 = [0, np.sqrt(S.moment(1)[0]), 0., 0.] def test_sim_nl(self):
funs = [np.mean, np.std, st.skew, st.kurtosis] S = self.S
for fun, trueval in zip(funs, truth1):
res = fun(x2[:, 1::], axis=0) import scipy.stats as st
m = res.mean() x2, _x1 = S.sim_nl(ns=20000, cases=40)
sa = res.std() truth1 = [0, np.sqrt(S.moment(1)[0][0])] + S.stats_nl(moments='sk')
#trueval, m, sa truth1[-1] = truth1[-1] - 3
assert(np.abs(m - trueval) < sa)
# truth1
# [0, 1.7495200310090633, 0.18673120577479801, 0.061988521262417606]
@slow
def test_sim_nl(): funs = [np.mean, np.std, st.skew, st.kurtosis]
for fun, trueval in zip(funs, truth1):
Sj = sm.Jonswap() res = fun(x2.data, axis=0)
S = Sj.tospecdata() m = res.mean()
# ns = 100 sa = res.std()
# dt = .2 # trueval, m, sa
# x1 = S.sim_nl(ns, dt=dt) assert(np.abs(m - trueval) < 2 * sa)
import scipy.stats as st
x2, _x1 = S.sim_nl(ns=20000, cases=40) def test_stats_nl(self):
truth1 = [0, np.sqrt(S.moment(1)[0][0])] + S.stats_nl(moments='sk') S = self.S
truth1[-1] = truth1[-1] - 3 me, va, sk, ku = S.stats_nl(moments='mvsk')
assert(me == 0.0)
# truth1 assert_array_almost_equal(va, 3.0608203389019537)
#[0, 1.7495200310090633, 0.18673120577479801, 0.061988521262417606] assert_array_almost_equal(sk, 0.18673120577479801)
assert_array_almost_equal(ku, 3.0619885212624176)
funs = [np.mean, np.std, st.skew, st.kurtosis]
for fun, trueval in zip(funs, truth1): def test_testgaussian(self):
res = fun(x2.data, axis=0) Hs = self.Sj.Hm0
m = res.mean() S0 = self.S
sa = res.std() # ns =100; dt = .2
# trueval, m, sa # x1 = S0.sim(ns, dt=dt)
assert(np.abs(m - trueval) < 2 * sa) S = S0.copy()
me, _va, sk, ku = S.stats_nl(moments='mvsk')
S.tr = wtm.TrHermite(
def test_stats_nl(): mean=me, sigma=Hs / 4, skew=sk, kurt=ku, ysigma=Hs / 4)
ys = wo.mat2timeseries(S.sim(ns=2 ** 13))
Hs = 7. g0, _gemp = ys.trdata()
Sj = sm.Jonswap(Hm0=Hs, Tp=11) t0 = g0.dist2gauss()
S = Sj.tospecdata() t1 = S0.testgaussian(ns=2 ** 13, test0=None, cases=50)
me, va, sk, ku = S.stats_nl(moments='mvsk') assert(sum(t1 > t0) < 5)
assert(me == 0.0)
assert_array_almost_equal(va, 3.0608203389019537)
assert_array_almost_equal(sk, 0.18673120577479801) class TestSpectrumHs5(unittest.TestCase):
assert_array_almost_equal(ku, 3.0619885212624176) def setUp(self):
self.Sj = sm.Jonswap(Hm0=5.0)
self.S = self.Sj.tospecdata()
def test_testgaussian():
def test_moment(self):
Hs = 7 S = self.S
Sj = sm.Jonswap(Hm0=Hs) vals, txt = S.moment()
S0 = Sj.tospecdata() true_vals = [1.5614600345079888, 0.95567089481941048]
# ns =100; dt = .2 true_txt = ['m0', 'm0tt']
# x1 = S0.sim(ns, dt=dt)
assert_array_almost_equal(vals, true_vals)
S = S0.copy() for tv, v in zip(true_txt, txt):
me, _va, sk, ku = S.stats_nl(moments='mvsk') assert(tv == v)
S.tr = wtm.TrHermite(
mean=me, sigma=Hs / 4, skew=sk, kurt=ku, ysigma=Hs / 4) def test_nyquist_freq(self):
ys = wo.mat2timeseries(S.sim(ns=2 ** 13)) S = self.S
g0, _gemp = ys.trdata() assert_array_almost_equal(S.nyquist_freq(), 3.0)
t0 = g0.dist2gauss()
t1 = S0.testgaussian(ns=2 ** 13, test0=None, cases=50) def test_sampling_period(self):
assert(sum(t1 > t0) < 5) S = self.S
assert_array_almost_equal(S.sampling_period(), 1.0471975511965976)
def test_moment(): def test_normalize(self):
Sj = sm.Jonswap(Hm0=5) S = self.S
S = Sj.tospecdata() # Make spectrum ob mom, txt = S.moment(2)
vals, txt = S.moment() assert_array_almost_equal(mom,
true_vals = [1.5614600345079888, 0.95567089481941048] [1.5614600345079888, 0.95567089481941048])
true_txt = ['m0', 'm0tt'] assert_array_equal(txt, ['m0', 'm0tt'])
for tv, v in zip(true_vals, vals): vals, _txt = S.moment(2)
assert_array_almost_equal(tv, v) true_vals = [1.5614600345079888, 0.95567089481941048]
for tv, v in zip(true_txt, txt): assert_array_almost_equal(vals, true_vals)
assert(tv==v)
Sn = S.copy()
Sn.normalize()
def test_nyquist_freq():
Sj = sm.Jonswap(Hm0=5) # Now the moments should be one
S = Sj.tospecdata() # Make spectrum ob new_vals, _txt = Sn.moment(2)
assert(S.nyquist_freq() == 3.0) assert_array_almost_equal(new_vals, np.ones(2))
def test_characteristic(self):
def test_sampling_period(): S = self.S
Sj = sm.Jonswap(Hm0=5) ch, R, txt = S.characteristic(1)
S = Sj.tospecdata() # Make spectrum ob assert_array_almost_equal(ch, 8.59007646)
assert(S.sampling_period() == 1.0471975511965976) assert_array_almost_equal(R, 0.03040216)
self.assert_(txt == ['Tm01'])
def test_normalize(): ch, R, txt = S.characteristic([1, 2, 3]) # fact a vector of integers
Sj = sm.Jonswap(Hm0=5) assert_array_almost_equal(ch, [8.59007646, 8.03139757, 5.62484314])
S = Sj.tospecdata() # Make spectrum ob assert_array_almost_equal(R,
S.moment(2) [[0.03040216, 0.02834263, NAN],
([1.5614600345079888, 0.95567089481941048], ['m0', 'm0tt']) [0.02834263, 0.0274645, NAN],
vals, _txt = S.moment(2) [NAN, NAN, 0.01500249]])
true_vals = [1.5614600345079888, 0.95567089481941048] assert_array_equal(txt, ['Tm01', 'Tm02', 'Tm24'])
for tv, v in zip(true_vals, vals):
assert_array_almost_equal(tv, v) ch, R, txt = S.characteristic('Ss') # fact a string
assert_array_almost_equal(ch, [0.04963112])
Sn = S.copy() assert_array_almost_equal(R, [[2.63624782e-06]])
Sn.normalize() assert_array_equal(txt, ['Ss'])
# Now the moments should be one # fact a list of strings
new_vals, _txt = Sn.moment(2) ch, R, txt = S.characteristic(['Hm0', 'Tm02'])
for v in new_vals:
assert(np.abs(v - 1.0) < 1e-7) assert_array_almost_equal(ch,
[4.99833578, 8.03139757])
assert_array_almost_equal(R, [[0.05292989, 0.02511371],
def test_characteristic(): [0.02511371, 0.0274645]])
''' assert_array_equal(txt, ['Hm0', 'Tm02'])
>>> import wafo.spectrum.models as sm
>>> Sj = sm.Jonswap(Hm0=5)
>>> S = Sj.tospecdata() #Make spectrum ob class TestSpectrumHs3(unittest.TestCase):
>>> S.characteristic(1) def test_bandwidth(self):
(array([ 8.59007646]), array([[ 0.03040216]]), ['Tm01'])
Sj = sm.Jonswap(Hm0=3, Tp=7)
>>> [ch, R, txt] = S.characteristic([1,2,3]) # fact a vector of integers w = np.linspace(0, 4, 256)
>>> ch; R; txt S = SpecData1D(Sj(w), w) # Make spectrum object from numerical values
array([ 8.59007646, 8.03139757, 5.62484314]) vals = S.bandwidth([0, 1, 2, 3])
array([[ 0.03040216, 0.02834263, nan], true_vals = [0.73062845, 0.34476034, 0.68277527, 2.90817052]
[ 0.02834263, 0.0274645 , nan], assert_array_almost_equal(vals, true_vals)
[ nan, nan, 0.01500249]])
['Tm01', 'Tm02', 'Tm24']
>>> S.characteristic('Ss') # fact a string
(array([ 0.04963112]), array([[ 2.63624782e-06]]), ['Ss'])
>>> S.characteristic(['Hm0','Tm02']) # fact a list of strings
(array([ 4.99833578, 8.03139757]), array([[ 0.05292989, 0.02511371],
[ 0.02511371, 0.0274645 ]]), ['Hm0', 'Tm02'])
'''
def test_bandwidth():
Sj = sm.Jonswap(Hm0=3, Tp=7)
w = np.linspace(0, 4, 256)
S = SpecData1D(Sj(w), w) # Make spectrum object from numerical values
vals = S.bandwidth([0, 1, 2, 3])
true_vals = np.array([0.73062845, 0.34476034, 0.68277527, 2.90817052])
assert((np.abs(vals - true_vals) < 1e-7).all())
def test_docstrings():
import doctest
doctest.testmod()
if __name__ == '__main__': if __name__ == '__main__':
import nose import nose
nose.run() nose.run()
# test_docstrings()
# test_tocovdata()
# test_tocovmatrix()
# test_sim()
# test_bandwidth()
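The moment and characteristic tests above all reduce to low-order spectral moments. The same relations can be checked without wafo by writing a Bretschneider spectrum out explicitly, for which m0 = Hm0**2/16 holds exactly and Tm02 comes out at roughly 0.71*Tp:

import numpy as np

# S(w) = (5/16)*Hm0**2*wp**4*w**-5*exp(-1.25*(wp/w)**4), wp = 2*pi/Tp
Hm0, Tp = 7.0, 11.0
wp = 2 * np.pi / Tp
w = np.linspace(1e-3, 6.0, 20000)
S = (5.0 / 16) * Hm0 ** 2 * wp ** 4 * w ** -5 * np.exp(-1.25 * (wp / w) ** 4)

m0 = np.trapz(S, w)
m2 = np.trapz(w ** 2 * S, w)
print(4 * np.sqrt(m0))                 # ~7.0, recovers Hm0
print(2 * np.pi * np.sqrt(m0 / m2))    # Tm02, the mean zero-crossing period, ~7.8 s here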

@ -1,9 +1,11 @@
-from scipy.stats._distn_infrastructure import *
-from scipy.stats._distn_infrastructure import (_skew, _kurtosis,  # @UnresolvedImport
-    _lazywhere, _ncx2_log_pdf, _ncx2_pdf, _ncx2_cdf)
-from wafo.stats.estimation import FitDistribution
-from wafo.stats._constants import _EPS, _XMAX
-import numpy as np
+from __future__ import absolute_import
+from scipy.stats._distn_infrastructure import *  # @UnusedWildImport
+from scipy.stats._distn_infrastructure import (_skew,  # @UnusedImport
+    _kurtosis, _lazywhere, _ncx2_log_pdf,  # @IgnorePep8 @UnusedImport
+    _ncx2_pdf, _ncx2_cdf)  # @UnusedImport @IgnorePep8
+from .estimation import FitDistribution
+from ._constants import _XMAX
_doc_default_example = """\ _doc_default_example = """\
Examples Examples
@ -326,10 +328,11 @@ def nlogps(self, theta, x):
return T return T
def _reduce_func(self, args, kwds): def _reduce_func(self, args, options):
# First of all, convert fshapes params to fnum: eg for stats.beta, # First of all, convert fshapes params to fnum: eg for stats.beta,
# shapes='a, b'. To fix `a`, can specify either `f1` or `fa`. # shapes='a, b'. To fix `a`, can specify either `f1` or `fa`.
# Convert the latter into the former. # Convert the latter into the former.
kwds = options.copy()
if self.shapes: if self.shapes:
shapes = self.shapes.replace(',', ' ').split() shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes): for j, s in enumerate(shapes):
@ -384,9 +387,9 @@ def _reduce_func(self, args, kwds):
return x0, func, restore, args return x0, func, restore, args
def fit(self, data, *args, **kwds): def fit(self, data, *args, **kwargs):
""" """
Return ML or MPS estimate for shape, location, and scale parameters from data. Return ML/MPS estimate for shape, location, and scale parameters from data.
ML and MPS stands for Maximum Likelihood and Maximum Product Spacing, ML and MPS stands for Maximum Likelihood and Maximum Product Spacing,
respectively. Starting estimates for respectively. Starting estimates for
@ -476,6 +479,7 @@ def fit(self, data, *args, **kwds):
if Narg > self.numargs: if Narg > self.numargs:
raise TypeError("Too many input arguments.") raise TypeError("Too many input arguments.")
kwds = kwargs.copy()
start = [None]*2 start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds): 'scale' in kwds):
@ -573,4 +577,3 @@ rv_continuous.nlogps = nlogps
rv_continuous._reduce_func = _reduce_func rv_continuous._reduce_func = _reduce_func
rv_continuous.fit = fit rv_continuous.fit = fit
rv_continuous.fit2 = fit2 rv_continuous.fit2 = fit2
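The patched fit/nlogps methods add a Maximum Product of Spacings (MPS) objective alongside maximum likelihood. A self-contained sketch of that objective using plain scipy rather than wafo's FitDistribution, on a hypothetical Gumbel sample:

import numpy as np
from scipy import optimize, stats

def neg_log_product_spacings(params, x, dist):
    # Spacings D_i = F(x_(i)) - F(x_(i-1)) of the fitted CDF at the ordered
    # sample, padded with 0 and 1; MPS maximises sum(log(D_i)).
    loc, scale = params
    cdf = dist.cdf(np.sort(x), loc=loc, scale=scale)
    D = np.diff(np.concatenate(([0.0], cdf, [1.0])))
    return -np.sum(np.log(np.clip(D, 1e-300, 1.0)))

rng = np.random.RandomState(0)
x = rng.gumbel(loc=2.0, scale=0.5, size=200)
res = optimize.minimize(neg_log_product_spacings, x0=[x.mean(), x.std()],
                        args=(x, stats.gumbel_r), method='Nelder-Mead')
print(res.x)                           # should land near loc=2.0, scale=0.5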

@ -9,4 +9,4 @@
""" """
from __future__ import print_function, absolute_import, division from __future__ import print_function, absolute_import, division
import pytest import pytest # @UnusedImport

@ -7,10 +7,23 @@ import unittest
import numpy as np import numpy as np
from numpy import exp, Inf from numpy import exp, Inf
from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_almost_equal
from wafo.integrate import gaussq from wafo.integrate import gaussq, quadgr, clencurt, romberg
class Gaussq(unittest.TestCase): class TestIntegrators(unittest.TestCase):
def test_clencurt(self):
val, err = clencurt(np.exp, 0, 2)
assert_array_almost_equal(val, np.expm1(2))
self.assert_(err < 1e-10)
def test_romberg(self):
tol = 1e-7
q, err = romberg(np.sqrt, 0, 10, 0, abseps=tol)
assert_array_almost_equal(q, 2.0/3 * 10**(3./2))
self.assert_(err < tol)
class TestGaussq(unittest.TestCase):
''' '''
1 : p(x) = 1 a =-1, b = 1 Gauss-Legendre 1 : p(x) = 1 a =-1, b = 1 Gauss-Legendre
2 : p(x) = exp(-x^2) a =-inf, b = inf Hermite 2 : p(x) = exp(-x^2) a =-inf, b = inf Hermite
@ -60,6 +73,52 @@ class Gaussq(unittest.TestCase):
val, _err = gaussq(lambda x: x, 0, 1, wfun=9) val, _err = gaussq(lambda x: x, 0, 1, wfun=9)
assert_array_almost_equal(val, 0.26666667) assert_array_almost_equal(val, 0.26666667)
class TestQuadgr(unittest.TestCase):
def test_log(self):
Q, err = quadgr(np.log, 0, 1)
assert_array_almost_equal(Q, -1)
self.assert_(err < 1e-5)
def test_exp(self):
Q, err = quadgr(np.exp, 0, 9999*1j*np.pi)
assert_array_almost_equal(Q, -2.0000000000122662)
self.assert_(err < 1.0e-8)
def test_integral3(self):
tol = 1e-12
Q, err = quadgr(lambda x: np.sqrt(4-x**2), 0, 2, tol)
assert_array_almost_equal(Q, np.pi)
self.assert_(err < tol)
# (3.1415926535897811, 1.5809575870662229e-13)
def test_integral4(self):
Q, err = quadgr(lambda x: 1./x**0.75, 0, 1)
assert_array_almost_equal(Q, 4)
self.assert_(err < 1.0e-12)
def test_integrand4(self):
tol = 1e-10
Q, err = quadgr(lambda x: 1./np.sqrt(1-x**2), -1, 1, tol)
assert_array_almost_equal(Q, np.pi)
self.assert_(err < tol)
# (3.141596056985029, 6.2146261559092864e-06)
def test_integrand5(self):
tol = 1e-9
Q, err = quadgr(lambda x: np.exp(-x**2), -np.inf, np.inf, tol)
assert_array_almost_equal(Q, np.sqrt(np.pi))
self.assert_(err < tol)
# (1.7724538509055152, 1.9722334876348668e-11)
def test_integrand6(self):
tol = 1e-9
Q, err = quadgr(lambda x: np.cos(x)*np.exp(-x), 0, np.inf, tol)
assert_array_almost_equal(Q, 0.5)
self.assert_(err < tol)
# (0.50000000000000044, 7.3296813063450372e-11)
if __name__ == "__main__": if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName'] # import sys;sys.argv = ['', 'Test.testName']
unittest.main() unittest.main()
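test_clencurt above pins clencurt(np.exp, 0, 2) to expm1(2). For readers who want to see what the rule does, here is a compact Clenshaw-Curtis quadrature written from scratch (an independent sketch, not wafo's implementation):

import numpy as np

def clencurt_sketch(fun, a, b, n=32):
    # Sample fun at the n+1 Chebyshev points, recover Chebyshev coefficients of
    # the interpolant with a mirrored FFT, and integrate it term by term using
    # int_{-1}^{1} T_j(x) dx = 2/(1 - j**2) for even j (0 for odd j).
    k = np.arange(n + 1)
    x = np.cos(np.pi * k / n)
    fx = fun(0.5 * (b - a) * x + 0.5 * (b + a))
    c = np.real(np.fft.fft(np.concatenate([fx, fx[-2:0:-1]]))) / n
    c[0] /= 2.0
    c[n] /= 2.0
    j = np.arange(0, n + 1, 2)
    return 0.5 * (b - a) * np.sum(c[j] * 2.0 / (1.0 - j ** 2))

print(clencurt_sketch(np.exp, 0.0, 2.0), np.expm1(2))   # both ~6.389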

@ -478,8 +478,8 @@ class TrOchi(TrCommon2):
''' '''
Returns ga, gb, sigma2, mean2 Returns ga, gb, sigma2, mean2
''' '''
if (self._phat is None or self.sigma != self._phat[0] if (self._phat is None or self.sigma != self._phat[0] or
or self.mean != self._phat[1]): self.mean != self._phat[1]):
self._par_from_stats() self._par_from_stats()
# sigma1 = self._phat[0] # sigma1 = self._phat[0]
# mean1 = self._phat[1] # mean1 = self._phat[1]

@ -7,9 +7,11 @@ from __future__ import absolute_import
import numpy as np import numpy as np
from numpy import exp, expm1, inf, nan, pi, hstack, where, atleast_1d, cos, sin from numpy import exp, expm1, inf, nan, pi, hstack, where, atleast_1d, cos, sin
from .dispersion_relation import w2k, k2w # @UnusedImport from .dispersion_relation import w2k, k2w # @UnusedImport
from ..misc import JITImport
__all__ = ['w2k', 'k2w', 'sensor_typeid', 'sensor_type', 'TransferFunction'] __all__ = ['w2k', 'k2w', 'sensor_typeid', 'sensor_type', 'TransferFunction']
_MODELS = JITImport('wafo.spectrum.models')
def hyperbolic_ratio(a, b, sa, sb): def hyperbolic_ratio(a, b, sa, sb):
''' '''
@ -349,7 +351,8 @@ class TransferFunction(object):
Gwt = -Gwt Gwt = -Gwt
return Hw, Gwt return Hw, Gwt
__call__ = tran __call__ = tran
#---Private member methods
# --- Private member methods ---
def _get_ee_cthxy(self, theta, kw): def _get_ee_cthxy(self, theta, kw):
# convert from angle in degrees to radians # convert from angle in degrees to radians
@ -358,7 +361,7 @@ class TransferFunction(object):
thyr = self.thetay * pi / 180 thyr = self.thetay * pi / 180
cthx = bet * cos(theta - thxr + pi / 2) cthx = bet * cos(theta - thxr + pi / 2)
#cthy = cos(theta-thyr-pi/2) # cthy = cos(theta-thyr-pi/2)
cthy = bet * sin(theta - thyr) cthy = bet * sin(theta - thyr)
# Compute location complex exponential # Compute location complex exponential
@ -380,14 +383,14 @@ class TransferFunction(object):
zk = kw * z # z measured positive upward from sea floor zk = kw * z # z measured positive upward from sea floor
return zk return zk
#--- Surface elevation --- # --- Surface elevation ---
def _n(self, w, theta, kw): def _n(self, w, theta, kw):
'''n = Eta = wave profile '''n = Eta = wave profile
''' '''
ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw) ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
return np.ones_like(w), ee return np.ones_like(w), ee
#---- Vertical surface velocity and acceleration----- # --- Vertical surface velocity and acceleration ---
def _n_t(self, w, theta, kw): def _n_t(self, w, theta, kw):
''' n_t = Eta_t ''' ''' n_t = Eta_t '''
ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw) ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -398,7 +401,7 @@ class TransferFunction(object):
ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw) ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
return w ** 2, -ee return w ** 2, -ee
#--- Surface slopes --- # --- Surface slopes ---
def _n_x(self, w, theta, kw): def _n_x(self, w, theta, kw):
''' n_x = Eta_x = x-slope''' ''' n_x = Eta_x = x-slope'''
ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw) ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -409,7 +412,7 @@ class TransferFunction(object):
ee, unused_cthx, cthy = self._get_ee_cthxy(theta, kw) ee, unused_cthx, cthy = self._get_ee_cthxy(theta, kw)
return kw, 1j * cthy * ee return kw, 1j * cthy * ee
#--- Surface curvatures --- # --- Surface curvatures ---
def _n_xx(self, w, theta, kw): def _n_xx(self, w, theta, kw):
''' n_xx = Eta_xx = Surface curvature (x-dir)''' ''' n_xx = Eta_xx = Surface curvature (x-dir)'''
ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw) ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -425,7 +428,7 @@ class TransferFunction(object):
ee, cthx, cthy = self._get_ee_cthxy(theta, kw) ee, cthx, cthy = self._get_ee_cthxy(theta, kw)
return kw ** 2, -cthx * cthy * ee return kw ** 2, -cthx * cthy * ee
#--- Pressure--- # --- Pressure---
def _p(self, w, theta, kw): def _p(self, w, theta, kw):
''' pressure fluctuations''' ''' pressure fluctuations'''
ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw) ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -434,7 +437,7 @@ class TransferFunction(object):
# hyperbolic_ratio = cosh(zk)/cosh(hk) # hyperbolic_ratio = cosh(zk)/cosh(hk)
return self.rho * self.g * hyperbolic_ratio(zk, hk, 1, 1), ee return self.rho * self.g * hyperbolic_ratio(zk, hk, 1, 1), ee
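The pressure transfer function above relies on hyperbolic_ratio to evaluate cosh(zk)/cosh(hk) without overflowing for large arguments. The trick for that particular case (sa = sb = 1, nonnegative arguments) in isolation:

import numpy as np

def cosh_ratio(a, b):
    # cosh(a)/cosh(b) = exp(a - b)*(1 + exp(-2a))/(1 + exp(-2b)),
    # which never forms the huge intermediate cosh values.
    return np.exp(a - b) * (1.0 + np.exp(-2.0 * a)) / (1.0 + np.exp(-2.0 * b))

print(cosh_ratio(800.0, 900.0))          # ~3.7e-44
print(np.cosh(800.0) / np.cosh(900.0))   # naive form gives inf/inf -> nan, with overflow warnings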
#---- Water particle velocities --- # --- Water particle velocities ---
def _u(self, w, theta, kw): def _u(self, w, theta, kw):
''' U = x-velocity''' ''' U = x-velocity'''
ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw) ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -459,7 +462,7 @@ class TransferFunction(object):
# w*sinh(zk)/sinh(hk), -? # w*sinh(zk)/sinh(hk), -?
return w * hyperbolic_ratio(zk, hk, -1, -1), -1j * ee return w * hyperbolic_ratio(zk, hk, -1, -1), -1j * ee
#---- Water particle acceleration --- # --- Water particle acceleration ---
def _u_t(self, w, theta, kw): def _u_t(self, w, theta, kw):
''' U_t = x-acceleration''' ''' U_t = x-acceleration'''
ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw) ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -484,7 +487,7 @@ class TransferFunction(object):
# w*sinh(zk)/sinh(hk), ? # w*sinh(zk)/sinh(hk), ?
return (w ** 2) * hyperbolic_ratio(zk, hk, -1, -1), -ee return (w ** 2) * hyperbolic_ratio(zk, hk, -1, -1), -ee
#---- Water particle displacement --- # --- Water particle displacement ---
def _x_p(self, w, theta, kw): def _x_p(self, w, theta, kw):
''' X_p = x-displacement''' ''' X_p = x-displacement'''
ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw) ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -508,88 +511,73 @@ class TransferFunction(object):
zk = self._get_zk(kw) zk = self._get_zk(kw)
return hyperbolic_ratio(zk, hk, -1, -1), ee # sinh(zk)./sinh(hk), ee return hyperbolic_ratio(zk, hk, -1, -1), ee # sinh(zk)./sinh(hk), ee
# def wave_pressure(z, Hm0, h=10000, g=9.81, rho=1028):
# ''' def wave_pressure(z, Hm0, h=10000, g=9.81, rho=1028):
# Calculate pressure amplitude due to water waves. '''
# Calculate pressure amplitude due to water waves.
# Parameters
# ---------- Parameters
# z : array-like ----------
# depth where pressure is calculated [m] z : array-like
# Hm0 : array-like depth where pressure is calculated [m]
# significant wave height (same as the average of the 1/3'rd highest Hm0 : array-like
# waves in a seastate. [m] significant wave height (same as the average of the 1/3'rd highest
# h : real scalar waves in a seastate. [m]
# waterdepth (default 10000 [m]) h : real scalar
# g : real scalar waterdepth (default 10000 [m])
# acceleration of gravity (default 9.81 m/s**2) g : real scalar
# rho : real scalar acceleration of gravity (default 9.81 m/s**2)
# water density (default 1028 kg/m**3) rho : real scalar
# water density (default 1028 kg/m**3)
#
# Returns
# ------- Returns
# p : ndarray -------
# pressure amplitude due to water waves at water depth z. [Pa] p : ndarray
# pressure amplitude due to water waves at water depth z. [Pa]
# PRESSURE calculate pressure amplitude due to water waves according to
# linear theory. PRESSURE calculate pressure amplitude due to water waves according to
# linear theory.
# Example
# ----- Example
# >>> import pylab as plt -----
# >>> z = -np.linspace(10,20) >>> import pylab as plt
# >>> fh = plt.plot(z, wave_pressure(z, Hm0=1, h=20)) >>> z = -np.linspace(10,20)
# >>> plt.show() >>> fh = plt.plot(z, wave_pressure(z, Hm0=1, h=20))
# >>> plt.show()
# See also
# -------- See also
# w2k --------
# w2k
#
# u = psweep.Fn*sqrt(mgf.length*9.81) '''
# z = -10; h = inf;
# Hm0 = 1.5;Tp = 4*sqrt(Hm0); # Assume seastate with jonswap spectrum:
# S = jonswap([],[Hm0,Tp]); Tp = 4 * np.sqrt(Hm0)
# Hw = tran(S.w,0,[0 0 -z],'P',h) gam = _MODELS.jonswap_peakfact(Hm0, Tp)
# Sm = S; Tm02 = Tp / (1.30301 - 0.01698 * gam + 0.12102 / gam)
# Sm.S = Hw.'.*S.S; w = 2 * np.pi / Tm02
# x1 = spec2sdat(Sm,1000); kw, unused_kw2 = w2k(w, 0, h)
# pwave = pressure(z,Hm0,h)
# hk = kw * h
# plot(psweep.x{1}/u, psweep.f) zk1 = kw * z
# hold on zk = hk + zk1 # z measured positive upward from mean water level (default)
# plot(x1(1:100,1)-30,x1(1:100,2),'r') # zk = hk-zk1 # z measured positive downward from mean water level
# ''' # zk1 = -zk1
# # zk = zk1 # z measured positive upward from sea floor
#
# Assume seastate with jonswap spectrum: # cosh(zk)/cosh(hk) approx exp(zk) for large h
# # hyperbolic_ratio(zk,hk,1,1) = cosh(zk)/cosh(hk)
# Tp = 4 * np.sqrt(Hm0) # pr = np.where(np.pi < hk, np.exp(zk1), hyperbolic_ratio(zk, hk, 1, 1))
# gam = jonswap_peakfact(Hm0, Tp) pr = hyperbolic_ratio(zk, hk, 1, 1)
# Tm02 = Tp / (1.30301 - 0.01698 * gam + 0.12102 / gam) pressure = (rho * g * Hm0 / 2) * pr
# w = 2 * np.pi / Tm02
# kw, unused_kw2 = w2k(w, 0, h) # pos = [np.zeros_like(z),np.zeros_like(z),z]
# # tf = TransferFunction(pos=pos, sensortype='p', h=h, rho=rho, g=g)
# hk = kw * h # Hw, Gwt = tf.tran(w,0)
# zk1 = kw * z # pressure2 = np.abs(Hw) * Hm0 / 2
# zk = hk + zk1 # z measured positive upward from mean water level (default)
# zk = hk-zk1; % z measured positive downward from mean water level return pressure
# zk1 = -zk1;
# zk = zk1; % z measured positive upward from sea floor
#
# cosh(zk)/cosh(hk) approx exp(zk) for large h
# hyperbolic_ratio(zk,hk,1,1) = cosh(zk)/cosh(hk)
# pr = np.where(np.pi < hk, np.exp(zk1), hyperbolic_ratio(zk, hk, 1, 1))
# pr = hyperbolic_ratio(zk, hk, 1, 1)
# pressure = (rho * g * Hm0 / 2) * pr
#
## pos = [np.zeros_like(z),np.zeros_like(z),z]
## tf = TransferFunction(pos=pos, sensortype='p', h=h, rho=rho, g=g)
## Hw, Gwt = tf.tran(w,0)
## pressure2 = np.abs(Hw) * Hm0 / 2
#
# return pressure
def test_docstrings(): def test_docstrings():
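wave_pressure, now un-commented above, evaluates the linear-theory pressure amplitude rho*g*(Hm0/2)*cosh(k*(z+h))/cosh(k*h) with the wavenumber coming from w2k. A self-contained equivalent that solves the dispersion relation directly (brentq stands in for w2k here, and the Tp-to-Tm02 step of the original is skipped in favour of an explicit wave period):

import numpy as np
from scipy.optimize import brentq

def wavenumber(w, h, g=9.81):
    # Solve w**2 = g*k*tanh(k*h) for k on a bracket that contains the root
    return brentq(lambda k: g * k * np.tanh(k * h) - w ** 2, 1e-8, 10.0 + w ** 2 / g)

rho, g, h = 1028.0, 9.81, 20.0
Hm0, T = 1.0, 8.0
w = 2 * np.pi / T
k = wavenumber(w, h)
z = -10.0                                              # 10 m below the mean water level
p = rho * g * (Hm0 / 2) * np.cosh(k * (z + h)) / np.cosh(k * h)
print(k, p)                                            # wavenumber [rad/m], pressure amplitude [Pa]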
