pep8 + some small refactorings

master
Per.Andreas.Brodtkorb 10 years ago
parent f1b07d74b4
commit 0c185600cb

@@ -1,16 +1,15 @@
 from __future__ import division
 import warnings
-import copy
 import numpy as np
 from numpy import pi, sqrt, ones, zeros  # @UnresolvedImport
 from scipy import integrate as intg
 import scipy.special.orthogonal as ort
 from scipy import special as sp
 from wafo.plotbackend import plotbackend as plt
-from scipy.integrate import simps, trapz  # @UnusedImport
-from wafo.misc import is_numlike
+from scipy.integrate import simps, trapz
 from wafo.demos import humps
+_EPS = np.finfo(float).eps
 _POINTS_AND_WEIGHTS = {}
 __all__ = ['dea3', 'clencurt', 'romberg',
@@ -69,53 +68,29 @@ def dea3(v0, v1, v2):
            "Lecture Notes in Math.", vol. 584,
            Springer-Verlag, New York, 1977.
     '''
     E0, E1, E2 = np.atleast_1d(v0, v1, v2)
     abs = np.abs  # @ReservedAssignment
     max = np.maximum  # @ReservedAssignment
-    ten = 10.0
-    one = ones(1)
-    small = np.finfo(float).eps  # 1.0e-16 #spacing(one)
-    delta2 = E2 - E1
-    delta1 = E1 - E0
-    err2 = abs(delta2)
-    err1 = abs(delta1)
-    tol2 = max(abs(E2), abs(E1)) * small
-    tol1 = max(abs(E1), abs(E0)) * small
-    result = zeros(E0.shape)
-    abserr = result.copy()
-    converged = (err1 <= tol1) & (err2 <= tol2).ravel()
-    k0, = converged.nonzero()
-    if k0.size > 0:
-        #%C IF E0, E1 AND E2 ARE EQUAL TO WITHIN MACHINE
-        #%C ACCURACY, CONVERGENCE IS ASSUMED.
-        result[k0] = E2[k0]
-        abserr[k0] = err1[k0] + err2[k0] + E2[k0] * small * ten
-    k1, = (1 - converged).nonzero()
-    if k1.size > 0:
-        with warnings.catch_warnings():
-            # ignore division by zero and overflow
-            warnings.simplefilter("ignore")
-            ss = one / delta2[k1] - one / delta1[k1]
-            smallE2 = (abs(ss * E1[k1]) <= 1.0e-3).ravel()
-        k2 = k1[smallE2.nonzero()]
-        if k2.size > 0:
-            result[k2] = E2[k2]
-            abserr[k2] = err1[k2] + err2[k2] + E2[k2] * small * ten
-        k4, = (1 - smallE2).nonzero()
-        if k4.size > 0:
-            k3 = k1[k4]
-            result[k3] = E1[k3] + one / ss[k4]
-            abserr[k3] = err1[k3] + err2[k3] + abs(result[k3] - E2[k3])
+    delta2, delta1 = E2 - E1, E1 - E0
+    err2, err1 = abs(delta2), abs(delta1)
+    tol2, tol1 = max(abs(E2), abs(E1)) * _EPS, max(abs(E1), abs(E0)) * _EPS
+
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")  # ignore division by zero and overflow
+        ss = 1.0 / delta2 - 1.0 / delta1
+        smalle2 = (abs(ss * E1) <= 1.0e-3).ravel()
+
+    result = 1.0 * E2
+    abserr = err1 + err2 + E2 * _EPS * 10.0
+    converged = (err1 <= tol1) & (err2 <= tol2).ravel() | smalle2
+    k4, = (1 - converged).nonzero()
+    if k4.size > 0:
+        result[k4] = E1[k4] + 1.0 / ss[k4]
+        abserr[k4] = err1[k4] + err2[k4] + abs(result[k4] - E2[k4])
     return result, abserr


-def clencurt(fun, a, b, n0=5, trace=False, *args):
+def clencurt(fun, a, b, n0=5, trace=False, args=()):
     '''
     Numerical evaluation of an integral, Clenshaw-Curtis method.
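Note: a minimal usage sketch of the rewritten dea3 (the three-term epsilon-algorithm step above). The function is defined in this module, so the import is grounded; the sequence of trapezoid estimates is only an illustration:

    import numpy as np
    from wafo.integrate import dea3

    def trapz_sin(k):
        # successively refined trapezoid estimates of integral(sin, 0, pi/2) = 1
        x = np.linspace(0, np.pi / 2, 2 ** (k + 5) + 1)
        return np.trapz(np.sin(x), x)

    E0, E1, E2 = [trapz_sin(k) for k in range(3)]
    En, err = dea3(E0, E1, E2)   # En is close to 1.0, err is a small error estimate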
@@ -163,7 +138,7 @@ def clencurt(fun, a, b, n0=5, trace=False, *args):
        Numerische Matematik, Vol. 2, pp. 197--205
    '''
-    #% make sure n is even
+    # make sure n is even
    n = 2 * n0
    a, b = np.atleast_1d(a, b)
    a_shape = a.shape
@@ -270,13 +245,13 @@ def romberg(fun, a, b, releps=1e-3, abseps=1e-3):
     ipower = 1
     fp = ones(tableLimit) * 4
-    #Ih1 = 0
+    # Ih1 = 0
     Ih2 = 0.
     Ih4 = rom[0, 0]
     abserr = Ih4
-    #epstab = zeros(1,decdigs+7)
-    #newflg = 1
-    #[res,abserr,epstab,newflg] = dea(newflg,Ih4,abserr,epstab)
+    # epstab = zeros(1,decdigs+7)
+    # newflg = 1
+    # [res,abserr,epstab,newflg] = dea(newflg,Ih4,abserr,epstab)
     two = 1
     one = 0
     for i in xrange(1, tableLimit):
@@ -290,17 +265,15 @@ def romberg(fun, a, b, releps=1e-3, abseps=1e-3):
             fp[i] = 4 * fp[i - 1]
         # Richardson extrapolation
         for k in xrange(i):
-            # rom(2,k+1)=(fp(k)*rom(2,k)-rom(1,k))/(fp(k)-1)
             rom[two, k + 1] = rom[two, k] + \
                 (rom[two, k] - rom[one, k]) / (fp[k] - 1)
         Ih1 = Ih2
         Ih2 = Ih4
         Ih4 = rom[two, i]
         if (2 <= i):
-            [res, abserr] = dea3(Ih1, Ih2, Ih4)
+            res, abserr = dea3(Ih1, Ih2, Ih4)
             # Ih4 = res
             if (abserr <= max(abseps, releps * abs(res))):
                 break
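Note: the Richardson update in the loop above, rom[two, k + 1] = rom[two, k] + (rom[two, k] - rom[one, k]) / (fp[k] - 1), cancels the leading error term of successive trapezoid estimates. A hand-computed sketch of one such step (exact integral of exp on [0, 1] is e - 1 = 1.718282):

    import numpy as np
    f = np.exp
    T1 = 0.5 * (f(0) + f(1))                 # trapezoid, step h = 1        -> 1.859141
    T2 = 0.25 * (f(0) + 2 * f(0.5) + f(1))   # trapezoid, step h / 2        -> 1.753931
    R = T2 + (T2 - T1) / (4 - 1)             # one Richardson step, fp = 4  -> 1.718861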
@@ -386,8 +359,8 @@ def h_roots(n, method='newton'):
         L[kp1, :] = PIM4
         for j in xrange(1, n + 1):
-            #%Loop up the recurrence relation to get the Hermite
-            #%polynomials evaluated at z.
+            # Loop up the recurrence relation to get the Hermite
+            # polynomials evaluated at z.
             km1 = k0
             k0 = kp1
             kp1 = np.mod(kp1 + 1, 3)
@@ -468,7 +441,6 @@ def j_roots(n, alpha, beta, method='newton'):
     if not method.startswith('n'):
         [x, w] = ort.j_roots(n, alpha, beta)
     else:
-
         max_iter = 10
         releps = 3e-14
@@ -505,8 +477,9 @@ def j_roots(n, alpha, beta, method='newton'):
             # We next compute pp, the derivatives with a standard
             # relation involving the polynomials of one lower order.
-            pp = (n * (alpha - beta - tmp * z) * L[kp1, :] +
-                  2 * (n + alpha) * (n + beta) * L[k0, :]) / (tmp * (1 - z ** 2))
+            pp = ((n * (alpha - beta - tmp * z) * L[kp1, :] +
+                   2 * (n + alpha) * (n + beta) * L[k0, :]) /
+                  (tmp * (1 - z ** 2)))
             dz = L[kp1, :] / pp
             z = z - dz  # Newton's formula.
@@ -592,7 +565,7 @@ def la_roots(n, alpha=0, method='newton'):
         kp1 = 1
         k = slice(len(z))
         for _its in xrange(max_iter):
-            #%Newton's method carried out simultaneously on the roots.
+            # Newton's method carried out simultaneously on the roots.
             L[k0, k] = 0.
             L[kp1, k] = 1.
@@ -606,16 +579,16 @@ def la_roots(n, alpha=0, method='newton'):
                 L[kp1, k] = ((2 * jj - 1 + alpha - z[k]) * L[
                     k0, k] - (jj - 1 + alpha) * L[km1, k]) / jj
             # end
-            #%L now contains the desired Laguerre polynomials.
-            #%We next compute pp, the derivatives with a standard
-            #% relation involving the polynomials of one lower order.
+            # L now contains the desired Laguerre polynomials.
+            # We next compute pp, the derivatives with a standard
+            # relation involving the polynomials of one lower order.
             Lp[k] = L[k0, k]
             pp[k] = (n * L[kp1, k] - (n + alpha) * Lp[k]) / z[k]
             dz[k] = L[kp1, k] / pp[k]
             z[k] = z[k] - dz[k]  # % Newton's formula.
-            #%k = find((abs(dz) > releps.*z))
+            # k = find((abs(dz) > releps.*z))
             if not np.any(abs(dz) > releps):
                 break
@@ -886,8 +859,7 @@ def qrule(n, wfun=1, alpha=0, beta=0):
     return bp, wf


-def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
-           trace=False, args=None):
+class _Gaussq(object):
     '''
     Numerically evaluate integral, Gauss quadrature.
@@ -896,9 +868,9 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
     fun : callable
     a,b : array-like
         lower and upper integration limits, respectively.
-    reltol, abstol : real scalars, optional
+    releps, abseps : real scalars, optional
         relative and absolute tolerance, respectively.
-        (default reltol=abstool=1e-3).
+        (default releps=abseps=1e-3).
     wfun : scalar integer, optional
         defining the weight function, p(x). (default wfun = 1)
         1 : p(x) = 1                 a =-1, b = 1   Gauss-Legendre
@@ -967,178 +939,137 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
     qrule
     gaussq2d
     '''
-    global _POINTS_AND_WEIGHTS
-    max_iter = 11
-    gn = 2
-    if not hasattr(fun, '__call__'):
-        raise ValueError('Function must be callable')
-
-    A, B = np.atleast_1d(a, b)
-    a_shape = np.atleast_1d(A.shape)
-    b_shape = np.atleast_1d(B.shape)
-
-    # make sure the integration limits have correct size
-    if np.prod(a_shape) == 1:
-        A = A * ones(b_shape)
-        a_shape = b_shape
-    elif np.prod(b_shape) == 1:
-        B = B * ones(a_shape)
-    elif any(a_shape != b_shape):
-        raise ValueError('The integration limits must have equal size!')
-
-    if args is None:
-        num_parameters = 0
-    else:
-        num_parameters = len(args)
-        P0 = copy.deepcopy(args)
-    isvector1 = zeros(num_parameters)
-
-    nk = np.prod(a_shape)  # % # of integrals we have to compute
-    for ix in xrange(num_parameters):
-        if is_numlike(P0[ix]):
-            p0_shape = np.shape(P0[ix])
-            Np0 = np.prod(p0_shape)
-            isvector1[ix] = (Np0 > 1)
-            if isvector1[ix]:
-                if nk == 1:
-                    a_shape = p0_shape
-                    nk = Np0
-                    A = A * ones(a_shape)
-                    B = B * ones(a_shape)
-                elif nk != Np0:
-                    raise ValueError('The input must have equal size!')
-                P0[ix].shape = (-1, 1)  # make sure it is a column
-
-    k = np.arange(nk)
-    val = zeros(nk)
-    val_old = zeros(nk)
-    abserr = zeros(nk)
-
-    # setup mapping parameters
-    A.shape = (-1, 1)
-    B.shape = (-1, 1)
-    jacob = (B - A) / 2
-
-    shift = 1
-    if wfun == 1:  # Gauss-legendre
-        dx = jacob
-    elif wfun == 2 or wfun == 3:
-        shift = 0
-        jacob = ones((nk, 1))
-        A = zeros((nk, 1))
-        dx = jacob
-    elif wfun == 4:
-        dx = jacob ** (alpha + beta + 1)
-    elif wfun == 5:
-        dx = ones((nk, 1))
-    elif wfun == 6:
-        dx = jacob ** 2
-    elif wfun == 7:
-        shift = 0
-        jacob = jacob * 2
-        dx = jacob
-    elif wfun == 8:
-        shift = 0
-        jacob = jacob * 2
-        dx = sqrt(jacob)
-    elif wfun == 9:
-        shift = 0
-        jacob = jacob * 2
-        dx = sqrt(jacob) ** 3
-    else:
-        raise ValueError('unknown option')
-
-    dx = dx.ravel()
-
-    if trace:
-        x_trace = [0, ] * max_iter
-        y_trace = [0, ] * max_iter
-
-    if num_parameters > 0:
-        ix_vec, = np.where(isvector1)
-        if len(ix_vec):
-            P1 = copy.copy(P0)
-
-    # Break out of the iteration loop for three reasons:
-    # 1) the last update is very small (compared to int and to reltol)
-    # 2) There are more than 11 iterations. This should NEVER happen.
-    for ix in xrange(max_iter):
-        x_and_w = 'wfun%d_%d_%g_%g' % (wfun, gn, alpha, beta)
-        if x_and_w in _POINTS_AND_WEIGHTS:
-            xn, w = _POINTS_AND_WEIGHTS[x_and_w]
-        else:
-            xn, w = qrule(gn, wfun, alpha, beta)
-            _POINTS_AND_WEIGHTS[x_and_w] = (xn, w)
-        # calculate the x values
-        x = (xn + shift) * jacob[k, :] + A[k, :]
-
-        # calculate function values y=fun(x,p1,p2,....,pn)
-        if num_parameters > 0:
-            if len(ix_vec):
-                #% Expand vector to the correct size
-                for iy in ix_vec:
-                    P1[iy] = P0[iy][k, :]
-                y = fun(x, **P1)
-            else:
-                y = fun(x, **P0)
-        else:
-            y = fun(x)
-
-        val[k] = np.sum(w * y, axis=1) * dx[k]  # do the integration sum(y.*w)
-
-        if trace:
-            x_trace.append(x.ravel())
-            y_trace.append(y.ravel())
-
-            hfig = plt.plot(x, y, 'r.')
-            # hold on
-            # drawnow,shg
-            # if trace>1:
-            #     pause
-            plt.setp(hfig, 'color', 'b')
-
-        abserr[k] = abs(val_old[k] - val[k])  # absolute tolerance
-        if ix > 1:
-            k, = np.where(abserr > np.maximum(abs(reltol * val), abstol))
-            # abserr > abs(abstol))%indices to integrals which
-            # did not converge
-            nk = len(k)  # of integrals we have to compute again
-            if nk:
-                val_old[k] = val[k]
-            else:
-                break
-
-        gn *= 2  # double the # of basepoints and weights
-    else:
-        if nk > 1:
-            if (nk == np.prod(a_shape)):
-                tmptxt = 'All integrals did not converge'
-            else:
-                tmptxt = '%d integrals did not converge' % (nk,)
-        else:
-            tmptxt = 'Integral did not converge--singularity likely!'
-        warnings.warn(tmptxt + '--singularities likely!')
-
-    # make sure int is the same size as the integration limits
-    val.shape = a_shape
-    abserr.shape = a_shape
-
-    if trace > 0:
-        plt.clf()
-        plt.plot(np.hstack(x_trace), np.hstack(y_trace), '+')
-    return val, abserr
+    def _get_dx(self, wfun, jacob, alpha, beta):
+        if wfun in [1, 2, 3, 7]:
+            dx = jacob
+        elif wfun == 4:
+            dx = jacob ** (alpha + beta + 1)
+        elif wfun == 5:
+            dx = ones((np.size(jacob), 1))
+        elif wfun == 6:
+            dx = jacob ** 2
+        elif wfun == 8:
+            dx = sqrt(jacob)
+        elif wfun == 9:
+            dx = sqrt(jacob) ** 3
+        else:
+            raise ValueError('unknown option')
+        return dx.ravel()
+
+    def _points_and_weights(self, gn, wfun, alpha, beta):
+        global _POINTS_AND_WEIGHTS
+        name = 'wfun%d_%d_%g_%g' % (wfun, gn, alpha, beta)
+        x_and_w = _POINTS_AND_WEIGHTS.setdefault(name, [])
+        if len(x_and_w) == 0:
+            x_and_w.extend(qrule(gn, wfun, alpha, beta))
+        xn, w = x_and_w
+        return xn, w
+
+    def _initialize_trace(self, max_iter):
+        if self.trace:
+            self.x_trace = [0] * max_iter
+            self.y_trace = [0] * max_iter
+
+    def _plot_trace(self, x, y):
+        if self.trace:
+            self.x_trace.append(x.ravel())
+            self.y_trace.append(y.ravel())
+            hfig = plt.plot(x, y, 'r.')
+            plt.setp(hfig, 'color', 'b')
+
+    def _plot_final_trace(self):
+        if self.trace > 0:
+            plt.clf()
+            plt.plot(np.hstack(self.x_trace), np.hstack(self.y_trace), '+')
+
+    def _get_jacob(self, wfun, A, B):
+        if wfun in [2, 3]:
+            nk = np.size(A)
+            jacob = ones((nk, 1))
+        else:
+            jacob = (B - A) * 0.5
+            if wfun in [7, 8, 9]:
+                jacob = jacob * 2
+        return jacob
+
+    def _warn(self, k, a_shape):
+        nk = len(k)
+        if nk > 1:
+            if (nk == np.prod(a_shape)):
+                tmptxt = 'All integrals did not converge'
+            else:
+                tmptxt = '%d integrals did not converge' % (nk, )
+            tmptxt = tmptxt + '--singularities likely!'
+        else:
+            tmptxt = 'Integral did not converge--singularity likely!'
+        warnings.warn(tmptxt)
+
+    def _initialize(self, wfun, a, b, args):
+        args = np.broadcast_arrays(*np.atleast_1d(a, b, *args))
+        a_shape = args[0].shape
+        args = map(lambda x: np.reshape(x, (-1, 1)), args)
+        A, B = args[:2]
+        args = args[2:]
+        if wfun in [2, 3]:
+            A = zeros((A.size, 1))
+        return A, B, args, a_shape
+
+    def __call__(self, fun, a, b, releps=1e-3, abseps=1e-3, alpha=0, beta=0,
+                 wfun=1, trace=False, args=(), max_iter=11):
+        self.trace = trace
+        gn = 2
+
+        A, B, args, a_shape = self._initialize(wfun, a, b, args)
+
+        jacob = self._get_jacob(wfun, A, B)
+        shift = int(wfun in [1, 4, 5, 6])
+        dx = self._get_dx(wfun, jacob, alpha, beta)
+        self._initialize_trace(max_iter)
+
+        # Break out of the iteration loop for three reasons:
+        # 1) the last update is very small (compared to int and to releps)
+        # 2) There are more than 11 iterations. This should NEVER happen.
+        dtype = np.result_type(fun((A+B)*0.5, *args))
+        nk = np.prod(a_shape)  # # of integrals we have to compute
+        k = np.arange(nk)
+        opts = (nk, dtype)
+        val, val_old, abserr = zeros(*opts), ones(*opts), zeros(*opts)
+        for i in xrange(max_iter):
+            xn, w = self._points_and_weights(gn, wfun, alpha, beta)
+            x = (xn + shift) * jacob[k, :] + A[k, :]
+
+            pi = [xi[k, :] for xi in args]
+            y = fun(x, *pi)
+            self._plot_trace(x, y)
+            val[k] = np.sum(w * y, axis=1) * dx[k]  # do the integration
+            if any(np.isnan(val)):
+                val[np.isnan(val)] = val_old[np.isnan(val)]
+            if 1 < i:
+                abserr[k] = abs(val_old[k] - val[k])  # absolute tolerance
+                k, = np.where(abserr > np.maximum(abs(releps * val), abseps))
+                nk = len(k)  # of integrals we have to compute again
+                if nk == 0:
+                    break
+            val_old[k] = val[k]
+            gn *= 2  # double the # of basepoints and weights
+        else:
+            self._warn(k, a_shape)
+
+        # make sure int is the same size as the integration limits
+        val.shape = a_shape
+        abserr.shape = a_shape
+
+        self._plot_final_trace()
+        return val, abserr
+
+
+gaussq = _Gaussq()


 def richardson(Q, k):
     # license BSD
     # Richardson extrapolation with parameter estimation
     c = np.real((Q[k - 1] - Q[k - 2]) / (Q[k] - Q[k - 1])) - 1.
-    #% The lower bound 0.07 admits the singularity x.^-0.9
+    # The lower bound 0.07 admits the singularity x.^-0.9
     c = max(c, 0.07)
     R = Q[k] + (Q[k] - Q[k - 1]) / c
     return R
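Note: the module-level instance preserves the call style of the old function (with reltol/abstol renamed to releps/abseps), so existing callers keep working. A short usage sketch of the refactored callable; the Laguerre case mirrors the new unit test added in this commit:

    import numpy as np
    from wafo.integrate import gaussq

    val, err = gaussq(lambda x: x ** 2, 0., 1.)                    # Gauss-Legendre, ~1/3
    vals, errs = gaussq(np.exp, [0, 0, 0], [1, 2, 3])              # vectorized limits
    val2, err2 = gaussq(lambda x: x, 0, np.inf, wfun=3, alpha=1)   # Gauss-Laguerre, ~2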
@@ -1197,7 +1128,7 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
     else:
         reverse = False

-    #% Infinite limits
+    # Infinite limits
     if np.isinf(a) | np.isinf(b):
         # Check real limits
         if ~ np.isreal(a) | ~np.isreal(b) | np.isnan(a) | np.isnan(b):
@@ -1235,14 +1166,9 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
         xq = np.hstack((xq, -xq))
         wq = np.hstack((wq, wq))
     nq = len(xq)
-#    iscomplex = (np.iscomplex(a) | np.iscomplex(b)).any()
-#    if iscomplex:
-#        dtype = np.complex128
-#    else:
-    dtype = np.float64
+    dtype = np.result_type(fun(a), fun(b))

     # Initiate vectors
-#    max_iter = 17  # Max number of iterations
     Q0 = zeros(max_iter, dtype=dtype)  # Quadrature
     Q1 = zeros(max_iter, dtype=dtype)  # First Richardson extrapolation
     Q2 = zeros(max_iter, dtype=dtype)  # Second Richardson extrapolation
@@ -1270,7 +1196,7 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
         elif k >= 3:
             Q1[k] = richardson(Q0, k)

-        #% Estimate absolute error
+        # Estimate absolute error
         if k >= 6:
             Qv = np.hstack((Q0[k], Q1[k], Q2[k]))
             Qw = np.hstack((Q0[k - 1], Q1[k - 1], Q2[k - 1]))
@@ -1306,7 +1232,16 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
     return Q, err


-def qdemo(f, a, b):
+def boole(y, x):
+    a, b = x[0], x[-1]
+    n = len(x)
+    h = (b - a) / (n - 1)
+    return (2 * h / 45) * (7 * (y[0] + y[-1]) + 12 * np.sum(y[2:n - 1:4]) +
+                           32 * np.sum(y[1:n - 1:2]) +
+                           14 * np.sum(y[4:n - 3:4]))
+
+
+def qdemo(f, a, b, kmax=9, plot_error=False):
     '''
     Compares different quadrature rules.
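Note: a quick sanity check of the new boole helper, which implements the composite Boole rule and expects n = 4*m + 1 equally spaced samples:

    import numpy as np
    from wafo.integrate import boole

    x = np.linspace(0, 3, 9)
    q = boole(np.exp(x), x)   # ~19.0856, cf. the 9-evaluation row of the qdemo
                              # table below (exact value: exp(3) - 1 ~ 19.0855)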
@@ -1333,154 +1268,148 @@ def qdemo(f, a, b):
     >>> import numpy as np
     >>> qdemo(np.exp,0,3)
     true value = 19.08553692
-    ftn          Trapezoid           Simpsons            Booles
-    evals    approx       error      approx       error      approx       error
-       3, 22.5366862979, 3.4511493747, 19.5061466023, 0.4206096791, 19.4008539142, 0.3153169910
-       5, 19.9718950387, 0.8863581155, 19.1169646189, 0.0314276957, 19.0910191534, 0.0054822302
-       9, 19.3086731081, 0.2231361849, 19.0875991312, 0.0020622080, 19.0856414320, 0.0001045088
-      17, 19.1414188470, 0.0558819239, 19.0856674267, 0.0001305035, 19.0855386464, 0.0000017232
-      33, 19.0995135407, 0.0139766175, 19.0855451052, 0.0000081821, 19.0855369505, 0.0000000273
-      65, 19.0890314614, 0.0034945382, 19.0855374350, 0.0000005118, 19.0855369236, 0.0000000004
-     129, 19.0864105817, 0.0008736585, 19.0855369552, 0.0000000320, 19.0855369232, 0.0000000000
-     257, 19.0857553393, 0.0002184161, 19.0855369252, 0.0000000020, 19.0855369232, 0.0000000000
-     513, 19.0855915273, 0.0000546041, 19.0855369233, 0.0000000001, 19.0855369232, 0.0000000000
-    ftn          Clenshaw            Chebychev           Gauss-L
-    evals    approx       error      approx       error      approx       error
-       3, 19.5061466023, 0.4206096791, 0.0000000000, 1.0000000000, 19.0803304585, 0.0052064647
-       5, 19.0834145766, 0.0021223465, 0.0000000000, 1.0000000000, 19.0855365951, 0.0000003281
-       9, 19.0855369150, 0.0000000082, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
-      17, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
-      33, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
-      65, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
-     129, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
-     257, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
-     513, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
+    ftn,                Boole,            Chebychev
+    evals       approx        error       approx        error
+       3, 19.4008539142, 0.3153169910, 19.5061466023, 0.4206096791
+       5, 19.0910191534, 0.0054822302, 19.0910191534, 0.0054822302
+       9, 19.0856414320, 0.0001045088, 19.0855374134, 0.0000004902
+      17, 19.0855386464, 0.0000017232, 19.0855369232, 0.0000000000
+      33, 19.0855369505, 0.0000000273, 19.0855369232, 0.0000000000
+      65, 19.0855369236, 0.0000000004, 19.0855369232, 0.0000000000
+     129, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
+     257, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
+     513, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
+    ftn,      Clenshaw-Curtis,       Gauss-Legendre
+    evals       approx        error       approx        error
+       3, 19.5061466023, 0.4206096791, 19.0803304585, 0.0052064647
+       5, 19.0834145766, 0.0021223465, 19.0855365951, 0.0000003281
+       9, 19.0855369150, 0.0000000082, 19.0855369232, 0.0000000000
+      17, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
+      33, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
+      65, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
+     129, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
+     257, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
+     513, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
+    ftn,                Simps,                Trapz
+    evals       approx        error       approx        error
+       3, 19.5061466023, 0.4206096791, 22.5366862979, 3.4511493747
+       5, 19.1169646189, 0.0314276957, 19.9718950387, 0.8863581155
+       9, 19.0875991312, 0.0020622080, 19.3086731081, 0.2231361849
+      17, 19.0856674267, 0.0001305035, 19.1414188470, 0.0558819239
+      33, 19.0855451052, 0.0000081821, 19.0995135407, 0.0139766175
+      65, 19.0855374350, 0.0000005118, 19.0890314614, 0.0034945382
+     129, 19.0855369552, 0.0000000320, 19.0864105817, 0.0008736585
+     257, 19.0855369252, 0.0000000020, 19.0857553393, 0.0002184161
+     513, 19.0855369233, 0.0000000001, 19.0855915273, 0.0000546041
     '''
-    # use quad8 with small tolerance to get "true" value
-    #true1 = quad8(f,a,b,1e-10)
-    #[true tol]= gaussq(f,a,b,1e-12)
-    #[true tol] = agakron(f,a,b,1e-13)
-    true_val, _tol = intg.quad(f, a, b)
-    print('true value = %12.8f' % (true_val,))
-    kmax = 9
-    neval = zeros(kmax, dtype=int)
-    qt = zeros(kmax)
-    qs = zeros(kmax)
-    qb = zeros(kmax)
-    qc = zeros(kmax)
-    qc2 = zeros(kmax)
-    qg = zeros(kmax)
-    et = ones(kmax)
-    es = ones(kmax)
-    eb = ones(kmax)
-    ec = ones(kmax)
-    ec2 = ones(kmax)
-    ec3 = ones(kmax)
-    eg = ones(kmax)
-    # try various approximations
-    for k in xrange(kmax):
-        n = 2 ** (k + 1) + 1
-        neval[k] = n
-        h = (b - a) / (n - 1)
-        x = np.linspace(a, b, n)
-        y = f(x)
-
-        # trapezoid approximation
-        q = np.trapz(y, x)
-        # h*( (y(1)+y(n))/2 + sum(y(2:n-1)) )
-        qt[k] = q
-        et[k] = abs(q - true_val)
-        # Simpson approximation
-        q = intg.simps(y, x)
-        #(h/3)*( y(1)+y(n) + 4*sum(y(2:2:n-1)) + 2*sum(y(3:2:n-2)) )
-        qs[k] = q
-        es[k] = abs(q - true_val)
-        # Boole's rule
-        #q = boole(x,y)
-        q = (2 * h / 45) * (7 * (y[0] + y[-1]) + 12 * np.sum(y[2:n - 1:4])
-                            + 32 * np.sum(y[1:n - 1:2]) +
-                            14 * np.sum(y[4:n - 3:4]))
-        qb[k] = q
-        eb[k] = abs(q - true_val)
-
-        # Clenshaw-Curtis
-        [q, ec3[k]] = clencurt(f, a, b, (n - 1) / 2)
-        qc[k] = q
-        ec[k] = abs(q - true_val)
-
-        # Chebychev
-        #ck = chebfit(f,n,a,b)
-        #q = chebval(b,chebint(ck,a,b),a,b)
-        #qc2[k] = q; ec2[k] = abs(q - true)
-
-        # Gauss-Legendre quadrature
-        q = intg.fixed_quad(f, a, b, n=n)[0]
-        #[x, w]=qrule(n,1)
-        # x = (b-a)/2*x + (a+b)/2 % Transform base points X.
-        # w = (b-a)/2*w % Adjust weigths.
-        #q = sum(feval(f,x)*w)
-        qg[k] = q
-        eg[k] = abs(q - true_val)
-
-    #% display results
-    formats = ['%4.0f, ', ] + ['%10.10f, ', ] * 6
-    formats[-1] = formats[-1].split(',')[0]
-    data = np.vstack((neval, qt, et, qs, es, qb, eb)).T
-    print(' ftn Trapezoid Simpson''s Boole''s')  # @IgnorePep8
-    print('evals approx error approx error approx error')  # @IgnorePep8
-    for k in xrange(kmax):
-        tmp = data[k].tolist()
-        print(''.join(fi % t for fi, t in zip(formats, tmp)))
-    # display results
-    data = np.vstack((neval, qc, ec, qc2, ec2, qg, eg)).T
-    print(' ftn Clenshaw Chebychev Gauss-L')  # @IgnorePep8
-    print('evals approx error approx error approx error')  # @IgnorePep8
-    for k in xrange(kmax):
-        tmp = data[k].tolist()
-        print(''.join(fi % t for fi, t in zip(formats, tmp)))
-
-    plt.loglog(neval, np.vstack((et, es, eb, ec, ec2, eg)).T)
-    plt.xlabel('number of function evaluations')
-    plt.ylabel('error')
-    plt.legend(
-        ('Trapezoid', 'Simpsons', 'Booles', 'Clenshaw', 'Chebychev', 'Gauss-L'))  # @IgnorePep8
-    # ec3'
+    true_val, _tol = intg.quad(f, a, b)
+    print('true value = %12.8f' % (true_val,))
+    neval = zeros(kmax, dtype=int)
+    vals_dic = {}
+    err_dic = {}
+    # try various approximations
+    methods = [trapz, simps, boole, ]
+    for k in xrange(kmax):
+        n = 2 ** (k + 1) + 1
+        neval[k] = n
+        x = np.linspace(a, b, n)
+        y = f(x)
+        for method in methods:
+            name = method.__name__.title()
+            q = method(y, x)
+            vals_dic.setdefault(name, []).append(q)
+            err_dic.setdefault(name, []).append(abs(q - true_val))
+
+        name = 'Clenshaw-Curtis'
+        q, _ec3 = clencurt(f, a, b, (n - 1) / 2)
+        vals_dic.setdefault(name, []).append(q[0])
+        err_dic.setdefault(name, []).append(abs(q[0] - true_val))
+
+        name = 'Chebychev'
+        ck = np.polynomial.chebyshev.chebfit(x, y, deg=min(n-1, 36))
+        cki = np.polynomial.chebyshev.chebint(ck)
+        q = np.polynomial.chebyshev.chebval(x[-1], cki)
+        vals_dic.setdefault(name, []).append(q)
+        err_dic.setdefault(name, []).append(abs(q - true_val))
+        # ck = chebfit(f,n,a,b)
+        # q = chebval(b,chebint(ck,a,b),a,b)
+        # qc2[k] = q; ec2[k] = abs(q - true)
+
+        name = 'Gauss-Legendre'  # quadrature
+        q = intg.fixed_quad(f, a, b, n=n)[0]
+        # [x, w]=qrule(n,1)
+        # x = (b-a)/2*x + (a+b)/2 % Transform base points X.
+        # w = (b-a)/2*w % Adjust weigths.
+        # q = sum(feval(f,x)*w)
+        vals_dic.setdefault(name, []).append(q)
+        err_dic.setdefault(name, []).append(abs(q - true_val))
+
+    # display results
+    names = sorted(vals_dic.keys())
+    num_cols = 2
+    formats = ['%4.0f, ', ] + ['%10.10f, ', ] * num_cols * 2
+    formats[-1] = formats[-1].split(',')[0]
+    formats_h = ['%4s, ', ] + ['%20s, ', ] * num_cols
+    formats_h[-1] = formats_h[-1].split(',')[0]
+    headers = ['evals'] + ['%12s %12s' % ('approx', 'error')] * num_cols
+    while len(names) > 0:
+        print(''.join(fi % t for fi, t in zip(formats_h,
+                                              ['ftn'] + names[:num_cols])))
+        print(' '.join(headers))
+
+        data = [neval]
+        for name in names[:num_cols]:
+            data.append(vals_dic[name])
+            data.append(err_dic[name])
+        data = np.vstack(tuple(data)).T
+        for k in xrange(kmax):
+            tmp = data[k].tolist()
+            print(''.join(fi % t for fi, t in zip(formats, tmp)))
+        if plot_error:
+            plt.figure(0)
+            for name in names[:num_cols]:
+                plt.loglog(neval, err_dic[name], label=name)
+
+        names = names[num_cols:]
+    if plot_error:
+        plt.xlabel('number of function evaluations')
+        plt.ylabel('error')
+        plt.legend()
+        plt.show('hold')
 def main():
     #    val, err = clencurt(np.exp, 0, 2)
     #    valt = np.exp(2) - np.exp(0)
     #    [Q, err] = quadgr(lambda x: x ** 2, 1, 4, 1e-9)
     #    [Q, err] = quadgr(humps, 1, 4, 1e-9)
     #
     #    [x, w] = h_roots(11, 'newton')
     #    sum(w)
     #    [x2, w2] = la_roots(11, 1, 't')
     #
     #    from scitools import numpyutils as npu  # @UnresolvedImport
     #    fun = npu.wrap2callable('x**2')
     #    p0 = fun(0)
     #    A = [0, 1, 1]; B = [2, 4, 3]
     #    area, err = gaussq(fun, A, B)
     #
     #    fun = npu.wrap2callable('x**2')
     #    [val1, err1] = gaussq(fun, A, B)
     #
     #
     #    Integration of x^2*exp(-x) from zero to infinity:
     #    fun2 = npu.wrap2callable('1')
     #    [val2, err2] = gaussq(fun2, 0, np.inf, wfun=3, alpha=2)
     #    [val2, err2] = gaussq(lambda x: x ** 2, 0, np.inf, wfun=3, alpha=0)
     #
     #    Integrate humps from 0 to 2 and from 1 to 4
     #    [val3, err3] = gaussq(humps, A, B)
     #
     #    [x, w] = p_roots(11, 'newton', 1, 3)
     #    y = np.sum(x ** 2 * w)

     x = np.linspace(0, np.pi / 2)
     _q0 = np.trapz(humps(x), x)
@@ -1493,6 +1422,9 @@ def test_docstrings():
     import doctest
     doctest.testmod()


 if __name__ == '__main__':
     test_docstrings()
+#    qdemo(np.exp, 0, 3, plot_error=True)
+#    plt.show('hold')
 #    main()
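Note: a self-contained sketch of the Chebychev column added to qdemo above: fit a Chebyshev series to the samples and integrate the series. Subtracting the antiderivative at both endpoints is the general form; the qdemo call gets away with evaluating only at x[-1] because chebint uses a lower bound of 0 and the demo integrates from a = 0:

    import numpy as np
    cheb = np.polynomial.chebyshev

    x = np.linspace(0, 3, 17)
    ck = cheb.chebfit(x, np.exp(x), deg=min(len(x) - 1, 36))
    cki = cheb.chebint(ck)                                   # antiderivative series
    q = cheb.chebval(x[-1], cki) - cheb.chebval(x[0], cki)   # ~ exp(3) - 1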

@@ -1,19 +1,8 @@
-#-------------------------------------------------------------------------
-# Name:        module1
-# Purpose:
-#
-# Author:      pab
-#
-# Created:     30.12.2008
-# Copyright:   (c) pab 2008
-# Licence:     <your licence>
-#-------------------------------------------------------------------------
 #!/usr/bin/env python
 from __future__ import division
 import numpy as np
 import scipy.signal
-#import scipy.special as spec
-import scipy.sparse.linalg  # @UnusedImport
+# import scipy.sparse.linalg  # @UnusedImport
 import scipy.sparse as sparse
 from numpy.ma.core import ones, zeros, prod, sin
 from numpy import diff, pi, inf  # @UnresolvedImport
@@ -270,7 +259,7 @@ def sgolay2d(z, window_size, order, derivative=None):
         np.fliplr(Z[-half_size:, half_size + 1:2 * half_size + 1]) - band)
     # solve system and convolve
-    if derivative == None:
+    if derivative is None:
         m = np.linalg.pinv(A)[0].reshape((window_size, -1))
         return scipy.signal.fftconvolve(Z, m, mode='valid')
     elif derivative == 'col':
@@ -364,7 +353,7 @@ class PPform(object):
             return
         breaks = self.breaks.copy()
         coefs = self.coeffs.copy()
-        #pieces = len(breaks) - 1
+        # pieces = len(breaks) - 1

         # Add new breaks beyond each end
         breaks2add = breaks[[0, -1]] + np.array([-1, 1])
@@ -526,7 +515,7 @@ class SmoothSpline(PPform):
         n = len(x)
-        #ndy = y.ndim
+        # ndy = y.ndim
         szy = y.shape
         nd = prod(szy[:-1])
@@ -554,10 +543,9 @@ class SmoothSpline(PPform):
             zrs = zeros(nd)
             if p < 1:
                 # faster than yi-6*(1-p)*Q*u
-                ai = (y - (6 * (1 - p) * D *
-                           diff(vstack([zrs,
-                                        diff(vstack([zrs, u, zrs]), axis=0) * dx1,
-                                        zrs]), axis=0)).T).T
+                Qu = D * diff(vstack([zrs, diff(vstack([zrs, u, zrs]),
+                                                axis=0) * dx1, zrs]), axis=0)
+                ai = (y - (6 * (1 - p) * Qu).T).T
             else:
                 ai = y.reshape(n, -1)
@@ -612,7 +600,7 @@ class SmoothSpline(PPform):
         # Make sure it uses symmetric matrix solver
         ddydx = diff(dydx, axis=0)
-        #sp.linalg.use_solver(useUmfpack=True)
+        # sp.linalg.use_solver(useUmfpack=True)
         u = 2 * sparse.linalg.spsolve((QQ + QQ.T), ddydx)  # @UndefinedVariable
         return u.reshape(n - 2, -1), p
@@ -786,7 +774,7 @@ def stineman_interp(xi, x, y, yp=None):
     x = np.asarray(x, np.float_)
     y = np.asarray(y, np.float_)
     assert x.shape == y.shape
-    #N = len(y)
+    # N = len(y)
     if yp is None:
         yp = slopes(x, y)
@@ -794,7 +782,7 @@ def stineman_interp(xi, x, y, yp=None):
     yp = np.asarray(yp, np.float_)
     xi = np.asarray(xi, np.float_)
-    #yi = np.zeros(xi.shape, np.float_)
+    # yi = np.zeros(xi.shape, np.float_)
     # calculate linear slopes
     dx = x[1:] - x[:-1]
@@ -1082,13 +1070,13 @@ def test_smoothing_spline():
     pp0 = pp1.integrate()
     dy1 = pp1(x1)
     y01 = pp0(x1)
-    #dy = y-y1
+    # dy = y-y1
     import matplotlib.pyplot as plb
     plb.plot(x, y, x1, y1, '.', x1, dy1, 'ro', x1, y01, 'r-')
     plb.show()
     pass
-    #tck = interpolate.splrep(x, y, s=len(x))
+    # tck = interpolate.splrep(x, y, s=len(x))


 def compare_methods():
@@ -1168,7 +1156,7 @@ def demo_monoticity():
     plt.plot(xvec, yvec2, 'k', label='interp1d')
     plt.plot(xvec, yvec3, 'g', label='CHS')
     plt.plot(xvec, yvec4, 'm', label='Stineman')
-    #plt.plot(xvec, yvec5, 'yo', label='Pchip2')
+    # plt.plot(xvec, yvec5, 'yo', label='Pchip2')
     plt.xlabel("X")
     plt.ylabel("Y")
     plt.title("Comparing Pchip() vs. Scipy interp1d() vs. non-monotonic CHS")
@@ -1244,8 +1232,8 @@ def test_docstrings():
 if __name__ == '__main__':
-    #test_func()
+    # test_func()
     # test_doctstrings()
     # test_smoothing_spline()
-    #compare_methods()
+    # compare_methods()
     demo_monoticity()

@ -285,7 +285,7 @@ def plotspec(specdata, linetype='b-', flag=1):
# txt = ''.join(txt) # txt = ''.join(txt)
# if (flag == 3): # if (flag == 3):
# plotbackend.subplot(2, 1, 1) # plotbackend.subplot(2, 1, 1)
# if (flag == 1) or (flag == 3):#% Plot in normal scale # if (flag == 1) or (flag == 3):# Plot in normal scale
# plotbackend.plot(np.vstack([Fp, Fp]), # plotbackend.plot(np.vstack([Fp, Fp]),
# np.vstack([zeros(len(indm)), data.take(indm)]), # np.vstack([zeros(len(indm)), data.take(indm)]),
# ':', label=txt) # ':', label=txt)
@ -1120,11 +1120,11 @@ class SpecData1D(PlotData):
Hw2 = Hw1 Hw2 = Hw1
# end # end
#%Hw1(end) # Hw1(end)
#%maxS*1e-3 # maxS*1e-3
#%if Hw1[-1]*S.data>maxS*1e-3, # if Hw1[-1]*S.data>maxS*1e-3,
#% warning('The Nyquist frequency of the spectrum may be too low') # warning('The Nyquist frequency of the spectrum may be too low')
#%end # end
SL.date = now() # datestr(now) SL.date = now() # datestr(now)
# if nargout>1 # if nargout>1
@ -1354,11 +1354,11 @@ class SpecData1D(PlotData):
B_up = hstack([un + XtInf, XdInf, 0]) B_up = hstack([un + XtInf, XdInf, 0])
B_lo = hstack([un, 0, -XdInf]) B_lo = hstack([un, 0, -XdInf])
#%INFIN = [1 1 0] # INFIN = [1 1 0]
# BIG = zeros((Ntime+2,Ntime+2)) # BIG = zeros((Ntime+2,Ntime+2))
ex = zeros(Ntime + 2, dtype=float) ex = zeros(Ntime + 2, dtype=float)
#%CC = 2*pi*sqrt(-R(1,1)/R(1,3))*exp(un^2/(2*R(1,1))) # CC = 2*pi*sqrt(-R(1,1)/R(1,3))*exp(un^2/(2*R(1,1)))
#% XcScale = log(CC) # XcScale = log(CC)
opts['xcscale'] = log( opts['xcscale'] = log(
2 * pi * sqrt(-R[0, 0] / R[0, 2])) + (un ** 2 / (2 * R[0, 0])) 2 * pi * sqrt(-R[0, 0] / R[0, 2])) + (un ** 2 / (2 * R[0, 0]))
@ -1376,7 +1376,7 @@ class SpecData1D(PlotData):
indI[2] = Nt indI[2] = Nt
indI[3] = Ntd - 1 indI[3] = Ntd - 1
#% positive wave period # positive wave period
BIG = self._covinput_t_pdf(pt, R) BIG = self._covinput_t_pdf(pt, R)
tmp = rind(BIG, ex[:Ntdc], B_lo, B_up, indI, xc, Nt) tmp = rind(BIG, ex[:Ntdc], B_lo, B_up, indI, xc, Nt)
@ -1450,14 +1450,14 @@ class SpecData1D(PlotData):
Scd = array([[0, R[pt, 1]], [-R[pt, 1], 0]]) Scd = array([[0, R[pt, 1]], [-R[pt, 1], 0]])
if pt > 1: if pt > 1:
#%cov(Xt) # cov(Xt)
# Cov(X(tn),X(ts)) = r(ts-tn) = r(|ts-tn|) # Cov(X(tn),X(ts)) = r(ts-tn) = r(|ts-tn|)
Stt = toeplitz(R[:pt - 1, 0]) Stt = toeplitz(R[:pt - 1, 0])
#%cov(Xc,Xt) # cov(Xc,Xt)
# Cov(X(tn),X(ts)) = r(ts-tn) = r(|ts-tn|) # Cov(X(tn),X(ts)) = r(ts-tn) = r(|ts-tn|)
Sct = R[1:pt, 0] Sct = R[1:pt, 0]
Sct = vstack((Sct, Sct[::-1])) Sct = vstack((Sct, Sct[::-1]))
#%Cov(Xd,Xt) # Cov(Xd,Xt)
# Cov(X'(t1),X(ts)) = -r'(ts-t1) = r(|s-t|) # Cov(X'(t1),X(ts)) = -r'(ts-t1) = r(|s-t|)
Sdt = -R[1:pt, 1] Sdt = -R[1:pt, 1]
Sdt = vstack((Sdt, -Sdt[::-1])) Sdt = vstack((Sdt, -Sdt[::-1]))
@ -1577,7 +1577,7 @@ class SpecData1D(PlotData):
in_space = (kind[-2].upper() == 'L') in_space = (kind[-2].upper() == 'L')
if in_space: if in_space:
# spec = spec2spec(spec,'k1d') # spec = spec2spec(spec,'k1d')
ptxt = 'space' ptxt = 'space'
else: else:
# spec = spec2spec(spec,'freq') # spec = spec2spec(spec,'freq')
@ -1623,7 +1623,6 @@ class SpecData1D(PlotData):
'Discretization levels must be larger than zero') 'Discretization levels must be larger than zero')
# Transform amplitudes to Gaussian levels: # Transform amplitudes to Gaussian levels:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
h = linspace(*paramu) h = linspace(*paramu)
if defnr > 1: # level v separated Max2min densities if defnr > 1: # level v separated Max2min densities
@ -2045,9 +2044,9 @@ class SpecData1D(PlotData):
# end %do # end %do
# close(h11) # close(h11)
err = sqrt(err) err = sqrt(err)
#% goto 800 # goto 800
else: # def_nr>3 else: # def_nr>3
#%200 continue # 200 continue
# waitTxt = sprintf('Please wait ...(start at: %s)',datestr(now)) # waitTxt = sprintf('Please wait ...(start at: %s)',datestr(now))
# h11 = fwaitbar(0,[],waitTxt) # h11 = fwaitbar(0,[],waitTxt)
tnold = -1 tnold = -1
@ -2095,14 +2094,14 @@ class SpecData1D(PlotData):
# end # end
elif def_nr == 5: elif def_nr == 5:
if (Nx == 1): if (Nx == 1):
#% Joint density (Tdm,TMm) given the Max and the min. # Joint density (Tdm,TMm) given the Max and the min.
#% Note the density is not scaled to unity # Note the density is not scaled to unity
pdf[0, tn - ts, tn] = fxind[0] # %*CC pdf[0, tn - ts, tn] = fxind[0] # %*CC
err[0, tn - ts, tn] = err0[0] ** 2 err[0, tn - ts, tn] = err0[0] ** 2
terr[0, tn - ts, tn] = terr0[0] terr[0, tn - ts, tn] = terr0[0]
else: else:
#% 5, gives level u separated Max2min and wave period from # 5, gives level u separated Max2min and wave period from
#% the crossing of level u to the min (M,m,Tdm). # the crossing of level u to the min (M,m,Tdm).
IJ = 0 IJ = 0
for i in range(1, Nx1): # = 2:Nx1 for i in range(1, Nx1): # = 2:Nx1
@ -2120,11 +2119,11 @@ class SpecData1D(PlotData):
# end % SELECT # end % SELECT
# end% enddo # end% enddo
else: # % exploit symmetry else: # % exploit symmetry
#%300 Symmetry # 300 Symmetry
for ts in range(1, Ntd // 2): # = 2:floor(Ntd//2) for ts in range(1, Ntd // 2): # = 2:floor(Ntd//2)
#% Using the symmetry since U = 0 and the transformation is # Using the symmetry since U = 0 and the transformation is
#% linear. # linear.
#% positive wave period # positive wave period
BIG[:Ntdc, :Ntdc] = covinput(BIG[:Ntdc, :Ntdc], BIG[:Ntdc, :Ntdc] = covinput(BIG[:Ntdc, :Ntdc],
R0, R1, R2, R3, R4, R0, R1, R2, R3, R4,
tn, ts, tnold) tn, ts, tnold)
@ -2132,11 +2131,11 @@ class SpecData1D(PlotData):
a_lo, a_up, indI, xc, Nt, *opt0) a_lo, a_up, indI, xc, Nt, *opt0)
#[fxind,err0] = rind(BIG(1:Ntdc,1:Ntdc),ex,a_lo,a_up,indI, xc,Nt,opt0{:}) #[fxind,err0] = rind(BIG(1:Ntdc,1:Ntdc),ex,a_lo,a_up,indI, xc,Nt,opt0{:})
#%tnold = tn # tnold = tn
if (Nx == 1): # % THEN if (Nx == 1): # % THEN
#% Joint density of (TMd,TMm),(Tdm,TMm) given the max and # Joint density of (TMd,TMm),(Tdm,TMm) given the max and
#% the min. # the min.
#% Note that the density is not scaled to unity # Note that the density is not scaled to unity
pdf[0, ts, tn] = fxind[0] # %*CC pdf[0, ts, tn] = fxind[0] # %*CC
err[0, ts, tn] = err0[0] ** 2 err[0, ts, tn] = err0[0] ** 2
err[0, ts, tn] = terr0(1) err[0, ts, tn] = terr0(1)
@ -2145,12 +2144,12 @@ class SpecData1D(PlotData):
err[0, tn - ts, tn] = err0[0] ** 2 err[0, tn - ts, tn] = err0[0] ** 2
terr[0, tn - ts, tn] = terr0[0] terr[0, tn - ts, tn] = terr0[0]
# end # end
#%GOTO 350 # GOTO 350
else: else:
IJ = 0 IJ = 0
if def_nr == 4: if def_nr == 4:
#% 4, gives level u separated Max2min and wave period from # 4, gives level u separated Max2min and wave period from
#% Max to the crossing of level u (M,m,TMd). # Max to the crossing of level u (M,m,TMd).
for i in range(1, Nx1): for i in range(1, Nx1):
J = IJ + Nx1 J = IJ + Nx1
pdf[1:Nx1, i, ts] = pdf[ pdf[1:Nx1, i, ts] = pdf[
@ -2160,7 +2159,7 @@ class SpecData1D(PlotData):
terr[1:Nx1, i, ts] = terr[ terr[1:Nx1, i, ts] = terr[
1:Nx1, i, ts] + (terr0[IJ:J] * dt) 1:Nx1, i, ts] + (terr0[IJ:J] * dt)
if (ts < tn - ts): if (ts < tn - ts):
#% exploiting the symmetry # exploiting the symmetry
# %*CC # %*CC
pdf[i, 1:Nx1, tn - ts] = pdf[i, pdf[i, 1:Nx1, tn - ts] = pdf[i,
1:Nx1, tn - ts] + fxind[IJ:J] * dt 1:Nx1, tn - ts] + fxind[IJ:J] * dt
@ -2172,8 +2171,8 @@ class SpecData1D(PlotData):
IJ = J IJ = J
# end %do # end %do
elif def_nr == 5: elif def_nr == 5:
#% 5, gives level u separated Max2min and wave period # 5, gives level u separated Max2min and wave period
#% from the crossing of level u to min (M,m,Tdm). # from the crossing of level u to min (M,m,Tdm).
for i in range(1, Nx1): # = 2:Nx1, for i in range(1, Nx1): # = 2:Nx1,
J = IJ + Nx1 J = IJ + Nx1
pdf[1:Nx1, i, tn - ts] = pdf[1:Nx1, pdf[1:Nx1, i, tn - ts] = pdf[1:Nx1,
@ -2195,7 +2194,7 @@ class SpecData1D(PlotData):
# end %do # end %do
# end %END SELECT # end %END SELECT
# end # end
#%350 # 350
# end %do # end %do
# end # end
# waitTxt = sprintf('%s Ready: %d of %d',datestr(now),tn,Ntime) # waitTxt = sprintf('%s Ready: %d of %d',datestr(now),tn,Ntime)
@ -2206,7 +2205,7 @@ class SpecData1D(PlotData):
err = sqrt(err) err = sqrt(err)
# end % if # end % if
#%Nx1,size(pdf) def Ntime # Nx1,size(pdf) def Ntime
if (Nx > 1): # % THEN if (Nx > 1): # % THEN
IJ = 1 IJ = 1
if (def_nr > 2 or def_nr == 1): if (def_nr > 2 or def_nr == 1):
@ -2267,14 +2266,14 @@ class SpecData1D(PlotData):
# Cov(Xt,Xc) # Cov(Xt,Xc)
# for # for
i = np.arange(tn - 2) # 1:tn-2 i = np.arange(tn - 2) # 1:tn-2
#%j = abs(i+1-ts) # j = abs(i+1-ts)
#%BIG(i,N) = -sign(R1(j+1),R1(j+1)*dble(ts-i-1)) %cov(X'(ti+1),X(ts)) # BIG(i,N) = -sign(R1(j+1),R1(j+1)*dble(ts-i-1)) %cov(X'(ti+1),X(ts))
j = i + 1 - ts j = i + 1 - ts
tau = abs(j) tau = abs(j)
#%BIG(i,N) = abs(R1(tau)).*sign(R1(tau).*j.') # BIG(i,N) = abs(R1(tau)).*sign(R1(tau).*j.')
BIG[i, N] = R1[tau] * sign(j) BIG[i, N] = R1[tau] * sign(j)
#%end do # end do
#%Cov(Xc) # Cov(Xc)
BIG[N, N] = R0[0] # cov(X(ts),X(ts)) BIG[N, N] = R0[0] # cov(X(ts),X(ts))
BIG[tn + shft + 1, N] = -R1[ts] # cov(X'(t1),X(ts)) BIG[tn + shft + 1, N] = -R1[ts] # cov(X'(t1),X(ts))
BIG[tn + shft + 2, N] = R1[tn - ts] # cov(X'(tn),X(ts)) BIG[tn + shft + 2, N] = R1[tn - ts] # cov(X'(tn),X(ts))
@ -2284,20 +2283,20 @@ class SpecData1D(PlotData):
BIG[tn - 1, N] = R2[ts] # %cov(X''(t1),X(ts)) BIG[tn - 1, N] = R2[ts] # %cov(X''(t1),X(ts))
BIG[tn, N] = R2[tn - ts] # %cov(X''(tn),X(ts)) BIG[tn, N] = R2[tn - ts] # %cov(X''(tn),X(ts))
#%ADD a level u crossing at ts # ADD a level u crossing at ts
#%Cov(Xt,Xd) # Cov(Xt,Xd)
#%for # for
i = np.arange(tn - 2) # 1:tn-2 i = np.arange(tn - 2) # 1:tn-2
j = abs(i + 1 - ts) j = abs(i + 1 - ts)
BIG[i, tn + shft] = -R2[j] # %cov(X'(ti+1),X'(ts)) BIG[i, tn + shft] = -R2[j] # %cov(X'(ti+1),X'(ts))
#%end do # end do
#%Cov(Xd) # Cov(Xd)
BIG[tn + shft, tn + shft] = -R2[0] # %cov(X'(ts),X'(ts)) BIG[tn + shft, tn + shft] = -R2[0] # %cov(X'(ts),X'(ts))
BIG[tn - 1, tn + shft] = R3[ts] # %cov(X''(t1),X'(ts)) BIG[tn - 1, tn + shft] = R3[ts] # %cov(X''(t1),X'(ts))
BIG[tn, tn + shft] = -R3[tn - ts] # %cov(X''(tn),X'(ts)) BIG[tn, tn + shft] = -R3[tn - ts] # %cov(X''(tn),X'(ts))
#%Cov(Xd,Xc) # Cov(Xd,Xc)
BIG[tn + shft, N] = 0.0 # %cov(X'(ts),X(ts)) BIG[tn + shft, N] = 0.0 # %cov(X'(ts),X(ts))
# % cov(X'(ts),X'(t1)) # % cov(X'(ts),X'(t1))
BIG[tn + shft, tn + shft + 1] = -R2[ts] BIG[tn + shft, tn + shft + 1] = -R2[ts]
@ -2308,19 +2307,19 @@ class SpecData1D(PlotData):
BIG[tn + shft, tn + shft + 4] = -R1[tn - ts] BIG[tn + shft, tn + shft + 4] = -R1[tn - ts]
if (tnold == tn): if (tnold == tn):
#% A previous call to covinput with tn==tnold has been made # A previous call to covinput with tn==tnold has been made
#% need only to update row and column N and tn+1 of big: # need only to update row and column N and tn+1 of big:
return BIG return BIG
#% % make lower triangular part equal to upper and then return # % make lower triangular part equal to upper and then return
#% for j=1:tn+shft # for j=1:tn+shft
#% BIG(N,j) = BIG(j,N) # BIG(N,j) = BIG(j,N)
#% BIG(tn+shft,j) = BIG(j,tn+shft) # BIG(tn+shft,j) = BIG(j,tn+shft)
#% end # end
#% for j=tn+shft+1:N-1 # for j=tn+shft+1:N-1
#% BIG(N,j) = BIG(j,N) # BIG(N,j) = BIG(j,N)
#% BIG(j,tn+shft) = BIG(tn+shft,j) # BIG(j,tn+shft) = BIG(tn+shft,j)
#% end # end
#% return # return
# end %if # end %if
# %tnold = tn # %tnold = tn
else: else:
@ -2329,11 +2328,11 @@ class SpecData1D(PlotData):
# end %if # end %if
if (tn > 2): if (tn > 2):
#%for i=1:tn-2 # for i=1:tn-2
#%cov(Xt) # cov(Xt)
#% for j=i:tn-2 # for j=i:tn-2
#% BIG(i,j) = -R2(j-i+1) % cov(X'(ti+1),X'(tj+1)) # BIG(i,j) = -R2(j-i+1) % cov(X'(ti+1),X'(tj+1))
#% end %do # end %do
# % cov(Xt) = % cov(X'(ti+1),X'(tj+1)) # % cov(Xt) = % cov(X'(ti+1),X'(tj+1))
BIG[:tn - 2, :tn - 2] = toeplitz(-R2[:tn - 2]) BIG[:tn - 2, :tn - 2] = toeplitz(-R2[:tn - 2])
@ -2346,17 +2345,17 @@ class SpecData1D(PlotData):
# cov(X'(ti+1),X(tn)) # cov(X'(ti+1),X(tn))
BIG[:tn - 2, tn + shft + 3] = -R1[tn - 2:0:-1] BIG[:tn - 2, tn + shft + 3] = -R1[tn - 2:0:-1]
#%Cov(Xt,Xd) # Cov(Xt,Xd)
BIG[:tn - 2, tn - 2] = R3[1:tn - 1] # cov(X'(ti+1),X''(t1)) BIG[:tn - 2, tn - 2] = R3[1:tn - 1] # cov(X'(ti+1),X''(t1))
BIG[:tn - 2, tn - 1] = -R3[tn - 2:0:-1] # cov(X'(ti+1),X''(tn)) BIG[:tn - 2, tn - 1] = -R3[tn - 2:0:-1] # cov(X'(ti+1),X''(tn))
#%end %do # end %do
# end # end
#%cov(Xd) # cov(Xd)
BIG[tn - 2, tn - 2] = R4[0] BIG[tn - 2, tn - 2] = R4[0]
BIG[tn - 2, tn - 1] = R4[tn - 1] # cov(X''(t1),X''(tn)) BIG[tn - 2, tn - 1] = R4[tn - 1] # cov(X''(t1),X''(tn))
BIG[tn - 1, tn - 1] = R4[0] BIG[tn - 1, tn - 1] = R4[0]
#%cov(Xc) # cov(Xc)
BIG[tn + shft + 2, tn + shft + 2] = R0[0] # cov(X(t1),X(t1)) BIG[tn + shft + 2, tn + shft + 2] = R0[0] # cov(X(t1),X(t1))
# cov(X(t1),X(tn)) # cov(X(t1),X(tn))
BIG[tn + shft + 2, tn + shft + 3] = R0[tn - 1] BIG[tn + shft + 2, tn + shft + 3] = R0[tn - 1]
@ -2408,10 +2407,10 @@ class SpecData1D(PlotData):
f.write('%12.10f\n', hg) f.write('%12.10f\n', hg)
# XSPLT = options.xsplit # XSPLT = options.xsplit
nit = options.nit nit = options.nit
speed = options.speed speed = options.speed
seed = options.seed seed = options.seed
SCIS = abs(options.method) # method<=0 SCIS = abs(options.method) # method<=0
with open('reflev.in', 'wt') as fid: with open('reflev.in', 'wt') as fid:
fid.write('%2.0f \n', Ntime) fid.write('%2.0f \n', Ntime)
@ -2428,10 +2427,10 @@ class SpecData1D(PlotData):
filenames2 = self._writecov(R) filenames2 = self._writecov(R)
print(' Starting Fortran executable.') print(' Starting Fortran executable.')
#compiled cov2mmtpdf.f with rind70.f # compiled cov2mmtpdf.f with rind70.f
#dos([ wafoexepath 'cov2mmtpdf.exe']) # dos([ wafoexepath 'cov2mmtpdf.exe'])
dens = 1 #load('dens.out') dens = 1 # load('dens.out')
self._cleanup(*filenames) self._cleanup(*filenames)
self._cleanup(*filenames2) self._cleanup(*filenames2)
@@ -2929,7 +2928,7 @@ class SpecData1D(PlotData):
         # 1'st order + 2'nd order component.
         x2[:, 1::] = x[:, 1::] + x2o[0:ns, :].real
-        if output=='timeseries':
+        if output == 'timeseries':
             xx2 = mat2timeseries(x2[:, 1::], x2[:, 0].ravel())
             xx = mat2timeseries(x[:, 1::], x[:, 0].ravel())
             return xx2, xx
@ -3016,7 +3015,7 @@ class SpecData1D(PlotData):
Vol. 3, pp.1-15 Vol. 3, pp.1-15
""" """
#% default options # default options
if h is None: if h is None:
h = self.h h = self.h
@ -3055,7 +3054,7 @@ class SpecData1D(PlotData):
else: else:
raise ValueError('Unknown option!') raise ValueError('Unknown option!')
# elif method[0]== 'q': #, #% quasi method # elif method[0]== 'q': #, # quasi method
# Fn = self.nyquist_freq() # Fn = self.nyquist_freq()
# dw = Fn/Nw # dw = Fn/Nw
# tmp1 =sqrt(S[:,newaxis]*S[newaxis,:])*dw # tmp1 =sqrt(S[:,newaxis]*S[newaxis,:])*dw
@ -3373,7 +3372,7 @@ class SpecData1D(PlotData):
w = self.args.ravel() w = self.args.ravel()
n = w.size n = w.size
#%doInterpolate = 0 # doInterpolate = 0
# Nyquist to sampling interval factor # Nyquist to sampling interval factor
Cnf2dt = 0.5 if ftype == 'f' else pi # % ftype == w og ftype == k Cnf2dt = 0.5 if ftype == 'f' else pi # % ftype == w og ftype == k
@ -3762,9 +3761,9 @@ class SpecData1D(PlotData):
** 2 + m[2] * mij[8] / m[4] ** 3), ** 2 + m[2] * mij[8] / m[4] ** 3),
m_11 / m[0] ** 2 + (m[5] / m[0] ** 2) ** 2 * m_11 / m[0] ** 2 + (m[5] / m[0] ** 2) ** 2 *
mij[0] - 2 * m[5] / m[0] ** 3 * m_10, mij[0] - 2 * m[5] / m[0] ** 3 * m_10,
nan, nan, (8 * pi / g) ** 2 *
(8 * pi / g) ** 2 * (m[2] ** 2 / (4 * m[0] ** 3) * (m[2] ** 2 / (4 * m[0] ** 3) *
mij[0] + mij[4] / m[0] - m[2] / m[0] ** 2 * mij[2]), mij[0] + mij[4] / m[0] - m[2] / m[0] ** 2 * mij[2]),
nan * ones(4), nan * ones(4),
m[2] ** 2 * mij[0] / (4 * m[0] ** 3 * m[4]) + mij[4] / m[2] ** 2 * mij[0] / (4 * m[0] ** 3 * m[4]) + mij[4] /
(m[0] * m[4]) + mij[8] * m[2] ** 2 / (4 * m[0] * m[4] ** 3) - (m[0] * m[4]) + mij[8] * m[2] ** 2 / (4 * m[0] * m[4] ** 3) -
@ -3785,7 +3784,7 @@ class SpecData1D(PlotData):
S0 = r_[2. / (sqrt(m[0]) * m[1]) * (mij[0] - m[0] * mij[1] / m[1]), S0 = r_[2. / (sqrt(m[0]) * m[1]) * (mij[0] - m[0] * mij[1] / m[1]),
1. / sqrt(m[2]) * (mij[0] / m[0] - mij[2] / m[2]), 1. / sqrt(m[2]) * (mij[0] / m[0] - mij[2] / m[2]),
1. / (2 * m[1]) * sqrt(m[0] / m[2]) * (mij[0] / m[0] - mij[2] / 1. / (2 * m[1]) * sqrt(m[0] / m[2]) * (mij[0] / m[0] - mij[2] /
m[2] - mij[1] / m[1] + m[0] * mij[3] / (m[1] * m[2]))] m[2] - mij[1] / m[1] + m[0] * mij[3] / (m[1] * m[2]))]
R1 = ones((15, 15)) R1 = ones((15, 15))
R1[:, :] = nan R1[:, :] = nan
@@ -4093,11 +4092,11 @@ class SpecData2D(PlotData):
         if self.type not in two_dim_spectra:
             raise ValueError('Unknown 2D spectrum type!')

-        if vari == None and nr <= 1:
+        if vari is None and nr <= 1:
             vari = 'x'
-        elif vari == None:
+        elif vari is None:
             vari = 'xt'
-        else:  # % secure the mutual order ('xyt')
+        else:  # secure the mutual order ('xyt')
             vari = ''.join(sorted(vari.lower()))
         Nv = len(vari)

@@ -0,0 +1,72 @@
'''
Created on 23. okt. 2014
@author: pab
'''
import unittest
import numpy as np
from numpy import exp, Inf
from numpy.testing import assert_array_almost_equal
from wafo.integrate import gaussq
class Gaussq(unittest.TestCase):
'''
1 : p(x) = 1 a =-1, b = 1 Gauss-Legendre
2 : p(x) = exp(-x^2) a =-inf, b = inf Hermite
3 : p(x) = x^alpha*exp(-x) a = 0, b = inf Laguerre
4 : p(x) = (x-a)^alpha*(b-x)^beta a =-1, b = 1 Jacobi
5 : p(x) = 1/sqrt((x-a)*(b-x)), a =-1, b = 1 Chebyshev 1'st kind
6 : p(x) = sqrt((x-a)*(b-x)), a =-1, b = 1 Chebyshev 2'nd kind
7 : p(x) = sqrt((x-a)/(b-x)), a = 0, b = 1
8 : p(x) = 1/sqrt(b-x), a = 0, b = 1
9 : p(x) = sqrt(b-x), a = 0, b = 1
'''
def test_gauss_legendre(self):
val, _err = gaussq(exp, 0, 1)
self.assertAlmostEqual(val, exp(1)-exp(0))
a, b, y = [0, 0], [1, 1], np.array([1., 2.])
val, _err = gaussq(lambda x, y: x * y, a, b, args=(y, ))
assert_array_almost_equal(val, 0.5*y)
def test_gauss_hermite(self):
f = lambda x: x
val, _err = gaussq(f, -Inf, Inf, wfun=2)
self.assertAlmostEqual(val, 0)
def test_gauss_laguerre(self):
f = lambda x: x
val, _err = gaussq(f, 0, Inf, wfun=3, alpha=1)
self.assertAlmostEqual(val, 2)
def test_gauss_jacobi(self):
f = lambda x: x
val, _err = gaussq(f, -1, 1, wfun=4, alpha=-0.5, beta=-0.5)
self.assertAlmostEqual(val, 0)
def test_gauss_wfun5_6(self):
f = lambda x: x
for i in [5, 6]:
val, _err = gaussq(f, -1, 1, wfun=i)
self.assertAlmostEqual(val, 0)
def test_gauss_wfun7(self):
f = lambda x: x
val, _err = gaussq(f, 0, 1, wfun=7)
self.assertAlmostEqual(val, 1.17809725)
def test_gauss_wfun8(self):
f = lambda x: x
val, _err = gaussq(f, 0, 1, wfun=8)
self.assertAlmostEqual(val, 1.33333333)
def test_gauss_wfun9(self):
f = lambda x: x
val, _err = gaussq(f, 0, 1, wfun=9)
self.assertAlmostEqual(val, 0.26666667)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
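Note: a minimal way to run just this new test case; the import path is an assumption (adjust it to wherever the test module lives in the package):

    import unittest
    from wafo.tests.test_integrate import Gaussq

    suite = unittest.TestLoader().loadTestsFromTestCase(Gaussq)
    unittest.TextTestRunner(verbosity=2).run(suite)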