master
Per.Andreas.Brodtkorb 11 years ago
parent 31f80c5798
commit 5c84825641

@ -97,7 +97,8 @@ def dea3(v0, v1, v2):
    if k1.size > 0:
        with warnings.catch_warnings():
            # ignore division by zero and overflow
            warnings.simplefilter("ignore")
            ss = one / delta2[k1] - one / delta1[k1]
            smallE2 = (abs(ss * E1[k1]) <= 1.0e-3).ravel()
            k2 = k1[smallE2.nonzero()]
@ -113,6 +114,7 @@ def dea3(v0, v1, v2):
    return result, abserr
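A minimal usage sketch (assuming dea3 as defined above; the three inputs are successive trapezoidal estimates of the integral of sin on [0, pi/2], whose exact value is 1):

import numpy as np

# three trapezoidal estimates of the same integral at increasing resolution
Ei = [np.trapz(np.sin(np.linspace(0, np.pi / 2, n)),
               np.linspace(0, np.pi / 2, n)) for n in (17, 33, 65)]
En, err = dea3(Ei[0], Ei[1], Ei[2])
# En should be much closer to 1.0 than Ei[2]; err is the estimated error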

def clencurt(fun, a, b, n0=5, trace=False, *args):
    '''
    Numerical evaluation of an integral, Clenshaw-Curtis method.
@ -161,7 +163,6 @@ def clencurt(fun, a, b, n0=5, trace=False, *args):
    Numerische Mathematik, Vol. 2, pp. 197--205
    '''
    #% make sure n is even
    n = 2 * n0
    a, b = np.atleast_1d(a, b)
@ -184,7 +185,8 @@ def clencurt(fun, a, b, n0=5, trace=False, *args):
        x0 = np.flipud(fun[:, 0])
        n = len(x0) - 1
        if abs(x - x0) > 1e-8:
            raise ValueError(
                'Input vector x must equal cos(pi*s/n)*(b-a)/2+(b+a)/2')
        f = np.flipud(fun[:, 1::])
@ -196,23 +198,22 @@ def clencurt(fun, a, b, n0=5, trace=False, *args):
    f[0, :] = f[0, :] / 2
    f[n, :] = f[n, :] / 2

    # % x = cos(pi*0:n/n)
    # % f = f(x)
    # %
    # % N+1
    # % c(k) = (2/N) sum f''(n)*cos(pi*(2*k-2)*(n-1)/N), 1 <= k <= N/2+1.
    # % n=1
    fft = np.fft.fft
    tmp = np.real(fft(f[:n, :], axis=0))
    c = 2 / n * (tmp[0:n / 2 + 1, :] + np.cos(np.pi * s2) * f[n, :])
    # % old call
    # % c = 2/n * cos(s2*s'*pi/n) * f
    c[0, :] = c[0, :] / 2
    c[n / 2, :] = c[n / 2, :] / 2
    # % alternative call
    # % c = dct(f)
    c = c[0:n / 2 + 1, :] / ((s2 - 1) * (s2 + 1))
    Q = (af - bf) * np.sum(c, axis=0)
@ -225,6 +226,7 @@ def clencurt(fun, a, b, n0=5, trace=False, *args):
        Q = np.reshape(Q, a_shape)
    return Q, abserr
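A minimal usage sketch (mirroring the commented-out call in main() further down; the exact value of the integral of exp on [0, 2] is exp(2) - exp(0)):

import numpy as np

val, abserr = clencurt(np.exp, 0, 2)
# val should be close to np.exp(2) - np.exp(0) (about 6.389)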

def romberg(fun, a, b, releps=1e-3, abseps=1e-3):
    '''
    Numerical integration with the Romberg method
@ -289,7 +291,8 @@ def romberg(fun, a, b, releps=1e-3, abseps=1e-3):
        # Richardson extrapolation
        for k in xrange(i):
            # rom(2,k+1)=(fp(k)*rom(2,k)-rom(1,k))/(fp(k)-1)
            rom[two, k + 1] = rom[two, k] + \
                (rom[two, k] - rom[one, k]) / (fp[k] - 1)

        Ih1 = Ih2
        Ih2 = Ih4
@ -308,6 +311,7 @@ def romberg(fun, a, b, releps=1e-3, abseps=1e-3):
        ipower *= 2
    return res, abserr
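A minimal usage sketch (assuming romberg as defined above; the exact value of the integral of exp on [0, 1] is e - 1):

import numpy as np

q, err = romberg(np.exp, 0, 1, releps=1e-6, abseps=1e-6)
# q should be close to np.exp(1) - 1 (about 1.71828)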

def h_roots(n, method='newton'):
    '''
    Returns the roots (x) of the nth order Hermite polynomial,
@ -351,7 +355,6 @@ def h_roots(n, method='newton'):
    prentice-hall, Englewood cliffs, n.j.
    '''
    if not method.startswith('n'):
        return ort.h_roots(n)
    else:
@ -389,8 +392,8 @@ def h_roots(n, method='newton'):
            k0 = kp1
            kp1 = np.mod(kp1 + 1, 3)
            L[kp1, :] = (z * sqrt(2 / j) * L[k0, :] -
                         np.sqrt((j - 1) / j) * L[km1, :])
        # L now contains the desired Hermite polynomials.
        # We next compute pp, the derivatives,
@ -415,9 +418,10 @@ def h_roots(n, method='newton'):
        w[n - 1:n - m - 1:-1] = w[0:m]  # and its symmetric counterpart.
        return x, w
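A minimal quadrature sketch (assuming the usual Gauss-Hermite convention with weight exp(-x**2) over the whole real line; the exact value of the integral of x**2 * exp(-x**2) is sqrt(pi)/2):

import numpy as np

x, w = h_roots(10)
est = np.sum(w * x ** 2)
# est should be close to np.sqrt(np.pi) / 2 (about 0.8862)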

def j_roots(n, alpha, beta, method='newton'):
    '''
    Returns the roots of the nth order Jacobi polynomial, P^(alpha,beta)_n(x)
    and weights (w) to use in Gaussian Quadrature over [-1,1] with weighting
    function (1-x)**alpha (1+x)**beta with alpha,beta > -1.
@ -471,8 +475,8 @@ def j_roots(n, alpha, beta, method='newton'):
        # Initial approximations to the roots go into z.
        alfbet = alpha + beta
        z = np.cos(np.pi * (np.arange(1, n + 1) - 0.25 + 0.5 * alpha) /
                   (n + 0.5 * (alfbet + 1)))

        L = zeros((3, len(z)))
        k0 = 0
@ -501,27 +505,29 @@ def j_roots(n, alpha, beta, method='newton'):
            # We next compute pp, the derivatives with a standard
            # relation involving the polynomials of one lower order.
            pp = (n * (alpha - beta - tmp * z) * L[kp1, :] +
                  2 * (n + alpha) * (n + beta) * L[k0, :]) / (tmp * (1 - z ** 2))
            dz = L[kp1, :] / pp
            z = z - dz  # Newton's formula.
            if not any(abs(dz) > releps * abs(z)):
                break
        else:
            warnings.warn('too many iterations in jrule')

        x = z  # %Store the root and the weight.
        f = (sp.gammaln(alpha + n) + sp.gammaln(beta + n) -
             sp.gammaln(n + 1) - sp.gammaln(alpha + beta + n + 1))
        w = (np.exp(f) * tmp * 2 ** alfbet / (pp * L[k0, :]))
        return x, w
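A minimal sketch (for alpha = beta = 0 the Jacobi weight reduces to 1, so the rule integrates x**2 over [-1, 1]; the exact value is 2/3):

import numpy as np

x, w = j_roots(10, alpha=0, beta=0)
est = np.sum(w * x ** 2)
# est should be close to 2.0 / 3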

def la_roots(n, alpha=0, method='newton'):
    '''
    Returns the roots (x) of the nth order generalized (associated) Laguerre
    polynomial, L^(alpha)_n(x), and weights (w) to use in Gaussian quadrature
    over [0,inf] with weighting function exp(-x) x**alpha with alpha > -1.

    Parameters
    ----------
@ -597,7 +603,8 @@ def la_roots(n, alpha=0, method='newton'):
            k0 = kp1
            kp1 = np.mod(kp1 + 1, 3)
            L[kp1, k] = ((2 * jj - 1 + alpha - z[k]) * L[
                k0, k] - (jj - 1 + alpha) * L[km1, k]) / jj
        # end
        #%L now contains the desired Laguerre polynomials.
        #%We next compute pp, the derivatives with a standard
@ -610,7 +617,6 @@ def la_roots(n, alpha=0, method='newton'):
        z[k] = z[k] - dz[k]  # % Newton's formula.
        #%k = find((abs(dz) > releps.*z))
        if not np.any(abs(dz) > releps):
            break
    else:
@ -620,6 +626,7 @@ def la_roots(n, alpha=0, method='newton'):
    w = -np.exp(sp.gammaln(alpha + n) - sp.gammaln(n)) / (pp * n * Lp)
    return x, w
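A minimal sketch (with the exp(-x) * x**alpha weight over [0, inf); for alpha = 0 the exact value of the integral of x**2 * exp(-x) is 2):

import numpy as np

x, w = la_roots(12, alpha=0)
est = np.sum(w * x ** 2)
# est should be close to 2.0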

def p_roots(n, method='newton', a=-1, b=1):
    '''
    Returns the roots (x) of the nth order Legendre polynomial, P_n(x),
@ -658,8 +665,8 @@ def p_roots(n, method='newton', a= -1, b=1):
    References
    ----------
    [1] Davis and Rabinowitz (1975) 'Methods of Numerical Integration',
    page 365, Academic Press.
    [2] Golub, G. H. and Welsch, J. H. (1969)
    'Calculation of Gaussian Quadrature Rules'
@ -685,7 +692,6 @@ def p_roots(n, method='newton', a= -1, b=1):
        # Compute the zeros of the N+1 Legendre Polynomial
        # using the recursion relation and the Newton-Raphson method
        # Legendre-Gauss Polynomials
        L = zeros((3, m))
@ -698,7 +704,8 @@ def p_roots(n, method='newton', a= -1, b=1):
        # Compute the zeros of the N+1 Legendre Polynomial
        # using the recursion relation and the Newton-Raphson method

        # Iterate until new points are uniformly within epsilon of old
        # points
        k = slice(m)
        k0 = 0
        kp1 = 1
@ -710,7 +717,8 @@ def p_roots(n, method='newton', a= -1, b=1):
                km1 = k0
                k0 = kp1
                kp1 = np.mod(k0 + 1, 3)
                L[kp1, k] = ((2 * jj - 1) * xo[k] * L[
                    k0, k] - (jj - 1) * L[km1, k]) / jj

            Lp[k] = n * (L[k0, k] - xo[k] * L[kp1, k]) / (1 - xo[k] ** 2)
@ -747,14 +755,18 @@ def p_roots(n, method='newton', a= -1, b=1):
        d4pn = (6. * xo * d3pn + (6 - e1) * d2pn) / den
        u = pk / dpn
        v = d2pn / dpn
        h = (-u * (1 + (.5 * u) * (v + u *
                                   (v * v - u * d3pn / (3 * dpn)))))
        p = (pk + h * (dpn + (.5 * h) * (d2pn + (h / 3) *
                                         (d3pn + .25 * h * d4pn))))
        dp = dpn + h * (d2pn + (.5 * h) * (d3pn + h * d4pn / 3))
        h = h - p / dp
        xo = xo + h
        x = -xo - h
        fx = (d1 - h * e1 * (pk + (h / 2) *
                             (dpn + (h / 3) * (d2pn + (h / 4) *
                                               (d3pn + (.2 * h) * d4pn)))))
        w = 2 * (1 - x ** 2) / (fx ** 2)
        if (m + m) > n:
@ -766,7 +778,6 @@ def p_roots(n, method='newton', a= -1, b=1):
        x = np.hstack((x, -x[m - 1::-1]))
        w = np.hstack((w, w[m - 1::-1]))

    if (a != -1) | (b != 1):
        # Linear map from[-1,1] to [a,b]
        dh = (b - a) / 2
@ -775,6 +786,7 @@ def p_roots(n, method='newton', a= -1, b=1):
    return x, w
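A minimal sketch (mirroring the commented-out call in main() further down; the exact value of the integral of x**2 over [1, 3] is 26/3):

import numpy as np

x, w = p_roots(11, 'newton', 1, 3)
est = np.sum(x ** 2 * w)
# est should be close to 26.0 / 3 (about 8.667)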

def qrule(n, wfun=1, alpha=0, beta=0):
    '''
    Return nodes and weights for Gaussian quadratures.
@ -885,7 +897,8 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
    a,b : array-like
        lower and upper integration limits, respectively.
    reltol, abstol : real scalars, optional
        relative and absolute tolerance, respectively.
        (default reltol=abstol=1e-3).
    wfun : scalar integer, optional
        defining the weight function, p(x). (default wfun = 1)
        1 : p(x) = 1       a = -1, b = 1   Gauss-Legendre
@ -964,7 +977,8 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
    a_shape = np.atleast_1d(A.shape)
    b_shape = np.atleast_1d(B.shape)

    # make sure the integration limits have correct size
    if np.prod(a_shape) == 1:
        A = A * ones(b_shape)
        a_shape = b_shape
    elif np.prod(b_shape) == 1:
@ -972,7 +986,6 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
    elif any(a_shape != b_shape):
        raise ValueError('The integration limits must have equal size!')

    if args is None:
        num_parameters = 0
    else:
@ -997,13 +1010,11 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
            P0[ix].shape = (-1, 1)  # make sure it is a column
    k = np.arange(nk)
    val = zeros(nk)
    val_old = zeros(nk)
    abserr = zeros(nk)

    # setup mapping parameters
    A.shape = (-1, 1)
    B.shape = (-1, 1)
@ -1044,16 +1055,14 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
        x_trace = [0, ] * max_iter
        y_trace = [0, ] * max_iter

    if num_parameters > 0:
        ix_vec, = np.where(isvector1)
        if len(ix_vec):
            P1 = copy.copy(P0)

    # Break out of the iteration loop for three reasons:
    # 1) the last update is very small (compared to int and to reltol)
    # 2) There are more than 11 iterations. This should NEVER happen.
    for ix in xrange(max_iter):
        x_and_w = 'wfun%d_%d_%g_%g' % (wfun, gn, alpha, beta)
@ -1066,7 +1075,6 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
        # calculate the x values
        x = (xn + shift) * jacob[k, :] + A[k, :]

        # calculate function values y=fun(x,p1,p2,....,pn)
        if num_parameters > 0:
            if len(ix_vec):
@ -1080,10 +1088,8 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
        else:
            y = fun(x)

        val[k] = np.sum(w * y, axis=1) * dx[k]  # do the integration sum(y.*w)

        if trace:
            x_trace.append(x.ravel())
            y_trace.append(y.ravel())
@ -1096,11 +1102,11 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
            plt.setp(hfig, 'color', 'b')

        abserr[k] = abs(val_old[k] - val[k])  # absolute tolerance
        if ix > 1:
            k, = np.where(abserr > np.maximum(abs(reltol * val), abstol))
            # abserr > abs(abstol))%indices to integrals which
            # did not converge
            nk = len(k)  # of integrals we have to compute again
            if nk:
                val_old[k] = val[k]
@ -1111,15 +1117,15 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
    else:
        if nk > 1:
            if (nk == np.prod(a_shape)):
                tmptxt = 'All integrals did not converge'
            else:
                tmptxt = '%d integrals did not converge' % (nk,)
        else:
            tmptxt = 'Integral did not converge--singularity likely!'
        warnings.warn(tmptxt + '--singularities likely!')

    # make sure int is the same size as the integration limits
    val.shape = a_shape
    abserr.shape = a_shape

    if trace > 0:
@ -1127,6 +1133,7 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
        plt.plot(np.hstack(x_trace), np.hstack(y_trace), '+')
    return val, abserr
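A minimal sketch (mirroring the commented-out calls in main() further down; with wfun=3 and limits 0 to inf the weight is exp(-x) * x**alpha, so the integral of x**2 with alpha=0 equals 2):

import numpy as np

val, abserr = gaussq(lambda x: x ** 2, 0, np.inf, wfun=3, alpha=0)
# val should be close to 2.0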

def richardson(Q, k):
    # license BSD
    # Richardson extrapolation with parameter estimation
@ -1136,6 +1143,7 @@ def richardson(Q, k):
    R = Q[k] + (Q[k] - Q[k - 1]) / c
    return R


def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
    '''
    Gauss-Legendre quadrature with Richardson extrapolation.
@ -1189,7 +1197,6 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
    else:
        reverse = False

    #% Infinite limits
    if np.isinf(a) | np.isinf(b):
        # Check real limits
@ -1219,9 +1226,11 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
        return Q, err

    # Gauss-Legendre quadrature (12-point)
    xq = np.asarray(
        [0.12523340851146894, 0.36783149899818018, 0.58731795428661748,
         0.76990267419430469, 0.9041172563704748, 0.98156063424671924])
    wq = np.asarray(
        [0.24914704581340288, 0.23349253653835478, 0.20316742672306584,
         0.16007832854334636, 0.10693932599531818, 0.047175336386511842])
    xq = np.hstack((xq, -xq))
    wq = np.hstack((wq, wq))
@ -1251,7 +1260,8 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
        hh = hh / 2
        x = np.hstack([x + a, x + b]) / 2
        # Quadrature
        Q0[k] = hh * \
            np.sum(wq * np.sum(np.reshape(fun(x), (-1, nq)), axis=0), axis=0)

        # Richardson extrapolation
        if k >= 5:
@ -1260,7 +1270,6 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
        elif k >= 3:
            Q1[k] = richardson(Q0, k)

        #% Estimate absolute error
        if k >= 6:
            Qv = np.hstack((Q0[k], Q1[k], Q2[k]))
@ -1288,7 +1297,6 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
    if ~ np.isfinite(Q):
        warnings.warn('Integral approximation is Infinite or NaN.')

    # The error estimate should not be zero
    err = err + 2 * np.finfo(Q).eps
    # Reverse direction
@ -1297,6 +1305,7 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
    return Q, err
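A minimal sketch (assuming quadgr as defined above; the exact value of the integral of cos on [0, pi/2] is 1):

import numpy as np

Q, err = quadgr(np.cos, 0, np.pi / 2)
# Q should be close to 1.0; err is the estimated absolute error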

def qdemo(f, a, b):
    '''
    Compares different quadrature rules.
@ -1346,7 +1355,6 @@ def qdemo(f, a, b):
     129, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
     257, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
     513, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
    '''
    # use quad8 with small tolerance to get "true" value
    #true1 = quad8(f,a,b,1e-10)
@ -1392,7 +1400,8 @@ def qdemo(f, a, b):
        # Boole's rule
        #q = boole(x,y)
        q = (2 * h / 45) * (7 * (y[0] + y[-1]) + 12 * np.sum(y[2:n - 1:4])
                            + 32 * np.sum(y[1:n - 1:2]) +
                            14 * np.sum(y[4:n - 3:4]))
        qb[k] = q
        eb[k] = abs(q - true_val)
@ -1415,7 +1424,6 @@ def qdemo(f, a, b):
        qg[k] = q
        eg[k] = abs(q - true_val)

    #% display results
    formats = ['%4.0f, ', ] + ['%10.10f, ', ] * 6
    formats[-1] = formats[-1].split(',')[0]
@ -1435,16 +1443,14 @@ def qdemo(f, a, b):
        tmp = data[k].tolist()
        print(''.join(fi % t for fi, t in zip(formats, tmp)))

    plt.loglog(neval, np.vstack((et, es, eb, ec, ec2, eg)).T)
    plt.xlabel('number of function evaluations')
    plt.ylabel('error')
    plt.legend(
        ('Trapezoid', 'Simpsons', 'Booles', 'Clenshaw', 'Chebychev', 'Gauss-L'))
    # ec3'


def main():
    # val, err = clencurt(np.exp, 0, 2)
    # valt = np.exp(2) - np.exp(0)
@ -1465,22 +1471,23 @@ def main():
    # [val1, err1] = gaussq(fun, A, B)
    #
    #
    # Integration of x^2*exp(-x) from zero to infinity:
    # fun2 = npu.wrap2callable('1')
    # [val2, err2] = gaussq(fun2, 0, np.inf, wfun=3, alpha=2)
    # [val2, err2] = gaussq(lambda x: x ** 2, 0, np.inf, wfun=3, alpha=0)
    #
    # Integrate humps from 0 to 2 and from 1 to 4
    # [val3, err3] = gaussq(humps, A, B)
    #
    # [x, w] = p_roots(11, 'newton', 1, 3)
    # y = np.sum(x ** 2 * w)

    x = np.linspace(0, np.pi / 2)
    _q0 = np.trapz(humps(x), x)
    [q, err] = romberg(humps, 0, np.pi / 2, 1e-4)
    print q, err


def test_docstrings():
    np.set_printoptions(precision=7)
    import doctest

@ -1,4 +1,4 @@
#-------------------------------------------------------------------------
# Name: module1
# Purpose:
#
@ -7,11 +7,12 @@
# Created: 30.12.2008
# Copyright: (c) pab 2008
# Licence: <your licence>
#-------------------------------------------------------------------------
#!/usr/bin/env python
from __future__ import division
import numpy as np
import scipy.signal
import scipy.special as spec
import scipy.sparse as sp
import scipy.sparse.linalg  # @UnusedImport
from numpy.ma.core import ones, zeros, prod, sin
@ -23,11 +24,14 @@ from scipy.interpolate import PiecewisePolynomial
import polynomial as pl

__all__ = [
    'PPform', 'savitzky_golay', 'savitzky_golay_piecewise', 'sgolay2d',
    'SmoothSpline', 'pchip_slopes', 'slopes', 'stineman_interp', 'Pchip',
    'StinemanInterp', 'CubicHermiteSpline']


def savitzky_golay(y, window_size, order, deriv=0):
    """Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise from data.
    It has the advantage of preserving the original shape and
    features of the signal better than other types of filtering
@ -43,7 +47,8 @@ def savitzky_golay(y, window_size, order, deriv=0):
        the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
    deriv: int
        order of the derivative to compute (default = 0 means only smoothing)

    Returns
    -------
    ys : ndarray, shape (N)
@ -90,7 +95,8 @@ def savitzky_golay(y, window_size, order, deriv=0):
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute coefficients
    b = np.mat([[k ** i for i in order_range]
                for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b).A[deriv]
    # pad the signal at the extremes with
    # values taken from the signal itself
@ -99,13 +105,15 @@ def savitzky_golay(y, window_size, order, deriv=0):
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m, y, mode='valid')
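A minimal smoothing sketch (assuming savitzky_golay as defined above, applied to a noisy Gaussian bump):

import numpy as np

t = np.linspace(-4, 4, 500)
y = np.exp(-t ** 2) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
# ysg is a smoothed copy of y; the shape of the bump is preserved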

def savitzky_golay_piecewise(xvals, data, kernel=11, order=4):
    '''
    One of the most popular applications of S-G filter, apart from smoothing
    UV-VIS and IR spectra, is smoothing of curves obtained in electroanalytical
    experiments. In cyclic voltammetry, voltage (being the abscissa) changes
    like a triangle wave. And in the signal there are cusps at the turning
    points (at switching potentials) which should never be smoothed.
    In this case, Savitzky-Golay smoothing should be
    done piecewise, i.e. separately on pieces monotonic in x.

    Example
@ -117,8 +125,9 @@ def savitzky_golay_piecewise(xvals, data, kernel=11, order =4):
    >>> y = np.round(sin(x))
    >>> sig2 = linspace(0,0.5,50)

    # As an example, this figure shows the effect of an additive noise with a
    # variance of 0.2 (original signal (black), noisy signal (red) and filtered
    # signal (blue dots)).

    >>> yn = y + np.sqrt(0.2)*np.random.randn(*x.shape)
    >>> yr = savitzky_golay_piecewise(x, yn, kernel=11, order=4)
@ -142,21 +151,29 @@ def savitzky_golay_piecewise(xvals, data, kernel=11, order =4):
        # smooth the first piece
        firstpart = savitzky_golay(data[0:turnpoint], kernel, order)
        # recursively smooth the rest
        rest = savitzky_golay_piecewise(
            xvals[turnpoint:], data[turnpoint:], kernel, order)
        return np.concatenate((firstpart, rest))


def sgolay2d(z, window_size, order, derivative=None):
    """
    Savitzky-Golay filters can also be used to smooth two dimensional data
    affected by noise. The algorithm is exactly the same as for the one
    dimensional case, only the math is a bit more tricky. The basic algorithm
    is as follows: for each point of the two dimensional matrix extract a
    sub-matrix, centered at that point and with a size equal to an odd number
    "window_size". For this sub-matrix compute a least-square fit of a
    polynomial surface, defined as
    p(x, y) = a0 + a1 * x + a2 * y + a3 * x2 + a4 * y2 + a5 * x * y + ... .
    Note that x and y are equal to zero at the central point.
    Replace the initial central point with the value computed with the fit.
    Note that because the fit coefficients are linear with respect to the data
    spacing, they can be pre-computed for efficiency. Moreover, it is important
    to appropriately pad the borders of the data, with a mirror image of the
    data itself, so that the evaluation of the fit at the borders of the data
    can happen smoothly.

    Here is the code for two dimensional filtering.

    Example
@ -213,32 +230,44 @@ def sgolay2d ( z, window_size, order, derivative=None):
    Z = np.zeros((new_shape))
    # top band
    band = z[0, :]
    Z[:half_size, half_size:-half_size] = band - \
        np.abs(np.flipud(z[1:half_size + 1, :]) - band)
    # bottom band
    band = z[-1, :]
    Z[-half_size:, half_size:-half_size] = band + \
        np.abs(np.flipud(z[-half_size - 1:-1, :]) - band)
    # left band
    band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size])
    Z[half_size:-half_size, :half_size] = band - \
        np.abs(np.fliplr(z[:, 1:half_size + 1]) - band)
    # right band
    band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size])
    Z[half_size:-half_size, -half_size:] = band + \
        np.abs(np.fliplr(z[:, -half_size - 1:-1]) - band)
    # central band
    Z[half_size:-half_size, half_size:-half_size] = z

    # top left corner
    band = z[0, 0]
    Z[:half_size, :half_size] = band - \
        np.abs(
            np.flipud(np.fliplr(z[1:half_size + 1, 1:half_size + 1])) - band)
    # bottom right corner
    band = z[-1, -1]
    Z[-half_size:, -half_size:] = band + \
        np.abs(np.flipud(np.fliplr(z[-half_size - 1:-1, -half_size - 1:-1])) -
               band)
    # top right corner
    band = Z[half_size, -half_size:]
    Z[:half_size, -half_size:] = band - \
        np.abs(
            np.flipud(Z[half_size + 1:2 * half_size + 1, -half_size:]) - band)
    # bottom left corner
    band = Z[-half_size:, half_size].reshape(-1, 1)
    Z[-half_size:, :half_size] = band - \
        np.abs(
            np.fliplr(Z[-half_size:, half_size + 1:2 * half_size + 1]) - band)

    # solve system and convolve
    if derivative == None:
@ -253,11 +282,15 @@ def sgolay2d ( z, window_size, order, derivative=None):
    elif derivative == 'both':
        c = np.linalg.pinv(A)[1].reshape((window_size, -1))
        r = np.linalg.pinv(A)[2].reshape((window_size, -1))
        return (scipy.signal.fftconvolve(Z, -r, mode='valid'),
                scipy.signal.fftconvolve(Z, -c, mode='valid'))
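A minimal 2-D sketch (assuming sgolay2d as defined above, smoothing a noisy surface; derivative='both' would instead return the two gradient components):

import numpy as np

x, y = np.mgrid[-3:3:100j, -3:3:100j]
z = np.exp(-(x ** 2 + y ** 2)) + np.random.normal(0, 0.1, x.shape)
zf = sgolay2d(z, window_size=29, order=4)
# zf is the smoothed surface, same shape as z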

class PPform(object):
    """The ppform of the piecewise polynomials
    is given in terms of coefficients and breaks.
    The polynomial in the ith interval is
    x_{i} <= x < x_{i+1}

    S_i = sum(coefs[m,i]*(x-breaks[i])^(k-m), m=0..k)
@ -274,6 +307,7 @@ class PPform(object):
    >>> x = linspace(-1,3)
    >>> h=plt.plot(x,self(x))
    """

    def __init__(self, coeffs, breaks, fill=0.0, sort=False, a=None, b=None):
        if sort:
            self.breaks = np.sort(breaks)
@ -309,7 +343,8 @@ class PPform(object):
        V = np.vander(dx, N=self.order)
        # values = np.diag(dot(V,pp[:,indxs]))
        dot = np.dot
        values = np.array([dot(V[k, :], pp[:, indxs[k]])
                           for k in xrange(len(xx))])

        res[mask] = values
        res.shape = saveshape
@ -317,7 +352,7 @@ class PPform(object):
    def linear_extrapolate(self, output=True):
        '''
        Return 1D PPform which extrapolates linearly outside its basic interval
        '''
        max_order = 2
@ -354,7 +389,6 @@ class PPform(object):
        # by first setting all terms of order > maxOrder to zero and then
        # relocate the polynomial.

        # Set to zero all terms of order > maxOrder, i.e., not using them
        a_11 = coefs[self.order - max_order::, 0]
        dx1 = dx[0]
@ -380,7 +414,6 @@ class PPform(object):
        brks = self.breaks.copy()
        return PPform(cof, brks, fill=self.fill)

    def integrate(self):
        """
        Return the indefinite integral of the piecewise polynomial
@ -389,7 +422,8 @@ class PPform(object):
        pieces = len(self.breaks) - 1
        if 1 < pieces:
            # evaluate each integrated polynomial at the right endpoint of its
            # interval
            xs = diff(self.breaks[:-1, ...], axis=0)
            index = np.arange(pieces - 1)
@ -402,19 +436,19 @@ class PPform(object):
        return PPform(cof, self.breaks, fill=self.fill)
# def fromspline(self, xk, cvals, order, fill=0.0):
# N = len(xk) - 1
# sivals = np.empty((order + 1, N), dtype=float)
# for m in xrange(order, -1, -1):
# fact = spec.gamma(m + 1)
# res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
# res /= fact
# sivals[order - m, :] = res
# return self(sivals, xk, fill=fill)
## def fromspline(cls, xk, cvals, order, fill=0.0):
## N = len(xk)-1
## sivals = np.empty((order+1,N), dtype=float)
## for m in xrange(order,-1,-1):
## fact = spec.gamma(m+1)
## res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
## res /= fact
## sivals[order-m,:] = res
## return cls(sivals, xk, fill=fill)

class SmoothSpline(PPform):
    """
    Cubic Smoothing Spline.
@ -472,6 +506,7 @@ class SmoothSpline(PPform):
    Springer Verlag
    Uses EqXIV.6--9, self 239
    """

    def __init__(self, xx, yy, p=None, lin_extrap=True, var=1):
        coefs, brks = self._compute_coefs(xx, yy, p, var)
        super(SmoothSpline, self).__init__(coefs, brks)
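A minimal sketch (using the callable PPform interface inherited by SmoothSpline; p close to 1 gives a near-interpolating cubic spline):

import numpy as np

x = np.linspace(0, 2 * np.pi, 20)
y = np.sin(x) + 0.1 * np.random.randn(x.size)
ys = SmoothSpline(x, y, p=0.99)(x)
# ys is the smoothed curve evaluated at the original x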
@ -518,9 +553,11 @@ class SmoothSpline(PPform):
        dx.shape = (n - 1, -1)
        zrs = zeros(nd)
        if p < 1:
            # faster than yi-6*(1-p)*Q*u
            ai = (y - (6 * (1 - p) * D *
                       diff(vstack([zrs,
                                    diff(vstack([zrs, u, zrs]), axis=0) * dx1,
                                    zrs]), axis=0)).T).T
        else:
            ai = y.reshape(n, -1)
@ -532,7 +569,7 @@ class SmoothSpline(PPform):
        # dfi = diff(ai)./dx-(ci+di.*dx).*dx = bi;
        ci = np.vstack([zrs, 3 * p * u])
        di = (diff(vstack([ci, zrs]), axis=0) * dx1 / 3)
        bi = (diff(ai, axis=0) * dx1 - (ci + di * dx) * dx)
        ai = ai[:n - 1, ...]
        if nd > 1:
@ -545,7 +582,8 @@ class SmoothSpline(PPform):
            else:
                coefs = vstack([ci.ravel(), bi.ravel(), ai.ravel()])
        else:
            coefs = vstack(
                [di.ravel(), ci.ravel(), bi.ravel(), ai.ravel()])

        return coefs, x
@ -555,11 +593,15 @@ class SmoothSpline(PPform):
        R = sp.spdiags(data, [-1, 0, 1], n - 2, n - 2)

        if p is None or p < 1:
            Q = sp.spdiags(
                [dx1[:n - 2], -(dx1[:n - 2] + dx1[1:n - 1]), dx1[1:n - 1]],
                [0, -1, -2], n, n - 2)
            QDQ = (Q.T * D * Q)
            if p is None or p < 0:
                # Estimate p
                p = 1. / \
                    (1. + QDQ.diagonal().sum() /
                     (100. * R.diagonal().sum() ** 2))

            if p == 0:
                QQ = 6 * QDQ
@ -574,9 +616,11 @@ class SmoothSpline(PPform):
        u = 2 * sp.linalg.spsolve((QQ + QQ.T), ddydx)
        return u.reshape(n - 2, -1), p


def _edge_case(m0, d1):
    return np.where((d1 == 0) | (m0 == 0), 0.0, 1.0 / (1.0 / m0 + 1.0 / d1))


def pchip_slopes(x, y):
    # Determine the derivatives at the points y_k, d_k, by using
    # PCHIP algorithm is:
@ -608,6 +652,7 @@ def pchip_slopes(x, y):
    dk[-1] = _edge_case(mk[-1], dk[-2])
    return dk


def slopes(x, y, method='parabola', tension=0, monotone=False):
    '''
    Return estimated slopes y'(x)
@ -645,14 +690,14 @@ def slopes(x,y, method='parabola', tension=0, monotone=False):
    y = np.asarray(y, np.float_)
    yp = np.zeros(y.shape, np.float_)

    dx = x[1:] - x[:-1]
    # Compute the slopes of the secant lines between successive points
    dydx = (y[1:] - y[:-1]) / dx

    method = method.lower()
    if method.startswith('p'):  # parabola'):
        yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1]) / \
            (dx[1:] + dx[:-1])
        yp[0] = 2.0 * dydx[0] - yp[1]
        yp[-1] = 2.0 * dydx[-1] - yp[-2]
    else:
@ -682,7 +727,8 @@ def slopes(x,y, method='parabola', tension=0, monotone=False):
            # To prevent overshoot or undershoot, restrict the position vector
            # (alpha, beta) to a circle of radius 3. If (alpha**2 + beta**2)>9,
            # then set m[k] = tau[k]alpha[k]delta[k] and
            # m[k+1] = tau[k]beta[b]delta[k]
            # where tau = 3/sqrt(alpha**2 + beta**2).

            # Find the indices that need adjustment
@ -693,6 +739,7 @@ def slopes(x,y, method='parabola', tension=0, monotone=False):
    return yp


def stineman_interp(xi, x, y, yp=None):
    """
    Given data vectors *x* and *y*, the slope vector *yp* and a new
@ -759,7 +806,8 @@ def stineman_interp(xi, x, y, yp=None):
    idx = np.searchsorted(x[1:-1], xi)

    # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
    # except at the boundaries, where it may be that xi[j] < x[0] or xi[j] >
    # x[-1]

    # the y-values that would come out from a linear interpolation:
    sidx = s.take(idx)
@ -769,8 +817,10 @@ def stineman_interp(xi, x, y, yp=None):
    yo = yidx + sidx * (xi - xidx)

    # the difference that comes when using the slopes given in yp
    # using the yp slope of the left point
    dy1 = (yp.take(idx) - sidx) * (xi - xidx)
    # using the yp slope of the right point
    dy2 = (yp.take(idx + 1) - sidx) * (xi - xidxp1)

    dy1dy2 = dy1 * dy2
    # The following is optimized for Python. The solution actually
@ -779,52 +829,70 @@ def stineman_interp(xi, x, y, yp=None):
    # in Python
    dy1mdy2 = np.where(dy1dy2, dy1 - dy2, np.inf)
    dy1pdy2 = np.where(dy1dy2, dy1 + dy2, np.inf)
    yi = yo + dy1dy2 * np.choose(
        np.array(np.sign(dy1dy2), np.int32) + 1,
        ((2 * xi - xidx - xidxp1) / ((dy1mdy2) * (xidxp1 - xidx)), 0.0,
         1 / (dy1pdy2)))
    return yi
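A minimal sketch (assuming stineman_interp and slopes as defined in this module):

import numpy as np

x = np.linspace(0, 2 * np.pi, 20)
y = np.sin(x)
xi = np.linspace(0, 2 * np.pi, 100)
yi = stineman_interp(xi, x, y, yp=slopes(x, y))
# yi should follow sin(x) closely without spurious oscillations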

class StinemanInterp(object):
    '''
    Returns an interpolating function
    that runs through a set of points according to the algorithm of
    Stineman (1980).

    Parameters
    ----------
    x,y : array-like
        coordinates of points defining the interpolating function.
    yp : array-like
        slopes of the interpolating function at x.
        Optional: only given if they are known, else the argument is not used.
    method : string
        method for computing the slope at the given points if the slope is not
        known. With method="parabola" calculates the slopes from a parabola
        through every three points.

    Notes
    -----
    The interpolation method is described by Russell W. Stineman (1980)

    According to Stineman, the interpolation procedure has "the following
    properties:

    If values of the ordinates of the specified points change monotonically,
    and the slopes of the line segments joining the points change
    monotonically, then the interpolating curve and its slope will change
    monotonically. If the slopes of the line segments joining the specified
    points change monotonically, then the slopes of the interpolating curve
    will change monotonically. Suppose that the conditions in (1) or (2) are
    satisfied by a set of points, but a small change in the ordinate or slope
    at one of the points will result in conditions (1) or (2) no longer being
    satisfied. Then making this small change in the ordinate or slope at a
    point will cause no more than a small change in the interpolating
    curve." The method is based on rational interpolation with specially
    chosen rational functions to satisfy the above three conditions.

    Slopes computed at the given points with the methods provided by the
    `StinemanInterp' function satisfy Stineman's requirements.
    The original method suggested by Stineman (method="scaledstineman", the
    default, and "stineman") results in lower slopes near abrupt steps or
    spikes in the point sequence, and therefore a smaller tendency for
    overshooting. The method based on a second degree polynomial
    (method="parabola") provides a better approximation to smooth functions,
    but it results in higher slopes near abrupt steps or spikes and can lead
    to some overshooting where Stineman's method does not. Both methods lead
    to much less tendency for `spurious' oscillations than traditional
    interpolation methods based on polynomials, such as splines
    (see the examples section).

    Stineman states that "The complete assurance that the procedure will
    never generate `wild' points makes it attractive as a general purpose
    procedure".

    This interpolation method has been implemented in Matlab and R in addition
    to Python.

    Examples
    --------
@ -840,11 +908,14 @@ class StinemanInterp(object):
    >>> h=plt.subplot(211)
    >>> h=plt.plot(x,y,'o',xi,yi,'r', xi,yi1, 'g', xi,yi1, 'b')
    >>> h=plt.subplot(212)
    >>> h=plt.plot(xi,np.abs(sin(xi)-yi), 'r',
    ...            xi, np.abs(sin(xi)-yi1), 'g',
    ...            xi, np.abs(sin(xi)-yi2), 'b')

    References
    ----------
    Stineman, R. W. A Consistently Well Behaved Method of Interpolation.
    Creative Computing (1980), volume 6, number 7, p. 54-57.

    See Also
    --------
@ -868,11 +939,13 @@ class StinemanInterp(object):
        s = dy / dx  # note length of s is N-1 so last element is #N-2
        # find the segment each xi is in
        # this line actually is the key to the efficiency of this
        # implementation
        idx = np.searchsorted(x[1:-1], xi)

        # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
        # except at the boundaries, where it may be that xi[j] < x[0] or xi[j]
        # > x[-1]

        # the y-values that would come out from a linear interpolation:
        sidx = s.take(idx)
@ -882,8 +955,10 @@ class StinemanInterp(object):
        yo = yidx + sidx * (xi - xidx)

        # the difference that comes when using the slopes given in yp
        # using the yp slope of the left point
        dy1 = (yp.take(idx) - sidx) * (xi - xidx)
        # using the yp slope of the right point
        dy2 = (yp.take(idx + 1) - sidx) * (xi - xidxp1)

        dy1dy2 = dy1 * dy2
        # The following is optimized for Python. The solution actually
@ -892,29 +967,36 @@ class StinemanInterp(object):
        # in Python
        dy1mdy2 = np.where(dy1dy2, dy1 - dy2, np.inf)
        dy1pdy2 = np.where(dy1dy2, dy1 + dy2, np.inf)
        yi = yo + dy1dy2 * np.choose(
            np.array(np.sign(dy1dy2), np.int32) + 1,
            ((2 * xi - xidx - xidxp1) / ((dy1mdy2) * (xidxp1 - xidx)), 0.0,
             1 / (dy1pdy2)))
        return yi


class StinemanInterp2(PiecewisePolynomial):

    def __init__(self, x, y, yp=None, method='parabola', monotone=False):
        if yp is None:
            yp = slopes(x, y, method, monotone=monotone)
        super(StinemanInterp2, self).__init__(x, zip(y, yp))


class CubicHermiteSpline(PiecewisePolynomial):
    '''
    Piecewise Cubic Hermite Interpolation using Catmull-Rom
    method for computing the slopes.
    '''

    def __init__(self, x, y, yp=None, method='Catmull-Rom'):
        if yp is None:
            yp = slopes(x, y, method, monotone=False)
        super(CubicHermiteSpline, self).__init__(x, zip(y, yp), orders=3)


class Pchip(PiecewisePolynomial):
    """PCHIP 1-d monotonic cubic interpolation

    Description
@ -933,10 +1015,12 @@ class Pchip(PiecewisePolynomial):
        A 1-D array of real values. y's length along the interpolation
        axis must be equal to the length of x.
    yp : array
        slopes of the interpolating function at x.
        Optional: only given if they are known, else the argument is not used.
    method : string
        method for computing the slope at the given points if the slope is not
        known. With method="parabola" calculates the slopes from a parabola
        through every three points.

    Assumes x is sorted in monotonic order (e.g. x[1] > x[0])
@ -980,11 +1064,13 @@ class Pchip(PiecewisePolynomial):
>>> plt.show() >>> plt.show()
""" """
def __init__(self, x, y, yp=None, method='secant'): def __init__(self, x, y, yp=None, method='secant'):
if yp is None: if yp is None:
yp = slopes(x, y, method=method, monotone=True) yp = slopes(x, y, method=method, monotone=True)
super(Pchip, self).__init__(x, zip(y, yp), orders=3) super(Pchip, self).__init__(x, zip(y, yp), orders=3)
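A usage sketch contrasting the two wrappers defined above on step-like data (the data values are illustrative, not taken from this module): the monotone slopes used by Pchip avoid the undershoot that Catmull-Rom slopes produce next to a jump.
import numpy as np
# Pchip and CubicHermiteSpline are the classes defined above in this module
x = np.arange(7.0)
y = np.array([-1., -1., -1., 0., 1., 1., 1.])   # step-like sample data
xi = np.linspace(0., 6., 121)
yi_pchip = Pchip(x, y)(xi)                      # monotone slopes
yi_crom = CubicHermiteSpline(x, y)(xi)          # Catmull-Rom slopes
print(yi_pchip.min())   # stays at -1.0: no undershoot
print(yi_crom.min())    # dips below -1.0 next to the jump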
def test_smoothing_spline(): def test_smoothing_spline():
x = linspace(0, 2 * pi + pi / 4, 20) x = linspace(0, 2 * pi + pi / 4, 20)
y = sin(x) # + np.random.randn(x.size) y = sin(x) # + np.random.randn(x.size)
@ -1003,10 +1089,11 @@ def test_smoothing_spline():
pass pass
#tck = interpolate.splrep(x, y, s=len(x)) #tck = interpolate.splrep(x, y, s=len(x))
def compare_methods(): def compare_methods():
############################################################ #
# Sine wave test # Sine wave test
############################################################ #
fun = np.sin fun = np.sin
    # Create an example vector containing a sine wave.    # Create an example vector containing a sine wave.
x = np.arange(30.0) / 10. x = np.arange(30.0) / 10.
@ -1036,12 +1123,13 @@ def compare_methods():
# Plot the interpolated points # Plot the interpolated points
plt.plot(xvec, yvec, xvec, yvec1, xvec, yvec2, 'g.', xvec, yvec3) plt.plot(xvec, yvec, xvec, yvec1, xvec, yvec2, 'g.', xvec, yvec3)
plt.legend(['true','true','parbola_monoton','parabola','catmul','pchip'], frameon=False, loc=0) plt.legend(
['true', 'true', 'parbola_monoton', 'parabola', 'catmul', 'pchip'],
frameon=False, loc=0)
plt.ioff() plt.ioff()
plt.show() plt.show()
def demo_monoticity(): def demo_monoticity():
# Step function test... # Step function test...
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
@ -1088,11 +1176,12 @@ def demo_monoticity():
plt.ioff() plt.ioff()
plt.show() plt.show()
def test_doctstrings():
def test_func():
from scipy import interpolate from scipy import interpolate
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import matplotlib import matplotlib
matplotlib.interactive(True) matplotlib.interactive(False)
coef = np.array([[1, 1], [0, 1]]) # linear from 0 to 2 coef = np.array([[1, 1], [0, 1]]) # linear from 0 to 2
# coef = np.array([[1,1],[1,1],[0,2]]) # linear from 0 to 2 # coef = np.array([[1,1],[1,1],[0,2]]) # linear from 0 to 2
@ -1102,33 +1191,37 @@ def test_doctstrings():
y = pp(x) # @UnusedVariable y = pp(x) # @UnusedVariable
x = linspace(0, 2 * pi + pi / 4, 20) x = linspace(0, 2 * pi + pi / 4, 20)
y = x + np.random.randn(x.size) y = sin(x) + np.random.randn(x.size)
tck = interpolate.splrep(x, y, s=len(x)) tck = interpolate.splrep(x, y, s=len(x)) # @UndefinedVariable
xnew = linspace(0, 2 * pi, 100) xnew = linspace(0, 2 * pi, 100)
ynew = interpolate.splev(xnew, tck, der=0) ynew = interpolate.splev(xnew, tck, der=0) # @UndefinedVariable
tck0 = interpolate.splmake(xnew, ynew, order=3, kind='smoothest', conds=None) tck0 = interpolate.splmake( # @UndefinedVariable
pp = interpolate.ppform.fromspline(*tck0) xnew, ynew, order=3, kind='smoothest', conds=None)
pp = interpolate.ppform.fromspline(*tck0) # @UndefinedVariable
plt.plot(x, y, "x", xnew, ynew, xnew, sin(xnew), x, y, "b") plt.plot(x, y, "x", xnew, ynew, xnew, sin(xnew), x, y, "b", x, pp(x), 'g')
plt.legend(['Linear', 'Cubic Spline', 'True']) plt.legend(['Linear', 'Cubic Spline', 'True'])
plt.title('Cubic-spline interpolation') plt.title('Cubic-spline interpolation')
plt.show()
t = np.arange(0, 1.1, .1) t = np.arange(0, 1.1, .1)
x = np.sin(2 * np.pi * t) x = np.sin(2 * np.pi * t)
y = np.cos(2 * np.pi * t) y = np.cos(2 * np.pi * t)
tck1, u = interpolate.splprep([t, y], s=0) #@UnusedVariable _tck1, _u = interpolate.splprep([t, y], s=0) # @UndefinedVariable
tck2 = interpolate.splrep(t, y, s=len(t), task=0) tck2 = interpolate.splrep(t, y, s=len(t), task=0) # @UndefinedVariable
# interpolate.spl # interpolate.spl
tck = interpolate.splmake(t, y, order=3, kind='smoothest', conds=None) tck = interpolate.splmake(t, y, order=3, kind='smoothest', conds=None) # @UndefinedVariable
self = interpolate.ppform.fromspline(*tck2) self = interpolate.ppform.fromspline(*tck2) # @UndefinedVariable
plt.plot(t, self(t)) plt.plot(t, self(t))
plt.show()
pass pass
def test_pp(): def test_pp():
coef = np.array([[1, 1], [0, 0]]) # linear from 0 to 2 @UnusedVariable coef = np.array([[1, 1], [0, 0]]) # linear from 0 to 2 @UnusedVariable
coef = np.array([[1, 1], [1, 1], [0, 2]]) # quadratic from 0 to 1 and 1 to 2. # quadratic from 0 to 1 and 1 to 2.
coef = np.array([[1, 1], [1, 1], [0, 2]])
dc = pl.polyder(coef, 1) dc = pl.polyder(coef, 1)
c2 = pl.polyint(dc, 1) # @UnusedVariable c2 = pl.polyint(dc, 1) # @UnusedVariable
breaks = [0, 1, 2] breaks = [0, 1, 2]
@ -1149,8 +1242,8 @@ def test_docstrings():
if __name__ == '__main__': if __name__ == '__main__':
# test_docstrings() test_func()
# test_doctstrings() # test_doctstrings()
# test_smoothing_spline() # test_smoothing_spline()
# compare_methods() # compare_methods()
demo_monoticity() #demo_monoticity()

File diff suppressed because it is too large

@ -6,6 +6,7 @@ Created on Tue Apr 17 13:59:12 2012
""" """
import numpy as np import numpy as np
def magic(n): def magic(n):
ix = np.arange(n) + 1 ix = np.arange(n) + 1
J, I = np.meshgrid(ix, ix) J, I = np.meshgrid(ix, ix)

@ -1,4 +1,6 @@
import numpy as np import numpy as np
def meshgrid(*xi, **kwargs): def meshgrid(*xi, **kwargs):
""" """
Return coordinate matrices from one or more coordinate vectors. Return coordinate matrices from one or more coordinate vectors.
@ -33,10 +35,10 @@ def meshgrid(*xi, **kwargs):
Notes Notes
----- -----
This function supports both indexing conventions through the indexing keyword This function supports both indexing conventions through the indexing
argument. Giving the string 'ij' returns a meshgrid with matrix indexing, keyword argument. Giving the string 'ij' returns a meshgrid with matrix
while 'xy' returns a meshgrid with Cartesian indexing. The difference is indexing, while 'xy' returns a meshgrid with Cartesian indexing. The
illustrated by the following code snippet: difference is illustrated by the following code snippet:
xv, yv = meshgrid(x, y, sparse=False, indexing='ij') xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx): for i in range(nx):
@ -89,13 +91,15 @@ def meshgrid(*xi, **kwargs):
ndim = len(args) ndim = len(args)
if not isinstance(args, list) or ndim < 2: if not isinstance(args, list) or ndim < 2:
raise TypeError('meshgrid() takes 2 or more arguments (%d given)' % int(ndim>0)) raise TypeError(
'meshgrid() takes 2 or more arguments (%d given)' % int(ndim > 0))
sparse = kwargs.get('sparse', False) sparse = kwargs.get('sparse', False)
indexing = kwargs.get('indexing', 'xy') indexing = kwargs.get('indexing', 'xy')
s0 = (1,) * ndim s0 = (1,) * ndim
output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::]) for i, x in enumerate(args)] output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(args)]
shape = [x.size for x in output] shape = [x.size for x in output]
@ -130,4 +134,3 @@ def ndgrid(*args, **kwargs):
if __name__ == '__main__': if __name__ == '__main__':
import doctest import doctest
doctest.testmod() doctest.testmod()

@ -6,7 +6,8 @@ from __future__ import division
import sys import sys
import fractions import fractions
import numpy as np import numpy as np
from numpy import (abs, amax, any, logical_and, arange, linspace, atleast_1d, #atleast_2d, from numpy import (
abs, amax, any, logical_and, arange, linspace, atleast_1d, # atleast_2d,
array, asarray, broadcast_arrays, ceil, floor, frexp, hypot, array, asarray, broadcast_arrays, ceil, floor, frexp, hypot,
sqrt, arctan2, sin, cos, exp, log, mod, diff, empty_like, sqrt, arctan2, sin, cos, exp, log, mod, diff, empty_like,
finfo, inf, pi, interp, isnan, isscalar, zeros, ones, linalg, finfo, inf, pi, interp, isnan, isscalar, zeros, ones, linalg,
@ -25,7 +26,8 @@ except:
floatinfo = finfo(float) floatinfo = finfo(float)
__all__ = ['is_numlike', 'JITImport', 'DotDict', 'Bunch', 'printf', 'sub_dict_select', __all__ = [
'is_numlike', 'JITImport', 'DotDict', 'Bunch', 'printf', 'sub_dict_select',
'parse_kwargs', 'detrendma', 'ecross', 'findcross', 'parse_kwargs', 'detrendma', 'ecross', 'findcross',
'findextrema', 'findpeaks', 'findrfc', 'rfcfilter', 'findtp', 'findtc', 'findextrema', 'findpeaks', 'findrfc', 'rfcfilter', 'findtp', 'findtc',
'findoutliers', 'common_shape', 'argsreduce', 'findoutliers', 'common_shape', 'argsreduce',
@ -43,7 +45,9 @@ def is_numlike(obj):
else: else:
return True return True
class JITImport(object): class JITImport(object):
''' '''
Just In Time Import of module Just In Time Import of module
@ -53,9 +57,11 @@ class JITImport(object):
>>> np.exp(0)==1.0 >>> np.exp(0)==1.0
True True
''' '''
def __init__(self, module_name): def __init__(self, module_name):
self._module_name = module_name self._module_name = module_name
self._module = None self._module = None
def __getattr__(self, attr): def __getattr__(self, attr):
try: try:
return getattr(self._module, attr) return getattr(self._module, attr)
@ -67,7 +73,9 @@ class JITImport(object):
else: else:
raise raise
class DotDict(dict): class DotDict(dict):
''' Implement dot access to dict values ''' Implement dot access to dict values
Example Example
@ -78,7 +86,9 @@ class DotDict(dict):
''' '''
__getattr__ = dict.__getitem__ __getattr__ = dict.__getitem__
class Bunch(object): class Bunch(object):
''' Implement keyword argument initialization of class ''' Implement keyword argument initialization of class
Example Example
@ -87,13 +97,17 @@ class Bunch(object):
>>> d.test1 >>> d.test1
1 1
''' '''
def __init__(self, **kwargs): def __init__(self, **kwargs):
self.__dict__.update(kwargs) self.__dict__.update(kwargs)
def keys(self): def keys(self):
return self.__dict__.keys() return self.__dict__.keys()
def update(self, ** kwargs): def update(self, ** kwargs):
self.__dict__.update(kwargs) self.__dict__.update(kwargs)
def printf(format, *args): # @ReservedAssignment def printf(format, *args): # @ReservedAssignment
sys.stdout.write(format % args) sys.stdout.write(format % args)
@ -122,7 +136,7 @@ def sub_dict_select(somedict, somekeys):
def parse_kwargs(options, **kwargs): def parse_kwargs(options, **kwargs):
''' Update options dict from keyword arguments if the keyword exists in options ''' Update options dict from keyword arguments if it exists in options
Example Example
>>> opt = dict(arg1=2, arg2=3) >>> opt = dict(arg1=2, arg2=3)
@ -140,13 +154,16 @@ def parse_kwargs(options, **kwargs):
options.update(newopts) options.update(newopts)
return options return options
def testfun(*args, **kwargs): def testfun(*args, **kwargs):
opts = dict(opt1=1, opt2=2) opts = dict(opt1=1, opt2=2)
if len(args) == 1 and len(kwargs) == 0 and type(args[0]) is str and args[0].startswith('default'): if (len(args) == 1 and len(kwargs) == 0 and type(args[0]) is str and
args[0].startswith('default')):
return opts return opts
opts = parse_kwargs(opts, **kwargs) opts = parse_kwargs(opts, **kwargs)
return opts return opts
def detrendma(x, L): def detrendma(x, L):
""" """
Removes a trend from data using a moving average Removes a trend from data using a moving average
@ -194,7 +211,6 @@ def detrendma(x, L):
if n < 2 * L + 1: # only able to remove the mean if n < 2 * L + 1: # only able to remove the mean
return x1 - x1.mean(axis=0) return x1 - x1.mean(axis=0)
mn = x1[0:2 * L + 1].mean(axis=0) mn = x1[0:2 * L + 1].mean(axis=0)
y = empty_like(x1) y = empty_like(x1)
y[0:L] = x1[0:L] - mn y[0:L] = x1[0:L] - mn
@ -205,6 +221,7 @@ def detrendma(x, L):
y[n - L::] = x1[n - L::] - trend[-1] y[n - L::] = x1[n - L::] - trend[-1]
return y return y
def ecross(t, f, ind, v=0): def ecross(t, f, ind, v=0):
''' '''
Extracts exact level v crossings Extracts exact level v crossings
@ -252,7 +269,9 @@ def ecross(t, f, ind, v=0):
# Tested on: Python 2.5 # Tested on: Python 2.5
# revised pab Feb2004 # revised pab Feb2004
# By pab 18.06.2001 # By pab 18.06.2001
return t[ind] + (v - f[ind]) * (t[ind + 1] - t[ind]) / (f[ind + 1] - f[ind]) return (t[ind] + (v - f[ind]) * (t[ind + 1] - t[ind]) /
(f[ind + 1] - f[ind]))
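The return expression is ordinary linear interpolation between the two samples that bracket each crossing; a self-contained sketch of the same arithmetic (the function name here is illustrative, not the module's):
import numpy as np

def linear_crossing(t, f, ind, v=0.0):
    # exact abscissa where the chord from (t[i], f[i]) to (t[i+1], f[i+1]) hits level v
    return t[ind] + (v - f[ind]) * (t[ind + 1] - t[ind]) / (f[ind + 1] - f[ind])

t = np.array([0., 1., 2.])
f = np.array([-1., 1., -1.])
print(linear_crossing(t, f, np.array([0, 1])))   # -> [0.5, 1.5]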
def _findcross(xn): def _findcross(xn):
'''Return indices to zero up and downcrossings of a vector '''Return indices to zero up and downcrossings of a vector
@ -288,6 +307,7 @@ def _findcross(xn):
ind, = (xn[:n - 1] * xn[1:] < 0).nonzero() ind, = (xn[:n - 1] * xn[1:] < 0).nonzero()
return ind return ind
def findcross(x, v=0.0, kind=None): def findcross(x, v=0.0, kind=None):
''' '''
Return indices to level v up and/or downcrossings of a vector Return indices to level v up and/or downcrossings of a vector
@ -352,10 +372,11 @@ def findcross(x, v=0.0, kind=None):
t_0 = int(xn[ind[0] + 1] < 0) t_0 = int(xn[ind[0] + 1] < 0)
ind = ind[t_0::2] ind = ind[t_0::2]
elif kind in ('dw', 'uw', 'tw', 'cw'): elif kind in ('dw', 'uw', 'tw', 'cw'):
#make sure that the first is a level v down-crossing if wdef=='dw' # make sure the first is a level v down-crossing if wdef=='dw'
#or make sure that the first is a level v up-crossing if wdef=='uw' # or make sure the first is a level v up-crossing if wdef=='uw'
#make sure that the first is a level v down-crossing if wdef=='tw' # make sure the first is a level v down-crossing if wdef=='tw'
#or make sure that the first is a level v up-crossing if wdef=='cw' # or make sure the first is a level v up-crossing if
# wdef=='cw'
xor = lambda a, b: a ^ b xor = lambda a, b: a ^ b
first_is_down_crossing = int(xn[ind[0]] > xn[ind[0] + 1]) first_is_down_crossing = int(xn[ind[0]] > xn[ind[0] + 1])
if xor(first_is_down_crossing, kind in ('dw', 'tw')): if xor(first_is_down_crossing, kind in ('dw', 'tw')):
@ -372,6 +393,7 @@ def findcross(x, v=0.0, kind=None):
raise ValueError('Unknown wave/crossing definition!') raise ValueError('Unknown wave/crossing definition!')
return ind return ind
def findextrema(x): def findextrema(x):
''' '''
Return indices to minima and maxima of a vector Return indices to minima and maxima of a vector
@ -402,6 +424,8 @@ def findextrema(x):
''' '''
xn = atleast_1d(x).ravel() xn = atleast_1d(x).ravel()
return findcross(diff(xn), 0.0) + 1 return findcross(diff(xn), 0.0) + 1
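Since extrema of x are zero crossings of diff(x), the function is one line on top of findcross. A NumPy-only sketch of the same idea (not the module's implementation, which reuses findcross):
import numpy as np

def extrema_indices(x):
    dx = np.diff(x)
    ind, = np.nonzero(dx[:-1] * dx[1:] < 0)   # sign change in the first difference
    return ind + 1                            # index of the extremal sample

t = np.linspace(0, 4 * np.pi, 400)
print(len(extrema_indices(np.sin(t))))        # 4 extrema of sin on [0, 4*pi]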
def findpeaks(data, n=2, min_h=None, min_p=0.0): def findpeaks(data, n=2, min_h=None, min_p=0.0):
''' '''
Find peaks of vector or matrix possibly rainflow filtered Find peaks of vector or matrix possibly rainflow filtered
@ -461,7 +485,8 @@ def findpeaks(data, n=2, min_h=None, min_p=0.0):
elif iy == nrows - 1: elif iy == nrows - 1:
ind2 = np.flatnonzero(S[iy, ind] > S[iy - 1, ind]) ind2 = np.flatnonzero(S[iy, ind] > S[iy - 1, ind])
else: else:
ind2 = np.flatnonzero((S[iy, ind] > S[iy - 1, ind]) & (S[iy, ind] > S[iy + 1, ind])) ind2 = np.flatnonzero((S[iy, ind] > S[iy - 1, ind]) &
(S[iy, ind] > S[iy + 1, ind]))
if len(ind2): if len(ind2):
indP.append((ind[ind2] + iy * mcols)) indP.append((ind[ind2] + iy * mcols))
@ -474,16 +499,17 @@ def findpeaks(data, n=2, min_h=None, min_p=0.0):
peaks = S.take(ind) peaks = S.take(ind)
ind2 = peaks.argsort()[::-1] ind2 = peaks.argsort()[::-1]
# keeping only the Np most significant peak frequencies. # keeping only the Np most significant peak frequencies.
nmax = min(n, len(ind)) nmax = min(n, len(ind))
ind = ind[ind2[:nmax]] ind = ind[ind2[:nmax]]
if (min_p > 0): if (min_p > 0):
# Keeping only peaks larger than min_p percent relative to the maximum peak # Keeping only peaks larger than min_p percent relative to the maximum
# peak
ind = ind[(S.take(ind) > min_p * smax)] ind = ind[(S.take(ind) > min_p * smax)]
return ind return ind
def findrfc_astm(tp): def findrfc_astm(tp):
""" """
Return rainflow counted cycles Return rainflow counted cycles
@ -514,6 +540,7 @@ def findrfc_astm(tp):
# sig_rfc holds the actual rainflow counted cycles, not the indices # sig_rfc holds the actual rainflow counted cycles, not the indices
return sig_rfc return sig_rfc
def findrfc(tp, h=0.0, method='clib'): def findrfc(tp, h=0.0, method='clib'):
''' '''
Return indices to rainflow cycles of a sequence of TP. Return indices to rainflow cycles of a sequence of TP.
@ -624,7 +651,6 @@ def findrfc(tp, h=0.0, method='clib'):
#iy = i #iy = i
continue continue
# goto L180 # goto L180
# L170: # L170:
if (xplus <= xminus): if (xplus <= xminus):
@ -646,9 +672,11 @@ def findrfc(tp, h=0.0, method='clib'):
ind, ix = clib.findrfc(y, h) ind, ix = clib.findrfc(y, h)
return np.sort(ind[:ix]) return np.sort(ind[:ix])
def mctp2rfc(fmM, fMm=None): def mctp2rfc(fmM, fMm=None):
''' '''
Return Rainflow matrix given a Markov matrix of a Markov chain of turning points Return Rainflow matrix given a Markov matrix of a Markov chain
of turning points
computes f_rfc = f_mM + F_mct(f_mM). computes f_rfc = f_mM + F_mct(f_mM).
@ -707,7 +735,8 @@ def mctp2rfc(fmM, fMm=None):
SRA = RAA.sum() SRA = RAA.sum()
DRFC = SA - SRA DRFC = SA - SRA
NT = min(mA[0] - sum(RAA[:, 0]), MA[0] - sum(RAA[0, :])) # ?? check # ?? check
NT = min(mA[0] - sum(RAA[:, 0]), MA[0] - sum(RAA[0, :]))
NT = max(NT, 0) # ??check NT = max(NT, 0) # ??check
if NT > 1e-6 * max(MA[0], mA[0]): if NT > 1e-6 * max(MA[0], mA[0]):
@ -722,13 +751,14 @@ def mctp2rfc(fmM, fMm=None):
e[j] = e[j] / norm e[j] = e[j] / norm
# end # end
# end # end
fx = 0.0; fx = 0.0
if max(abs(e)) > 1e-6 and max(abs(NN)) > 1e-6 * max(MA[0], mA[0]): if (max(abs(e)) > 1e-6 and
max(abs(NN)) > 1e-6 * max(MA[0], mA[0])):
PMm = AA1.copy() PMm = AA1.copy()
for j in range(nA): for j in range(nA):
norm = MA[j] norm = MA[j]
if norm != 0: if norm != 0:
PMm[j, :] = PMm[j, :] / norm; PMm[j, :] = PMm[j, :] / norm
# end # end
# end # end
PMm = np.fliplr(PMm) PMm = np.fliplr(PMm)
@ -740,7 +770,8 @@ def mctp2rfc(fmM, fMm=None):
fx = NN * (A / (1 - B * A) * e) fx = NN * (A / (1 - B * A) * e)
else: else:
rh = np.eye(A.shape[0]) - np.dot(B, A) rh = np.eye(A.shape[0]) - np.dot(B, A)
fx = np.dot(NN, np.dot(A, linalg.solve(rh, e))) #least squares #least squares
fx = np.dot(NN, np.dot(A, linalg.solve(rh, e)))
# end # end
# end # end
f_rfc[N - 1 - k, k - i] = fx + DRFC f_rfc[N - 1 - k, k - i] = fx + DRFC
@ -758,8 +789,8 @@ def mctp2rfc(fmM, fMm=None):
# end # end
for k in range(1, N): for k in range(1, N):
M0 = max(0, f_max[0] - np.sum(f_rfc[0, N - k:N])); M0 = max(0, f_max[0] - np.sum(f_rfc[0, N - k:N]))
m0 = max(0, f_min[N - 1 - k] - np.sum(f_rfc[1:k+1, N - 1 - k])); m0 = max(0, f_min[N - 1 - k] - np.sum(f_rfc[1:k + 1, N - 1 - k]))
f_rfc[0, N - 1 - k] = min(m0, M0) f_rfc[0, N - 1 - k] = min(m0, M0)
# end # end
@ -781,7 +812,6 @@ def mctp2rfc(fmM, fMm=None):
return f_rfc return f_rfc
def rfcfilter(x, h, method=0): def rfcfilter(x, h, method=0):
""" """
Rainflow filter a signal. Rainflow filter a signal.
@ -866,9 +896,11 @@ def rfcfilter(x, h, method=0):
z1 = 0 z1 = 0
t1, y1 = (t0, y0) if z1 == 0 else (ti, yi) t1, y1 = (t0, y0) if z1 == 0 else (ti, yi)
else: else:
if (((z0 == +1) & cmpfun1(yi, fmi)) | ((z0 == -1) & cmpfun2(yi, fpi))): if (((z0 == +1) & cmpfun1(yi, fmi)) |
((z0 == -1) & cmpfun2(yi, fpi))):
z1 = -1 z1 = -1
elif (((z0 == +1) & cmpfun2(fmi, yi)) | ((z0 == -1) & cmpfun1(fpi, yi))): elif (((z0 == +1) & cmpfun2(fmi, yi)) |
((z0 == -1) & cmpfun1(fpi, yi))):
z1 = +1 z1 = +1
else: else:
warnings.warn('Something wrong, i=%d' % tim1) warnings.warn('Something wrong, i=%d' % tim1)
@ -898,6 +930,7 @@ def rfcfilter(x, h, method=0):
t[j] = t0 t[j] = t0
return y[t[:j + 1]] return y[t[:j + 1]]
def findtp(x, h=0.0, kind=None): def findtp(x, h=0.0, kind=None):
''' '''
    Return indices to turning points (tp) of data, optionally rainflow filtered.    Return indices to turning points (tp) of data, optionally rainflow filtered.
@ -936,7 +969,9 @@ def findtp(x, h=0.0, kind=None):
>>> itph = wm.findtp(x1[:,1],0.3,'Mw') >>> itph = wm.findtp(x1[:,1],0.3,'Mw')
>>> tp = x1[itp,:] >>> tp = x1[itp,:]
>>> tph = x1[itph,:] >>> tph = x1[itph,:]
>>> a = plb.plot(x1[:,0],x1[:,1],tp[:,0],tp[:,1],'ro',tph[:,1],tph[:,1],'k.') >>> a = plb.plot(x1[:,0],x1[:,1],
... tp[:,0],tp[:,1],'ro',
... tph[:,1],tph[:,1],'k.')
>>> plb.close('all') >>> plb.close('all')
>>> itp >>> itp
array([ 11, 21, 22, 24, 26, 28, 31, 39, 43, 45, 47, 51, 56, array([ 11, 21, 22, 24, 26, 28, 31, 39, 43, 45, 47, 51, 56,
@ -970,7 +1005,8 @@ def findtp(x, h=0.0, kind=None):
if kind == 'astm': if kind == 'astm':
        # the Nieslony approach always puts the first loading point as the first        # the Nieslony approach always puts the first loading point as the first
# turning point. # turning point.
        if x[ind[0]] != x[0]:  # add the first point of the signal as the first turning point        # add the first point of the signal as the first turning point
if x[ind[0]] != x[0]:
ind = np.r_[0, ind, n - 1] ind = np.r_[0, ind, n - 1]
else: # only add the last point of the signal else: # only add the last point of the signal
ind = np.r_[ind, n - 1] ind = np.r_[ind, n - 1]
@ -1000,6 +1036,7 @@ def findtp(x, h=0.0, kind=None):
ind = ind[:-1] ind = ind[:-1]
return ind return ind
def findtc(x_in, v=None, kind=None): def findtc(x_in, v=None, kind=None):
""" """
Return indices to troughs and crests of data. Return indices to troughs and crests of data.
@ -1092,6 +1129,7 @@ def findtc(x_in, v=None, kind=None):
return v_ind[:n_c - 1] + ind + 1, v_ind return v_ind[:n_c - 1] + ind + 1, v_ind
def findoutliers(x, zcrit=0.0, dcrit=None, ddcrit=None, verbose=False): def findoutliers(x, zcrit=0.0, dcrit=None, ddcrit=None, verbose=False):
""" """
Return indices to spurious points of data Return indices to spurious points of data
@ -1120,7 +1158,8 @@ def findoutliers(x, zcrit=0.0, dcrit=None, ddcrit=None, verbose=False):
----- -----
Consecutive points less than zcrit apart are considered as spurious. Consecutive points less than zcrit apart are considered as spurious.
The point immediately after and before are also removed. Jumps greater than The point immediately after and before are also removed. Jumps greater than
dcrit in Dxn and greater than ddcrit in D^2xn are also considered as spurious. dcrit in Dxn and greater than ddcrit in D^2xn are also considered as
spurious.
(All distances to be interpreted in the vertical direction.) (All distances to be interpreted in the vertical direction.)
    Other good choices for dcrit and ddcrit are:    Other good choices for dcrit and ddcrit are:
@ -1153,7 +1192,6 @@ def findoutliers(x, zcrit=0.0, dcrit=None, ddcrit=None, verbose=False):
waveplot, reconstruct waveplot, reconstruct
""" """
# finding outliers # finding outliers
findjumpsDx = True # find jumps in Dx findjumpsDx = True # find jumps in Dx
# two point spikes and Spikes dcrit above/under the # two point spikes and Spikes dcrit above/under the
@ -1168,7 +1206,6 @@ def findoutliers(x, zcrit=0.0, dcrit=None, ddcrit=None, verbose=False):
if xn.size < 2: if xn.size < 2:
raise ValueError('The vector must have more than 2 elements!') raise ValueError('The vector must have more than 2 elements!')
ind = zeros(0, dtype=int) ind = zeros(0, dtype=int)
# indg=[] # indg=[]
indmiss = isnan(xn) indmiss = isnan(xn)
@ -1249,7 +1286,8 @@ def findoutliers(x, zcrit=0.0, dcrit=None, ddcrit=None, verbose=False):
indtr, = nonzero((diff(indzeros))) indtr, = nonzero((diff(indzeros)))
indtr = indtr + 1 indtr = indtr + 1
#%indices to consecutive equal points #%indices to consecutive equal points
if True: # removing the point before + all equal points + the point after # removing the point before + all equal points + the point after
if True:
ind = hstack((ind, indtr - 1, indz, indtr, indtr + 1)) ind = hstack((ind, indtr - 1, indz, indtr, indtr + 1))
else: # % removing all points + the point after else: # % removing all points + the point after
ind = hstack((ind, indz, indtr, indtr + 1)) ind = hstack((ind, indz, indtr, indtr + 1))
@ -1258,7 +1296,8 @@ def findoutliers(x, zcrit=0.0, dcrit=None, ddcrit=None, verbose=False):
if zcrit == 0.: if zcrit == 0.:
print('Found %d consecutive equal values' % indz.size) print('Found %d consecutive equal values' % indz.size)
else: else:
print('Found %d consecutive values less than %g apart.' % (indz.size, zcrit)) print('Found %d consecutive values less than %g apart.' %
(indz.size, zcrit))
indg = ones(xn.size, dtype=bool) indg = ones(xn.size, dtype=bool)
if ind.size > 1: if ind.size > 1:
@ -1271,6 +1310,7 @@ def findoutliers(x, zcrit=0.0, dcrit=None, ddcrit=None, verbose=False):
return ind, indg return ind, indg
def common_shape(*args, ** kwds): def common_shape(*args, ** kwds):
''' '''
Return the common shape of a sequence of arrays Return the common shape of a sequence of arrays
@ -1339,8 +1379,8 @@ def common_shape(*args, ** kwds):
raise ValueError("shape mismatch: two or more arrays have " raise ValueError("shape mismatch: two or more arrays have "
"incompatible dimensions on axis %r." % (axis,)) "incompatible dimensions on axis %r." % (axis,))
elif len(unique) == 2: elif len(unique) == 2:
# There is exactly one non-1 length. The common shape will take this # There is exactly one non-1 length.
# value. # The common shape will take this value.
unique.remove(1) unique.remove(1)
new_length = unique.pop() new_length = unique.pop()
c_shape.append(new_length) c_shape.append(new_length)
@ -1351,6 +1391,7 @@ def common_shape(*args, ** kwds):
return tuple(c_shape) return tuple(c_shape)
def argsreduce(condition, * args): def argsreduce(condition, * args):
""" Return the elements of each input array that satisfy some condition. """ Return the elements of each input array that satisfy some condition.
@ -1399,7 +1440,8 @@ def argsreduce(condition, * args):
def stirlerr(n): def stirlerr(n):
''' '''
Return error of Stirling approximation, i.e., log(n!) - log( sqrt(2*pi*n)*(n/exp(1))**n ) Return error of Stirling approximation,
i.e., log(n!) - log( sqrt(2*pi*n)*(n/exp(1))**n )
Example Example
------- -------
@ -1430,7 +1472,6 @@ def stirlerr(n):
y = gammaln(n1 + 1) - log(sqrt(2 * pi * n1) * (n1 / exp(1)) ** n1) y = gammaln(n1 + 1) - log(sqrt(2 * pi * n1) * (n1 / exp(1)) ** n1)
nn = n1 * n1 nn = n1 * n1
n500 = 500 < n1 n500 = 500 < n1
@ -1446,11 +1487,16 @@ def stirlerr(n):
n15 = logical_and(15 < n1, n1 <= 35) n15 = logical_and(15 < n1, n1 <= 35)
if any(n15): if any(n15):
nn15 = nn[n15] nn15 = nn[n15]
y[n15] = (S0 - (S1 - (S2 - (S3 - S4 / nn15) / nn15) / nn15) / nn15) / n1[n15] y[n15] = (
S0 - (S1 - (S2 - (S3 - S4 / nn15) / nn15) / nn15) / nn15) / n1[n15]
return y return y
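The quantity approximated here is log(n!) - log(sqrt(2*pi*n)*(n/e)**n); the series in S0..S4 reproduces it without the cancellation the direct gammaln expression suffers for large n. A direct check for small n (illustrative, and essentially what the fallback branch above computes):
import numpy as np
from scipy.special import gammaln

def stirling_error_direct(n):
    n = np.asarray(n, dtype=float)
    return gammaln(n + 1) - np.log(np.sqrt(2 * np.pi * n) * (n / np.e) ** n)

print(stirling_error_direct([1., 5., 50.]))   # roughly 1/(12*n): 0.081, 0.017, 0.0017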
def getshipchar(value=None, property="max_deadweight", **kwds): #@ReservedAssignment #@ReservedAssignment
def getshipchar(value=None, property="max_deadweight", # @ReservedAssignment
**kwds):
''' '''
Return ship characteristics from value of one ship-property Return ship characteristics from value of one ship-property
@ -1464,15 +1510,16 @@ def getshipchar(value=None, property="max_deadweight", **kwds): #@ReservedAssign
'propeller_diameter'. 'propeller_diameter'.
The length was found from statistics of 40 vessels of size 85 to The length was found from statistics of 40 vessels of size 85 to
    100000 tonnes. An exponential curve through 0 was selected, and the    100000 tonnes. An exponential curve through 0 was selected, and the
factor and exponent that minimized the standard deviation of the relative factor and exponent that minimized the standard deviation of the
error was selected. (The error returned is the same for any ship.) The relative error was selected. (The error returned is the same for
    service speed was found for ships above 1000 tonnes only.    any ship.) The service speed was found for ships above 1000 tonnes
The propeller diameter formula is from [1]_. only. The propeller diameter formula is from [1]_.
Returns Returns
------- -------
sc : dict sc : dict
containing estimated mean values and standard-deviations of ship characteristics: containing estimated mean values and standard-deviations of ship
characteristics:
max_deadweight [kkg], (weight of cargo, fuel etc.) max_deadweight [kkg], (weight of cargo, fuel etc.)
length [m] length [m]
beam [m] beam [m]
@ -1504,8 +1551,8 @@ def getshipchar(value=None, property="max_deadweight", **kwds): #@ReservedAssign
Reference Reference
--------- ---------
.. [1] Gray and Greeley, (1978), .. [1] Gray and Greeley, (1978),
"Source level model for propeller blade rate radiation for the world's merchant "Source level model for propeller blade rate radiation for the world's
fleet", Bolt Beranek and Newman Technical Memorandum No. 458. merchant fleet", Bolt Beranek and Newman Technical Memorandum No. 458.
''' '''
if value is None: if value is None:
names = kwds.keys() names = kwds.keys()
@ -1522,7 +1569,8 @@ def getshipchar(value=None, property="max_deadweight", **kwds): #@ReservedAssign
beam=lambda x: ((x / 1.78) ** (1 / 0.27)), beam=lambda x: ((x / 1.78) ** (1 / 0.27)),
draught=lambda x: ((x / 0.8) ** (1 / 0.24)), draught=lambda x: ((x / 0.8) ** (1 / 0.24)),
service_speed=lambda x: ((x / 1.14) ** (1 / 0.21)), service_speed=lambda x: ((x / 1.14) ** (1 / 0.21)),
propeller_diameter=lambda x: (((x / 0.12) ** (4 / 3) / 3.45) ** (2.5))) propeller_diameter=lambda x: (((x / 0.12) ** (4 / 3) /
3.45) ** (2.5)))
max_deadweight = prop2max_dw.get(prop, lambda x: x)(value) max_deadweight = prop2max_dw.get(prop, lambda x: x)(value)
propertySTD = prop + 'STD' propertySTD = prop + 'STD'
@ -1540,7 +1588,6 @@ def getshipchar(value=None, property="max_deadweight", **kwds): #@ReservedAssign
speed = round(1.14 * max_deadweight ** 0.21 * 10) / 10 speed = round(1.14 * max_deadweight ** 0.21 * 10) / 10
speed_err = speed * 0.10 speed_err = speed * 0.10
p_diam = 0.12 * length ** (3.0 / 4.0) p_diam = 0.12 * length ** (3.0 / 4.0)
p_diam_err = 0.12 * length_err ** (3.0 / 4.0) p_diam_err = 0.12 * length_err ** (3.0 / 4.0)
@ -1550,13 +1597,16 @@ def getshipchar(value=None, property="max_deadweight", **kwds): #@ReservedAssign
shipchar = OrderedDict(beam=beam, beamSTD=beam_err, shipchar = OrderedDict(beam=beam, beamSTD=beam_err,
draught=draught, draughtSTD=draught_err, draught=draught, draughtSTD=draught_err,
length=length, lengthSTD=length_err, length=length, lengthSTD=length_err,
max_deadweight=max_deadweight, max_deadweightSTD=max_deadweightSTD, max_deadweight=max_deadweight,
propeller_diameter=p_diam, propeller_diameterSTD=p_diam_err, max_deadweightSTD=max_deadweightSTD,
propeller_diameter=p_diam,
propeller_diameterSTD=p_diam_err,
service_speed=speed, service_speedSTD=speed_err) service_speed=speed, service_speedSTD=speed_err)
shipchar[propertySTD] = 0 shipchar[propertySTD] = 0
return shipchar return shipchar
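Read off the code above, the forward regressions are approximately beam = 1.78*dw**0.27, draught = 0.80*dw**0.24, service_speed = 1.14*dw**0.21 and propeller_diameter = 0.12*length**0.75, with length itself inferred from the propeller-diameter inverse as roughly 3.45*dw**0.40. The sketch below only restates those point estimates and is not the module's function:
def ship_point_estimates(max_deadweight):
    # rough point estimates implied by the inverse maps in getshipchar (illustrative)
    dw = float(max_deadweight)
    length = 3.45 * dw ** 0.40            # inferred from the propeller-diameter inverse
    return dict(beam=1.78 * dw ** 0.27,
                draught=0.80 * dw ** 0.24,
                service_speed=1.14 * dw ** 0.21,
                length=length,
                propeller_diameter=0.12 * length ** 0.75)

print(ship_point_estimates(1000))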
def betaloge(z, w): def betaloge(z, w):
''' '''
Natural Logarithm of beta function. Natural Logarithm of beta function.
@ -1595,6 +1645,7 @@ def betaloge(z, w):
# (-(zpw-0.5).*log(zpw) +(w-0.5).*log(w)+(z-0.5).*log(z) +0.5*log(2*pi)) # (-(zpw-0.5).*log(zpw) +(w-0.5).*log(w)+(z-0.5).*log(z) +0.5*log(2*pi))
# return y # return y
def gravity(phi=45): def gravity(phi=45):
''' Returns the constant acceleration of gravity ''' Returns the constant acceleration of gravity
@ -1638,7 +1689,9 @@ def gravity(phi=45):
''' '''
phir = phi * pi / 180. # change from degrees to radians phir = phi * pi / 180. # change from degrees to radians
return 9.78049 * (1. + 0.0052884 * sin(phir) ** 2. - 0.0000059 * sin(2 * phir) ** 2.) return 9.78049 * (1. + 0.0052884 * sin(phir) ** 2. -
0.0000059 * sin(2 * phir) ** 2.)
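A quick numerical check of the formula at a few latitudes (the expected values are standard ballpark figures for the international gravity formula):
import numpy as np

def gravity_at(phi_deg):
    phir = np.radians(phi_deg)
    return 9.78049 * (1. + 0.0052884 * np.sin(phir) ** 2 -
                      0.0000059 * np.sin(2 * phir) ** 2)

print([gravity_at(p) for p in (0., 45., 90.)])   # ~9.780, ~9.806, ~9.832 m/s**2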
def nextpow2(x): def nextpow2(x):
''' '''
@ -1662,6 +1715,7 @@ def nextpow2(x):
n = n - 1 n = n - 1
return n return n
def discretize(fun, a, b, tol=0.005, n=5, method='linear'): def discretize(fun, a, b, tol=0.005, n=5, method='linear'):
''' '''
Automatic discretization of function Automatic discretization of function
@ -1702,13 +1756,13 @@ def discretize(fun, a, b, tol=0.005, n=5, method='linear'):
else: else:
return _discretize_linear(fun, a, b, tol, n) return _discretize_linear(fun, a, b, tol, n)
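Typical use, following the _test_discretize helper further down. The import path and the 'adaptive' method string are assumptions about how this module is packaged and how the non-linear branch is selected; both gridding variants return the abscissas together with fun evaluated on them:
import numpy as np
from wafo.misc import discretize   # assumed import path for this module

x, y = discretize(np.cos, 0, np.pi, tol=0.005)             # linear gridding
xa, ya = discretize(np.cos, 0, np.pi, method='adaptive')   # adaptive gridding (assumed keyword value)
print(len(x), len(xa))   # the adaptive grid refines where cos curves the most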
def _discretize_linear(fun, a, b, tol=0.005, n=5): def _discretize_linear(fun, a, b, tol=0.005, n=5):
''' '''
Automatic discretization of function, linear gridding Automatic discretization of function, linear gridding
''' '''
tiny = floatinfo.tiny tiny = floatinfo.tiny
x = linspace(a, b, n) x = linspace(a, b, n)
y = fun(x) y = fun(x)
@ -1726,6 +1780,7 @@ def _discretize_linear(fun, a, b, tol=0.005, n=5):
err = 0.5 * amax(abs((y00 - y) / (abs(y00 + y) + tiny))) err = 0.5 * amax(abs((y00 - y) / (abs(y00 + y) + tiny)))
return x, y return x, y
def _discretize_adaptive(fun, a, b, tol=0.005, n=5): def _discretize_adaptive(fun, a, b, tol=0.005, n=5):
''' '''
Automatic discretization of function, adaptive gridding. Automatic discretization of function, adaptive gridding.
@ -1747,7 +1802,8 @@ def _discretize_adaptive(fun, a, b, tol=0.005, n=5):
I, = where(erri > tol) I, = where(erri > tol)
# double the sample rate in intervals with the most error # double the sample rate in intervals with the most error
y = (vstack(((x[I] + x[I - 1]) / 2, (x[I + 1] + x[I]) / 2)).T).ravel() y = (vstack(((x[I] + x[I - 1]) / 2,
(x[I + 1] + x[I]) / 2)).T).ravel()
fy = fun(y) fy = fun(y)
fy0 = interp(y, x, fx) fy0 = interp(y, x, fx)
@ -1903,10 +1959,10 @@ def meshgrid(*xi, **kwargs):
sparse = kwargs.get('sparse', False) sparse = kwargs.get('sparse', False)
indexing = kwargs.get('indexing', 'xy') # 'ij' indexing = kwargs.get('indexing', 'xy') # 'ij'
ndim = len(args) ndim = len(args)
s0 = (1,) * ndim s0 = (1,) * ndim
output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::]) for i, x in enumerate(args)] output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(args)]
shape = [x.size for x in output] shape = [x.size for x in output]
@ -1938,6 +1994,7 @@ def ndgrid(*args, **kwargs):
kwargs['indexing'] = 'ij' kwargs['indexing'] = 'ij'
return meshgrid(*args, ** kwargs) return meshgrid(*args, ** kwargs)
def trangood(x, f, min_n=None, min_x=None, max_x=None, max_n=inf): def trangood(x, f, min_n=None, min_x=None, max_x=None, max_n=inf):
""" """
Make sure transformation is efficient. Make sure transformation is efficient.
@ -2004,10 +2061,10 @@ def trangood(x, f, min_n=None, min_x=None, max_x=None, max_n=inf):
L = float(xn - x0) L = float(xn - x0)
eps = floatinfo.eps eps = floatinfo.eps
if ((nf < min_n) or (max_n < nf) or any(abs(ddx) > 10 * eps * (L))): if ((nf < min_n) or (max_n < nf) or any(abs(ddx) > 10 * eps * (L))):
## % pab 07.01.2001: Always choose the stepsize df so that # % pab 07.01.2001: Always choose the stepsize df so that
## % it is an exactly representable number. # % it is an exactly representable number.
## % This is important when calculating numerical derivatives and is # % This is important when calculating numerical derivatives and is
## % accomplished by the following. # % accomplished by the following.
dx = L / (min(min_n, max_n) - 1) dx = L / (min(min_n, max_n) - 1)
dx = (dx + 2.) - 2. dx = (dx + 2.) - 2.
xi = arange(x0, xn + dx / 2., dx) xi = arange(x0, xn + dx / 2., dx)
@ -2033,6 +2090,7 @@ def trangood(x, f, min_n=None, min_x=None, max_x=None, max_n=inf):
return xo, fo return xo, fo
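The (dx + 2.) - 2. step above snaps dx onto the float grid around 2, so the chosen spacing is exactly representable and further rounding leaves it unchanged; a tiny illustration:
dx = (3.1 - 0.0) / 63.                    # an awkward raw step size
dx_nice = (dx + 2.) - 2.                  # snapped to the float spacing near 2
print(dx_nice == (dx_nice + 2.) - 2.)     # True: re-applying the rounding changes nothing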
def tranproc(x, f, x0, *xi): def tranproc(x, f, x0, *xi):
""" """
Transforms process X and up to four derivatives Transforms process X and up to four derivatives
@ -2108,16 +2166,17 @@ def tranproc(x, f, x0, *xi):
y = [y0] y = [y0]
hn = xo[1] - xo[0] hn = xo[1] - xo[0]
if hn ** N < sqrt(eps): if hn ** N < sqrt(eps):
print('Numerical problems may occur for the derivatives in tranproc.') msg = ('Numerical problems may occur for the derivatives in ' +
warnings.warn('The sampling of the transformation may be too small.') 'tranproc.\nThe sampling of the transformation may be too small.')
warnings.warn(msg)
#% Transform X with the derivatives of f. # Transform X with the derivatives of f.
fxder = zeros((N, x0.size)) fxder = zeros((N, x0.size))
fder = vstack((xo, fo)) fder = vstack((xo, fo))
for k in range(N): #% Derivation of f(x) using a difference method. for k in range(N): # Derivation of f(x) using a difference method.
n = fder.shape[-1] n = fder.shape[-1]
#%fder = [(fder(1:n-1,1)+fder(2:n,1))/2 diff(fder(:,2))./diff(fder(:,1))] fder = vstack([(fder[0, 0:n - 1] + fder[0, 1:n]) / 2,
fder = vstack([(fder[0, 0:n - 1] + fder[0, 1:n]) / 2, diff(fder[1, :]) / hn]) diff(fder[1, :]) / hn])
fxder[k] = tranproc(fder[0], fder[1], x0) fxder[k] = tranproc(fder[0], fder[1], x0)
# Calculate the transforms of the derivatives of X. # Calculate the transforms of the derivatives of X.
@ -2141,14 +2200,18 @@ def tranproc(x, f, x0, *xi):
# Fourth time derivative of y: # Fourth time derivative of y:
# y4 = f''''(x)*x1.^4+f'(x)*x4 # y4 = f''''(x)*x1.^4+f'(x)*x4
# +6*f'''(x)*x1^2*x2+f''(x)*(3*x2^2+4x1*x3) # +6*f'''(x)*x1^2*x2+f''(x)*(3*x2^2+4x1*x3)
y4 = (fxder[3] * xi[0] ** 4. + fxder[0] * xi[3] + \ y4 = (fxder[3] * xi[0] ** 4. + fxder[0] * xi[3] +
6. * fxder[2] * xi[0] ** 2. * xi[1] + \ 6. * fxder[2] * xi[0] ** 2. * xi[1] +
fxder[1] * (3. * xi[1] ** 2. + 4. * xi[0] * xi[1])) fxder[1] * (3. * xi[1] ** 2. + 4. * xi[0] * xi[1]))
y.append(y4) y.append(y4)
if N > 4: if N > 4:
warnings.warn('Transformation of derivatives of order>4 not supported.') warnings.warn('Transformation of derivatives of ' +
'order>4 not supported.')
return y # y0,y1,y2,y3,y4 return y # y0,y1,y2,y3,y4
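The y1..y4 expressions are the chain rule for d^k/dt^k of f(x(t)); a short symbolic check (illustrative only, SymPy is not a dependency of this module):
import sympy as sp

t = sp.symbols('t')
f, x = sp.Function('f'), sp.Function('x')
y = f(x(t))
print(sp.diff(y, t))                 # f'(x)*x1
print(sp.expand(sp.diff(y, t, 2)))   # f''(x)*x1**2 + f'(x)*x2
print(sp.expand(sp.diff(y, t, 4)))   # matches the fourth-derivative comment above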
def good_bins(data=None, range=None, num_bins=None, num_data=None, odd=False, loose=True): #@ReservedAssignment
def good_bins(data=None, range=None, num_bins=None, # @ReservedAssignment
num_data=None, odd=False, loose=True):
''' Return good bins for histogram ''' Return good bins for histogram
Parameters Parameters
@ -2158,7 +2221,8 @@ def good_bins(data=None, range=None, num_bins=None, num_data=None, odd=False, lo
range : (float, float) range : (float, float)
minimum and maximum range of bins (default data.min(), data.max()) minimum and maximum range of bins (default data.min(), data.max())
num_bins : scalar integer num_bins : scalar integer
approximate number of bins wanted (default depending on num_data=len(data)) approximate number of bins wanted
(default depending on num_data=len(data))
odd : bool odd : bool
placement of bins (0 or 1) (default 0) placement of bins (0 or 1) (default 0)
loose : bool loose : bool
@ -2188,7 +2252,7 @@ def good_bins(data=None, range=None, num_bins=None, num_data=None, odd=False, lo
num_bins = np.ceil(4 * np.sqrt(np.sqrt(num_data))) num_bins = np.ceil(4 * np.sqrt(np.sqrt(num_data)))
d = float(mx - mn) / num_bins * 2 d = float(mx - mn) / num_bins * 2
e = np.floor(np.log(d) / np.log(10)); e = np.floor(np.log(d) / np.log(10))
m = np.floor(d / 10 ** e) m = np.floor(d / 10 ** e)
if m > 5: if m > 5:
m = 5 m = 5
@ -2201,7 +2265,9 @@ def good_bins(data=None, range=None, num_bins=None, num_data=None, odd=False, lo
limits = np.arange(mn, mx + d / 2, d) limits = np.arange(mn, mx + d / 2, d)
return limits return limits
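The visible part of the width selection snaps the raw spacing d down to a "nice" value m*10**e (with m capped at 5 in the branch shown; the remaining branches fall outside this hunk). A toy version of that rounding step with made-up numbers:
import numpy as np

d = (7.3 - 0.0) / 10 * 2                  # raw spacing, as computed above
e = np.floor(np.log(d) / np.log(10))
m = min(np.floor(d / 10 ** e), 5)         # cap m as in the branch shown above
print(m * 10 ** e)                        # nice spacing of the form m*10**e -> 1.0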
def plot_histgrm(data, bins=None, range=None, normed=False, weights=None, lintype='b-'): #@ReservedAssignment
def plot_histgrm(data, bins=None, range=None, # @ReservedAssignment
normed=False, weights=None, lintype='b-'):
''' '''
Plot histogram Plot histogram
@ -2254,7 +2320,9 @@ def plot_histgrm(data, bins=None, range=None, normed=False, weights=None, lintyp
if bins is None: if bins is None:
bins = np.ceil(4 * np.sqrt(np.sqrt(len(x)))) bins = np.ceil(4 * np.sqrt(np.sqrt(len(x))))
bin_, limits = np.histogram(data, bins=bins, normed=normed, weights=weights) #, new=True) #, new=True)
bin_, limits = np.histogram(
data, bins=bins, normed=normed, weights=weights)
limits.shape = (-1, 1) limits.shape = (-1, 1)
xx = limits.repeat(3, axis=1) xx = limits.repeat(3, axis=1)
xx.shape = (-1,) xx.shape = (-1,)
@ -2267,6 +2335,7 @@ def plot_histgrm(data, bins=None, range=None, normed=False, weights=None, lintyp
yy = np.hstack((yy, 0.0)) yy = np.hstack((yy, 0.0))
return plotbackend.plot(xx, yy, lintype, limits, limits * 0) return plotbackend.plot(xx, yy, lintype, limits, limits * 0)
def num2pistr(x, n=3): def num2pistr(x, n=3):
''' '''
Convert a scalar to a text string in fractions of pi Convert a scalar to a text string in fractions of pi
@ -2303,6 +2372,7 @@ def num2pistr(x, n=3):
xtxt = format % x xtxt = format % x
return xtxt return xtxt
def fourier(data, t=None, T=None, m=None, n=None, method='trapz'): def fourier(data, t=None, T=None, m=None, n=None, method='trapz'):
''' '''
Returns Fourier coefficients. Returns Fourier coefficients.
@ -2314,7 +2384,8 @@ def fourier(data, t=None, T=None, m=None, n=None, method='trapz'):
t : array-like t : array-like
vector with n values indexed from 1 to N. vector with n values indexed from 1 to N.
T : real scalar T : real scalar
        primitive period of signal, i.e., smallest period. (default T = t[-1]-t[0])        primitive period of signal, i.e., smallest period.
        (default T = t[-1]-t[0])
m : scalar integer m : scalar integer
defines no of harmonics desired (default M = N) defines no of harmonics desired (default M = N)
n : scalar integer n : scalar integer
@ -2398,7 +2469,7 @@ def fourier(data, t=None, T=None, m=None, n=None, method='trapz'):
# #
# #
# #
# # Fourier coefficients by fft # Fourier coefficients by fft
# Fcof1 = 2*ifft(x(1:N1,:),[],1); # Fcof1 = 2*ifft(x(1:N1,:),[],1);
# Pcor = [1; exp(sqrt(-1)*(1:M-1).'*t(1))]; % correction term to get # Pcor = [1; exp(sqrt(-1)*(1:M-1).'*t(1))]; % correction term to get
# % the correct integration limits # % the correct integration limits
@ -2409,10 +2480,10 @@ def fourier(data, t=None, T=None, m=None, n=None, method='trapz'):
return a, b return a, b
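With the default trapz method the coefficients are trapezoidal estimates of the standard Fourier-series integrals a_k = 2/T * int(data*cos(2*pi*k*t/T)) and b_k likewise with sin. A compact sketch (illustrative; the module's fourier() additionally handles array shapes, m, n and the defaults):
import numpy as np

def fourier_trapz(data, t, num_harmonics):
    T = t[-1] - t[0]
    w = 2 * np.pi / T
    a = [2 / T * np.trapz(data * np.cos(w * k * t), t) for k in range(num_harmonics)]
    b = [2 / T * np.trapz(data * np.sin(w * k * t), t) for k in range(num_harmonics)]
    return np.array(a), np.array(b)

t = np.linspace(0, 2 * np.pi, 257)
a, b = fourier_trapz(np.sin(t), t, 3)
print(np.round(a, 3), np.round(b, 3))   # only b[1] is (close to) 1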
def _test_find_cross(): def _test_find_cross():
t = findcross([0, 0, 1, -1, 1], 0) # @UnusedVariable t = findcross([0, 0, 1, -1, 1], 0) # @UnusedVariable
def _test_common_shape(): def _test_common_shape():
A = ones((4, 1)) A = ones((4, 1))
@ -2443,18 +2514,20 @@ def _test_meshgrid():
yv[0, 0] = 10 yv[0, 0] = 10
print(xv) print(xv)
print(yv) print(yv)
## >>> xv # >>> xv
## array([[ 0. , 0.5, 1. ]]) ## array([[ 0. , 0.5, 1. ]])
## >>> yv # >>> yv
## array([[ 0.], # array([[ 0.],
## [ 1.]]) # [ 1.]])
## array([[-1. , -0.5, 1. , 4. , 5. ], # array([[-1. , -0.5, 1. , 4. , 5. ],
## [-1. , -0.5, 1. , 4. , 5. ], ## [-1. , -0.5, 1. , 4. , 5. ],
## [-1. , -0.5, 1. , 4. , 5. ]]) # [-1. , -0.5, 1. , 4. , 5. ]])
## #
## array([[ 0., 0., 0., 0., 0.], # array([[ 0., 0., 0., 0., 0.],
## [-2., -2., -2., -2., -2.], ## [-2., -2., -2., -2., -2.],
## [-5., -5., -5., -5., -5.]]) # [-5., -5., -5., -5., -5.]])
def _test_tranproc(): def _test_tranproc():
import wafo.transform.models as wtm import wafo.transform.models as wtm
tr = wtm.TrHermite() tr = wtm.TrHermite()
@ -2466,14 +2539,19 @@ def _test_tranproc():
#>>> plot(g(:,1),[g(:,2),gder(:,2)]) #>>> plot(g(:,1),[g(:,2),gder(:,2)])
#>>> plot(g(:,1),pdfnorm(g(:,2)).*gder(:,2),g(:,1),pdfnorm(g(:,1))) #>>> plot(g(:,1),pdfnorm(g(:,2)).*gder(:,2),g(:,1),pdfnorm(g(:,1)))
#>>> legend('Transformed model','Gaussian model') #>>> legend('Transformed model','Gaussian model')
def _test_detrend(): def _test_detrend():
import pylab as plb import pylab as plb
cos = plb.cos;randn = plb.randn cos = plb.cos
randn = plb.randn
x = linspace(0, 1, 200) x = linspace(0, 1, 200)
y = exp(x) + cos(5 * 2 * pi * x) + 1e-1 * randn(x.size) y = exp(x) + cos(5 * 2 * pi * x) + 1e-1 * randn(x.size)
y0 = detrendma(y, 20);tr = y - y0 y0 = detrendma(y, 20)
tr = y - y0
plb.plot(x, y, x, y0, 'r', x, exp(x), 'k', x, tr, 'm') plb.plot(x, y, x, y0, 'r', x, exp(x), 'k', x, tr, 'm')
def _test_extrema(): def _test_extrema():
import pylab as pb import pylab as pb
from pylab import plot from pylab import plot
@ -2485,7 +2563,6 @@ def _test_extrema():
_ind1 = findrfc(tp, 0.3) _ind1 = findrfc(tp, 0.3)
def _test_discretize(): def _test_discretize():
import pylab as plb import pylab as plb
x, y = discretize(cos, 0, pi) x, y = discretize(cos, 0, pi)
@ -2501,6 +2578,7 @@ def _test_stirlerr():
print getshipchar(1000) print getshipchar(1000)
print betaloge(3, 2) print betaloge(3, 2)
def _test_parse_kwargs(): def _test_parse_kwargs():
opt = dict(arg1=1, arg2=3) opt = dict(arg1=1, arg2=3)
print opt print opt
@ -2519,6 +2597,7 @@ def _test_parse_kwargs():
out1 = testfun(opt0['opt1'], **opt0) out1 = testfun(opt0['opt1'], **opt0)
print out1 print out1
def test_docstrings(): def test_docstrings():
import doctest import doctest
doctest.testmod() doctest.testmod()
