master
Per.Andreas.Brodtkorb 11 years ago
parent 31f80c5798
commit 5c84825641

@ -2,19 +2,19 @@ from __future__ import division
import warnings import warnings
import copy import copy
import numpy as np import numpy as np
from numpy import pi, sqrt, ones, zeros #@UnresolvedImport from numpy import pi, sqrt, ones, zeros # @UnresolvedImport
from scipy import integrate as intg from scipy import integrate as intg
import scipy.special.orthogonal as ort import scipy.special.orthogonal as ort
from scipy import special as sp from scipy import special as sp
from wafo.plotbackend import plotbackend as plt from wafo.plotbackend import plotbackend as plt
from scipy.integrate import simps, trapz #@UnusedImport from scipy.integrate import simps, trapz # @UnusedImport
from wafo.misc import is_numlike from wafo.misc import is_numlike
from wafo.demos import humps from wafo.demos import humps
_POINTS_AND_WEIGHTS = {} _POINTS_AND_WEIGHTS = {}
__all__ = ['dea3', 'clencurt', 'romberg', __all__ = ['dea3', 'clencurt', 'romberg',
'h_roots','j_roots', 'la_roots','p_roots','qrule', 'h_roots', 'j_roots', 'la_roots', 'p_roots', 'qrule',
'gaussq', 'richardson', 'quadgr', 'qdemo'] 'gaussq', 'richardson', 'quadgr', 'qdemo']
@ -71,11 +71,11 @@ def dea3(v0, v1, v2):
''' '''
E0, E1, E2 = np.atleast_1d(v0, v1, v2) E0, E1, E2 = np.atleast_1d(v0, v1, v2)
abs = np.abs #@ReservedAssignment abs = np.abs # @ReservedAssignment
max = np.maximum #@ReservedAssignment max = np.maximum # @ReservedAssignment
ten = 10.0 ten = 10.0
one = ones(1) one = ones(1)
small = np.finfo(float).eps #1.0e-16 #spacing(one) small = np.finfo(float).eps # 1.0e-16 #spacing(one)
delta2 = E2 - E1 delta2 = E2 - E1
delta1 = E1 - E0 delta1 = E1 - E0
err2 = abs(delta2) err2 = abs(delta2)
@ -87,7 +87,7 @@ def dea3(v0, v1, v2):
abserr = result.copy() abserr = result.copy()
converged = (err1 <= tol1) & (err2 <= tol2).ravel() converged = (err1 <= tol1) & (err2 <= tol2).ravel()
k0, = converged.nonzero() k0, = converged.nonzero()
if k0.size > 0 : if k0.size > 0:
#%C IF E0, E1 AND E2 ARE EQUAL TO WITHIN MACHINE #%C IF E0, E1 AND E2 ARE EQUAL TO WITHIN MACHINE
#%C ACCURACY, CONVERGENCE IS ASSUMED. #%C ACCURACY, CONVERGENCE IS ASSUMED.
result[k0] = E2[k0] result[k0] = E2[k0]
@ -95,24 +95,26 @@ def dea3(v0, v1, v2):
k1, = (1 - converged).nonzero() k1, = (1 - converged).nonzero()
if k1.size > 0 : if k1.size > 0:
with warnings.catch_warnings(): with warnings.catch_warnings():
warnings.simplefilter("ignore") # ignore division by zero and overflow # ignore division by zero and overflow
warnings.simplefilter("ignore")
ss = one / delta2[k1] - one / delta1[k1] ss = one / delta2[k1] - one / delta1[k1]
smallE2 = (abs(ss * E1[k1]) <= 1.0e-3).ravel() smallE2 = (abs(ss * E1[k1]) <= 1.0e-3).ravel()
k2 = k1[smallE2.nonzero()] k2 = k1[smallE2.nonzero()]
if k2.size > 0 : if k2.size > 0:
result[k2] = E2[k2] result[k2] = E2[k2]
abserr[k2] = err1[k2] + err2[k2] + E2[k2] * small * ten abserr[k2] = err1[k2] + err2[k2] + E2[k2] * small * ten
k4, = (1 - smallE2).nonzero() k4, = (1 - smallE2).nonzero()
if k4.size > 0 : if k4.size > 0:
k3 = k1[k4] k3 = k1[k4]
result[k3] = E1[k3] + one / ss[k4] result[k3] = E1[k3] + one / ss[k4]
abserr[k3] = err1[k3] + err2[k3] + abs(result[k3] - E2[k3]) abserr[k3] = err1[k3] + err2[k3] + abs(result[k3] - E2[k3])
return result, abserr return result, abserr
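# Editor's note (not part of the commit): a minimal usage sketch of dea3 as
# defined above, assuming numpy is available as np from the module imports.
# Three trapezoidal estimates of int_0^{pi/2} sin(x) dx = 1 on successively
# refined grids are extrapolated to a much better value.
E = []
for k in range(3):
    xk = np.linspace(0, np.pi / 2., 2 ** (k + 5) + 1)
    E.append(np.trapz(np.sin(xk), xk))
result, abserr = dea3(E[0], E[1], E[2])
# result is far closer to 1.0 than E[2]; abserr is the accompanying error estimate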
def clencurt(fun, a, b, n0=5, trace=False, *args): def clencurt(fun, a, b, n0=5, trace=False, *args):
''' '''
Numerical evaluation of an integral, Clenshaw-Curtis method. Numerical evaluation of an integral, Clenshaw-Curtis method.
@ -161,7 +163,6 @@ def clencurt(fun, a, b, n0=5, trace=False, *args):
Numerische Matematik, Vol. 2, pp. 197--205 Numerische Matematik, Vol. 2, pp. 197--205
''' '''
#% make sure n is even #% make sure n is even
n = 2 * n0 n = 2 * n0
a, b = np.atleast_1d(a, b) a, b = np.atleast_1d(a, b)
@ -184,7 +185,8 @@ def clencurt(fun, a, b, n0=5, trace=False, *args):
x0 = np.flipud(fun[:, 0]) x0 = np.flipud(fun[:, 0])
n = len(x0) - 1 n = len(x0) - 1
if abs(x - x0) > 1e-8: if abs(x - x0) > 1e-8:
raise ValueError('Input vector x must equal cos(pi*s/n)*(b-a)/2+(b+a)/2') raise ValueError(
'Input vector x must equal cos(pi*s/n)*(b-a)/2+(b+a)/2')
f = np.flipud(fun[:, 1::]) f = np.flipud(fun[:, 1::])
@ -196,27 +198,26 @@ def clencurt(fun, a, b, n0=5, trace=False, *args):
f[0, :] = f[0, :] / 2 f[0, :] = f[0, :] / 2
f[n, :] = f[n, :] / 2 f[n, :] = f[n, :] / 2
## % x = cos(pi*0:n/n) # % x = cos(pi*0:n/n)
## % f = f(x) # % f = f(x)
## % # %
## % N+1 # % N+1
## % c(k) = (2/N) sum f''(n)*cos(pi*(2*k-2)*(n-1)/N), 1 <= k <= N/2+1. # % c(k) = (2/N) sum f''(n)*cos(pi*(2*k-2)*(n-1)/N), 1 <= k <= N/2+1.
## % n=1 # % n=1
fft = np.fft.fft fft = np.fft.fft
tmp = np.real(fft(f[:n, :], axis=0)) tmp = np.real(fft(f[:n, :], axis=0))
c = 2 / n * (tmp[0:n / 2 + 1, :] + np.cos(np.pi * s2) * f[n, :]) c = 2 / n * (tmp[0:n / 2 + 1, :] + np.cos(np.pi * s2) * f[n, :])
## % old call # % old call
## % c = 2/n * cos(s2*s'*pi/n) * f # % c = 2/n * cos(s2*s'*pi/n) * f
c[0, :] = c[0, :] / 2 c[0, :] = c[0, :] / 2
c[n / 2, :] = c[n / 2, :] / 2 c[n / 2, :] = c[n / 2, :] / 2
## % alternative call # % alternative call
## % c = dct(f) # % c = dct(f)
c = c[0:n / 2 + 1, :] / ((s2 - 1) * (s2 + 1)) c = c[0:n / 2 + 1, :] / ((s2 - 1) * (s2 + 1))
Q = (af - bf) * np.sum(c, axis=0) Q = (af - bf) * np.sum(c, axis=0)
#Q = (a-b).*sum( c(1:n/2+1,:)./repmat((s2-1).*(s2+1),1,Na)) # Q = (a-b).*sum( c(1:n/2+1,:)./repmat((s2-1).*(s2+1),1,Na))
abserr = (bf - af) * np.abs(c[n / 2, :]) abserr = (bf - af) * np.abs(c[n / 2, :])
@ -225,6 +226,7 @@ def clencurt(fun, a, b, n0=5, trace=False, *args):
Q = np.reshape(Q, a_shape) Q = np.reshape(Q, a_shape)
return Q, abserr return Q, abserr
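# Editor's note (not part of the commit): an illustrative sketch of clencurt as
# defined above, assuming numpy as np from the module imports. With n0=8 the
# rule uses 2*n0+1 Chebyshev nodes on [0, 2].
Q, abserr = clencurt(np.exp, 0, 2, n0=8)
# Q should be close to exp(2) - 1 ~ 6.389; abserr is a conservative error estimate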
def romberg(fun, a, b, releps=1e-3, abseps=1e-3): def romberg(fun, a, b, releps=1e-3, abseps=1e-3):
''' '''
Numerical integration with the Romberg method Numerical integration with the Romberg method
@ -282,14 +284,15 @@ def romberg(fun, a, b, releps=1e-3, abseps=1e-3):
Un5 = np.sum(fun(a + np.arange(1, 2 * ipower, 2) * h)) * h Un5 = np.sum(fun(a + np.arange(1, 2 * ipower, 2) * h)) * h
# trapezoidal approximations # trapezoidal approximations
#T2n = 0.5 * (Tn + Un) = 0.5*Tn + Un5 # T2n = 0.5 * (Tn + Un) = 0.5*Tn + Un5
rom[two, 0] = 0.5 * rom[one, 0] + Un5 rom[two, 0] = 0.5 * rom[one, 0] + Un5
fp[i] = 4 * fp[i - 1] fp[i] = 4 * fp[i - 1]
# Richardson extrapolation # Richardson extrapolation
for k in xrange(i): for k in xrange(i):
#rom(2,k+1)=(fp(k)*rom(2,k)-rom(1,k))/(fp(k)-1) # rom(2,k+1)=(fp(k)*rom(2,k)-rom(1,k))/(fp(k)-1)
rom[two, k + 1] = rom[two, k] + (rom[two, k] - rom[one, k]) / (fp[k] - 1) rom[two, k + 1] = rom[two, k] + \
(rom[two, k] - rom[one, k]) / (fp[k] - 1)
Ih1 = Ih2 Ih1 = Ih2
Ih2 = Ih4 Ih2 = Ih4
@ -308,6 +311,7 @@ def romberg(fun, a, b, releps=1e-3, abseps=1e-3):
ipower *= 2 ipower *= 2
return res, abserr return res, abserr
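# Editor's note (not part of the commit): an illustrative sketch of romberg as
# defined above, assuming numpy as np from the module imports.
q, err = romberg(np.sqrt, 0, 10, abseps=1e-4)
# q approximates int_0^10 sqrt(x) dx = (2/3)*10**1.5 ~ 21.08; err is the estimate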
def h_roots(n, method='newton'): def h_roots(n, method='newton'):
''' '''
Returns the roots (x) of the nth order Hermite polynomial, Returns the roots (x) of the nth order Hermite polynomial,
@ -351,7 +355,6 @@ def h_roots(n, method='newton'):
prentice-hall, Englewood cliffs, n.j. prentice-hall, Englewood cliffs, n.j.
''' '''
if not method.startswith('n'): if not method.startswith('n'):
return ort.h_roots(n) return ort.h_roots(n)
else: else:
@ -359,7 +362,7 @@ def h_roots(n, method='newton'):
max_iter = 10 max_iter = 10
releps = 3e-14 releps = 3e-14
C = [9.084064e-01, 5.214976e-02, 2.579930e-03, 3.986126e-03] C = [9.084064e-01, 5.214976e-02, 2.579930e-03, 3.986126e-03]
#PIM4=0.7511255444649425 # PIM4=0.7511255444649425
PIM4 = np.pi ** (-1. / 4) PIM4 = np.pi ** (-1. / 4)
# The roots are symmetric about the origin, so we have to # The roots are symmetric about the origin, so we have to
@ -378,7 +381,7 @@ def h_roots(n, method='newton'):
k0 = 0 k0 = 0
kp1 = 1 kp1 = 1
for _its in xrange(max_iter): for _its in xrange(max_iter):
# Newton's method carried out simultaneously on the roots.
L[k0, :] = 0 L[k0, :] = 0
L[kp1, :] = PIM4 L[kp1, :] = PIM4
@ -389,8 +392,8 @@ def h_roots(n, method='newton'):
k0 = kp1 k0 = kp1
kp1 = np.mod(kp1 + 1, 3) kp1 = np.mod(kp1 + 1, 3)
L[kp1, :] = z * sqrt(2 / j) * L[k0, :] - np.sqrt((j - 1) / j) * L[km1, :] L[kp1, :] = (z * sqrt(2 / j) * L[k0, :] -
np.sqrt((j - 1) / j) * L[km1, :])
# L now contains the desired Hermite polynomials. # L now contains the desired Hermite polynomials.
# We next compute pp, the derivatives, # We next compute pp, the derivatives,
@ -415,9 +418,10 @@ def h_roots(n, method='newton'):
w[n - 1:n - m - 1:-1] = w[0:m] # and its symmetric counterpart. w[n - 1:n - m - 1:-1] = w[0:m] # and its symmetric counterpart.
return x, w return x, w
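# Editor's note (not part of the commit): a Gauss-Hermite sketch using h_roots
# as defined above (weight exp(-x**2) on (-inf, inf)), assuming numpy as np.
x, w = h_roots(10)
val = np.sum(w * x ** 2)
# val ~ sqrt(pi)/2 ~ 0.8862, the exact value of int x**2*exp(-x**2) dx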
def j_roots(n, alpha, beta, method='newton'): def j_roots(n, alpha, beta, method='newton'):
''' '''
Returns the roots (x) of the nth order Jacobi polynomial, P^(alpha,beta)_n(x) Returns the roots of the nth order Jacobi polynomial, P^(alpha,beta)_n(x)
and weights (w) to use in Gaussian Quadrature over [-1,1] with weighting and weights (w) to use in Gaussian Quadrature over [-1,1] with weighting
function (1-x)**alpha (1+x)**beta with alpha,beta > -1. function (1-x)**alpha (1+x)**beta with alpha,beta > -1.
@ -471,21 +475,21 @@ def j_roots(n, alpha, beta, method='newton'):
# Initial approximations to the roots go into z. # Initial approximations to the roots go into z.
alfbet = alpha + beta alfbet = alpha + beta
z = np.cos(np.pi * (np.arange(1, n + 1) - 0.25 + 0.5 * alpha) /
           (n + 0.5 * (alfbet + 1)))
L = zeros((3, len(z))) L = zeros((3, len(z)))
k0 = 0 k0 = 0
kp1 = 1 kp1 = 1
for _its in xrange(max_iter): for _its in xrange(max_iter):
#Newton's method carried out simultaneously on the roots. # Newton's method carried out simultaneously on the roots.
tmp = 2 + alfbet tmp = 2 + alfbet
L[k0, :] = 1 L[k0, :] = 1
L[kp1, :] = (alpha - beta + tmp * z) / 2 L[kp1, :] = (alpha - beta + tmp * z) / 2
for j in xrange(2, n + 1): for j in xrange(2, n + 1):
#Loop up the recurrence relation to get the Jacobi # Loop up the recurrence relation to get the Jacobi
#polynomials evaluated at z. # polynomials evaluated at z.
km1 = k0 km1 = k0
k0 = kp1 k0 = kp1
kp1 = np.mod(kp1 + 1, 3) kp1 = np.mod(kp1 + 1, 3)
@ -497,31 +501,33 @@ def j_roots(n, alpha, beta, method='newton'):
L[kp1, :] = (b * L[k0, :] - c * L[km1, :]) / a L[kp1, :] = (b * L[k0, :] - c * L[km1, :]) / a
#L now contains the desired Jacobi polynomials. # L now contains the desired Jacobi polynomials.
#We next compute pp, the derivatives with a standard # We next compute pp, the derivatives with a standard
# relation involving the polynomials of one lower order. # relation involving the polynomials of one lower order.
pp = (n * (alpha - beta - tmp * z) * L[kp1, :] + 2 * (n + alpha) * (n + beta) * L[k0, :]) / (tmp * (1 - z ** 2)) pp = (n * (alpha - beta - tmp * z) * L[kp1, :] +
2 * (n + alpha) * (n + beta) * L[k0, :]) / (tmp * (1 - z ** 2))
dz = L[kp1, :] / pp dz = L[kp1, :] / pp
z = z - dz # Newton's formula. z = z - dz # Newton's formula.
if not any(abs(dz) > releps * abs(z)): if not any(abs(dz) > releps * abs(z)):
break break
else: else:
warnings.warn('too many iterations in jrule') warnings.warn('too many iterations in jrule')
x = z # %Store the root and the weight. x = z # %Store the root and the weight.
f = (sp.gammaln(alpha + n) + sp.gammaln(beta + n) -
     sp.gammaln(n + 1) - sp.gammaln(alpha + beta + n + 1))
w = (np.exp(f) * tmp * 2 ** alfbet / (pp * L[k0, :]))
return x, w return x, w
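# Editor's note (not part of the commit): a Gauss-Jacobi sketch using j_roots
# as defined above (weight (1-x)**alpha*(1+x)**beta on [-1, 1]), assuming numpy as np.
x, w = j_roots(5, alpha=1, beta=1)
total = np.sum(w)
# total ~ 4/3, since int_{-1}^{1} (1-x)*(1+x) dx = 4/3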
def la_roots(n, alpha=0, method='newton'): def la_roots(n, alpha=0, method='newton'):
''' '''
Returns the roots (x) of the nth order generalized (associated) Laguerre Returns the roots (x) of the nth order generalized (associated) Laguerre
polynomial, L^(alpha)_n(x), and weights (w) to use in Gaussian quadrature over polynomial, L^(alpha)_n(x), and weights (w) to use in Gaussian quadrature
[0,inf] with weighting function exp(-x) x**alpha with alpha > -1. over [0,inf] with weighting function exp(-x) x**alpha with alpha > -1.
Parameters Parameters
---------- ----------
@ -597,8 +603,9 @@ def la_roots(n, alpha=0, method='newton'):
k0 = kp1 k0 = kp1
kp1 = np.mod(kp1 + 1, 3) kp1 = np.mod(kp1 + 1, 3)
L[kp1, k] = ((2 * jj - 1 + alpha - z[k]) * L[k0, k] - (jj - 1 + alpha) * L[km1, k]) / jj L[kp1, k] = ((2 * jj - 1 + alpha - z[k]) * L[
#end k0, k] - (jj - 1 + alpha) * L[km1, k]) / jj
# end
#%L now contains the desired Laguerre polynomials. #%L now contains the desired Laguerre polynomials.
#%We next compute pp, the derivatives with a standard #%We next compute pp, the derivatives with a standard
#% relation involving the polynomials of one lower order. #% relation involving the polynomials of one lower order.
@ -607,10 +614,9 @@ def la_roots(n, alpha=0, method='newton'):
pp[k] = (n * L[kp1, k] - (n + alpha) * Lp[k]) / z[k] pp[k] = (n * L[kp1, k] - (n + alpha) * Lp[k]) / z[k]
dz[k] = L[kp1, k] / pp[k] dz[k] = L[kp1, k] / pp[k]
z[k] = z[k] - dz[k]  # Newton's formula.
#%k = find((abs(dz) > releps.*z)) #%k = find((abs(dz) > releps.*z))
if not np.any(abs(dz) > releps): if not np.any(abs(dz) > releps):
break break
else: else:
@ -620,7 +626,8 @@ def la_roots(n, alpha=0, method='newton'):
w = -np.exp(sp.gammaln(alpha + n) - sp.gammaln(n)) / (pp * n * Lp) w = -np.exp(sp.gammaln(alpha + n) - sp.gammaln(n)) / (pp * n * Lp)
return x, w return x, w
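# Editor's note (not part of the commit): a Gauss-Laguerre sketch using la_roots
# as defined above (weight exp(-x)*x**alpha on [0, inf)), assuming numpy as np.
x, w = la_roots(8, alpha=0)
val = np.sum(w * x ** 2)
# val ~ 2.0 == Gamma(3), the exact value of int_0^inf x**2*exp(-x) dx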
def p_roots(n, method='newton', a= -1, b=1):
def p_roots(n, method='newton', a=-1, b=1):
''' '''
Returns the roots (x) of the nth order Legendre polynomial, P_n(x), Returns the roots (x) of the nth order Legendre polynomial, P_n(x),
and weights (w) to use in Gaussian Quadrature over [-1,1] with weighting and weights (w) to use in Gaussian Quadrature over [-1,1] with weighting
@ -658,8 +665,8 @@ def p_roots(n, method='newton', a= -1, b=1):
References References
---------- ----------
[1] Davis and Rabinowitz (1975) 'Methods of Numerical Integration', page 365, [1] Davis and Rabinowitz (1975) 'Methods of Numerical Integration',
Academic Press. page 365, Academic Press.
[2] Golub, G. H. and Welsch, J. H. (1969) [2] Golub, G. H. and Welsch, J. H. (1969)
'Calculation of Gaussian Quadrature Rules' 'Calculation of Gaussian Quadrature Rules'
@ -685,7 +692,6 @@ def p_roots(n, method='newton', a= -1, b=1):
# Compute the zeros of the N+1 Legendre Polynomial # Compute the zeros of the N+1 Legendre Polynomial
# using the recursion relation and the Newton-Raphson method # using the recursion relation and the Newton-Raphson method
# Legendre-Gauss Polynomials # Legendre-Gauss Polynomials
L = zeros((3, m)) L = zeros((3, m))
@ -698,7 +704,8 @@ def p_roots(n, method='newton', a= -1, b=1):
# Compute the zeros of the N+1 Legendre Polynomial # Compute the zeros of the N+1 Legendre Polynomial
# using the recursion relation and the Newton-Raphson method # using the recursion relation and the Newton-Raphson method
# Iterate until new points are uniformly within epsilon of old points # Iterate until new points are uniformly within epsilon of old
# points
k = slice(m) k = slice(m)
k0 = 0 k0 = 0
kp1 = 1 kp1 = 1
@ -710,7 +717,8 @@ def p_roots(n, method='newton', a= -1, b=1):
km1 = k0 km1 = k0
k0 = kp1 k0 = kp1
kp1 = np.mod(k0 + 1, 3) kp1 = np.mod(k0 + 1, 3)
L[kp1, k] = ((2 * jj - 1) * xo[k] * L[k0, k] - (jj - 1) * L[km1, k]) / jj L[kp1, k] = ((2 * jj - 1) * xo[k] * L[
k0, k] - (jj - 1) * L[km1, k]) / jj
Lp[k] = n * (L[k0, k] - xo[k] * L[kp1, k]) / (1 - xo[k] ** 2) Lp[k] = n * (L[k0, k] - xo[k] * L[kp1, k]) / (1 - xo[k] ** 2)
@ -747,14 +755,18 @@ def p_roots(n, method='newton', a= -1, b=1):
d4pn = (6. * xo * d3pn + (6 - e1) * d2pn) / den d4pn = (6. * xo * d3pn + (6 - e1) * d2pn) / den
u = pk / dpn u = pk / dpn
v = d2pn / dpn v = d2pn / dpn
h = -u * (1 + (.5 * u) * (v + u * (v * v - u * d3pn / (3 * dpn)))) h = (-u * (1 + (.5 * u) * (v + u *
p = pk + h * (dpn + (.5 * h) * (d2pn + (h / 3) * (d3pn + .25 * h * d4pn))) (v * v - u * d3pn / (3 * dpn)))))
p = (pk + h * (dpn + (.5 * h) * (d2pn + (h / 3) *
(d3pn + .25 * h * d4pn))))
dp = dpn + h * (d2pn + (.5 * h) * (d3pn + h * d4pn / 3)) dp = dpn + h * (d2pn + (.5 * h) * (d3pn + h * d4pn / 3))
h = h - p / dp h = h - p / dp
xo = xo + h xo = xo + h
x = -xo - h x = -xo - h
fx = d1 - h * e1 * (pk + (h / 2) * (dpn + (h / 3) * (d2pn + (h / 4) * (d3pn + (.2 * h) * d4pn)))) fx = (d1 - h * e1 * (pk + (h / 2) *
(dpn + (h / 3) * (d2pn + (h / 4) *
(d3pn + (.2 * h) * d4pn)))))
w = 2 * (1 - x ** 2) / (fx ** 2) w = 2 * (1 - x ** 2) / (fx ** 2)
if (m + m) > n: if (m + m) > n:
@ -766,7 +778,6 @@ def p_roots(n, method='newton', a= -1, b=1):
x = np.hstack((x, -x[m - 1::-1])) x = np.hstack((x, -x[m - 1::-1]))
w = np.hstack((w, w[m - 1::-1])) w = np.hstack((w, w[m - 1::-1]))
if (a != -1) | (b != 1): if (a != -1) | (b != 1):
# Linear map from[-1,1] to [a,b] # Linear map from[-1,1] to [a,b]
dh = (b - a) / 2 dh = (b - a) / 2
@ -775,6 +786,7 @@ def p_roots(n, method='newton', a= -1, b=1):
return x, w return x, w
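# Editor's note (not part of the commit): a Gauss-Legendre sketch using p_roots
# as defined above with the optional [a, b] mapping, assuming numpy as np.
x, w = p_roots(11, 'newton', a=0, b=3)
val = np.sum(np.exp(x) * w)
# val ~ exp(3) - 1 ~ 19.0855, matching the reference value in the qdemo docstring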
def qrule(n, wfun=1, alpha=0, beta=0): def qrule(n, wfun=1, alpha=0, beta=0):
''' '''
Return nodes and weights for Gaussian quadratures. Return nodes and weights for Gaussian quadratures.
@ -839,7 +851,7 @@ def qrule(n, wfun=1, alpha=0, beta=0):
[bp, wf] = h_roots(n) [bp, wf] = h_roots(n)
elif wfun == 3: # Generalized Laguerre elif wfun == 3: # Generalized Laguerre
[bp, wf] = la_roots(n, alpha) [bp, wf] = la_roots(n, alpha)
elif wfun == 4: #Gauss-Jacobi elif wfun == 4: # Gauss-Jacobi
[bp, wf] = j_roots(n, alpha, beta) [bp, wf] = j_roots(n, alpha, beta)
elif wfun == 5: # p(x)=1/sqrt((x-a)*(b-x)), a=-1 and b=1 (default) elif wfun == 5: # p(x)=1/sqrt((x-a)*(b-x)), a=-1 and b=1 (default)
jj = np.arange(1, n + 1) jj = np.arange(1, n + 1)
@ -885,7 +897,8 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
a,b : array-like a,b : array-like
lower and upper integration limits, respectively. lower and upper integration limits, respectively.
reltol, abstol : real scalars, optional reltol, abstol : real scalars, optional
relative and absolute tolerance, respectively.
(default reltol=abstol=1e-3).
wfun : scalar integer, optional wfun : scalar integer, optional
defining the weight function, p(x). (default wfun = 1) defining the weight function, p(x). (default wfun = 1)
1 : p(x) = 1 a =-1, b = 1 Gauss-Legendre 1 : p(x) = 1 a =-1, b = 1 Gauss-Legendre
@ -964,7 +977,8 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
a_shape = np.atleast_1d(A.shape) a_shape = np.atleast_1d(A.shape)
b_shape = np.atleast_1d(B.shape) b_shape = np.atleast_1d(B.shape)
if np.prod(a_shape) == 1: # make sure the integration limits have correct size # make sure the integration limits have correct size
if np.prod(a_shape) == 1:
A = A * ones(b_shape) A = A * ones(b_shape)
a_shape = b_shape a_shape = b_shape
elif np.prod(b_shape) == 1: elif np.prod(b_shape) == 1:
@ -972,7 +986,6 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
elif any(a_shape != b_shape): elif any(a_shape != b_shape):
raise ValueError('The integration limits must have equal size!') raise ValueError('The integration limits must have equal size!')
if args is None: if args is None:
num_parameters = 0 num_parameters = 0
else: else:
@ -980,7 +993,7 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
P0 = copy.deepcopy(args) P0 = copy.deepcopy(args)
isvector1 = zeros(num_parameters) isvector1 = zeros(num_parameters)
nk = np.prod(a_shape) #% # of integrals we have to compute nk = np.prod(a_shape) # % # of integrals we have to compute
for ix in xrange(num_parameters): for ix in xrange(num_parameters):
if is_numlike(P0[ix]): if is_numlike(P0[ix]):
p0_shape = np.shape(P0[ix]) p0_shape = np.shape(P0[ix])
@ -997,20 +1010,18 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
P0[ix].shape = (-1, 1) # make sure it is a column P0[ix].shape = (-1, 1) # make sure it is a column
k = np.arange(nk) k = np.arange(nk)
val = zeros(nk) val = zeros(nk)
val_old = zeros(nk) val_old = zeros(nk)
abserr = zeros(nk) abserr = zeros(nk)
# setup mapping parameters
#setup mapping parameters
A.shape = (-1, 1) A.shape = (-1, 1)
B.shape = (-1, 1) B.shape = (-1, 1)
jacob = (B - A) / 2 jacob = (B - A) / 2
shift = 1 shift = 1
if wfun == 1:# Gauss-legendre if wfun == 1: # Gauss-legendre
dx = jacob dx = jacob
elif wfun == 2 or wfun == 3: elif wfun == 2 or wfun == 3:
shift = 0 shift = 0
@ -1041,19 +1052,17 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
dx = dx.ravel() dx = dx.ravel()
if trace: if trace:
x_trace = [0, ]*max_iter x_trace = [0, ] * max_iter
y_trace = [0, ]*max_iter y_trace = [0, ] * max_iter
if num_parameters > 0: if num_parameters > 0:
ix_vec, = np.where(isvector1) ix_vec, = np.where(isvector1)
if len(ix_vec): if len(ix_vec):
P1 = copy.copy(P0) P1 = copy.copy(P0)
# Break out of the iteration loop for two reasons:
# 1) the last update is very small (compared to int and to reltol)
# 2) There are more than 11 iterations. This should NEVER happen.
for ix in xrange(max_iter): for ix in xrange(max_iter):
x_and_w = 'wfun%d_%d_%g_%g' % (wfun, gn, alpha, beta) x_and_w = 'wfun%d_%d_%g_%g' % (wfun, gn, alpha, beta)
@ -1066,7 +1075,6 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
# calculate the x values # calculate the x values
x = (xn + shift) * jacob[k, :] + A[k, :] x = (xn + shift) * jacob[k, :] + A[k, :]
# calculate function values y=fun(x,p1,p2,....,pn) # calculate function values y=fun(x,p1,p2,....,pn)
if num_parameters > 0: if num_parameters > 0:
if len(ix_vec): if len(ix_vec):
@ -1080,46 +1088,44 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
else: else:
y = fun(x) y = fun(x)
val[k] = np.sum(w * y, axis=1) * dx[k] # do the integration sum(y.*w) val[k] = np.sum(w * y, axis=1) * dx[k] # do the integration sum(y.*w)
if trace: if trace:
x_trace.append(x.ravel()) x_trace.append(x.ravel())
y_trace.append(y.ravel()) y_trace.append(y.ravel())
hfig = plt.plot(x, y, 'r.') hfig = plt.plot(x, y, 'r.')
#hold on # hold on
#drawnow,shg # drawnow,shg
#if trace>1: # if trace>1:
# pause # pause
plt.setp(hfig, 'color', 'b') plt.setp(hfig, 'color', 'b')
abserr[k] = abs(val_old[k] - val[k]) # absolute tolerance
abserr[k] = abs(val_old[k] - val[k]) #absolute tolerance
if ix > 1: if ix > 1:
k, = np.where(abserr > np.maximum(abs(reltol * val), abstol))
# abserr > abs(abstol)) % indices to integrals which did not converge
nk = len(k)  # number of integrals we have to compute again
if nk:
val_old[k] = val[k] val_old[k] = val[k]
else: else:
break break
gn *= 2 #double the # of basepoints and weights gn *= 2 # double the # of basepoints and weights
else: else:
if nk > 1: if nk > 1:
if (nk == np.prod(a_shape)): if (nk == np.prod(a_shape)):
tmptxt = 'All integrals did not converge--singularities likely!' tmptxt = 'All integrals did not converge'
else: else:
tmptxt = '%d integrals did not converge--singularities likely!' % (nk,) tmptxt = '%d integrals did not converge' % (nk,)
else: else:
tmptxt = 'Integral did not converge'
warnings.warn(tmptxt + '--singularities likely!')
val.shape = a_shape # make sure int is the same size as the integration limits # make sure int is the same size as the integration limits
val.shape = a_shape
abserr.shape = a_shape abserr.shape = a_shape
if trace > 0: if trace > 0:
@ -1127,6 +1133,7 @@ def gaussq(fun, a, b, reltol=1e-3, abstol=1e-3, alpha=0, beta=0, wfun=1,
plt.plot(np.hstack(x_trace), np.hstack(y_trace), '+') plt.plot(np.hstack(x_trace), np.hstack(y_trace), '+')
return val, abserr return val, abserr
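# Editor's note (not part of the commit): an illustrative sketch of gaussq as
# defined above, assuming numpy as np. The limits may be arrays, so several
# integrals are evaluated in one call.
val, abserr = gaussq(lambda x: x ** 2, 0, np.array([1.0, 2.0, 3.0]))
# val ~ [1/3, 8/3, 9], i.e. b**3/3 for each upper limit b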
def richardson(Q, k): def richardson(Q, k):
# license BSD # license BSD
# Richardson extrapolation with parameter estimation # Richardson extrapolation with parameter estimation
@ -1136,6 +1143,7 @@ def richardson(Q, k):
R = Q[k] + (Q[k] - Q[k - 1]) / c R = Q[k] + (Q[k] - Q[k - 1]) / c
return R return R
def quadgr(fun, a, b, abseps=1e-5, max_iter=17): def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
''' '''
Gauss-Legendre quadrature with Richardson extrapolation. Gauss-Legendre quadrature with Richardson extrapolation.
@ -1189,7 +1197,6 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
else: else:
reverse = False reverse = False
#% Infinite limits #% Infinite limits
if np.isinf(a) | np.isinf(b): if np.isinf(a) | np.isinf(b):
# Check real limits # Check real limits
@ -1199,7 +1206,7 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
# Change of variable # Change of variable
if np.isfinite(a) & np.isinf(b): if np.isfinite(a) & np.isinf(b):
# a to inf # a to inf
fun1 = lambda t : fun(a + t / (1 - t)) / (1 - t) ** 2 fun1 = lambda t: fun(a + t / (1 - t)) / (1 - t) ** 2
[Q, err] = quadgr(fun1, 0, 1, abseps) [Q, err] = quadgr(fun1, 0, 1, abseps)
elif np.isinf(a) & np.isfinite(b): elif np.isinf(a) & np.isfinite(b):
# -inf to b # -inf to b
@ -1219,9 +1226,11 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
return Q, err return Q, err
# Gauss-Legendre quadrature (12-point) # Gauss-Legendre quadrature (12-point)
xq = np.asarray([0.12523340851146894, 0.36783149899818018, 0.58731795428661748, xq = np.asarray(
[0.12523340851146894, 0.36783149899818018, 0.58731795428661748,
0.76990267419430469, 0.9041172563704748, 0.98156063424671924]) 0.76990267419430469, 0.9041172563704748, 0.98156063424671924])
wq = np.asarray([0.24914704581340288, 0.23349253653835478, 0.20316742672306584, wq = np.asarray(
[0.24914704581340288, 0.23349253653835478, 0.20316742672306584,
0.16007832854334636, 0.10693932599531818, 0.047175336386511842]) 0.16007832854334636, 0.10693932599531818, 0.047175336386511842])
xq = np.hstack((xq, -xq)) xq = np.hstack((xq, -xq))
wq = np.hstack((wq, wq)) wq = np.hstack((wq, wq))
@ -1251,7 +1260,8 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
hh = hh / 2 hh = hh / 2
x = np.hstack([x + a, x + b]) / 2 x = np.hstack([x + a, x + b]) / 2
# Quadrature # Quadrature
Q0[k] = hh * np.sum(wq * np.sum(np.reshape(fun(x), (-1, nq)), axis=0), axis=0) Q0[k] = hh * \
np.sum(wq * np.sum(np.reshape(fun(x), (-1, nq)), axis=0), axis=0)
# Richardson extrapolation # Richardson extrapolation
if k >= 5: if k >= 5:
@ -1260,7 +1270,6 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
elif k >= 3: elif k >= 3:
Q1[k] = richardson(Q0, k) Q1[k] = richardson(Q0, k)
#% Estimate absolute error #% Estimate absolute error
if k >= 6: if k >= 6:
Qv = np.hstack((Q0[k], Q1[k], Q2[k])) Qv = np.hstack((Q0[k], Q1[k], Q2[k]))
@ -1276,7 +1285,7 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
j = errors.argmin() j = errors.argmin()
err = errors[j] err = errors[j]
Q = Qv[j] Q = Qv[j]
if k >= 2 : #and not iscomplex: if k >= 2: # and not iscomplex:
_val, err1 = dea3(Q0[k - 2], Q0[k - 1], Q0[k]) _val, err1 = dea3(Q0[k - 2], Q0[k - 1], Q0[k])
# Convergence # Convergence
@ -1288,7 +1297,6 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
if ~ np.isfinite(Q): if ~ np.isfinite(Q):
warnings.warn('Integral approximation is Infinite or NaN.') warnings.warn('Integral approximation is Infinite or NaN.')
# The error estimate should not be zero # The error estimate should not be zero
err = err + 2 * np.finfo(Q).eps err = err + 2 * np.finfo(Q).eps
# Reverse direction # Reverse direction
@ -1297,6 +1305,7 @@ def quadgr(fun, a, b, abseps=1e-5, max_iter=17):
return Q, err return Q, err
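# Editor's note (not part of the commit): an illustrative sketch of quadgr as
# defined above, assuming numpy as np. A semi-infinite limit exercises the
# change of variable shown earlier in the function.
Q, err = quadgr(lambda x: np.exp(-x), 0, np.inf, abseps=1e-9)
# Q ~ 1.0 == int_0^inf exp(-x) dx; err is the estimated absolute error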
def qdemo(f, a, b): def qdemo(f, a, b):
''' '''
Compares different quadrature rules. Compares different quadrature rules.
@ -1346,7 +1355,6 @@ def qdemo(f, a, b):
129, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000 129, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
257, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000 257, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
513, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000 513, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
''' '''
# use quad8 with small tolerance to get "true" value # use quad8 with small tolerance to get "true" value
#true1 = quad8(f,a,b,1e-10) #true1 = quad8(f,a,b,1e-10)
@ -1381,7 +1389,7 @@ def qdemo(f, a, b):
# trapezoid approximation # trapezoid approximation
q = np.trapz(y, x) q = np.trapz(y, x)
#h*( (y(1)+y(n))/2 + sum(y(2:n-1)) ) # h*( (y(1)+y(n))/2 + sum(y(2:n-1)) )
qt[k] = q qt[k] = q
et[k] = abs(q - true_val) et[k] = abs(q - true_val)
# Simpson approximation # Simpson approximation
@ -1392,7 +1400,8 @@ def qdemo(f, a, b):
# Boole's rule # Boole's rule
#q = boole(x,y) #q = boole(x,y)
q = (2 * h / 45) * (7 * (y[0] + y[-1]) + 12 * np.sum(y[2:n - 1:4]) q = (2 * h / 45) * (7 * (y[0] + y[-1]) + 12 * np.sum(y[2:n - 1:4])
+ 32 * np.sum(y[1:n - 1:2]) + 14 * np.sum(y[4:n - 3:4])) + 32 * np.sum(y[1:n - 1:2]) +
14 * np.sum(y[4:n - 3:4]))
qb[k] = q qb[k] = q
eb[k] = abs(q - true_val) eb[k] = abs(q - true_val)
@ -1409,15 +1418,14 @@ def qdemo(f, a, b):
# Gauss-Legendre quadrature # Gauss-Legendre quadrature
q = intg.fixed_quad(f, a, b, n=n)[0] q = intg.fixed_quad(f, a, b, n=n)[0]
#[x, w]=qrule(n,1) #[x, w]=qrule(n,1)
# x = (b-a)/2*x + (a+b)/2 % Transform base points X.
# w = (b-a)/2*w % Adjust weights.
#q = sum(feval(f,x)*w) #q = sum(feval(f,x)*w)
qg[k] = q qg[k] = q
eg[k] = abs(q - true_val) eg[k] = abs(q - true_val)
#% display results #% display results
formats = ['%4.0f, ', ] + ['%10.10f, ', ]*6 formats = ['%4.0f, ', ] + ['%10.10f, ', ] * 6
formats[-1] = formats[-1].split(',')[0] formats[-1] = formats[-1].split(',')[0]
data = np.vstack((neval, qt, et, qs, es, qb, eb)).T data = np.vstack((neval, qt, et, qs, es, qb, eb)).T
print(" ftn Trapezoid Simpson's Boole's")
@ -1435,14 +1443,12 @@ def qdemo(f, a, b):
tmp = data[k].tolist() tmp = data[k].tolist()
print(''.join(fi % t for fi, t in zip(formats, tmp))) print(''.join(fi % t for fi, t in zip(formats, tmp)))
plt.loglog(neval, np.vstack((et, es, eb, ec, ec2, eg)).T) plt.loglog(neval, np.vstack((et, es, eb, ec, ec2, eg)).T)
plt.xlabel('number of function evaluations') plt.xlabel('number of function evaluations')
plt.ylabel('error') plt.ylabel('error')
plt.legend(
    ('Trapezoid', 'Simpsons', 'Booles', 'Clenshaw', 'Chebychev', 'Gauss-L'))
# ec3'
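# Editor's note (not part of the commit): calling qdemo as defined above prints
# the comparison tables shown in its docstring and plots error versus the
# number of function evaluations; assumes numpy as np and the plot backend plt
# imported at the top of this module.
qdemo(np.exp, 0, 3)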
def main(): def main():
@ -1465,22 +1471,23 @@ def main():
# [val1, err1] = gaussq(fun, A, B) # [val1, err1] = gaussq(fun, A, B)
# #
# #
# #Integration of x^2*exp(-x) from zero to infinity: # Integration of x^2*exp(-x) from zero to infinity:
# fun2 = npu.wrap2callable('1') # fun2 = npu.wrap2callable('1')
# [val2, err2] = gaussq(fun2, 0, np.inf, wfun=3, alpha=2) # [val2, err2] = gaussq(fun2, 0, np.inf, wfun=3, alpha=2)
# [val2, err2] = gaussq(lambda x: x ** 2, 0, np.inf, wfun=3, alpha=0) # [val2, err2] = gaussq(lambda x: x ** 2, 0, np.inf, wfun=3, alpha=0)
# #
# #Integrate humps from 0 to 2 and from 1 to 4 # Integrate humps from 0 to 2 and from 1 to 4
# [val3, err3] = gaussq(humps, A, B) # [val3, err3] = gaussq(humps, A, B)
# #
# [x, w] = p_roots(11, 'newton', 1, 3) # [x, w] = p_roots(11, 'newton', 1, 3)
# y = np.sum(x ** 2 * w) # y = np.sum(x ** 2 * w)
x = np.linspace(0, np.pi / 2) x = np.linspace(0, np.pi / 2)
q0 = np.trapz(humps(x), x) _q0 = np.trapz(humps(x), x)
[q, err] = romberg(humps, 0, np.pi / 2, 1e-4) [q, err] = romberg(humps, 0, np.pi / 2, 1e-4)
print q, err print q, err
def test_docstrings(): def test_docstrings():
np.set_printoptions(precision=7) np.set_printoptions(precision=7)
import doctest import doctest
@ -1488,4 +1495,4 @@ def test_docstrings():
if __name__ == '__main__': if __name__ == '__main__':
test_docstrings() test_docstrings()
#main() # main()

@ -1,4 +1,4 @@
#------------------------------------------------------------------------------- #-------------------------------------------------------------------------
# Name: module1 # Name: module1
# Purpose: # Purpose:
# #
@ -7,15 +7,16 @@
# Created: 30.12.2008 # Created: 30.12.2008
# Copyright: (c) pab 2008 # Copyright: (c) pab 2008
# Licence: <your licence> # Licence: <your licence>
#------------------------------------------------------------------------------- #-------------------------------------------------------------------------
#!/usr/bin/env python #!/usr/bin/env python
from __future__ import division from __future__ import division
import numpy as np import numpy as np
import scipy.signal import scipy.signal
import scipy.special as spec
import scipy.sparse as sp import scipy.sparse as sp
import scipy.sparse.linalg #@UnusedImport import scipy.sparse.linalg # @UnusedImport
from numpy.ma.core import ones, zeros, prod, sin from numpy.ma.core import ones, zeros, prod, sin
from numpy import diff, pi, inf #@UnresolvedImport from numpy import diff, pi, inf # @UnresolvedImport
from numpy.lib.shape_base import vstack from numpy.lib.shape_base import vstack
from numpy.lib.function_base import linspace from numpy.lib.function_base import linspace
from scipy.interpolate import PiecewisePolynomial from scipy.interpolate import PiecewisePolynomial
@ -23,11 +24,14 @@ from scipy.interpolate import PiecewisePolynomial
import polynomial as pl import polynomial as pl
__all__ = [
    'PPform', 'savitzky_golay', 'savitzky_golay_piecewise', 'sgolay2d',
    'SmoothSpline', 'pchip_slopes', 'slopes', 'stineman_interp', 'Pchip',
    'StinemanInterp', 'CubicHermiteSpline']
def savitzky_golay(y, window_size, order, deriv=0): def savitzky_golay(y, window_size, order, deriv=0):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter. """Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data. The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and It has the advantage of preserving the original shape and
features of the signal better than other types of filtering features of the signal better than other types of filtering
@ -43,7 +47,8 @@ def savitzky_golay(y, window_size, order, deriv=0):
the order of the polynomial used in the filtering. the order of the polynomial used in the filtering.
Must be less then `window_size` - 1. Must be less then `window_size` - 1.
deriv: int deriv: int
the order of the derivative to compute (default = 0 means only smoothing) order of the derivative to compute (default = 0 means only smoothing)
Returns Returns
------- -------
ys : ndarray, shape (N) ys : ndarray, shape (N)
@ -87,25 +92,28 @@ def savitzky_golay(y, window_size, order, deriv=0):
raise TypeError("window_size size must be a positive odd number") raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2: if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order") raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1) order_range = range(order + 1)
half_window = (window_size -1) // 2 half_window = (window_size - 1) // 2
# precompute coefficients # precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)]) b = np.mat([[k ** i for i in order_range]
for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv] m = np.linalg.pinv(b).A[deriv]
# pad the signal at the extremes with # pad the signal at the extremes with
# values taken from the signal itself # values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] ) firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1]) lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals)) y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m, y, mode='valid') return np.convolve(m, y, mode='valid')
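# Editor's note (not part of the commit): an illustrative sketch of
# savitzky_golay as defined above, assuming numpy as np. A noisy Gaussian is
# smoothed with a 31-point window and a 4th order polynomial.
t = np.linspace(-4, 4, 500)
y = np.exp(-t ** 2) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
# ysg follows exp(-t**2) closely while suppressing most of the added noise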
def savitzky_golay_piecewise(xvals, data, kernel=11, order =4): def savitzky_golay_piecewise(xvals, data, kernel=11, order=4):
''' '''
One of the most popular applications of the S-G filter, apart from smoothing
UV-VIS and IR spectra, is smoothing of curves obtained in electroanalytical
experiments. In cyclic voltammetry, voltage (being the abscissa) changes
like a triangle wave, and the signal has cusps at the turning points
(at switching potentials) which should never be smoothed. In this case,
Savitzky-Golay smoothing should be done piecewise, i.e. separately on
pieces monotonic in x.
Example Example
@ -117,46 +125,55 @@ def savitzky_golay_piecewise(xvals, data, kernel=11, order =4):
>>> y = np.round(sin(x)) >>> y = np.round(sin(x))
>>> sig2 = linspace(0,0.5,50) >>> sig2 = linspace(0,0.5,50)
# As an example, this figure shows the effect of an additive noise with a
# variance of 0.2 (original signal (black), noisy signal (red) and filtered
# signal (blue dots)).
>>> yn = y + np.sqrt(0.2)*np.random.randn(*x.shape) >>> yn = y + np.sqrt(0.2)*np.random.randn(*x.shape)
>>> yr = savitzky_golay_piecewise(x, yn, kernel=11, order=4) >>> yr = savitzky_golay_piecewise(x, yn, kernel=11, order=4)
>>> h=plt.plot(x, yn, 'r', x, y, 'k', x, yr, 'b.') >>> h=plt.plot(x, yn, 'r', x, y, 'k', x, yr, 'b.')
''' '''
turnpoint=0 turnpoint = 0
last=len(xvals) last = len(xvals)
if xvals[1]>xvals[0] : #x is increasing? if xvals[1] > xvals[0]: # x is increasing?
for i in range(1,last) : #yes for i in range(1, last): # yes
if xvals[i]<xvals[i-1] : #search where x starts to fall if xvals[i] < xvals[i - 1]: # search where x starts to fall
turnpoint=i turnpoint = i
break break
else: #no, x is decreasing else: # no, x is decreasing
for i in range(1,last) : #search where it starts to rise for i in range(1, last): # search where it starts to rise
if xvals[i]>xvals[i-1] : if xvals[i] > xvals[i - 1]:
turnpoint=i turnpoint = i
break break
if turnpoint==0 : #no change in direction of x if turnpoint == 0: # no change in direction of x
return savitzky_golay(data, kernel, order) return savitzky_golay(data, kernel, order)
else: else:
#smooth the first piece # smooth the first piece
firstpart=savitzky_golay(data[0:turnpoint],kernel,order) firstpart = savitzky_golay(data[0:turnpoint], kernel, order)
#recursively smooth the rest # recursively smooth the rest
rest=savitzky_golay_piecewise(xvals[turnpoint:], data[turnpoint:], kernel, order) rest = savitzky_golay_piecewise(
return np.concatenate((firstpart,rest)) xvals[turnpoint:], data[turnpoint:], kernel, order)
return np.concatenate((firstpart, rest))
def sgolay2d ( z, window_size, order, derivative=None):
def sgolay2d(z, window_size, order, derivative=None):
""" """
Savitzky-Golay filters can also be used to smooth two dimensional data
affected by noise. The algorithm is exactly the same as for the one
dimensional case, only the math is a bit more tricky. The basic algorithm
is as follows: for each point of the two dimensional matrix extract a
sub-matrix, centered at that point and with a size equal to an odd number
"window_size". For this sub-matrix compute a least-square fit of a
polynomial surface, defined as
p(x, y) = a0 + a1*x + a2*y + a3*x**2 + a4*y**2 + a5*x*y + ...
Note that x and y are equal to zero at the central point.
Replace the initial central point with the value computed with the fit.
Note that because the fit coefficients are linear with respect to the data
spacing, they can be pre-computed for efficiency. Moreover, it is important
to appropriately pad the borders of the data with a mirror image of the
data itself, so that the evaluation of the fit at the borders of the data
can happen smoothly.
Here is the code for two dimensional filtering. Here is the code for two dimensional filtering.
Example Example
@ -196,7 +213,7 @@ def sgolay2d ( z, window_size, order, derivative=None):
# the exponents of the k-th term. First element of tuple is for x # the exponents of the k-th term. First element of tuple is for x
# second element for y. # second element for y.
# Ex. exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...] # Ex. exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...]
exps = [ (k - n, n) for k in range(order + 1) for n in range(k + 1) ] exps = [(k - n, n) for k in range(order + 1) for n in range(k + 1)]
# coordinates of points # coordinates of points
ind = np.arange(-half_size, half_size + 1, dtype=np.float64) ind = np.arange(-half_size, half_size + 1, dtype=np.float64)
@ -213,32 +230,44 @@ def sgolay2d ( z, window_size, order, derivative=None):
Z = np.zeros((new_shape)) Z = np.zeros((new_shape))
# top band # top band
band = z[0, :] band = z[0, :]
Z[:half_size, half_size:-half_size] = band - np.abs(np.flipud(z[1:half_size + 1, :]) - band) Z[:half_size, half_size:-half_size] = band - \
np.abs(np.flipud(z[1:half_size + 1, :]) - band)
# bottom band # bottom band
band = z[-1, :] band = z[-1, :]
Z[-half_size:, half_size:-half_size] = band + np.abs(np.flipud(z[-half_size - 1:-1, :]) - band) Z[-half_size:, half_size:-half_size] = band + \
np.abs(np.flipud(z[-half_size - 1:-1, :]) - band)
# left band # left band
band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size]) band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size])
Z[half_size:-half_size, :half_size] = band - np.abs(np.fliplr(z[:, 1:half_size + 1]) - band) Z[half_size:-half_size, :half_size] = band - \
np.abs(np.fliplr(z[:, 1:half_size + 1]) - band)
# right band # right band
band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size]) band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size])
Z[half_size:-half_size, -half_size:] = band + np.abs(np.fliplr(z[:, -half_size - 1:-1]) - band) Z[half_size:-half_size, -half_size:] = band + \
np.abs(np.fliplr(z[:, -half_size - 1:-1]) - band)
# central band # central band
Z[half_size:-half_size, half_size:-half_size] = z Z[half_size:-half_size, half_size:-half_size] = z
# top left corner # top left corner
band = z[0, 0] band = z[0, 0]
Z[:half_size, :half_size] = band - np.abs(np.flipud(np.fliplr(z[1:half_size + 1, 1:half_size + 1])) - band) Z[:half_size, :half_size] = band - \
np.abs(
np.flipud(np.fliplr(z[1:half_size + 1, 1:half_size + 1])) - band)
# bottom right corner # bottom right corner
band = z[-1, -1] band = z[-1, -1]
Z[-half_size:, -half_size:] = band + np.abs(np.flipud(np.fliplr(z[-half_size - 1:-1, -half_size - 1:-1])) - band) Z[-half_size:, -half_size:] = band + \
np.abs(np.flipud(np.fliplr(z[-half_size - 1:-1, -half_size - 1:-1])) -
band)
# top right corner # top right corner
band = Z[half_size, -half_size:] band = Z[half_size, -half_size:]
Z[:half_size, -half_size:] = band - np.abs(np.flipud(Z[half_size + 1:2 * half_size + 1, -half_size:]) - band) Z[:half_size, -half_size:] = band - \
np.abs(
np.flipud(Z[half_size + 1:2 * half_size + 1, -half_size:]) - band)
# bottom left corner # bottom left corner
band = Z[-half_size:, half_size].reshape(-1, 1) band = Z[-half_size:, half_size].reshape(-1, 1)
Z[-half_size:, :half_size] = band - np.abs(np.fliplr(Z[-half_size:, half_size + 1:2 * half_size + 1]) - band) Z[-half_size:, :half_size] = band - \
np.abs(
np.fliplr(Z[-half_size:, half_size + 1:2 * half_size + 1]) - band)
# solve system and convolve # solve system and convolve
if derivative == None: if derivative == None:
@ -253,11 +282,15 @@ def sgolay2d ( z, window_size, order, derivative=None):
elif derivative == 'both': elif derivative == 'both':
c = np.linalg.pinv(A)[1].reshape((window_size, -1)) c = np.linalg.pinv(A)[1].reshape((window_size, -1))
r = np.linalg.pinv(A)[2].reshape((window_size, -1)) r = np.linalg.pinv(A)[2].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, -r, mode='valid'), scipy.signal.fftconvolve(Z, -c, mode='valid') return (scipy.signal.fftconvolve(Z, -r, mode='valid'),
scipy.signal.fftconvolve(Z, -c, mode='valid'))
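# Editor's note (not part of the commit): an illustrative sketch of sgolay2d as
# defined above, assuming numpy as np. A noisy 2D Gaussian is smoothed; with
# derivative='both' the smoothed gradient components are returned instead.
xg, yg = np.mgrid[-3:3:100j, -3:3:100j]
z = np.exp(-(xg ** 2 + yg ** 2)) + np.random.normal(0, 0.1, xg.shape)
zf = sgolay2d(z, window_size=29, order=4)
gx, gy = sgolay2d(z, window_size=29, order=4, derivative='both')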
class PPform(object): class PPform(object):
"""The ppform of the piecewise polynomials is given in terms of coefficients
and breaks. The polynomial in the ith interval is """The ppform of the piecewise polynomials
is given in terms of coefficients and breaks.
The polynomial in the ith interval is
x_{i} <= x < x_{i+1} x_{i} <= x < x_{i+1}
S_i = sum(coefs[m,i]*(x-breaks[i])^(k-m), m=0..k) S_i = sum(coefs[m,i]*(x-breaks[i])^(k-m), m=0..k)
@ -274,6 +307,7 @@ class PPform(object):
>>> x = linspace(-1,3) >>> x = linspace(-1,3)
>>> h=plt.plot(x,self(x)) >>> h=plt.plot(x,self(x))
""" """
def __init__(self, coeffs, breaks, fill=0.0, sort=False, a=None, b=None): def __init__(self, coeffs, breaks, fill=0.0, sort=False, a=None, b=None):
if sort: if sort:
self.breaks = np.sort(breaks) self.breaks = np.sort(breaks)
@ -309,7 +343,8 @@ class PPform(object):
V = np.vander(dx, N=self.order) V = np.vander(dx, N=self.order)
# values = np.diag(dot(V,pp[:,indxs])) # values = np.diag(dot(V,pp[:,indxs]))
dot = np.dot dot = np.dot
values = np.array([dot(V[k, :], pp[:, indxs[k]]) for k in xrange(len(xx))]) values = np.array([dot(V[k, :], pp[:, indxs[k]])
for k in xrange(len(xx))])
res[mask] = values res[mask] = values
res.shape = saveshape res.shape = saveshape
@ -317,7 +352,7 @@ class PPform(object):
def linear_extrapolate(self, output=True): def linear_extrapolate(self, output=True):
''' '''
Return a 1D PPform which extrapolate linearly outside its basic interval Return 1D PPform which extrapolate linearly outside its basic interval
''' '''
max_order = 2 max_order = 2
@ -347,24 +382,23 @@ class PPform(object):
dxN = dx[-1] dxN = dx[-1]
a_n = pl.polyreloc(a_nn, -dxN) # Relocate last polynomial a_n = pl.polyreloc(a_nn, -dxN) # Relocate last polynomial
#set to zero all terms of order > maxOrder # set to zero all terms of order > maxOrder
a_n[0:self.order - max_order] = 0 a_n[0:self.order - max_order] = 0
#Get the coefficients for the new first piece (a_1) # Get the coefficients for the new first piece (a_1)
# by first setting all terms of order > maxOrder to zero and then # by first setting all terms of order > maxOrder to zero and then
# relocate the polynomial. # relocate the polynomial.
# Set to zero all terms of order > maxOrder, i.e., not using them
#Set to zero all terms of order > maxOrder, i.e., not using them
a_11 = coefs[self.order - max_order::, 0] a_11 = coefs[self.order - max_order::, 0]
dx1 = dx[0] dx1 = dx[0]
a_1 = pl.polyreloc(a_11, -dx1) # Relocate first polynomial a_1 = pl.polyreloc(a_11, -dx1) # Relocate first polynomial
a_1 = np.hstack([zeros(self.order - max_order), a_1]) a_1 = np.hstack([zeros(self.order - max_order), a_1])
newcoefs = np.hstack([ a_1.reshape(-1, 1), coefs, a_n.reshape(-1, 1)]) newcoefs = np.hstack([a_1.reshape(-1, 1), coefs, a_n.reshape(-1, 1)])
if output: if output:
return PPform(newcoefs, newbreaks, a= -inf, b=inf) return PPform(newcoefs, newbreaks, a=-inf, b=inf)
else: else:
self.coeffs = newcoefs self.coeffs = newcoefs
self.breaks = newbreaks self.breaks = newbreaks
@ -380,7 +414,6 @@ class PPform(object):
brks = self.breaks.copy() brks = self.breaks.copy()
return PPform(cof, brks, fill=self.fill) return PPform(cof, brks, fill=self.fill)
def integrate(self): def integrate(self):
""" """
Return the indefinite integral of the piecewise polynomial Return the indefinite integral of the piecewise polynomial
@ -388,8 +421,9 @@ class PPform(object):
cof = pl.polyint(self.coeffs) cof = pl.polyint(self.coeffs)
pieces = len(self.breaks) - 1 pieces = len(self.breaks) - 1
if 1 < pieces : if 1 < pieces:
# evaluate each integrated polynomial at the right endpoint of its interval # evaluate each integrated polynomial at the right endpoint of its
# interval
xs = diff(self.breaks[:-1, ...], axis=0) xs = diff(self.breaks[:-1, ...], axis=0)
index = np.arange(pieces - 1) index = np.arange(pieces - 1)
@ -402,19 +436,19 @@ class PPform(object):
return PPform(cof, self.breaks, fill=self.fill) return PPform(cof, self.breaks, fill=self.fill)
# def fromspline(self, xk, cvals, order, fill=0.0):
# N = len(xk) - 1
# sivals = np.empty((order + 1, N), dtype=float)
# for m in xrange(order, -1, -1):
# fact = spec.gamma(m + 1)
# res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
# res /= fact
# sivals[order - m, :] = res
# return self(sivals, xk, fill=fill)
## def fromspline(cls, xk, cvals, order, fill=0.0):
## N = len(xk)-1
## sivals = np.empty((order+1,N), dtype=float)
## for m in xrange(order,-1,-1):
## fact = spec.gamma(m+1)
## res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
## res /= fact
## sivals[order-m,:] = res
## return cls(sivals, xk, fill=fill)
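# Editor's note (not part of the commit): an illustrative sketch of the PPform
# convention documented above, assuming numpy as np. Two linear pieces:
# f(x) = x on [0, 1) and f(x) = 2 - x on [1, 2].
coefs = np.array([[1.0, -1.0],      # coefficients of (x - breaks[i])**1
                  [0.0, 1.0]])      # coefficients of (x - breaks[i])**0
pp = PPform(coefs, np.array([0.0, 1.0, 2.0]))
vals = pp(np.array([0.5, 1.5]))     # -> [0.5, 0.5]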
class SmoothSpline(PPform): class SmoothSpline(PPform):
""" """
Cubic Smoothing Spline. Cubic Smoothing Spline.
@ -472,6 +506,7 @@ class SmoothSpline(PPform):
Springer Verlag Springer Verlag
Uses EqXIV.6--9, self 239 Uses EqXIV.6--9, self 239
""" """
def __init__(self, xx, yy, p=None, lin_extrap=True, var=1): def __init__(self, xx, yy, p=None, lin_extrap=True, var=1):
coefs, brks = self._compute_coefs(xx, yy, p, var) coefs, brks = self._compute_coefs(xx, yy, p, var)
super(SmoothSpline, self).__init__(coefs, brks) super(SmoothSpline, self).__init__(coefs, brks)
@ -506,7 +541,7 @@ class SmoothSpline(PPform):
dydx = np.diff(y) / dx dydx = np.diff(y) / dx
if (n == 2) : #% straight line if (n == 2): # % straight line
coefs = np.vstack([dydx.ravel(), y[0, :]]) coefs = np.vstack([dydx.ravel(), y[0, :]])
else: else:
@ -518,9 +553,11 @@ class SmoothSpline(PPform):
dx.shape = (n - 1, -1) dx.shape = (n - 1, -1)
zrs = zeros(nd) zrs = zeros(nd)
if p < 1: if p < 1:
# faster than yi-6*(1-p)*Q*u
ai = (y - (6 * (1 - p) * D *
           diff(vstack([zrs,
                        diff(vstack([zrs, u, zrs]), axis=0) * dx1,
                        zrs]), axis=0)).T).T
else: else:
ai = y.reshape(n, -1) ai = y.reshape(n, -1)
@ -532,7 +569,7 @@ class SmoothSpline(PPform):
# dfi = diff(ai)./dx-(ci+di.*dx).*dx = bi; # dfi = diff(ai)./dx-(ci+di.*dx).*dx = bi;
ci = np.vstack([zrs, 3 * p * u]) ci = np.vstack([zrs, 3 * p * u])
di = (diff(vstack([ci, zrs]), axis=0) * dx1 / 3); di = (diff(vstack([ci, zrs]), axis=0) * dx1 / 3)
bi = (diff(ai, axis=0) * dx1 - (ci + di * dx) * dx) bi = (diff(ai, axis=0) * dx1 - (ci + di * dx) * dx)
ai = ai[:n - 1, ...] ai = ai[:n - 1, ...]
if nd > 1: if nd > 1:
@ -545,7 +582,8 @@ class SmoothSpline(PPform):
else: else:
coefs = vstack([ci.ravel(), bi.ravel(), ai.ravel()]) coefs = vstack([ci.ravel(), bi.ravel(), ai.ravel()])
else: else:
coefs = vstack([di.ravel(), ci.ravel(), bi.ravel(), ai.ravel()]) coefs = vstack(
[di.ravel(), ci.ravel(), bi.ravel(), ai.ravel()])
return coefs, x return coefs, x
@ -555,11 +593,15 @@ class SmoothSpline(PPform):
R = sp.spdiags(data, [-1, 0, 1], n - 2, n - 2) R = sp.spdiags(data, [-1, 0, 1], n - 2, n - 2)
if p is None or p < 1: if p is None or p < 1:
Q = sp.spdiags([dx1[:n - 2], -(dx1[:n - 2] + dx1[1:n - 1]), dx1[1:n - 1]], [0, -1, -2], n, n - 2) Q = sp.spdiags(
[dx1[:n - 2], -(dx1[:n - 2] + dx1[1:n - 1]), dx1[1:n - 1]],
[0, -1, -2], n, n - 2)
QDQ = (Q.T * D * Q) QDQ = (Q.T * D * Q)
if p is None or p < 0: if p is None or p < 0:
# Estimate p # Estimate p
p = 1. / (1. + QDQ.diagonal().sum() / (100. * R.diagonal().sum()** 2)); p = 1. / \
(1. + QDQ.diagonal().sum() /
(100. * R.diagonal().sum() ** 2))
if p == 0: if p == 0:
QQ = 6 * QDQ QQ = 6 * QDQ
@ -574,8 +616,10 @@ class SmoothSpline(PPform):
u = 2 * sp.linalg.spsolve((QQ + QQ.T), ddydx) u = 2 * sp.linalg.spsolve((QQ + QQ.T), ddydx)
return u.reshape(n - 2, -1), p return u.reshape(n - 2, -1), p
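# Editor's note (not part of the commit): an illustrative sketch of SmoothSpline
# as defined above, assuming numpy as np. p=1 gives an interpolating cubic
# spline, p=0 a least-squares straight line; p=0.9 is a compromise.
xs = np.linspace(0, 1, 21)
ys = np.exp(xs) + 1e-1 * np.random.randn(21)
pp9 = SmoothSpline(xs, ys, p=0.9)
yi = pp9(np.linspace(0, 1, 201))    # smoothed values on a finer grid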
def _edge_case(m0, d1): def _edge_case(m0, d1):
return np.where((d1==0) | (m0==0), 0.0, 1.0/(1.0/m0+1.0/d1)) return np.where((d1 == 0) | (m0 == 0), 0.0, 1.0 / (1.0 / m0 + 1.0 / d1))
def pchip_slopes(x, y): def pchip_slopes(x, y):
# Determine the derivatives at the points y_k, d_k, by using # Determine the derivatives at the points y_k, d_k, by using
@ -591,24 +635,25 @@ def pchip_slopes(x, y):
hk = x[1:] - x[:-1] hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk mk = (y[1:] - y[:-1]) / hk
smk = np.sign(mk) smk = np.sign(mk)
condition = ((smk[1:] != smk[:-1]) | (mk[1:]==0) | (mk[:-1]==0)) condition = ((smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0))
w1 = 2*hk[1:] + hk[:-1] w1 = 2 * hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1] w2 = hk[1:] + 2 * hk[:-1]
whmean = 1.0/(w1+w2)*(w1/mk[1:] + w2/mk[:-1]) whmean = 1.0 / (w1 + w2) * (w1 / mk[1:] + w2 / mk[:-1])
dk = np.zeros_like(y) dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0 dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0/whmean[~condition] dk[1:-1][~condition] = 1.0 / whmean[~condition]
# For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless # For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless
# one of d_1 or m_0 is 0, then choose d_0 = 0 # one of d_1 or m_0 is 0, then choose d_0 = 0
dk[0] = _edge_case(mk[0],dk[1]) dk[0] = _edge_case(mk[0], dk[1])
dk[-1] = _edge_case(mk[-1],dk[-2]) dk[-1] = _edge_case(mk[-1], dk[-2])
return dk return dk
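# Editor's note (not part of the commit): an illustrative sketch of pchip_slopes
# as defined above, assuming numpy as np. Interior slopes are weighted harmonic
# means of the neighbouring secant slopes, which is what keeps PCHIP monotone.
xp = np.arange(7.)
dp = pchip_slopes(xp, xp ** 2)
# dp[1:-1] lies between the adjacent secant slopes of y = x**2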
def slopes(x,y, method='parabola', tension=0, monotone=False):
def slopes(x, y, method='parabola', tension=0, monotone=False):
''' '''
Return estimated slopes y'(x) Return estimated slopes y'(x)
@ -645,27 +690,27 @@ def slopes(x,y, method='parabola', tension=0, monotone=False):
y = np.asarray(y, np.float_) y = np.asarray(y, np.float_)
yp = np.zeros(y.shape, np.float_) yp = np.zeros(y.shape, np.float_)
dx = x[1:] - x[:-1] dx = x[1:] - x[:-1]
# Compute the slopes of the secant lines between successive points # Compute the slopes of the secant lines between successive points
dydx = (y[1:] - y[:-1]) / dx dydx = (y[1:] - y[:-1]) / dx
method = method.lower() method = method.lower()
if method.startswith('p'): #parabola'): if method.startswith('p'): # parabola'):
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1]) / (dx[1:] + dx[:-1]) yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1]) / \
(dx[1:] + dx[:-1])
yp[0] = 2.0 * dydx[0] - yp[1] yp[0] = 2.0 * dydx[0] - yp[1]
yp[-1] = 2.0 * dydx[-1] - yp[-2] yp[-1] = 2.0 * dydx[-1] - yp[-2]
else: else:
# At the endpoints - use one-sided differences # At the endpoints - use one-sided differences
yp[0] = dydx[0] yp[0] = dydx[0]
yp[-1] = dydx[-1] yp[-1] = dydx[-1]
if method.startswith('s'): #secant'): if method.startswith('s'): # secant'):
# In the middle - use the average of the secants # In the middle - use the average of the secants
yp[1:-1] = (dydx[:-1] + dydx[1:]) / 2.0 yp[1:-1] = (dydx[:-1] + dydx[1:]) / 2.0
else: # Cardinal or Catmull-Rom method else: # Cardinal or Catmull-Rom method
yp[1:-1] = (y[2:] - y[:-2]) / (x[2:] - x[:-2]) yp[1:-1] = (y[2:] - y[:-2]) / (x[2:] - x[:-2])
if method.startswith('car'): #cardinal'): if method.startswith('car'): # cardinal'):
yp = (1-tension) * yp yp = (1 - tension) * yp
if monotone: if monotone:
# Special case: intervals where y[k] == y[k+1] # Special case: intervals where y[k] == y[k+1]
@ -673,26 +718,28 @@ def slopes(x,y, method='parabola', tension=0, monotone=False):
# these points will be flat which preserves monotonicity # these points will be flat which preserves monotonicity
ii, = (dydx == 0.0).nonzero() ii, = (dydx == 0.0).nonzero()
yp[ii] = 0.0 yp[ii] = 0.0
yp[ii+1] = 0.0 yp[ii + 1] = 0.0
alpha = yp[:-1]/dydx alpha = yp[:-1] / dydx
beta = yp[1:]/dydx beta = yp[1:] / dydx
dist = alpha**2 + beta**2 dist = alpha ** 2 + beta ** 2
tau = 3.0 / np.sqrt(dist) tau = 3.0 / np.sqrt(dist)
# To prevent overshoot or undershoot, restrict the position vector # To prevent overshoot or undershoot, restrict the position vector
# (alpha, beta) to a circle of radius 3. If (alpha**2 + beta**2)>9, # (alpha, beta) to a circle of radius 3. If (alpha**2 + beta**2)>9,
# then set m[k] = tau[k]*alpha[k]*delta[k] and m[k+1] = tau[k]*beta[k]*delta[k] # then set m[k] = tau[k]*alpha[k]*delta[k] and
# m[k+1] = tau[k]*beta[k]*delta[k]
# where tau = 3/sqrt(alpha**2 + beta**2). # where tau = 3/sqrt(alpha**2 + beta**2).
# Find the indices that need adjustment # Find the indices that need adjustment
indices_to_fix, = (dist > 9.0).nonzero() indices_to_fix, = (dist > 9.0).nonzero()
for ii in indices_to_fix: for ii in indices_to_fix:
yp[ii] = tau[ii] * alpha[ii] * dydx[ii] yp[ii] = tau[ii] * alpha[ii] * dydx[ii]
yp[ii+1] = tau[ii] * beta[ii] * dydx[ii] yp[ii + 1] = tau[ii] * beta[ii] * dydx[ii]
return yp return yp
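A hedged usage sketch of slopes() on step-like data, contrasting the plain parabola estimate with the monotone variant that zeroes flat segments and restricts (alpha, beta) to the radius-3 circle. The import path is assumed, and the flat segments make numpy emit harmless 0/0 warnings inside the monotone branch.

import numpy as np
from wafo.interpolate import slopes  # assumed import path

x = np.arange(7.0)
y = np.array([-1., -1., -1., 0., 1., 1., 1.])   # step-like data

yp_par = slopes(x, y, method='parabola')               # may overshoot
yp_mon = slopes(x, y, method='parabola', monotone=True)

print(yp_par)   # nonzero slopes next to the step
print(yp_mon)   # zero on the flat segments, limited slope at the step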
def stineman_interp(xi, x, y, yp=None): def stineman_interp(xi, x, y, yp=None):
""" """
Given data vectors *x* and *y*, the slope vector *yp* and a new Given data vectors *x* and *y*, the slope vector *yp* and a new
@ -752,14 +799,15 @@ def stineman_interp(xi, x, y, yp=None):
# calculate linear slopes # calculate linear slopes
dx = x[1:] - x[:-1] dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1] dy = y[1:] - y[:-1]
s = dy / dx #note length of s is N-1 so last element is #N-2 s = dy / dx # note length of s is N-1 so last element is #N-2
# find the segment each xi is in # find the segment each xi is in
# this line actually is the key to the efficiency of this implementation # this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi) idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1] # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1] # except at the boundaries, where it may be that xi[j] < x[0] or xi[j] >
# x[-1]
# the y-values that would come out from a linear interpolation: # the y-values that would come out from a linear interpolation:
sidx = s.take(idx) sidx = s.take(idx)
@ -769,62 +817,82 @@ def stineman_interp(xi, x, y, yp=None):
yo = yidx + sidx * (xi - xidx) yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp # the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx) - sidx) * (xi - xidx) # using the yp slope of the left point # using the yp slope of the left point
dy2 = (yp.take(idx + 1) - sidx) * (xi - xidxp1) # using the yp slope of the right point dy1 = (yp.take(idx) - sidx) * (xi - xidx)
# using the yp slope of the right point
dy2 = (yp.take(idx + 1) - sidx) * (xi - xidxp1)
dy1dy2 = dy1 * dy2 dy1dy2 = dy1 * dy2
# The following is optimized for Python. The solution actually # The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power # does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand # of numpy, this is far more efficient than coding a loop by hand
# in Python # in Python
dy1mdy2 = np.where(dy1dy2,dy1-dy2,np.inf) dy1mdy2 = np.where(dy1dy2, dy1 - dy2, np.inf)
dy1pdy2 = np.where(dy1dy2,dy1+dy2,np.inf) dy1pdy2 = np.where(dy1dy2, dy1 + dy2, np.inf)
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32) + 1, yi = yo + dy1dy2 * np.choose(
((2 * xi - xidx - xidxp1) / ((dy1mdy2) * (xidxp1 - xidx)), np.array(np.sign(dy1dy2), np.int32) + 1,
0.0, ((2 * xi - xidx - xidxp1) / ((dy1mdy2) * (xidxp1 - xidx)), 0.0,
1 / (dy1pdy2))) 1 / (dy1pdy2)))
return yi return yi
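A hedged usage sketch of stineman_interp(); the knot slopes are passed explicitly via slopes(), so nothing has to be assumed about the yp=None default (import path assumed):

import numpy as np
from wafo.interpolate import stineman_interp, slopes  # assumed import path

x = np.linspace(0, 2 * np.pi, 20)
y = np.sin(x)
xi = np.linspace(0, 2 * np.pi, 200)

yp = slopes(x, y, method='parabola')   # slopes at the knots
yi = stineman_interp(xi, x, y, yp)
print(np.abs(yi - np.sin(xi)).max())   # small interpolation error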
class StinemanInterp(object): class StinemanInterp(object):
''' '''
Returns the values of an interpolating function that runs through a set of points according to the algorithm of Stineman (1980). Returns an interpolating function
that runs through a set of points according to the algorithm of
Stineman (1980).
Parameters Parameters
--------- ----------
x,y : array-like x,y : array-like
coordinates of points defining the interpolating function. coordinates of points defining the interpolating function.
yp : array-like yp : array-like
slopes of the interpolating function at x. Optional: only given if they are known, else the argument is not used. slopes of the interpolating function at x.
Optional: only given if they are known, else the argument is not used.
method : string method : string
method for computing the slope at the given points if the slope is not known. With method= method for computing the slope at the given points if the slope is not
"parabola" calculates the slopes from a parabola through every three points. known. With method= "parabola" calculates the slopes from a parabola
through every three points.
Notes Notes
----- -----
The interpolation method is described in an article by Russell W. Stineman (1980) The interpolation method is described by Russell W. Stineman (1980)
According to Stineman, the interpolation procedure has "the following properties: According to Stineman, the interpolation procedure has "the following
properties:
If values of the ordinates of the specified points change monotonically, and the slopes of the line segments joining
the points change monotonically, then the interpolating curve and its slope will change monotonically. If values of the ordinates of the specified points change monotonically,
If the slopes of the line segments joining the specified points change monotonically, then the slopes of the interpolating and the slopes of the line segments joining the points change
curve will change monotonically. Suppose that the conditions in (1) or (2) are satisfied by a set of points, but a small monotonically, then the interpolating curve and its slope will change
change in the ordinate or slope at one of the points will result conditions (1) or (2) being not longer satisfied. Then monotonically. If the slopes of the line segments joining the specified
making this small change in the ordinate or slope at a point will cause no more than a small change in the interpolating points change monotonically, then the slopes of the interpolating curve
curve." The method is based on rational interpolation with specially chosen rational functions to satisfy the above three will change monotonically. Suppose that the conditions in (1) or (2) are
conditions. satisfied by a set of points, but a small change in the ordinate or slope
at one of the points will result in conditions (1) or (2) no longer being
Slopes computed at the given points with the methods provided by the `StinemanInterp' function satisfy Stineman's requirements. satisfied. Then making this small change in the ordinate or slope at a
The original method suggested by Stineman (method="scaledstineman", the default, and "stineman") result in lower slopes near point will cause no more than a small change in the interpolating
abrupt steps or spikes in the point sequence, and therefore a smaller tendency for overshooting. The method based on a second curve." The method is based on rational interpolation with specially chosen
degree polynomial (method="parabola") provides better approximation to smooth functions, but it results in in higher slopes rational functions to satisfy the above three conditions.
near abrupt steps or spikes and can lead to some overshooting where Stineman's method does not. Both methods lead to much
less tendency for `spurious' oscillations than traditional interplation methods based on polynomials, such as splines Slopes computed at the given points with the methods provided by the
`StinemanInterp' function satisfy Stineman's requirements.
The original method suggested by Stineman (method="scaledstineman", the
default, and "stineman") results in lower slopes near abrupt steps or spikes
in the point sequence, and therefore a smaller tendency for overshooting.
The method based on a second degree polynomial (method="parabola") provides
a better approximation to smooth functions, but it results in higher
slopes near abrupt steps or spikes and can lead to some overshooting where
Stineman's method does not. Both methods show much less tendency for
`spurious' oscillations than traditional interpolation methods based on
polynomials, such as splines
(see the examples section). (see the examples section).
Stineman states that "The complete assurance that the procedure will never generate `wild' points makes it attractive as a Stineman states that "The complete assurance that the procedure will never
general purpose procedure". generate `wild' points makes it attractive as a general purpose procedure".
This interpolation method has been implemented in Matlab and R in addition to Python. This interpolation method has been implemented in Matlab and R in addition
to Python.
Examples Examples
-------- --------
@ -840,17 +908,20 @@ class StinemanInterp(object):
>>> h=plt.subplot(211) >>> h=plt.subplot(211)
>>> h=plt.plot(x,y,'o',xi,yi,'r', xi,yi1, 'g', xi,yi1, 'b') >>> h=plt.plot(x,y,'o',xi,yi,'r', xi,yi1, 'g', xi,yi1, 'b')
>>> h=plt.subplot(212) >>> h=plt.subplot(212)
>>> h=plt.plot(xi,np.abs(sin(xi)-yi), 'r', xi, np.abs(sin(xi)-yi1), 'g', xi, np.abs(sin(xi)-yi2), 'b') >>> h=plt.plot(xi,np.abs(sin(xi)-yi), 'r',
... xi, np.abs(sin(xi)-yi1), 'g',
... xi, np.abs(sin(xi)-yi2), 'b')
References References
---------- ----------
Stineman, R. W. A Consistently Well Behaved Method of Interpolation. Creative Computing (1980), volume 6, number 7, p. 54-57. Stineman, R. W. A Consistently Well Behaved Method of Interpolation.
Creative Computing (1980), volume 6, number 7, p. 54-57.
See Also See Also
-------- --------
slopes, Pchip slopes, Pchip
''' '''
def __init__(self, x,y,yp=None,method='parabola', monotone=False): def __init__(self, x, y, yp=None, method='parabola', monotone=False):
if yp is None: if yp is None:
yp = slopes(x, y, method, monotone) yp = slopes(x, y, method, monotone)
self.x = np.asarray(x, np.float_) self.x = np.asarray(x, np.float_)
@ -865,14 +936,16 @@ class StinemanInterp(object):
# calculate linear slopes # calculate linear slopes
dx = x[1:] - x[:-1] dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1] dy = y[1:] - y[:-1]
s = dy / dx #note length of s is N-1 so last element is #N-2 s = dy / dx # note length of s is N-1 so last element is #N-2
# find the segment each xi is in # find the segment each xi is in
# this line actually is the key to the efficiency of this implementation # this line actually is the key to the efficiency of this
# implementation
idx = np.searchsorted(x[1:-1], xi) idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1] # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1] # except at the boundaries, where it may be that xi[j] < x[0] or xi[j]
# > x[-1]
# the y-values that would come out from a linear interpolation: # the y-values that would come out from a linear interpolation:
sidx = s.take(idx) sidx = s.take(idx)
@ -882,39 +955,48 @@ class StinemanInterp(object):
yo = yidx + sidx * (xi - xidx) yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp # the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx) - sidx) * (xi - xidx) # using the yp slope of the left point # using the yp slope of the left point
dy2 = (yp.take(idx + 1) - sidx) * (xi - xidxp1) # using the yp slope of the right point dy1 = (yp.take(idx) - sidx) * (xi - xidx)
# using the yp slope of the right point
dy2 = (yp.take(idx + 1) - sidx) * (xi - xidxp1)
dy1dy2 = dy1 * dy2 dy1dy2 = dy1 * dy2
# The following is optimized for Python. The solution actually # The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power # does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand # of numpy, this is far more efficient than coding a loop by hand
# in Python # in Python
dy1mdy2 = np.where(dy1dy2,dy1-dy2,np.inf) dy1mdy2 = np.where(dy1dy2, dy1 - dy2, np.inf)
dy1pdy2 = np.where(dy1dy2,dy1+dy2,np.inf) dy1pdy2 = np.where(dy1dy2, dy1 + dy2, np.inf)
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32) + 1, yi = yo + dy1dy2 * np.choose(
((2 * xi - xidx - xidxp1) / ((dy1mdy2) * (xidxp1 - xidx)), np.array(np.sign(dy1dy2), np.int32) + 1,
0.0, ((2 * xi - xidx - xidxp1) / ((dy1mdy2) * (xidxp1 - xidx)), 0.0,
1 / (dy1pdy2))) 1 / (dy1pdy2)))
return yi return yi
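Since __call__ above repeats the computation of stineman_interp(), a hedged consistency check is that the class and the function agree when given the same slopes (import path assumed):

import numpy as np
from wafo.interpolate import StinemanInterp, stineman_interp, slopes

x = np.linspace(0, 2 * np.pi, 20)
y = np.sin(x)
xi = np.linspace(0, 2 * np.pi, 101)

yp = slopes(x, y, method='parabola')
print(np.allclose(StinemanInterp(x, y, yp)(xi),
                  stineman_interp(xi, x, y, yp)))   # expected: True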
class StinemanInterp2(PiecewisePolynomial): class StinemanInterp2(PiecewisePolynomial):
def __init__(self, x, y, yp=None, method='parabola', monotone=False): def __init__(self, x, y, yp=None, method='parabola', monotone=False):
if yp is None: if yp is None:
yp = slopes(x, y, method, monotone=monotone) yp = slopes(x, y, method, monotone=monotone)
super(StinemanInterp2,self).__init__(x, zip(y,yp)) super(StinemanInterp2, self).__init__(x, zip(y, yp))
class CubicHermiteSpline(PiecewisePolynomial): class CubicHermiteSpline(PiecewisePolynomial):
''' '''
Piecewise Cubic Hermite Interpolation using Catmull-Rom Piecewise Cubic Hermite Interpolation using Catmull-Rom
method for computing the slopes. method for computing the slopes.
''' '''
def __init__(self, x, y, yp=None, method='Catmull-Rom'): def __init__(self, x, y, yp=None, method='Catmull-Rom'):
if yp is None: if yp is None:
yp = slopes(x, y, method, monotone=False) yp = slopes(x, y, method, monotone=False)
super(CubicHermiteSpline, self).__init__(x, zip(y,yp), orders=3) super(CubicHermiteSpline, self).__init__(x, zip(y, yp), orders=3)
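A hedged sketch of CubicHermiteSpline on smooth data; it assumes the scipy PiecewisePolynomial base class used by this module is available and that the class is importable from wafo.interpolate:

import numpy as np
from wafo.interpolate import CubicHermiteSpline  # assumed import path

x = np.linspace(0, 2 * np.pi, 20)
y = np.sin(x)
xi = np.linspace(0, 2 * np.pi, 200)

chs = CubicHermiteSpline(x, y)   # slopes estimated with 'Catmull-Rom'
print(np.abs(chs(xi) - np.sin(xi)).max())   # small error on smooth data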
class Pchip(PiecewisePolynomial): class Pchip(PiecewisePolynomial):
"""PCHIP 1-d monotonic cubic interpolation """PCHIP 1-d monotonic cubic interpolation
Description Description
@ -933,10 +1015,12 @@ class Pchip(PiecewisePolynomial):
A 1-D array of real values. y's length along the interpolation A 1-D array of real values. y's length along the interpolation
axis must be equal to the length of x. axis must be equal to the length of x.
yp : array yp : array
slopes of the interpolating function at x. Optional: only given if they are known, else the argument is not used. slopes of the interpolating function at x.
Optional: only given if they are known, else the argument is not used.
method : string method : string
method for computing the slope at the given points if the slope is not known. With method= method for computing the slope at the given points if the slope is not
"parabola" calculates the slopes from a parabola through every three points. known. With method="parabola" calculates the slopes from a parabola
through every three points.
Assumes x is sorted in monotonic order (e.g. x[1] > x[0]) Assumes x is sorted in monotonic order (e.g. x[1] > x[0])
@ -980,14 +1064,16 @@ class Pchip(PiecewisePolynomial):
>>> plt.show() >>> plt.show()
""" """
def __init__(self, x, y, yp=None, method='secant'): def __init__(self, x, y, yp=None, method='secant'):
if yp is None: if yp is None:
yp = slopes(x, y, method=method, monotone=True) yp = slopes(x, y, method=method, monotone=True)
super(Pchip, self).__init__(x, zip(y,yp), orders=3) super(Pchip, self).__init__(x, zip(y, yp), orders=3)
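A hedged sketch contrasting Pchip with the non-monotone Catmull-Rom spline above on the step data used later in demo_monoticity(); Pchip should stay within [-1, 1] while the Catmull-Rom variant undershoots (import path assumed):

import numpy as np
from wafo.interpolate import Pchip, CubicHermiteSpline

x = np.arange(7.0) - 3.0
y = np.array([-1.0, -1, -1, 0, 1, 1, 1])
xi = np.linspace(-3, 3, 601)

yi_pchip = Pchip(x, y)(xi)
yi_chs = CubicHermiteSpline(x, y)(xi)
print(yi_pchip.min(), yi_pchip.max())   # stays within [-1, 1]
print(yi_chs.min(), yi_chs.max())       # undershoots below -1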
def test_smoothing_spline(): def test_smoothing_spline():
x = linspace(0, 2 * pi + pi / 4, 20) x = linspace(0, 2 * pi + pi / 4, 20)
y = sin(x) #+ np.random.randn(x.size) y = sin(x) # + np.random.randn(x.size)
pp = SmoothSpline(x, y, p=1) pp = SmoothSpline(x, y, p=1)
x1 = linspace(-1, 2 * pi + pi / 4 + 1, 20) x1 = linspace(-1, 2 * pi + pi / 4 + 1, 20)
y1 = pp(x1) y1 = pp(x1)
@ -1003,17 +1089,18 @@ def test_smoothing_spline():
pass pass
#tck = interpolate.splrep(x, y, s=len(x)) #tck = interpolate.splrep(x, y, s=len(x))
def compare_methods(): def compare_methods():
############################################################ #
# Sine wave test # Sine wave test
############################################################ #
fun = np.sin fun = np.sin
# Create an example vector containing a sine wave. # Create an example vector containing a sine wave.
x = np.arange(30.0)/10. x = np.arange(30.0) / 10.
y = fun(x) y = fun(x)
# Interpolate the data above to the grid defined by "xvec" # Interpolate the data above to the grid defined by "xvec"
xvec = np.arange(250.)/100. xvec = np.arange(250.) / 100.
# Initialize the interpolator slopes # Initialize the interpolator slopes
# Create the pchip slopes # Create the pchip slopes
@ -1031,17 +1118,18 @@ def compare_methods():
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
plt.figure() plt.figure()
plt.plot(x,y, 'ro', xvec, fun(xvec),'r') plt.plot(x, y, 'ro', xvec, fun(xvec), 'r')
plt.title("pchip() Sin test code") plt.title("pchip() Sin test code")
# Plot the interpolated points # Plot the interpolated points
plt.plot(xvec, yvec, xvec, yvec1, xvec, yvec2,'g.',xvec, yvec3) plt.plot(xvec, yvec, xvec, yvec1, xvec, yvec2, 'g.', xvec, yvec3)
plt.legend(['true','true','parbola_monoton','parabola','catmul','pchip'], frameon=False, loc=0) plt.legend(
['true', 'true', 'parabola_monotone', 'parabola', 'catmull', 'pchip'],
frameon=False, loc=0)
plt.ioff() plt.ioff()
plt.show() plt.show()
def demo_monoticity(): def demo_monoticity():
# Step function test... # Step function test...
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
@ -1049,13 +1137,13 @@ def demo_monoticity():
plt.title("pchip() step function test") plt.title("pchip() step function test")
# Create a step function (will demonstrate monotonicity) # Create a step function (will demonstrate monotonicity)
x = np.arange(7.0) - 3.0 x = np.arange(7.0) - 3.0
y = np.array([-1.0, -1,-1,0,1,1,1]) y = np.array([-1.0, -1, -1, 0, 1, 1, 1])
# Interpolate using monotonic piecewise Hermite cubic spline # Interpolate using monotonic piecewise Hermite cubic spline
xvec = np.arange(599.)/100. - 3.0 xvec = np.arange(599.) / 100. - 3.0
# Create the pchip slopes # Create the pchip slopes
m = slopes(x,y, monotone=True) m = slopes(x, y, monotone=True)
# m1 = slopes(x, y, monotone=False) # m1 = slopes(x, y, monotone=False)
# m2 = slopes(x,y,method='catmul',monotone=False) # m2 = slopes(x,y,method='catmul',monotone=False)
m3 = pchip_slopes(x, y) m3 = pchip_slopes(x, y)
@ -1069,9 +1157,9 @@ def demo_monoticity():
# Non-monotonic cubic Hermite spline interpolator using # Non-monotonic cubic Hermite spline interpolator using
# Catmull-Rom method for computing slopes... # Catmull-Rom method for computing slopes...
yvec3 = CubicHermiteSpline(x,y)(xvec) yvec3 = CubicHermiteSpline(x, y)(xvec)
yvec4 = StinemanInterp(x, y)(xvec) yvec4 = StinemanInterp(x, y)(xvec)
yvec5 = Pchip(x, y, m3)(xvec) #@UnusedVariable yvec5 = Pchip(x, y, m3)(xvec) # @UnusedVariable
# Plot the results # Plot the results
plt.plot(x, y, 'ro', label='Data') plt.plot(x, y, 'ro', label='Data')
@ -1088,49 +1176,54 @@ def demo_monoticity():
plt.ioff() plt.ioff()
plt.show() plt.show()
def test_doctstrings():
def test_func():
from scipy import interpolate from scipy import interpolate
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import matplotlib import matplotlib
matplotlib.interactive(True) matplotlib.interactive(False)
coef = np.array([[1, 1], [0, 1]]) # linear from 0 to 2 coef = np.array([[1, 1], [0, 1]]) # linear from 0 to 2
#coef = np.array([[1,1],[1,1],[0,2]]) # linear from 0 to 2 # coef = np.array([[1,1],[1,1],[0,2]]) # linear from 0 to 2
breaks = [0, 1, 2] breaks = [0, 1, 2]
pp = PPform(coef, breaks, a= -100, b=100) pp = PPform(coef, breaks, a=-100, b=100)
x = linspace(-1, 3, 20) x = linspace(-1, 3, 20)
y = pp(x) #@UnusedVariable y = pp(x) # @UnusedVariable
x = linspace(0, 2 * pi + pi / 4, 20) x = linspace(0, 2 * pi + pi / 4, 20)
y = x + np.random.randn(x.size) y = sin(x) + np.random.randn(x.size)
tck = interpolate.splrep(x, y, s=len(x)) tck = interpolate.splrep(x, y, s=len(x)) # @UndefinedVariable
xnew = linspace(0, 2 * pi, 100) xnew = linspace(0, 2 * pi, 100)
ynew = interpolate.splev(xnew, tck, der=0) ynew = interpolate.splev(xnew, tck, der=0) # @UndefinedVariable
tck0 = interpolate.splmake(xnew, ynew, order=3, kind='smoothest', conds=None) tck0 = interpolate.splmake( # @UndefinedVariable
pp = interpolate.ppform.fromspline(*tck0) xnew, ynew, order=3, kind='smoothest', conds=None)
pp = interpolate.ppform.fromspline(*tck0) # @UndefinedVariable
plt.plot(x, y, "x", xnew, ynew, xnew, sin(xnew), x, y, "b") plt.plot(x, y, "x", xnew, ynew, xnew, sin(xnew), x, y, "b", x, pp(x), 'g')
plt.legend(['Linear', 'Cubic Spline', 'True']) plt.legend(['Linear', 'Cubic Spline', 'True'])
plt.title('Cubic-spline interpolation') plt.title('Cubic-spline interpolation')
plt.show()
t = np.arange(0, 1.1, .1) t = np.arange(0, 1.1, .1)
x = np.sin(2 * np.pi * t) x = np.sin(2 * np.pi * t)
y = np.cos(2 * np.pi * t) y = np.cos(2 * np.pi * t)
tck1, u = interpolate.splprep([t, y], s=0) #@UnusedVariable _tck1, _u = interpolate.splprep([t, y], s=0) # @UndefinedVariable
tck2 = interpolate.splrep(t, y, s=len(t), task=0) tck2 = interpolate.splrep(t, y, s=len(t), task=0) # @UndefinedVariable
#interpolate.spl # interpolate.spl
tck = interpolate.splmake(t, y, order=3, kind='smoothest', conds=None) tck = interpolate.splmake(t, y, order=3, kind='smoothest', conds=None) # @UndefinedVariable
self = interpolate.ppform.fromspline(*tck2) self = interpolate.ppform.fromspline(*tck2) # @UndefinedVariable
plt.plot(t, self(t)) plt.plot(t, self(t))
plt.show()
pass pass
def test_pp(): def test_pp():
coef = np.array([[1, 1], [0, 0]]) # linear from 0 to 2 @UnusedVariable coef = np.array([[1, 1], [0, 0]]) # linear from 0 to 2 @UnusedVariable
coef = np.array([[1, 1], [1, 1], [0, 2]]) # quadratic from 0 to 1 and 1 to 2. # quadratic from 0 to 1 and 1 to 2.
coef = np.array([[1, 1], [1, 1], [0, 2]])
dc = pl.polyder(coef, 1) dc = pl.polyder(coef, 1)
c2 = pl.polyint(dc, 1) #@UnusedVariable c2 = pl.polyint(dc, 1) # @UnusedVariable
breaks = [0, 1, 2] breaks = [0, 1, 2]
pp = PPform(coef, breaks) pp = PPform(coef, breaks)
pp(0.5) pp(0.5)
@ -1149,8 +1242,8 @@ def test_docstrings():
if __name__ == '__main__': if __name__ == '__main__':
# test_docstrings() test_func()
#test_doctstrings() # test_doctstrings()
#test_smoothing_spline() # test_smoothing_spline()
#compare_methods() # compare_methods()
demo_monoticity() #demo_monoticity()

File diff suppressed because it is too large

@ -6,10 +6,11 @@ Created on Tue Apr 17 13:59:12 2012
""" """
import numpy as np import numpy as np
def magic(n): def magic(n):
ix = np.arange(n)+1 ix = np.arange(n) + 1
J, I = np.meshgrid(ix,ix) J, I = np.meshgrid(ix, ix)
A = np.mod(I+J-(n+3)/2,n) A = np.mod(I + J - (n + 3) / 2, n)
B = np.mod(I+2*J-2,n) B = np.mod(I + 2 * J - 2, n)
M = n*A + B + 1 M = n * A + B + 1
return M return M
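This modular construction is the classic formula for odd-order magic squares; a hedged, self-contained check (the function is restated locally because the module's import path is not shown here):

import numpy as np

def magic(n):
    # same construction as above; valid for odd n
    ix = np.arange(n) + 1
    J, I = np.meshgrid(ix, ix)
    A = np.mod(I + J - (n + 3) / 2, n)
    B = np.mod(I + 2 * J - 2, n)
    return n * A + B + 1

M = magic(5)
target = 5 * (5 ** 2 + 1) // 2   # magic constant: 65
print(np.all(M.sum(axis=0) == target))    # column sums
print(np.all(M.sum(axis=1) == target))    # row sums
print(np.trace(M) == target)              # main diagonal
print(np.trace(np.fliplr(M)) == target)   # anti-diagonal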

@ -1,4 +1,6 @@
import numpy as np import numpy as np
def meshgrid(*xi, **kwargs): def meshgrid(*xi, **kwargs):
""" """
Return coordinate matrices from one or more coordinate vectors. Return coordinate matrices from one or more coordinate vectors.
@ -33,10 +35,10 @@ def meshgrid(*xi, **kwargs):
Notes Notes
----- -----
This function supports both indexing conventions through the indexing keyword This function supports both indexing conventions through the indexing
argument. Giving the string 'ij' returns a meshgrid with matrix indexing, keyword argument. Giving the string 'ij' returns a meshgrid with matrix
while 'xy' returns a meshgrid with Cartesian indexing. The difference is indexing, while 'xy' returns a meshgrid with Cartesian indexing. The
illustrated by the following code snippet: difference is illustrated by the following code snippet:
xv, yv = meshgrid(x, y, sparse=False, indexing='ij') xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx): for i in range(nx):
@ -88,21 +90,23 @@ def meshgrid(*xi, **kwargs):
args = np.atleast_1d(*xi) args = np.atleast_1d(*xi)
ndim = len(args) ndim = len(args)
if not isinstance(args, list) or ndim<2: if not isinstance(args, list) or ndim < 2:
raise TypeError('meshgrid() takes 2 or more arguments (%d given)' % int(ndim>0)) raise TypeError(
'meshgrid() takes 2 or more arguments (%d given)' % int(ndim > 0))
sparse = kwargs.get('sparse', False) sparse = kwargs.get('sparse', False)
indexing = kwargs.get('indexing', 'xy') indexing = kwargs.get('indexing', 'xy')
s0 = (1,)*ndim s0 = (1,) * ndim
output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::]) for i, x in enumerate(args)] output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(args)]
shape = [x.size for x in output] shape = [x.size for x in output]
if indexing == 'xy': if indexing == 'xy':
# switch first and second axis # switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2) output[0].shape = (1, -1) + (1,) * (ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2) output[1].shape = (-1, 1) + (1,) * (ndim - 2)
shape[0], shape[1] = shape[1], shape[0] shape[0], shape[1] = shape[1], shape[0]
if sparse: if sparse:
@ -130,4 +134,3 @@ def ndgrid(*args, **kwargs):
if __name__ == '__main__': if __name__ == '__main__':
import doctest import doctest
doctest.testmod() doctest.testmod()
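For comparison, a hedged sketch of the two indexing conventions using numpy's own meshgrid, which nowadays exposes the same indexing and sparse keywords as the function above:

import numpy as np

x = np.arange(3)    # nx = 3
y = np.arange(4)    # ny = 4

xv, yv = np.meshgrid(x, y, indexing='xy')   # shapes (ny, nx) = (4, 3)
xi, yi = np.meshgrid(x, y, indexing='ij')   # shapes (nx, ny) = (3, 4)
print(xv.shape, xi.shape)

xs, ys = np.meshgrid(x, y, sparse=True)     # broadcastable views
print(xs.shape, ys.shape)                   # (1, 3) and (4, 1)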

File diff suppressed because it is too large