master
Per A Brodtkorb 7 years ago
parent 888afd48fb
commit 1fa0dc5217

@@ -523,13 +523,13 @@ def transformdata_1d(x, f, plotflag):
     transform_id = np.mod(plotflag // 10, 10)
     transform = [lambda f, x: f,
                  lambda f, x: 1 - f,
                  lambda f, x: cumtrapz(f, x),
                  lambda f, x: 1 - cumtrapz(f, x),
                  lambda f, x: np.log(f),
                  lambda f, x: np.log1p(-f),
                  lambda f, x: np.log(cumtrapz(f, x)),
                  lambda f, x: np.log1p(-cumtrapz(f, x)),
-                 lambda f, x: 10*np.log10(f)
+                 lambda f, x: 10 * np.log10(f)
                  ][transform_id]
     return transform(f, x)
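
The list above maps the tens digit of plotflag to a data transform applied before plotting; index 2, for example, turns a plotted density into its cumulative integral. A minimal sketch of that dispatch, assuming a Gaussian density as input and using scipy's cumulative_trapezoid (imported as cumtrapz in the module):

    import numpy as np
    from scipy.integrate import cumulative_trapezoid  # named cumtrapz in older scipy

    x = np.linspace(-4, 4, 401)
    f = np.exp(-x ** 2 / 2) / np.sqrt(2 * np.pi)   # density to be plotted

    plotflag = 20                                  # tens digit 2 -> cumulative transform
    transform_id = np.mod(plotflag // 10, 10)
    if transform_id == 2:
        f = cumulative_trapezoid(f, x, initial=0)  # approximates the CDF
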
@@ -622,9 +622,9 @@ def plot2d(axis, wdata, plotflag, *args, **kwds):
 def test_plotdata():
     plt.ioff()
     x = np.linspace(0, np.pi, 9)
-    xi = np.linspace(0, np.pi, 4*9)
-    d = PlotData(np.sin(x)/2, x, dataCI=[], xlab='x', ylab='sin',
+    xi = np.linspace(0, np.pi, 4 * 9)
+    d = PlotData(np.sin(x) / 2, x, dataCI=[], xlab='x', ylab='sin',
                  title='sinus', plot_args=['r.'])
     di = PlotData(d.eval_points(xi, method='cubic'), xi)
     unused_hi = di.plot()

@@ -42,7 +42,7 @@ def _set_seed(iseed):
     if iseed is not None:
         try:
             random.set_state(iseed)
-        except:
+        except Exception:
             random.seed(iseed)

@@ -347,7 +347,6 @@ def shiftdim(x, n=None):
         return x.reshape(no_leading_ones(x.shape))
     elif n >= 0:
         return x.transpose(np.roll(range(x.ndim), -n))
-    else:
-        return x.reshape((1,) * -n + x.shape)
+    return x.reshape((1,) * -n + x.shape)
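
shiftdim follows the MATLAB convention: with no n it strips leading singleton dimensions, with n >= 0 it rotates the axes left by n, and with n < 0 it prepends -n singleton dimensions. A small NumPy sketch of the three branches (the no_leading_ones helper is outside this hunk):

    import numpy as np

    a = np.ones((1, 1, 3, 4))
    a.reshape(a.shape[2:]).shape                    # (3, 4): drop the leading ones

    b = np.ones((2, 3, 4))
    b.transpose(np.roll(range(b.ndim), -1)).shape   # (3, 4, 2): shift axes left by 1

    c = np.ones((3, 4))
    c.reshape((1,) * 2 + c.shape).shape             # (1, 1, 3, 4): prepend 2 singletons
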
@@ -396,7 +395,7 @@ def example_dct2(debug=True):
     plt.imshow(np.log(np.abs(J)))
     # plt.show('hold')
     rgb_10 = idctn(J)
-    out['diff_rgb_rgb_10'] = np.abs(rgb-rgb_10).max()
+    out['diff_rgb_rgb_10'] = np.abs(rgb - rgb_10).max()
     plt.figure(3)
     plt.imshow(rgb)
     plt.figure(4)
@@ -405,6 +404,7 @@ def example_dct2(debug=True):
     print(out)
     return out

+
 if __name__ == '__main__':
     from wafo.testing import test_docstrings
     test_docstrings(__file__)

@@ -1021,11 +1021,11 @@ class _Gaussq(object):
         # Break out of the iteration loop for three reasons:
         # 1) the last update is very small (compared to int and to releps)
         # 2) There are more than 11 iterations. This should NEVER happen.
-        dtype = np.result_type(fun((a_0+b_0)*0.5, *args))
+        dtype = np.result_type(fun((a_0 + b_0) * 0.5, *args))
         n_k = np.prod(a_shape)  # # of integrals we have to compute
         k = np.arange(n_k)
         opt = (n_k, dtype)
-        val, val_old, abserr = zeros(*opt), np.nan*ones(*opt), 1e100*ones(*opt)
+        val, val_old, abserr = zeros(*opt), np.nan * ones(*opt), 1e100 * ones(*opt)
         nodes_and_weights = self._nodes_and_weights
         for i in range(max_iter):
             x_n, weights = nodes_and_weights(num_nodes, wfun, alpha, beta)
@@ -1053,6 +1053,8 @@ class _Gaussq(object):
             self._plot_final_trace()
         return val, abserr

+
+
 gaussq = _Gaussq()
@@ -1190,7 +1192,7 @@ class _Quadgr(object):
         return q_val, err

     def _integrate(self, fun, a, b, abseps, max_iter):
-        dtype = np.result_type(fun((a+b)/2), fun((a+b)/4))
+        dtype = np.result_type(fun((a + b) / 2), fun((a + b) / 4))

         # Initiate vectors
         val0 = zeros(max_iter, dtype=dtype)  # Quadrature
@@ -1259,6 +1261,7 @@ class _Quadgr(object):
         return q_val, err

+
 quadgr = _Quadgr()

@@ -55,6 +55,8 @@ class PolyBasis(object):
     def __call__(self, t, k):
         return t**k

+
+
 poly_basis = PolyBasis()
@@ -73,6 +75,8 @@ class ChebyshevBasis(PolyBasis):
     def __call__(self, t, k):
         c = self._coefficients(k)
         return self.eval(t, c)

+
+
 chebyshev_basis = ChebyshevBasis()
@@ -97,10 +101,10 @@ def evans_webster_weights(omega, g, d_g, x, basis, *args, **kwds):
     dbasis = basis.derivative
     lim_g = Limit(g)
-    b_1 = np.exp(j_w*lim_g(1, *args, **kwds))
+    b_1 = np.exp(j_w * lim_g(1, *args, **kwds))
     if np.isnan(b_1):
         b_1 = 0.0
-    a_1 = np.exp(j_w*lim_g(-1, *args, **kwds))
+    a_1 = np.exp(j_w * lim_g(-1, *args, **kwds))
     if np.isnan(a_1):
         a_1 = 0.0
@@ -159,14 +163,14 @@ class QuadOsc(_Integrator):
     @staticmethod
     def _change_interval_to_0_1(f, g, d_g, a, _b):
         def f_01(t, *args, **kwds):
-            den = 1-t
+            den = 1 - t
             return f(a + t / den, *args, **kwds) / den ** 2

         def g_01(t, *args, **kwds):
             return g(a + t / (1 - t), *args, **kwds)

         def d_g_01(t, *args, **kwds):
-            den = 1-t
+            den = 1 - t
             return d_g(a + t / den, *args, **kwds) / den ** 2
         return f_01, g_01, d_g_01, 0., 1.
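
The _change_interval_to_0_1 helper maps an integral over [a, inf) onto the unit interval with x = a + t/(1 - t), which brings the factor 1/(1 - t)**2 into the integrand. A quick numerical check of that substitution (scipy.integrate.quad is used here only for the demonstration):

    import numpy as np
    from scipy.integrate import quad

    a = 0.0

    def f(x):
        return np.exp(-x)                    # integral over [0, inf) equals 1

    def f_01(t):
        den = 1 - t
        return f(a + t / den) / den ** 2     # same change of variables as f_01 above

    val, _ = quad(f_01, 0.0, 1.0)
    print(val)                               # ~1.0
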
@@ -188,7 +192,7 @@ class QuadOsc(_Integrator):
     def _change_interval_to_m1_1(f, g, d_g, _a, _b):
         def f_m11(t, *args, **kwds):
             den = (1 - t**2)
-            return f(t / den, *args, **kwds) * (1+t**2) / den ** 2
+            return f(t / den, *args, **kwds) * (1 + t**2) / den ** 2

         def g_m11(t, *args, **kwds):
             den = (1 - t**2)
@@ -196,7 +200,7 @@ class QuadOsc(_Integrator):
         def d_g_m11(t, *args, **kwds):
             den = (1 - t**2)
-            return d_g(t / den, *args, **kwds) * (1+t**2) / den ** 2
+            return d_g(t / den, *args, **kwds) * (1 + t**2) / den ** 2
         return f_m11, g_m11, d_g_m11, -1., 1.

     def _get_functions(self):
@@ -332,33 +336,33 @@ def adaptive_levin_points(m, delta):
 def open_levin_points(m, delta):
-    return adaptive_levin_points(m+2, delta)[1:-1]
+    return adaptive_levin_points(m + 2, delta)[1:-1]

 def chebyshev_extrema(m, delta=None):
     k = np.arange(m)
-    x = np.cos(k * np.pi / (m-1))
+    x = np.cos(k * np.pi / (m - 1))
     return x

 def tanh_sinh_nodes(m, delta=None, tol=_EPS):
-    tmax = np.arcsinh(np.arctanh(1-_EPS)*2/np.pi)
+    tmax = np.arcsinh(np.arctanh(1 - _EPS) * 2 / np.pi)
     # tmax = 3.18
-    m_1 = int(np.floor(-np.log2(tmax/max(m-1, 1)))) - 1
+    m_1 = int(np.floor(-np.log2(tmax / max(m - 1, 1)))) - 1
     h = 2.0**-m_1
-    t = np.arange((m+1)//2+1)*h
-    x = np.tanh(np.pi/2*np.sinh(t))
-    k = np.flatnonzero(np.abs(x - 1) <= 10*tol)
-    y = x[:k[0]+1] if len(k) else x
+    t = np.arange((m + 1) // 2 + 1) * h
+    x = np.tanh(np.pi / 2 * np.sinh(t))
+    k = np.flatnonzero(np.abs(x - 1) <= 10 * tol)
+    y = x[:k[0] + 1] if len(k) else x
     return np.hstack((-y[:0:-1], y))

 def tanh_sinh_open_nodes(m, delta=None, tol=_EPS):
-    return tanh_sinh_nodes(m+1, delta, tol)[1:-1]
+    return tanh_sinh_nodes(m + 1, delta, tol)[1:-1]

 def chebyshev_roots(m, delta=None):
-    k = np.arange(1, 2*m, 2) * 0.5
+    k = np.arange(1, 2 * m, 2) * 0.5
     x = np.cos(k * np.pi / m)
     return x
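
Three node families appear above: Chebyshev extrema cos(k*pi/(m - 1)), Chebyshev roots cos((2k + 1)*pi/(2m)), and tanh-sinh (double exponential) points tanh(pi/2*sinh(t)), which cluster towards the endpoints. A small sketch, with m chosen arbitrarily, showing that all of them stay inside [-1, 1]:

    import numpy as np

    m = 9
    x_extrema = np.cos(np.arange(m) * np.pi / (m - 1))
    x_roots = np.cos(np.arange(1, 2 * m, 2) * 0.5 * np.pi / m)
    x_tanh_sinh = np.tanh(np.pi / 2 * np.sinh(np.linspace(-3, 3, m)))

    for nodes in (x_extrema, x_roots, x_tanh_sinh):
        assert np.all(np.abs(nodes) <= 1.0)
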
@@ -390,9 +394,9 @@ class AdaptiveLevin(_Integrator):
             rhs[j] = dff(t, *args, **kwds)
             d_psi.fun.n = order
             for k in range(n):
-                a_matrix[j, k] = (dbasis(t, k, n=order+1) +
+                a_matrix[j, k] = (dbasis(t, k, n=order + 1) +
                                   j_w * d_psi(t, k))
-        k1 = np.flatnonzero(1-np.isfinite(rhs))
+        k1 = np.flatnonzero(1 - np.isfinite(rhs))
         if k1.size > 0:  # Remove singularities
             warnings.warn('Singularities detected! ')
             a_matrix[k1] = 0
@@ -487,8 +491,8 @@ class AdaptiveLevin(_Integrator):
         points = open_levin_points  # tanh_sinh_open_nodes
         m = self._get_num_points(s, prec, betam)
-        abseps = 10*10.0**-prec
-        num_collocation_point_list = m*2**np.arange(1, 5) + 1
+        abseps = 10 * 10.0**-prec
+        num_collocation_point_list = m * 2**np.arange(1, 5) + 1
         basis = self.basis

         q_val = 1e+300
@@ -503,7 +507,7 @@ class AdaptiveLevin(_Integrator):
             q_val = self._a_levin(omega, ff, gg, dgg, x, s, basis, *args,
                                   **kwds)
             num_function_evaluations += n
-            err = np.abs(q_val-q_old)
+            err = np.abs(q_val - q_old)
             if err <= abseps:
                 break
         info = self.info(err, num_function_evaluations)
@@ -524,7 +528,7 @@ class EvansWebster(AdaptiveLevin):
         w = evans_webster_weights(omega, gg, dgg, x, basis, *args, **kwds)

         f = Limit(ff)(x, *args, **kwds)
-        return np.sum(f*w)
+        return np.sum(f * w)

     def _get_num_points(self, s, prec, betam):
         return 8 if s > 1 else int(prec / max(np.log10(betam + 1), 1) + 1)

@@ -227,7 +227,7 @@ def sgolay2d(z, window_size, order, derivative=None):
     zout[:size, size:-size] = band - np.abs(z[size:0:-1, :] - band)
     # bottom band
     band = z[-1, :]
-    zout[-size:, size:-size] = band + np.abs(z[-2:-size-2:-1, :] - band)
+    zout[-size:, size:-size] = band + np.abs(z[-2:-size - 2:-1, :] - band)
     # left band
     band = z[:, 0].reshape(-1, 1)
     zout[size:-size, :size] = band - np.abs(z[:, size:0:-1] - band)
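
The bands above extend the image past its borders by reflecting about the edge value (band +/- |z[...] - band|), so the padding continues the local trend instead of mirroring raw values. A one dimensional sketch of the same idea, with a made-up signal:

    import numpy as np

    z = np.array([1.0, 2.0, 4.0, 7.0, 11.0])
    size = 2

    band = z[-1]                                      # bottom/right edge value
    lower = band + np.abs(z[-2:-size - 2:-1] - band)  # -> [15., 18.]

    band = z[0]                                       # top/left edge value
    upper = band - np.abs(z[size:0:-1] - band)        # -> [-2.,  0.]
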

@@ -221,7 +221,7 @@ def kreg_demo1(hs=None, fast=True, fun='hisj'):
     va_1 = 0.3 ** 2
     va_2 = 0.7 ** 2
-    y0 = np.exp(-x ** 2 / (2 * va_1)) + 1.3*np.exp(-(x - 1) ** 2 / (2 * va_2))
+    y0 = np.exp(-x ** 2 / (2 * va_1)) + 1.3 * np.exp(-(x - 1) ** 2 / (2 * va_2))
     y = y0 + ei
     kernel = Kernel('gauss', fun=fun)
     hopt = kernel.hisj(x)

@@ -108,7 +108,7 @@ class _KDE(object):
         if xmin is None:
             xmin = self.dataset.min(axis=-1) - 2 * self.sigma
         # pylint: disable=attribute-defined-outside-init
-        self._xmin = self._check_xmin(xmin*np.ones(self.d))
+        self._xmin = self._check_xmin(xmin * np.ones(self.d))

     def _check_xmin(self, xmin):
         return xmin

@@ -325,6 +325,7 @@ class _KernelMulti(_Kernel):
     p=3; Multivariate Tri-weight Kernel
     p=4; Multivariate Four-weight Kernel
     """
+
     def __init__(self, r=1.0, p=1, stats=None, name=''):
         self.p = p
         super(_KernelMulti, self).__init__(r, stats, name)
@@ -342,6 +343,7 @@ class _KernelMulti(_Kernel):
         x2 = x ** 2
         return ((1.0 - x2.sum(axis=0) / r ** 2).clip(min=0.0)) ** p

+
 mkernel_epanechnikov = _KernelMulti(p=1, stats=_stats_epan,
                                     name='epanechnikov')
 mkernel_biweight = _KernelMulti(p=2, stats=_stats_biwe, name='biweight')
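
_KernelMulti implements the whole Epanechnikov/biweight/triweight family as ((1 - |x|^2/r^2)_+)^p; the norm_factor method (not shown in this hunk) supplies the normalising constant. An unnormalised sketch of how p changes the shape:

    import numpy as np

    def multi_kernel(x, r=1.0, p=1):
        x = np.atleast_2d(x)          # shape (d, n): one row per dimension
        x2 = x ** 2
        return ((1.0 - x2.sum(axis=0) / r ** 2).clip(min=0.0)) ** p

    u = np.linspace(-1.5, 1.5, 7)
    print(multi_kernel(u, p=1))       # Epanechnikov: zero outside |u| > 1
    print(multi_kernel(u, p=2))       # biweight: same support, smoother shoulders
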
@@ -356,6 +358,7 @@ class _KernelProduct(_KernelMulti):
     p=3; 1D product Tri-weight Kernel
     p=4; 1D product Four-weight Kernel
     """
+
     def norm_factor(self, d=1, n=None):
         r = self.r
         p = self.p
@@ -368,6 +371,7 @@ class _KernelProduct(_KernelMulti):
         pdf = (1 - (x / r) ** 2).clip(min=0.0) ** self.p
         return pdf.prod(axis=0)

+
 mkernel_p1epanechnikov = _KernelProduct(p=1, stats=_stats_epan,
                                         name='p1epanechnikov')
 mkernel_p1biweight = _KernelProduct(p=2, stats=_stats_biwe, name='p1biweight')
@@ -383,6 +387,8 @@ class _KernelRectangular(_Kernel):
     def norm_factor(self, d=1, n=None):
         r = self.r
         return (2 * r) ** d
+
+
 mkernel_rectangular = _KernelRectangular(stats=_stats_rect)
@@ -391,6 +397,8 @@ class _KernelTriangular(_Kernel):
     def _kernel(self, x):
         pdf = (1 - np.abs(x)).clip(min=0.0)
         return pdf.prod(axis=0)
+
+
 mkernel_triangular = _KernelTriangular(stats=_stats_tria)
@@ -425,6 +433,7 @@ class _KernelGaussian(_Kernel):
                  ) / (sqrt(pi) * (2 * sigma) ** (r + 1))
         return psi_r

+
 mkernel_gaussian = _KernelGaussian(r=4.0, stats=_stats_gaus)
 _GAUSS_KERNEL = mkernel_gaussian
@@ -437,6 +446,8 @@ class _KernelLaplace(_Kernel):
     def norm_factor(self, d=1, n=None):
         return 2 ** d
+
+
 mkernel_laplace = _KernelLaplace(r=7.0, stats=_stats_lapl)
@@ -445,6 +456,8 @@ class _KernelLogistic(_Kernel):
     def _kernel(self, x):
         s = exp(x)
         return np.prod(s / (s + 1) ** 2, axis=0)
+
+
 mkernel_logistic = _KernelLogistic(r=7.0, stats=_stats_logi)

 _MKERNEL_DICT = dict(
@@ -749,7 +762,7 @@ class Kernel(object):
     @staticmethod
     def _get_g(k_order_2, mu2, psi_order, n, order):
-        return (-2. * k_order_2 / (mu2 * psi_order * n)) ** (1. / (order+1))
+        return (-2. * k_order_2 / (mu2 * psi_order * n)) ** (1. / (order + 1))

     def hste(self, data, h0=None, inc=128, maxit=100, releps=0.01, abseps=0.0):
         '''HSTE 2-Stage Solve the Equation estimate of smoothing parameter.
@@ -1016,7 +1029,7 @@ class Kernel(object):
         kw4 = self.kernel(xn / h1) / (n * h1 * self.norm_factor(d=1))
         kw = np.r_[kw4, 0, kw4[-1:0:-1]]  # Apply 'fftshift' to kw.
-        f = np.real(ifft(fft(c, 2*inc) * fft(kw)))  # convolution.
+        f = np.real(ifft(fft(c, 2 * inc) * fft(kw)))  # convolution.

         # Estimate psi4=R(f'') using simple finite differences and
         # quadrature.
@@ -1035,10 +1048,10 @@ class Kernel(object):
     def _estimate_psi(c, xn, gi, n, order=4):
         # order = numout*2+2
         inc = len(xn)
-        kw0 = _GAUSS_KERNEL.deriv4_6_8_10(xn / gi, numout=(order-2)//2)[-1]
+        kw0 = _GAUSS_KERNEL.deriv4_6_8_10(xn / gi, numout=(order - 2) // 2)[-1]
         kw = np.r_[kw0, 0, kw0[-1:0:-1]]  # Apply fftshift to kw.
-        z = np.real(ifft(fft(c, 2*inc) * fft(kw)))  # convolution.
-        return np.sum(c * z[:inc]) / (n ** 2 * gi ** (order+1))
+        z = np.real(ifft(fft(c, 2 * inc) * fft(kw)))  # convolution.
+        return np.sum(c * z[:inc]) / (n ** 2 * gi ** (order + 1))

     def hscv(self, data, hvec=None, inc=128, maxit=100, fulloutput=False):
         '''
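
Both hunks use the same trick: the binned counts c are zero padded to length 2*inc and multiplied in the frequency domain with a kernel stored in wrap-around ('fftshift') order, which makes the first inc samples of the inverse transform a plain symmetric 'same' convolution. A small check of that identity with made-up data:

    import numpy as np
    from numpy.fft import fft, ifft

    inc = 8
    c = np.random.rand(inc)                    # binned data counts
    kw4 = np.exp(-0.5 * np.arange(inc) ** 2)   # one-sided kernel samples

    kw = np.r_[kw4, 0, kw4[-1:0:-1]]           # wrap-around layout, as above
    z = np.real(ifft(fft(c, 2 * inc) * fft(kw)))[:inc]

    full = np.convolve(c, np.r_[kw4[::-1], kw4[1:]], mode='full')
    print(np.allclose(z, full[inc - 1:2 * inc - 1]))   # True
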
@@ -1234,8 +1247,8 @@ class Kernel(object):
             # L-stage iterations to estimate PSI_4
             for ix in range(L, 0, -1):
-                gi = self._get_g(Kd[ix - 1], mu2, psi, n, order=2*ix + 4)
-                psi = self._estimate_psi(c, xn, gi, n, order=2*ix+2)
+                gi = self._get_g(Kd[ix - 1], mu2, psi, n, order=2 * ix + 4)
+                psi = self._estimate_psi(c, xn, gi, n, order=2 * ix + 2)
             h[dim] = (ste_constant / psi) ** (1. / 5)
         return h

@@ -370,8 +370,8 @@ def _deep_water_disufq(rvec, ivec, rA, iA, w, kw, h, g, nmin, nmax, m, n):
         jyi = jy * m
         iz1 = ixi + jyi
         iv1 = jyi - ixi
-        iz2 = (n*m-iz1)
-        iv2 = (n*m-iv1)
+        iz2 = (n * m - iz1)
+        iv2 = (n * m - iv1)
         for _i in range(m):
             rrA = rA[ixi] * rA[jyi]  # rrA = rA[i][ix]*rA[i][jy]
             iiA = iA[ixi] * iA[jyi]  # iiA = iA[i][ix]*iA[i][jy]
@@ -588,5 +588,6 @@ def findrfc_astm(tp, t=None):
     # n = len(sig_rfc)
     return sig_rfc[:n - cnr[0]]

+
 if __name__ == '__main__':
     pass

@@ -613,6 +613,7 @@ class CycleMatrix(PlotData):
     """
     Container class for Cycle Matrix data objects in WAFO
     """
+
     def __init__(self, *args, **kwds):
         self.kind = kwds.pop('kind', 'min2max')
         self.sigma = kwds.pop('sigma', None)
@@ -798,15 +799,25 @@ class CyclePairs(PlotData):
         nx = extr[0].argmax() + 1
         levels = extr[0, 0:nx]
-        if defnr == 2:  # This are upcrossings + maxima
-            dcount = cumsum(extr[1, 0:nx]) + extr[2, 0:nx] - extr[3, 0:nx]
-        elif defnr == 4:  # This are upcrossings + minima
-            dcount = cumsum(extr[1, 0:nx])
-            dcount[nx - 1] = dcount[nx - 2]
-        elif defnr == 1:  # This are only upcrossings
-            dcount = cumsum(extr[1, 0:nx]) - extr[3, 0:nx]
-        elif defnr == 3:  # This are upcrossings + minima + maxima
-            dcount = cumsum(extr[1, 0:nx]) + extr[2, 0:nx]
+
+        def _upcrossings_and_maxima(extr, nx):
+            return cumsum(extr[1, 0:nx]) + extr[2, 0:nx] - extr[3, 0:nx]
+
+        def _upcrossings_and_minima(extr, nx):
+            dcount = cumsum(extr[1, 0:nx])
+            dcount[nx - 1] = dcount[nx - 2]
+            return dcount
+
+        def _upcrossings(extr, nx):
+            return cumsum(extr[1, 0:nx]) - extr[3, 0:nx]
+
+        def _upcrossings_minima_and_maxima(extr, nx):
+            return cumsum(extr[1, 0:nx]) + extr[2, 0:nx]
+
+        dcount = {1: _upcrossings,
+                  2: _upcrossings_and_maxima,
+                  3: _upcrossings_minima_and_maxima,
+                  4: _upcrossings_and_minima}[defnr](extr, nx)
         ylab = 'Count'
         if intensity:
             dcount = dcount / self.time
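
The refactor above replaces an if/elif chain keyed on defnr with four small named helpers and a dict lookup; one side effect is that an unsupported defnr now fails fast with a KeyError instead of leaving dcount unassigned. A generic sketch of the pattern, with made-up handlers:

    def _double(x):
        return 2 * x

    def _negate(x):
        return -x

    handlers = {1: _double, 2: _negate}
    print(handlers[1](10), handlers[2](10))   # 20 -10
    # handlers[5](10) would raise KeyError: 5
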
@@ -1142,7 +1153,7 @@ class TurningPoints(PlotData):
         ind = findrfc(self.data, max(h, 0.0), method)
         try:
             t = self.args[ind]
-        except:
+        except Exception:
             t = ind
         mean = self.mean
         sigma = self.sigma
@@ -1386,10 +1397,11 @@ class TimeSeries(PlotData):
         '''
         if isinstance(wname, tuple):
             wname = wname[0]
-        dof = int(dict(parzen=3.71, hanning=2.67,
-                       bartlett=3).get(wname, np.nan) * n/L)
+        dof = int(dict(parzen=3.71,
+                       hanning=2.67,
+                       bartlett=3).get(wname, np.nan) * n / L)
         Be = dict(parzen=1.33, hanning=1,
-                  bartlett=1.33).get(wname, np.nan) * 2 * pi / (L*dt)
+                  bartlett=1.33).get(wname, np.nan) * 2 * pi / (L * dt)
         if ftype == 'f':
             Be = Be / (2 * pi)  # bandwidth in Hz
         return Be, dof
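
The two dictionaries convert the lag-window choice into equivalent degrees of freedom (a constant times n/L) and resolution bandwidth (a constant times 2*pi/(L*dt)). A quick numerical illustration with made-up record length n, maximum lag L and sampling step dt:

    import numpy as np

    n, L, dt = 2048, 256, 0.5
    wname = 'parzen'

    dof = int(dict(parzen=3.71, hanning=2.67,
                   bartlett=3).get(wname, np.nan) * n / L)             # 29
    Be = dict(parzen=1.33, hanning=1,
              bartlett=1.33).get(wname, np.nan) * 2 * np.pi / (L * dt)
    print(dof, Be / (2 * np.pi))    # bandwidth in Hz, as for ftype == 'f'
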
@@ -1680,7 +1692,7 @@ class TimeSeries(PlotData):
         ind = findtp(self.data, max(h, 0.0), wavetype)
         try:
             t = self.args[ind]
-        except:
+        except Exception:
             t = ind
         mean = self.data.mean()
         sigma = self.data.std()
@@ -1710,7 +1722,7 @@ class TimeSeries(PlotData):
         ind = findtc(self.data, v, wavetype)[0]
         try:
             t = self.args[ind]
-        except:
+        except Exception:
             t = ind
         mean = self.data.mean()
         sigma = self.data.std()
@@ -1773,7 +1785,7 @@ class TimeSeries(PlotData):
         --------
         wafo.definitions
         '''
-        dT = self.sampling_period()/np.maximum(rate, 1)
+        dT = self.sampling_period() / np.maximum(rate, 1)
         xi, ti = self._interpolate(rate)

         tc_ind, z_ind = findtc(xi, v=0, kind='tw')
@@ -2492,7 +2504,7 @@ class TimeSeries(PlotData):
         plt.title('Surface elevation from mean water level (MWL).')
         for ix in range(nsub):
             if nsub > 1:
-                subplot(nsub, 1, ix+1)
+                subplot(nsub, 1, ix + 1)
             h_scale = array([tn[ind[0]], tn[ind[-1]]])
             ind2 = where((h_scale[0] <= tn2) & (tn2 <= h_scale[1]))[0]
             plot(tn[ind] * dT, xn[ind], sym1)

@@ -133,7 +133,7 @@ class _ExampleFunctions(object):
         Maple: 0.40696958949155611906
         '''
         def _exp(x, y, loc, scale, p2=2):
-            return np.exp(- (x-loc[0])**2/scale[0] - (y-loc[1])**p2/scale[1])
+            return np.exp(- (x - loc[0])**2 / scale[0] - (y - loc[1])**p2 / scale[1])
         # exp = np.exp
         x9, y9 = 9. * x, 9. * y
         return (3. / 4 * _exp(x9, y9, [2, 2], [4, 4]) +
@@ -196,7 +196,7 @@ class _ExampleFunctions(object):
         The value of the definite integral on the square [-1,1] x [-1,1]
         is 4.
         '''
-        return np.ones(np.shape(x+y))
+        return np.ones(np.shape(x + y))

     @staticmethod
     def exp_xy(x, y):
@@ -284,6 +284,8 @@ class _ExampleFunctions(object):
                          s.exp_fun100, s.cos30, s.constant, s.exp_xy, s.runge,
                          s.abs_cubed, s.gauss, s.exp_inv]
         return test_function[i](x, y)
+
+
 example_functions = _ExampleFunctions()

@@ -1324,7 +1324,7 @@ def chebfit_dct(f, n=(10, ), domain=None):
     for i in range(ndim):
         ck = dct(ck[..., ::-1])
         ck[..., 0] = ck[..., 0] / 2.
-        if i < ndim-1:
+        if i < ndim - 1:
             ck = np.rollaxis(ck, axis=-1)
     return ck / np.product(n)
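
chebfit_dct relies on the fact that the Chebyshev coefficients of a function sampled at Chebyshev points of the first kind are a type-II DCT of the samples, scaled by the number of points and with the zeroth coefficient halved. A one dimensional sketch of that identity (scipy.fftpack.dct and numpy.polynomial are used only for the check):

    import numpy as np
    from scipy.fftpack import dct
    from numpy.polynomial import chebyshev

    n = 16
    x = np.cos((np.arange(n) + 0.5) * np.pi / n)   # Chebyshev points of the first kind
    samples = np.exp(x)

    c = dct(samples, type=2) / n                   # DCT-II, then scale by n
    c[0] /= 2.                                     # halve the zeroth coefficient

    xi = np.linspace(-1, 1, 5)
    print(np.allclose(chebyshev.chebval(xi, c), np.exp(xi)))   # True
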
@@ -1355,7 +1355,7 @@ def idct(x, n=None):
     http://en.wikipedia.org/wiki/Discrete_cosine_transform
     http://users.ece.utexas.edu/~bevans/courses/ee381k/lectures/
     """
-    return _idct(x, n=n, norm=None)*0.5/len(x)
+    return _idct(x, n=n, norm=None) * 0.5 / len(x)

 def _chebval(x, ck, kind=1):
@@ -2223,8 +2223,8 @@ def chebfitnd(xi, f, deg, rcond=None, full=False, w=None):
         rcond = xi[0].size * np.finfo(xi[0].dtype).eps
     # Solve the least squares problem.
-    c, resids, rank, s = np.linalg.lstsq(lhs/scl, rhs, rcond)
-    c = (c/scl).reshape(orders)
+    c, resids, rank, s = np.linalg.lstsq(lhs / scl, rhs, rcond)
+    c = (c / scl).reshape(orders)

     if full:
         return c, [resids, rank, s, rcond]
@@ -2279,7 +2279,7 @@ def chebvalnd(c, *xi):
     """
     try:
         xi = np.array(xi, copy=0)
-    except:
+    except Exception:
         raise ValueError('x, y, z are incompatible')
     chebval = np.polynomial.chebyshev.chebval
     c = chebval(xi[0], c)
@@ -2354,18 +2354,18 @@ def test_chebfit1d():
     zi = np.polynomial.chebyshev.chebval(xi, c)
     # plt.plot(xi, zi,'.', xi, f(xi))
-    plt.semilogy(xi, np.abs(zi-f(xi)))
+    plt.semilogy(xi, np.abs(zi - f(xi)))
     plt.show('hold')

 def test_chebfit2d():
     n = 3
-    xorder, yorder = n-1, n-1
+    xorder, yorder = n - 1, n - 1
     x = chebroot(n=n, kind=1)
     xgrid, ygrid = np.meshgrid(x, x)

     def f(x, y):
-        return np.exp(-x**2-6*y**2)
+        return np.exp(-x**2 - 6 * y**2)
     zgrid = f(xgrid, ygrid)

     # v2d = np.polynomial.chebyshev.chebvander2d(xgrid, ygrid,
@@ -2373,7 +2373,7 @@ def test_chebfit2d():
     # coeff, residuals, rank, s = np.linalg.lstsq(v2d, zgrid.ravel())
     # doeff = coeff.reshape(xorder+1,yorder+1)
     _dcoeff2 = chebfitnd((xgrid, ygrid), zgrid, [xorder, yorder])
-    dcoeff = chebfit_dct(f, n=(xorder+1, yorder+1))
+    dcoeff = chebfit_dct(f, n=(xorder + 1, yorder + 1))
     xi = np.linspace(-1, 1, 151)
     Xi, Yi = np.meshgrid(xi, xi)

@@ -19,7 +19,7 @@ def demo_savitzky_on_noisy_chirp():
     # generate chirp signal
     tvec = np.arange(0, 6.28, .02)
     true_signal = np.sin(tvec * (2.0 + tvec))
-    true_d_signal = (2+tvec) * np.cos(tvec * (2.0 + tvec))
+    true_d_signal = (2 + tvec) * np.cos(tvec * (2.0 + tvec))

     # add noise to signal
     noise = np.random.normal(size=true_signal.shape)
@@ -44,7 +44,7 @@ def demo_savitzky_on_noisy_chirp():
     plt.subplot(313)
     savgol1 = SavitzkyGolay(n=8, degree=1, diff_order=1)
-    dt = tvec[1]-tvec[0]
+    dt = tvec[1] - tvec[0]
     d_signal = savgol1.smooth(signal) / dt
     plt.plot(d_signal)
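
The demo estimates the chirp's derivative by smoothing with a first-order Savitzky-Golay filter (diff_order=1) and dividing by the sample spacing. For comparison, scipy.signal.savgol_filter can return a smoothed derivative directly via its deriv and delta arguments; the window length and polynomial order below are illustrative choices, not taken from the demo:

    import numpy as np
    from scipy.signal import savgol_filter

    tvec = np.arange(0, 6.28, .02)
    true_signal = np.sin(tvec * (2.0 + tvec))
    signal = true_signal + 0.1 * np.random.normal(size=true_signal.shape)

    dt = tvec[1] - tvec[0]
    d_signal = savgol_filter(signal, window_length=9, polyorder=2,
                             deriv=1, delta=dt)   # smoothed d(signal)/dt
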
@@ -150,7 +150,7 @@ def demo_kalman_sine():
     w = 1
     T = np.arange(0, 30 + dt / 2, dt)
     n = len(T)
-    X = 3*np.sin(w * T)
+    X = 3 * np.sin(w * T)
     Y = X + sd * np.random.randn(n)

     ''' Initialize KF to values
@@ -299,7 +299,7 @@ def demo_tide_filter():
     # import statsmodels.api as sa
     import wafo.spectrum.models as sm
     sd = 10
-    Sj = sm.Jonswap(Hm0=4.*sd)
+    Sj = sm.Jonswap(Hm0=4. * sd)
     S = Sj.tospecdata()
     q = (0.1 * sd) ** 2  # variance of process noise s the car operates
@@ -469,6 +469,7 @@ def demo_hodrick_on_cardioid():
              x, y, 'r.',
              xs, ys, 'k', linewidth=2)

+
 if __name__ == '__main__':
     from wafo.testing import test_docstrings
     test_docstrings(__file__)
