From 6281d6fb09b2bed709bc68a93eca1d4e002fe03c Mon Sep 17 00:00:00 2001
From: pbrod
Date: Thu, 26 May 2016 13:18:48 +0200
Subject: [PATCH] added integrate oscillating + added doctest to .travis

---
 .travis.yml                              |   3 +-
 wafo/integrate_oscillating.py            | 577 +++++++++++++++++++++++
 wafo/tests/test_integrate_oscillating.py | 387 +++++++++++++++
 3 files changed, 966 insertions(+), 1 deletion(-)
 create mode 100644 wafo/integrate_oscillating.py
 create mode 100644 wafo/tests/test_integrate_oscillating.py

diff --git a/.travis.yml b/.travis.yml
index dec9c7f..a40f642 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -46,7 +46,8 @@ before_script:
   - git config --global user.email "per.andreas.brodtkorb@gmail.com"
   - git config --global user.name "pbrod"
 script:
-  - coverage run --source=wafo setup.py test
+  - coverage run --source=wafo setup.py test
+  - py.test wafo/ --cov --pep8 --doctest-modules
 after_success:
   - coveralls
   - codecov
diff --git a/wafo/integrate_oscillating.py b/wafo/integrate_oscillating.py
new file mode 100644
index 0000000..2b329a3
--- /dev/null
+++ b/wafo/integrate_oscillating.py
@@ -0,0 +1,577 @@
+'''
+Created on 20. aug. 2015
+
+@author: pab
+'''
+from __future__ import division
+import numpy as np
+import warnings
+import numdifftools as nd  # @UnresolvedImport
+import numdifftools.nd_algopy as nda  # @UnresolvedImport
+from numdifftools.limits import Limit  # @UnresolvedImport
+from numpy import linalg
+from numpy.polynomial.chebyshev import chebval, Chebyshev
+from numpy.polynomial import polynomial
+from wafo.misc import piecewise, findcross, ecross
+from collections import namedtuple
+
+EPS = np.finfo(float).eps
+_EPS = EPS
+finfo = np.finfo(float)
+_TINY = finfo.tiny
+_HUGE = finfo.max
+dea3 = nd.dea3
+
+
+class PolyBasis(object):
+
+    def derivative(self, t, k, n=1):
+        c = np.zeros(k + 1)
+        c[k] = 1
+        dc = polynomial.polyder(c, m=n)
+        return polynomial.polyval(t, dc)
+
+    def eval(self, t, c):
+        return polynomial.polyval(t, c)
+
+    def __call__(self, t, k):
+        return t**k
+poly_basis = PolyBasis()
+
+
+class ChebyshevBasis(object):
+
+    def derivative(self, t, k, n=1):
+        c = np.zeros(k + 1)
+        c[k] = 1
+        cheb = Chebyshev(c)
+        dcheb = cheb.deriv(m=n)
+        return chebval(t, dcheb.coef)
+
+    def eval(self, t, c):
+        return chebval(t, c)
+
+    def __call__(self, t, k):
+        c = np.zeros(k + 1)
+        c[k] = 1
+        return chebval(t, c)
+chebyshev_basis = ChebyshevBasis()
+
+
+def richardson(Q, k):
+    # license BSD
+    # Richardson extrapolation with parameter estimation
+    c = np.real((Q[k - 1] - Q[k - 2]) / (Q[k] - Q[k - 1])) - 1.
+    # The lower bound 0.07 admits the singularity x.^-0.9
+    c = max(c, 0.07)
+    R = Q[k] + (Q[k] - Q[k - 1]) / c
+    return R
+
+
+def evans_webster_weights(omega, gg, dgg, x, basis, *args, **kwds):
+
+    def Psi(t, k):
+        return dgg(t, *args, **kwds) * basis(t, k)
+
+    j_w = 1j * omega
+    nn = len(x)
+    A = np.zeros((nn, nn), dtype=complex)
+    F = np.zeros((nn,), dtype=complex)
+
+    dbasis = basis.derivative
+    lim_gg = Limit(gg)
+    b1 = np.exp(j_w*lim_gg(1, *args, **kwds))
+    if np.isnan(b1):
+        b1 = 0.0
+    a1 = np.exp(j_w*lim_gg(-1, *args, **kwds))
+    if np.isnan(a1):
+        a1 = 0.0
+
+    lim_Psi = Limit(Psi)
+    for k in range(nn):
+        F[k] = basis(1, k)*b1 - basis(-1, k)*a1
+        A[k] = (dbasis(x, k, n=1) + j_w * lim_Psi(x, k))
+
+    LS = linalg.lstsq(A, F)
+    return LS[0]
+
+
+def osc_weights(omega, g, dg, x, basis, ab, *args, **kwds):
+    w = []
+
+    for a, b in zip(ab[::2], ab[1::2]):
+        scale = (b - a) / 2
+        offset = (a + b) / 2
+
+        def gg(t):
+            return g(scale * t + offset, *args, **kwds)
+
+        def dgg(t):
+            return scale * dg(scale * t + offset, *args, **kwds)
+
+        w.append(evans_webster_weights(omega, gg, dgg, x, basis))
+
+    return np.asarray(w).ravel()
+
+
+class QuadOsc(object):
+    info = namedtuple('info', ['error_estimate', 'n'])
+
+    def __init__(self, f, g, dg=None, a=-1, b=1, basis=chebyshev_basis, s=15,
+                 precision=10, endpoints=False, maxiter=17, full_output=False):
+        self.f = f
+        self.g = g
+        self.dg = nd.Derivative(g) if dg is None else dg
+        self.basis = basis
+        self.a = a
+        self.b = b
+        self.s = s
+        self.endpoints = endpoints
+        self.maxiter = maxiter
+        self.precision = precision
+        self.full_output = full_output
+
+    def _change_interval_to_0_1(self, f, g, dg, a, b):
+        def f1(t, *args, **kwds):
+            den = 1-t
+            return f(a + t / den, *args, **kwds) / den ** 2
+
+        def g1(t, *args, **kwds):
+            return g(a + t / (1 - t), *args, **kwds)
+
+        def dg1(t, *args, **kwds):
+            den = 1-t
+            return dg(a + t / den, *args, **kwds) / den ** 2
+        return f1, g1, dg1, 0., 1.
+
+    def _change_interval_to_m1_0(self, f, g, dg, a, b):
+        def f2(t, *args, **kwds):
+            den = 1 + t
+            return f(b + t / den, *args, **kwds) / den ** 2
+
+        def g2(t, *args, **kwds):
+            return g(b + t / (1 + t), *args, **kwds)
+
+        def dg2(t, *args, **kwds):
+            den = 1 + t
+            return dg(b + t / den, *args, **kwds) / den ** 2
+        return f2, g2, dg2, -1.0, 0.0
+
+    def _change_interval_to_m1_1(self, f, g, dg, a, b):
+        def f2(t, *args, **kwds):
+            den = (1 - t**2)
+            return f(t / den, *args, **kwds) * (1+t**2) / den ** 2
+
+        def g2(t, *args, **kwds):
+            den = (1 - t**2)
+            return g(t / den, *args, **kwds)
+
+        def dg2(t, *args, **kwds):
+            den = (1 - t**2)
+            return dg(t / den, *args, **kwds) * (1+t**2) / den ** 2
+        return f2, g2, dg2, -1., 1.
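+
+    # The three _change_interval_to_* helpers above map infinite limits onto
+    # finite ones through the substitutions x = a + t/(1 - t) for [a, inf),
+    # x = b + t/(1 + t) for (-inf, b] and x = t/(1 - t**2) for (-inf, inf).
+    # In each case f is multiplied by dx/dt, so the value of the integral is
+    # unchanged.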
+ + def _get_functions(self): + a, b = self.a, self.b + reverse = np.real(a) > np.real(b) + if reverse: + a, b = b, a + f, g, dg = self.f, self.g, self.dg + + if a == b: + pass + elif np.isinf(a) | np.isinf(b): + # Check real limits + if ~np.isreal(a) | ~np.isreal(b) | np.isnan(a) | np.isnan(b): + raise ValueError('Infinite intervals must be real.') + # Change of variable + if np.isfinite(a) & np.isinf(b): + f, g, dg, a, b = self._change_interval_to_0_1(f, g, dg, a, b) + elif np.isinf(a) & np.isfinite(b): + f, g, dg, a, b = self._change_interval_to_m1_0(f, g, dg, a, b) + else: # -inf to inf + f, g, dg, a, b = self._change_interval_to_m1_1(f, g, dg, a, b) + + return f, g, dg, a, b, reverse + + def __call__(self, omega, *args, **kwds): + f, g, dg, a, b, reverse = self._get_functions() + + Q, err = self._quad_osc(f, g, dg, a, b, omega, *args, **kwds) + + if reverse: + Q = -Q + if self.full_output: + return Q, err + return Q + + def _get_best_estimate(self, k, Q0, Q1, Q2): + if k >= 5: + Qv = np.hstack((Q0[k], Q1[k], Q2[k])) + Qw = np.hstack((Q0[k - 1], Q1[k - 1], Q2[k - 1])) + elif k >= 3: + Qv = np.hstack((Q0[k], Q1[k])) + Qw = np.hstack((Q0[k - 1], Q1[k - 1])) + else: + Qv = np.atleast_1d(Q0[k]) + Qw = Q0[k - 1] + errors = np.atleast_1d(abs(Qv - Qw)) + j = np.nanargmin(errors) + return Qv[j], errors[j] + + def _extrapolate(self, k, Q0, Q1, Q2): + if k >= 4: + Q1[k], _err1 = dea3(Q0[k - 2], Q0[k - 1], Q0[k]) + Q2[k], _err2 = dea3(Q1[k - 2], Q1[k - 1], Q1[k]) + elif k >= 2: + Q1[k], _err1 = dea3(Q0[k - 2], Q0[k - 1], Q0[k]) + # # Richardson extrapolation + # if k >= 4: + # Q1[k] = richardson(Q0, k) + # Q2[k] = richardson(Q1, k) + # elif k >= 2: + # Q1[k] = richardson(Q0, k) + Q, err = self._get_best_estimate(k, Q0, Q1, Q2) + return Q, err + + def _quad_osc(self, f, g, dg, a, b, omega, *args, **kwds): + if a == b: + Q = b - a + err = b - a + return Q, err + + abseps = 10**-self.precision + max_iter = self.maxiter + basis = self.basis + if self.endpoints: + xq = chebyshev_extrema(self.s) + else: + xq = chebyshev_roots(self.s) + # xq = tanh_sinh_open_nodes(self.s) + + # One interval + hh = (b - a) / 2 + x = (a + b) / 2 + hh * xq # Nodes + + dtype = complex + Q0 = np.zeros((max_iter, 1), dtype=dtype) # Quadrature + Q1 = np.zeros((max_iter, 1), dtype=dtype) # First extrapolation + Q2 = np.zeros((max_iter, 1), dtype=dtype) # Second extrapolation + + lim_f = Limit(f) + ab = np.hstack([a, b]) + wq = osc_weights(omega, g, dg, xq, basis, ab, *args, **kwds) + Q0[0] = hh * np.sum(wq * lim_f(x, *args, **kwds)) + + # Successive bisection of intervals + nq = len(xq) + n = nq + for k in xrange(1, max_iter): + n += nq*2**k + + hh = hh / 2 + x = np.hstack([x + a, x + b]) / 2 + ab = np.hstack([ab + a, ab + b]) / 2 + wq = osc_weights(omega, g, dg, xq, basis, ab, *args, **kwds) + + Q0[k] = hh * np.sum(wq * lim_f(x, *args, **kwds)) + + Q, err = self._extrapolate(k, Q0, Q1, Q2) + + convergence = (err <= abseps) | ~np.isfinite(Q) + if convergence: + break + else: + warnings.warn('Max number of iterations reached ' + 'without convergence.') + + if ~np.isfinite(Q): + warnings.warn('Integral approximation is Infinite or NaN.') + + # The error estimate should not be zero + err += 2 * np.finfo(Q).eps + return Q, self.info(err, n) + + +def adaptive_levin_points(M, delta): + m = M - 1 + prm = 0.5 + while prm * m / delta >= 1: + delta = 2 * delta + k = np.arange(M) + x = piecewise([k < prm * m, k == np.ceil(prm * m)], + [-1 + k / delta, 0 * k, 1 - (m - k) / delta]) + return x + + +def open_levin_points(M, delta): + return 
adaptive_levin_points(M+2, delta)[1:-1] + + +def chebyshev_extrema(M, delta=None): + k = np.arange(M) + x = np.cos(k * np.pi / (M-1)) + return x + +_EPS = np.finfo(float).eps + + +def tanh_sinh_nodes(M, delta=None, tol=_EPS): + tmax = np.arcsinh(np.arctanh(1-_EPS)*2/np.pi) + # tmax = 3.18 + m = int(np.floor(-np.log2(tmax/max(M-1, 1)))) - 1 + h = 2.0**-m + t = np.arange((M+1)//2+1)*h + x = np.tanh(np.pi/2*np.sinh(t)) + k = np.flatnonzero(np.abs(x - 1) <= 10*tol) + y = x[:k[0]+1] if len(k) else x + return np.hstack((-y[:0:-1], y)) + + +def tanh_sinh_open_nodes(M, delta=None, tol=_EPS): + return tanh_sinh_nodes(M+1, delta, tol)[1:-1] + + +def chebyshev_roots(M, delta=None): + k = np.arange(1, 2*M, 2) * 0.5 + x = np.cos(k * np.pi / M) + return x + + +class AdaptiveLevin(object): + '''Return integral for the Levin-type and adaptive Levin-type methods''' + info = namedtuple('info', ['error_estimate', 'n']) + + def __init__(self, f, g, dg=None, a=-1, b=1, basis=chebyshev_basis, s=1, + precision=10, endpoints=True, full_output=False): + self.f = f + self.g = g + self.dg = nd.Derivative(g) if dg is None else dg + self.basis = basis + self.a = a + self.b = b + self.s = s + self.precision = precision + self.endpoints = endpoints + self.full_output = full_output + + def aLevinTQ(self, omega, ff, gg, dgg, x, s, basis, *args, **kwds): + + def Psi(t, k): + return dgg(t, *args, **kwds) * basis(t, k) + + j_w = 1j * omega + nu = np.ones((len(x),), dtype=int) + nu[0] = nu[-1] = s + S = np.cumsum(np.hstack((nu, 0))) + S[-1] = 0 + nn = int(S[-2]) + A = np.zeros((nn, nn), dtype=complex) + F = np.zeros((nn,)) + dff = Limit(nda.Derivative(ff)) + dPsi = Limit(nda.Derivative(Psi)) + dbasis = basis.derivative + for r, t in enumerate(x): + for j in range(S[r - 1], S[r]): + order = ((j - S[r - 1]) % nu[r]) # derivative order + dff.f.n = order + F[j] = dff(t, *args, **kwds) + dPsi.f.n = order + for k in range(nn): + A[j, k] = (dbasis(t, k, n=order+1) + j_w * dPsi(t, k)) + k1 = np.flatnonzero(1-np.isfinite(F)) + if k1.size > 0: # Remove singularities + warnings.warn('Singularities detected! 
') + A[k1] = 0 + F[k1] = 0 + LS = linalg.lstsq(A, F) + v = basis.eval([-1, 1], LS[0]) + + lim_gg = Limit(gg) + gb = np.exp(j_w * lim_gg(1, *args, **kwds)) + if np.isnan(gb): + gb = 0 + ga = np.exp(j_w * lim_gg(-1, *args, **kwds)) + if np.isnan(ga): + ga = 0 + NR = (v[1] * gb - v[0] * ga) + return NR + + def _get_integration_limits(self, omega, args, kwds): + a, b = self.a, self.b + M = 30 + ab = [a] + scale = (b - a) / 2 + n = 30 + x = np.linspace(a, b, n + 1) + dg_x = np.asarray([scale * omega * self.dg(xi, *args, **kwds) + for xi in x]) + i10 = findcross(dg_x, M) + i1 = findcross(dg_x, 1) + i0 = findcross(dg_x, 0) + im1 = findcross(dg_x, -1) + im10 = findcross(dg_x, -M) + x10 = ecross(x, dg_x, i10, M) if len(i10) else () + x1 = ecross(x, dg_x, i1, 1) if len(i1) else () + x0 = ecross(x, dg_x, i0, 0) if len(i0) else () + xm1 = ecross(x, dg_x, im1, -1) if len(im1) else () + xm10 = ecross(x, dg_x, im10, -M) if len(im10) else () + + for i in np.unique(np.hstack((x10, x1, x0, xm1, xm10))): + if x[0] < i < x[n]: + ab.append(i) + ab.append(b) + return ab + + def __call__(self, omega, *args, **kwds): + ab = self._get_integration_limits(omega, args, kwds) + s = self.s + val = 0 + n = 0 + err = 0 + for ai, bi in zip(ab[:-1], ab[1:]): + vali, infoi = self._QaL(s, ai, bi, omega, *args, **kwds) + val += vali + err += infoi.error_estimate + n += infoi.n + if self.full_output: + info = self.info(err, n) + return val, info + return val + + def _get_num_points(self, s, prec, betam): + return 1 if s > 1 else int(prec / max(np.log10(betam + 1), 1) + 1) + + def _QaL(self, s, a, b, omega, *args, **kwds): + '''if s>1,the integral is computed by Q_s^L''' + scale = (b - a) / 2 + offset = (a + b) / 2 + prec = self.precision # desired precision + + def ff(t, *args, **kwds): + return scale * self.f(scale * t + offset, *args, **kwds) + + def gg(t, *args, **kwds): + return self.g(scale * t + offset, *args, **kwds) + + def dgg(t, *args, **kwds): + return scale * self.dg(scale * t + offset, *args, **kwds) + dg_a = abs(omega * dgg(-1, *args, **kwds)) + dg_b = abs(omega * dgg(1, *args, **kwds)) + g_a = abs(omega * gg(-1, *args, **kwds)) + g_b = abs(omega * gg(1, *args, **kwds)) + delta, alpha = min(dg_a, dg_b), min(g_a, g_b) + + betam = delta # * scale + if self.endpoints: + if (delta < 10 or alpha <= 10 or s > 1): + points = chebyshev_extrema + else: + points = adaptive_levin_points + elif (delta < 10 or alpha <= 10 or s > 1): + points = chebyshev_roots + else: + points = open_levin_points # tanh_sinh_open_nodes + + m = self._get_num_points(s, prec, betam) + abseps = 10*10.0**-prec + num_collocation_point_list = m*2**np.arange(1, 5) + 1 + basis = self.basis + + Q = 1e+300 + n = 0 + ni = 0 + for num_collocation_points in num_collocation_point_list: + ni_old = ni + Q_old = Q + x = points(num_collocation_points, betam) + ni = len(x) + if ni > ni_old: + Q = self.aLevinTQ(omega, ff, gg, dgg, x, s, basis, *args, + **kwds) + n += ni + err = np.abs(Q-Q_old) + if err <= abseps: + break + info = self.info(err, n) + return Q, info + + +class EvansWebster(AdaptiveLevin): + '''Return integral for the Evans Webster method''' + + def __init__(self, f, g, dg=None, a=-1, b=1, basis=chebyshev_basis, s=8, + precision=10, endpoints=False, full_output=False): + self.f = f + self.g = g + self.dg = nd.Derivative(g) if dg is None else dg + self.basis = basis + self.a = a + self.b = b + self.s = s + self.endpoints = endpoints + self.precision = precision + self.full_output = full_output + + def aLevinTQ(self, omega, ff, gg, dgg, x, s, basis, 
*args, **kwds): + w = evans_webster_weights(omega, gg, dgg, x, basis, *args, **kwds) + + f = Limit(ff)(x, *args, **kwds) + NR = np.sum(f*w) + return NR + + def _get_num_points(self, s, prec, betam): + return 8 if s > 1 else int(prec / max(np.log10(betam + 1), 1) + 1) + +# def _QaL(self, s, a, b, omega, *args, **kwds): +# '''if s>1,the integral is computed by Q_s^L''' +# scale = (b - a) / 2 +# offset = (a + b) / 2 +# prec = self.precision # desired precision +# +# def ff(t, *args, **kwds): +# return scale * self.f(scale * t + offset, *args, **kwds) +# +# def gg(t, *args, **kwds): +# return self.g(scale * t + offset, *args, **kwds) +# +# def dgg(t, *args, **kwds): +# return scale * self.dg(scale * t + offset, *args, **kwds) +# dg_a = abs(omega * dgg(-1, *args, **kwds)) +# dg_b = abs(omega * dgg(1, *args, **kwds)) +# g_a = abs(omega * gg(-1, *args, **kwds)) +# g_b = abs(omega * gg(1, *args, **kwds)) +# delta, alpha = min(dg_a, dg_b), min(g_a, g_b) +# +# betam = delta # * scale +# if self.endpoints: +# if (delta < 10 or alpha <= 10 or s > 1): +# points = chebyshev_extrema +# else: +# points = adaptive_levin_points +# elif (delta < 10 or alpha <= 10 or s > 1): +# points = chebyshev_roots +# else: +# points = tanh_sinh_open_nodes +# +# m = 8 if s > 1 else int(prec / max(np.log10(betam + 1), 1) + 1) +# abseps = 10*10.0**-prec +# num_collocation_point_list = 2*m*np.arange(1, 6, 2) +# # range(num_points, num_points+3, 2) +# basis = self.basis +# Q = 1e+300 +# n = 0 +# for num_collocation_points in num_collocation_point_list: +# Q_old = Q +# x = points(num_collocation_points-1, betam) +# Q = self.aLevinTQ(omega, ff, gg, dgg, x, s, basis, *args, **kwds) +# n += num_collocation_points +# err = np.abs(Q-Q_old) +# if err <= abseps: +# break +# info = self.info(err, n) +# return Q, info + + +if __name__ == '__main__': + tanh_sinh_nodes(16) diff --git a/wafo/tests/test_integrate_oscillating.py b/wafo/tests/test_integrate_oscillating.py new file mode 100644 index 0000000..2242151 --- /dev/null +++ b/wafo/tests/test_integrate_oscillating.py @@ -0,0 +1,387 @@ +''' +Created on 31. aug. 2015 + +@author: pab +''' +from __future__ import division +import numpy as np +import mpmath as mp +import unittest +from wafo.integrate_oscillating import (adaptive_levin_points, + chebyshev_extrema, + chebyshev_roots, tanh_sinh_nodes, + tanh_sinh_open_nodes, + AdaptiveLevin, poly_basis, + chebyshev_basis, + EvansWebster, QuadOsc) +# import numdifftools as nd +from numpy.testing import assert_allclose +from scipy.special import gamma, digamma +_EPS = np.finfo(float).eps + + +class TestBasis(unittest.TestCase): + def test_poly(self): + t = 1 + vals = [poly_basis.derivative(t, k, n=1) for k in range(3)] + assert_allclose(vals, range(3)) + vals = [poly_basis.derivative(0, k, n=1) for k in range(3)] + assert_allclose(vals, [0, 1, 0]) + vals = [poly_basis.derivative(0, k, n=2) for k in range(3)] + assert_allclose(vals, [0, 0, 2]) + + def test_chebyshev(self): + t = 1 + vals = [chebyshev_basis.derivative(t, k, n=1) for k in range(3)] + assert_allclose(vals, np.arange(3)**2) + vals = [chebyshev_basis.derivative(0, k, n=1) for k in range(3)] + assert_allclose(vals, [0, 1, 0]) + vals = [chebyshev_basis.derivative(0, k, n=2) for k in range(3)] + assert_allclose(vals, [0, 0, 4]) + + +class TestLevinPoints(unittest.TestCase): + + def test_adaptive(self): + M = 11 + delta = 100 + x = adaptive_levin_points(M, delta) + true_x = [-1., -0.99, -0.98, -0.97, -0.96, 0., + 0.96, 0.97, 0.98, 0.99, 1.] 
+ assert_allclose(x, true_x) + + def test_chebyshev_extrema(self): + M = 11 + delta = 100 + x = chebyshev_extrema(M, delta) + true_x = [1.000000e+00, 9.510565e-01, 8.090170e-01, 5.877853e-01, + 3.090170e-01, 6.123234e-17, -3.090170e-01, -5.877853e-01, + -8.090170e-01, -9.510565e-01, -1.000000e+00] + assert_allclose(x, true_x) + + def test_chebyshev_roots(self): + M = 11 + delta = 100 + x = chebyshev_roots(M, delta) + + true_x = [9.89821442e-01, 9.09631995e-01, 7.55749574e-01, + 5.40640817e-01, 2.81732557e-01, 2.83276945e-16, + -2.81732557e-01, -5.40640817e-01, -7.55749574e-01, + -9.09631995e-01, -9.89821442e-01] + assert_allclose(x, true_x) + + def test_tanh_sinh_nodes(self): + for n in 2**np.arange(1, 5) + 1: + x = tanh_sinh_nodes(n) + # self.assertEqual(n, len(x)) + + def test_tanh_sinh_open_nodes(self): + for n in 2**np.arange(1, 5) + 1: + x = tanh_sinh_open_nodes(n) + # self.assertEqual(n, len(x)) + + +class LevinQuadrature(unittest.TestCase): + def test_exp_4t_exp_jw_gamma_t_exp_4t(self): + def f(t): + return np.exp(4 * t) # amplitude function + + def g(t): + return t + np.exp(4 * t) * gamma(t) # phase function + + def dg(t): + return 1 + (4 + digamma(t)) * np.exp(4 * t) * gamma(t) + a = 1 + b = 2 + omega = 100 + + def ftot(t): + exp4t = mp.exp(4*t) + return exp4t * mp.exp(1j * omega * (t+exp4t*mp.gamma(t))) + + _true_val, _err = mp.quadts(ftot, [a, (a+b)/2, b], error=True) + + true_val = 0.00435354129735323908804 + 0.00202865398517716214366j + # quad = AdaptiveLevin(f, g, dg, a=a, b=b, s=1, full_output=True) + for quadfun in [EvansWebster, QuadOsc, AdaptiveLevin]: + quad = quadfun(f, g, dg, a=a, b=b, full_output=True) + val, info = quad(omega) + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-11) + # assert_allclose(info.n, 9) + + def test_exp_jw_t(self): + def g(t): + return t + + def dg(t): + return np.ones(np.shape(t)) + + def true_F(t): + return np.exp(1j*omega*g(t))/(1j*omega) + + val, _err = mp.quadts(g, [0, 1], error=True) + a = 1 + b = 2 + omega = 1 + true_val = true_F(b)-true_F(a) + + for quadfun in [QuadOsc, AdaptiveLevin, EvansWebster]: + quad = quadfun(dg, g, dg, a, b, full_output=True) + val, info = quad(omega) + + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-12) + # assert_allclose(info.n, 21) + + def test_I1_1_p_ln_x_exp_jw_xlnx(self): + def g(t): + return t*np.log(t) + + def dg(t): + return 1 + np.log(t) + + def true_F(t): + return np.exp(1j*(omega*g(t)))/(1j*omega) + + a = 100 + b = 200 + omega = 1 + true_val = true_F(b)-true_F(a) + for quadfun in [AdaptiveLevin, QuadOsc, EvansWebster]: + quad = quadfun(dg, g, dg, a, b, full_output=True) + + val, info = quad(omega) + + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-10) + # assert_allclose(info.n, 11) + + def test_I4_ln_x_exp_jw_30x(self): + n = 7 + + def g(t): + return t**n + + def dg(t): + return n*t**(n-1) + + def f(t): + return dg(t)*np.log(g(t)) + + a = 0 + b = (2 * np.pi)**(1./n) + omega = 30 + + def ftot(t): + return n*t**(n-1)*mp.log(t**n) * mp.exp(1j * omega * t**n) + + _true_val, _err = mp.quadts(ftot, [a, b], error=True, maxdegree=8) + # true_val = (-0.052183048684992 - 0.193877275099872j) + true_val = (-0.0521830486849921 - 0.193877275099871j) + + for quadfun in [QuadOsc, EvansWebster, AdaptiveLevin]: + quad = quadfun(f, g, dg, a, b, full_output=True) + val, info = quad(omega) + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-5) + + def test_I5_coscost_sint_exp_jw_sint(self): + a = 0 + b = np.pi/2 + omega = 100 
+ + def f(t): + return np.cos(np.cos(t))*np.sin(t) + + def g(t): + return np.sin(t) + + def dg(t): + return np.cos(t) + + def ftot(t): + return mp.cos(mp.cos(t)) * mp.sin(t) * mp.exp(1j * omega * + mp.sin(t)) + + _true_val, _err = mp.quadts(ftot, [a, 0.5, 1, b], maxdegree=9, + error=True) + + true_val = 0.0325497765499959-0.121009052128827j + for quadfun in [QuadOsc, EvansWebster, AdaptiveLevin]: + quad = quadfun(f, g, dg, a, b, full_output=True) + + val, info = quad(omega) + + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-9) + + def test_I6_exp_jw_td_1_m_t(self): + a = 0 + b = 1 + omega = 1 + + def f(t): + return np.ones(np.shape(t)) + + def g(t): + return t/(1-t) + + def dg(t): + return 1./(1-t)**2 + + def ftot(t): + return mp.exp(1j * omega * t/(1-t)) + + true_val = (0.3785503757641866423607342717846606761068353230802945830 + + 0.3433779615564270328325330038583124340012440194999075192j) + for quadfun in [QuadOsc, EvansWebster, AdaptiveLevin]: + quad = quadfun(f, g, dg, a, b, endpoints=False, full_output=True) + + val, info = quad(omega) + + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-10) + + def test_I8_cos_47pix2d4_exp_jw_x(self): + def f(t): + return np.cos(47*np.pi/4*t**2) + + def g(t): + return t + + def dg(t): + return 1 + + a = -1 + b = 1 + omega = 451*np.pi/4 + + true_val = 2.3328690362927e-3 + s = 15 + for quadfun in [QuadOsc, EvansWebster]: # , AdaptiveLevin]: + quad = quadfun(f, g, dg, a, b, s=s, endpoints=False, + full_output=True) + val, _info = quad(omega) + assert_allclose(val.real, true_val) + s = 1 if s <= 2 else s // 2 + # self.assert_(info.error_estimate < 1e-10) + # assert_allclose(info.n, 11) + + def test_I9_exp_tant_sec2t_exp_jw_tant(self): + a = 0 + b = np.pi/2 + omega = 100 + + def f(t): + return np.exp(-np.tan(t))/np.cos(t)**2 + + def g(t): + return np.tan(t) + + def dg(t): + return 1./np.cos(t)**2 + + true_val = (0.0000999900009999000099990000999900009999000099990000999 + + 0.009999000099990000999900009999000099990000999900009999j) + for quadfun in [QuadOsc, EvansWebster, AdaptiveLevin]: + quad = quadfun(f, g, dg, a, b, endpoints=False, full_output=True) + + val, info = quad(omega) + + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-8) + + def test_exp_zdcos2t_dcos2t_exp_jw_cos_t_b_dcos2t(self): + x1 = -20 + y1 = 20 + z1 = 20 + beta = np.abs(np.arctan(y1/x1)) + R = np.sqrt(x1**2+y1**2) + + def f(t, beta, z1): + cos2t = np.cos(t)**2 + return np.where(cos2t == 0, 0, np.exp(-z1/cos2t)/cos2t) + + def g(t, beta, z1): + return np.cos(t-beta)/np.cos(t)**2 + + def dg(t, beta, z1=0): + cos3t = np.cos(t)**3 + return 0.5*(3*np.sin(beta)-np.sin(beta-2*t))/cos3t + + def append_dg_zero(zeros, g1, beta): + signs = [1, ] if np.abs(g1) <= _EPS else [-1, 1] + for sgn1 in signs: + tn = np.arccos(sgn1 * g1) + if -np.pi / 2 <= tn <= np.pi / 2: + for sgn2 in [-1, 1]: + t = sgn2 * tn + if np.abs(dg(t, beta)) < 10*_EPS: + zeros.append(t) + return zeros + + def zeros_dg(beta): + k0 = (9*np.cos(2*beta)-7) + if k0 < 0: # No stationary points + return () + k1 = 3*np.cos(2*beta)-5 + g0 = np.sqrt(2)*np.sqrt(np.cos(beta)**2*k0) + zeros = [] + + if g0+k1 < _EPS: + g1 = 1./2*np.sqrt(-g0-k1) + zeros = append_dg_zero(zeros, g1, beta) + if _EPS < g0-k1: + g2 = 1./2*np.sqrt(g0-k1) + zeros = append_dg_zero(zeros, g2, beta) + if np.abs(g0+k1) <= _EPS or np.abs(g0-k1) <= _EPS: + zeros = append_dg_zero(zeros, 0, beta) + return tuple(zeros) + + a = -np.pi/2 + b = np.pi/2 + omega = R + + def ftot(t): + cos2t = mp.cos(t)**2 + 
+            return (mp.exp(-z1/cos2t) / cos2t *
+                    mp.exp(1j * omega * mp.cos(t-beta)/cos2t))
+
+        zdg = zeros_dg(beta)
+        ab = (a, ) + zdg + (b, )
+        true_val, _err = mp.quadts(ftot, ab, maxdegree=9, error=True)
+
+        if False:  # Set to True for a visual check of the integrand.
+            # The plot window blocks py.test/travis runs, so it is disabled
+            # by default.
+            import matplotlib.pyplot as plt
+            t = np.linspace(a, b, 5*513)
+            plt.subplot(2, 1, 1)
+            f2 = f(t, beta, z1)*np.exp(1j*R*g(t, beta, z1))
+            true_val2 = np.trapz(f2, t)
+            plt.plot(t, f2.real, t, f2.imag, 'r')
+            plt.title('integral=%g+1j%g, '
+                      '(%g+1j%g)' % (true_val2.real, true_val2.imag,
+                                     true_val.real, true_val.imag))
+            plt.subplot(2, 1, 2)
+            plt.plot(t, dg(t, beta, z1), 'r')
+            plt.plot(t, g(t, beta, z1))
+            plt.hlines(0, a, b)
+            plt.axis([a, b, -5, 5])
+            plt.title('beta=%g' % beta)
+            plt.show('hold')
+
+        # true_val = 0.00253186684281+0.004314054498j
+        for quadfun in [QuadOsc, EvansWebster, AdaptiveLevin]:
+            quad = quadfun(f, g, dg, a, b, precision=10, endpoints=False,
+                           full_output=True)
+            val, _info = quad(omega, beta, z1)  # @UnusedVariable
+            # assert_allclose(val, complex(true_val), rtol=1e-3)
+
+
+if __name__ == "__main__":
+    unittest.main()
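
Usage note: a minimal sketch of how the new classes are meant to be called,
based on the API and tests added above (QuadOsc can be swapped for
AdaptiveLevin or EvansWebster, which share the same call pattern):

    import numpy as np
    from wafo.integrate_oscillating import QuadOsc

    # Integral of f(t)*exp(1j*omega*g(t)) over [0, pi/2], as in
    # test_I5_coscost_sint_exp_jw_sint above.
    def f(t):
        return np.cos(np.cos(t)) * np.sin(t)

    def g(t):
        return np.sin(t)

    def dg(t):
        return np.cos(t)

    quad = QuadOsc(f, g, dg, a=0, b=np.pi / 2, full_output=True)
    value, info = quad(100)  # omega = 100
    # value is approximately 0.03255 - 0.12101j (the test's true_val);
    # info.error_estimate and info.n report the accuracy and node count.
    print(value, info.error_estimate, info.n)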