@@ -4,23 +4,33 @@ Created on 20. aug. 2015

@author: pab
"""
from __future__ import division
from collections import namedtuple
import warnings
import numdifftools as nd
import numdifftools.nd_algopy as nda
from numdifftools.extrapolation import dea3
from numdifftools.limits import Limit
import numpy as np
from numpy import linalg
from numpy.polynomial.chebyshev import chebval, Chebyshev
from numpy.polynomial import polynomial
from wafo.misc import piecewise, findcross, ecross

_FINFO = np.finfo(float)
EPS = _FINFO.eps
_EPS = EPS
_TINY = _FINFO.tiny
_HUGE = _FINFO.max


def _assert(cond, msg):
    if not cond:
        raise ValueError(msg)


def _assert_warn(cond, msg):
    if not cond:
        warnings.warn(msg)


class PolyBasis(object):

@@ -40,8 +50,8 @@ class PolyBasis(object):
    def derivative(self, t, k, n=1):
        c = self._coefficients(k)
        d_c = self._derivative(c, m=n)
        return self.eval(t, d_c)

    def __call__(self, t, k):
        return t**k


@@ -66,58 +76,57 @@ class ChebyshevBasis(PolyBasis):

chebyshev_basis = ChebyshevBasis()


def richardson(q_val, k):
    # license BSD
    # Richardson extrapolation with parameter estimation
    c = np.real((q_val[k - 1] - q_val[k - 2]) / (q_val[k] - q_val[k - 1])) - 1.
    # The lower bound 0.07 admits the singularity x.^-0.9
    c = max(c, 0.07)
    return q_val[k] + (q_val[k] - q_val[k - 1]) / c
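
# Illustrative example (added, not part of the original module): richardson()
# accelerates a sequence of successively refined estimates.  With hypothetical
# values converging roughly geometrically towards 1:
#
#   >>> q_seq = np.array([0.9, 0.99, 0.999])
#   >>> r = richardson(q_seq, k=2)
#   >>> abs(r - 1) < abs(q_seq[2] - 1)
#   True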


def evans_webster_weights(omega, g, d_g, x, basis, *args, **kwds):

    def psi(t, k):
        return d_g(t, *args, **kwds) * basis(t, k)

    j_w = 1j * omega
    n = len(x)
    a_matrix = np.zeros((n, n), dtype=complex)
    rhs = np.zeros((n,), dtype=complex)

    dbasis = basis.derivative
    lim_g = Limit(g)
    b_1 = np.exp(j_w * lim_g(1, *args, **kwds))
    if np.isnan(b_1):
        b_1 = 0.0
    a_1 = np.exp(j_w * lim_g(-1, *args, **kwds))
    if np.isnan(a_1):
        a_1 = 0.0

    lim_psi = Limit(psi)
    for k in range(n):
        rhs[k] = basis(1, k) * b_1 - basis(-1, k) * a_1
        a_matrix[k] = (dbasis(x, k, n=1) + j_w * lim_psi(x, k))

    solution = linalg.lstsq(a_matrix, rhs)
    return solution[0]
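
# Added commentary (interpretation, not part of the original module): row k of
# a_matrix samples L[P_k](x) = P_k'(x) + 1j*omega*g'(x)*P_k(x) at the nodes,
# while rhs[k] = P_k(1)*exp(1j*omega*g(1)) - P_k(-1)*exp(1j*omega*g(-1)) is the
# exact oscillatory integral of L[P_k] over [-1, 1].  The least-squares solution
# w therefore yields weights for which sum(w * f(x)) reproduces
# integral(f(t) * exp(1j*omega*g(t)), t=-1..1) exactly whenever f lies in the
# span of the L[P_k], which is the Evans-Webster weight construction.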


def osc_weights(omega, g, d_g, x, basis, a_b, *args, **kwds):
    def _g(t):
        return g(scale * t + offset, *args, **kwds)

    def _d_g(t):
        return scale * d_g(scale * t + offset, *args, **kwds)

    w = []

    for a, b in zip(a_b[::2], a_b[1::2]):
        scale = (b - a) / 2
        offset = (a + b) / 2

        w.append(evans_webster_weights(omega, _g, _d_g, x, basis))

    return np.asarray(w).ravel()
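
# Added note (not part of the original module): _g and _d_g deliberately close
# over scale and offset, which are rebound on every pass of the loop, so each
# call to evans_webster_weights sees the reference nodes x mapped onto the
# current subinterval [a, b] taken from the flat endpoint list a_b.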


@@ -148,47 +157,47 @@ class QuadOsc(_Integrator):
                         full_output=full_output)

    @staticmethod
    def _change_interval_to_0_1(f, g, d_g, a, _b):
        def f_01(t, *args, **kwds):
            den = 1 - t
            return f(a + t / den, *args, **kwds) / den ** 2

        def g_01(t, *args, **kwds):
            return g(a + t / (1 - t), *args, **kwds)

        def d_g_01(t, *args, **kwds):
            den = 1 - t
            return d_g(a + t / den, *args, **kwds) / den ** 2
        return f_01, g_01, d_g_01, 0., 1.

    @staticmethod
    def _change_interval_to_m1_0(f, g, d_g, _a, b):
        def f_m10(t, *args, **kwds):
            den = 1 + t
            return f(b + t / den, *args, **kwds) / den ** 2

        def g_m10(t, *args, **kwds):
            return g(b + t / (1 + t), *args, **kwds)

        def d_g_m10(t, *args, **kwds):
            den = 1 + t
            return d_g(b + t / den, *args, **kwds) / den ** 2
        return f_m10, g_m10, d_g_m10, -1.0, 0.0

    @staticmethod
    def _change_interval_to_m1_1(f, g, d_g, _a, _b):
        def f_m11(t, *args, **kwds):
            den = (1 - t**2)
            return f(t / den, *args, **kwds) * (1 + t**2) / den ** 2

        def g_m11(t, *args, **kwds):
            den = (1 - t**2)
            return g(t / den, *args, **kwds)

        def d_g_m11(t, *args, **kwds):
            den = (1 - t**2)
            return d_g(t / den, *args, **kwds) * (1 + t**2) / den ** 2
        return f_m11, g_m11, d_g_m11, -1., 1.
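
    # Added commentary (not part of the original module): the three helpers above
    # map semi-infinite and doubly infinite limits onto a finite reference
    # interval,
    #   [a, inf)    : x = a + t / (1 - t),   t in [0, 1),   dx = dt / (1 - t)**2
    #   (-inf, b]   : x = b + t / (1 + t),   t in (-1, 0],  dx = dt / (1 + t)**2
    #   (-inf, inf) : x = t / (1 - t**2),    t in (-1, 1),  dx = (1 + t**2) / (1 - t**2)**2 dt
    # which is why f and d_g are multiplied by the corresponding Jacobian factors.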

    def _get_functions(self):
        a, b = self.a, self.b

@@ -225,131 +234,127 @@ class QuadOsc(_Integrator):
        return val

    @staticmethod
    def _get_best_estimate(k, q_0, q_1, q_2):
        if k >= 5:
            q_v = np.hstack((q_0[k], q_1[k], q_2[k]))
            q_w = np.hstack((q_0[k - 1], q_1[k - 1], q_2[k - 1]))
        elif k >= 3:
            q_v = np.hstack((q_0[k], q_1[k]))
            q_w = np.hstack((q_0[k - 1], q_1[k - 1]))
        else:
            q_v = np.atleast_1d(q_0[k])
            q_w = q_0[k - 1]
        errors = np.atleast_1d(abs(q_v - q_w))
        j = np.nanargmin(errors)
        return q_v[j], errors[j]

    def _extrapolate(self, k, q_0, q_1, q_2):
        if k >= 4:
            q_1[k] = dea3(q_0[k - 2], q_0[k - 1], q_0[k])[0]
            q_2[k] = dea3(q_1[k - 2], q_1[k - 1], q_1[k])[0]
        elif k >= 2:
            q_1[k] = dea3(q_0[k - 2], q_0[k - 1], q_0[k])[0]
        # # Richardson extrapolation
        # if k >= 4:
        #     q_1[k] = richardson(q_0, k)
        #     q_2[k] = richardson(q_1, k)
        # elif k >= 2:
        #     q_1[k] = richardson(q_0, k)
        q, err = self._get_best_estimate(k, q_0, q_1, q_2)
        return q, err
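
    # Added usage note (illustrative, not part of the original module): dea3 from
    # numdifftools.extrapolation applies the epsilon (Shanks) algorithm to three
    # successive estimates and also returns an error estimate.  For the partial
    # sums of sum(1/n**2), which converge slowly to pi**2/6:
    #
    #   >>> s = lambda m: np.sum(1.0 / np.arange(1, m + 1)**2)
    #   >>> result, abserr = dea3(s(10), s(20), s(40))
    #   # result is much closer to pi**2/6 ~ 1.6449 than s(40) ~ 1.6202 is.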

    def _quad_osc(self, f, g, dg, a, b, omega, *args, **kwds):
        if a == b:
            q_val = b - a
            err = np.abs(b - a)
            return q_val, err

        abseps = 10**-self.precision
        max_iter = self.maxiter
        basis = self.basis
        if self.endpoints:
            x_n = chebyshev_extrema(self.s)
        else:
            x_n = chebyshev_roots(self.s)
            # x_n = tanh_sinh_open_nodes(self.s)

        # One interval
        hh = (b - a) / 2
        x = (a + b) / 2 + hh * x_n  # Nodes

        dtype = complex
        val0 = np.zeros((max_iter, 1), dtype=dtype)  # Quadrature
        val1 = np.zeros((max_iter, 1), dtype=dtype)  # First extrapolation
        val2 = np.zeros((max_iter, 1), dtype=dtype)  # Second extrapolation

        lim_f = Limit(f)
        a_b = np.hstack([a, b])
        wq = osc_weights(omega, g, dg, x_n, basis, a_b, *args, **kwds)
        val0[0] = hh * np.sum(wq * lim_f(x, *args, **kwds))

        # Successive bisection of intervals
        nq = len(x_n)
        n = nq
        for k in range(1, max_iter):
            n += nq * 2**k

            hh = hh / 2
            x = np.hstack([x + a, x + b]) / 2
            a_b = np.hstack([a_b + a, a_b + b]) / 2
            wq = osc_weights(omega, g, dg, x_n, basis, a_b, *args, **kwds)

            val0[k] = hh * np.sum(wq * lim_f(x, *args, **kwds))

            q_val, err = self._extrapolate(k, val0, val1, val2)

            converged = (err <= abseps) | ~np.isfinite(q_val)
            if converged:
                break

        _assert_warn(converged, 'Max number of iterations reached '
                     'without convergence.')
        _assert_warn(np.isfinite(q_val),
                     'Integral approximation is Infinite or NaN.')

        # The error estimate should not be zero
        err += 2 * np.finfo(q_val).eps
        return q_val, self.info(err, n)
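
    # Added commentary (interpretation, not part of the original module):
    # _quad_osc approximates the oscillatory integral of f(t)*exp(1j*omega*g(t))
    # over [a, b].  Each iteration halves every subinterval (a_b keeps the growing
    # list of endpoints), recomputes the Evans-Webster weights, and passes the
    # running estimates val0 through dea3 so convergence is judged on the
    # extrapolated value rather than on the raw quadrature sum.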


def adaptive_levin_points(m, delta):
    m_1 = m - 1
    prm = 0.5
    while prm * m_1 / delta >= 1:
        delta = 2 * delta
    k = np.arange(m)
    x = piecewise([k < prm * m_1, k == np.ceil(prm * m_1)],
                  [-1 + k / delta, 0 * k, 1 - (m_1 - k) / delta])
    return x


def open_levin_points(m, delta):
    return adaptive_levin_points(m + 2, delta)[1:-1]


def chebyshev_extrema(m, delta=None):
    k = np.arange(m)
    x = np.cos(k * np.pi / (m - 1))
    return x
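
# Illustrative example (added, not part of the original module): the Chebyshev
# extrema (Gauss-Lobatto points) include both endpoints of [-1, 1], e.g.
#
#   >>> x = chebyshev_extrema(5)   # cos(k*pi/4) for k = 0, ..., 4
#   # x is approximately [1.0, 0.7071, 0.0, -0.7071, -1.0]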


def tanh_sinh_nodes(m, delta=None, tol=_EPS):
    tmax = np.arcsinh(np.arctanh(1 - _EPS) * 2 / np.pi)
    # tmax = 3.18
    m_1 = int(np.floor(-np.log2(tmax / max(m - 1, 1)))) - 1
    h = 2.0**-m_1
    t = np.arange((m + 1) // 2 + 1) * h
    x = np.tanh(np.pi / 2 * np.sinh(t))
    k = np.flatnonzero(np.abs(x - 1) <= 10 * tol)
    y = x[:k[0] + 1] if len(k) else x
    return np.hstack((-y[:0:-1], y))


def tanh_sinh_open_nodes(m, delta=None, tol=_EPS):
    return tanh_sinh_nodes(m + 1, delta, tol)[1:-1]
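
# Added note (not part of the original module): tanh_sinh_nodes implements the
# double-exponential substitution x = tanh(pi/2 * sinh(t)), whose nodes cluster
# towards the endpoints of [-1, 1]; tanh_sinh_open_nodes strips the two outermost
# nodes so that the integrand is never evaluated exactly at the endpoints.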


def chebyshev_roots(m, delta=None):

@@ -362,47 +367,47 @@ class AdaptiveLevin(_Integrator):
    """Return integral for the Levin-type and adaptive Levin-type methods"""

    @staticmethod
    def _a_levin(omega, f, g, d_g, x, s, basis, *args, **kwds):

        def psi(t, k):
            return d_g(t, *args, **kwds) * basis(t, k)

        j_w = 1j * omega
        nu = np.ones((len(x),), dtype=int)
        nu[0] = nu[-1] = s
        S = np.cumsum(np.hstack((nu, 0)))
        S[-1] = 0
        n = int(S[-2])
        a_matrix = np.zeros((n, n), dtype=complex)
        rhs = np.zeros((n,))
        dff = Limit(nda.Derivative(f))
        d_psi = Limit(nda.Derivative(psi))
        dbasis = basis.derivative
        for r, t in enumerate(x):
            for j in range(S[r - 1], S[r]):
                order = ((j - S[r - 1]) % nu[r])  # derivative order
                dff.fun.n = order
                rhs[j] = dff(t, *args, **kwds)
                d_psi.fun.n = order
                for k in range(n):
                    a_matrix[j, k] = (dbasis(t, k, n=order + 1) +
                                      j_w * d_psi(t, k))
        k1 = np.flatnonzero(1 - np.isfinite(rhs))
        if k1.size > 0:  # Remove singularities
            warnings.warn('Singularities detected! ')
            a_matrix[k1] = 0
            rhs[k1] = 0
        solution = linalg.lstsq(a_matrix, rhs)
        v = basis.eval([-1, 1], solution[0])

        lim_g = Limit(g)
        g_b = np.exp(j_w * lim_g(1, *args, **kwds))
        if np.isnan(g_b):
            g_b = 0
        g_a = np.exp(j_w * lim_g(-1, *args, **kwds))
        if np.isnan(g_a):
            g_a = 0
        return v[1] * g_b - v[0] * g_a
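
    # Added commentary (interpretation, not part of the original module): the
    # nested loops collocate the Levin equation
    #   p'(t) + 1j*omega*g'(t)*p(t) = f(t)
    # at the nodes x and, at the two endpoints, its derivatives up to order s - 1
    # as well (hence nu[0] = nu[-1] = s).  With the basis coefficients known, the
    # antiderivative identity
    #   integral(f*exp(1j*omega*g), t=-1..1) = p(1)*exp(1j*omega*g(1)) - p(-1)*exp(1j*omega*g(-1))
    # gives the returned value.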

    def _get_integration_limits(self, omega, args, kwds):
        a, b = self.a, self.b

@@ -486,23 +491,23 @@ class AdaptiveLevin(_Integrator):
        num_collocation_point_list = m * 2**np.arange(1, 5) + 1
        basis = self.basis

        q_val = 1e+300
        num_function_evaluations = 0
        n = 0
        for num_collocation_points in num_collocation_point_list:
            n_old = n
            q_old = q_val
            x = points(num_collocation_points, betam)
            n = len(x)
            if n > n_old:
                q_val = self._a_levin(omega, ff, gg, dgg, x, s, basis, *args,
                                      **kwds)
                num_function_evaluations += n
                err = np.abs(q_val - q_old)
                if err <= abseps:
                    break
        info = self.info(err, num_function_evaluations)
        return q_val, info


class EvansWebster(AdaptiveLevin):

@@ -515,12 +520,11 @@ class EvansWebster(AdaptiveLevin):
                             precision=precision, endpoints=endpoints,
                             full_output=full_output)

    def _a_levin(self, omega, ff, gg, dgg, x, s, basis, *args, **kwds):
        w = evans_webster_weights(omega, gg, dgg, x, basis, *args, **kwds)

        f = Limit(ff)(x, *args, **kwds)
        return np.sum(f * w)

    def _get_num_points(self, s, prec, betam):
        return 8 if s > 1 else int(prec / max(np.log10(betam + 1), 1) + 1)
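
    # Added note (interpretation, not part of the original module): EvansWebster
    # keeps the AdaptiveLevin refinement loop but replaces the endpoint evaluation
    # of the collocation solution with a weighted sum of f at the nodes, using the
    # weights produced by evans_webster_weights above.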