Small refactoring

master
Per A Brodtkorb 8 years ago
parent ea2edc1ca5
commit 217b607a4c

@ -1159,7 +1159,7 @@ class _Quadgr(object):
return nodes, weights return nodes, weights
@staticmethod @staticmethod
def _get_best_estimate(vals0, vals1, vals2, k): def _get_best_estimate(k, vals0, vals1, vals2):
if k >= 6: if k >= 6:
q_v = np.hstack((vals0[k], vals1[k], vals2[k])) q_v = np.hstack((vals0[k], vals1[k], vals2[k]))
q_w = np.hstack((vals0[k - 1], vals1[k - 1], vals2[k - 1])) q_w = np.hstack((vals0[k - 1], vals1[k - 1], vals2[k - 1]))
@ -1178,6 +1178,16 @@ class _Quadgr(object):
# _val, err1 = dea3(vals0[k - 2], vals0[k - 1], vals0[k]) # _val, err1 = dea3(vals0[k - 2], vals0[k - 1], vals0[k])
return q_val, err return q_val, err
def _extrapolate(self, k, val0, val1, val2):
# Richardson extrapolation
if k >= 5:
val1[k] = richardson(val0, k)
val2[k] = richardson(val1, k)
elif k >= 3:
val1[k] = richardson(val0, k)
q_val, err = self._get_best_estimate(k, val0, val1, val2)
return q_val, err
def _integrate(self, fun, a, b, abseps, max_iter): def _integrate(self, fun, a, b, abseps, max_iter):
dtype = np.result_type(fun((a+b)/2), fun((a+b)/4)) dtype = np.result_type(fun((a+b)/2), fun((a+b)/4))
@ -1204,14 +1214,7 @@ class _Quadgr(object):
val0[k] = np.sum(np.sum(np.reshape(fun(x), (-1, n)), axis=0) * val0[k] = np.sum(np.sum(np.reshape(fun(x), (-1, n)), axis=0) *
weights, axis=0) * d_x weights, axis=0) * d_x
# Richardson extrapolation q_val, err = self._extrapolate(k, val0, val1, val2)
if k >= 5:
val1[k] = richardson(val0, k)
val2[k] = richardson(val1, k)
elif k >= 3:
val1[k] = richardson(val0, k)
q_val, err = self._get_best_estimate(val0, val1, val2, k)
converged = (err < abseps) | ~np.isfinite(q_val) converged = (err < abseps) | ~np.isfinite(q_val)
if converged: if converged:

@ -4,23 +4,33 @@ Created on 20. aug. 2015
@author: pab @author: pab
""" """
from __future__ import division from __future__ import division
import numpy as np from collections import namedtuple
import warnings import warnings
import numdifftools as nd # @UnresolvedImport import numdifftools as nd
import numdifftools.nd_algopy as nda # @UnresolvedImport import numdifftools.nd_algopy as nda
from numdifftools.limits import Limit # @UnresolvedImport from numdifftools.extrapolation import dea3
from numdifftools.limits import Limit
import numpy as np
from numpy import linalg from numpy import linalg
from numpy.polynomial.chebyshev import chebval, Chebyshev from numpy.polynomial.chebyshev import chebval, Chebyshev
from numpy.polynomial import polynomial from numpy.polynomial import polynomial
from wafo.misc import piecewise, findcross, ecross from wafo.misc import piecewise, findcross, ecross
from collections import namedtuple
EPS = np.finfo(float).eps _FINFO = np.finfo(float)
EPS = _FINFO.eps
_EPS = EPS _EPS = EPS
finfo = np.finfo(float) _TINY = _FINFO.tiny
_TINY = finfo.tiny _HUGE = _FINFO.max
_HUGE = finfo.max
dea3 = nd.dea3
def _assert(cond, msg):
if not cond:
raise ValueError(msg)
def _assert_warn(cond, msg):
if not cond:
warnings.warn(msg)
class PolyBasis(object): class PolyBasis(object):
@ -40,8 +50,8 @@ class PolyBasis(object):
def derivative(self, t, k, n=1): def derivative(self, t, k, n=1):
c = self._coefficients(k) c = self._coefficients(k)
dc = self._derivative(c, m=n) d_c = self._derivative(c, m=n)
return self.eval(t, dc) return self.eval(t, d_c)
def __call__(self, t, k): def __call__(self, t, k):
return t**k return t**k
@ -66,58 +76,57 @@ class ChebyshevBasis(PolyBasis):
chebyshev_basis = ChebyshevBasis() chebyshev_basis = ChebyshevBasis()
def richardson(Q, k): def richardson(q_val, k):
# license BSD # license BSD
# Richardson extrapolation with parameter estimation # Richardson extrapolation with parameter estimation
c = np.real((Q[k - 1] - Q[k - 2]) / (Q[k] - Q[k - 1])) - 1. c = np.real((q_val[k - 1] - q_val[k - 2]) / (q_val[k] - q_val[k - 1])) - 1.
# The lower bound 0.07 admits the singularity x.^-0.9 # The lower bound 0.07 admits the singularity x.^-0.9
c = max(c, 0.07) c = max(c, 0.07)
R = Q[k] + (Q[k] - Q[k - 1]) / c return q_val[k] + (q_val[k] - q_val[k - 1]) / c
return R
def evans_webster_weights(omega, gg, dgg, x, basis, *args, **kwds): def evans_webster_weights(omega, g, d_g, x, basis, *args, **kwds):
def Psi(t, k): def psi(t, k):
return dgg(t, *args, **kwds) * basis(t, k) return d_g(t, *args, **kwds) * basis(t, k)
j_w = 1j * omega j_w = 1j * omega
nn = len(x) n = len(x)
A = np.zeros((nn, nn), dtype=complex) a_matrix = np.zeros((n, n), dtype=complex)
F = np.zeros((nn,), dtype=complex) rhs = np.zeros((n,), dtype=complex)
dbasis = basis.derivative dbasis = basis.derivative
lim_gg = Limit(gg) lim_g = Limit(g)
b1 = np.exp(j_w*lim_gg(1, *args, **kwds)) b_1 = np.exp(j_w*lim_g(1, *args, **kwds))
if np.isnan(b1): if np.isnan(b_1):
b1 = 0.0 b_1 = 0.0
a1 = np.exp(j_w*lim_gg(-1, *args, **kwds)) a_1 = np.exp(j_w*lim_g(-1, *args, **kwds))
if np.isnan(a1): if np.isnan(a_1):
a1 = 0.0 a_1 = 0.0
lim_Psi = Limit(Psi) lim_psi = Limit(psi)
for k in range(nn): for k in range(n):
F[k] = basis(1, k)*b1 - basis(-1, k)*a1 rhs[k] = basis(1, k) * b_1 - basis(-1, k) * a_1
A[k] = (dbasis(x, k, n=1) + j_w * lim_Psi(x, k)) a_matrix[k] = (dbasis(x, k, n=1) + j_w * lim_psi(x, k))
LS = linalg.lstsq(A, F) solution = linalg.lstsq(a_matrix, rhs)
return LS[0] return solution[0]
def osc_weights(omega, g, dg, x, basis, ab, *args, **kwds): def osc_weights(omega, g, d_g, x, basis, a_b, *args, **kwds):
def gg(t): def _g(t):
return g(scale * t + offset, *args, **kwds) return g(scale * t + offset, *args, **kwds)
def dgg(t): def _d_g(t):
return scale * dg(scale * t + offset, *args, **kwds) return scale * d_g(scale * t + offset, *args, **kwds)
w = [] w = []
for a, b in zip(ab[::2], ab[1::2]): for a, b in zip(a_b[::2], a_b[1::2]):
scale = (b - a) / 2 scale = (b - a) / 2
offset = (a + b) / 2 offset = (a + b) / 2
w.append(evans_webster_weights(omega, gg, dgg, x, basis)) w.append(evans_webster_weights(omega, _g, _d_g, x, basis))
return np.asarray(w).ravel() return np.asarray(w).ravel()
@ -148,47 +157,47 @@ class QuadOsc(_Integrator):
full_output=full_output) full_output=full_output)
@staticmethod @staticmethod
def _change_interval_to_0_1(f, g, dg, a, b): def _change_interval_to_0_1(f, g, d_g, a, _b):
def f1(t, *args, **kwds): def f_01(t, *args, **kwds):
den = 1-t den = 1-t
return f(a + t / den, *args, **kwds) / den ** 2 return f(a + t / den, *args, **kwds) / den ** 2
def g1(t, *args, **kwds): def g_01(t, *args, **kwds):
return g(a + t / (1 - t), *args, **kwds) return g(a + t / (1 - t), *args, **kwds)
def dg1(t, *args, **kwds): def d_g_01(t, *args, **kwds):
den = 1-t den = 1-t
return dg(a + t / den, *args, **kwds) / den ** 2 return d_g(a + t / den, *args, **kwds) / den ** 2
return f1, g1, dg1, 0., 1. return f_01, g_01, d_g_01, 0., 1.
@staticmethod @staticmethod
def _change_interval_to_m1_0(f, g, dg, a, b): def _change_interval_to_m1_0(f, g, d_g, _a, b):
def f2(t, *args, **kwds): def f_m10(t, *args, **kwds):
den = 1 + t den = 1 + t
return f(b + t / den, *args, **kwds) / den ** 2 return f(b + t / den, *args, **kwds) / den ** 2
def g2(t, *args, **kwds): def g_m10(t, *args, **kwds):
return g(b + t / (1 + t), *args, **kwds) return g(b + t / (1 + t), *args, **kwds)
def dg2(t, *args, **kwds): def d_g_m10(t, *args, **kwds):
den = 1 + t den = 1 + t
return dg(b + t / den, *args, **kwds) / den ** 2 return d_g(b + t / den, *args, **kwds) / den ** 2
return f2, g2, dg2, -1.0, 0.0 return f_m10, g_m10, d_g_m10, -1.0, 0.0
@staticmethod @staticmethod
def _change_interval_to_m1_1(f, g, dg, a, b): def _change_interval_to_m1_1(f, g, d_g, _a, _b):
def f2(t, *args, **kwds): def f_m11(t, *args, **kwds):
den = (1 - t**2) den = (1 - t**2)
return f(t / den, *args, **kwds) * (1+t**2) / den ** 2 return f(t / den, *args, **kwds) * (1+t**2) / den ** 2
def g2(t, *args, **kwds): def g_m11(t, *args, **kwds):
den = (1 - t**2) den = (1 - t**2)
return g(t / den, *args, **kwds) return g(t / den, *args, **kwds)
def dg2(t, *args, **kwds): def d_g_m11(t, *args, **kwds):
den = (1 - t**2) den = (1 - t**2)
return dg(t / den, *args, **kwds) * (1+t**2) / den ** 2 return d_g(t / den, *args, **kwds) * (1+t**2) / den ** 2
return f2, g2, dg2, -1., 1. return f_m11, g_m11, d_g_m11, -1., 1.
def _get_functions(self): def _get_functions(self):
a, b = self.a, self.b a, b = self.a, self.b
@ -225,131 +234,127 @@ class QuadOsc(_Integrator):
return val return val
@staticmethod @staticmethod
def _get_best_estimate(k, q0, q1, q2): def _get_best_estimate(k, q_0, q_1, q_2):
if k >= 5: if k >= 5:
qv = np.hstack((q0[k], q1[k], q2[k])) q_v = np.hstack((q_0[k], q_1[k], q_2[k]))
qw = np.hstack((q0[k - 1], q1[k - 1], q2[k - 1])) q_w = np.hstack((q_0[k - 1], q_1[k - 1], q_2[k - 1]))
elif k >= 3: elif k >= 3:
qv = np.hstack((q0[k], q1[k])) q_v = np.hstack((q_0[k], q_1[k]))
qw = np.hstack((q0[k - 1], q1[k - 1])) q_w = np.hstack((q_0[k - 1], q_1[k - 1]))
else: else:
qv = np.atleast_1d(q0[k]) q_v = np.atleast_1d(q_0[k])
qw = q0[k - 1] q_w = q_0[k - 1]
errors = np.atleast_1d(abs(qv - qw)) errors = np.atleast_1d(abs(q_v - q_w))
j = np.nanargmin(errors) j = np.nanargmin(errors)
return qv[j], errors[j] return q_v[j], errors[j]
def _extrapolate(self, k, q0, q1, q2): def _extrapolate(self, k, q_0, q_1, q_2):
if k >= 4: if k >= 4:
q1[k] = dea3(q0[k - 2], q0[k - 1], q0[k])[0] q_1[k] = dea3(q_0[k - 2], q_0[k - 1], q_0[k])[0]
q2[k] = dea3(q1[k - 2], q1[k - 1], q1[k])[0] q_2[k] = dea3(q_1[k - 2], q_1[k - 1], q_1[k])[0]
elif k >= 2: elif k >= 2:
q1[k] = dea3(q0[k - 2], q0[k - 1], q0[k])[0] q_1[k] = dea3(q_0[k - 2], q_0[k - 1], q_0[k])[0]
# # Richardson extrapolation # # Richardson extrapolation
# if k >= 4: # if k >= 4:
# q1[k] = richardson(q0, k) # q_1[k] = richardson(q_0, k)
# q2[k] = richardson(q1, k) # q_2[k] = richardson(q_1, k)
# elif k >= 2: # elif k >= 2:
# q1[k] = richardson(q0, k) # q_1[k] = richardson(q_0, k)
q, err = self._get_best_estimate(k, q0, q1, q2) q, err = self._get_best_estimate(k, q_0, q_1, q_2)
return q, err return q, err
def _quad_osc(self, f, g, dg, a, b, omega, *args, **kwds): def _quad_osc(self, f, g, dg, a, b, omega, *args, **kwds):
if a == b: if a == b:
Q = b - a q_val = b - a
err = b - a err = np.abs(b - a)
return Q, err return q_val, err
abseps = 10**-self.precision abseps = 10**-self.precision
max_iter = self.maxiter max_iter = self.maxiter
basis = self.basis basis = self.basis
if self.endpoints: if self.endpoints:
xq = chebyshev_extrema(self.s) x_n = chebyshev_extrema(self.s)
else: else:
xq = chebyshev_roots(self.s) x_n = chebyshev_roots(self.s)
# xq = tanh_sinh_open_nodes(self.s) # x_n = tanh_sinh_open_nodes(self.s)
# One interval # One interval
hh = (b - a) / 2 hh = (b - a) / 2
x = (a + b) / 2 + hh * xq # Nodes x = (a + b) / 2 + hh * x_n # Nodes
dtype = complex dtype = complex
Q0 = np.zeros((max_iter, 1), dtype=dtype) # Quadrature val0 = np.zeros((max_iter, 1), dtype=dtype) # Quadrature
Q1 = np.zeros((max_iter, 1), dtype=dtype) # First extrapolation val1 = np.zeros((max_iter, 1), dtype=dtype) # First extrapolation
Q2 = np.zeros((max_iter, 1), dtype=dtype) # Second extrapolation val2 = np.zeros((max_iter, 1), dtype=dtype) # Second extrapolation
lim_f = Limit(f) lim_f = Limit(f)
ab = np.hstack([a, b]) a_b = np.hstack([a, b])
wq = osc_weights(omega, g, dg, xq, basis, ab, *args, **kwds) wq = osc_weights(omega, g, dg, x_n, basis, a_b, *args, **kwds)
Q0[0] = hh * np.sum(wq * lim_f(x, *args, **kwds)) val0[0] = hh * np.sum(wq * lim_f(x, *args, **kwds))
# Successive bisection of intervals # Successive bisection of intervals
nq = len(xq) nq = len(x_n)
n = nq n = nq
for k in range(1, max_iter): for k in range(1, max_iter):
n += nq * 2**k n += nq * 2**k
hh = hh / 2 hh = hh / 2
x = np.hstack([x + a, x + b]) / 2 x = np.hstack([x + a, x + b]) / 2
ab = np.hstack([ab + a, ab + b]) / 2 a_b = np.hstack([a_b + a, a_b + b]) / 2
wq = osc_weights(omega, g, dg, xq, basis, ab, *args, **kwds) wq = osc_weights(omega, g, dg, x_n, basis, a_b, *args, **kwds)
Q0[k] = hh * np.sum(wq * lim_f(x, *args, **kwds)) val0[k] = hh * np.sum(wq * lim_f(x, *args, **kwds))
Q, err = self._extrapolate(k, Q0, Q1, Q2) q_val, err = self._extrapolate(k, val0, val1, val2)
convergence = (err <= abseps) | ~np.isfinite(Q) converged = (err <= abseps) | ~np.isfinite(q_val)
if convergence: if converged:
break break
else: _assert_warn(converged, 'Max number of iterations reached '
warnings.warn('Max number of iterations reached '
'without convergence.') 'without convergence.')
_assert_warn(np.isfinite(q_val),
if ~np.isfinite(Q): 'Integral approximation is Infinite or NaN.')
warnings.warn('Integral approximation is Infinite or NaN.')
# The error estimate should not be zero # The error estimate should not be zero
err += 2 * np.finfo(Q).eps err += 2 * np.finfo(q_val).eps
return Q, self.info(err, n) return q_val, self.info(err, n)
def adaptive_levin_points(M, delta): def adaptive_levin_points(m, delta):
m = M - 1 m_1 = m - 1
prm = 0.5 prm = 0.5
while prm * m / delta >= 1: while prm * m_1 / delta >= 1:
delta = 2 * delta delta = 2 * delta
k = np.arange(M) k = np.arange(m)
x = piecewise([k < prm * m, k == np.ceil(prm * m)], x = piecewise([k < prm * m_1, k == np.ceil(prm * m_1)],
[-1 + k / delta, 0 * k, 1 - (m - k) / delta]) [-1 + k / delta, 0 * k, 1 - (m_1 - k) / delta])
return x return x
def open_levin_points(M, delta): def open_levin_points(m, delta):
return adaptive_levin_points(M+2, delta)[1:-1] return adaptive_levin_points(m+2, delta)[1:-1]
def chebyshev_extrema(M, delta=None): def chebyshev_extrema(m, delta=None):
k = np.arange(M) k = np.arange(m)
x = np.cos(k * np.pi / (M-1)) x = np.cos(k * np.pi / (m-1))
return x return x
_EPS = np.finfo(float).eps
def tanh_sinh_nodes(M, delta=None, tol=_EPS): def tanh_sinh_nodes(m, delta=None, tol=_EPS):
tmax = np.arcsinh(np.arctanh(1-_EPS)*2/np.pi) tmax = np.arcsinh(np.arctanh(1-_EPS)*2/np.pi)
# tmax = 3.18 # tmax = 3.18
m = int(np.floor(-np.log2(tmax/max(M-1, 1)))) - 1 m_1 = int(np.floor(-np.log2(tmax/max(m-1, 1)))) - 1
h = 2.0**-m h = 2.0**-m_1
t = np.arange((M+1)//2+1)*h t = np.arange((m+1)//2+1)*h
x = np.tanh(np.pi/2*np.sinh(t)) x = np.tanh(np.pi/2*np.sinh(t))
k = np.flatnonzero(np.abs(x - 1) <= 10*tol) k = np.flatnonzero(np.abs(x - 1) <= 10*tol)
y = x[:k[0]+1] if len(k) else x y = x[:k[0]+1] if len(k) else x
return np.hstack((-y[:0:-1], y)) return np.hstack((-y[:0:-1], y))
def tanh_sinh_open_nodes(M, delta=None, tol=_EPS): def tanh_sinh_open_nodes(m, delta=None, tol=_EPS):
return tanh_sinh_nodes(M+1, delta, tol)[1:-1] return tanh_sinh_nodes(m+1, delta, tol)[1:-1]
def chebyshev_roots(m, delta=None): def chebyshev_roots(m, delta=None):
@ -362,47 +367,47 @@ class AdaptiveLevin(_Integrator):
"""Return integral for the Levin-type and adaptive Levin-type methods""" """Return integral for the Levin-type and adaptive Levin-type methods"""
@staticmethod @staticmethod
def aLevinTQ(omega, ff, gg, dgg, x, s, basis, *args, **kwds): def _a_levin(omega, f, g, d_g, x, s, basis, *args, **kwds):
def Psi(t, k): def psi(t, k):
return dgg(t, *args, **kwds) * basis(t, k) return d_g(t, *args, **kwds) * basis(t, k)
j_w = 1j * omega j_w = 1j * omega
nu = np.ones((len(x),), dtype=int) nu = np.ones((len(x),), dtype=int)
nu[0] = nu[-1] = s nu[0] = nu[-1] = s
S = np.cumsum(np.hstack((nu, 0))) S = np.cumsum(np.hstack((nu, 0)))
S[-1] = 0 S[-1] = 0
nn = int(S[-2]) n = int(S[-2])
A = np.zeros((nn, nn), dtype=complex) a_matrix = np.zeros((n, n), dtype=complex)
F = np.zeros((nn,)) rhs = np.zeros((n,))
dff = Limit(nda.Derivative(ff)) dff = Limit(nda.Derivative(f))
dPsi = Limit(nda.Derivative(Psi)) d_psi = Limit(nda.Derivative(psi))
dbasis = basis.derivative dbasis = basis.derivative
for r, t in enumerate(x): for r, t in enumerate(x):
for j in range(S[r - 1], S[r]): for j in range(S[r - 1], S[r]):
order = ((j - S[r - 1]) % nu[r]) # derivative order order = ((j - S[r - 1]) % nu[r]) # derivative order
dff.fun.n = order dff.fun.n = order
F[j] = dff(t, *args, **kwds) rhs[j] = dff(t, *args, **kwds)
dPsi.fun.n = order d_psi.fun.n = order
for k in range(nn): for k in range(n):
A[j, k] = (dbasis(t, k, n=order+1) + j_w * dPsi(t, k)) a_matrix[j, k] = (dbasis(t, k, n=order+1) +
k1 = np.flatnonzero(1-np.isfinite(F)) j_w * d_psi(t, k))
k1 = np.flatnonzero(1-np.isfinite(rhs))
if k1.size > 0: # Remove singularities if k1.size > 0: # Remove singularities
warnings.warn('Singularities detected! ') warnings.warn('Singularities detected! ')
A[k1] = 0 a_matrix[k1] = 0
F[k1] = 0 rhs[k1] = 0
LS = linalg.lstsq(A, F) solution = linalg.lstsq(a_matrix, rhs)
v = basis.eval([-1, 1], LS[0]) v = basis.eval([-1, 1], solution[0])
lim_gg = Limit(gg) lim_g = Limit(g)
gb = np.exp(j_w * lim_gg(1, *args, **kwds)) g_b = np.exp(j_w * lim_g(1, *args, **kwds))
if np.isnan(gb): if np.isnan(g_b):
gb = 0 g_b = 0
ga = np.exp(j_w * lim_gg(-1, *args, **kwds)) g_a = np.exp(j_w * lim_g(-1, *args, **kwds))
if np.isnan(ga): if np.isnan(g_a):
ga = 0 g_a = 0
NR = (v[1] * gb - v[0] * ga) return v[1] * g_b - v[0] * g_a
return NR
def _get_integration_limits(self, omega, args, kwds): def _get_integration_limits(self, omega, args, kwds):
a, b = self.a, self.b a, b = self.a, self.b
@ -486,23 +491,23 @@ class AdaptiveLevin(_Integrator):
num_collocation_point_list = m*2**np.arange(1, 5) + 1 num_collocation_point_list = m*2**np.arange(1, 5) + 1
basis = self.basis basis = self.basis
Q = 1e+300 q_val = 1e+300
num_function_evaluations = 0
n = 0 n = 0
ni = 0
for num_collocation_points in num_collocation_point_list: for num_collocation_points in num_collocation_point_list:
ni_old = ni n_old = n
Q_old = Q q_old = q_val
x = points(num_collocation_points, betam) x = points(num_collocation_points, betam)
ni = len(x) n = len(x)
if ni > ni_old: if n > n_old:
Q = self.aLevinTQ(omega, ff, gg, dgg, x, s, basis, *args, q_val = self._a_levin(omega, ff, gg, dgg, x, s, basis, *args,
**kwds) **kwds)
n += ni num_function_evaluations += n
err = np.abs(Q-Q_old) err = np.abs(q_val-q_old)
if err <= abseps: if err <= abseps:
break break
info = self.info(err, n) info = self.info(err, num_function_evaluations)
return Q, info return q_val, info
class EvansWebster(AdaptiveLevin): class EvansWebster(AdaptiveLevin):
@ -515,12 +520,11 @@ class EvansWebster(AdaptiveLevin):
precision=precision, endpoints=endpoints, precision=precision, endpoints=endpoints,
full_output=full_output) full_output=full_output)
def aLevinTQ(self, omega, ff, gg, dgg, x, s, basis, *args, **kwds): def _a_levin(self, omega, ff, gg, dgg, x, s, basis, *args, **kwds):
w = evans_webster_weights(omega, gg, dgg, x, basis, *args, **kwds) w = evans_webster_weights(omega, gg, dgg, x, basis, *args, **kwds)
f = Limit(ff)(x, *args, **kwds) f = Limit(ff)(x, *args, **kwds)
NR = np.sum(f*w) return np.sum(f*w)
return NR
def _get_num_points(self, s, prec, betam): def _get_num_points(self, s, prec, betam):
return 8 if s > 1 else int(prec / max(np.log10(betam + 1), 1) + 1) return 8 if s > 1 else int(prec / max(np.log10(betam + 1), 1) + 1)

Loading…
Cancel
Save