From c4e5bfd66a1299fb61a6ae8ac2d3ae54ab53bf4e Mon Sep 17 00:00:00 2001 From: pbrod Date: Wed, 25 May 2016 21:04:04 +0200 Subject: [PATCH 1/6] Deleted namedtuple, fixed bug in piecewise --- wafo/misc.py | 23 ++++--- wafo/namedtuple.py | 144 ---------------------------------------- wafo/tests/test_misc.py | 44 ++++++------ 3 files changed, 35 insertions(+), 176 deletions(-) delete mode 100644 wafo/namedtuple.py diff --git a/wafo/misc.py b/wafo/misc.py index 1d900f8..c113601 100644 --- a/wafo/misc.py +++ b/wafo/misc.py @@ -54,7 +54,7 @@ def valarray(shape, value=np.NaN, typecode=None): return out -def piecewise(xi, condlist, funclist, fill_value=0.0, args=(), **kw): +def piecewise(condlist, funclist, xi=None, fill_value=0.0, args=(), **kw): """ Evaluate a piecewise-defined function. @@ -63,8 +63,6 @@ def piecewise(xi, condlist, funclist, fill_value=0.0, args=(), **kw): Parameters ---------- - xi : tuple - input arguments to the functions in funclist, i.e., (x0, x1,...., xn) condlist : list of bool arrays Each boolean array corresponds to a function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x0,x1,...,xn)` is used as the @@ -81,6 +79,8 @@ def piecewise(xi, condlist, funclist, fill_value=0.0, args=(), **kw): or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. + xi : tuple + input arguments to the functions in funclist, i.e., (x0, x1,...., xn) fill_value : scalar fill value for out of range values. Default 0. args : tuple, optional @@ -157,23 +157,26 @@ def piecewise(xi, condlist, funclist, fill_value=0.0, args=(), **kw): " must be the same length") check_shapes(condlist, funclist) - if not isinstance(xi, tuple): - xi = (xi,) condlist = np.broadcast_arrays(*condlist) if len(condlist) == len(funclist)-1: condlist.append(otherwise_condition(condlist)) - - arrays = np.broadcast_arrays(*xi) - dtype = np.result_type(*arrays) + if xi is None: + arrays = condlist + dtype = np.result_type(*funclist) + else: + if not isinstance(xi, tuple): + xi = (xi,) + arrays = np.broadcast_arrays(*xi) + dtype = np.result_type(*arrays) out = valarray(arrays[0].shape, fill_value, dtype) for cond, func in zip(condlist, funclist): if isinstance(func, collections.Callable): temp = tuple(np.extract(cond, arr) for arr in arrays) + args np.place(out, cond, func(*temp, **kw)) - else: # func is a scalar value - np.place(out, cond, func) + else: # func is a scalar value or a list + np.putmask(out, cond, func) return out diff --git a/wafo/namedtuple.py b/wafo/namedtuple.py deleted file mode 100644 index 857bd30..0000000 --- a/wafo/namedtuple.py +++ /dev/null @@ -1,144 +0,0 @@ -from operator import itemgetter as _itemgetter -from keyword import iskeyword as _iskeyword -import sys as _sys - - -def namedtuple(typename, field_names, verbose=False): - """Returns a new subclass of tuple with named fields. - - >>> Point = namedtuple('Point', 'x y') - >>> Point.__doc__ # docstring for the new class - 'Point(x, y)' - >>> p = Point(11, y=22) # instantiate with positional args or keywords - >>> p[0] + p[1] # indexable like a plain tuple - 33 - >>> x, y = p # unpack like a regular tuple - >>> x, y - (11, 22) - >>> p.x + p.y # fields also accessable by name - 33 - >>> d = p._asdict() # convert to a dictionary - >>> d['x'] - 11 - >>> Point(**d) # convert from a dictionary - Point(x=11, y=22) - >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields - Point(x=100, y=22) - - """ - - # Parse and validate the field names. 
Validation serves two purposes, - # generating informative error messages and preventing template injection - # attacks. - if isinstance(field_names, basestring): - # names separated by whitespace and/or commas - field_names = field_names.replace(',', ' ').split() - field_names = tuple(field_names) - for name in (typename,) + field_names: - if not min(c.isalnum() or c == '_' for c in name): - raise ValueError( - 'Type names and field names can only contain alphanumeric ' + - 'characters and underscores: %r' % name) - if _iskeyword(name): - raise ValueError( - 'Type names and field names cannot be a keyword: %r' % name) - if name[0].isdigit(): - raise ValueError('Type names and field names cannot start ' + - 'with a number: %r' % name) - seen_names = set() - for name in field_names: - if name.startswith('_'): - raise ValueError( - 'Field names cannot start with an underscore: %r' % name) - if name in seen_names: - raise ValueError('Encountered duplicate field name: %r' % name) - seen_names.add(name) - - # Create and fill-in the class template - numfields = len(field_names) - # tuple repr without parens or quotes - argtxt = repr(field_names).replace("'", "")[1:-1] - reprtxt = ', '.join('%s=%%r' % name for name in field_names) - dicttxt = ', '.join('%r: t[%d]' % (name, pos) - for pos, name in enumerate(field_names)) - template = '''class %(typename)s(tuple): - '%(typename)s(%(argtxt)s)' \n - __slots__ = () \n - _fields = %(field_names)r \n - def __new__(cls, %(argtxt)s): - return tuple.__new__(cls, (%(argtxt)s)) \n - @classmethod - def _make(cls, iterable, new=tuple.__new__, len=len): - 'Make a new %(typename)s object from a sequence or iterable' - result = new(cls, iterable) - if len(result) != %(numfields)d: - raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result)) - return result \n - def __repr__(self): - return '%(typename)s(%(reprtxt)s)' %% self \n - def _asdict(t): - 'Return a new dict which maps field names to their values' - return {%(dicttxt)s} \n - def _replace(self, **kwds): - 'Return a new %(typename)s object replacing specified fields with new values' - result = self._make(map(kwds.pop, %(field_names)r, self)) - if kwds: - raise ValueError('Got unexpected field names: %%r' %% kwds.keys()) - return result \n\n''' % locals() - for i, name in enumerate(field_names): - template += ' %s = property(itemgetter(%d))\n' % (name, i) - if verbose: - print template - - # Execute the template string in a temporary namespace - namespace = dict(itemgetter=_itemgetter) - try: - exec template in namespace - except SyntaxError, e: - raise SyntaxError(e.message + ':\n' + template) - result = namespace[typename] - - # For pickling to work, the __module__ variable needs to be set to the - # frame where the named tuple is created. Bypass this step in enviroments - # where sys._getframe is not defined (Jython for example). - if hasattr(_sys, '_getframe'): - result.__module__ = _sys._getframe(1).f_globals['__name__'] - - return result - - -if __name__ == '__main__': - # verify that instances can be pickled - from cPickle import loads, dumps - Point = namedtuple('Point', 'x, y', True) - p = Point(x=10, y=20) - assert p == loads(dumps(p)) - - # test and demonstrate ability to override methods - class Point(namedtuple('Point', 'x y')): - - @property - def hypot(self): - return (self.x ** 2 + self.y ** 2) ** 0.5 - - def __str__(self): - return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, - self.hypot) - - for p in Point(3, 4), Point(14, 5), Point(9. 
/ 7, 6): - print(p) - - class Point(namedtuple('Point', 'x y')): - '''Point class with optimized _make() and _replace() - without error-checking - ''' - _make = classmethod(tuple.__new__) - - def _replace(self, _map=map, **kwds): - return self._make(_map(kwds.get, ('x', 'y'), self)) - - print(Point(11, 22)._replace(x=100)) - - import doctest - TestResults = namedtuple('TestResults', 'failed attempted') - print(TestResults(*doctest.testmod())) diff --git a/wafo/tests/test_misc.py b/wafo/tests/test_misc.py index 9ad18bc..37ccc25 100644 --- a/wafo/tests/test_misc.py +++ b/wafo/tests/test_misc.py @@ -475,64 +475,64 @@ def test_tranproc(): class TestPiecewise(TestCase): def test_condition_is_single_bool_list(self): - assert_raises(ValueError, piecewise, [0, 0], [True, False], [1]) + assert_raises(ValueError, piecewise, [True, False], [1], [0, 0]) def test_condition_is_list_of_single_bool_list(self): - x = piecewise([0, 0], [[True, False]], [1]) + x = piecewise([[True, False]], [1], [0, 0]) assert_array_equal(x, [1, 0]) def test_conditions_is_list_of_single_bool_array(self): - x = piecewise([0, 0], [np.array([True, False])], [1]) + x = piecewise([np.array([True, False])], [1], [0, 0]) assert_array_equal(x, [1, 0]) def test_condition_is_single_int_array(self): - assert_raises(ValueError, piecewise, [0, 0], np.array([1, 0]), [1]) + assert_raises(ValueError, piecewise, np.array([1, 0]), [1], [0, 0]) def test_condition_is_list_of_single_int_array(self): - x = piecewise([0, 0], [np.array([1, 0])], [1]) + x = piecewise([np.array([1, 0])], [1], [0, 0]) assert_array_equal(x, [1, 0]) def test_simple(self): - x = piecewise([0, 0], [[False, True]], [lambda x:-1]) + x = piecewise([[False, True]], [lambda x:-1], [0, 0]) assert_array_equal(x, [0, -1]) - x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) + x = piecewise([[True, False], [False, True]], [3, 4], [1, 2]) assert_array_equal(x, [3, 4]) def test_default(self): # No value specified for x[1], should be 0 - x = piecewise([1, 2], [[True, False]], [2]) + x = piecewise([[True, False]], [2], [1, 2],) assert_array_equal(x, [2, 0]) # Should set x[1] to 3 - x = piecewise([1, 2], [[True, False]], [2, 3]) + x = piecewise([[True, False]], [2, 3], [1, 2]) assert_array_equal(x, [2, 3]) def test_0d(self): x = np.array(3) - y = piecewise(x, [x > 3], [4, 0]) + y = piecewise([x > 3], [4, 0], x) assert_(y.ndim == 0) assert_(y == 0) x = 5 - y = piecewise(x, [[True], [False]], [1, 0]) + y = piecewise([[True], [False]], [1, 0], x) assert_(y.ndim == 0) assert_(y == 1) def test_abs_function(self): x = np.linspace(-2.5, 2.5, 6) - vals = piecewise((x,), [x < 0, x >= 0], [lambda x: -x, lambda x: x]) + vals = piecewise([x < 0, x >= 0], [lambda x: -x, lambda x: x], (x,)) assert_array_equal(vals, [2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) def test_abs_function_with_scalar(self): x = np.array(-2.5) - vals = piecewise((x,), [x < 0, x >= 0], [lambda x: -x, lambda x: x]) + vals = piecewise([x < 0, x >= 0], [lambda x: -x, lambda x: x], (x,)) assert_(vals == 2.5) def test_otherwise_condition(self): x = np.linspace(-2.5, 2.5, 6) - vals = piecewise((x,), [x < 0, ], [lambda x: -x, lambda x: x]) + vals = piecewise([x < 0, ], [lambda x: -x, lambda x: x], (x,)) assert_array_equal(vals, [2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) def test_passing_further_args_to_fun(self): @@ -542,24 +542,24 @@ class TestPiecewise(TestCase): def fun1(x, y, scale=1.): return x*y/scale x = np.linspace(-2.5, 2.5, 6) - vals = piecewise((x,), [x < 0, ], [fun0, fun1], args=(2.,), scale=2.) 
+ vals = piecewise([x < 0, ], [fun0, fun1], (x,), args=(2.,), scale=2.) assert_array_equal(vals, [2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) def test_step_function(self): x = np.linspace(-2.5, 2.5, 6) - vals = piecewise(x, [x < 0, x >= 0], [-1, 1]) + vals = piecewise([x < 0, x >= 0], [-1, 1], x) assert_array_equal(vals, [-1., -1., -1., 1., 1., 1.]) def test_step_function_with_scalar(self): x = 1 - vals = piecewise(x, [x < 0, x >= 0], [-1, 1]) + vals = piecewise([x < 0, x >= 0], [-1, 1], x) assert_(vals == 1) def test_function_with_two_args(self): x = np.linspace(-2, 2, 5) X, Y = np.meshgrid(x, x) vals = piecewise( - (X, Y), [X * Y < 0, ], [lambda x, y: -x * y, lambda x, y: x * y]) + [X * Y < 0, ], [lambda x, y: -x * y, lambda x, y: x * y], (X, Y)) assert_array_equal(vals, [[4., 2., -0., 2., 4.], [2., 1., -0., 1., 2.], [-0., -0., 0., 0., 0.], @@ -569,8 +569,8 @@ class TestPiecewise(TestCase): def test_fill_value_and_function_with_two_args(self): x = np.linspace(-2, 2, 5) X, Y = np.meshgrid(x, x) - vals = piecewise((X, Y), [X * Y < -0.5, X * Y > 0.5], - [lambda x, y: -x * y, lambda x, y: x * y], + vals = piecewise([X * Y < -0.5, X * Y > 0.5], + [lambda x, y: -x * y, lambda x, y: x * y], (X, Y), fill_value=np.nan) nan = np.nan assert_array_equal(vals, [[4., 2., nan, 2., 4.], @@ -582,8 +582,8 @@ class TestPiecewise(TestCase): def test_fill_value2_and_function_with_two_args(self): x = np.linspace(-2, 2, 5) X, Y = np.meshgrid(x, x) - vals = piecewise((X, Y), [X * Y < -0.5, X * Y > 0.5], - [lambda x, y: -x * y, lambda x, y: x * y, np.nan]) + vals = piecewise([X * Y < -0.5, X * Y > 0.5], + [lambda x, y: -x * y, lambda x, y: x * y, np.nan], (X, Y)) nan = np.nan assert_array_equal(vals, [[4., 2., nan, 2., 4.], [2., 1., nan, 1., 2.], From 9d5c4adb6498a89c0952c813a55c2ce03238853c Mon Sep 17 00:00:00 2001 From: pbrod Date: Wed, 25 May 2016 22:43:48 +0200 Subject: [PATCH 2/6] Small fix --- wafo/misc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wafo/misc.py b/wafo/misc.py index c113601..591ff91 100644 --- a/wafo/misc.py +++ b/wafo/misc.py @@ -162,7 +162,7 @@ def piecewise(condlist, funclist, xi=None, fill_value=0.0, args=(), **kw): if len(condlist) == len(funclist)-1: condlist.append(otherwise_condition(condlist)) if xi is None: - arrays = condlist + arrays = () dtype = np.result_type(*funclist) else: if not isinstance(xi, tuple): @@ -170,7 +170,7 @@ def piecewise(condlist, funclist, xi=None, fill_value=0.0, args=(), **kw): arrays = np.broadcast_arrays(*xi) dtype = np.result_type(*arrays) - out = valarray(arrays[0].shape, fill_value, dtype) + out = valarray(condlist[0].shape, fill_value, dtype) for cond, func in zip(condlist, funclist): if isinstance(func, collections.Callable): temp = tuple(np.extract(cond, arr) for arr in arrays) + args From c78cf86ae4ee080f8565aea1c11b62277238f727 Mon Sep 17 00:00:00 2001 From: pbrod Date: Wed, 25 May 2016 22:56:41 +0200 Subject: [PATCH 3/6] added --doctest-modules --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index ccb7e01..d9a9c10 100644 --- a/setup.cfg +++ b/setup.cfg @@ -64,6 +64,7 @@ addopts = addopts = --cov wafo --cov-report term-missing --verbose + --doctest-modules [aliases] docs = build_sphinx From f8edea49d59c0ad8eee1f7113d2414d86a5eac83 Mon Sep 17 00:00:00 2001 From: pbrod Date: Thu, 26 May 2016 13:02:06 +0200 Subject: [PATCH 4/6] Fixed failing test for piecewise --- wafo/misc.py | 4 +++- wafo/tests/test_misc.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/wafo/misc.py 
b/wafo/misc.py index 591ff91..8448dce 100644 --- a/wafo/misc.py +++ b/wafo/misc.py @@ -164,13 +164,15 @@ def piecewise(condlist, funclist, xi=None, fill_value=0.0, args=(), **kw): if xi is None: arrays = () dtype = np.result_type(*funclist) + shape = condlist[0].shape else: if not isinstance(xi, tuple): xi = (xi,) arrays = np.broadcast_arrays(*xi) dtype = np.result_type(*arrays) + shape = arrays[0].shape - out = valarray(condlist[0].shape, fill_value, dtype) + out = valarray(shape, fill_value, dtype) for cond, func in zip(condlist, funclist): if isinstance(func, collections.Callable): temp = tuple(np.extract(cond, arr) for arr in arrays) + args diff --git a/wafo/tests/test_misc.py b/wafo/tests/test_misc.py index 37ccc25..4df73fa 100644 --- a/wafo/tests/test_misc.py +++ b/wafo/tests/test_misc.py @@ -516,8 +516,8 @@ class TestPiecewise(TestCase): x = 5 y = piecewise([[True], [False]], [1, 0], x) - assert_(y.ndim == 0) assert_(y == 1) + assert_(y.ndim == 0) def test_abs_function(self): x = np.linspace(-2.5, 2.5, 6) From 6281d6fb09b2bed709bc68a93eca1d4e002fe03c Mon Sep 17 00:00:00 2001 From: pbrod Date: Thu, 26 May 2016 13:18:48 +0200 Subject: [PATCH 5/6] added integrate ocscilating + added doctest to .travis --- .travis.yml | 3 +- wafo/integrate_oscillating.py | 577 +++++++++++++++++++++++ wafo/tests/test_integrate_oscillating.py | 387 +++++++++++++++ 3 files changed, 966 insertions(+), 1 deletion(-) create mode 100644 wafo/integrate_oscillating.py create mode 100644 wafo/tests/test_integrate_oscillating.py diff --git a/.travis.yml b/.travis.yml index dec9c7f..a40f642 100644 --- a/.travis.yml +++ b/.travis.yml @@ -46,7 +46,8 @@ before_script: - git config --global user.email "per.andreas.brodtkorb@gmail.com" - git config --global user.name "pbrod" script: - - coverage run --source=wafo setup.py test + - coverage run --source=wafo setup.py test + - py.test wafo/ --cov --pep8 --doctest-modules after_success: - coveralls - codecov diff --git a/wafo/integrate_oscillating.py b/wafo/integrate_oscillating.py new file mode 100644 index 0000000..2b329a3 --- /dev/null +++ b/wafo/integrate_oscillating.py @@ -0,0 +1,577 @@ +''' +Created on 20. aug. 
2015 + +@author: pab +''' +from __future__ import division +import numpy as np +import warnings +import numdifftools as nd # @UnresolvedImport +import numdifftools.nd_algopy as nda # @UnresolvedImport +from numdifftools.limits import Limit # @UnresolvedImport +from numpy import linalg +from numpy.polynomial.chebyshev import chebval, Chebyshev +from numpy.polynomial import polynomial +from wafo.misc import piecewise, findcross, ecross +from collections import namedtuple + +EPS = np.finfo(float).eps +_EPS = EPS +finfo = np.finfo(float) +_TINY = finfo.tiny +_HUGE = finfo.max +dea3 = nd.dea3 + + +class PolyBasis(object): + + def derivative(self, t, k, n=1): + c = np.zeros(k + 1) + c[k] = 1 + dc = polynomial.polyder(c, m=n) + return polynomial.polyval(t, dc) + + def eval(self, t, c): + return polynomial.polyval(t, c) + + def __call__(self, t, k): + return t**k +poly_basis = PolyBasis() + + +class ChebyshevBasis(object): + + def derivative(self, t, k, n=1): + c = np.zeros(k + 1) + c[k] = 1 + cheb = Chebyshev(c) + dcheb = cheb.deriv(m=n) + return chebval(t, dcheb.coef) + + def eval(self, t, c): + return chebval(t, c) + + def __call__(self, t, k): + c = np.zeros(k + 1) + c[k] = 1 + return chebval(t, c) +chebyshev_basis = ChebyshevBasis() + + +def richardson(Q, k): + # license BSD + # Richardson extrapolation with parameter estimation + c = np.real((Q[k - 1] - Q[k - 2]) / (Q[k] - Q[k - 1])) - 1. + # The lower bound 0.07 admits the singularity x.^-0.9 + c = max(c, 0.07) + R = Q[k] + (Q[k] - Q[k - 1]) / c + return R + + +def evans_webster_weights(omega, gg, dgg, x, basis, *args, **kwds): + + def Psi(t, k): + return dgg(t, *args, **kwds) * basis(t, k) + + j_w = 1j * omega + nn = len(x) + A = np.zeros((nn, nn), dtype=complex) + F = np.zeros((nn,), dtype=complex) + + dbasis = basis.derivative + lim_gg = Limit(gg) + b1 = np.exp(j_w*lim_gg(1, *args, **kwds)) + if np.isnan(b1): + b1 = 0.0 + a1 = np.exp(j_w*lim_gg(-1, *args, **kwds)) + if np.isnan(a1): + a1 = 0.0 + + lim_Psi = Limit(Psi) + for k in range(nn): + F[k] = basis(1, k)*b1 - basis(-1, k)*a1 + A[k] = (dbasis(x, k, n=1) + j_w * lim_Psi(x, k)) + + LS = linalg.lstsq(A, F) + return LS[0] + + +def osc_weights(omega, g, dg, x, basis, ab, *args, **kwds): + w = [] + + for a, b in zip(ab[::2], ab[1::2]): + scale = (b - a) / 2 + offset = (a + b) / 2 + + def gg(t): + return g(scale * t + offset, *args, **kwds) + + def dgg(t): + return scale * dg(scale * t + offset, *args, **kwds) + + w.append(evans_webster_weights(omega, gg, dgg, x, basis)) + + return np.asarray(w).ravel() + + +class QuadOsc(object): + info = namedtuple('info', ['error_estimate', 'n']) + + def __init__(self, f, g, dg=None, a=-1, b=1, basis=chebyshev_basis, s=15, + precision=10, endpoints=False, maxiter=17, full_output=False): + self.f = f + self.g = g + self.dg = nd.Derivative(g) if dg is None else dg + self.basis = basis + self.a = a + self.b = b + self.s = s + self.endpoints = endpoints + self.maxiter = maxiter + self.precision = precision + self.full_output = full_output + + def _change_interval_to_0_1(self, f, g, dg, a, b): + def f1(t, *args, **kwds): + den = 1-t + return f(a + t / den, *args, **kwds) / den ** 2 + + def g1(t, *args, **kwds): + return g(a + t / (1 - t), *args, **kwds) + + def dg1(t, *args, **kwds): + den = 1-t + return dg(a + t / den, *args, **kwds) / den ** 2 + return f1, g2, dg1, 0., 1. 
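+
+    # The three _change_interval_to_* helpers substitute variables so that an
+    # infinite integration range becomes a finite one: [a, inf) -> [0, 1) via
+    # x = a + t/(1 - t), (-inf, b] -> (-1, 0] via x = b + t/(1 + t), and
+    # (-inf, inf) -> (-1, 1) via x = t/(1 - t**2); in each case f is rescaled
+    # by dx/dt so the integral keeps its value.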
+ + def _change_interval_to_m1_0(self, f, g, dg, a, b): + def f2(t, *args, **kwds): + den = 1 + t + return f(b + t / den, *args, **kwds) / den ** 2 + + def g2(t, *args, **kwds): + return g(b + t / (1 + t), *args, **kwds) + + def dg2(t, *args, **kwds): + den = 1 + t + return dg(b + t / den, *args, **kwds) / den ** 2 + return f2, g2, dg2, -1.0, 0.0 + + def _change_interval_to_m1_1(self, f, g, dg, a, b): + def f2(t, *args, **kwds): + den = (1 - t**2) + return f(t / den, *args, **kwds) * (1+t**2) / den ** 2 + + def g2(t, *args, **kwds): + den = (1 - t**2) + return g(t / den, *args, **kwds) + + def dg2(t, *args, **kwds): + den = (1 - t**2) + return dg(t / den, *args, **kwds) * (1+t**2) / den ** 2 + return f2, g2, dg2, -1., 1. + + def _get_functions(self): + a, b = self.a, self.b + reverse = np.real(a) > np.real(b) + if reverse: + a, b = b, a + f, g, dg = self.f, self.g, self.dg + + if a == b: + pass + elif np.isinf(a) | np.isinf(b): + # Check real limits + if ~np.isreal(a) | ~np.isreal(b) | np.isnan(a) | np.isnan(b): + raise ValueError('Infinite intervals must be real.') + # Change of variable + if np.isfinite(a) & np.isinf(b): + f, g, dg, a, b = self._change_interval_to_0_1(f, g, dg, a, b) + elif np.isinf(a) & np.isfinite(b): + f, g, dg, a, b = self._change_interval_to_m1_0(f, g, dg, a, b) + else: # -inf to inf + f, g, dg, a, b = self._change_interval_to_m1_1(f, g, dg, a, b) + + return f, g, dg, a, b, reverse + + def __call__(self, omega, *args, **kwds): + f, g, dg, a, b, reverse = self._get_functions() + + Q, err = self._quad_osc(f, g, dg, a, b, omega, *args, **kwds) + + if reverse: + Q = -Q + if self.full_output: + return Q, err + return Q + + def _get_best_estimate(self, k, Q0, Q1, Q2): + if k >= 5: + Qv = np.hstack((Q0[k], Q1[k], Q2[k])) + Qw = np.hstack((Q0[k - 1], Q1[k - 1], Q2[k - 1])) + elif k >= 3: + Qv = np.hstack((Q0[k], Q1[k])) + Qw = np.hstack((Q0[k - 1], Q1[k - 1])) + else: + Qv = np.atleast_1d(Q0[k]) + Qw = Q0[k - 1] + errors = np.atleast_1d(abs(Qv - Qw)) + j = np.nanargmin(errors) + return Qv[j], errors[j] + + def _extrapolate(self, k, Q0, Q1, Q2): + if k >= 4: + Q1[k], _err1 = dea3(Q0[k - 2], Q0[k - 1], Q0[k]) + Q2[k], _err2 = dea3(Q1[k - 2], Q1[k - 1], Q1[k]) + elif k >= 2: + Q1[k], _err1 = dea3(Q0[k - 2], Q0[k - 1], Q0[k]) + # # Richardson extrapolation + # if k >= 4: + # Q1[k] = richardson(Q0, k) + # Q2[k] = richardson(Q1, k) + # elif k >= 2: + # Q1[k] = richardson(Q0, k) + Q, err = self._get_best_estimate(k, Q0, Q1, Q2) + return Q, err + + def _quad_osc(self, f, g, dg, a, b, omega, *args, **kwds): + if a == b: + Q = b - a + err = b - a + return Q, err + + abseps = 10**-self.precision + max_iter = self.maxiter + basis = self.basis + if self.endpoints: + xq = chebyshev_extrema(self.s) + else: + xq = chebyshev_roots(self.s) + # xq = tanh_sinh_open_nodes(self.s) + + # One interval + hh = (b - a) / 2 + x = (a + b) / 2 + hh * xq # Nodes + + dtype = complex + Q0 = np.zeros((max_iter, 1), dtype=dtype) # Quadrature + Q1 = np.zeros((max_iter, 1), dtype=dtype) # First extrapolation + Q2 = np.zeros((max_iter, 1), dtype=dtype) # Second extrapolation + + lim_f = Limit(f) + ab = np.hstack([a, b]) + wq = osc_weights(omega, g, dg, xq, basis, ab, *args, **kwds) + Q0[0] = hh * np.sum(wq * lim_f(x, *args, **kwds)) + + # Successive bisection of intervals + nq = len(xq) + n = nq + for k in xrange(1, max_iter): + n += nq*2**k + + hh = hh / 2 + x = np.hstack([x + a, x + b]) / 2 + ab = np.hstack([ab + a, ab + b]) / 2 + wq = osc_weights(omega, g, dg, xq, basis, ab, *args, **kwds) + + Q0[k] = hh * 
np.sum(wq * lim_f(x, *args, **kwds)) + + Q, err = self._extrapolate(k, Q0, Q1, Q2) + + convergence = (err <= abseps) | ~np.isfinite(Q) + if convergence: + break + else: + warnings.warn('Max number of iterations reached ' + 'without convergence.') + + if ~np.isfinite(Q): + warnings.warn('Integral approximation is Infinite or NaN.') + + # The error estimate should not be zero + err += 2 * np.finfo(Q).eps + return Q, self.info(err, n) + + +def adaptive_levin_points(M, delta): + m = M - 1 + prm = 0.5 + while prm * m / delta >= 1: + delta = 2 * delta + k = np.arange(M) + x = piecewise([k < prm * m, k == np.ceil(prm * m)], + [-1 + k / delta, 0 * k, 1 - (m - k) / delta]) + return x + + +def open_levin_points(M, delta): + return adaptive_levin_points(M+2, delta)[1:-1] + + +def chebyshev_extrema(M, delta=None): + k = np.arange(M) + x = np.cos(k * np.pi / (M-1)) + return x + +_EPS = np.finfo(float).eps + + +def tanh_sinh_nodes(M, delta=None, tol=_EPS): + tmax = np.arcsinh(np.arctanh(1-_EPS)*2/np.pi) + # tmax = 3.18 + m = int(np.floor(-np.log2(tmax/max(M-1, 1)))) - 1 + h = 2.0**-m + t = np.arange((M+1)//2+1)*h + x = np.tanh(np.pi/2*np.sinh(t)) + k = np.flatnonzero(np.abs(x - 1) <= 10*tol) + y = x[:k[0]+1] if len(k) else x + return np.hstack((-y[:0:-1], y)) + + +def tanh_sinh_open_nodes(M, delta=None, tol=_EPS): + return tanh_sinh_nodes(M+1, delta, tol)[1:-1] + + +def chebyshev_roots(M, delta=None): + k = np.arange(1, 2*M, 2) * 0.5 + x = np.cos(k * np.pi / M) + return x + + +class AdaptiveLevin(object): + '''Return integral for the Levin-type and adaptive Levin-type methods''' + info = namedtuple('info', ['error_estimate', 'n']) + + def __init__(self, f, g, dg=None, a=-1, b=1, basis=chebyshev_basis, s=1, + precision=10, endpoints=True, full_output=False): + self.f = f + self.g = g + self.dg = nd.Derivative(g) if dg is None else dg + self.basis = basis + self.a = a + self.b = b + self.s = s + self.precision = precision + self.endpoints = endpoints + self.full_output = full_output + + def aLevinTQ(self, omega, ff, gg, dgg, x, s, basis, *args, **kwds): + + def Psi(t, k): + return dgg(t, *args, **kwds) * basis(t, k) + + j_w = 1j * omega + nu = np.ones((len(x),), dtype=int) + nu[0] = nu[-1] = s + S = np.cumsum(np.hstack((nu, 0))) + S[-1] = 0 + nn = int(S[-2]) + A = np.zeros((nn, nn), dtype=complex) + F = np.zeros((nn,)) + dff = Limit(nda.Derivative(ff)) + dPsi = Limit(nda.Derivative(Psi)) + dbasis = basis.derivative + for r, t in enumerate(x): + for j in range(S[r - 1], S[r]): + order = ((j - S[r - 1]) % nu[r]) # derivative order + dff.f.n = order + F[j] = dff(t, *args, **kwds) + dPsi.f.n = order + for k in range(nn): + A[j, k] = (dbasis(t, k, n=order+1) + j_w * dPsi(t, k)) + k1 = np.flatnonzero(1-np.isfinite(F)) + if k1.size > 0: # Remove singularities + warnings.warn('Singularities detected! 
') + A[k1] = 0 + F[k1] = 0 + LS = linalg.lstsq(A, F) + v = basis.eval([-1, 1], LS[0]) + + lim_gg = Limit(gg) + gb = np.exp(j_w * lim_gg(1, *args, **kwds)) + if np.isnan(gb): + gb = 0 + ga = np.exp(j_w * lim_gg(-1, *args, **kwds)) + if np.isnan(ga): + ga = 0 + NR = (v[1] * gb - v[0] * ga) + return NR + + def _get_integration_limits(self, omega, args, kwds): + a, b = self.a, self.b + M = 30 + ab = [a] + scale = (b - a) / 2 + n = 30 + x = np.linspace(a, b, n + 1) + dg_x = np.asarray([scale * omega * self.dg(xi, *args, **kwds) + for xi in x]) + i10 = findcross(dg_x, M) + i1 = findcross(dg_x, 1) + i0 = findcross(dg_x, 0) + im1 = findcross(dg_x, -1) + im10 = findcross(dg_x, -M) + x10 = ecross(x, dg_x, i10, M) if len(i10) else () + x1 = ecross(x, dg_x, i1, 1) if len(i1) else () + x0 = ecross(x, dg_x, i0, 0) if len(i0) else () + xm1 = ecross(x, dg_x, im1, -1) if len(im1) else () + xm10 = ecross(x, dg_x, im10, -M) if len(im10) else () + + for i in np.unique(np.hstack((x10, x1, x0, xm1, xm10))): + if x[0] < i < x[n]: + ab.append(i) + ab.append(b) + return ab + + def __call__(self, omega, *args, **kwds): + ab = self._get_integration_limits(omega, args, kwds) + s = self.s + val = 0 + n = 0 + err = 0 + for ai, bi in zip(ab[:-1], ab[1:]): + vali, infoi = self._QaL(s, ai, bi, omega, *args, **kwds) + val += vali + err += infoi.error_estimate + n += infoi.n + if self.full_output: + info = self.info(err, n) + return val, info + return val + + def _get_num_points(self, s, prec, betam): + return 1 if s > 1 else int(prec / max(np.log10(betam + 1), 1) + 1) + + def _QaL(self, s, a, b, omega, *args, **kwds): + '''if s>1,the integral is computed by Q_s^L''' + scale = (b - a) / 2 + offset = (a + b) / 2 + prec = self.precision # desired precision + + def ff(t, *args, **kwds): + return scale * self.f(scale * t + offset, *args, **kwds) + + def gg(t, *args, **kwds): + return self.g(scale * t + offset, *args, **kwds) + + def dgg(t, *args, **kwds): + return scale * self.dg(scale * t + offset, *args, **kwds) + dg_a = abs(omega * dgg(-1, *args, **kwds)) + dg_b = abs(omega * dgg(1, *args, **kwds)) + g_a = abs(omega * gg(-1, *args, **kwds)) + g_b = abs(omega * gg(1, *args, **kwds)) + delta, alpha = min(dg_a, dg_b), min(g_a, g_b) + + betam = delta # * scale + if self.endpoints: + if (delta < 10 or alpha <= 10 or s > 1): + points = chebyshev_extrema + else: + points = adaptive_levin_points + elif (delta < 10 or alpha <= 10 or s > 1): + points = chebyshev_roots + else: + points = open_levin_points # tanh_sinh_open_nodes + + m = self._get_num_points(s, prec, betam) + abseps = 10*10.0**-prec + num_collocation_point_list = m*2**np.arange(1, 5) + 1 + basis = self.basis + + Q = 1e+300 + n = 0 + ni = 0 + for num_collocation_points in num_collocation_point_list: + ni_old = ni + Q_old = Q + x = points(num_collocation_points, betam) + ni = len(x) + if ni > ni_old: + Q = self.aLevinTQ(omega, ff, gg, dgg, x, s, basis, *args, + **kwds) + n += ni + err = np.abs(Q-Q_old) + if err <= abseps: + break + info = self.info(err, n) + return Q, info + + +class EvansWebster(AdaptiveLevin): + '''Return integral for the Evans Webster method''' + + def __init__(self, f, g, dg=None, a=-1, b=1, basis=chebyshev_basis, s=8, + precision=10, endpoints=False, full_output=False): + self.f = f + self.g = g + self.dg = nd.Derivative(g) if dg is None else dg + self.basis = basis + self.a = a + self.b = b + self.s = s + self.endpoints = endpoints + self.precision = precision + self.full_output = full_output + + def aLevinTQ(self, omega, ff, gg, dgg, x, s, basis, 
*args, **kwds): + w = evans_webster_weights(omega, gg, dgg, x, basis, *args, **kwds) + + f = Limit(ff)(x, *args, **kwds) + NR = np.sum(f*w) + return NR + + def _get_num_points(self, s, prec, betam): + return 8 if s > 1 else int(prec / max(np.log10(betam + 1), 1) + 1) + +# def _QaL(self, s, a, b, omega, *args, **kwds): +# '''if s>1,the integral is computed by Q_s^L''' +# scale = (b - a) / 2 +# offset = (a + b) / 2 +# prec = self.precision # desired precision +# +# def ff(t, *args, **kwds): +# return scale * self.f(scale * t + offset, *args, **kwds) +# +# def gg(t, *args, **kwds): +# return self.g(scale * t + offset, *args, **kwds) +# +# def dgg(t, *args, **kwds): +# return scale * self.dg(scale * t + offset, *args, **kwds) +# dg_a = abs(omega * dgg(-1, *args, **kwds)) +# dg_b = abs(omega * dgg(1, *args, **kwds)) +# g_a = abs(omega * gg(-1, *args, **kwds)) +# g_b = abs(omega * gg(1, *args, **kwds)) +# delta, alpha = min(dg_a, dg_b), min(g_a, g_b) +# +# betam = delta # * scale +# if self.endpoints: +# if (delta < 10 or alpha <= 10 or s > 1): +# points = chebyshev_extrema +# else: +# points = adaptive_levin_points +# elif (delta < 10 or alpha <= 10 or s > 1): +# points = chebyshev_roots +# else: +# points = tanh_sinh_open_nodes +# +# m = 8 if s > 1 else int(prec / max(np.log10(betam + 1), 1) + 1) +# abseps = 10*10.0**-prec +# num_collocation_point_list = 2*m*np.arange(1, 6, 2) +# # range(num_points, num_points+3, 2) +# basis = self.basis +# Q = 1e+300 +# n = 0 +# for num_collocation_points in num_collocation_point_list: +# Q_old = Q +# x = points(num_collocation_points-1, betam) +# Q = self.aLevinTQ(omega, ff, gg, dgg, x, s, basis, *args, **kwds) +# n += num_collocation_points +# err = np.abs(Q-Q_old) +# if err <= abseps: +# break +# info = self.info(err, n) +# return Q, info + + +if __name__ == '__main__': + tanh_sinh_nodes(16) diff --git a/wafo/tests/test_integrate_oscillating.py b/wafo/tests/test_integrate_oscillating.py new file mode 100644 index 0000000..2242151 --- /dev/null +++ b/wafo/tests/test_integrate_oscillating.py @@ -0,0 +1,387 @@ +''' +Created on 31. aug. 2015 + +@author: pab +''' +from __future__ import division +import numpy as np +import mpmath as mp +import unittest +from wafo.integrate_oscillating import (adaptive_levin_points, + chebyshev_extrema, + chebyshev_roots, tanh_sinh_nodes, + tanh_sinh_open_nodes, + AdaptiveLevin, poly_basis, + chebyshev_basis, + EvansWebster, QuadOsc) +# import numdifftools as nd +from numpy.testing import assert_allclose +from scipy.special import gamma, digamma +_EPS = np.finfo(float).eps + + +class TestBasis(unittest.TestCase): + def test_poly(self): + t = 1 + vals = [poly_basis.derivative(t, k, n=1) for k in range(3)] + assert_allclose(vals, range(3)) + vals = [poly_basis.derivative(0, k, n=1) for k in range(3)] + assert_allclose(vals, [0, 1, 0]) + vals = [poly_basis.derivative(0, k, n=2) for k in range(3)] + assert_allclose(vals, [0, 0, 2]) + + def test_chebyshev(self): + t = 1 + vals = [chebyshev_basis.derivative(t, k, n=1) for k in range(3)] + assert_allclose(vals, np.arange(3)**2) + vals = [chebyshev_basis.derivative(0, k, n=1) for k in range(3)] + assert_allclose(vals, [0, 1, 0]) + vals = [chebyshev_basis.derivative(0, k, n=2) for k in range(3)] + assert_allclose(vals, [0, 0, 4]) + + +class TestLevinPoints(unittest.TestCase): + + def test_adaptive(self): + M = 11 + delta = 100 + x = adaptive_levin_points(M, delta) + true_x = [-1., -0.99, -0.98, -0.97, -0.96, 0., + 0.96, 0.97, 0.98, 0.99, 1.] 
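+        # for M=11, delta=100 adaptive_levin_points spaces the nodes 1/delta
+        # apart next to the endpoints t = -1 and t = +1 and puts one node at
+        # t = 0, which is what true_x above encodes.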
+ assert_allclose(x, true_x) + + def test_chebyshev_extrema(self): + M = 11 + delta = 100 + x = chebyshev_extrema(M, delta) + true_x = [1.000000e+00, 9.510565e-01, 8.090170e-01, 5.877853e-01, + 3.090170e-01, 6.123234e-17, -3.090170e-01, -5.877853e-01, + -8.090170e-01, -9.510565e-01, -1.000000e+00] + assert_allclose(x, true_x) + + def test_chebyshev_roots(self): + M = 11 + delta = 100 + x = chebyshev_roots(M, delta) + + true_x = [9.89821442e-01, 9.09631995e-01, 7.55749574e-01, + 5.40640817e-01, 2.81732557e-01, 2.83276945e-16, + -2.81732557e-01, -5.40640817e-01, -7.55749574e-01, + -9.09631995e-01, -9.89821442e-01] + assert_allclose(x, true_x) + + def test_tanh_sinh_nodes(self): + for n in 2**np.arange(1, 5) + 1: + x = tanh_sinh_nodes(n) + # self.assertEqual(n, len(x)) + + def test_tanh_sinh_open_nodes(self): + for n in 2**np.arange(1, 5) + 1: + x = tanh_sinh_open_nodes(n) + # self.assertEqual(n, len(x)) + + +class LevinQuadrature(unittest.TestCase): + def test_exp_4t_exp_jw_gamma_t_exp_4t(self): + def f(t): + return np.exp(4 * t) # amplitude function + + def g(t): + return t + np.exp(4 * t) * gamma(t) # phase function + + def dg(t): + return 1 + (4 + digamma(t)) * np.exp(4 * t) * gamma(t) + a = 1 + b = 2 + omega = 100 + + def ftot(t): + exp4t = mp.exp(4*t) + return exp4t * mp.exp(1j * omega * (t+exp4t*mp.gamma(t))) + + _true_val, _err = mp.quadts(ftot, [a, (a+b)/2, b], error=True) + + true_val = 0.00435354129735323908804 + 0.00202865398517716214366j + # quad = AdaptiveLevin(f, g, dg, a=a, b=b, s=1, full_output=True) + for quadfun in [EvansWebster, QuadOsc, AdaptiveLevin]: + quad = quadfun(f, g, dg, a=a, b=b, full_output=True) + val, info = quad(omega) + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-11) + # assert_allclose(info.n, 9) + + def test_exp_jw_t(self): + def g(t): + return t + + def dg(t): + return np.ones(np.shape(t)) + + def true_F(t): + return np.exp(1j*omega*g(t))/(1j*omega) + + val, _err = mp.quadts(g, [0, 1], error=True) + a = 1 + b = 2 + omega = 1 + true_val = true_F(b)-true_F(a) + + for quadfun in [QuadOsc, AdaptiveLevin, EvansWebster]: + quad = quadfun(dg, g, dg, a, b, full_output=True) + val, info = quad(omega) + + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-12) + # assert_allclose(info.n, 21) + + def test_I1_1_p_ln_x_exp_jw_xlnx(self): + def g(t): + return t*np.log(t) + + def dg(t): + return 1 + np.log(t) + + def true_F(t): + return np.exp(1j*(omega*g(t)))/(1j*omega) + + a = 100 + b = 200 + omega = 1 + true_val = true_F(b)-true_F(a) + for quadfun in [AdaptiveLevin, QuadOsc, EvansWebster]: + quad = quadfun(dg, g, dg, a, b, full_output=True) + + val, info = quad(omega) + + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-10) + # assert_allclose(info.n, 11) + + def test_I4_ln_x_exp_jw_30x(self): + n = 7 + + def g(t): + return t**n + + def dg(t): + return n*t**(n-1) + + def f(t): + return dg(t)*np.log(g(t)) + + a = 0 + b = (2 * np.pi)**(1./n) + omega = 30 + + def ftot(t): + return n*t**(n-1)*mp.log(t**n) * mp.exp(1j * omega * t**n) + + _true_val, _err = mp.quadts(ftot, [a, b], error=True, maxdegree=8) + # true_val = (-0.052183048684992 - 0.193877275099872j) + true_val = (-0.0521830486849921 - 0.193877275099871j) + + for quadfun in [QuadOsc, EvansWebster, AdaptiveLevin]: + quad = quadfun(f, g, dg, a, b, full_output=True) + val, info = quad(omega) + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-5) + + def test_I5_coscost_sint_exp_jw_sint(self): + a = 0 + b = np.pi/2 + omega = 100 
+ + def f(t): + return np.cos(np.cos(t))*np.sin(t) + + def g(t): + return np.sin(t) + + def dg(t): + return np.cos(t) + + def ftot(t): + return mp.cos(mp.cos(t)) * mp.sin(t) * mp.exp(1j * omega * + mp.sin(t)) + + _true_val, _err = mp.quadts(ftot, [a, 0.5, 1, b], maxdegree=9, + error=True) + + true_val = 0.0325497765499959-0.121009052128827j + for quadfun in [QuadOsc, EvansWebster, AdaptiveLevin]: + quad = quadfun(f, g, dg, a, b, full_output=True) + + val, info = quad(omega) + + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-9) + + def test_I6_exp_jw_td_1_m_t(self): + a = 0 + b = 1 + omega = 1 + + def f(t): + return np.ones(np.shape(t)) + + def g(t): + return t/(1-t) + + def dg(t): + return 1./(1-t)**2 + + def ftot(t): + return mp.exp(1j * omega * t/(1-t)) + + true_val = (0.3785503757641866423607342717846606761068353230802945830 + + 0.3433779615564270328325330038583124340012440194999075192j) + for quadfun in [QuadOsc, EvansWebster, AdaptiveLevin]: + quad = quadfun(f, g, dg, a, b, endpoints=False, full_output=True) + + val, info = quad(omega) + + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-10) + + def test_I8_cos_47pix2d4_exp_jw_x(self): + def f(t): + return np.cos(47*np.pi/4*t**2) + + def g(t): + return t + + def dg(t): + return 1 + + a = -1 + b = 1 + omega = 451*np.pi/4 + + true_val = 2.3328690362927e-3 + s = 15 + for quadfun in [QuadOsc, EvansWebster]: # , AdaptiveLevin]: + quad = quadfun(f, g, dg, a, b, s=s, endpoints=False, + full_output=True) + val, _info = quad(omega) + assert_allclose(val.real, true_val) + s = 1 if s <= 2 else s // 2 + # self.assert_(info.error_estimate < 1e-10) + # assert_allclose(info.n, 11) + + def test_I9_exp_tant_sec2t_exp_jw_tant(self): + a = 0 + b = np.pi/2 + omega = 100 + + def f(t): + return np.exp(-np.tan(t))/np.cos(t)**2 + + def g(t): + return np.tan(t) + + def dg(t): + return 1./np.cos(t)**2 + + true_val = (0.0000999900009999000099990000999900009999000099990000999 + + 0.009999000099990000999900009999000099990000999900009999j) + for quadfun in [QuadOsc, EvansWebster, AdaptiveLevin]: + quad = quadfun(f, g, dg, a, b, endpoints=False, full_output=True) + + val, info = quad(omega) + + assert_allclose(val, true_val) + self.assert_(info.error_estimate < 1e-8) + + def test_exp_zdcos2t_dcos2t_exp_jw_cos_t_b_dcos2t(self): + x1 = -20 + y1 = 20 + z1 = 20 + beta = np.abs(np.arctan(y1/x1)) + R = np.sqrt(x1**2+y1**2) + + def f(t, beta, z1): + cos2t = np.cos(t)**2 + return np.where(cos2t == 0, 0, np.exp(-z1/cos2t)/cos2t) + + def g(t, beta, z1): + return np.cos(t-beta)/np.cos(t)**2 + + def dg(t, beta, z1=0): + cos3t = np.cos(t)**3 + return 0.5*(3*np.sin(beta)-np.sin(beta-2*t))/cos3t + + def append_dg_zero(zeros, g1, beta): + signs = [1, ] if np.abs(g1) <= _EPS else [-1, 1] + for sgn1 in signs: + tn = np.arccos(sgn1 * g1) + if -np.pi / 2 <= tn <= np.pi / 2: + for sgn2 in [-1, 1]: + t = sgn2 * tn + if np.abs(dg(t, beta)) < 10*_EPS: + zeros.append(t) + return zeros + + def zeros_dg(beta): + k0 = (9*np.cos(2*beta)-7) + if k0 < 0: # No stationary points + return () + k1 = 3*np.cos(2*beta)-5 + g0 = np.sqrt(2)*np.sqrt(np.cos(beta)**2*k0) + zeros = [] + + if g0+k1 < _EPS: + g1 = 1./2*np.sqrt(-g0-k1) + zeros = append_dg_zero(zeros, g1, beta) + if _EPS < g0-k1: + g2 = 1./2*np.sqrt(g0-k1) + zeros = append_dg_zero(zeros, g2, beta) + if np.abs(g0+k1) <= _EPS or np.abs(g0-k1) <= _EPS: + zeros = append_dg_zero(zeros, 0, beta) + return tuple(zeros) + + a = -np.pi/2 + b = np.pi/2 + omega = R + + def ftot(t): + cos2t = mp.cos(t)**2 + 
return (mp.exp(-z1/cos2t) / cos2t * + mp.exp(1j * omega * mp.cos(t-beta)/cos2t)) + + zdg = zeros_dg(beta) + ab = (a, ) + zdg + (b, ) + true_val, _err = mp.quadts(ftot, ab, maxdegree=9, error=True) + + import matplotlib.pyplot as plt + t = np.linspace(a, b, 5*513) + plt.subplot(2, 1, 1) + f2 = f(t, beta, z1)*np.exp(1j*R*g(t, beta, z1)) + + true_val2 = np.trapz(f2, t) + plt.plot(t, f2.real, t, f2.imag, 'r') + plt.title('integral=%g+1j%g, ' + '(%g+1j%g)' % (true_val2.real, true_val2.imag, + true_val.real, true_val.imag)) + plt.subplot(2, 1, 2) + plt.plot(t, dg(t, beta, z1), 'r') + plt.plot(t, g(t, beta, z1)) + plt.hlines(0, a, b) + plt.axis([a, b, -5, 5]) + plt.title('beta=%g' % beta) + print(np.trapz(f2, t)) + plt.show('hold') + # true_val = 0.00253186684281+0.004314054498j + # s = 15 + for quadfun in [QuadOsc, EvansWebster, AdaptiveLevin]: + # EvansWebster]: # , AdaptiveLevin, ]: + quad = quadfun(f, g, dg, a, b, precision=10, endpoints=False, + full_output=True) + val, _info = quad(omega, beta, z1) # @UnusedVariable + # assert_allclose(val, complex(true_val), rtol=1e-3) + # s = 1 if s<=1 else s//2 + pass + + +if __name__ == "__main__": + # import sys;sys.argv = ['', 'Test.testName'] + unittest.main() From f023c6af8a8f564b6798559c9f53044628bcc936 Mon Sep 17 00:00:00 2001 From: pbrod Date: Thu, 26 May 2016 13:33:17 +0200 Subject: [PATCH 6/6] Updated .travis.yml --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index a40f642..eda4f4c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,7 +35,7 @@ before_install: - sudo apt-get install gfortran # Install packages install: - - conda install --yes python=$TRAVIS_PYTHON_VERSION numpy scipy pytest numdifftools matplotlib nose + - conda install --yes python=$TRAVIS_PYTHON_VERSION numpy scipy pytest numdifftools matplotlib nose pytest-pep8 pytest-cov mpmath - pip install coveralls - pip install codecov - python setup.py build
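A minimal usage sketch (not part of the patches above) of the two interfaces this series changes: the reworked piecewise(condlist, funclist, xi=...) argument order from PATCH 1/6 and the QuadOsc oscillatory quadrature class added in PATCH 5/6. It assumes the patched wafo package and its dependencies (numpy, numdifftools) are importable; the inputs mirror test_abs_function and test_exp_jw_t from the new test code rather than introducing new values.

import numpy as np
from wafo.misc import piecewise
from wafo.integrate_oscillating import QuadOsc

# PATCH 1/6: the condition list comes first, then the function list, then the
# tuple of input arrays xi.  This evaluates |x| piecewise (cf. test_abs_function).
x = np.linspace(-2.5, 2.5, 6)
vals = piecewise([x < 0, x >= 0], [lambda x: -x, lambda x: x], (x,))
# vals -> [2.5, 1.5, 0.5, 0.5, 1.5, 2.5]

# PATCH 5/6: QuadOsc approximates the oscillatory integral
#     I = int_a^b f(t) * exp(1j*omega*g(t)) dt
# from the amplitude f, the phase g and its derivative dg (cf. test_exp_jw_t).
def f(t):
    return np.ones(np.shape(t))

def g(t):
    return t

def dg(t):
    return np.ones(np.shape(t))

a, b, omega = 1, 2, 1
quad = QuadOsc(f, g, dg, a, b, full_output=True)
val, info = quad(omega)
exact = (np.exp(1j * omega * b) - np.exp(1j * omega * a)) / (1j * omega)
assert np.allclose(val, exact)
# info.error_estimate is expected to stay below 1e-12 for this case.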