Added numba_misc.py

master
Per A Brodtkorb 9 years ago
parent 15a0887772
commit 094c73f2f8

@ -0,0 +1,131 @@
environment:
global:
# SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the
# /E:ON and /V:ON options are not enabled in the batch script interpreter
# See: http://stackoverflow.com/a/13751649/163740
CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\appveyor\\run_with_env.cmd"
matrix:
# Python 2.7.10 is the latest version and is not pre-installed.
- PYTHON: "C:\\Python27.10"
PYTHON_VERSION: "2.7.10"
PYTHON_ARCH: "32"
- PYTHON: "C:\\Python27.10-x64"
PYTHON_VERSION: "2.7.10"
PYTHON_ARCH: "64"
# Pre-installed Python versions, which Appveyor may upgrade to
# a later point release.
# See: http://www.appveyor.com/docs/installed-software#python
- PYTHON: "C:\\Python27"
PYTHON_VERSION: "2.7.x" # currently 2.7.9
PYTHON_ARCH: "32"
- PYTHON: "C:\\Python27-x64"
PYTHON_VERSION: "2.7.x" # currently 2.7.9
PYTHON_ARCH: "64"
- PYTHON: "C:\\Python33"
PYTHON_VERSION: "3.3.x" # currently 3.3.5
PYTHON_ARCH: "32"
- PYTHON: "C:\\Python33-x64"
PYTHON_VERSION: "3.3.x" # currently 3.3.5
PYTHON_ARCH: "64"
- PYTHON: "C:\\Python34"
PYTHON_VERSION: "3.4.x" # currently 3.4.3
PYTHON_ARCH: "32"
- PYTHON: "C:\\Python34-x64"
PYTHON_VERSION: "3.4.x" # currently 3.4.3
PYTHON_ARCH: "64"
# Python versions not pre-installed
# Python 2.6.6 is the latest Python 2.6 with a Windows installer
# See: https://github.com/ogrisel/python-appveyor-demo/issues/10
- PYTHON: "C:\\Python266"
PYTHON_VERSION: "2.6.6"
PYTHON_ARCH: "32"
- PYTHON: "C:\\Python266-x64"
PYTHON_VERSION: "2.6.6"
PYTHON_ARCH: "64"
- PYTHON: "C:\\Python35"
PYTHON_VERSION: "3.5.0"
PYTHON_ARCH: "32"
- PYTHON: "C:\\Python35-x64"
PYTHON_VERSION: "3.5.0"
PYTHON_ARCH: "64"
# Major and minor releases (i.e. x.0.0 and x.y.0) prior to 3.3.0 use
# a different naming scheme.
- PYTHON: "C:\\Python270"
PYTHON_VERSION: "2.7.0"
PYTHON_ARCH: "32"
- PYTHON: "C:\\Python270-x64"
PYTHON_VERSION: "2.7.0"
PYTHON_ARCH: "64"
install:
- ECHO "Filesystem root:"
- ps: "ls \"C:/\""
- ECHO "Installed SDKs:"
- ps: "ls \"C:/Program Files/Microsoft SDKs/Windows\""
# Install Python (from the official .msi of http://python.org) and pip when
# not already installed.
- ps: if (-not(Test-Path($env:PYTHON))) { & appveyor\install.ps1 }
# Prepend newly installed Python to the PATH of this build (this cannot be
# done from inside the powershell script as it would require to restart
# the parent CMD process).
- "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
# Check that we have the expected version and architecture for Python
- "python --version"
- "python -c \"import struct; print(struct.calcsize('P') * 8)\""
# Upgrade to the latest version of pip to avoid it displaying warnings
# about it being out of date.
- "pip install --disable-pip-version-check --user --upgrade pip"
# Install the build dependencies of the project. If some dependencies contain
# compiled extensions and are not provided as pre-built wheel packages,
# pip will build them from source using the MSVC compiler matching the
# target Python version and architecture
- "%CMD_IN_ENV% pip install -r requirements.txt"
build_script:
# Build the compiled extension
- "%CMD_IN_ENV% python setup.py build"
test_script:
# Run the project tests
- "%CMD_IN_ENV% python setup.py test"
after_test:
# If tests are successful, create binary packages for the project.
- "%CMD_IN_ENV% python setup.py bdist_wheel"
- "%CMD_IN_ENV% python setup.py bdist_wininst"
- "%CMD_IN_ENV% python setup.py bdist_msi"
- ps: "ls dist"
artifacts:
# Archive the generated packages in the ci.appveyor.com build report.
- path: dist\*
#on_success:
# - TODO: upload the content of dist/*.whl to a public wheelhouse
#

@ -14,6 +14,7 @@ from . import polynomial
from . import stats
from . import interpolate
from . import dctpack
+from . import wave_theory
try:
from . import fig
except ImportError:
File diff suppressed because one or more lines are too long

@ -3,6 +3,7 @@ Misc
'''
from __future__ import absolute_import, division
import sys
+from . import numba_misc
import fractions
import numpy as np
from numpy import (
@ -10,8 +11,8 @@ from numpy import (
amax, logical_and, arange, linspace, atleast_1d,
asarray, ceil, floor, frexp, hypot,
sqrt, arctan2, sin, cos, exp, log, log1p, mod, diff,
-finfo, inf, pi, interp, isscalar, zeros, ones, linalg,
-r_, sign, unique, hstack, vstack, nonzero, where, extract)
+inf, pi, interp, isscalar, zeros, ones, linalg,
+sign, unique, hstack, vstack, nonzero, where, extract)
from scipy.special import gammaln
from scipy.integrate import trapz, simps
import warnings
@ -26,9 +27,10 @@ try:
except ImportError:
warnings.warn('c_library not found. Check its compilation.')
clib = None
-floatinfo = finfo(float)
-_TINY = np.finfo(float).tiny
-_EPS = np.finfo(float).eps
+floatinfo = np.finfo(float)
+_TINY = floatinfo.tiny
+_EPS = floatinfo.eps
__all__ = ['now', 'spaceline', 'narg_smallest', 'args_flat', 'is_numlike',
'JITImport', 'DotDict', 'Bunch', 'printf', 'sub_dict_select',
@ -839,33 +841,7 @@ def _findcross(xn, method='clib'):
if clib is not None and method == 'clib':
ind, m = clib.findcross(xn, 0.0)
return ind[:m]
+return numba_misc.findcross(xn)
n = len(xn)
iz, = (xn == 0).nonzero()
if len(iz) > 0:
# Trick to avoid turning points on the crossinglevel.
if iz[0] == 0:
if len(iz) == n:
warnings.warn('All values are equal to crossing level!')
return zeros(0, dtype=np.int)
diz = diff(iz)
if len(diz) > 0 and (diz > 1).any():
ix = iz[(diz > 1).argmax()]
else:
ix = iz[-1]
# x(ix) is a up crossing if x(1:ix) = v and x(ix+1) > v.
# x(ix) is a downcrossing if x(1:ix) = v and x(ix+1) < v.
xn[0:ix + 1] = -xn[ix + 1]
iz = iz[ix + 1::]
for ix in iz.tolist():
xn[ix] = xn[ix - 1]
# indices to local level crossings ( without turningpoints)
ind, = (xn[:n - 1] * xn[1:] < 0).nonzero()
return ind
def xor(a, b):
@ -947,17 +923,17 @@ def findcross(x, v=0.0, kind=None, method='clib'):
ind = ind[t_0::2]
elif kind in ('dw', 'uw', 'tw', 'cw'):
# make sure the first is a level v down-crossing
-# if wdef=='dw' or wdef=='tw'
+# if kind=='dw' or kind=='tw'
# or make sure the first is a level v up-crossing
-# if wdef=='uw' or wdef=='cw'
+# if kind=='uw' or kind=='cw'
first_is_down_crossing = int(xn[ind[0]] > xn[ind[0] + 1])
if xor(first_is_down_crossing, kind in ('dw', 'tw')):
ind = ind[1::]
# make sure the number of troughs and crests are according to the
-# wavedef, i.e., make sure length(ind) is odd if dw or uw
-# and even if tw or cw
+# wavedef, i.e., make sure length(ind) is odd if kind is dw or uw
+# and even if kind is tw or cw
is_odd = mod(ind.size, 2)
if xor(is_odd, kind in ('dw', 'uw')):
ind = ind[:-1]
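For readers new to the wave definitions handled above, a minimal usage sketch of findcross (the test signal is invented for illustration; kind='dw' trims the index vector so the first crossing is a downcrossing):

import numpy as np
from wafo.misc import findcross

t = np.linspace(0, 7 * np.pi, 250)
x = np.sin(t) + 0.1 * np.cos(5 * t)
ind_all = findcross(x, v=0.0)             # every level-0 crossing
ind_dw = findcross(x, v=0.0, kind='dw')   # first crossing forced to be a downcrossing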
@ -1107,7 +1083,7 @@ def findrfc_astm(tp):
sig_rfc[:,1] Cycles mean value
sig_rfc[:,2] Cycle type, half (=0.5) or full (=1.0)
"""
+return numba_misc.findrfc_astm(tp)
y1 = atleast_1d(tp).ravel()
sig_rfc, cnr = clib.findrfc3_astm(y1)
# the sig_rfc was constructed too big in rainflow.rf3, so
@ -1185,82 +1161,13 @@ def findrfc(tp, h=0.0, method='clib'):
if clib is not None and method == 'clib':
ind, ix = clib.findrfc(y, h)
-elif method == '2':
-ix = -1
-ind = _findrfc2(y, h)
else:
-ind, ix = _findrfc(y, h)
+ind = numba_misc.findrfc(y, h, method)
+ix = len(ind)
return np.sort(ind[:ix]) + t_start
def _findrfc(y, h):
# TODO: merge rfcfilter and _findrfc
t_start = 0
n = len(y)
NC = np.floor(n / 2) - 1
ind = zeros(n, dtype=np.int)
NC = np.int(NC)
ix = 0
for i in range(NC):
Tmi = t_start + 2 * i
Tpl = t_start + 2 * i + 2
xminus = y[2 * i]
xplus = y[2 * i + 2]
if(i != 0):
j = i - 1
while ((j >= 0) and (y[2 * j + 1] <= y[2 * i + 1])):
if (y[2 * j] < xminus):
xminus = y[2 * j]
Tmi = t_start + 2 * j
j -= 1
if (xminus >= xplus):
if (y[2 * i + 1] - xminus >= h):
ind[ix] = Tmi
ix += 1
ind[ix] = (t_start + 2 * i + 1)
ix += 1
# goto L180 continue
else:
j = i + 1
while (j < NC):
if (y[2 * j + 1] >= y[2 * i + 1]):
break # goto L170
if((y[2 * j + 2] <= xplus)):
xplus = y[2 * j + 2]
Tpl = (t_start + 2 * j + 2)
j += 1
else:
if ((y[2 * i + 1] - xminus) >= h):
ind[ix] = Tmi
ix += 1
ind[ix] = (t_start + 2 * i + 1)
ix += 1
# iy = i
continue
# goto L180
# L170:
if (xplus <= xminus):
if ((y[2 * i + 1] - xminus) >= h):
ind[ix] = Tmi
ix += 1
ind[ix] = (t_start + 2 * i + 1)
ix += 1
elif ((y[2 * i + 1] - xplus) >= h):
ind[ix] = (t_start + 2 * i + 1)
ix += 1
ind[ix] = Tpl
ix += 1
# L180:
# iy=i
# /* for i */
return ind, ix
def _raise_kind_error(kind):
if kind in (-1, 0):
raise NotImplementedError('kind = {} not yet implemented'.format(kind))
@ -1399,118 +1306,124 @@ def mctp2tc(f_Mm, utc, param, f_mM=None):
MCTP2TC Calculates frequencies for the upcrossing troughs and crests
using Markov chain of turning points.
-CALL: f_tc = mctp2tc(f_Mm,utc,param);
-where
-f_tc = the matrix with frequences of upcrossing troughs and crests,
+Parameters
+----------
f_Mm = the frequency matrix for the Max2min cycles,
utc = the reference level,
param = a vector defining the discretization used to compute f_Mm,
note that f_mM has to be computed on the same grid as f_mM.
-f_mM = the frequency matrix for the min2Max cycles.
-optional call: f_tc = mctp2tc(f_Mm,utc,param,f_mM)
+Returns
+-------
+f_tc = the matrix with frequencies of upcrossing troughs and crests,
+f_mM = the frequency matrix for the min2Max cycles.
"""
# raise NotImplementedError('')
if f_mM is None:
f_mM = np.copy(f_Mm)
u = np.linspace(*param)
udisc = np.fliplr(u)
ntc = np.sum(udisc >= utc)
n = len(f_Mm)
if ntc > n-1:
raise IndexError('index for mean-level out of range, stop')
if param[2]-1 < ntc or ntc < 2:
raise ValueError('the reference level out of range, stop')
# normalization of frequency matrices
for i in range(n):
rowsum = np.sum(f_Mm[i])
if rowsum != 0:
f_Mm[i] = f_Mm[i] / rowsum
P = np.fliplr(f_Mm)
Ph = np.rot90(np.fliplr(f_mM), -1)
for i in range(n):
rowsum = np.sum(Ph[i])
if rowsum != 0:
Ph[i] = Ph[i] / rowsum
Ph = np.fliplr(Ph)
F = np.zeros((n, n))
F[:ntc-1, :(n-ntc)] = f_mM[:ntc-1, :(n-ntc)]
F = cmat2nt(F)
for i in range(1, ntc):
for j in range(ntc, n-1):
if i < ntc:
Ap = P[i:ntc-1, i+1:ntc]
Bp = Ph[i+1:ntc, i:ntc-1]
dim_p = ntc-i
tempp = zeros((dim_p, 1))
I = np.eye(np.shape(Ap))
if i == 2:
e = Ph[i+1:ntc, 0]
else:
e = np.sum(Ph[i+1:ntc, 1:i-1], axis=1)
if max(abs(e)) > 1e-10:
if dim_p == 1:
tempp[0] = (Ap/(1-Bp*Ap)*e)
else:
tempp = np.dot(Ap,
linalg.lstsq(I-np.dot(Bp, Ap), e)[0])
# end
# end
elif j > ntc:
Am = P[ntc:j-1, ntc+1:j]
Bm = Ph[ntc+1:j, ntc:j-1]
dim_m = j-ntc
tempm = zeros((dim_m, 1))
Im = np.eye(np.shape(Am))
if j == n-1:
em = P[ntc:j-1, n]
else:
em = np.sum(P[ntc:j-1, j+1:n], axis=1)
# end
if max(abs(em)) > 1e-10:
if dim_m == 1:
tempm[0, 0] = (Bm/(1-Am*Bm)*em)
else:
tempm = np.dot(Bm,
linalg.lstsq(Im-np.dot(Am, Bm), em)[0])
# end
# end
# end
if (j > ntc) and (i < ntc):
a = np.dot(np.dot(tempp.T, f_mM[i:ntc-1, n-ntc:-1:n-j+1]),
tempm)
b = np.dot(np.dot(tempp.T, f_mM[i:ntc-1, n-j:-1:1]),
ones((n-j, 1)))
c = np.dot(np.dot(ones((1, i-1)), f_mM[1:i-1, n-ntc:-1:n-j+1]),
tempm)
F[i, n-j+1] = F[i, n-j+1] + a + b + c
# end
if (j == ntc) and (i < ntc):
b = np.dot(np.dot(tempp.T, f_mM[i:ntc-1, n-j:-1:1]),
ones((n-j, 1)))
F[i, n-j+1] = F[i, n-j+1] + b
for k in range(ntc):
F[i, n-k+1] = F[i, n-ntc+1]
# end
# end
if (j > ntc) and (i == ntc):
c = np.dot(np.dot(ones((1, i-1)), f_mM[1:i-1, n-ntc:-1:n-j+1]),
tempm)
F[i, n-j+1] = F[i, n-j+1]+c
for k in range(ntc, n):
F[k, n-j+1] = F[ntc, n-j+1]
# end
# end
# end
# end
# fmax=max(max(F));
# contour (u,u,flipud(F),...
@ -1520,6 +1433,7 @@ def mctp2tc(f_Mm, utc, param, f_mM=None):
# ylabel('crest'), xlabel('trough')
# axis('square')
# if mlver>1, commers, end
+return nt2cmat(F)
def mctp2rfc(fmM, fMm=None):
@ -1567,8 +1481,6 @@ def mctp2rfc(fmM, fMm=None):
norm = MA[j]
if norm != 0:
PMm[j, :] = PMm[j, :] / norm
-# end
-# end
PMm = np.fliplr(PMm)
return PMm
@ -1665,78 +1577,6 @@ def mctp2rfc(fmM, fMm=None):
return f_rfc
def _findrfc2(y, h, method=0):
n = len(y)
t = zeros(n, dtype=np.int)
j = 0
t0 = 0
y0 = y[t0]
z0 = 0
def a_le_b(a, b):
return a <= b
def a_lt_b(a, b):
return a < b
if method == 0:
cmpfun1 = a_le_b
cmpfun2 = a_lt_b
else:
cmpfun1 = a_lt_b
cmpfun2 = a_le_b
# The rainflow filter
for tim1, yi in enumerate(y[1::]):
fpi = y0 + h
fmi = y0 - h
ti = tim1 + 1
# yi = y[ti]
if z0 == 0:
if cmpfun1(yi, fmi):
z1 = -1
elif cmpfun1(fpi, yi):
z1 = +1
else:
z1 = 0
t1, y1 = (t0, y0) if z1 == 0 else (ti, yi)
else:
if (((z0 == +1) & cmpfun1(yi, fmi)) |
((z0 == -1) & cmpfun2(yi, fpi))):
z1 = -1
elif (((z0 == +1) & cmpfun2(fmi, yi)) |
((z0 == -1) & cmpfun1(fpi, yi))):
z1 = +1
else:
warnings.warn('Something wrong, i=%d' % tim1)
# Update y1
if z1 != z0:
t1, y1 = ti, yi
elif z1 == -1:
# y1 = min([y0 xi])
t1, y1 = (t0, y0) if y0 < yi else (ti, yi)
elif z1 == +1:
# y1 = max([y0 xi])
t1, y1 = (t0, y0) if y0 > yi else (ti, yi)
# Update y if y0 is a turning point
if np.abs(z0 - z1) == 2:
j += 1
t[j] = t0
# Update t0, y0, z0
t0, y0, z0 = t1, y1, z1
# end
# Update y if last y0 is greater than (or equal) threshold
if cmpfun1(h, np.abs(y0 - y[t[j]])):
j += 1
t[j] = t0
return t[:j + 1]
def rfcfilter(x, h, method=0):
"""
Rainflow filter a signal.
@ -1786,9 +1626,8 @@ def rfcfilter(x, h, method=0):
--------
findrfc
"""
-# TODO merge rfcfilter and findrfc
y = atleast_1d(x).ravel()
-ix = _findrfc2(y, h, method)
+ix = numba_misc.findrfc(y, h, method)
return y[ix]
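A hedged usage sketch of the rewritten filter (the signal is invented for illustration; rfcfilter keeps only turning points whose rainflow range exceeds h):

import numpy as np
from wafo.misc import rfcfilter

t = np.linspace(0, 10 * np.pi, 2000)
x = np.sin(t) + 0.05 * np.sin(40 * t)   # slow wave plus small ripple
y = rfcfilter(x, h=0.3)                 # ripple cycles with range < 0.3 are removed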
@ -1865,15 +1704,15 @@ def findtp(x, h=0.0, kind=None):
# the Nieslony approach always put the first loading point as the first
# turning point.
# add the first turning point is the first of the signal
-if x[ind[0]] != x[0]:
+if ind[0] != 0:
ind = np.r_[0, ind, n - 1]
else: # only add the last point of the signal
ind = np.r_[ind, n - 1]
else:
if x[ind[0]] > x[ind[1]]: # adds indices to first and last value
-ind = r_[0, ind, n - 1]
+ind = np.r_[0, ind, n - 1]
else: # adds index to the last value
-ind = r_[ind, n - 1]
+ind = np.r_[ind, n - 1]
if h > 0.0:
ind1 = findrfc(x[ind], h)

@ -0,0 +1,568 @@
'''
Created on 6. okt. 2016
@author: pab
'''
from __future__ import absolute_import, division
from numba import jit, float64, int64, int32, int8, void
import numpy as np
@jit(int32(int32[:], int8[:]))
def _findcross(ind, y):
ix, dcross, start, v = 0, 0, 0, 0
n = len(y)
if y[0] < v:
dcross = -1 # first is a up-crossing
elif y[0] > v:
dcross = 1 # first is a down-crossing
elif y[0] == v:
# Find out what type of crossing we have next time..
for i in range(1, n):
start = i
if y[i] < v:
ind[ix] = i - 1 # first crossing is a down crossing
ix += 1
dcross = -1 # The next crossing is a up-crossing
break
elif y[i] > v:
ind[ix] = i - 1 # first crossing is a up-crossing
ix += 1
dcross = 1 # The next crossing is a down-crossing
break
for i in range(start, n - 1):
if ((dcross == -1 and y[i] <= v and v < y[i + 1]) or
(dcross == 1 and v <= y[i] and y[i + 1] < v)):
ind[ix] = i
ix += 1
dcross = -dcross
return ix
def findcross(xn):
'''Return indices to zero up and downcrossings of a vector
'''
ind = np.empty(len(xn), dtype=np.int32)
m = _findcross(ind, xn)
return ind[:m]
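# For reference, the plain-NumPy equivalent that this jitted routine replaces
# in misc.py (ignoring the special handling of samples lying exactly on the
# crossing level) is the one-liner removed there:
#     ind, = (xn[:-1] * xn[1:] < 0).nonzero()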
def _make_findrfc(cmp1, cmp2):
@jit(int64(int32[:], float64[:], float64), nopython=True)
def findrfc2(t, y, h):
# cmp1, cmp2 = (a_le_b, a_lt_b) if method==0 else (a_lt_b, a_le_b)
n = len(y)
j, t0, z0 = 0, 0, 0
y0 = y[t0]
# The rainflow filter
for ti in range(1, n):
fpi = y0 + h
fmi = y0 - h
yi = y[ti]
if z0 == 0:
if cmp1(yi, fmi):
z1 = -1
elif cmp1(fpi, yi):
z1 = +1
else:
z1 = 0
t1, y1 = (t0, y0) if z1 == 0 else (ti, yi)
else:
if (((z0 == +1) and cmp1(yi, fmi)) or
((z0 == -1) and cmp2(yi, fpi))):
z1 = -1
elif (((z0 == +1) and cmp2(fmi, yi)) or
((z0 == -1) and cmp1(fpi, yi))):
z1 = +1
else:
raise ValueError
# warnings.warn('Something wrong, i={}'.format(tim1))
# Update y1
if z1 != z0:
t1, y1 = ti, yi
elif z1 == -1:
t1, y1 = (t0, y0) if y0 < yi else (ti, yi)
elif z1 == +1:
t1, y1 = (t0, y0) if y0 > yi else (ti, yi)
# Update y if y0 is a turning point
if abs(z0 - z1) == 2:
j += 1
t[j] = t0
# Update t0, y0, z0
t0, y0, z0 = t1, y1, z1
# end
# Update y if last y0 is greater than (or equal) threshold
if cmp2(h, abs(y0 - y[t[j]])):
j += 1
t[j] = t0
return j + 1
return findrfc2
@jit(int32(float64, float64), nopython=True)
def a_le_b(a, b):
return a <= b
@jit(int32(float64, float64), nopython=True)
def a_lt_b(a, b):
return a < b
_findrfc_le = _make_findrfc(a_le_b, a_lt_b)
_findrfc_lt = _make_findrfc(a_lt_b, a_le_b)
@jit(int64(int32[:], float64[:], float64), nopython=True)
def _findrfc(ind, y, h):
n = len(y)
t_start = 0
NC = n // 2 - 1
ix = 0
for i in range(NC):
Tmi = t_start + 2 * i
Tpl = t_start + 2 * i + 2
xminus = y[2 * i]
xplus = y[2 * i + 2]
if(i != 0):
j = i - 1
while ((j >= 0) and (y[2 * j + 1] <= y[2 * i + 1])):
if (y[2 * j] < xminus):
xminus = y[2 * j]
Tmi = t_start + 2 * j
j -= 1
if (xminus >= xplus):
if (y[2 * i + 1] - xminus >= h):
ind[ix] = Tmi
ix += 1
ind[ix] = (t_start + 2 * i + 1)
ix += 1
# goto L180 continue
else:
j = i + 1
while (j < NC):
if (y[2 * j + 1] >= y[2 * i + 1]):
break # goto L170
if((y[2 * j + 2] <= xplus)):
xplus = y[2 * j + 2]
Tpl = (t_start + 2 * j + 2)
j += 1
else:
if ((y[2 * i + 1] - xminus) >= h):
ind[ix] = Tmi
ix += 1
ind[ix] = (t_start + 2 * i + 1)
ix += 1
# iy = i
continue
# goto L180
# L170:
if (xplus <= xminus):
if ((y[2 * i + 1] - xminus) >= h):
ind[ix] = Tmi
ix += 1
ind[ix] = (t_start + 2 * i + 1)
ix += 1
elif ((y[2 * i + 1] - xplus) >= h):
ind[ix] = (t_start + 2 * i + 1)
ix += 1
ind[ix] = Tpl
ix += 1
# L180:
# iy=i
# /* for i */
return ix
def findrfc(y, h, method=0):
n = len(y)
t = np.zeros(n, dtype=np.int)
findrfc_ = [_findrfc_le, _findrfc_lt, _findrfc][method]
m = findrfc_(t, y, h)
return t[:m]
@jit(void(float64[:], float64[:], float64[:], float64[:],
float64[:], float64[:], float64, float64,
int32, int32, int32, int32), nopython=True)
def _finite_water_disufq(rvec, ivec, rA, iA, w, kw, h, g, nmin, nmax, m, n):
# kfact is set to 2 in order to exploit the symmetry.
# If you set kfact to 1, you must uncomment all statements
# including the expressions: rvec[iz2], rvec[iv2], ivec[iz2] and ivec[iv2].
kfact = 2.0
for ix in range(nmin - 1, nmax):
# for (ix = nmin-1;ix<nmax;ix++) {
kw1 = kw[ix]
w1 = w[ix]
tmp1 = np.tanh(kw1 * h)
# Cg, wave group velocity
Cg = 0.5 * g * (tmp1 + kw1 * h * (1.0 - tmp1 * tmp1)) / w1 # OK
tmp1 = 0.5 * g * (kw1 / w1) * (kw1 / w1)
tmp2 = 0.5 * w1 * w1 / g
tmp3 = g * kw1 / (w1 * Cg)
tmp4 = kw1 / np.sinh(2.0 * kw1 * h) if kw1 * h < 300.0 else 0.0
# Difference frequency effects finite water depth
Edij = (tmp1 - tmp2 + tmp3) / (1.0 - g * h / (Cg * Cg)) - tmp4 # OK
# Sum frequency effects finite water depth
Epij = (3.0 * (tmp1 - tmp2) /
(1.0 - tmp1 / kw1 * np.tanh(2.0 * kw1 * h)) +
3.0 * tmp2 - tmp1) # OK
# printf("Edij = %f Epij = %f \n", Edij,Epij);
ixi = ix * m
iz1 = 2 * ixi
# iz2 = n*m-ixi;
for i in range(m):
rrA = rA[ixi] * rA[ixi]
iiA = iA[ixi] * iA[ixi]
riA = rA[ixi] * iA[ixi]
# Sum frequency effects along the diagonal
rvec[iz1] += kfact * (rrA - iiA) * Epij
ivec[iz1] += kfact * 2.0 * riA * Epij
# rvec[iz2] += kfact*(rrA-iiA)*Epij;
# ivec[iz2] -= kfact*2.0*riA*Epij;
# iz2++;
# Difference frequency effects along the diagonal
# are only contributing to the mean
rvec[i] += 2.0 * (rrA + iiA) * Edij
ixi += 1
iz1 += 1
# }
for jy in range(ix + 1, nmax):
# w1 = w[ix];
# kw1 = kw[ix];
w2 = w[jy]
kw2 = kw[jy]
tmp1 = g * (kw1 / w1) * (kw2 / w2)
tmp2 = 0.5 / g * (w1 * w1 + w2 * w2 + w1 * w2)
tmp3 = 0.5 * g * \
(w1 * kw2 * kw2 + w2 * kw1 * kw1) / (w1 * w2 * (w1 + w2))
tmp4 = (1 - g * (kw1 + kw2) / (w1 + w2) / (w1 + w2) *
np.tanh((kw1 + kw2) * h))
Epij = (tmp1 - tmp2 + tmp3) / tmp4 + tmp2 - 0.5 * tmp1 # OK */
tmp2 = 0.5 / g * (w1 * w1 + w2 * w2 - w1 * w2) # OK*/
tmp3 = -0.5 * g * \
(w1 * kw2 * kw2 - w2 * kw1 * kw1) / (w1 * w2 * (w1 - w2))
tmp4 = (1.0 - g * (kw1 - kw2) / (w1 - w2) /
(w1 - w2) * np.tanh((kw1 - kw2) * h))
Edij = (tmp1 - tmp2 + tmp3) / tmp4 + tmp2 - 0.5 * tmp1 # OK */
# printf("Edij = %f Epij = %f \n", Edij,Epij);
ixi = ix * m
jyi = jy * m
iz1 = ixi + jyi
iv1 = jyi - ixi
# iz2 = (n*m-iz1)
# iv2 = n*m-iv1
for i in range(m):
# for (i=0;i<m;i++,ixi++,jyi++,iz1++,iv1++) {
rrA = rA[ixi] * rA[jyi] # rrA = rA[i][ix]*rA[i][jy];
iiA = iA[ixi] * iA[jyi] # iiA = iA[i][ix]*iA[i][jy];
riA = rA[ixi] * iA[jyi] # riA = rA[i][ix]*iA[i][jy];
irA = iA[ixi] * rA[jyi] # irA = iA[i][ix]*rA[i][jy];
# Sum frequency effects */
tmp1 = kfact * 2.0 * (rrA - iiA) * Epij
tmp2 = kfact * 2.0 * (riA + irA) * Epij
rvec[iz1] += tmp1 # rvec[i][jy+ix] += tmp1
ivec[iz1] += tmp2 # ivec[i][jy+ix] += tmp2
# rvec[iz2] += tmp1 # rvec[i][n*m-(jy+ix)] += tmp1
# ivec[iz2] -= tmp2 # ivec[i][n*m-(jy+ix)] -= tmp2
# iz2++
# Difference frequency effects */
tmp1 = kfact * 2.0 * (rrA + iiA) * Edij
tmp2 = kfact * 2.0 * (riA - irA) * Edij
rvec[iv1] += tmp1 # rvec[i][jy-ix] += tmp1
ivec[iv1] += tmp2 # ivec[i][jy-ix] -= tmp2
# rvec[iv2] += tmp1
# ivec[iv2] -= tmp2
# iv2 += 1
ixi += 1
jyi += 1
iz1 += 1
iv1 += 1
@jit(void(float64[:], float64[:], float64[:], float64[:],
float64[:], float64[:], float64, float64,
int32, int32, int32, int32), nopython=True)
def _deep_water_disufq(rvec, ivec, rA, iA, w, kw, h, g, nmin, nmax, m, n):
# kfact is set to 2 in order to exploit the symmetry.
# If you set kfact to 1, you must uncomment all statements
# including the expressions: rvec[iz2], rvec[iv2], ivec[iz2] and ivec[iv2].
kfact = 2.0
for ix in range(nmin - 1, nmax):
ixi = ix * m
iz1 = 2 * ixi
iz2 = n * m - ixi
kw1 = kw[ix]
Epij = kw1
for _i in range(m):
rrA = rA[ixi] * rA[ixi]
iiA = iA[ixi] * iA[ixi]
riA = rA[ixi] * iA[ixi]
# Sum frequency effects along the diagonal
tmp1 = kfact * (rrA - iiA) * Epij
tmp2 = kfact * 2.0 * riA * Epij
rvec[iz1] += tmp1
ivec[iz1] += tmp2
ixi += 1
iz1 += 1
# rvec[iz2] += tmp1
# ivec[iz2] -= tmp2
iz2 += 1
# Difference frequency effects are zero along the diagonal
# and are thus not contributing to the mean.
for jy in range(ix + 1, nmax):
kw2 = kw[jy]
Epij = 0.5 * (kw2 + kw1)
Edij = -0.5 * (kw2 - kw1)
# printf("Edij = %f Epij = %f \n", Edij,Epij);
ixi = ix * m
jyi = jy * m
iz1 = ixi + jyi
iv1 = jyi - ixi
iz2 = (n*m-iz1)
iv2 = (n*m-iv1)
for _i in range(m):
rrA = rA[ixi] * rA[jyi] # rrA = rA[i][ix]*rA[i][jy]
iiA = iA[ixi] * iA[jyi] # iiA = iA[i][ix]*iA[i][jy]
riA = rA[ixi] * iA[jyi] # riA = rA[i][ix]*iA[i][jy]
irA = iA[ixi] * rA[jyi] # irA = iA[i][ix]*rA[i][jy]
# Sum frequency effects
tmp1 = kfact * 2.0 * (rrA - iiA) * Epij
tmp2 = kfact * 2.0 * (riA + irA) * Epij
rvec[iz1] += tmp1 # rvec[i][ix+jy] += tmp1
ivec[iz1] += tmp2 # ivec[i][ix+jy] += tmp2
# rvec[iz2] += tmp1 # rvec[i][n*m-(ix+jy)] += tmp1
# ivec[iz2] -= tmp2 # ivec[i][n*m-(ix+jy)] -= tmp2
iz2 += 1
# Difference frequency effects */
tmp1 = kfact * 2.0 * (rrA + iiA) * Edij
tmp2 = kfact * 2.0 * (riA - irA) * Edij
rvec[iv1] += tmp1 # rvec[i][jy-ix] += tmp1
ivec[iv1] += tmp2 # ivec[i][jy-ix] += tmp2
# rvec[iv2] += tmp1 # rvec[i][n*m-(jy-ix)] += tmp1
# ivec[iv2] -= tmp2 # ivec[i][n*m-(jy-ix)] -= tmp2
iv2 += 1
ixi += 1
jyi += 1
iz1 += 1
iv1 += 1
def disufq(rA, iA, w, kw, h, g, nmin, nmax, m, n):
"""
DISUFQ Is an internal function to spec2nlsdat
Parameters
----------
rA, iA = real and imaginary parts of the amplitudes (size m X n).
w = vector with angular frequencies (w>=0)
kw = vector with wavenumbers (kw>=0)
h = water depth (h >=0)
g = constant acceleration of gravity
nmin = minimum index where rA(:,nmin) and iA(:,nmin) is
greater than zero.
nmax = maximum index where rA(:,nmax) and iA(:,nmax) is
greater than zero.
m = size(rA,1),size(iA,1)
n = size(rA,2),size(iA,2), or size(rvec,2),size(ivec,2)
returns
-------
rvec, ivec = real and imaginary parts of the resultant (size m X n).
DISUFQ returns the summation of difference frequency and sum
frequency effects in the vector vec = rvec +sqrt(-1)*ivec.
The 2'nd order contribution to the Stokes wave is then calculated by
a simple 1D Fourier transform, real(FFT(vec)).
"""
rvec = np.zeros(n * m)
ivec = np.zeros(n * m)
if h > 10000: # { /* deep water / infinite water depth */
_deep_water_disufq(rvec, ivec, rA, iA, w, kw, h, g, nmin, nmax, m, n)
else:
_finite_water_disufq(rvec, ivec, rA, iA, w, kw, h, g, nmin, nmax, m, n)
return rvec, ivec
@jit(int32[:](float64[:], float64[:], float64[:, :]))
def _findrfc3_astm(array_ext, a, array_out):
"""
Rain flow without time analysis
Return [ampl ampl_mean nr_of_cycle]
By Adam Nieslony
Visit the MATLAB Central File Exchange for latest version
http://www.mathworks.com/matlabcentral/fileexchange/3026
"""
n = len(array_ext)
po = 0
# The original rainflow counting by Nieslony, unchanged
j = -1
c_nr1 = 1
for i in range(n):
j += 1
a[j] = array_ext[i]
while j >= 2 and abs(a[j - 1] - a[j - 2]) <= abs(a[j] - a[j - 1]):
ampl = abs((a[j - 1] - a[j - 2]) / 2)
mean = (a[j - 1] + a[j - 2]) / 2
if j == 2:
a[0] = a[1]
a[1] = a[2]
j = 1
if (ampl > 0):
array_out[po, :] = (ampl, mean, 0.5)
po += 1
else:
a[j - 2] = a[j]
j = j - 2
if (ampl > 0):
array_out[po, :] = (ampl, mean, 1.0)
po += 1
c_nr1 += 1
c_nr2 = 1
for i in range(j):
ampl = abs(a[i] - a[i + 1]) / 2
mean = (a[i] + a[i + 1]) / 2
if (ampl > 0):
array_out[po, :] = (ampl, mean, 0.5)
po += 1
c_nr2 += 1
return c_nr1, c_nr2
@jit(int32[:](float64[:], float64[:], float64[:], float64[:], float64[:, :]))
def _findrfc5_astm(array_ext, array_t, a, t, array_out):
"""
Rain flow with time analysis
returns
[ampl ampl_mean nr_of_cycle cycle_begin_time cycle_period_time]
By Adam Nieslony
Visit the MATLAB Central File Exchange for latest version
http://www.mathworks.com/matlabcentral/fileexchange/3026
"""
n = len(array_ext)
po = 0
# The original rainflow counting by Nieslony, unchanged
j = -1
c_nr1 = 1
for i in range(n):
j += 1
a[j] = array_ext[i]
t[j] = array_t[i]
while (j >= 2) and (abs(a[j - 1] - a[j - 2]) <= abs(a[j] - a[j - 1])):
ampl = abs((a[j - 1] - a[j - 2]) / 2)
mean = (a[j - 1] + a[j - 2]) / 2
period = (t[j - 1] - t[j - 2]) * 2
atime = t[j - 2]
if j == 2:
a[0] = a[1]
a[1] = a[2]
t[0] = t[1]
t[1] = t[2]
j = 1
if (ampl > 0):
array_out[po, :] = (ampl, mean, 0.5, atime, period)
po += 1
else:
a[j - 2] = a[j]
t[j - 2] = t[j]
j = j - 2
if (ampl > 0):
array_out[po, :] = (ampl, mean, 1.0, atime, period)
po += 1
c_nr1 += 1
c_nr2 = 1
for i in range(j):
# for (i=0; i<j; i++) {
ampl = abs(a[i] - a[i + 1]) / 2
mean = (a[i] + a[i + 1]) / 2
period = (t[i + 1] - t[i]) * 2
atime = t[i]
if (ampl > 0):
array_out[po, :] = (ampl, mean, 0.5, atime, period)
po += 1
c_nr2 += 1
return c_nr1, c_nr2
def findrfc_astm(tp, t=None):
"""
Return rainflow counted cycles
Nieslony's Matlab implementation of the ASTM standard practice for rainflow
counting ported to a Python C module.
Parameters
----------
tp : array-like
vector of turningpoints (NB! Only values, not sampled times)
t : array-like
vector of sampled times
Returns
-------
sig_rfc : array-like
array of shape (n,3) or (n, 5) with:
sig_rfc[:,0] Cycles amplitude
sig_rfc[:,1] Cycles mean value
sig_rfc[:,2] Cycle type, half (=0.5) or full (=1.0)
sig_rfc[:,3] cycle_begin_time (only if t is given)
sig_rfc[:,4] cycle_period_time (only if t is given)
"""
y1 = np.atleast_1d(tp).ravel()
n = len(y1)
a = np.zeros(n)
if t is None:
sig_rfc = np.zeros((n, 3))
cnr = _findrfc3_astm(y1, a, sig_rfc)
else:
t1 = np.atleast_1d(t).ravel()
sig_rfc = np.zeros((n, 5))
t2 = np.zeros(n)
cnr = _findrfc5_astm(y1, t1, a, t2, sig_rfc)
# the sig_rfc was constructed too big in rainflow.rf3, so
# reduce the sig_rfc array as done originally by a matlab mex c function
# n = len(sig_rfc)
return sig_rfc[:n - cnr[0]]
if __name__ == '__main__':
pass
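The ASTM rainflow counters above follow Nieslony's algorithm. A hedged usage sketch (the random load history is invented; findtp from wafo.misc is assumed to supply the turning points, mirroring the tests added later in this commit):

import numpy as np
from wafo.misc import findtp, findrfc_astm

np.random.seed(0)
x = np.random.randn(1000)      # synthetic load history
tp = x[findtp(x)]              # turning points only
cycles = findrfc_astm(tp)      # columns: amplitude, mean, 0.5/1.0 cycle type
print(cycles.shape)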

@ -4,7 +4,7 @@ from scipy.stats._distn_infrastructure import (_skew, # @UnusedImport
_kurtosis, _ncx2_log_pdf, # @IgnorePep8 @UnusedImport
_ncx2_pdf, _ncx2_cdf) # @UnusedImport @IgnorePep8
from .estimation import FitDistribution
-from ._constants import _XMAX
+from ._constants import _XMAX, _XMIN
from wafo.misc import lazyselect as _lazyselect
from wafo.misc import lazywhere as _lazywhere
@ -250,9 +250,8 @@ def link(self, x, logSF, theta, i):
def _link(self, x, logSF, theta, i):
-msg = ('Link function not implemented for the %s distribution' %
-self.name)
-raise NotImplementedError(msg)
+msg = 'Link function not implemented for the {} distribution'
+raise NotImplementedError(msg.format(self.name))
def nlogps(self, theta, x):
@ -303,7 +302,7 @@ def nlogps(self, theta, x):
prb = np.hstack((1.0, self.sf(x, *args), 0.0))
dprb = -np.diff(prb)
-logD = log(dprb)
+logD = log(dprb + _XMIN)
dx = np.diff(x, axis=0)
tie = (dx == 0)
if any(tie):

@ -12,7 +12,7 @@ import warnings
from wafo.plotbackend import plotbackend as plt
from wafo.misc import ecross, findcross, argsreduce
-from wafo.stats._constants import _EPS, _XMAX
+from wafo.stats._constants import _EPS, _XMAX, _XMIN
from wafo.stats._distn_infrastructure import rv_frozen, rv_continuous
from scipy._lib.six import string_types
import numdifftools as nd # @UnresolvedImport
@ -22,8 +22,8 @@ from scipy import optimize
import numpy as np
from scipy.special import expm1, log1p
-from numpy import (alltrue, arange, zeros, log, sqrt, exp,
-any, asarray, nan, pi, isfinite)
+from numpy import (arange, zeros, log, sqrt, exp,
+asarray, nan, pi, isfinite)
from numpy import flatnonzero as nonzero
@ -33,7 +33,7 @@ __all__ = ['Profile', 'FitDistribution']
floatinfo = np.finfo(float)
arr = asarray
-all = alltrue # @ReservedAssignment
+# all = alltrue # @ReservedAssignment
def _burr_link(x, logsf, phat, ix):
@ -291,7 +291,7 @@ class Profile(object):
def _default_i_fixed(fit_dist):
try:
i0 = 1 - np.isfinite(fit_dist.par_fix).argmax()
-except:
+except Exception:
i0 = 0
return i0
@ -365,7 +365,7 @@ class Profile(object):
self.data = np.ones_like(pvec) * nan
k1 = (pvec >= p_opt).argmax()
-for size, step in ((-1, -1), (pvec.size, 1)):
+for size, step in ((-1, -1), (np.size(pvec), 1)):
phatfree = self._par[self.i_free].copy()
for ix in range(k1, size, step):
Lmax, phatfree = self._profile_optimum(phatfree, pvec[ix])
@ -384,7 +384,7 @@ class Profile(object):
self.args = pvec[ix]
cond = self.data == -np.inf
-if any(cond):
+if np.any(cond):
ind, = cond.nonzero()
self.data.put(ind, floatinfo.min / 2.0)
ind1 = np.where(ind == 0, ind, ind - 1)
@ -1163,10 +1163,10 @@ class FitDistribution(rv_frozen):
prb = np.hstack((1.0, dist.sf(x, *args), 0.0))
dprb = -np.diff(prb)
-logD = log(dprb)
+logD = log(dprb + _XMIN)
dx = np.diff(x, axis=0)
tie = (dx == 0)
-if any(tie):
+if np.any(tie):
# TODO : implement this method for treating ties in data:
# Assume measuring error is delta. Then compute
# yL = F(xi - delta, theta)
@ -1257,7 +1257,8 @@ class FitDistribution(rv_frozen):
numpar = self.dist.numargs + 2
par_cov = zeros((numpar, numpar))
# self._penalty = 0
-somefixed = (self.par_fix is not None) and any(isfinite(self.par_fix))
+somefixed = ((self.par_fix is not None) and
+np.any(isfinite(self.par_fix)))
H = np.asmatrix(self._hessian(self._fitfun, self.par, self.data))
# H = -nd.Hessian(lambda par: self._fitfun(par, self.data),
@ -1265,7 +1266,7 @@ class FitDistribution(rv_frozen):
self.H = H
try:
if somefixed:
-allfixed = all(isfinite(self.par_fix))
+allfixed = np.all(isfinite(self.par_fix))
if allfixed:
self.par_cov[:, :] = 0
else:
@ -1326,7 +1327,7 @@ class FitDistribution(rv_frozen):
def profile_probability(self, log_sf, **kwds):
'''
-Profile Log- likelihood or Product Spacing-function for quantile.
+Profile Log- likelihood or Product Spacing-function for probability.
Examples
--------
@ -1344,7 +1345,7 @@ class FitDistribution(rv_frozen):
'''
return ProfileProbability(self, log_sf, **kwds)
-def plotfitsummary(self):
+def plotfitsummary(self, axes=None, fig=None):
''' Plot various diagnostic plots to assess the quality of the fit.
PLOTFITSUMMARY displays probability plot, density plot, residual
@ -1355,17 +1356,18 @@ class FitDistribution(rv_frozen):
Other distribution types will introduce curvature in the residual
plots.
'''
-plt.subplot(2, 2, 1)
+if axes is None:
+fig, axes = plt.subplots(2, 2, figsize=(11, 8))
+fig.subplots_adjust(hspace=0.4, wspace=0.4)
+# plt.subplots_adjust(hspace=0.4, wspace=0.4)
# self.plotecdf()
-self.plotesf()
-plt.subplot(2, 2, 2)
-self.plotepdf()
-plt.subplot(2, 2, 3)
-self.plotresq()
-plt.subplot(2, 2, 4)
-self.plotresprb()
-plt.subplots_adjust(hspace=0.4, wspace=0.4)
+plot_funs = (self.plotesf, self.plotepdf,
+self.plotresq, self.plotresprb)
+for axis, plot in zip(axes.ravel(), plot_funs):
+plot(axis=axis)
+if fig is None:
+fig = plt.gcf()
fixstr = ''
if self.par_fix is not None:
numfix = len(self.i_fixed)
@ -1380,13 +1382,12 @@ class FitDistribution(rv_frozen):
subtxt = 'Fit method: {0:s}, Fit p-value: {1:2.2f} {2:s}, phat=[{3:s}]'
par_txt = ('{:1.2g}, '*len(self.par))[:-2].format(*self.par)
try:
-plt.figtext(0.05, 0.01, subtxt.format(self.method.upper(),
-self.pvalue, fixstr,
-par_txt))
-except:
+fig.text(0.05, 0.01, subtxt.format(self.method.upper(),
+self.pvalue, fixstr, par_txt))
+except Exception:
pass
-def plotesf(self, symb1='r-', symb2='b.'):
+def plotesf(self, symb1='r-', symb2='b.', axis=None):
''' Plot Empirical and fitted Survival Function
The purpose of the plot is to graphically assess whether
@ -1394,16 +1395,33 @@ class FitDistribution(rv_frozen):
If so the empirical CDF should resemble the model CDF.
Other distribution types will introduce deviations in the plot.
'''
+if axis is None:
+axis = plt.gca()
n = len(self.data)
sf = (arange(n, 0, -1)) / n
-plt.semilogy(
-self.data, sf, symb2, self.data, self.sf(self.data), symb1)
-# plt.plot(self.data,sf,'b.',self.data,self.sf(self.data),'r-')
-plt.xlabel('x')
-plt.ylabel('F(x) (%s)' % self.dist.name)
-plt.title('Empirical SF plot')
+axis.semilogy(self.data, sf, symb2,
+self.data, self.sf(self.data), symb1)
+if True:
+i0 = 2
+low = int(np.log10(1/n)-0.7) - 1
+log_sf = np.log(np.logspace(low, -0.5, 7)[::-1])
+T = self.isf(np.exp(log_sf))
+ci = []
+t = []
+for Ti, log_sfi in zip(T, log_sf):
+try:
+Lx = self.profile_probability(log_sfi, i=i0)
+ci.append(np.exp(Lx.get_bounds(alpha=0.05)))
+t.append(Ti)
+except:
+pass
+axis.semilogy(t, ci, 'r--')
+axis.set_xlabel('x')
+axis.set_ylabel('F(x) (%s)' % self.dist.name)
+axis.set_title('Empirical SF plot')
-def plotecdf(self, symb1='r-', symb2='b.'):
+def plotecdf(self, symb1='r-', symb2='b.', axis=None):
''' Plot Empirical and fitted Cumulative Distribution Function
The purpose of the plot is to graphically assess whether
@ -1411,13 +1429,15 @@ class FitDistribution(rv_frozen):
If so the empirical CDF should resemble the model CDF.
Other distribution types will introduce deviations in the plot.
'''
+if axis is None:
+axis = plt.gca()
n = len(self.data)
F = (arange(1, n + 1)) / n
-plt.plot(self.data, F, symb2,
+axis.plot(self.data, F, symb2,
self.data, self.cdf(self.data), symb1)
-plt.xlabel('x')
-plt.ylabel('F(x) (%s)' % self.dist.name)
-plt.title('Empirical CDF plot')
+axis.set_xlabel('x')
+axis.set_ylabel('F(x) ({})'.format(self.dist.name))
+axis.set_title('Empirical CDF plot')
def _get_grid(self, odd=False):
x = np.atleast_1d(self.data)
@ -1452,7 +1472,7 @@ class FitDistribution(rv_frozen):
pdf, x = np.histogram(self.data, bins=limits, normed=True)
return self._staircase(x, pdf)
-def plotepdf(self, symb1='r-', symb2='b-'):
+def plotepdf(self, symb1='r-', symb2='b-', axis=None):
'''Plot Empirical and fitted Probability Density Function
The purpose of the plot is to graphically assess whether
@ -1460,19 +1480,21 @@ class FitDistribution(rv_frozen):
If so the histogram should resemble the model density.
Other distribution types will introduce deviations in the plot.
'''
+if axis is None:
+axis = plt.gca()
x, pdf = self._get_empirical_pdf()
ymax = pdf.max()
-# plt.hist(self.data,normed=True,fill=False)
-plt.plot(self.data, self.pdf(self.data), symb1,
+# axis.hist(self.data,normed=True,fill=False)
+axis.plot(self.data, self.pdf(self.data), symb1,
x, pdf, symb2)
-ax = list(plt.axis())
-ax[3] = min(ymax * 1.3, ax[3])
-plt.axis(ax)
-plt.xlabel('x')
-plt.ylabel('f(x) (%s)' % self.dist.name)
-plt.title('Density plot')
+axis1 = list(axis.axis())
+axis1[3] = min(ymax * 1.3, axis1[3])
+axis.axis(axis1)
+axis.set_xlabel('x')
+axis.set_ylabel('f(x) (%s)' % self.dist.name)
+axis.set_title('Density plot')
-def plotresq(self, symb1='r-', symb2='b.'):
+def plotresq(self, symb1='r-', symb2='b.', axis=None):
'''PLOTRESQ displays a residual quantile plot.
The purpose of the plot is to graphically assess whether
@ -1480,18 +1502,20 @@ class FitDistribution(rv_frozen):
plot will be linear. Other distribution types will introduce
curvature in the plot.
'''
+if axis is None:
+axis = plt.gca()
n = len(self.data)
eprob = (arange(1, n + 1) - 0.5) / n
y = self.ppf(eprob)
y1 = self.data[[0, -1]]
-plt.plot(self.data, y, symb2, y1, y1, symb1)
-plt.xlabel('Empirical')
-plt.ylabel('Model (%s)' % self.dist.name)
-plt.title('Residual Quantile Plot')
-plt.axis('tight')
-plt.axis('equal')
+axis.plot(self.data, y, symb2, y1, y1, symb1)
+axis.set_xlabel('Empirical')
+axis.set_ylabel('Model (%s)' % self.dist.name)
+axis.set_title('Residual Quantile Plot')
+axis.axis('tight')
+axis.axis('equal')
-def plotresprb(self, symb1='r-', symb2='b.'):
+def plotresprb(self, symb1='r-', symb2='b.', axis=None):
''' PLOTRESPRB displays a residual probability plot.
The purpose of the plot is to graphically assess whether
@ -1499,17 +1523,19 @@ class FitDistribution(rv_frozen):
plot will be linear. Other distribution types will introduce curvature
in the plot.
'''
+if axis is None:
+axis = plt.gca()
n = len(self.data)
# ecdf = (0.5:n-0.5)/n;
ecdf = arange(1, n + 1) / (n + 1)
mcdf = self.cdf(self.data)
p1 = [0, 1]
-plt.plot(ecdf, mcdf, symb2, p1, p1, symb1)
-plt.xlabel('Empirical')
-plt.ylabel('Model (%s)' % self.dist.name)
-plt.title('Residual Probability Plot')
-plt.axis('equal')
-plt.axis([0, 1, 0, 1])
+axis.plot(ecdf, mcdf, symb2, p1, p1, symb1)
+axis.set_xlabel('Empirical')
+axis.set_ylabel('Model (%s)' % self.dist.name)
+axis.set_title('Residual Probability Plot')
+axis.axis('equal')
+axis.axis([0, 1, 0, 1])
def _pvalue(self, theta, x, unknown_numpar=None):
''' Return P-value for the fit using Moran's negative log Product
@ -1521,7 +1547,7 @@ class FitDistribution(rv_frozen):
'''
dx = np.diff(x, axis=0)
tie = (dx == 0)
-if any(tie):
+if np.any(tie):
warnings.warn(
'P-value is on the conservative side (i.e. too large) due to' +
' ties in the data!')
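The plotting refactor above threads an optional matplotlib axis through every plot method instead of drawing on the current figure. A hedged sketch of the new call pattern (phat is assumed to be an existing FitDistribution instance):

import matplotlib.pyplot as plt

fig, axes = plt.subplots(2, 2, figsize=(11, 8))
phat.plotfitsummary(axes=axes, fig=fig)   # fills all four panels

ax = plt.figure().gca()
phat.plotesf(axis=ax)                     # any single plot can target a chosen axis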

@ -1,13 +1,15 @@
from six import iteritems
from numpy.testing import (run_module_suite, assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal,
TestCase, assert_, assert_raises,)
import numpy as np
from numpy import array, cos, exp, linspace, pi, sin, diff, arange, ones
from wafo.data import sea
+import wafo
from wafo.misc import (JITImport, Bunch, detrendma, DotDict, findcross, ecross,
findextrema, findrfc, rfcfilter, findtp, findtc,
+findrfc_astm,
findoutliers, common_shape, argsreduce, stirlerr,
getshipchar, betaloge,
gravity, nextpow2, discretize, polar2cart,
@ -17,6 +19,64 @@ from wafo.misc import (JITImport, Bunch, detrendma, DotDict, findcross, ecross,
parse_kwargs)
def test_disufq():
d_inf = [[0., -144.3090093, -269.37681737, -375.20342419, -461.78882978,
-529.13303412, -577.23603722, -606.09783908, -615.7184397,
-606.09783908, -577.23603722, -529.13303412, -461.78882978,
-375.20342419, -269.37681737, -144.3090093, 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0.00000000e+00, 0.00000000e+00, 5.65917684e-01, 2.82958842e+00,
7.92284757e+00, 1.69775305e+01, 3.11254726e+01, 5.14985092e+01,
7.92284757e+01, 1.15447207e+02, 1.61286540e+02, 2.17878308e+02,
2.86354348e+02, 3.67846494e+02, 4.63486583e+02, 5.74406449e+02,
7.01737928e+02, 8.46612855e+02, 8.46046937e+02, 8.43783266e+02,
8.38690007e+02, 8.29635324e+02, 8.15487382e+02, 7.95114345e+02,
7.67384379e+02, 7.31165647e+02, 6.85326315e+02, 6.28734546e+02,
5.60258507e+02, 4.78766360e+02, 3.83126272e+02, 2.72206406e+02]]
# depth = 10
d_10 = [[-3.43299449, -144.58425201, -269.97386241, -376.2314858,
-463.35503499, -531.34450329, -580.19988853, -609.92118976,
-620.50840653, -611.96153858, -584.28058577, -537.46554798,
-471.51642516, -386.43321726, -282.21592426, -158.8601612, 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0.87964472, 3.30251807, 8.7088916, 18.1892694,
32.87215973, 53.88831991, 82.36912798,
119.44619211, 166.25122204, 223.91597801, 293.57224772,
376.35183479, 473.38655268, 585.80822113, 714.74866403,
861.33970818, 846.05101255, 843.78326617, 838.69000702,
829.63532408, 815.48738199, 795.11434539, 767.38437889,
731.16564714, 685.32631478, 628.73454642, 560.25850671,
478.76636028, 383.12627176, 272.20640579]]
g = 9.81
n = 32
amp = np.ones(n) + 1j * 1
f = np.linspace(0., 3.0, n // 2 + 1)
nmin = 2
nmax = n // 2 + 1
cases = 1
ns = n
w = 2.0 * pi * f
from wafo import c_library
d_truth = [d_10, d_inf]
for i, water_depth in enumerate([10.0, 10000000]):
kw = wafo.wave_theory.dispersion_relation.w2k(w, 0., water_depth, g)[0]
data2 = wafo.numba_misc.disufq(amp.real, amp.imag, w, kw, water_depth,
g, nmin, nmax, cases, ns)
data = c_library.disufq(amp.real, amp.imag, w, kw, water_depth, g,
nmin, nmax, cases, ns)
# print(data[0])
# print(data[1])
# deep water
assert_array_almost_equal(data, data2)
assert_array_almost_equal(data, d_truth[i])
# assert(False)
def test_JITImport():
np = JITImport('numpy')
assert_equal(1.0, np.exp(0))
@ -24,8 +84,8 @@ def test_JITImport():
def test_bunch():
d = Bunch(test1=1, test2=3)
-assert_equal(1, d.test1)
-assert_equal(3, d.test2)
+assert_equal(1, getattr(d, 'test1'))
+assert_equal(3, getattr(d, 'test2'))
def test_dotdict():
@ -195,15 +255,18 @@ def test_findrfc():
219, 221, 223, 225, 226, 228, 230, 231, 233, 235, 237, 238, 240,
241, 243, 245, 247, 248]))
_ti, tp = t[ind], x[ind]
-ind1 = findrfc(tp, 0.3)
+for method in ['clib', 2, 1, 0]:
+ind1 = findrfc(tp, 0.3, method=method)
+if method in [1, 0]:
+ind1 = ind1[:-1]
assert_array_almost_equal(
ind1,
np.array([0, 9, 32, 53, 74, 95, 116, 137]))
assert_array_almost_equal(
tp[ind1],
np.array(
[-0.00743352, 1.08753972, -1.07206545, 1.09550837, -1.07940458,
1.07849396, -1.0995006, 1.08094452]))
def test_rfcfilter():
@ -228,7 +291,7 @@ def test_rfcfilter():
assert_array_almost_equal(
ind,
np.array(
[1, 3, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21,
23, 25, 26, 28, 29, 31, 33, 35, 36, 38, 39, 41, 43,
45, 46, 48, 50, 51, 53, 55, 56, 58, 60, 61, 63, 65,
67, 68, 70, 71, 73, 75, 77, 78, 80, 81, 83, 85, 87,
@ -248,6 +311,24 @@ def test_rfcfilter():
[-0.00743352, 1.08753972, -1.07206545, 1.09550837, -1.07940458,
1.07849396, -1.0995006, 1.08094452, 0.11983423]))
tp3 = findrfc_astm(tp)
assert_array_almost_equal((77, 3), tp3.shape)
# print(tp3[-5:])
assert_array_almost_equal(tp3[-5:],
[[0.01552179, 0.42313414, 1.],
[1.09750448, -0.00199612, 0.5],
[1.09022256, -0.00927804, 0.5],
[0.48055514, 0.60038938, 0.5],
[0.03200274, 0.15183698, 0.5]])
assert_array_almost_equal(tp3[:5],
[[0.03578165, 0.28906389, 1.],
[0.03602834, 0.56726584, 1.],
[0.03816623, 0.76461446, 1.],
[0.0638364, 0.92381302, 1.],
[0.07759006, 0.99628738, 1.]])
# assert(False)
def test_findtp():
x = sea()
@ -291,9 +372,9 @@ def test_findoutliers():
zcrit = 0
[inds, indg] = findoutliers(xx[:, 1], zcrit, dcrit, ddcrit, verbose=False)
assert_array_almost_equal(inds[np.r_[0, 1, 2, -3, -2, -1]],
np.array([6, 7, 8, 9509, 9510, 9511]))
assert_array_almost_equal(indg[np.r_[0, 1, 2, -3, -2, -1]],
np.array([0, 1, 2, 9521, 9522, 9523]))
def test_common_shape():
@ -310,7 +391,7 @@ def test_common_shape():
def test_argsreduce():
-A = linspace(0, 19, 20).reshape((4, 5))
+A = np.reshape(linspace(0, 19, 20), (4, 5))
B = 2
C = range(5)
cond = np.ones(A.shape)
@ -320,8 +401,8 @@ def test_argsreduce():
[A2, B2, C2] = argsreduce(cond, A, B, C)
assert_equal(B2.shape, (15,))
assert_array_equal(A2,
np.array([0., 1., 2., 3., 4., 5., 6., 7.,
8., 9., 15., 16., 17., 18., 19.]))
assert_array_equal(
B2, np.array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
assert_array_equal(
@ -392,10 +473,10 @@ def test_discretize():
1.96349541, 2.15984495, 2.35619449, 2.55254403, 2.74889357,
2.94524311, 3.14159265]))
assert_array_almost_equal(
y, np.array([1.00000000e+00, 9.80785280e-01,
9.23879533e-01,
8.31469612e-01, 7.07106781e-01, 5.55570233e-01,
3.82683432e-01, 1.95090322e-01, 6.12323400e-17,
-1.95090322e-01, -3.82683432e-01, -5.55570233e-01,
-7.07106781e-01, -8.31469612e-01, -9.23879533e-01,
-9.80785280e-01, -1.00000000e+00]))
@ -413,9 +494,9 @@ def test_discretize_adaptive():
assert_array_almost_equal(
y,
np.array(
[1.00000000e+00, 9.80785280e-01, 9.23879533e-01,
8.31469612e-01, 7.07106781e-01, 5.55570233e-01,
3.82683432e-01, 1.95090322e-01, 6.12323400e-17,
-1.95090322e-01, -3.82683432e-01, -5.55570233e-01,
-7.07106781e-01, -8.31469612e-01, -9.23879533e-01,
-9.80785280e-01, -1.00000000e+00]))
@ -435,13 +516,13 @@ def test_polar2cart_n_cart2polar():
assert_array_almost_equal(
y,
np.array(
[0.00000000e+00, 8.22972951e-01, 1.62349735e+00,
2.37973697e+00, 3.07106356e+00, 3.67861955e+00,
4.18583239e+00, 4.57886663e+00, 4.84700133e+00,
4.98292247e+00, 4.98292247e+00, 4.84700133e+00,
4.57886663e+00, 4.18583239e+00, 3.67861955e+00,
3.07106356e+00, 2.37973697e+00, 1.62349735e+00,
8.22972951e-01, 6.12323400e-16]))
ti, ri = cart2polar(x, y)
assert_array_almost_equal(
ti,
@ -474,6 +555,7 @@ def test_tranproc():
class TestPiecewise(TestCase):
def test_condition_is_single_bool_list(self):
assert_raises(ValueError, piecewise, [True, False], [1], [0, 0])
@ -523,7 +605,7 @@ class TestPiecewise(TestCase):
x = np.linspace(-2.5, 2.5, 6)
vals = piecewise([x < 0, x >= 0], [lambda x: -x, lambda x: x], (x,))
assert_array_equal(vals,
[2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
def test_abs_function_with_scalar(self):
x = np.array(-2.5)
@ -533,22 +615,22 @@ class TestPiecewise(TestCase):
def test_otherwise_condition(self):
x = np.linspace(-2.5, 2.5, 6)
vals = piecewise([x < 0, ], [lambda x: -x, lambda x: x], (x,))
assert_array_equal(vals, [2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
def test_passing_further_args_to_fun(self):
def fun0(x, y, scale=1.):
return -x * y / scale
def fun1(x, y, scale=1.):
return x * y / scale
x = np.linspace(-2.5, 2.5, 6)
vals = piecewise([x < 0, ], [fun0, fun1], (x,), args=(2.,), scale=2.)
assert_array_equal(vals, [2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
def test_step_function(self):
x = np.linspace(-2.5, 2.5, 6)
vals = piecewise([x < 0, x >= 0], [-1, 1], x)
assert_array_equal(vals, [-1., -1., -1., 1., 1., 1.])
def test_step_function_with_scalar(self):
x = 1
@ -560,11 +642,11 @@ class TestPiecewise(TestCase):
X, Y = np.meshgrid(x, x)
vals = piecewise(
[X * Y < 0, ], [lambda x, y: -x * y, lambda x, y: x * y], (X, Y))
assert_array_equal(vals, [[4., 2., -0., 2., 4.],
[2., 1., -0., 1., 2.],
[-0., -0., 0., 0., 0.],
[2., 1., 0., 1., 2.],
[4., 2., 0., 2., 4.]])
def test_fill_value_and_function_with_two_args(self):
x = np.linspace(-2, 2, 5)
@ -573,11 +655,11 @@ class TestPiecewise(TestCase):
[lambda x, y: -x * y, lambda x, y: x * y], (X, Y),
fill_value=np.nan)
nan = np.nan
assert_array_equal(vals, [[4., 2., nan, 2., 4.],
[2., 1., nan, 1., 2.],
[nan, nan, nan, nan, nan],
[2., 1., nan, 1., 2.],
[4., 2., nan, 2., 4.]])
def test_fill_value2_and_function_with_two_args(self):
x = np.linspace(-2, 2, 5)
@ -586,11 +668,11 @@ class TestPiecewise(TestCase):
[lambda x, y: -x * y, lambda x, y: x * y, np.nan],
(X, Y))
nan = np.nan
assert_array_equal(vals, [[4., 2., nan, 2., 4.],
[2., 1., nan, 1., 2.],
[nan, nan, nan, nan, nan],
[2., 1., nan, 1., 2.],
[4., 2., nan, 2., 4.]])
class TestRotationMatrix(TestCase):

@ -5,9 +5,10 @@ k2w - Translates from wave number to frequency
w2k - Translates from frequency to wave number
"""
import warnings
-# import numpy as np
+import numpy as np
+from wafo.misc import lazywhere
from numpy import (atleast_1d, sqrt, ones_like, zeros_like, arctan2, where,
-tanh, any, sin, cos, sign, inf,
+tanh, sin, cos, sign, inf,
flatnonzero, finfo, cosh, abs)
__all__ = ['k2w', 'w2k']
@ -75,7 +76,7 @@ def k2w(k1, k2=0e0, h=inf, g=9.81, u1=0e0, u2=0e0):
w = where(k > 0, ku1 + ku2 + sqrt(gi * k * tanh(k * hi)), 0.0)
cond = (w < 0)
-if any(cond):
+if np.any(cond):
txt0 = '''
Waves and current are in opposite directions
making some of the frequencies negative.
@ -171,13 +172,15 @@ def w2k(w, theta=0.0, h=inf, g=9.81, count_limit=100):
while (ix.size > 0 and count < count_limit):
ki = k[ix]
kh = ki * hi[ix]
+coshkh2 = lazywhere(np.abs(kh) < 350, (kh, ),
+lambda kh: cosh(kh) ** 2.0, fillvalue=np.inf)
hn[ix] = (ki * tanh(kh) - wi[ix] ** 2.0 / gi) / \
-(tanh(kh) + kh / (cosh(kh) ** 2.0))
+(tanh(kh) + kh / coshkh2)
knew = ki - hn[ix]
# Make sure that the current guess is not zero.
# When Newton's Method suggests steps that lead to zero guesses
# take a step 9/10ths of the way to zero:
-ksmall = find(abs(knew) == 0)
+ksmall = find(np.abs(knew) == 0)
if ksmall.size > 0:
knew[ksmall] = ki[ksmall] / 10.0
hn[ix[ksmall]] = ki[ksmall] - knew[ksmall]
@ -186,7 +189,7 @@ def w2k(w, theta=0.0, h=inf, g=9.81, count_limit=100):
# disp(['Iteration ',num2str(count),' Number of points left: '
# num2str(length(ix)) ]),
-ix = find((abs(hn) > sqrt(eps) * abs(k)) * abs(hn) > sqrt(eps))
+ix = find((np.abs(hn) > sqrt(eps) * np.abs(k)) * np.abs(hn) > sqrt(eps))
count += 1
if count == count_limit:
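The w2k changes above guard the Newton iteration against cosh overflow for large kh by computing the cosh(kh)**2 term with lazywhere. A hedged usage sketch of the solver, which inverts the finite-depth dispersion relation w**2 = g*k*tanh(k*h) (argument values are invented; the [0] indexing follows the test added earlier in this commit):

import numpy as np
from wafo.wave_theory.dispersion_relation import w2k

w = 2 * np.pi * np.linspace(0.05, 3.0, 16)   # angular frequencies [rad/s]
k = w2k(w, theta=0.0, h=10.0, g=9.81)[0]     # wave numbers in 10 m water depth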
