Ongoing work

Branch: master
Author: Per.Andreas.Brodtkorb (12 years ago)
Parent: 8242a74b60
Commit: 745237a4c0

@ -66,6 +66,13 @@ def setbit(i, bit, value=1):
def setbits(bitlist): def setbits(bitlist):
""" """
Set bits of val to values in bitlist Set bits of val to values in bitlist
Example
-------
>>> setbits([1,1])
3
>>> setbits([1,0])
1
""" """
# return bitlist[7]<<7 | bitlist[6]<<6 | bitlist[5]<<5 | bitlist[4]<<4 | \ # return bitlist[7]<<7 | bitlist[6]<<6 | bitlist[5]<<5 | bitlist[4]<<4 | \
# bitlist[3]<<3 | bitlist[2]<<2 | bitlist[1]<<1 | bitlist[0] # bitlist[3]<<3 | bitlist[2]<<2 | bitlist[1]<<1 | bitlist[0]
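The doctest added above pins down the bit order. A minimal sketch consistent with those examples (assuming the least significant bit comes first in bitlist; not necessarily the library's exact implementation):

    def setbits(bitlist):
        # Pack a list of 0/1 values, LSB first, into a single integer.
        value = 0
        for i, bit in enumerate(bitlist):
            value |= (bit & 1) << i
        return value

    setbits([1, 1])   # -> 3
    setbits([1, 0])   # -> 1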

@ -27,7 +27,7 @@ from scipy.linalg import toeplitz, sqrtm, svd, cholesky, diagsvd, pinv
from scipy import sparse from scipy import sparse
from pylab import stineman_interp from pylab import stineman_interp
from wafo.wafodata import WafoData from wafo.wafodata import PlotData
from wafo.misc import sub_dict_select, nextpow2 #, JITImport from wafo.misc import sub_dict_select, nextpow2 #, JITImport
import wafo.spectrum as _wafospec import wafo.spectrum as _wafospec
#_wafospec = JITImport('wafo.spectrum') #_wafospec = JITImport('wafo.spectrum')
@ -103,7 +103,7 @@ def _set_seed(iseed):
# return dot(std,random.randn(n, cases)) + mu[:,newaxis] # return dot(std,random.randn(n, cases)) + mu[:,newaxis]
class CovData1D(WafoData): class CovData1D(PlotData):
""" Container class for 1D covariance data objects in WAFO """ Container class for 1D covariance data objects in WAFO
Member variables Member variables
@ -127,7 +127,7 @@ class CovData1D(WafoData):
See also See also
-------- --------
WafoData PlotData
CovData CovData
""" """

@ -52,7 +52,7 @@ def savitzky_golay(y, window_size, order, deriv=0):
Notes Notes
----- -----
The Savitzky-Golay is a type of low-pass filter, particularly The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-squares fit with a approach is to make for each point a least-squares fit with a
polynomial of high order over an odd-sized window centered at polynomial of high order over an odd-sized window centered at
the point. the point.
@ -1088,7 +1088,7 @@ def demo_monoticity():
plt.ioff() plt.ioff()
plt.show() plt.show()
def main(): def test_doctstrings():
from scipy import interpolate from scipy import interpolate
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import matplotlib import matplotlib
@ -1150,7 +1150,7 @@ def test_docstrings():
if __name__ == '__main__': if __name__ == '__main__':
# test_docstrings() # test_docstrings()
#main() #test_doctstrings()
#test_smoothing_spline() #test_smoothing_spline()
#compare_methods() #compare_methods()
demo_monoticity() demo_monoticity()
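For context, a hedged usage sketch of the savitzky_golay signature shown in the hunk above (window_size must be odd and larger than order; the return value is assumed here to be the smoothed array for deriv=0):

    import numpy as np
    t = np.linspace(-4, 4, 500)
    y = np.exp(-t ** 2) + np.random.normal(0, 0.05, t.shape)   # noisy Gaussian bump
    ysg = savitzky_golay(y, window_size=31, order=4)            # local 4th-order fits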

@ -21,11 +21,13 @@ from scipy.ndimage.morphology import distance_transform_edt
from numpy import pi, sqrt, atleast_2d, exp, newaxis #@UnresolvedImport from numpy import pi, sqrt, atleast_2d, exp, newaxis #@UnresolvedImport
from wafo.misc import meshgrid, nextpow2, tranproc #, trangood from wafo.misc import meshgrid, nextpow2, tranproc #, trangood
from wafo.wafodata import WafoData from wafo.wafodata import PlotData
from wafo.dctpack import dct, dctn, idctn from wafo.dctpack import dct, dctn, idctn
from wafo.plotbackend import plotbackend as plt from wafo.plotbackend import plotbackend as plt
from wafo import fig from wafo import fig
_TINY = np.finfo(float).machar.tiny
def _invnorm(q): def _invnorm(q):
return special.ndtri(q) return special.ndtri(q)
@ -65,6 +67,7 @@ def sphere_volume(d, r=1.0):
Chapman and Hall, pp 105 Chapman and Hall, pp 105
""" """
return (r ** d) * 2.0 * pi ** (d / 2.0) / (d * gamma(d / 2.0)) return (r ** d) * 2.0 * pi ** (d / 2.0) / (d * gamma(d / 2.0))
class KDEgauss(object): class KDEgauss(object):
""" Kernel-Density Estimator base class. """ Kernel-Density Estimator base class.
@ -136,7 +139,7 @@ class KDEgauss(object):
for i in ind.tolist(): # for i in ind.tolist(): #
h[i] = get_smoothing(self.dataset[i]) h[i] = get_smoothing(self.dataset[i])
deth = h.prod() deth = h.prod()
self.inv_hs = linalg.diag(1.0 / h) self.inv_hs = np.diag(1.0 / h)
else: #fully general smoothing matrix else: #fully general smoothing matrix
deth = linalg.det(h) deth = linalg.det(h)
if deth <= 0: if deth <= 0:
@ -836,7 +839,7 @@ class KDE(_KDE):
for i in ind.tolist(): # for i in ind.tolist(): #
h[i] = get_smoothing(self.dataset[i]) h[i] = get_smoothing(self.dataset[i])
deth = h.prod() deth = h.prod()
self.inv_hs = linalg.diag(1.0 / h) self.inv_hs = np.diag(1.0 / h)
else: #fully general smoothing matrix else: #fully general smoothing matrix
deth = linalg.det(h) deth = linalg.det(h)
if deth <= 0: if deth <= 0:
@ -1025,7 +1028,7 @@ class KRegression(_KDE):
>>> y = 2*np.exp(-x**2/(2*0.3**2))+3*np.exp(-(x-1)**2/(2*0.7**2)) + ei >>> y = 2*np.exp(-x**2/(2*0.3**2))+3*np.exp(-(x-1)**2/(2*0.7**2)) + ei
>>> kreg = wk.KRegression(x, y) >>> kreg = wk.KRegression(x, y)
>>> f = kreg(output='plotobj', title='Kernel regression', plotflag=1) >>> f = kreg(output='plotobj', title='Kernel regression', plotflag=1)
>>> f.plot(label='p=0') >>> h = f.plot(label='p=0')
""" """
def __init__(self, data, y, p=0, hs=None, kernel=None, alpha=0.0, xmin=None, xmax=None, inc=128, L2=None): def __init__(self, data, y, p=0, hs=None, kernel=None, alpha=0.0, xmin=None, xmax=None, inc=128, L2=None):
@ -3045,22 +3048,24 @@ def evar(y):
Examples: Examples:
-------- --------
1D signal 1D signal
>>> n = 1e6; >>> n = 1e6
>>> x = np.linspace(0,100,n); >>> x = np.linspace(0,100,n);
>>> y = np.cos(x/10)+(x/50) >>> y = np.cos(x/10)+(x/50)
>>> var0 = 0.02 # noise variance >>> var0 = 0.02 # noise variance
>>> yn = y + sqrt(var0)*np.random.randn(*y.shape) >>> yn = y + sqrt(var0)*np.random.randn(*y.shape)
>>> evar(yn) #estimated variance >>> s = evar(yn) #estimated variance
0.020018619214933957 >>> np.abs(s-var0)/var0 < 3.5/np.sqrt(n)
True
2D function 2D function
>>> xp = np.linspace(0,1,50) >>> xp = np.linspace(0,1,50)
>>> x, y = np.meshgrid(xp,xp) >>> x, y = np.meshgrid(xp,xp)
>>> f = np.exp(x+y) + np.sin((x-2*y)*3) >>> f = np.exp(x+y) + np.sin((x-2*y)*3)
>>> var0 = 0.04 # noise variance >>> var0 = 0.04 # noise variance
>>> fn = f + sqrt(var0)*np.random.randn(*f.shape) >>> fn = f + sqrt(var0)*np.random.randn(*f.shape)
>>> evar(fn) # estimated variance >>> s = evar(fn) # estimated variance
0.022806636928561375 >>> np.abs(s-var0)/var0 < 3.5/np.sqrt(50)
True
3D function 3D function
>>> yp = np.linspace(-2,2,50) >>> yp = np.linspace(-2,2,50)
@ -3068,8 +3073,10 @@ def evar(y):
>>> f = x*exp(-x**2-y**2-z**2) >>> f = x*exp(-x**2-y**2-z**2)
>>> var0 = 0.5 # noise variance >>> var0 = 0.5 # noise variance
>>> fn = f + sqrt(var0)*np.random.randn(*f.shape) >>> fn = f + sqrt(var0)*np.random.randn(*f.shape)
>>> evar(fn) # estimated variance >>> s = evar(fn) # estimated variance
0.47375136534336421 >>> np.abs(s-var0)/var0 < 3.5/np.sqrt(50)
True
Other example Other example
------------- -------------
@ -3161,7 +3168,7 @@ def smoothn(data, s=None, weight=None, robust=False, z0=None, tolz=1e-3, maxiter
1-D example 1-D example
>>> import matplotlib.pyplot as plt >>> import matplotlib.pyplot as plt
>>> x = np.linspace(0,100,2**8) >>> x = np.linspace(0,100,2**8)
>>> y = cos(x/10)+(x/50)**2 + np.random.randn(x.shape)/10 >>> y = np.cos(x/10)+(x/50)**2 + np.random.randn(*x.shape)/10
>>> y[np.r_[70, 75, 80]] = np.array([5.5, 5, 6]) >>> y[np.r_[70, 75, 80]] = np.array([5.5, 5, 6])
>>> z = smoothn(y) # Regular smoothing >>> z = smoothn(y) # Regular smoothing
>>> zr = smoothn(y,robust=True) # Robust smoothing >>> zr = smoothn(y,robust=True) # Robust smoothing
@ -3839,8 +3846,8 @@ def kreg_demo3(x,y, fun1, hs=None, fun='hisj', plotlog=False):
# ref Casella and Berger (1990) "Statistical inference" pp444 # ref Casella and Berger (1990) "Statistical inference" pp444
a = 2*pi + z0**2/(ciii+1e-16) # a = 2*pi + z0**2/(ciii+1e-16)
b = 2*(1+z0**2/(ciii+1e-16)) # b = 2*(1+z0**2/(ciii+1e-16))
# plo2 = ((a-sqrt(a**2-2*pi**2*b))/b).clip(min=0,max=1) # plo2 = ((a-sqrt(a**2-2*pi**2*b))/b).clip(min=0,max=1)
# pup2 = ((a+sqrt(a**2-2*pi**2*b))/b).clip(min=0,max=1) # pup2 = ((a+sqrt(a**2-2*pi**2*b))/b).clip(min=0,max=1)
@ -3986,15 +3993,15 @@ def check_kreg_demo4():
#kde_gauss_demo() #kde_gauss_demo()
#kreg_demo2(n=120,symmetric=True,fun='hste', plotlog=True) #kreg_demo2(n=120,symmetric=True,fun='hste', plotlog=True)
k = 0 k = 0
for i, n in enumerate([100,300,600,4000]): for i, n in enumerate([100,300,600,4000]): #@UnusedVariable
x,y, fun1 = _get_data(n, symmetric=True,loc1=0.1, scale1=0.6, scale2=0.75) x,y, fun1 = _get_data(n, symmetric=True,loc1=0.1, scale1=0.6, scale2=0.75)
k0 = k #k0 = k
hopt1, h1,h2 = _get_regression_smooting(x,y,fun='hos') hopt1, _h1, _h2 = _get_regression_smooting(x,y,fun='hos')
hopt2, h1,h2 = _get_regression_smooting(x,y,fun='hste') hopt2, _h1, _h2 = _get_regression_smooting(x,y,fun='hste')
hopt = sqrt(hopt1*hopt2) hopt = sqrt(hopt1*hopt2)
#hopt = _get_regression_smooting(x,y,fun='hos')[0] #hopt = _get_regression_smooting(x,y,fun='hos')[0]
for j, fun in enumerate(['hste']): # , 'hisj', 'hns', 'hstt' for j, fun in enumerate(['hste']): # , 'hisj', 'hns', 'hstt' @UnusedVariable
hsmax, hs1, hs2 =_get_regression_smooting(x,y,fun=fun) hsmax, _hs1, _hs2 =_get_regression_smooting(x,y,fun=fun)
fmax = kreg_demo4(x, y, hsmax+0.1, hopt) fmax = kreg_demo4(x, y, hsmax+0.1, hopt)
for hi in np.linspace(hsmax*0.1,hsmax,55): for hi in np.linspace(hsmax*0.1,hsmax,55):
@ -4019,7 +4026,7 @@ def check_regression_bin():
#kde_gauss_demo() #kde_gauss_demo()
#kreg_demo2(n=120,symmetric=True,fun='hste', plotlog=True) #kreg_demo2(n=120,symmetric=True,fun='hste', plotlog=True)
k = 0 k = 0
for i, n in enumerate([100,300,600,4000]): for i, n in enumerate([100,300,600,4000]): #@UnusedVariable
x,y, fun1 = _get_data(n, symmetric=True,loc1=0.1, scale1=0.6, scale2=0.75) x,y, fun1 = _get_data(n, symmetric=True,loc1=0.1, scale1=0.6, scale2=0.75)
fbest = regressionbin(x, y, alpha=0.05, color='g', label='Transit_D') fbest = regressionbin(x, y, alpha=0.05, color='g', label='Transit_D')
@ -4040,7 +4047,7 @@ def check_regression_bin():
def check_bkregression(): def check_bkregression():
plt.ion() plt.ion()
k = 0 k = 0
for i, n in enumerate([50, 100,300,600]): for i, n in enumerate([50, 100,300,600]): #@UnusedVariable
x,y, fun1 = _get_data(n, symmetric=True,loc1=0.1, scale1=0.6, scale2=0.75) x,y, fun1 = _get_data(n, symmetric=True,loc1=0.1, scale1=0.6, scale2=0.75)
bkreg = BKRegression(x,y) bkreg = BKRegression(x,y)
fbest = bkreg.prb_search_best(hsfun='hste', alpha=0.05, color='g', label='Transit_D') fbest = bkreg.prb_search_best(hsfun='hste', alpha=0.05, color='g', label='Transit_D')
@ -4204,7 +4211,7 @@ def regressionbin(x,y, alpha=0.05, color='r', label=''):
f = smoothed_bin_prb(x, y, hi, hopt, alpha, color, label, bin_prb) f = smoothed_bin_prb(x, y, hi, hopt, alpha, color, label, bin_prb)
if f.aicc<=fbest.aicc: if f.aicc<=fbest.aicc:
fbest = f fbest = f
hbest = hi #hbest = hi
return fbest return fbest
def kde_gauss_demo(n=50): def kde_gauss_demo(n=50):
''' '''
@ -4227,7 +4234,7 @@ def kde_gauss_demo(n=50):
#dist = st.norm #dist = st.norm
dist = st.expon dist = st.expon
data = dist.rvs(loc=0, scale=1.0, size=n) data = dist.rvs(loc=0, scale=1.0, size=n)
d, N = np.atleast_2d(data).shape d, N = np.atleast_2d(data).shape #@UnusedVariable
if d==1: if d==1:
plot_options = [dict(color='red'), dict(color='green'), dict(color='black')] plot_options = [dict(color='red'), dict(color='green'), dict(color='black')]
@ -4265,7 +4272,9 @@ def test_docstrings():
doctest.testmod() doctest.testmod()
if __name__ == '__main__': if __name__ == '__main__':
check_bkregression() test_docstrings()
#check_bkregression()
#check_regression_bin() #check_regression_bin()
#check_kreg_demo3() #check_kreg_demo3()
#check_kreg_demo4() #check_kreg_demo4()
@ -4274,7 +4283,7 @@ if __name__ == '__main__':
#test_smoothn_2d() #test_smoothn_2d()
#test_smoothn_cardioid() #test_smoothn_cardioid()
#test_docstrings()
#kde_demo2() #kde_demo2()
#kreg_demo1(fast=True) #kreg_demo1(fast=True)
#kde_gauss_demo() #kde_gauss_demo()
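The evar doctests above are changed from exact floating-point outputs to tolerance checks, since the noise is drawn at random. A minimal sketch of that pattern, using evar as defined in this module and the same 3.5/sqrt(n) bound adopted in the new doctests:

    import numpy as np
    n = int(1e6)
    x = np.linspace(0, 100, n)
    y = np.cos(x / 10) + (x / 50)
    var0 = 0.02                                   # true noise variance
    yn = y + np.sqrt(var0) * np.random.randn(*y.shape)
    s = evar(yn)                                  # estimated variance
    assert np.abs(s - var0) / var0 < 3.5 / np.sqrt(n)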

@ -30,7 +30,7 @@ __all__ = ['is_numlike', 'JITImport', 'DotDict', 'Bunch', 'printf', 'sub_dict_se
'findextrema', 'findpeaks', 'findrfc', 'rfcfilter', 'findtp', 'findtc', 'findextrema', 'findpeaks', 'findrfc', 'rfcfilter', 'findtp', 'findtc',
'findoutliers', 'common_shape', 'argsreduce', 'findoutliers', 'common_shape', 'argsreduce',
'stirlerr', 'getshipchar', 'betaloge', 'gravity', 'nextpow2', 'stirlerr', 'getshipchar', 'betaloge', 'gravity', 'nextpow2',
'discretize', 'pol2cart', 'cart2pol', 'meshgrid', 'ndgrid', 'discretize', 'polar2cart', 'cart2polar', 'meshgrid', 'ndgrid',
'trangood', 'tranproc', 'plot_histgrm', 'num2pistr', 'test_docstrings'] 'trangood', 'tranproc', 'plot_histgrm', 'num2pistr', 'test_docstrings']
@ -1770,7 +1770,7 @@ def _discretize_adaptive(fun, a, b, tol=0.005, n=5):
return x, fx return x, fx
def pol2cart(theta, rho, z=None): def polar2cart(theta, rho, z=None):
''' '''
Transform polar coordinates into 2D cartesian coordinates. Transform polar coordinates into 2D cartesian coordinates.
@ -1781,7 +1781,7 @@ def pol2cart(theta, rho, z=None):
See also See also
-------- --------
cart2pol cart2polar
''' '''
x, y = rho * cos(theta), rho * sin(theta) x, y = rho * cos(theta), rho * sin(theta)
if z is None: if z is None:
@ -1790,7 +1790,7 @@ def pol2cart(theta, rho, z=None):
return x, y, z return x, y, z
def cart2pol(x, y, z=None): def cart2polar(x, y, z=None):
''' Transform 2D cartesian coordinates into polar coordinates. ''' Transform 2D cartesian coordinates into polar coordinates.
Returns Returns
@ -1802,7 +1802,7 @@ def cart2pol(x, y, z=None):
See also See also
-------- --------
pol2cart polar2cart
''' '''
t, r = arctan2(y, x), hypot(x, y) t, r = arctan2(y, x), hypot(x, y)
if z is None: if z is None:
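A quick round-trip check for the renamed helpers, following the formulas shown above (pure numpy, independent of wafo):

    import numpy as np
    theta, rho = np.pi / 3, 2.0
    x, y = rho * np.cos(theta), rho * np.sin(theta)   # polar2cart
    t, r = np.arctan2(y, x), np.hypot(x, y)           # cart2polar
    assert np.allclose([t, r], [theta, rho])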

@ -18,7 +18,7 @@ from wafo.transform.models import TrHermite, TrOchi, TrLinear
from wafo.stats import edf, distributions from wafo.stats import edf, distributions
from wafo.misc import (nextpow2, findtp, findrfc, findtc, findcross, from wafo.misc import (nextpow2, findtp, findrfc, findtc, findcross,
ecross, JITImport, DotDict, gravity, findrfc_astm) ecross, JITImport, DotDict, gravity, findrfc_astm)
from wafodata import WafoData from wafodata import PlotData
from wafo.interpolate import SmoothSpline from wafo.interpolate import SmoothSpline
from scipy.interpolate.interpolate import interp1d from scipy.interpolate.interpolate import interp1d
from scipy.integrate.quadrature import cumtrapz #@UnresolvedImport from scipy.integrate.quadrature import cumtrapz #@UnresolvedImport
@ -56,7 +56,7 @@ __all__ = ['TimeSeries', 'LevelCrossings', 'CyclePairs', 'TurningPoints',
def _invchi2(q, df): def _invchi2(q, df):
return special.chdtri(df, q) return special.chdtri(df, q)
class LevelCrossings(WafoData): class LevelCrossings(PlotData):
''' '''
Container class for Level crossing data objects in WAFO Container class for Level crossing data objects in WAFO
@ -109,7 +109,7 @@ class LevelCrossings(WafoData):
cmax = self.data[icmax] cmax = self.data[icmax]
x = (self.args - self.mean) / self.sigma x = (self.args - self.mean) / self.sigma
y = cmax * exp(-x ** 2 / 2.0) y = cmax * exp(-x ** 2 / 2.0)
self.children = [WafoData(y, self.args)] self.children = [PlotData(y, self.args)]
def extrapolate(self, u_min=None, u_max=None, method='ml', dist='genpar', plotflag=0): def extrapolate(self, u_min=None, u_max=None, method='ml', dist='genpar', plotflag=0):
''' '''
@ -663,7 +663,7 @@ def test_levelcrossings_extrapolate():
s = x[:,1].std() s = x[:,1].std()
lc_gpd = lc.extrapolate(-2*s, 2*s, dist='rayleigh') #@UnusedVariable lc_gpd = lc.extrapolate(-2*s, 2*s, dist='rayleigh') #@UnusedVariable
class CyclePairs(WafoData): class CyclePairs(PlotData):
''' '''
Container class for Cycle Pairs data objects in WAFO Container class for Cycle Pairs data objects in WAFO
@ -839,7 +839,7 @@ class CyclePairs(WafoData):
ylab = 'Intensity [count/sec]' ylab = 'Intensity [count/sec]'
return LevelCrossings(dcount, levels, mean=self.mean, sigma=self.sigma, ylab=ylab, intensity=intensity) return LevelCrossings(dcount, levels, mean=self.mean, sigma=self.sigma, ylab=ylab, intensity=intensity)
class TurningPoints(WafoData): class TurningPoints(PlotData):
''' '''
Container class for Turning Points data objects in WAFO Container class for Turning Points data objects in WAFO
@ -1029,7 +1029,7 @@ def mat2timeseries(x):
""" """
return TimeSeries(x[:, 1::], x[:, 0].ravel()) return TimeSeries(x[:, 1::], x[:, 0].ravel())
class TimeSeries(WafoData): class TimeSeries(PlotData):
''' '''
Container class for 1D TimeSeries data objects in WAFO Container class for 1D TimeSeries data objects in WAFO
@ -1177,7 +1177,7 @@ class TimeSeries(WafoData):
#cumsum = np.cumsum #cumsum = np.cumsum
acf = _wafocov.CovData1D(R[lags], t) acf = _wafocov.CovData1D(R[lags], t)
acf.sigma = sqrt(r_[ 0, r0 ** 2 , r0 ** 2 + 2 * cumsum(R[1:] ** 2)] / Ncens) acf.sigma = sqrt(r_[ 0, r0 ** 2 , r0 ** 2 + 2 * cumsum(R[1:] ** 2)] / Ncens)
acf.children = [WafoData(-2. * acf.sigma[lags], t), WafoData(2. * acf.sigma[lags], t)] acf.children = [PlotData(-2. * acf.sigma[lags], t), PlotData(2. * acf.sigma[lags], t)]
acf.plot_args_children = ['r:'] acf.plot_args_children = ['r:']
acf.norm = norm acf.norm = norm
return acf return acf
@ -2244,167 +2244,167 @@ class TimeSeries(WafoData):
#wafostamp #wafostamp
return figs return figs
def hyperbolic_ratio(a, b, sa, sb): #def hyperbolic_ratio(a, b, sa, sb):
''' # '''
Return ratio of hyperbolic functions # Return ratio of hyperbolic functions
to allow extreme variations of arguments. # to allow extreme variations of arguments.
#
Parameters # Parameters
---------- # ----------
a, b : array-like # a, b : array-like
arguments vectors of the same size # arguments vectors of the same size
sa, sb : scalar integers # sa, sb : scalar integers
defining the hyperbolic function used, i.e., f(x,1)=cosh(x), f(x,-1)=sinh(x) # defining the hyperbolic function used, i.e., f(x,1)=cosh(x), f(x,-1)=sinh(x)
#
Returns # Returns
------- # -------
r : ndarray # r : ndarray
f(a,sa)/f(b,sb), ratio of hyperbolic functions of same # f(a,sa)/f(b,sb), ratio of hyperbolic functions of same
size as a and b # size as a and b
Examples # Examples
-------- # --------
>>> x = [-2,0,2] # >>> x = [-2,0,2]
>>> hyperbolic_ratio(x,1,1,1) # gives r=cosh(x)/cosh(1) # >>> hyperbolic_ratio(x,1,1,1) # gives r=cosh(x)/cosh(1)
array([ 2.438107 , 0.64805427, 2.438107 ]) # array([ 2.438107 , 0.64805427, 2.438107 ])
>>> hyperbolic_ratio(x,1,1,-1) # gives r=cosh(x)/sinh(1) # >>> hyperbolic_ratio(x,1,1,-1) # gives r=cosh(x)/sinh(1)
array([ 3.20132052, 0.85091813, 3.20132052]) # array([ 3.20132052, 0.85091813, 3.20132052])
>>> hyperbolic_ratio(x,1,-1,1) # gives r=sinh(x)/cosh(1) # >>> hyperbolic_ratio(x,1,-1,1) # gives r=sinh(x)/cosh(1)
array([-2.35040239, 0. , 2.35040239]) # array([-2.35040239, 0. , 2.35040239])
>>> hyperbolic_ratio(x,1,-1,-1) # gives r=sinh(x)/sinh(1) # >>> hyperbolic_ratio(x,1,-1,-1) # gives r=sinh(x)/sinh(1)
array([-3.08616127, 0. , 3.08616127]) # array([-3.08616127, 0. , 3.08616127])
>>> hyperbolic_ratio(1,x,1,1) # gives r=cosh(1)/cosh(x) # >>> hyperbolic_ratio(1,x,1,1) # gives r=cosh(1)/cosh(x)
array([ 0.41015427, 1.54308063, 0.41015427]) # array([ 0.41015427, 1.54308063, 0.41015427])
>>> hyperbolic_ratio(1,x,1,-1) # gives r=cosh(1)/sinh(x) # >>> hyperbolic_ratio(1,x,1,-1) # gives r=cosh(1)/sinh(x)
array([-0.42545906, inf, 0.42545906]) # array([-0.42545906, inf, 0.42545906])
>>> hyperbolic_ratio(1,x,-1,1) # gives r=sinh(1)/cosh(x) # >>> hyperbolic_ratio(1,x,-1,1) # gives r=sinh(1)/cosh(x)
array([ 0.3123711 , 1.17520119, 0.3123711 ]) # array([ 0.3123711 , 1.17520119, 0.3123711 ])
>>> hyperbolic_ratio(1,x,-1,-1) # gives r=sinh(1)/sinh(x) # >>> hyperbolic_ratio(1,x,-1,-1) # gives r=sinh(1)/sinh(x)
array([-0.32402714, inf, 0.32402714]) # array([-0.32402714, inf, 0.32402714])
#
See also # See also
-------- # --------
tran # tran
''' # '''
ak, bk, sak, sbk = np.atleast_1d(a, b, sign(sa), sign(sb)) # ak, bk, sak, sbk = np.atleast_1d(a, b, sign(sa), sign(sb))
# old call # # old call
#return exp(ak-bk)*(1+sak*exp(-2*ak))/(1+sbk*exp(-2*bk)) # #return exp(ak-bk)*(1+sak*exp(-2*ak))/(1+sbk*exp(-2*bk))
# TODO: Does not always handle division by zero correctly # # TODO: Does not always handle division by zero correctly
#
signRatio = np.where(sak * ak < 0, sak, 1) # signRatio = np.where(sak * ak < 0, sak, 1)
signRatio = np.where(sbk * bk < 0, sbk * signRatio, signRatio) # signRatio = np.where(sbk * bk < 0, sbk * signRatio, signRatio)
#
bk = np.abs(bk) # bk = np.abs(bk)
ak = np.abs(ak) # ak = np.abs(ak)
#
num = np.where(sak < 0, expm1(-2 * ak), 1 + exp(-2 * ak)) # num = np.where(sak < 0, expm1(-2 * ak), 1 + exp(-2 * ak))
den = np.where(sbk < 0, expm1(-2 * bk), 1 + exp(-2 * bk)) # den = np.where(sbk < 0, expm1(-2 * bk), 1 + exp(-2 * bk))
iden = np.ones(den.shape) * inf # iden = np.ones(den.shape) * inf
ind = np.flatnonzero(den != 0) # ind = np.flatnonzero(den != 0)
iden.flat[ind] = 1.0 / den[ind] # iden.flat[ind] = 1.0 / den[ind]
val = np.where(num == den, 1, num * iden) # val = np.where(num == den, 1, num * iden)
return signRatio * exp(ak - bk) * val #((sak+exp(-2*ak))/(sbk+exp(-2*bk))) # return signRatio * exp(ak - bk) * val #((sak+exp(-2*ak))/(sbk+exp(-2*bk)))
#
def sensor_typeid(*sensortypes): #def sensor_typeid(*sensortypes):
''' Return ID for sensortype name # ''' Return ID for sensortype name
#
Parameter # Parameter
--------- # ---------
sensortypes : list of strings defining the sensortype # sensortypes : list of strings defining the sensortype
#
Returns # Returns
------- # -------
sensorids : list of integers defining the sensortype # sensorids : list of integers defining the sensortype
#
Valid senor-ids and -types for time series are as follows: # Valid senor-ids and -types for time series are as follows:
0, 'n' : Surface elevation (n=Eta) # 0, 'n' : Surface elevation (n=Eta)
1, 'n_t' : Vertical surface velocity # 1, 'n_t' : Vertical surface velocity
2, 'n_tt' : Vertical surface acceleration # 2, 'n_tt' : Vertical surface acceleration
3, 'n_x' : Surface slope in x-direction # 3, 'n_x' : Surface slope in x-direction
4, 'n_y' : Surface slope in y-direction # 4, 'n_y' : Surface slope in y-direction
5, 'n_xx' : Surface curvature in x-direction # 5, 'n_xx' : Surface curvature in x-direction
6, 'n_yy' : Surface curvature in y-direction # 6, 'n_yy' : Surface curvature in y-direction
7, 'n_xy' : Surface curvature in xy-direction # 7, 'n_xy' : Surface curvature in xy-direction
8, 'P' : Pressure fluctuation about static MWL pressure # 8, 'P' : Pressure fluctuation about static MWL pressure
9, 'U' : Water particle velocity in x-direction # 9, 'U' : Water particle velocity in x-direction
10, 'V' : Water particle velocity in y-direction # 10, 'V' : Water particle velocity in y-direction
11, 'W' : Water particle velocity in z-direction # 11, 'W' : Water particle velocity in z-direction
12, 'U_t' : Water particle acceleration in x-direction # 12, 'U_t' : Water particle acceleration in x-direction
13, 'V_t' : Water particle acceleration in y-direction # 13, 'V_t' : Water particle acceleration in y-direction
14, 'W_t' : Water particle acceleration in z-direction # 14, 'W_t' : Water particle acceleration in z-direction
15, 'X_p' : Water particle displacement in x-direction from its mean position # 15, 'X_p' : Water particle displacement in x-direction from its mean position
16, 'Y_p' : Water particle displacement in y-direction from its mean position # 16, 'Y_p' : Water particle displacement in y-direction from its mean position
17, 'Z_p' : Water particle displacement in z-direction from its mean position # 17, 'Z_p' : Water particle displacement in z-direction from its mean position
#
Example: # Example:
>>> sensor_typeid('W','v') # >>> sensor_typeid('W','v')
[11, 10] # [11, 10]
>>> sensor_typeid('rubbish') # >>> sensor_typeid('rubbish')
[nan] # [nan]
#
See also # See also
-------- # --------
sensor_type # sensor_type
''' # '''
#
sensorid_table = dict(n=0, n_t=1, n_tt=2, n_x=3, n_y=4, n_xx=5, # sensorid_table = dict(n=0, n_t=1, n_tt=2, n_x=3, n_y=4, n_xx=5,
n_yy=6, n_xy=7, p=8, u=9, v=10, w=11, u_t=12, # n_yy=6, n_xy=7, p=8, u=9, v=10, w=11, u_t=12,
v_t=13, w_t=14, x_p=15, y_p=16, z_p=17) # v_t=13, w_t=14, x_p=15, y_p=16, z_p=17)
try: # try:
return [sensorid_table.get(name.lower(), nan) for name in sensortypes] # return [sensorid_table.get(name.lower(), nan) for name in sensortypes]
except: # except:
raise ValueError('Input must be a string!') # raise ValueError('Input must be a string!')
#
#
#
def sensor_type(*sensorids): #def sensor_type(*sensorids):
''' # '''
Return sensortype name # Return sensortype name
#
Parameter # Parameter
--------- # ---------
sensorids : vector or list of integers defining the sensortype # sensorids : vector or list of integers defining the sensortype
#
Returns # Returns
------- # -------
sensornames : tuple of strings defining the sensortype # sensornames : tuple of strings defining the sensortype
Valid senor-ids and -types for time series are as follows: # Valid senor-ids and -types for time series are as follows:
0, 'n' : Surface elevation (n=Eta) # 0, 'n' : Surface elevation (n=Eta)
1, 'n_t' : Vertical surface velocity # 1, 'n_t' : Vertical surface velocity
2, 'n_tt' : Vertical surface acceleration # 2, 'n_tt' : Vertical surface acceleration
3, 'n_x' : Surface slope in x-direction # 3, 'n_x' : Surface slope in x-direction
4, 'n_y' : Surface slope in y-direction # 4, 'n_y' : Surface slope in y-direction
5, 'n_xx' : Surface curvature in x-direction # 5, 'n_xx' : Surface curvature in x-direction
6, 'n_yy' : Surface curvature in y-direction # 6, 'n_yy' : Surface curvature in y-direction
7, 'n_xy' : Surface curvature in xy-direction # 7, 'n_xy' : Surface curvature in xy-direction
8, 'P' : Pressure fluctuation about static MWL pressure # 8, 'P' : Pressure fluctuation about static MWL pressure
9, 'U' : Water particle velocity in x-direction # 9, 'U' : Water particle velocity in x-direction
10, 'V' : Water particle velocity in y-direction # 10, 'V' : Water particle velocity in y-direction
11, 'W' : Water particle velocity in z-direction # 11, 'W' : Water particle velocity in z-direction
12, 'U_t' : Water particle acceleration in x-direction # 12, 'U_t' : Water particle acceleration in x-direction
13, 'V_t' : Water particle acceleration in y-direction # 13, 'V_t' : Water particle acceleration in y-direction
14, 'W_t' : Water particle acceleration in z-direction # 14, 'W_t' : Water particle acceleration in z-direction
15, 'X_p' : Water particle displacement in x-direction from its mean position # 15, 'X_p' : Water particle displacement in x-direction from its mean position
16, 'Y_p' : Water particle displacement in y-direction from its mean position # 16, 'Y_p' : Water particle displacement in y-direction from its mean position
17, 'Z_p' : Water particle displacement in z-direction from its mean position # 17, 'Z_p' : Water particle displacement in z-direction from its mean position
#
Example: # Example:
>>> sensor_type(range(3)) # >>> sensor_type(range(3))
('n', 'n_t', 'n_tt') # ('n', 'n_t', 'n_tt')
#
See also # See also
-------- # --------
sensor_typeid, tran # sensor_typeid, tran
''' # '''
valid_names = ('n', 'n_t', 'n_tt', 'n_x', 'n_y', 'n_xx', 'n_yy', 'n_xy', # valid_names = ('n', 'n_t', 'n_tt', 'n_x', 'n_y', 'n_xx', 'n_yy', 'n_xy',
'p', 'u', 'v', 'w', 'u_t', 'v_t', 'w_t', 'x_p', 'y_p', 'z_p', # 'p', 'u', 'v', 'w', 'u_t', 'v_t', 'w_t', 'x_p', 'y_p', 'z_p',
nan) # nan)
ids = atleast_1d(*sensorids) # ids = atleast_1d(*sensorids)
if isinstance(ids, list): # if isinstance(ids, list):
ids = hstack(ids) # ids = hstack(ids)
n = len(valid_names) - 1 # n = len(valid_names) - 1
ids = where(((ids < 0) | (n < ids)), n , ids) # ids = where(((ids < 0) | (n < ids)), n , ids)
return tuple(valid_names[i] for i in ids) # return tuple(valid_names[i] for i in ids)
#
#class TransferFunction(object): #class TransferFunction(object):
# ''' # '''
# Class for computing transfer functions based on linear wave theory # Class for computing transfer functions based on linear wave theory
@ -2822,7 +2822,7 @@ def main():
x = np.arange(-2, 2, 0.2) x = np.arange(-2, 2, 0.2)
# Plot 2 objects in one call # Plot 2 objects in one call
d2 = WafoData(np.sin(x), x, xlab='x', ylab='sin', title='sinus') d2 = PlotData(np.sin(x), x, xlab='x', ylab='sin', title='sinus')
d0 = d2.copy() d0 = d2.copy()
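The switch from WafoData to PlotData is a pure class rename; the container is used the same way. A minimal sketch mirroring the call shown above (that plot() returns a handle is an assumption, based on the doctest changes elsewhere in this commit):

    import numpy as np
    x = np.arange(-2, 2, 0.2)
    d2 = PlotData(np.sin(x), x, xlab='x', ylab='sin', title='sinus')
    d0 = d2.copy()
    h = d2.plot()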

@ -21,7 +21,7 @@ from plotbackend import plotbackend as plt
import numpy as np import numpy as np
from numpy.fft import fft, ifft from numpy.fft import fft, ifft
from numpy import (zeros, ones, zeros_like, array, asarray, newaxis, arange, #@UnresolvedImport from numpy import (zeros, ones, zeros_like, array, asarray, newaxis, arange, #@UnresolvedImport
logical_or, any, pi, cos, round, diff, all, r_, exp, #atleast_1d, hstack,#@UnresolvedImport logical_or, any, pi, cos, round, diff, all, r_, exp, atleast_1d, # hstack,#@UnresolvedImport
where, extract, dot, linalg, sign, concatenate, floor, isreal, conj, remainder, #@UnresolvedImport where, extract, dot, linalg, sign, concatenate, floor, isreal, conj, remainder, #@UnresolvedImport
linspace) #@UnresolvedImport linspace) #@UnresolvedImport
from numpy.lib.polynomial import * #@UnusedWildImport from numpy.lib.polynomial import * #@UnusedWildImport

@ -1171,7 +1171,7 @@ class Cheb1d(object):
""" """
return Cheb1d(chebder(self.coeffs,self.a,self.b)) return Cheb1d(chebder(self.coeffs,self.a,self.b))
def main(): def test_doctstrings():
if False: #True: # if False: #True: #
x = np.arange(4) x = np.arange(4)
dx = dct(x) dx = dct(x)
@ -1204,4 +1204,4 @@ def main():
import doctest import doctest
doctest.testmod() doctest.testmod()
if __name__== '__main__': if __name__== '__main__':
main() test_doctstrings()

@ -8,4 +8,4 @@ Spectrum package in WAFO Toolbox.
from core import * from core import *
#SpecData1D, SpecData2D, cltext #SpecData1D, SpecData2D, cltext
import models import models
import dispersion_relation from wafo.wave_theory import dispersion_relation
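With this change the dispersion-relation helpers are imported from the wave_theory subpackage. A hedged import sketch using the paths introduced by this commit:

    from wafo.wave_theory import dispersion_relation as wsd
    w, theta = wsd.k2w([0.01, 0.21, 0.41])   # wave number -> angular frequency
    k1, k2 = wsd.w2k(w)                      # and back again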

@ -1,15 +1,15 @@
from __future__ import division from __future__ import division
from wafo.misc import meshgrid, gravity, cart2pol, pol2cart from wafo.misc import meshgrid, gravity, cart2polar, polar2cart
from wafo.objects import mat2timeseries, TimeSeries from wafo.objects import TimeSeries #mat2timeseries,
import warnings import warnings
import numpy as np import numpy as np
from numpy import (pi, inf, zeros, ones, where, nonzero, #@UnresolvedImport from numpy import (pi, inf, zeros, ones, where, nonzero, #@UnresolvedImport
flatnonzero, ceil, sqrt, exp, log, arctan2, log10, #@UnresolvedImport flatnonzero, ceil, sqrt, exp, log, arctan2, log10, #@UnresolvedImport
tanh, cosh, sinh, random, atleast_1d, maximum, #@UnresolvedImport tanh, cosh, sinh, random, atleast_1d, #@UnresolvedImport
minimum, diff, isnan, any, r_, conj, mod, #@UnresolvedImport minimum, diff, isnan, any, r_, conj, mod, #@UnresolvedImport
hstack, vstack, interp, ravel, finfo, linspace, #@UnresolvedImport hstack, vstack, interp, ravel, finfo, linspace, #@UnresolvedImport
arange, array, nan, newaxis, fliplr, sign) #@UnresolvedImport arange, array, nan, newaxis, sign) #, fliplr, maximum) #@UnresolvedImport
from numpy.fft import fft from numpy.fft import fft
from scipy.integrate import simps, trapz from scipy.integrate import simps, trapz
from scipy.special import erf from scipy.special import erf
@ -17,8 +17,8 @@ from scipy.linalg import toeplitz
import scipy.interpolate as interpolate import scipy.interpolate as interpolate
from wafo.interpolate import stineman_interp from wafo.interpolate import stineman_interp
from dispersion_relation import w2k #, k2w from wafo.wave_theory.dispersion_relation import w2k #, k2w
from wafo.wafodata import WafoData, now from wafo.wafodata import PlotData, now
from wafo.misc import sub_dict_select, nextpow2, discretize, JITImport, findpeaks #, tranproc from wafo.misc import sub_dict_select, nextpow2, discretize, JITImport, findpeaks #, tranproc
from wafo.graphutil import cltext from wafo.graphutil import cltext
from wafo.kdetools import qlevels from wafo.kdetools import qlevels
@ -45,7 +45,7 @@ from wafo.plotbackend import plotbackend
# Trick to avoid error due to circular import # Trick to avoid error due to circular import
#_WAFOCOV = ppimport('wafo.covariance')
_WAFOCOV = JITImport('wafo.covariance') _WAFOCOV = JITImport('wafo.covariance')
@ -156,185 +156,185 @@ def qtf(w, h=inf, g=9.81):
return h_s, h_d , h_dii return h_s, h_d , h_dii
def plotspec(specdata, linetype='b-', flag=1): #def plotspec(specdata, linetype='b-', flag=1):
''' # '''
PLOTSPEC Plot a spectral density # PLOTSPEC Plot a spectral density
#
Parameters # Parameters
---------- # ----------
S : SpecData1D or SpecData2D object # S : SpecData1D or SpecData2D object
defining spectral density. # defining spectral density.
linetype : string # linetype : string
defining color and linetype, see plot for possibilities # defining color and linetype, see plot for possibilities
flag : scalar integer # flag : scalar integer
defining the type of plot # defining the type of plot
1D: # 1D:
1 plots the density, S, (default) # 1 plots the density, S, (default)
2 plot 10log10(S) # 2 plot 10log10(S)
3 plots both the above plots # 3 plots both the above plots
2D: # 2D:
Directional spectra: S(w,theta), S(f,theta) # Directional spectra: S(w,theta), S(f,theta)
1 polar plot S (default) # 1 polar plot S (default)
2 plots spectral density and the directional # 2 plots spectral density and the directional
spreading, int S(w,theta) dw or int S(f,theta) df # spreading, int S(w,theta) dw or int S(f,theta) df
3 plots spectral density and the directional # 3 plots spectral density and the directional
spreading, int S(w,theta)/S(w) dw or int S(f,theta)/S(f) df # spreading, int S(w,theta)/S(w) dw or int S(f,theta)/S(f) df
4 mesh of S # 4 mesh of S
5 mesh of S in polar coordinates # 5 mesh of S in polar coordinates
6 contour plot of S # 6 contour plot of S
7 filled contour plot of S # 7 filled contour plot of S
Wavenumber spectra: S(k1,k2) # Wavenumber spectra: S(k1,k2)
1 contour plot of S (default) # 1 contour plot of S (default)
2 filled contour plot of S # 2 filled contour plot of S
#
Example # Example
------- # -------
>>> import numpy as np # >>> import numpy as np
>>> import wafo.spectrum as ws # >>> import wafo.spectrum as ws
>>> Sj = ws.models.Jonswap(Hm0=3, Tp=7) # >>> Sj = ws.models.Jonswap(Hm0=3, Tp=7)
>>> S = Sj.tospecdata() # >>> S = Sj.tospecdata()
>>> ws.plotspec(S,flag=1) # >>> ws.plotspec(S,flag=1)
#
S = demospec('dir'); S2 = mkdspec(jonswap,spreading); # S = demospec('dir'); S2 = mkdspec(jonswap,spreading);
plotspec(S,2), hold on # plotspec(S,2), hold on
plotspec(S,3,'g') % Same as previous fig. due to frequency independent spreading # plotspec(S,3,'g') % Same as previous fig. due to frequency independent spreading
plotspec(S2,2,'r') % Not the same as previous figs. due to frequency dependent spreading # plotspec(S2,2,'r') % Not the same as previous figs. due to frequency dependent spreading
plotspec(S2,3,'m') # plotspec(S2,3,'m')
% transform from angular frequency and radians to frequency and degrees # % transform from angular frequency and radians to frequency and degrees
Sf = ttspec(S,'f','d'); clf # Sf = ttspec(S,'f','d'); clf
plotspec(Sf,2), # plotspec(Sf,2),
#
See also dat2spec, createspec, simpson # See also dat2spec, createspec, simpson
''' # '''
#
# label the contour levels # # label the contour levels
txtFlag = 0; # txtFlag = 0;
LegendOn = 1; # LegendOn = 1;
#
#
ftype = specdata.freqtype #options are 'f' and 'w' and 'k' # ftype = specdata.freqtype #options are 'f' and 'w' and 'k'
data = specdata.data # data = specdata.data
if data.ndim == 2: # if data.ndim == 2:
freq = specdata.args[1] # freq = specdata.args[1]
theta = specdata.args[0] # theta = specdata.args[0]
else: # else:
freq = specdata.args # freq = specdata.args
#if isinstance(specdata.args, (list, tuple)): # #if isinstance(specdata.args, (list, tuple)):
#
if ftype == 'w': # if ftype == 'w':
xlbl_txt = 'Frequency [rad/s]'; # xlbl_txt = 'Frequency [rad/s]';
ylbl1_txt = 'S(w) [m^2 s / rad]'; # ylbl1_txt = 'S(w) [m^2 s / rad]';
ylbl3_txt = 'Directional Spectrum'; # ylbl3_txt = 'Directional Spectrum';
zlbl_txt = 'S(w,\theta) [m^2 s / rad^2]'; # zlbl_txt = 'S(w,\theta) [m^2 s / rad^2]';
funit = ' [rad/s]'; # funit = ' [rad/s]';
#Sunit = ' [m^2 s / rad]'; # #Sunit = ' [m^2 s / rad]';
elif ftype == 'f': # elif ftype == 'f':
xlbl_txt = 'Frequency [Hz]'; # xlbl_txt = 'Frequency [Hz]';
ylbl1_txt = 'S(f) [m^2 s]'; # ylbl1_txt = 'S(f) [m^2 s]';
ylbl3_txt = 'Directional Spectrum'; # ylbl3_txt = 'Directional Spectrum';
zlbl_txt = 'S(f,\theta) [m^2 s / rad]'; # zlbl_txt = 'S(f,\theta) [m^2 s / rad]';
funit = ' [Hz]'; # funit = ' [Hz]';
#Sunit = ' [m^2 s ]'; # #Sunit = ' [m^2 s ]';
elif ftype == 'k': # elif ftype == 'k':
xlbl_txt = 'Wave number [rad/m]'; # xlbl_txt = 'Wave number [rad/m]';
ylbl1_txt = 'S(k) [m^3/ rad]'; # ylbl1_txt = 'S(k) [m^3/ rad]';
funit = ' [rad/m]'; # funit = ' [rad/m]';
#Sunit = ' [m^3 / rad]'; # #Sunit = ' [m^3 / rad]';
ylbl4_txt = 'Wave Number Spectrum'; # ylbl4_txt = 'Wave Number Spectrum';
#
else: # else:
raise ValueError('Frequency type unknown') # raise ValueError('Frequency type unknown')
#
#
if hasattr(specdata, 'norm') and specdata.norm : # if hasattr(specdata, 'norm') and specdata.norm :
#Sunit=[]; # #Sunit=[];
funit = []; # funit = [];
ylbl1_txt = 'Normalized Spectral density'; # ylbl1_txt = 'Normalized Spectral density';
ylbl3_txt = 'Normalized Directional Spectrum'; # ylbl3_txt = 'Normalized Directional Spectrum';
ylbl4_txt = 'Normalized Wave Number Spectrum'; # ylbl4_txt = 'Normalized Wave Number Spectrum';
if ftype == 'k': # if ftype == 'k':
xlbl_txt = 'Normalized Wave number'; # xlbl_txt = 'Normalized Wave number';
else: # else:
xlbl_txt = 'Normalized Frequency'; # xlbl_txt = 'Normalized Frequency';
#
ylbl2_txt = 'Power spectrum (dB)'; # ylbl2_txt = 'Power spectrum (dB)';
#
phi = specdata.phi # phi = specdata.phi
#
spectype = specdata.type.lower() # spectype = specdata.type.lower()
stype = spectype[-3::] # stype = spectype[-3::]
if stype in ('enc', 'req', 'k1d') : #1D plot # if stype in ('enc', 'req', 'k1d') : #1D plot
Fn = freq[-1] # Nyquist frequency # Fn = freq[-1] # Nyquist frequency
indm = findpeaks(data, n=4) # indm = findpeaks(data, n=4)
maxS = data.max() # maxS = data.max()
# if isfield(S,'CI') && ~isempty(S.CI), ## if isfield(S,'CI') && ~isempty(S.CI),
# maxS = maxS*S.CI(2); ## maxS = maxS*S.CI(2);
# txtCI = [num2str(100*S.p), '% CI']; ## txtCI = [num2str(100*S.p), '% CI'];
# end ## end
#
Fp = freq[indm]# %peak frequency/wave number # Fp = freq[indm]# %peak frequency/wave number
#
if len(indm) == 1: # if len(indm) == 1:
txt = [('fp = %0.2g' % Fp) + funit] # txt = [('fp = %0.2g' % Fp) + funit]
else: # else:
txt = [] # txt = []
for i, fp in enumerate(Fp.tolist()): # for i, fp in enumerate(Fp.tolist()):
txt.append(('fp%d = %0.2g' % (i, fp)) + funit) # txt.append(('fp%d = %0.2g' % (i, fp)) + funit)
#
txt = ''.join(txt) # txt = ''.join(txt)
if (flag == 3): # if (flag == 3):
plotbackend.subplot(2, 1, 1) # plotbackend.subplot(2, 1, 1)
if (flag == 1) or (flag == 3):#% Plot in normal scale # if (flag == 1) or (flag == 3):#% Plot in normal scale
plotbackend.plot(np.vstack([Fp, Fp]), np.vstack([zeros(len(indm)), data.take(indm)]), ':', label=txt) # plotbackend.plot(np.vstack([Fp, Fp]), np.vstack([zeros(len(indm)), data.take(indm)]), ':', label=txt)
plotbackend.plot(freq, data, linetype) # plotbackend.plot(freq, data, linetype)
specdata.labels.labelfig() # specdata.labels.labelfig()
# if isfield(S,'CI'), ## if isfield(S,'CI'),
# plot(freq,S.S*S.CI(1), 'r:' ) ## plot(freq,S.S*S.CI(1), 'r:' )
# plot(freq,S.S*S.CI(2), 'r:' ) ## plot(freq,S.S*S.CI(2), 'r:' )
#
a = plotbackend.axis() # a = plotbackend.axis()
#
a1 = Fn # a1 = Fn
if (Fp > 0): # if (Fp > 0):
a1 = max(min(Fn, 10 * max(Fp)), a[1]); # a1 = max(min(Fn, 10 * max(Fp)), a[1]);
#
plotbackend.axis([0, a1 , 0, max(1.01 * maxS, a[3])]) # plotbackend.axis([0, a1 , 0, max(1.01 * maxS, a[3])])
#plotbackend.title('Spectral density') # #plotbackend.title('Spectral density')
#plotbackend.xlabel(xlbl_txt) # #plotbackend.xlabel(xlbl_txt)
#plotbackend.ylabel(ylbl1_txt) # #plotbackend.ylabel(ylbl1_txt)
#
#
if (flag == 3): # if (flag == 3):
plotbackend.subplot(2, 1, 2) # plotbackend.subplot(2, 1, 2)
#
if (flag == 2) or (flag == 3) : # Plot in logaritmic scale # if (flag == 2) or (flag == 3) : # Plot in logaritmic scale
ind = np.flatnonzero(data > 0) # ind = np.flatnonzero(data > 0)
#
plotbackend.plot(np.vstack([Fp, Fp]), # plotbackend.plot(np.vstack([Fp, Fp]),
np.vstack((min(10 * log10(data.take(ind) / maxS)).repeat(len(Fp)), # np.vstack((min(10 * log10(data.take(ind) / maxS)).repeat(len(Fp)),
10 * log10(data.take(indm) / maxS))), ':',label=txt) # 10 * log10(data.take(indm) / maxS))), ':',label=txt)
# hold on ## hold on
# if isfield(S,'CI'), ## if isfield(S,'CI'),
# plot(freq(ind),10*log10(S.S(ind)*S.CI(1)/maxS), 'r:' ) ## plot(freq(ind),10*log10(S.S(ind)*S.CI(1)/maxS), 'r:' )
# plot(freq(ind),10*log10(S.S(ind)*S.CI(2)/maxS), 'r:' ) ## plot(freq(ind),10*log10(S.S(ind)*S.CI(2)/maxS), 'r:' )
# end ## end
plotbackend.plot(freq[ind], 10 * log10(data[ind] / maxS), linetype) # plotbackend.plot(freq[ind], 10 * log10(data[ind] / maxS), linetype)
#
a = plotbackend.axis() # a = plotbackend.axis()
#
a1 = Fn # a1 = Fn
if (Fp > 0): # if (Fp > 0):
a1 = max(min(Fn, 10 * max(Fp)), a[1]); # a1 = max(min(Fn, 10 * max(Fp)), a[1]);
#
plotbackend.axis([0, a1 , -20, max(1.01 * 10 * log10(1), a[3])]) # plotbackend.axis([0, a1 , -20, max(1.01 * 10 * log10(1), a[3])])
#
specdata.labels.labelfig() # specdata.labels.labelfig()
#plotbackend.title('Spectral density') # #plotbackend.title('Spectral density')
#plotbackend.xlabel(xlbl_txt) # #plotbackend.xlabel(xlbl_txt)
plotbackend.ylabel(ylbl2_txt) # plotbackend.ylabel(ylbl2_txt)
#
if LegendOn: # if LegendOn:
plotbackend.legend() # plotbackend.legend()
# if isfield(S,'CI'), # if isfield(S,'CI'),
# legend(txt{:},txtCI,1) # legend(txt{:},txtCI,1)
# else # else
@ -384,7 +384,7 @@ def plotspec(specdata, linetype='b-', flag=1):
# h = polar([0 2*pi],[0 freq(end)]); # h = polar([0 2*pi],[0 freq(end)]);
# delete(h);hold on # delete(h);hold on
# [X,Y]=meshgrid(S.theta,freq); # [X,Y]=meshgrid(S.theta,freq);
# [X,Y]=pol2cart(X,Y); # [X,Y]=polar2cart(X,Y);
# contour(X,Y,S.S',lintype) # contour(X,Y,S.S',lintype)
# else # else
# if (abs(thmax-thmin)<3*pi), % angle given in radians # if (abs(thmax-thmin)<3*pi), % angle given in radians
@ -458,7 +458,7 @@ def plotspec(specdata, linetype='b-', flag=1):
# %h=polar([0 2*pi],[0 freq(end)]); # %h=polar([0 2*pi],[0 freq(end)]);
# %delete(h);hold on # %delete(h);hold on
# [X,Y]=meshgrid(S.theta-phi,freq); # [X,Y]=meshgrid(S.theta-phi,freq);
# [X,Y]=pol2cart(X,Y); # [X,Y]=polar2cart(X,Y);
# mesh(X,Y,S.S') # mesh(X,Y,S.S')
# % display the unit circle beneath the surface # % display the unit circle beneath the surface
# hold on, mesh(X,Y,zeros(size(S.S'))),hold off # hold on, mesh(X,Y,zeros(size(S.S'))),hold off
@ -626,7 +626,7 @@ def plotspec(specdata, linetype='b-', flag=1):
class SpecData1D(WafoData): class SpecData1D(PlotData):
""" """
Container class for 1D spectrum data objects in WAFO Container class for 1D spectrum data objects in WAFO
@ -661,7 +661,7 @@ class SpecData1D(WafoData):
See also See also
-------- --------
WafoData PlotData
CovData CovData
""" """
@ -1219,7 +1219,7 @@ class SpecData1D(WafoData):
dh = h[1] - h[0] dh = h[1] - h[0]
uvdens *= dh * dh uvdens *= dh * dh
mmpdf = WafoData(uvdens, args=(h, h), xlab='max [m]', ylab='min [m]', mmpdf = PlotData(uvdens, args=(h, h), xlab='max [m]', ylab='min [m]',
title='Joint density of maximum and minimum') title='Joint density of maximum and minimum')
try: try:
pl = [10, 30, 50, 70, 90, 95, 99, 99.9] pl = [10, 30, 50, 70, 90, 95, 99, 99.9]
@ -1388,7 +1388,7 @@ class SpecData1D(WafoData):
xtxt = 'period [s]' xtxt = 'period [s]'
Htxt = '%s_{v =%2.5g}' % (Htxt, u) Htxt = '%s_{v =%2.5g}' % (Htxt, u)
pdf = WafoData(f / A, t * A, title=Htxt, xlab=xtxt) pdf = PlotData(f / A, t * A, title=Htxt, xlab=xtxt)
pdf.err = err / A pdf.err = err / A
pdf.u = u pdf.u = u
pdf.options = opts pdf.options = opts
@ -3196,7 +3196,7 @@ class SpecData1D(WafoData):
self.labels.ylab = labels[1] self.labels.ylab = labels[1]
self.labels.zlab = labels[2] self.labels.zlab = labels[2]
class SpecData2D(WafoData): class SpecData2D(PlotData):
""" Container class for 2D spectrum data objects in WAFO """ Container class for 2D spectrum data objects in WAFO
Member variables Member variables
@ -3221,7 +3221,7 @@ class SpecData2D(WafoData):
See also See also
-------- --------
WafoData PlotData
CovData CovData
""" """
@ -3342,8 +3342,8 @@ class SpecData2D(WafoData):
# Do a physical rotation of spectrum # Do a physical rotation of spectrum
[k, k2] = meshgrid(*self.args) [k, k2] = meshgrid(*self.args)
[th, r] = cart2pol(k, k2) [th, r] = cart2polar(k, k2)
[k, k2] = pol2cart(th + self.phi, r) [k, k2] = polar2cart(th + self.phi, r)
ki1, ki2 = self.args ki1, ki2 = self.args
Sn = interp2(ki1, ki2, self.data, k, k2, method) Sn = interp2(ki1, ki2, self.data, k, k2, method)
self.data = np.where(np.isnan(Sn), 0, Sn) self.data = np.where(np.isnan(Sn), 0, Sn)
@ -3632,7 +3632,7 @@ def main():
print('done') print('done')
def test_mm_pdf(): def test_mm_pdf():
import numpy as np
import wafo.spectrum.models as sm import wafo.spectrum.models as sm
Sj = sm.Jonswap(Hm0=7, Tp=11) Sj = sm.Jonswap(Hm0=7, Tp=11)
w = np.linspace(0, 4, 256) w = np.linspace(0, 4, 256)
@ -3640,10 +3640,12 @@ def test_mm_pdf():
S = sm.SpecData1D(Sj(w), w) # Alternatively do it manually S = sm.SpecData1D(Sj(w), w) # Alternatively do it manually
mm = S.to_mm_pdf() mm = S.to_mm_pdf()
def test_docstrings():
import doctest
doctest.testmod()
if __name__ == '__main__': if __name__ == '__main__':
if False : #True: # test_docstrings()
import doctest #test_mm_pdf()
doctest.testmod()
else:
test_mm_pdf()
#main() #main()
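Most of plotspec is commented out in this commit, but the docstring example it carried still illustrates the SpecData1D workflow; a hedged sketch based on that example and on the test_mm_pdf hunk above (SpecData1D is now a PlotData subclass, so plot() is assumed available):

    import wafo.spectrum.models as sm
    Sj = sm.Jonswap(Hm0=3, Tp=7)
    S = Sj.tospecdata()        # SpecData1D object
    h = S.plot()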

@ -1,206 +0,0 @@
"""
Dispersion relation module
--------------------------
k2w - Translates from wave number to frequency
w2k - Translates from frequency to wave number
"""
import warnings
#import numpy as np
from numpy import (atleast_1d, sqrt, ones_like, zeros_like, arctan2, where, tanh, any, #@UnresolvedImport
sin, cos, sign, inf, flatnonzero, finfo, cosh, abs) #@UnresolvedImport
__all__ = ['k2w', 'w2k']
def k2w(k1, k2=0e0, h=inf, g=9.81, u1=0e0, u2=0e0):
''' Translates from wave number to frequency
using the dispersion relation
Parameters
----------
k1 : array-like
wave numbers [rad/m].
k2 : array-like, optional
second dimension wave number
h : real scalar, optional
water depth [m].
g : real scalar, optional
acceleration of gravity, see gravity
u1, u2 : real scalars, optional
current velocity [m/s] along dimension 1 and 2.
note: when u1!=0 | u2!=0 then theta is not calculated correctly
Returns
-------
w : ndarray
angular frequency [rad/s].
theta : ndarray
direction [rad].
Dispersion relation
-------------------
w = sqrt(g*K*tanh(K*h)) ( 0 < w < inf)
theta = arctan2(k2,k1) (-pi < theta < pi)
where
K = sqrt(k1**2+k2**2)
The shape of w and theta is the common shape of k1 and k2 according to the
numpy broadcasting rules.
See also
--------
w2k
Example
-------
>>> from numpy import arange
>>> import wafo.spectrum.dispersion_relation as wsd
>>> wsd.k2w(arange(0.01,.5,0.2))[0]
array([ 0.3132092 , 1.43530485, 2.00551739])
>>> wsd.k2w(arange(0.01,.5,0.2),h=20)[0]
array([ 0.13914927, 1.43498213, 2.00551724])
'''
k1i, k2i, hi, gi, u1i, u2i = atleast_1d(k1, k2, h, g, u1, u2)
if k1i.size == 0:
return zeros_like(k1i)
ku1 = k1i*u1i
ku2 = k2i*u2i
theta = arctan2(k2, k1)
k = sqrt(k1i**2+k2i**2)
w = where(k>0, ku1+ku2+sqrt(gi*k*tanh(k*hi)), 0.0)
cond = (w<0)
if any(cond):
txt0 = '''
Waves and current are in opposite directions
making some of the frequencies negative.
Here we are forcing the negative frequencies to zero.
'''
warnings.warn(txt0)
w = where(cond, 0.0, w) # force w to zero
return w, theta
def w2k(w, theta=0.0, h=inf, g=9.81, count_limit=100):
'''
Translates from frequency to wave number
using the dispersion relation
Parameters
----------
w : array-like
angular frequency [rad/s].
theta : array-like, optional
direction [rad].
h : real scalar, optional
water depth [m].
g : real scalar or array-like of size 2.
constant of gravity [m/s**2] or 3D normalizing constant
Returns
-------
k1, k2 : ndarray
wave numbers [rad/m] along dimension 1 and 2.
Description
-----------
Uses Newton Raphson method to find the wave number k in the dispersion relation
w**2= g*k*tanh(k*h).
The solution k(w) => k1 = k(w)*cos(theta)
k2 = k(w)*sin(theta)
The size of k1,k2 is the common shape of w and theta according to numpy
broadcasting rules. If w or theta is scalar it functions as a constant
matrix of the same shape as the other.
Example
-------
>>> import pylab as plb
>>> import wafo.spectrum.dispersion_relation as wsd
>>> w = plb.linspace(0,3);
>>> h = plb.plot(w,w2k(w)[0])
>>> wsd.w2k(range(4))[0]
array([ 0. , 0.1019368 , 0.4077472 , 0.91743119])
>>> wsd.w2k(range(4),h=20)[0]
array([ 0. , 0.10503601, 0.40774726, 0.91743119])
>>> plb.close('all')
See also
--------
k2w
'''
wi, th, hi, gi = atleast_1d(w, theta, h, g)
if wi.size == 0:
return zeros_like(wi)
k = 1.0*sign(wi)*wi**2.0 / gi[0] # deep water
if (hi > 10. ** 25).all():
k2 = k*sin(th)*gi[0]/gi[-1] #size np x nf
k1 = k*cos(th)
return k1, k2
if gi.size > 1:
txt0 = '''
Finite depth in combination with 3D normalization (len(g)=2) is not implemented yet.
'''
raise ValueError(txt0)
find = flatnonzero
eps = finfo(float).eps
oshape = k.shape
wi, k, hi = wi.ravel(), k.ravel(), hi.ravel()
# Newton's Method
# Permit no more than count_limit iterations.
hi = hi * ones_like(k)
hn = zeros_like(k)
ix = find((wi<0) | (0<wi))
# Break out of the iteration loop for three reasons:
# 1) the last update is very small (compared to x)
# 2) the last update is very small (compared to sqrt(eps))
# 3) There are more than 100 iterations. This should NEVER happen.
count = 0
while (ix.size>0 and count < count_limit):
ki = k[ix]
kh = ki * hi[ix]
hn[ix] = (ki*tanh(kh)-wi[ix]**2.0/gi)/(tanh(kh)+kh/(cosh(kh)**2.0))
knew = ki - hn[ix]
# Make sure that the current guess is not zero.
# When Newton's Method suggests steps that lead to zero guesses
# take a step 9/10ths of the way to zero:
ksmall = find(abs(knew)==0)
if ksmall.size>0:
knew[ksmall] = ki[ksmall] / 10.0
hn[ix[ksmall]] = ki[ksmall]-knew[ksmall]
k[ix] = knew
# disp(['Iteration ',num2str(count),' Number of points left: ' num2str(length(ix)) ]),
ix = find((abs(hn) > sqrt(eps)*abs(k)) * abs(hn) > sqrt(eps))
count += 1
if count == count_limit:
txt1 = ''' W2K did not converge.
The maximum error in the last step was: %13.8f''' % max(hn[ix])
warnings.warn(txt1)
k.shape = oshape
k2 = k*sin(th)
k1 = k*cos(th)
return k1, k2
def main():
import doctest
doctest.testmod()
if __name__ == '__main__':
main()
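The deleted module above (moved to wafo.wave_theory by this commit) solves w**2 = g*k*tanh(k*h) for k with Newton's method. A self-contained, simplified sketch of that iteration (scalar depth, no currents, no 3D normalization; not the library function itself):

    import numpy as np

    def w2k_newton(w, h=np.inf, g=9.81, tol=1e-10, maxiter=100):
        # Solve w**2 = g*k*tanh(k*h) for k, starting from the deep-water guess.
        w = np.atleast_1d(w).astype(float)
        k = w ** 2 / g                       # exact for infinite depth
        if np.isinf(h):
            return k
        active = w > 0                       # k = 0 is already exact for w = 0
        for _ in range(maxiter):
            ka = k[active]
            f = g * ka * np.tanh(ka * h) - w[active] ** 2
            df = g * (np.tanh(ka * h) + ka * h / np.cosh(ka * h) ** 2)
            step = f / df
            k[active] = ka - step
            if np.all(np.abs(step) <= tol * np.abs(ka)):
                break
        return k

    w2k_newton(np.arange(4), h=20)   # approx. [0., 0.10504, 0.40775, 0.91743]

The values agree with the doctest retained in test_dispersion_relation below.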

@ -24,9 +24,9 @@ def test_w2k():
array([ 0. , 0.10503601, 0.40774726, 0.91743119]) array([ 0. , 0.10503601, 0.40774726, 0.91743119])
''' '''
def main(): def test_doctstrings():
import doctest import doctest
doctest.testmod() doctest.testmod()
if __name__ == '__main__': if __name__ == '__main__':
main() test_doctstrings()

@ -6,7 +6,7 @@ Statistics package in WAFO Toolbox.
""" """
from scipy.stats import * from scipy.stats import *
from core import * from core import *
import distributions import distributions #@Reimport
from wafo.stats.distributions import * from wafo.stats.distributions import *
import estimation import estimation

@ -1,6 +1,6 @@
from __future__ import division from __future__ import division
import warnings import warnings
from wafo.wafodata import WafoData from wafo.wafodata import PlotData
from wafo.misc import findextrema from wafo.misc import findextrema
from scipy import special from scipy import special
import numpy as np import numpy as np
@ -80,7 +80,7 @@ def edf(x, method=2):
else: else:
Fz1 = arange(1, N + 1) / (N + 1) Fz1 = arange(1, N + 1) / (N + 1)
F = WafoData(Fz1, z, xlab='x', ylab='F(x)') F = PlotData(Fz1, z, xlab='x', ylab='F(x)')
F.setplotter('step') F.setplotter('step')
return F return F
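For reference, a minimal standalone sketch of the construction used in edf above after the rename (PlotData as imported at the top of this file; the plotting positions follow the else-branch shown in the hunk, and that plot() returns a handle is an assumption):

    import numpy as np
    x = np.random.randn(100)
    z = np.sort(x)
    N = len(z)
    Fz1 = np.arange(1, N + 1) / (N + 1.0)     # plotting positions
    F = PlotData(Fz1, z, xlab='x', ylab='F(x)')
    F.setplotter('step')                      # ECDF staircase
    h = F.plot()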
@ -147,7 +147,7 @@ def reslife(data, u=None, umin=None, umax=None, nu=None, nmin=3, alpha=0.05, plo
Returns Returns
------- -------
mrl : WafoData object mrl : PlotData object
Mean residual life values, i.e., mean excesses over thresholds, u. Mean residual life values, i.e., mean excesses over thresholds, u.
Notes Notes
@ -217,9 +217,9 @@ def reslife(data, u=None, umin=None, umax=None, nu=None, nmin=3, alpha=0.05, plo
#options.CI = [mrll,mrlu]; #options.CI = [mrll,mrlu];
#options.numdata = num; #options.numdata = num;
titleTxt = 'Mean residual life with %d%s CI' % (100 * p, '%') titleTxt = 'Mean residual life with %d%s CI' % (100 * p, '%')
res = WafoData(mrl, u, xlab='Threshold', ylab='Mean Excess', title=titleTxt) res = PlotData(mrl, u, xlab='Threshold', ylab='Mean Excess', title=titleTxt)
res.workspace = dict(numdata=num, umin=umin, umax=umax, nu=nu, nmin=nmin, alpha=alpha) res.workspace = dict(numdata=num, umin=umin, umax=umax, nu=nu, nmin=nmin, alpha=alpha)
res.children = [WafoData(vstack([mrll, mrlu]).T, u, xlab='Threshold', title=titleTxt)] res.children = [PlotData(vstack([mrll, mrlu]).T, u, xlab='Threshold', title=titleTxt)]
res.plot_args_children = [':r'] res.plot_args_children = [':r']
if plotflag: if plotflag:
res.plot() res.plot()
@ -249,7 +249,7 @@ def dispersion_idx(data, t=None, u=None, umin=None, umax=None, nu=None, nmin=10,
Returns Returns
------- -------
DI : WafoData object DI : PlotData object
Dispersion index Dispersion index
b_u : real scalar b_u : real scalar
threshold where the number of exceedances in a fixed period (Tb) is threshold where the number of exceedances in a fixed period (Tb) is
@ -381,10 +381,10 @@ def dispersion_idx(data, t=None, u=None, umin=None, umax=None, nu=None, nmin=10,
CItxt = '%d%s CI' % (100 * p, '%') CItxt = '%d%s CI' % (100 * p, '%')
titleTxt = 'Dispersion Index plot'; titleTxt = 'Dispersion Index plot';
res = WafoData(di, u, title=titleTxt, labx='Threshold', laby='Dispersion Index') res = PlotData(di, u, title=titleTxt, labx='Threshold', laby='Dispersion Index')
#'caption',CItxt); #'caption',CItxt);
res.workspace = dict(umin=umin, umax=umax, nu=nu, nmin=nmin, alpha=alpha) res.workspace = dict(umin=umin, umax=umax, nu=nu, nmin=nmin, alpha=alpha)
res.children = [WafoData(vstack([diLo * ones(nu), diUp * ones(nu)]).T, u, xlab='Threshold', title=CItxt)] res.children = [PlotData(vstack([diLo * ones(nu), diUp * ones(nu)]).T, u, xlab='Threshold', title=CItxt)]
res.plot_args_children = ['--r'] res.plot_args_children = ['--r']
if plotflag: if plotflag:
res.plot(di) res.plot(di)
@ -848,9 +848,9 @@ class RegLogit(object):
z1 = (y * ones((1, yrange))) == ((y * 0 + 1) * np.arange(ymin + 1, ymax+1)) z1 = (y * ones((1, yrange))) == ((y * 0 + 1) * np.arange(ymin + 1, ymax+1))
z = z[:, np.flatnonzero(z.any(axis=0))]; z = z[:, np.flatnonzero(z.any(axis=0))];
z1 = z1[:, np.flatnonzero(z1.any(axis=0))] z1 = z1[:, np.flatnonzero(z1.any(axis=0))]
[mz, nz] = z.shape [_mz, nz] = z.shape
[mx, nx] = X.shape [_mx, nx] = X.shape
[my, ny] = y.shape [my, _ny] = y.shape
g = (z.sum(axis=0).cumsum() / my).reshape(-1,1) g = (z.sum(axis=0).cumsum() / my).reshape(-1,1)
theta00 = np.log(g / (1 - g)).ravel() theta00 = np.log(g / (1 - g)).ravel()
@ -1120,7 +1120,7 @@ class RegLogit(object):
.size : size if binomial family (default 1). .size : size if binomial family (default 1).
''' '''
[mx, nx] = self.X.shape [_mx, nx] = self.X.shape
if Xnew is None: if Xnew is None:
Xnew = self.X; Xnew = self.X;
else: else:
@ -1229,7 +1229,7 @@ def _test_dispersion_idx():
xn = wafo.data.sea() xn = wafo.data.sea()
t, data = xn.T t, data = xn.T
Ie = findpot(data,t,0,5); Ie = findpot(data,t,0,5);
di, u, ok_u = dispersion_idx(data[Ie],t[Ie],tb=100) di, _u, _ok_u = dispersion_idx(data[Ie],t[Ie],tb=100)
di.plot() # a threshold around 1 seems appropriate. di.plot() # a threshold around 1 seems appropriate.
di.show() di.show()
pass pass
@ -1240,7 +1240,7 @@ def _test_findpot():
from wafo.misc import findtc from wafo.misc import findtc
x = wafo.data.sea() x = wafo.data.sea()
t, data = x[:, :].T t, data = x[:, :].T
itc, iv = findtc(data, 0, 'dw') itc, _iv = findtc(data, 0, 'dw')
ytc, ttc = data[itc], t[itc] ytc, ttc = data[itc], t[itc]
ymin = 2 * data.std() ymin = 2 * data.std()
tmin = 10 # sec tmin = 10 # sec
@ -1248,7 +1248,7 @@ def _test_findpot():
yp, tp = data[I], t[I] yp, tp = data[I], t[I]
Ie = findpot(yp, tp, ymin, tmin) Ie = findpot(yp, tp, ymin, tmin)
ye, te = yp[Ie], tp[Ie] ye, te = yp[Ie], tp[Ie]
h = pylab.plot(t, data, ttc,ytc,'ro', t, zeros(len(t)), ':', te, ye, 'kx', tp, yp, '+') pylab.plot(t, data, ttc,ytc,'ro', t, zeros(len(t)), ':', te, ye, 'kx', tp, yp, '+')
pylab.show() # pylab.show() #
pass pass
@ -1266,7 +1266,7 @@ def test_reglogit():
#b.display() #% members and methods #b.display() #% members and methods
b.summary() b.summary()
[mu,plo,pup] = b.predict(fulloutput=True); [mu,plo,pup] = b.predict(fulloutput=True) #@UnusedVariable
pass pass
#plot(x,mu,'g',x,plo,'r:',x,pup,'r:') #plot(x,mu,'g',x,plo,'r:',x,pup,'r:')
def test_reglogit2(): def test_reglogit2():
@ -1284,12 +1284,12 @@ def test_reglogit2():
def test_sklearn0(): def test_sklearn0():
from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LogisticRegression
from sklearn import datasets from sklearn import datasets #@UnusedImport
# FIXME: the iris dataset has only 4 features! # FIXME: the iris dataset has only 4 features!
iris = datasets.load_iris() # iris = datasets.load_iris()
X = iris.data # X = iris.data
y = iris.target # y = iris.target
X = np.sort(5*np.random.rand(40, 1)-2.5, axis=0) X = np.sort(5*np.random.rand(40, 1)-2.5, axis=0)
y = (2*(np.cos(X)>2*np.random.rand(40, 1)-1)-1).ravel() y = (2*(np.cos(X)>2*np.random.rand(40, 1)-1)-1).ravel()
@ -1302,7 +1302,7 @@ def test_sklearn0():
clf_LR.fit(X, y) clf_LR.fit(X, y)
score.append(clf_LR.score(X,y)) score.append(clf_LR.score(X,y))
plot(cvals, score) #plot(cvals, score)
def test_sklearn(): def test_sklearn():
X = np.sort(5*np.random.rand(40, 1)-2.5, axis=0) X = np.sort(5*np.random.rand(40, 1)-2.5, axis=0)
@ -1335,7 +1335,7 @@ def test_sklearn1():
y = (2*(np.cos(X)>2*np.random.rand(40, 1)-1)-1).ravel() y = (2*(np.cos(X)>2*np.random.rand(40, 1)-1)-1).ravel()
from sklearn.svm import SVR from sklearn.svm import SVR
cvals= np.logspace(-1,4,10) # cvals= np.logspace(-1,4,10)
svr_rbf = SVR(kernel='rbf', C=1e4, gamma=0.1, probability=True) svr_rbf = SVR(kernel='rbf', C=1e4, gamma=0.1, probability=True)
svr_lin = SVR(kernel='linear', C=1e4, probability=True) svr_lin = SVR(kernel='linear', C=1e4, probability=True)
svr_poly = SVR(kernel='poly', C=1e4, degree=2, probability=True) svr_poly = SVR(kernel='poly', C=1e4, degree=2, probability=True)

@ -971,7 +971,7 @@ class rv_generic(object):
return args, loc, scale return args, loc, scale
def _fix_loc(self, args, loc): def _fix_loc(self, args, loc):
args, loc, scale = self._fix_loc_scale(args, loc) args, loc, _scale = self._fix_loc_scale(args, loc)
return args, loc return args, loc
# These are actually called, and should not be overwritten if you # These are actually called, and should not be overwritten if you
@ -1523,7 +1523,7 @@ class rv_continuous(rv_generic):
for item in ['callparams', 'default', 'before_notes']: for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\ tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array_like\n shape parameters", "") "\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2): for i in range(2): #@UnusedVariable
if self.shapes is None: if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ") # necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
@ -2752,7 +2752,7 @@ def _norm_logpdf(x):
def _norm_cdf(x): def _norm_cdf(x):
return special.ndtr(x) return special.ndtr(x)
def _norm_logcdf(x): def _norm_logcdf(x):
return special.log_ndtr(x) return log(special.ndtr(x))
def _norm_ppf(q): def _norm_ppf(q):
return special.ndtri(q) return special.ndtri(q)
class norm_gen(rv_continuous): class norm_gen(rv_continuous):
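Both versions of _norm_logcdf above evaluate the log of the standard normal CDF. A short check with scipy.special shows the practical difference in the far left tail, where log(ndtr(x)) underflows to -inf while the dedicated log_ndtr stays finite:

import numpy as np
from scipy import special

x = np.array([-40.0, -10.0, 0.0, 3.0])
print(np.log(special.ndtr(x)))   # log(Phi(x)); underflows to -inf at x = -40
print(special.log_ndtr(x))       # dedicated log-CDF; finite even deep in the tail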
@ -3166,8 +3166,8 @@ class cauchy_gen(rv_continuous):
return inf, inf, nan, nan return inf, inf, nan, nan
def _entropy(self): def _entropy(self):
return log(4*pi) return log(4*pi)
def _fitstart(data, args=None): def _fitstart(self, data, args=None):
return (0, 1) return (0, 1)
cauchy = cauchy_gen(name='cauchy') cauchy = cauchy_gen(name='cauchy')
@ -4129,8 +4129,8 @@ class genextreme_gen(rv_continuous):
""" """
def _argcheck(self, c): def _argcheck(self, c):
min = np.minimum min = np.minimum #@ReservedAssignment
max = np.maximum max = np.maximum #@ReservedAssignment
sml = floatinfo.machar.xmin sml = floatinfo.machar.xmin
#self.b = where(c > 0, 1.0 / c,inf) #self.b = where(c > 0, 1.0 / c,inf)
#self.a = where(c < 0, 1.0 / c, -inf) #self.a = where(c < 0, 1.0 / c, -inf)
@ -6440,7 +6440,7 @@ def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
else: else:
return c return c
def reverse_dict(dict): def reverse_dict(dict): #@ReservedAssignment
newdict = {} newdict = {}
sorted_keys = copy(dict.keys()) sorted_keys = copy(dict.keys())
sorted_keys.sort() sorted_keys.sort()
@ -6738,7 +6738,7 @@ class rv_discrete(rv_generic):
for item in ['callparams', 'default', 'before_notes']: for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\ tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array_like\n shape parameters", "") "\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2): for i in range(2): #@UnusedVariable
if self.shapes is None: if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ") # necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
@ -7286,9 +7286,9 @@ class rv_discrete(rv_generic):
if (n > 0) and (n < 5): if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func) signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]): if (signature[2] is not None) or ('moments' in signature[0]):
dict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]} dict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]} #@ReservedAssignment
else: else:
dict = {} dict = {} #@ReservedAssignment
mu, mu2, g1, g2 = self._stats(*args,**dict) mu, mu2, g1, g2 = self._stats(*args,**dict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args) val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
@ -7422,7 +7422,7 @@ class rv_discrete(rv_generic):
else: else:
invfac = 1.0 invfac = 1.0
tot = 0.0 # tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args) low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb) low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub) upp = min(max(suppnmin, upp), ub)
@ -7564,8 +7564,8 @@ class bernoulli_gen(binom_gen):
return binom._stats(1, pr) return binom._stats(1, pr)
def _entropy(self, pr): def _entropy(self, pr):
return -pr*log(pr)-(1-pr)*log1p(-pr) return -pr*log(pr)-(1-pr)*log1p(-pr)
)
bernoulli = bernoulli_gen(name='bernoulli',shapes="pr")
# Negative binomial # Negative binomial
class nbinom_gen(rv_discrete): class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable. """A negative binomial discrete random variable.
@ -7984,22 +7984,22 @@ class randint_gen(rv_discrete):
%(example)s %(example)s
""" """
def _argcheck(self, min, max): def _argcheck(self, min, max): #@ReservedAssignment
self.a = min self.a = min
self.b = max-1 self.b = max-1
return (max > min) return (max > min)
def _pmf(self, k, min, max): def _pmf(self, k, min, max): #@ReservedAssignment
fact = 1.0 / (max - min) fact = 1.0 / (max - min)
return fact return fact
def _cdf(self, x, min, max): def _cdf(self, x, min, max): #@ReservedAssignment
k = floor(x) k = floor(x)
return (k-min+1)*1.0/(max-min) return (k-min+1)*1.0/(max-min)
def _ppf(self, q, min, max): def _ppf(self, q, min, max): #@ReservedAssignment
vals = ceil(q*(max-min)+min)-1 vals = ceil(q*(max-min)+min)-1
vals1 = (vals-1).clip(min, max) vals1 = (vals-1).clip(min, max)
temp = self._cdf(vals1, min, max) temp = self._cdf(vals1, min, max)
return where(temp >= q, vals1, vals) return where(temp >= q, vals1, vals)
def _stats(self, min, max): def _stats(self, min, max): #@ReservedAssignment
m2, m1 = arr(max), arr(min) m2, m1 = arr(max), arr(min)
mu = (m2 + m1 - 1.0) / 2 mu = (m2 + m1 - 1.0) / 2
d = m2 - m1 d = m2 - m1
@ -8007,14 +8007,14 @@ class randint_gen(rv_discrete):
g1 = 0.0 g1 = 0.0
g2 = -6.0/5.0*(d*d+1.0)/(d-1.0)*(d+1.0) g2 = -6.0/5.0*(d*d+1.0)/(d-1.0)*(d+1.0)
return mu, var, g1, g2 return mu, var, g1, g2
def _rvs(self, min, max=None): def _rvs(self, min, max=None): #@ReservedAssignment
"""An array of *size* random integers >= min and < max. """An array of *size* random integers >= min and < max.
If max is None, then range is >=0 and < min If max is None, then range is >=0 and < min
""" """
return mtrand.randint(min, max, self._size) return mtrand.randint(min, max, self._size)
def _entropy(self, min, max): def _entropy(self, min, max): #@ReservedAssignment
return log(max-min) return log(max-min)
randint = randint_gen(name='randint',longname='A discrete uniform '\ randint = randint_gen(name='randint',longname='A discrete uniform '\
'(random integer)', shapes="min, max") '(random integer)', shapes="min, max")
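The randint methods above encode the discrete uniform distribution on the integers min, ..., max-1: a constant pmf 1/(max-min), cdf (floor(x)-min+1)/(max-min), and a ppf that inverts the cdf by rounding up. The same formulas evaluated directly, assuming nothing beyond numpy (the values are illustrative):

import numpy as np

lo, hi = 2, 7                                    # support: the integers 2, 3, 4, 5, 6
k = np.arange(lo, hi)
pmf = np.full(k.shape, 1.0 / (hi - lo))          # constant mass, as in _pmf
cdf = (np.floor(k) - lo + 1.0) / (hi - lo)       # as in _cdf
q = np.array([0.05, 0.5, 0.95])
ppf = np.ceil(q * (hi - lo) + lo) - 1            # as in _ppf, before its clip correction
print(pmf, cdf, ppf)                             # ppf lands where cdf first reaches q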
@ -8212,11 +8212,12 @@ def test_truncrayleigh():
phat = truncrayleigh.fit2(R, method='ml'); phat = truncrayleigh.fit2(R, method='ml');
def main(): def test_docstrings():
import matplotlib import matplotlib
matplotlib.interactive(True) matplotlib.interactive(True)
R = norm.rvs(size=100) R = norm.rvs(size=100)
phat = norm.fit(R) phat = norm.fit(R)
phat = genpareto.fit(R[R > 0.7], f0=0.1, floc=0.7) phat = genpareto.fit(R[R > 0.7], f0=0.1, floc=0.7)
#nbinom(10, 0.75).rvs(3) #nbinom(10, 0.75).rvs(3)
@ -8273,7 +8274,7 @@ def test_genpareto():
print(phat.par) print(phat.par)
if __name__ == '__main__': if __name__ == '__main__':
#main() #test_docstrings()
test_genpareto() test_genpareto()
#test_truncrayleigh() #test_truncrayleigh()
#test_lognorm() #test_lognorm()

@ -20,8 +20,8 @@ from scipy import optimize
import numpy import numpy
import numpy as np import numpy as np
from numpy import alltrue, arange, ravel, sum, zeros, log, sqrt, exp from numpy import alltrue, arange, ravel, sum, zeros, log, sqrt, exp
from numpy import (atleast_1d, any, asarray, nan, pi, reshape, repeat, from numpy import (atleast_1d, any, asarray, nan, pi, #reshape, #repeat, product, ndarray,
product, ndarray, isfinite) isfinite)
from numpy import flatnonzero as nonzero from numpy import flatnonzero as nonzero
@ -32,7 +32,7 @@ __all__ = [
floatinfo = np.finfo(float) floatinfo = np.finfo(float)
#arr = atleast_1d #arr = atleast_1d
arr = asarray arr = asarray
all = alltrue all = alltrue #@ReservedAssignment
def chi2isf(p, df): def chi2isf(p, df):
return special.chdtri(df, p) return special.chdtri(df, p)
@ -845,11 +845,9 @@ class FitDistribution(rv_frozen):
if not self.par_fix == None: if not self.par_fix == None:
numfix = len(self.i_fixed) numfix = len(self.i_fixed)
if numfix > 0: if numfix > 0:
format = '%d,'*numfix format0 = ', '.join(['%d']*numfix)
format = format[:-1] format1 = ', '.join(['%g']*numfix)
format1 = '%g,'*numfix phatistr = format0 % tuple(self.i_fixed)
format1 = format1[:-1]
phatistr = format % tuple(self.i_fixed)
phatvstr = format1 % tuple(self.par[self.i_fixed]) phatvstr = format1 % tuple(self.par[self.i_fixed])
fixstr = 'Fixed: phat[%s] = %s ' % (phatistr, phatvstr) fixstr = 'Fixed: phat[%s] = %s ' % (phatistr, phatvstr)
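The rewritten report above builds both format strings with str.join instead of trimming a trailing comma from '%d,'*numfix. The pattern in isolation, with illustrative indices and values:

i_fixed = [0, 2]                              # indices of the fixed parameters (illustrative)
par_fixed = [0.1, 0.7]                        # their values (illustrative)

format0 = ', '.join(['%d'] * len(i_fixed))    # '%d, %d'
format1 = ', '.join(['%g'] * len(i_fixed))    # '%g, %g'
phatistr = format0 % tuple(i_fixed)           # '0, 2'
phatvstr = format1 % tuple(par_fixed)         # '0.1, 0.7'
print('Fixed: phat[%s] = %s ' % (phatistr, phatvstr))   # Fixed: phat[0, 2] = 0.1, 0.7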
@ -917,7 +915,7 @@ class FitDistribution(rv_frozen):
mn = (np.floor(mn / d) - 1) * d - odd * d / 2 mn = (np.floor(mn / d) - 1) * d - odd * d / 2
mx = (np.ceil(mx / d) + 1) * d + odd * d / 2 mx = (np.ceil(mx / d) + 1) * d + odd * d / 2
limits = np.arange(mn, mx, d) limits = np.arange(mn, mx, d)
bin, limits = numpy.histogram(self.data, bins=limits, normed=True) #, new=True) bin, limits = np.histogram(self.data, bins=limits, normed=True) #, new=True) @ReservedAssignment
limits.shape = (-1, 1) limits.shape = (-1, 1)
xx = limits.repeat(3, axis=1) xx = limits.repeat(3, axis=1)
xx.shape = (-1,) xx.shape = (-1,)
@ -986,7 +984,7 @@ class FitDistribution(rv_frozen):
dx = numpy.diff(x, axis=0) dx = numpy.diff(x, axis=0)
tie = (dx == 0) tie = (dx == 0)
if any(tie): if any(tie):
print('P-value is on the conservative side (i.e. too large) due to ties in the data!') warnings.warn('P-value is on the conservative side (i.e. too large) due to ties in the data!')
T = self.dist.nlogps(theta, x) T = self.dist.nlogps(theta, x)
@ -1009,16 +1007,17 @@ class FitDistribution(rv_frozen):
def main(): def test_docstrings():
import doctest import doctest
doctest.testmod() doctest.testmod()
def test1(): def test1():
import wafo.stats as ws import wafo.stats as ws
R = ws.weibull_min.rvs(1,size=100); R = ws.weibull_min.rvs(1,size=100);
phat = FitDistribution(ws.weibull_min, R, method='mps') phat = FitDistribution(ws.weibull_min, R, method='mps')
# # Better CI for phat.par[i=0] # # Better CI for phat.par[i=0]
Lp1 = Profile(phat, i=1) Lp1 = Profile(phat, i=1) #@UnusedVariable
# Lp2 = Profile(phat, i=2) # Lp2 = Profile(phat, i=2)
# SF = 1./990 # SF = 1./990
# x = phat.isf(SF) # x = phat.isf(SF)
@ -1085,5 +1084,5 @@ def test1():
# lp = pht.profile() # lp = pht.profile()
if __name__ == '__main__': if __name__ == '__main__':
test1() #test1()
main() test_docstrings()

@ -1,6 +1,6 @@
from numpy import asarray, ndarray, ones, nan #, reshape, repeat, product from numpy import asarray, ndarray, ones, nan #, reshape, repeat, product
def valarray(shape,value=nan,typecode=None): def valarray(shape, value=nan, typecode=None):
"""Return an array of all value. """Return an array of all value.
""" """
#out = reshape(repeat([value],product(shape,axis=0),axis=0),shape) #out = reshape(repeat([value],product(shape,axis=0),axis=0),shape)

@ -3,9 +3,9 @@ Created on 17. juli 2010
@author: pab @author: pab
''' '''
import numpy as np import numpy as np #@UnusedImport
from numpy import pi, inf from numpy import pi, inf #@UnusedImport
from wafo.gaussian import Rind, prbnormtndpc, prbnormndpc, prbnormnd, cdfnorm2d, prbnorm2d from wafo.gaussian import Rind, prbnormtndpc, prbnormndpc, prbnormnd, cdfnorm2d, prbnorm2d #@UnusedImport
def test_rind(): def test_rind():
''' '''

@ -4,9 +4,9 @@ Created on 20. nov. 2010
@author: pab @author: pab
''' '''
import numpy as np import numpy as np #@UnusedImport
from numpy import array from numpy import array #@UnusedImport
import wafo.kdetools as wk import wafo.kdetools as wk #@UnusedImport
#import pylab as plb #import pylab as plb
def test0_KDE1D(): def test0_KDE1D():
@ -215,7 +215,7 @@ def test_smooth_params():
array([ 0.1732289 , 0.33159097, 0.3107633 ]) array([ 0.1732289 , 0.33159097, 0.3107633 ])
>>> gauss.hisj(data) >>> gauss.hisj(data)
array([ 0.24222479, 0.74277133, 0.15492661]) array([ 0.24222479, 0.74277133, 0.15492661])
>>> data = np.array([0.753557920000000, 0.727791940000000, 0.941491690000000, >>> data = np.array([0.753557920000000, 0.727791940000000, 0.941491690000000,
@ -223,6 +223,8 @@ def test_smooth_params():
... 0.602882730000000, 1.368836350000000, 1.747543260000000, 1.095475610000000, ... 0.602882730000000, 1.368836350000000, 1.747543260000000, 1.095475610000000,
... 1.016711330000000, 0.732111430000000, 0.618917190000000, 0.759034870000000, ... 1.016711330000000, 0.732111430000000, 0.618917190000000, 0.759034870000000,
... 1.891946900000000, 0.724338080000000, 1.929730940000000, 0.447498380000000, 1.365084520000000]) ... 1.891946900000000, 0.724338080000000, 1.929730940000000, 0.447498380000000, 1.365084520000000])
''' '''
def test_gridcount_1D(): def test_gridcount_1D():

@ -1,12 +1,12 @@
import numpy as np import numpy as np #@UnusedImport
from numpy import cos, exp, linspace, pi, sin, diff, arange, ones from numpy import cos, exp, linspace, pi, sin, diff, arange, ones #@UnusedImport
from numpy.random import randn from numpy.random import randn #@UnusedImport
from wafo.data import sea from wafo.data import sea #@UnusedImport
from wafo.misc import (JITImport, Bunch, detrendma, DotDict, findcross, ecross, findextrema, from wafo.misc import (JITImport, Bunch, detrendma, DotDict, findcross, ecross, findextrema, #@UnusedImport
findrfc, rfcfilter, findtp, findtc, findoutliers, findrfc, rfcfilter, findtp, findtc, findoutliers, #@UnusedImport
common_shape, argsreduce, stirlerr, getshipchar, betaloge, common_shape, argsreduce, stirlerr, getshipchar, betaloge, #@UnusedImport
gravity, nextpow2, discretize, discretize2, pol2cart, gravity, nextpow2, discretize, polar2cart, #@UnusedImport
cart2pol, meshgrid, tranproc) cart2polar, meshgrid, tranproc)#@UnusedImport
def test_JITImport(): def test_JITImport():
''' '''
@ -231,7 +231,7 @@ def test_rfcfilter():
>>> tp03 = rfcfilter(tp,0.3) >>> tp03 = rfcfilter(tp,0.3)
>>> tp03 >>> tp03
array([-0.00743352, 1.08753972, -1.07206545, 1.09550837, -1.07940458, array([-0.00743352, 1.08753972, -1.07206545, 1.09550837, -1.07940458,
1.07849396, -1.0995006 , 1.08094452]) 1.07849396, -1.0995006 , 1.08094452, 0.11983423])
''' '''
def test_findtp(): def test_findtp():
''' '''
@ -379,9 +379,9 @@ def test_discretize():
-7.07106781e-01, -8.31469612e-01, -9.23879533e-01, -7.07106781e-01, -8.31469612e-01, -9.23879533e-01,
-9.80785280e-01, -1.00000000e+00]) -9.80785280e-01, -1.00000000e+00])
''' '''
def test_discretize2(): def test_discretize_adaptive():
''' '''
>>> x, y = discretize2(np.cos,0,np.pi) >>> x, y = discretize(np.cos,0,np.pi, method='adaptive')
>>> x; y >>> x; y
array([ 0. , 0.19634954, 0.39269908, 0.58904862, 0.78539816, array([ 0. , 0.19634954, 0.39269908, 0.58904862, 0.78539816,
0.9817477 , 1.17809725, 1.37444679, 1.57079633, 1.76714587, 0.9817477 , 1.17809725, 1.37444679, 1.57079633, 1.76714587,
@ -398,7 +398,7 @@ def test_pol2cart_n_cart2pol():
''' '''
>>> r = 5 >>> r = 5
>>> t = linspace(0,pi,20) >>> t = linspace(0,pi,20)
>>> x, y = pol2cart(t,r) >>> x, y = polar2cart(t,r)
>>> x; y >>> x; y
array([ 5. , 4.93180652, 4.72908621, 4.39736876, 3.94570255, array([ 5. , 4.93180652, 4.72908621, 4.39736876, 3.94570255,
3.38640786, 2.73474079, 2.00847712, 1.22742744, 0.41289673, 3.38640786, 2.73474079, 2.00847712, 1.22742744, 0.41289673,
@ -411,7 +411,7 @@ def test_pol2cart_n_cart2pol():
4.57886663e+00, 4.18583239e+00, 3.67861955e+00, 4.57886663e+00, 4.18583239e+00, 3.67861955e+00,
3.07106356e+00, 2.37973697e+00, 1.62349735e+00, 3.07106356e+00, 2.37973697e+00, 1.62349735e+00,
8.22972951e-01, 6.12323400e-16]) 8.22972951e-01, 6.12323400e-16])
>>> ti, ri = cart2pol(x,y) >>> ti, ri = cart2polar(x,y)
>>> ti;ri >>> ti;ri
array([ 0. , 0.16534698, 0.33069396, 0.49604095, 0.66138793, array([ 0. , 0.16534698, 0.33069396, 0.49604095, 0.66138793,
0.82673491, 0.99208189, 1.15742887, 1.32277585, 1.48812284, 0.82673491, 0.99208189, 1.15742887, 1.32277585, 1.48812284,

@ -4,7 +4,7 @@ from __future__ import division
#import numpy as np #import numpy as np
from numpy import trapz, sqrt, linspace #@UnresolvedImport from numpy import trapz, sqrt, linspace #@UnresolvedImport
from wafo.wafodata import WafoData from wafo.wafodata import PlotData
from wafo.misc import tranproc #, trangood from wafo.misc import tranproc #, trangood
__all__ = ['TrData', 'TrCommon'] __all__ = ['TrData', 'TrCommon']
@ -127,8 +127,9 @@ class TrCommon(object):
def _dat2gauss(self, x, *xi): def _dat2gauss(self, x, *xi):
pass pass
class TrData(WafoData, TrCommon): class TrData(PlotData, TrCommon):
__doc__ = TrCommon.__doc__.split('mean')[0].replace('<generic>','Data') + """ __doc__ = TrCommon.__doc__.split('mean')[0].replace('<generic>','Data' #@ReservedAssignment
) + """
data : array-like data : array-like
Gaussian values, Y Gaussian values, Y
args : array-like args : array-like
@ -186,7 +187,7 @@ class TrData(WafoData, TrCommon):
ym = self.ymean-self.ysigma ym = self.ymean-self.ysigma
self.sigma = (self.gauss2dat(yp)-self.gauss2dat(ym))/2. self.sigma = (self.gauss2dat(yp)-self.gauss2dat(ym))/2.
self.children = [WafoData((self.args-self.mean)/self.sigma, self.args)] self.children = [PlotData((self.args-self.mean)/self.sigma, self.args)]
def trdata(self): def trdata(self):
return self return self

@ -43,7 +43,7 @@ _example = '''
>>> xs = g2.gauss2dat(ys[:,1:]) # Transformed to the real world >>> xs = g2.gauss2dat(ys[:,1:]) # Transformed to the real world
''' '''
class TrCommon2(TrCommon): class TrCommon2(TrCommon):
__doc__ = TrCommon.__doc__ __doc__ = TrCommon.__doc__ #@ReservedAssignment
def trdata(self, x=None, xnmin= -5, xnmax=5, n=513): def trdata(self, x=None, xnmin= -5, xnmax=5, n=513):
""" """
Return a discretized transformation model. Return a discretized transformation model.
@ -75,7 +75,8 @@ class TrCommon2(TrCommon):
return TrData(yn, x, mean=self.mean, sigma=self.sigma) return TrData(yn, x, mean=self.mean, sigma=self.sigma)
class TrHermite(TrCommon2): class TrHermite(TrCommon2):
__doc__ = TrCommon2.__doc__.replace('<generic>', 'Hermite') + """ __doc__ = TrCommon2.__doc__.replace('<generic>', 'Hermite' #@ReservedAssignment
) + """
pardef : scalar, integer pardef : scalar, integer
1 Winterstein et. al. (1994) parametrization [1]_ (default) 1 Winterstein et. al. (1994) parametrization [1]_ (default)
2 Winterstein (1988) parametrization [2]_ 2 Winterstein (1988) parametrization [2]_
@ -328,7 +329,8 @@ class TrHermite(TrCommon2):
class TrLinear(TrCommon2): class TrLinear(TrCommon2):
__doc__ = TrCommon2.__doc__.replace('<generic>', 'Linear') + """ __doc__ = TrCommon2.__doc__.replace('<generic>', 'Linear' #@ReservedAssignment
) + """
Description Description
----------- -----------
The linear transformation model is monotonic linear polynomial, calibrated The linear transformation model is monotonic linear polynomial, calibrated
@ -369,7 +371,8 @@ class TrLinear(TrCommon2):
class TrOchi(TrCommon2): class TrOchi(TrCommon2):
__doc__ = TrCommon2.__doc__.replace('<generic>', 'Ochi') + """ __doc__ = TrCommon2.__doc__.replace('<generic>', 'Ochi' #@ReservedAssignment
) + """
Description Description
----------- -----------

@ -7,7 +7,7 @@ from scipy.integrate.quadrature import cumtrapz #@UnresolvedImport
from scipy.interpolate import griddata from scipy.interpolate import griddata
from scipy import integrate from scipy import integrate
__all__ = ['WafoData', 'AxisLabels'] __all__ = ['PlotData', 'AxisLabels']
def empty_copy(obj): def empty_copy(obj):
class Empty(obj.__class__): class Empty(obj.__class__):
@ -29,7 +29,7 @@ def now():
''' '''
return strftime("%a, %d %b %Y %H:%M:%S", gmtime()) return strftime("%a, %d %b %Y %H:%M:%S", gmtime())
class WafoData(object): class PlotData(object):
''' '''
Container class for data objects in WAFO Container class for data objects in WAFO
@ -38,7 +38,7 @@ class WafoData(object):
data : array_like data : array_like
args : vector for 1D, list of vectors for 2D, 3D, ... args : vector for 1D, list of vectors for 2D, 3D, ...
labels : AxisLabels labels : AxisLabels
children : list of WafoData objects children : list of PlotData objects
Member methods Member methods
-------------- --------------
@ -52,12 +52,12 @@ class WafoData(object):
>>> x = np.arange(-2, 2, 0.2) >>> x = np.arange(-2, 2, 0.2)
# Plot 2 objects in one call # Plot 2 objects in one call
>>> d2 = WafoData(np.sin(x), x, xlab='x', ylab='sin', title='sinus') >>> d2 = PlotData(np.sin(x), x, xlab='x', ylab='sin', title='sinus')
>>> h = d2.plot() >>> h = d2.plot()
Plot with confidence interval Plot with confidence interval
>>> d3 = WafoData(np.sin(x),x) >>> d3 = PlotData(np.sin(x),x)
>>> d3.children = [WafoData(np.vstack([np.sin(x)*0.9, np.sin(x)*1.2]).T,x)] >>> d3.children = [PlotData(np.vstack([np.sin(x)*0.9, np.sin(x)*1.2]).T,x)]
>>> d3.plot_args_children=[':r'] >>> d3.plot_args_children=[':r']
>>> h = d3.plot() >>> h = d3.plot()
@ -110,9 +110,9 @@ class WafoData(object):
def eval_points(self, *args, **kwds): def eval_points(self, *args, **kwds):
''' '''
>>> x = np.linspace(0,5,20) >>> x = np.linspace(0,5,20)
>>> d = WafoData(np.sin(x),x) >>> d = PlotData(np.sin(x),x)
>>> xi = np.linspace(0,5,60) >>> xi = np.linspace(0,5,60)
>>> di = WafoData(d.eval_points(xi, method='cubic'),xi) >>> di = PlotData(d.eval_points(xi, method='cubic'),xi)
>>> h = d.plot('.') >>> h = d.plot('.')
>>> hi = di.plot() >>> hi = di.plot()
@ -134,7 +134,7 @@ class WafoData(object):
def integrate(self, a, b, **kwds): def integrate(self, a, b, **kwds):
''' '''
>>> x = np.linspace(0,5,60) >>> x = np.linspace(0,5,60)
>>> d = WafoData(np.sin(x), x) >>> d = PlotData(np.sin(x), x)
>>> d.integrate(0,np.pi/2) >>> d.integrate(0,np.pi/2)
0.99940054759302188 0.99940054759302188
@ -490,15 +490,15 @@ def plot2d(axis, wdata, plotflag, *args, **kwds):
def test_eval_points(): def test_eval_points():
plotbackend.ioff() plotbackend.ioff()
x = np.linspace(0,5,21) x = np.linspace(0,5,21)
d = WafoData(np.sin(x),x) d = PlotData(np.sin(x),x)
xi = np.linspace(0,5,61) xi = np.linspace(0,5,61)
di = WafoData(d.eval_points(xi,method='cubic'),xi) di = PlotData(d.eval_points(xi,method='cubic'),xi)
d.plot('.') d.plot('.')
di.plot() di.plot()
di.show() di.show()
def test_integrate(): def test_integrate():
x = np.linspace(0,5,60) x = np.linspace(0,5,60)
d = WafoData(np.sin(x), x) d = PlotData(np.sin(x), x)
print(d.integrate(0,np.pi/2,method='simps')) print(d.integrate(0,np.pi/2,method='simps'))
def test_docstrings(): def test_docstrings():
import doctest import doctest

@ -5,7 +5,9 @@ Created on 3. juni 2011
''' '''
import numpy as np import numpy as np
from numpy import exp, expm1, inf, nan, pi, hstack, where, atleast_1d, cos, sin from numpy import exp, expm1, inf, nan, pi, hstack, where, atleast_1d, cos, sin
from dispersion_relation import w2k, k2w from dispersion_relation import w2k, k2w #@UnusedImport
__all__ =['w2k', 'k2w', 'sensor_typeid', 'sensor_type', 'TransferFunction']
def hyperbolic_ratio(a, b, sa, sb): def hyperbolic_ratio(a, b, sa, sb):
''' '''
@ -474,90 +476,90 @@ class TransferFunction(object):
zk = self._get_zk(kw) zk = self._get_zk(kw)
return hyperbolic_ratio(zk, hk, -1, -1), ee # sinh(zk)./sinh(hk), ee return hyperbolic_ratio(zk, hk, -1, -1), ee # sinh(zk)./sinh(hk), ee
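The tran code above leans on hyperbolic_ratio for factors such as sinh(zk)/sinh(hk) and cosh(zk)/cosh(hk). Judging only from the sign arguments used in this file, a naive reading of the call is sketched below; the actual wafo routine presumably works with exponentials to stay finite for large arguments, which this simple version does not:

import numpy as np

def hyperbolic_ratio_naive(a, b, sa, sb):
    # sa, sb = +1 selects cosh and -1 selects sinh, so (1, 1) gives cosh(a)/cosh(b)
    # and (-1, -1) gives sinh(a)/sinh(b), matching the comments in this diff.
    num = np.cosh(a) if sa > 0 else np.sinh(a)
    den = np.cosh(b) if sb > 0 else np.sinh(b)
    return num / den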
def wave_pressure(z, Hm0, h=10000, g=9.81, rho=1028): #def wave_pressure(z, Hm0, h=10000, g=9.81, rho=1028):
''' # '''
Calculate pressure amplitude due to water waves. # Calculate pressure amplitude due to water waves.
#
Parameters # Parameters
---------- # ----------
z : array-like # z : array-like
depth where pressure is calculated [m] # depth where pressure is calculated [m]
Hm0 : array-like # Hm0 : array-like
significant wave height (same as the average of the 1/3'rd highest # significant wave height (same as the average of the 1/3'rd highest
waves in a seastate. [m] # waves in a seastate. [m]
h : real scalar # h : real scalar
waterdepth (default 10000 [m]) # waterdepth (default 10000 [m])
g : real scalar # g : real scalar
acceleration of gravity (default 9.81 m/s**2) # acceleration of gravity (default 9.81 m/s**2)
rho : real scalar # rho : real scalar
water density (default 1028 kg/m**3) # water density (default 1028 kg/m**3)
#
#
Returns # Returns
------- # -------
p : ndarray # p : ndarray
pressure amplitude due to water waves at water depth z. [Pa] # pressure amplitude due to water waves at water depth z. [Pa]
#
PRESSURE calculate pressure amplitude due to water waves according to # PRESSURE calculate pressure amplitude due to water waves according to
linear theory. # linear theory.
#
Example # Example
----- # -----
>>> import pylab as plt # >>> import pylab as plt
>>> z = -np.linspace(10,20) # >>> z = -np.linspace(10,20)
>>> fh = plt.plot(z, wave_pressure(z, Hm0=1, h=20)) # >>> fh = plt.plot(z, wave_pressure(z, Hm0=1, h=20))
>>> plt.show() # >>> plt.show()
#
See also # See also
-------- # --------
w2k # w2k
#
#
u = psweep.Fn*sqrt(mgf.length*9.81) # u = psweep.Fn*sqrt(mgf.length*9.81)
z = -10; h = inf; # z = -10; h = inf;
Hm0 = 1.5;Tp = 4*sqrt(Hm0); # Hm0 = 1.5;Tp = 4*sqrt(Hm0);
S = jonswap([],[Hm0,Tp]); # S = jonswap([],[Hm0,Tp]);
Hw = tran(S.w,0,[0 0 -z],'P',h) # Hw = tran(S.w,0,[0 0 -z],'P',h)
Sm = S; # Sm = S;
Sm.S = Hw.'.*S.S; # Sm.S = Hw.'.*S.S;
x1 = spec2sdat(Sm,1000); # x1 = spec2sdat(Sm,1000);
pwave = pressure(z,Hm0,h) # pwave = pressure(z,Hm0,h)
#
plot(psweep.x{1}/u, psweep.f) # plot(psweep.x{1}/u, psweep.f)
hold on # hold on
plot(x1(1:100,1)-30,x1(1:100,2),'r') # plot(x1(1:100,1)-30,x1(1:100,2),'r')
''' # '''
#
#
# Assume seastate with jonswap spectrum: # # Assume seastate with jonswap spectrum:
#
Tp = 4 * np.sqrt(Hm0) # Tp = 4 * np.sqrt(Hm0)
gam = jonswap_peakfact(Hm0, Tp) # gam = jonswap_peakfact(Hm0, Tp)
Tm02 = Tp / (1.30301 - 0.01698 * gam + 0.12102 / gam) # Tm02 = Tp / (1.30301 - 0.01698 * gam + 0.12102 / gam)
w = 2 * np.pi / Tm02 # w = 2 * np.pi / Tm02
kw, unused_kw2 = w2k(w, 0, h) # kw, unused_kw2 = w2k(w, 0, h)
#
hk = kw * h # hk = kw * h
zk1 = kw * z # zk1 = kw * z
zk = hk + zk1 # z measured positive upward from mean water level (default) # zk = hk + zk1 # z measured positive upward from mean water level (default)
#zk = hk-zk1; % z measured positive downward from mean water level # #zk = hk-zk1; % z measured positive downward from mean water level
#zk1 = -zk1; # #zk1 = -zk1;
#zk = zk1; % z measured positive upward from sea floor # #zk = zk1; % z measured positive upward from sea floor
#
# cosh(zk)/cosh(hk) approx exp(zk) for large h # # cosh(zk)/cosh(hk) approx exp(zk) for large h
# hyperbolic_ratio(zk,hk,1,1) = cosh(zk)/cosh(hk) # # hyperbolic_ratio(zk,hk,1,1) = cosh(zk)/cosh(hk)
# pr = np.where(np.pi < hk, np.exp(zk1), hyperbolic_ratio(zk, hk, 1, 1)) # # pr = np.where(np.pi < hk, np.exp(zk1), hyperbolic_ratio(zk, hk, 1, 1))
pr = hyperbolic_ratio(zk, hk, 1, 1) # pr = hyperbolic_ratio(zk, hk, 1, 1)
pressure = (rho * g * Hm0 / 2) * pr # pressure = (rho * g * Hm0 / 2) * pr
#
# pos = [np.zeros_like(z),np.zeros_like(z),z] ## pos = [np.zeros_like(z),np.zeros_like(z),z]
# tf = TransferFunction(pos=pos, sensortype='p', h=h, rho=rho, g=g) ## tf = TransferFunction(pos=pos, sensortype='p', h=h, rho=rho, g=g)
# Hw, Gwt = tf.tran(w,0) ## Hw, Gwt = tf.tran(w,0)
# pressure2 = np.abs(Hw) * Hm0 / 2 ## pressure2 = np.abs(Hw) * Hm0 / 2
#
return pressure # return pressure
def main(): def main():
sensortype(range(21)) sensor_type(range(21))
if __name__ == '__main__': if __name__ == '__main__':
pass pass

@ -6,7 +6,7 @@ w2k - Translates from frequency to wave number
""" """
import warnings import warnings
#import numpy as np #import numpy as np
from numpy import (atleast_1d, sqrt, zeros_like, arctan2, where, tanh, any, #@UnresolvedImport from numpy import (atleast_1d, sqrt, ones_like, zeros_like, arctan2, where, tanh, any, #@UnresolvedImport
sin, cos, sign, inf, flatnonzero, finfo, cosh, abs) #@UnresolvedImport sin, cos, sign, inf, flatnonzero, finfo, cosh, abs) #@UnresolvedImport
__all__ = ['k2w', 'w2k'] __all__ = ['k2w', 'w2k']
@ -132,15 +132,15 @@ def w2k(w, theta=0.0, h=inf, g=9.81, count_limit=100):
-------- --------
k2w k2w
''' '''
wi, th, gi = atleast_1d(w, theta, g) wi, th, hi, gi = atleast_1d(w, theta, h, g)
if wi.size == 0: if wi.size == 0:
return zeros_like(wi) return zeros_like(wi)
k = 1.0*sign(wi)*wi**2.0 #% deep water k = 1.0*sign(wi)*wi**2.0 / gi[0] # deep water
if h > 10. ** 25: if (hi > 10. ** 25).all():
k2 = k*sin(th)/gi[-1] #%size np x nf k2 = k*sin(th)*gi[0]/gi[-1] #size np x nf
k1 = k*cos(th)/gi[0] k1 = k*cos(th)
return k1, k2 return k1, k2
@ -155,11 +155,11 @@ def w2k(w, theta=0.0, h=inf, g=9.81, count_limit=100):
eps = finfo(float).eps eps = finfo(float).eps
oshape = k.shape oshape = k.shape
wi, k = wi.ravel(), k.ravel() wi, k, hi = wi.ravel(), k.ravel(), hi.ravel()
# Newton's Method # Newton's Method
# Permit no more than count_limit iterations. # Permit no more than count_limit iterations.
hi = hi * ones_like(k)
hn = zeros_like(k) hn = zeros_like(k)
ix = find((wi<0) | (0<wi)) ix = find((wi<0) | (0<wi))
@ -170,7 +170,8 @@ def w2k(w, theta=0.0, h=inf, g=9.81, count_limit=100):
count = 0 count = 0
while (ix.size>0 and count < count_limit): while (ix.size>0 and count < count_limit):
ki = k[ix] ki = k[ix]
hn[ix] = (ki*tanh(ki*h)-wi[ix]**2.0/gi)/(tanh(ki*h)+ki*h/(cosh(ki*h)**2.0)) kh = ki * hi[ix]
hn[ix] = (ki*tanh(kh)-wi[ix]**2.0/gi)/(tanh(kh)+kh/(cosh(kh)**2.0))
knew = ki - hn[ix] knew = ki - hn[ix]
# Make sure that the current guess is not zero. # Make sure that the current guess is not zero.
# When Newton's Method suggests steps that lead to zero guesses # When Newton's Method suggests steps that lead to zero guesses
@ -181,7 +182,7 @@ def w2k(w, theta=0.0, h=inf, g=9.81, count_limit=100):
hn[ix[ksmall]] = ki[ksmall]-knew[ksmall] hn[ix[ksmall]] = ki[ksmall]-knew[ksmall]
k[ix] = knew k[ix] = knew
# disp(['Iteration ',num2str(count),' Number of points left: ' num2str(length(ix)) ]), # disp(['Iteration ',num2str(count),' Number of points left: ' num2str(length(ix)) ]),
ix = find((abs(hn) > sqrt(eps)*abs(k)) * abs(hn) > sqrt(eps)) ix = find((abs(hn) > sqrt(eps)*abs(k)) * abs(hn) > sqrt(eps))
count += 1 count += 1
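The Newton update in this hunk solves the finite-depth dispersion relation w**2 = g*k*tanh(k*h) for the wave number k, using f(k) = k*tanh(k*h) - w**2/g and its derivative tanh(k*h) + k*h/cosh(k*h)**2. A self-contained scalar sketch of the same iteration, starting from the deep-water guess k = w**2/g as the code above does:

import numpy as np

def w2k_scalar(w, h, g=9.81, count_limit=100):
    """Solve w**2 = g*k*tanh(k*h) for k with Newton's method (scalar sketch)."""
    k = w ** 2 / g                       # deep-water wave number as start guess
    eps = np.finfo(float).eps
    for _ in range(count_limit):
        kh = k * h
        step = (k * np.tanh(kh) - w ** 2 / g) / (np.tanh(kh) + kh / np.cosh(kh) ** 2)
        k -= step
        if abs(step) <= np.sqrt(eps) * abs(k):   # same flavour of stopping rule as above
            break
    return k

print(w2k_scalar(2 * np.pi / 10.0, h=20.0))      # a 10 s wave in 20 m of water: k ~ 0.05 1/m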
