Copied stineman_interp from pylab to interpolate.py and fixed the annoying divide-by-zero warnings.

Working on the LevelCrossings.extrapolate method (work in progress).

Updated doc in distributions.py
master
Per.Andreas.Brodtkorb 14 years ago
parent d2bd3dee53
commit 0a9293fc27

@@ -65,7 +65,7 @@ dtyest = Sest.to_t_pdf(pdef='Tt', paramt=(0, 10, 51), nit=3)
 T, index = ts.wave_periods(vh=0, pdef='d2u')
 bins = wm.good_bins(T, num_bins=25, odd=True)
-wm.plot_histgrm(T, bins=bins, scale=True)
+wm.plot_histgrm(T, bins=bins, normed=True)
 dtyex.plot()
 dtyest.plot('-.')

@@ -19,7 +19,7 @@ from numpy.lib.shape_base import vstack
 from numpy.lib.function_base import linspace
 import polynomial as pl
-__all__ =['PPform','SmoothSpline']
+__all__ = ['PPform', 'SmoothSpline', 'stineman_interp']

 class PPform(object):
     """The ppform of the piecewise polynomials is given in terms of coefficients
@@ -342,6 +342,139 @@ class SmoothSpline(PPform):
        return u.reshape(n - 2, -1), p
def slopes(x, y):
    """
    :func:`slopes` calculates the slope *y*'(*x*)

    The slope is estimated using the slope obtained from that of a
    parabola through any three consecutive points.

    This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
    W. Stineman (Creative Computing July 1980) in at least one aspect:
    circles for interpolation demand a known aspect ratio between
    *x*- and *y*-values. For many functions, however, the abscissa
    are given in different dimensions, so an aspect ratio is
    completely arbitrary.

    The parabola method gives very similar results to the circle
    method for most regular cases but behaves much better in special
    cases.

    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de

    (inspired by an original implementation by Halldor Bjornsson,
    Icelandic Meteorological Office, March 2006 halldor at vedur.is)
    """
    # Cast key variables as float.
    x = np.asarray(x, np.float_)
    y = np.asarray(y, np.float_)
    yp = np.zeros(y.shape, np.float_)

    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    dydx = dy / dx
    yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1]) / (dx[1:] + dx[:-1])
    yp[0] = 2.0 * dy[0] / dx[0] - yp[1]
    yp[-1] = 2.0 * dy[-1] / dx[-1] - yp[-2]
    return yp
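
Because the estimate comes from a parabola through three consecutive points, it is exact for any quadratic, endpoints included, which gives a quick sanity check. A minimal sketch (assumes numpy imported as np and slopes as defined above):

    import numpy as np

    x = np.array([0., 1., 3., 4., 7.])   # deliberately non-uniform abscissa
    y = x ** 2                           # true derivative is 2*x

    # The three-point parabola estimate reproduces the derivative of a
    # quadratic exactly, including at the endpoints.
    assert np.allclose(slopes(x, y), 2 * x)
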
def stineman_interp(xi, x, y, yp=None):
    """
    Given data vectors *x* and *y*, the slope vector *yp* and a new
    abscissa vector *xi*, the function :func:`stineman_interp` uses
    Stineman interpolation to calculate a vector *yi* corresponding to
    *xi*.

    Here's an example that generates a coarse sine curve, then
    interpolates over a finer abscissa::

        x = linspace(0, 2*pi, 20); y = sin(x); yp = cos(x)
        xi = linspace(0, 2*pi, 40)
        yi = stineman_interp(xi, x, y, yp)
        plot(x, y, 'o', xi, yi)

    The interpolation method is described in the article A
    CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
    W. Stineman. The article appeared in the July 1980 issue of
    Creative Computing with a note from the editor stating that while
    they were

        not an academic journal but once in a while something serious
        and original comes in

    adding that this was "apparently a real solution" to a well known
    problem.

    For *yp* = *None*, the routine automatically determines the slopes
    using the :func:`slopes` routine.

    *x* is assumed to be sorted in increasing order.

    For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
    tries an extrapolation. The relevance of the data obtained from
    this, of course, is questionable...

    Original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is

    Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
    """
    # Cast key variables as float.
    x = np.asarray(x, np.float_)
    y = np.asarray(y, np.float_)
    assert x.shape == y.shape
    if yp is None:
        yp = slopes(x, y)
    else:
        yp = np.asarray(yp, np.float_)
    xi = np.asarray(xi, np.float_)

    # calculate linear slopes
    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    s = dy / dx  # note: length of s is N-1, so its last index is N-2

    # find the segment each xi is in
    # this line actually is the key to the efficiency of this implementation
    idx = np.searchsorted(x[1:-1], xi)

    # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1],
    # except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]

    # the y-values that would come out from a linear interpolation:
    sidx = s.take(idx)
    xidx = x.take(idx)
    yidx = y.take(idx)
    xidxp1 = x.take(idx + 1)
    yo = yidx + sidx * (xi - xidx)

    # the difference that comes when using the slopes given in yp
    dy1 = (yp.take(idx) - sidx) * (xi - xidx)        # using the yp slope of the left point
    dy2 = (yp.take(idx + 1) - sidx) * (xi - xidxp1)  # using the yp slope of the right point
    dy1dy2 = dy1 * dy2

    # The following is optimized for Python. The solution actually
    # does more calculations than necessary, but exploiting the power
    # of numpy, this is far more efficient than coding a loop by hand
    # in Python.
    dy1mdy2 = np.where(dy1dy2, dy1 - dy2, np.inf)
    dy1pdy2 = np.where(dy1dy2, dy1 + dy2, np.inf)
    yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32) + 1,
                                 ((2 * xi - xidx - xidxp1) / (dy1mdy2 * (xidxp1 - xidx)),
                                  0.0,
                                  1 / dy1pdy2))
    return yi
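
A runnable version of the docstring example, assuming matplotlib is available; pass yp=None to let slopes() estimate the derivatives instead of supplying the exact cosine:

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(0, 2 * np.pi, 20)
    y = np.sin(x)
    yp = np.cos(x)                       # exact slopes of the coarse curve

    xi = np.linspace(0, 2 * np.pi, 40)
    yi = stineman_interp(xi, x, y, yp)   # Stineman interpolant through the coarse data

    plt.plot(x, y, 'o', xi, yi)
    plt.show()
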
def test_smoothing_spline():
    x = linspace(0, 2 * pi + pi / 4, 20)
    y = sin(x)  # + np.random.randn(x.size)

@@ -1973,7 +1973,7 @@ def good_bins(data=None, range=None, num_bins=None, num_data=None, odd=False, lo
    array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5])
    '''
-   if data:
+   if data is not None:
        x = np.atleast_1d(data)
        num_data = len(x)

@@ -15,7 +15,7 @@
 from __future__ import division
 from wafo.transform.core import TrData
 from wafo.transform.models import TrHermite, TrOchi, TrLinear
-from wafo.stats import edf
+from wafo.stats import edf, distributions
 from wafo.misc import (nextpow2, findtp, findrfc, findtc, findcross,
                        ecross, JITImport, DotDict, gravity)
 from wafodata import WafoData
@@ -85,10 +85,10 @@ class LevelCrossings(WafoData):
                       xlab='Levels', ylab='Count',
                       plotmethod='semilogy',
                       plot_args=['b'],
-                      plot_args_children=['r--'],)
+                      plot_args_children=['r--'])
        options.update(**kwds)
        super(LevelCrossings, self).__init__(*args, **options)
+       self.intensity = kwds.get('intensity', False)
        self.sigma = kwds.get('sigma', None)
        self.mean = kwds.get('mean', None)
        #self.setplotter(plotmethod='step')
@@ -111,6 +111,226 @@ class LevelCrossings(WafoData):
            y = cmax * exp(-x ** 2 / 2.0)
            self.children = [WafoData(y, self.args)]
    def extrapolate(self, u_min=None, u_max=None, method='ml', dist='genpareto', plotflag=0):
        '''
        Returns an extrapolated level crossing spectrum

        Parameters
        ----------
        u_min, u_max : real scalars
            extrapolate below u_min and above u_max.
        method : string
            describing the method of estimation. Options are:
            'ml' : Maximum Likelihood method (default)
            'mps': Maximum Product Spacing method
        dist : string
            defining the distribution function. Options are:
            genpareto : Generalized Pareto distribution (GPD)
            expon : Exponential distribution (GPD with k=0)
            rayleigh : Rayleigh distribution
        plotflag : scalar integer
            1: Plot diagnostic plots.
            0: Don't plot diagnostic plots. (default)

        Returns
        -------
        lc : LevelCrossings object
            with the estimated level crossing spectrum
            Est = Estimated parameters. [struct array]

        Extrapolates the level crossing spectrum (LC) for high and for low
        levels. The tails of the LC are fitted to the survival function of
        a GPD:

            H(x) = (1-k*x/s)^(1/k)                  (genpareto)

        The use of the GPD is motivated by the peaks-over-threshold (POT)
        methods of extreme value theory. For k=0 the GPD reduces to the
        exponential distribution:

            H(x) = exp(-x/s)                        (expon)

        The tails can also be fitted with the survival function of a
        Rayleigh distribution:

            H(x) = exp(-((x+x0)^2-x0^2)/s^2)        (rayleigh)

        where x0 is the value where the LC has its maximum. The GPD with
        method 'ml' is recommended in general; the Rayleigh distribution
        should be used if the load is a Gaussian process.

        Example (from the Matlab WAFO function this method is ported from):
            S = jonswap;
            x = spec2sdat(S,100000,0.1,[],'random');
            lc = dat2lc(x); s = std(x(:,2));
            [lcEst,Est] = extralc(lc,s*[-2 2]);
            [lcEst,Est] = extralc(lc,s*[-2 2],'exp,ml');
            [lcEst,Est] = extralc(lc,s*[-2 2],'ray,ml');

        See also
        --------
        cmat2extralc, rfmextrapolate, lc2rfmextreme, extralc, fitgenpar

        References
        ----------
        Johannesson, P., and Thomas, J-.J. (2000):
        Extrapolation of Rainflow Matrices.
        Preprint 2000:82, Mathematical statistics, Chalmers, pp. 18.
        '''
        i_max = self.data.argmax()
        c_max = self.data[i_max]
        lc_max = self.args[i_max]  # level where the lc has its maximum

        if u_min is None or u_max is None:
            fraction = sqrt(c_max)
            i = np.flatnonzero(self.data > fraction)
            if u_min is None:
                u_min = self.args[i.min()]
            if u_max is None:
                u_max = self.args[i.max()]
        # TODO: the rest of this method is a work-in-progress port of the
        # Matlab function extralc; it is kept commented out until finished.

#        # Extrapolate LC for high levels
#        [lcEst.High, Est.High] = self._extrapolate(lc, u_max, u_max - lc_max, method, dist)
#
#        # Extrapolate LC for low levels
#        lc1 = [-flipud(lc(:,1)) flipud(lc(:,2))];
#        [lcEst1, Est1] = extrapolate(lc1, -u(1), method, lc_max - u(1));
#        lcEst.Low = [-flipud(lcEst1(:,1)) flipud(lcEst1(:,2:end))];
#        Est.Low = Est1;
#
#        if plotflag
#            semilogx(lc(:,2), lc(:,1), lcEst.High(:,2), lcEst.High(:,1),
#                     lcEst.Low(:,2), lcEst.Low(:,1))
#        end

#    def _extrapolate(self, lcx, lcf, u, offset, method, dist):
#        # Extrapolate the level crossing spectrum for high levels
#
#        method = method.lower()
#        dist = dist.lower()
#
#        # Exceedances over level u
#        Iu = lcx > u
#        lcx1, lcf1 = lcx[Iu], lcf[Iu]
#        lcf3, lcx3 = _make_increasing(lcf1[::-1], lcx1[::-1])
#
#        # -- remaining Matlab code, not yet ported --
#        # Corrected by PJ 17-Feb-2004
#        lc3 = [0 0; lc3];
#        x = [];
#        for k=2:length(lc3(:,1))
#            nk = lc3(k,2) - lc3(k-1,2);
#            x = [x; ones(nk,1)*lc3(k,1)];
#        end
#        x = x - u;
#
#        # Estimate tail
#        if dist == 'genpareto':
#            genpareto = distributions.genpareto
#            phat = genpareto.fit2(x, floc=0, method=method)
#
##            Est.k = phat.params(1);
##            Est.s = phat.params(2);
##            if isnan(phat.fixpar(3))
##                Est.cov = phat.covariance;
##            else
##                Est.cov = phat.covariance(1:2,1:2);
##            end
##            if Est.k > 0
##                Est.UpperLimit = u + Est.s/Est.k;
##            end
#
#            df = 0.01
#            xF = np.arange(0.0, 4 + df / 2, df)
#            F = phat.cdf(xF)
#            Est.lcu = interp1(lcx, lcf, u) + 1
#
#            # Calculate a 90% confidence region, an ellipse, for (k,s)
#            [B,D] = eig(Est.cov);
#            b = [Est.k; Est.s];
#
#            r = sqrt(-2*log(1-90/100));  # 90% confidence sphere
#            Nc = 16+1;
#            ang = linspace(0, 2*pi, Nc);
#            c0 = [r*sqrt(D(1,1))*sin(ang); r*sqrt(D(2,2))*cos(ang)];  # 90% circle
#            # plot(c0(1,:), c0(2,:))
#
#            c1 = B*c0 + b*ones(1, length(c0));  # transform to ellipse for (k,s)
#            # plot(c1(1,:), c1(2,:)), hold on
#
#            # Calculate conf. int. for lcu
#            # Assumption: lcu is Poisson distributed.
#            # The Poisson distribution is approximated by a normal
#            # distribution when calculating the conf. int.
#            dXX = 1.64*sqrt(Est.lcu);  # 90% quantile for lcu
#
#            lcEstCu = zeros(length(xF), Nc);
#            lcEstCl = lcEstCu;
#            for i=1:Nc
#                k = c1(1,i);
#                s = c1(2,i);
#                F2 = cdfgenpar(xF, k, s);
#                lcEstCu(:,i) = (Est.lcu + dXX)*(1-F2);
#                lcEstCl(:,i) = (Est.lcu - dXX)*(1-F2);
#            end
#
#            lcEstCI = [min(lcEstCl')' max(lcEstCu')'];
#            lcEst = [xF+u Est.lcu*(1-F) lcEstCI];
#
#        case 'exp'
#            n = length(x);
#            s = mean(x);
#            cov = s/n;
#            Est.s = s;
#            Est.cov = cov;
#
#            xF = (0.0:0.01:4)';
#            F = 1 - exp(-xF/s);
#            Est.lcu = interp1(lc(:,1), lc(:,2), u) + 1;
#            lcEst = [xF+u Est.lcu*(1-F)];
#
#        case 'ray'
#            n = length(x);
#            Sx = sum((x+offset).^2 - offset^2);
#            s = sqrt(Sx/n);  # shape parameter
#            Est.s = s;
#            Est.cov = NaN;
#
#            xF = (0.0:0.01:4)';
#            F = 1 - exp(-((xF+offset).^2 - offset^2)/s^2);
#            Est.lcu = interp1(lc(:,1), lc(:,2), u) + 1;
#            lcEst = [xF+u Est.lcu*(1-F)];
#
#        otherwise
#            error(['Unknown method: ' method]);
#        end
#        ## End _extrapolate

#    def _make_increasing(f, t=None):
#        # Makes the signal f strictly increasing.
#        n = len(f)
#        if t is None:
#            t = np.arange(n)
#        ff = [f[0]]
#        tt = [t[0]]
#        for i in xrange(1, n):
#            if f[i] > ff[-1]:
#                ff.append(f[i])
#                tt.append(t[i])
#        return np.asarray(ff), np.asarray(tt)
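
For orientation, here is a minimal, hedged Python sketch of the high-tail step the commented block above works toward: fit a GPD to the exceedances over the threshold and rescale its survival function by the crossing count at that level. It uses scipy.stats.genpareto as a stand-in for wafo's fit2-based estimation; the helper name extrapolate_high_sketch is hypothetical, not part of WAFO:

    import numpy as np
    from scipy.stats import genpareto
    from scipy.interpolate import interp1d

    def extrapolate_high_sketch(lcx, lcf, u, df=0.01):
        # Expand the count increments of the tail of the crossing spectrum
        # into a pseudo-sample of exceedances over the level u
        # (cf. the _make_increasing step in the comments above).
        ix = lcx > u
        lev = lcx[ix][::-1]                     # levels above u, descending
        cnt = lcf[ix][::-1]                     # counts, now (roughly) increasing
        nk = np.diff(np.r_[0., cnt])            # count increment at each level
        nk = np.round(nk).clip(0).astype(int)   # crude stand-in for _make_increasing
        x = np.repeat(lev, nk) - u              # exceedances over u

        # ML fit of the GPD with location fixed at zero, as in the comments.
        k, loc, s = genpareto.fit(x, floc=0)

        xF = np.arange(0.0, 4 + df / 2, df)
        F = genpareto.cdf(xF, k, loc=loc, scale=s)
        lcu = interp1d(lcx, lcf)(u) + 1         # crossing count at the level u
        return xF + u, lcu * (1 - F)            # extrapolated high tail of the LC
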
    def sim(self, ns, alpha):
        """
        Simulates process with given irregularity factor and crossing spectrum

@@ -142,25 +362,30 @@ class LevelCrossings(WafoData):
        >>> mm = tp.cycle_pairs()
        >>> lc = mm.level_crossings()

-       xs2 = lc.sim(n,alpha)
-       ts2 = mat2timeseries(xs2)
-       Se = ts2.tospecdata()
-       S.plot('b')
-       Se.plot('r')
-       alpha2 = Se.characteristic('alpha')[0]
-       alpha-alpha2
-       spec2char(Se,'alpha')
-       lc2 = dat2lc(xs2)
-       figure(gcf+1)
-       subplot(211)
-       lcplot(lc2)
-       subplot(212)
-       lcplot(lc)
+       >>> xs2 = lc.sim(n,alpha)
+       >>> ts2 = mat2timeseries(xs2)
+       >>> Se = ts2.tospecdata(L=324)
+       >>> alpha2 = Se.characteristic('alpha')[0]
+       >>> alpha2
+       array([ 0.68382343])
+       >>> alpha-alpha2
+       array([ 0.01620704])
+
+       >>> h0 = S.plot('b')
+       >>> h1 = Se.plot('r')
+
+       >>> lc2 = ts2.turning_points().cycle_pairs().level_crossings()
+
+       >>> import pylab as plt
+       >>> h = plt.subplot(211)
+       >>> h2 = lc2.plot()
+       >>> h = plt.subplot(212)
+       >>> h0 = lc.plot()
        """
-       # TODO % add a good example
+       # TODO: add a good example
        f = linspace(0, 0.49999, 1000)
        rho_st = 2. * sin(f * pi) ** 2 - 1.
        tmp = alpha * arcsin(sqrt((1. + rho_st) / 2))
@@ -179,13 +404,13 @@ class LevelCrossings(WafoData):
        sigma2 = r0 + a1 * r1 + a2 * r2
        #randn = np.random.randn
        e = randn(ns) * sqrt(sigma2)
-       e[:1] = 0.0
+       e[:2] = 0.0
        L0 = randn(1)
-       L0 = vstack((L0, r1 * L0 + sqrt(1 - r2 ** 2) * randn(1)))
+       L0 = hstack((L0, r1 * L0 + sqrt(1 - r2 ** 2) * randn(1)))
        #%Simulate the process, starting in L0
        lfilter = scipy.signal.lfilter
-       # TODO: lfilter crashes the example
-       L = lfilter(ones(1), [1, a1, a2], e, lfilter([1, a1, a2], ones(1), L0))
+       z0 = lfilter([1, a1, a2], ones(1), L0)
+       L, unused_zf = lfilter(ones(1), [1, a1, a2], e, axis=0, zi=z0)

        epsilon = 1.01
        min_L = min(L)
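
The fix above seeds lfilter with an initial filter state zi instead of passing the start values as a positional argument, which is what made the old call crash. A self-contained sketch of the same pattern, with arbitrary stable AR(2) coefficients:

    import numpy as np
    from scipy.signal import lfilter

    a1, a2 = -1.0, 0.5              # arbitrary stable AR(2) coefficients
    e = np.random.randn(100)        # driving noise
    L0 = np.random.randn(2)         # two start values of the process

    # Map the start values to an initial filter state, then run the
    # recursion y[n] = e[n] - a1*y[n-1] - a2*y[n-2] from that state.
    z0 = lfilter([1, a1, a2], [1.0], L0)
    y, zf = lfilter([1.0], [1, a1, a2], e, zi=z0)

scipy.signal also provides lfiltic(b, a, y) for deriving zi directly from past outputs, which is an alternative way to express the same initialization.
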
@@ -213,20 +438,24 @@ class LevelCrossings(WafoData):
        sumcr = trapz(self.data, self.args)
        lc = self.data / sumcr
        lc1 = self.args
-       mcr = trapz(lc1 * lc, lc1)
-       scr = trapz(lc1 ** 2 * lc, lc1)
-       scr = sqrt(scr - mcr ** 2)
-       lc2 = LevelCrossings(lc, lc1, mean=mcr, sigma=scr)
-       g = lc2.trdata()
-       #f = [u, u]
-       f = g.dat2gauss(Z)
+       mcr = trapz(lc1 * lc, lc1) if self.mean is None else self.mean
+       if self.sigma is None:
+           scr = trapz(lc1 ** 2 * lc, lc1)
+           scr = sqrt(scr - mcr ** 2)
+       else:
+           scr = self.sigma
+       lc2 = LevelCrossings(lc, lc1, mean=mcr, sigma=scr, intensity=True)
+       g = lc2.trdata()[0]
+       f = g.gauss2dat(Z)
        G = TrData(f, u)

        process = G.dat2gauss(L)
        return np.vstack((arange(len(process)), process)).T
##
##
## %Check the result without reference to getrfc:
@@ -391,7 +620,13 @@ class LevelCrossings(WafoData):
            cor2 = 0

        lc22 = hstack((0, cumtrapz(lc2, lc1) + cor1))

-       lc22 = (lc22 + 0.5) / (lc22[-1] + cor2 + 1)
+       if self.intensity:
+           lc22 = (lc22 + 0.5 / ncr) / (lc22[-1] + cor2 + 1. / ncr)
+       else:
+           lc22 = (lc22 + 0.5) / (lc22[-1] + cor2 + 1)

        lc11 = (lc1 - mean) / sigma
        lc22 = invnorm(lc22)  #- ymean
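
The branch above converts cumulative crossing counts to empirical probabilities and then to the Gaussian scale; invnorm is the inverse of the standard normal CDF. A tiny illustration of that transform with scipy (the counts are made up):

    import numpy as np
    from scipy.special import ndtri   # inverse of the standard normal CDF

    counts = np.array([2., 15., 40., 15., 2.])   # made-up crossing counts
    cum = np.cumsum(counts)                      # stand-in for the cumtrapz step
    p = (cum + 0.5) / (cum[-1] + 1)              # probabilities strictly inside (0, 1)
    g = ndtri(p)                                 # values on the standard Gaussian scale
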
@@ -603,7 +838,7 @@ class CyclePairs(WafoData):
        if intensity:
            dcount = dcount/self.time
            ylab = 'Intensity [count/sec]'
-       return LevelCrossings(dcount, levels, mean=self.mean, sigma=self.sigma, ylab=ylab)
+       return LevelCrossings(dcount, levels, mean=self.mean, sigma=self.sigma, ylab=ylab, intensity=intensity)

class TurningPoints(WafoData):
    '''
@@ -680,7 +915,7 @@ class TurningPoints(WafoData):
            t = self.args[ind]
        except:
            t = ind
-       mean = self.mean()
+       mean = self.mean
        sigma = self.sigma
        return TurningPoints(self.data[ind], t, mean=mean, sigma=sigma)

@@ -15,7 +15,7 @@ from scipy.integrate import simps, trapz
 from scipy.special import erf
 from scipy.linalg import toeplitz
 import scipy.interpolate as interpolate
-from pylab import stineman_interp
+from wafo.interpolate import stineman_interp
 from dispersion_relation import w2k  #, k2w

 from wafo.wafodata import WafoData, now
@@ -1530,16 +1530,7 @@ class SpecData1D(WafoData):
        else:
            for i in range(cases):
                x[:, i + 1] = g.gauss2dat(x[:, i + 1])
-       # G = fliplr(g) #% the invers of g
-       # if derivative:
-       #     for i in range(cases):
-       #         tmp = tranproc(hstack((x[:, i + 1], xder[:, i + 1])), G)
-       #         x[:, i + 1] = tmp[:, 0]
-       #         xder[:, i + 1] = tmp[:, 1]
-       # else:
-       #     for i in range(cases):
-       #         x[:, i + 1] = tranproc(x[:, i + 1], G)

        if derivative:
            return x, xder
@@ -2249,13 +2240,11 @@ class SpecData1D(WafoData):
        Parameters
        ----------
-       dt : scalar
+       dt : real scalar
            wanted sampling interval (default as given by S, see spec2dt)
            unit: [s] if frequency-spectrum, [m] if wave number spectrum
-       Nmin : scalar
-           minimum number of frequencies.
-       Nmax : scalar
-           minimum number of frequencies
+       Nmin, Nmax : scalar integers
+           minimum and maximum number of frequencies, respectively.
        method : string
            interpolation method (options are 'linear', 'cubic' or 'stineman')
@@ -2281,15 +2270,14 @@ class SpecData1D(WafoData):
        n = w.size
        #%doInterpolate = 0

-       if ftype == 'f':
-           Cnf2dt = 0.5  # Nyquist to sampling interval factor
-       else:  #% ftype == w or ftype == k
-           Cnf2dt = pi
+       # Nyquist to sampling interval factor
+       Cnf2dt = 0.5 if ftype == 'f' else pi  #% ftype == w or ftype == k

        wnOld = w[-1]  # Old Nyquist frequency
        dTold = Cnf2dt / wnOld  # sampling interval=1/Fs
-       #dTold = self.sampling_period()

        if dt is None:
            dt = dTold
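
As a quick sanity check on the factor above: for an ordinary frequency axis the sampling interval is dt = 1/(2*fN), while for angular frequency (or wave number) it is dt = pi/wN; both give the same dt when wN = 2*pi*fN. Illustrative values:

    from numpy import pi

    fN = 2.5                    # Nyquist frequency [Hz], illustrative
    wN = 2 * pi * fN            # the same limit in angular frequency [rad/s]

    dt_f = 0.5 / fN             # the Cnf2dt = 0.5 branch
    dt_w = pi / wN              # the Cnf2dt = pi branch
    assert abs(dt_f - dt_w) < 1e-12   # both give dt = 0.2 s
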
@@ -2334,7 +2322,6 @@ class SpecData1D(WafoData):
            S1 = hstack((zeros(Nz), S1))

        #% Do a final check on spacing in order to check that the gridding is
        #% sufficiently dense:
        #np1 = S1.size

@@ -2356,26 +2356,45 @@ class rv_continuous(rv_generic):
        Parameters
        ----------
        data : array-like
-           data used in fitting
-       arg1, arg2, arg3,... : array-like
-           initial non-fixed distribution parameter-values
-           including loc and scale (see docstring of the
-           instance object for more information)
-       method : String, optional
-           describing the method of estimation. Options are
+           Data to use in calculating the ML or MPS estimators.
+       args : optional
+           Starting values for any shape arguments (those not specified
+           will be determined by dist._fitstart(data)).
+       kwds : loc, scale
+           Starting values for the location and scale parameters.
+           Special keyword arguments are recognized as holding certain
+           parameters fixed:
+           f0..fn : hold respective shape parameters fixed
+           floc : hold location parameter fixed to specified value
+           fscale : hold scale parameter fixed to specified value
+       method : string, optional
+           Method of estimation. Options are:
            'ml' : Maximum Likelihood method (default)
            'mps': Maximum Product Spacing method
        alpha : scalar, optional
            Confidence coefficient (default=0.05)
-       par_fix : list, optional
-           List of fixed parameters. Non-fixed parameters must be given
-           as NaN's. (Must have the same length as the number of
-           parameters or be None.) (default None)
        search : bool
            If true search for best estimator (default),
            otherwise return object with initial distribution parameters
        copydata : bool
            If true copydata (default)
+       optimizer : callable, optional
+           The optimizer to use. The optimizer must take func
+           and starting position as the first two arguments,
+           plus args (for extra arguments to pass to the
+           function to be optimized) and disp=0 to suppress
+           output as keyword arguments.

+       Returns
+       -------
+       phat : FitDistribution object
+           Fitted distribution object with the following member variables:
+           LLmax : log-likelihood function evaluated using par
+           LPSmax : log product spacing function evaluated using par
+           pvalue : p-value for the fit
+           par : distribution parameters (fixed and fitted)
+           par_cov : covariance of distribution parameters
+           par_fix : fixed distribution parameters
+           par_lower : lower (1-alpha)% confidence bound for the parameters
+           par_upper : upper (1-alpha)% confidence bound for the parameters

        Note
        ----

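A hedged usage sketch of the interface documented above, mirroring the genpareto.fit2 call that appears in the commented-out extrapolate() code; the data and parameter values are illustrative only:

    import numpy as np
    from wafo.stats import distributions

    data = np.random.standard_exponential(500)   # illustrative sample

    # ML fit of a GPD with the location parameter held fixed at zero.
    phat = distributions.genpareto.fit2(data, floc=0, method='ml', alpha=0.05)

    print(phat.par)         # fitted (and fixed) parameters
    print(phat.par_lower)   # lower (1-alpha)% confidence bounds
    print(phat.par_upper)   # upper (1-alpha)% confidence bounds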