Added test_distributions.py

Updated test_estimation.py
master
Per.Andreas.Brodtkorb 14 years ago
parent 85792549d1
commit 111d6ce808

test_distributions.py
@@ -0,0 +1,567 @@
""" Test functions for stats module
"""
from numpy.testing import TestCase, run_module_suite, assert_equal, \
    assert_array_equal, assert_almost_equal, assert_array_almost_equal, \
    assert_, rand, dec
import numpy
import numpy as np
from numpy import typecodes, array
import wafo.stats as stats
from wafo.stats.distributions import argsreduce


def kolmogorov_check(diststr, args=(), N=20, significance=0.01):
    qtest = stats.ksoneisf(significance, N)
    cdf = eval('stats.' + diststr + '.cdf')
    dist = eval('stats.' + diststr)
    # Get random numbers
    kwds = {'size': N}
    vals = numpy.sort(dist.rvs(*args, **kwds))
    cdfvals = cdf(vals, *args)
    q = max(abs(cdfvals - np.arange(1.0, N + 1) / N))
    assert_(q < qtest,
            msg="Failed q=%f, bound=%f, alpha=%f" % (q, qtest, significance))
    return
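
# A minimal usage sketch for kolmogorov_check (the distribution names and the
# shape argument below are only illustrative): draw N variates, compare their
# empirical CDF with the theoretical one, and require the KS statistic to stay
# below the one-sided critical value from stats.ksoneisf.
#
#     kolmogorov_check('norm', args=(), N=50, significance=0.01)
#     kolmogorov_check('gamma', args=(2.0,), N=50)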

# generate test cases to test cdf and distribution consistency
dists = ['uniform', 'norm', 'lognorm', 'expon', 'beta',
         'powerlaw', 'bradford', 'burr', 'fisk', 'cauchy', 'halfcauchy',
         'foldcauchy', 'gamma', 'gengamma', 'loggamma',
         'alpha', 'anglit', 'arcsine', 'betaprime', 'erlang',
         'dgamma', 'exponweib', 'exponpow', 'frechet_l', 'frechet_r',
         'gilbrat', 'f', 'ncf', 'chi2', 'chi', 'nakagami', 'genpareto',
         'genextreme', 'genhalflogistic', 'pareto', 'lomax', 'halfnorm',
         'halflogistic', 'fatiguelife', 'foldnorm', 'ncx2', 't', 'nct',
         'weibull_min', 'weibull_max', 'dweibull', 'maxwell', 'rayleigh',
         'genlogistic', 'logistic', 'gumbel_l', 'gumbel_r', 'gompertz',
         'hypsecant', 'laplace', 'reciprocal', 'triang', 'tukeylambda',
         'vonmises']


# check function for test generator
def check_distribution(dist, args, alpha):
    D, pval = stats.kstest(dist, '', args=args, N=1000)
    if (pval < alpha):
        D, pval = stats.kstest(dist, '', args=args, N=1000)
        #if (pval < alpha):
        #    D, pval = stats.kstest(dist, '', args=args, N=1000)
    assert_(pval > alpha, msg="D = " + str(D) + "; pval = " + str(pval) +
            "; alpha = " + str(alpha) + "\nargs = " + str(args))


# nose test generator
def test_all_distributions():
    for dist in dists:
        distfunc = getattr(stats, dist)
        nargs = distfunc.numargs
        alpha = 0.01
        if dist == 'fatiguelife':
            alpha = 0.001
        if dist == 'erlang':
            args = (4,) + tuple(rand(2))
        elif dist == 'frechet':
            args = tuple(2 * rand(1)) + (0,) + tuple(2 * rand(2))
        elif dist == 'triang':
            args = tuple(rand(nargs))
        elif dist == 'reciprocal':
            vals = rand(nargs)
            vals[1] = vals[0] + 1.0
            args = tuple(vals)
        elif dist == 'vonmises':
            yield check_distribution, dist, (10,), alpha
            yield check_distribution, dist, (101,), alpha
            args = tuple(1.0 + rand(nargs))
        else:
            args = tuple(1.0 + rand(nargs))
        yield check_distribution, dist, args, alpha


def check_vonmises_pdf_periodic(k, l, s, x):
    vm = stats.vonmises(k, loc=l, scale=s)
    assert_almost_equal(vm.pdf(x), vm.pdf(x % (2 * numpy.pi * s)))


def check_vonmises_cdf_periodic(k, l, s, x):
    vm = stats.vonmises(k, loc=l, scale=s)
    assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2 * numpy.pi * s)) % 1)


def test_vonmises_pdf_periodic():
    for k in [0.1, 1, 101]:
        for x in [0, 1, numpy.pi, 10, 100]:
            yield check_vonmises_pdf_periodic, k, 0, 1, x
            yield check_vonmises_pdf_periodic, k, 1, 1, x
            yield check_vonmises_pdf_periodic, k, 0, 10, x
            yield check_vonmises_cdf_periodic, k, 0, 1, x
            yield check_vonmises_cdf_periodic, k, 1, 1, x
            yield check_vonmises_cdf_periodic, k, 0, 10, x


class TestRandInt(TestCase):
    def test_rvs(self):
        vals = stats.randint.rvs(5, 30, size=100)
        assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
        assert_(len(vals) == 100)
        vals = stats.randint.rvs(5, 30, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.randint.rvs(15, 46)
        assert_((val >= 15) & (val < 46))
        assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))
        val = stats.randint(15, 46).rvs(3)
        assert_(val.dtype.char in typecodes['AllInteger'])

    def test_pdf(self):
        k = numpy.r_[0:36]
        out = numpy.where((k >= 5) & (k < 30), 1.0 / (30 - 5), 0)
        vals = stats.randint.pmf(k, 5, 30)
        assert_array_almost_equal(vals, out)

    def test_cdf(self):
        x = numpy.r_[0:36:100j]
        k = numpy.floor(x)
        out = numpy.select([k >= 30, k >= 5], [1.0, (k - 5.0 + 1) / (30 - 5.0)], 0)
        vals = stats.randint.cdf(x, 5, 30)
        assert_array_almost_equal(vals, out, decimal=12)


class TestBinom(TestCase):
    def test_rvs(self):
        vals = stats.binom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.binom.rvs(10, 0.75)
        assert_(isinstance(val, int))
        val = stats.binom(10, 0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])


class TestBernoulli(TestCase):
    def test_rvs(self):
        vals = stats.bernoulli.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.bernoulli.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.bernoulli(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])


class TestNBinom(TestCase):
    def test_rvs(self):
        vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.nbinom.rvs(10, 0.75)
        assert_(isinstance(val, int))
        val = stats.nbinom(10, 0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])


class TestGeom(TestCase):
    def test_rvs(self):
        vals = stats.geom.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.geom.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.geom(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        vals = stats.geom.pmf([1, 2, 3], 0.5)
        assert_array_almost_equal(vals, [0.5, 0.25, 0.125])

    def test_cdf_sf(self):
        vals = stats.geom.cdf([1, 2, 3], 0.5)
        vals_sf = stats.geom.sf([1, 2, 3], 0.5)
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(vals, expected)
        assert_array_almost_equal(vals_sf, 1 - expected)


class TestHypergeom(TestCase):
    def test_rvs(self):
        vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
        assert_(numpy.all(vals >= 0) &
                numpy.all(vals <= 3))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.hypergeom.rvs(20, 3, 10)
        assert_(isinstance(val, int))
        val = stats.hypergeom(20, 3, 10).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])


class TestLogser(TestCase):
    def test_rvs(self):
        vals = stats.logser.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.logser.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.logser(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])


class TestPoisson(TestCase):
    def test_rvs(self):
        vals = stats.poisson.rvs(0.5, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.poisson.rvs(0.5)
        assert_(isinstance(val, int))
        val = stats.poisson(0.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])


class TestZipf(TestCase):
    def test_rvs(self):
        vals = stats.zipf.rvs(1.5, size=(2, 50))
        assert_(numpy.all(vals >= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.zipf.rvs(1.5)
        assert_(isinstance(val, int))
        val = stats.zipf(1.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])


class TestDLaplace(TestCase):
    def test_rvs(self):
        vals = stats.dlaplace.rvs(1.5, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.dlaplace.rvs(1.5)
        assert_(isinstance(val, int))
        val = stats.dlaplace(1.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])


class TestRvDiscrete(TestCase):
    def test_rvs(self):
        states = [-1, 0, 1, 2, 3, 4]
        probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
        samples = 1000
        r = stats.rv_discrete(name='sample', values=(states, probability))
        x = r.rvs(size=samples)
        assert_(isinstance(x, numpy.ndarray))
        for s, p in zip(states, probability):
            assert_(abs(sum(x == s) / float(samples) - p) < 0.05)
        x = r.rvs()
        assert_(isinstance(x, int))


class TestExpon(TestCase):
    def test_zero(self):
        assert_equal(stats.expon.pdf(0), 1)

    def test_tail(self):  # Regression test for ticket 807
        assert_equal(stats.expon.cdf(1e-18), 1e-18)
        assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)


class TestGenExpon(TestCase):
    def test_pdf_unity_area(self):
        from scipy.integrate import simps
        # PDF should integrate to one
        assert_almost_equal(simps(stats.genexpon.pdf(numpy.arange(0, 10, 0.01),
                                                     0.5, 0.5, 2.0),
                                  dx=0.01), 1, 1)

    def test_cdf_bounds(self):
        # CDF should always be positive
        cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_(numpy.all((0 <= cdf) & (cdf <= 1)))


class TestExponpow(TestCase):
    def test_tail(self):
        assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
        assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8), 5)


class TestSkellam(TestCase):
    def test_pmf(self):
        # comparison to R
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skpmfR = numpy.array(
            [4.2254582961926893e-005, 1.1404838449648488e-004,
             2.8979625801752660e-004, 6.9177078182101231e-004,
             1.5480716105844708e-003, 3.2412274963433889e-003,
             6.3373707175123292e-003, 1.1552351566696643e-002,
             1.9606152375042644e-002, 3.0947164083410337e-002,
             4.5401737566767360e-002, 6.1894328166820688e-002,
             7.8424609500170578e-002, 9.2418812533573133e-002,
             1.0139793148019728e-001, 1.0371927988298846e-001,
             9.9076583077406091e-002, 8.8546660073089561e-002,
             7.4187842052486810e-002, 5.8392772862200251e-002,
             4.3268692953013159e-002, 3.0248159818374226e-002,
             1.9991434305603021e-002, 1.2516877303301180e-002,
             7.4389876226229707e-003])
        assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)

    def test_cdf(self):
        # comparison to R, only 5 decimals
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skcdfR = numpy.array(
            [6.4061475386192104e-005, 1.7810985988267694e-004,
             4.6790611790020336e-004, 1.1596768997212152e-003,
             2.7077485103056847e-003, 5.9489760066490718e-003,
             1.2286346724161398e-002, 2.3838698290858034e-002,
             4.3444850665900668e-002, 7.4392014749310995e-002,
             1.1979375231607835e-001, 1.8168808048289900e-001,
             2.6011268998306952e-001, 3.5253150251664261e-001,
             4.5392943399683988e-001, 5.5764871387982828e-001,
             6.5672529695723436e-001, 7.4527195703032389e-001,
             8.1945979908281064e-001, 8.7785257194501087e-001,
             9.2112126489802404e-001, 9.5136942471639818e-001,
             9.7136085902200120e-001, 9.8387773632530240e-001,
             9.9131672394792536e-001])
        assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)


class TestHypergeomPrecision(TestCase):
    def test_precision(self):
        # comparison number from mpmath
        M = 2500
        n = 50
        N = 500
        tot = M
        good = n
        hgpmf = stats.hypergeom.pmf(2, tot, good, N)
        assert_almost_equal(hgpmf, 0.0010114963068932233, 11)


class TestChi2(TestCase):
    # regression tests after precision improvements, ticket:1041, not verified
    def test_precision(self):
        assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003, 14)
        assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778, 14)


class TestArrayArgument(TestCase):  # test for ticket:992
    def test_noexception(self):
        rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5), size=(10, 5))
        assert_equal(rvs.shape, (10, 5))


class TestDocstring(TestCase):
    def test_docstrings(self):
        """See ticket #761"""
        if stats.rayleigh.__doc__ is not None:
            self.assertTrue("rayleigh" in stats.rayleigh.__doc__.lower())
        if stats.bernoulli.__doc__ is not None:
            self.assertTrue("bernoulli" in stats.bernoulli.__doc__.lower())


class TestEntropy(TestCase):
    def test_entropy_positive(self):
        """See ticket #497"""
        pk = [0.5, 0.2, 0.3]
        qk = [0.1, 0.25, 0.65]
        eself = stats.entropy(pk, pk)
        edouble = stats.entropy(pk, qk)
        assert_(0.0 == eself)
        assert_(edouble >= 0.0)
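

# argsreduce(cond, *args) appears to broadcast every argument against the
# boolean condition and return, for each argument, only the elements where the
# condition holds (scalars are expanded to the matching length).  A minimal
# sketch of that reading, with hypothetical values:
#
#     argsreduce(array([1, 3, 2]) > 1, array([1, 3, 2]), 5)
#     --> [array([3, 2]), array([5, 5])]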
def TestArgsreduce():
    a = array([1, 3, 2, 1, 2, 3, 3])
    b, c = argsreduce(a > 1, a, 2)
    assert_array_equal(b, [3, 2, 2, 3, 3])
    assert_array_equal(c, [2, 2, 2, 2, 2])
    b, c = argsreduce(2 > 1, a, 2)
    assert_array_equal(b, a[0])
    assert_array_equal(c, [2])
    b, c = argsreduce(a > 0, a, 2)
    assert_array_equal(b, a)
    assert_array_equal(c, [2] * numpy.size(a))


class TestFitMethod(TestCase):
    skip = ['ncf']

    @dec.slow
    def test_fit(self):
        for func, dist, args, alpha in test_all_distributions():
            if dist in self.skip:
                continue
            distfunc = getattr(stats, dist)
            res = distfunc.rvs(*args, **{'size': 200})
            vals = distfunc.fit(res)
            vals2 = distfunc.fit(res, optimizer='powell')
            # Only check the length of the return
            # FIXME: should check the actual results to see if we are 'close'
            #   to what was created --- but what is 'close' enough
            if dist in ['erlang', 'frechet']:
                assert_(len(vals) == len(args))
                assert_(len(vals2) == len(args))
            else:
                assert_(len(vals) == 2 + len(args))
                assert_(len(vals2) == 2 + len(args))

    @dec.slow
    def test_fix_fit(self):
        for func, dist, args, alpha in test_all_distributions():
            # Not sure why 'ncf', and 'beta' are failing
            # erlang and frechet have different len(args) than distfunc.numargs
            if dist in self.skip + ['erlang', 'frechet', 'beta']:
                continue
            distfunc = getattr(stats, dist)
            res = distfunc.rvs(*args, **{'size': 200})
            vals = distfunc.fit(res, floc=0)
            vals2 = distfunc.fit(res, fscale=1)
            assert_(len(vals) == 2 + len(args))
            assert_(vals[-2] == 0)
            assert_(vals2[-1] == 1)
            assert_(len(vals2) == 2 + len(args))
            if len(args) > 0:
                vals3 = distfunc.fit(res, f0=args[0])
                assert_(len(vals3) == 2 + len(args))
                assert_(vals3[0] == args[0])
            if len(args) > 1:
                vals4 = distfunc.fit(res, f1=args[1])
                assert_(len(vals4) == 2 + len(args))
                assert_(vals4[1] == args[1])
            if len(args) > 2:
                vals5 = distfunc.fit(res, f2=args[2])
                assert_(len(vals5) == 2 + len(args))
                assert_(vals5[2] == args[2])


class TestFrozen(TestCase):
    """Test that a frozen distribution gives the same results as the original object.

    Only tested for the normal distribution (with loc and scale specified)
    and for the gamma distribution (with a shape parameter specified).
    """
    def test_norm(self):
        dist = stats.norm
        frozen = stats.norm(loc=10.0, scale=3.0)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2)
        assert_equal(result_f, result)

    def test_gamma(self):
        a = 2.0
        dist = stats.gamma
        frozen = stats.gamma(a)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, a)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(a)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(a)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(a)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(a)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(a)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, a)
        assert_equal(result_f, result)

    def test_regression_02(self):
        """Regression test for ticket #1293."""
        # Create a frozen distribution.
        frozen = stats.lognorm(1)
        # Call one of its methods that does not take any keyword arguments.
        m1 = frozen.moment(2)
        # Now call a method that takes a keyword argument.
        s = frozen.stats(moments='mvsk')
        # Call moment(2) again.
        # After calling stats(), the following was raising an exception.
        # So this test passes if the following does not raise an exception.
        m2 = frozen.moment(2)
        # The following should also be true, of course.  But it is not
        # the focus of this test.
        assert_equal(m1, m2)


def test_regression_ticket_1316():
    """Regression test for ticket #1316."""
    # The following was raising an exception, because _construct_default_doc()
    # did not handle the default keyword extradoc=None.  See ticket #1316.
    g = stats.distributions.gamma_gen(name='gamma')


if __name__ == "__main__":
    run_module_suite()

test_estimation.py
@@ -1,65 +1,48 @@
-'''
+# -*- coding:utf-8 -*-
+"""
 Created on 19. nov. 2010

 @author: pab
-'''
+"""
+from numpy import array, log
 import wafo.stats as ws
-from wafo.stats.estimation import Profile, FitDistribution
-from numpy import log, array
+from wafo.stats.estimation import FitDistribution, Profile

-def test_fit_and_profile():
+def test_profile():
     '''
     # MLE
     import wafo.stats as ws
-    R = ws.weibull_min.rvs(1,size=100)
-    >>> R = array([ 1.11214852, 0.55730247, 0.88241032, 0.13421021, 0.21939628,
-    ...    0.93901239, 4.55065051, 0.06728561, 1.65808964, 3.31725493,
-    ...    1.26022136, 0.48707756, 1.04600817, 3.45064509, 0.23478845,
-    ...    0.73197928, 0.78702608, 1.10566899, 0.08770387, 0.29773143,
-    ...    0.442589 , 0.06017045, 0.99086215, 0.36520787, 0.55931192,
-    ...    0.02220038, 0.09683231, 0.2407798 , 1.5550326 , 1.21834857,
-    ...    0.51317074, 3.12672982, 2.01984527, 1.42837289, 1.60665276,
-    ...    0.07643233, 0.97254151, 0.71870616, 0.15669964, 0.7647863 ,
-    ...    2.15586998, 1.03167507, 0.66384501, 0.26824186, 0.25953582,
-    ...    0.52773641, 0.3371869 , 1.17897776, 0.36094868, 0.49593992,
-    ...    0.12124638, 0.13178139, 0.28325022, 1.2189952 , 2.85895075,
-    ...    1.94868469, 1.5216953 , 0.77066953, 0.42610845, 1.52848366,
-    ...    0.26570905, 0.52072509, 1.03106309, 0.34430637, 1.36386879,
-    ...    1.05857501, 0.66754409, 3.18836655, 0.40063524, 0.40696207,
-    ...    1.24167978, 0.27927484, 3.95644924, 1.34793436, 0.66225993,
-    ...    0.46567866, 0.09325875, 1.90895739, 1.23831713, 0.29819745,
-    ...    1.10528719, 0.01973962, 0.56370415, 0.66831751, 1.33781636,
-    ...    0.53985288, 0.85251279, 1.75369891, 0.08066507, 0.29273944,
-    ...    0.38886539, 0.20961546, 0.93814728, 0.08732068, 1.6336713 ,
-    ...    1.38893923, 2.05882459, 0.51709862, 0.37553027, 0.06829269])
+    R = ws.weibull_min.rvs(1,size=20)
+    >>> R = array([ 0.08018795, 1.09015299, 2.08591271, 0.51542081, 0.75692042,
+    ...    0.57837017, 0.45419753, 1.1559131 , 0.26582267, 0.51517273,
+    ...    0.75008492, 0.59404957, 1.33748264, 0.14472142, 0.77459603,
+    ...    1.77312556, 1.06347991, 0.42007769, 0.71094628, 0.02366977])

     >>> phat = FitDistribution(ws.weibull_min, R, 1, scale=1, floc=0.0)
     >>> phat.par
-    array([ 1.07424857, 0. , 0.96282996])
+    array([ 1.37836487, 0. , 0.82085633])

     # Better CI for phat.par[i=0]
     >>> Lp = Profile(phat, i=0)
     >>> Lp.get_bounds(alpha=0.1)
-    array([ 0.94231243, 1.21444458])
+    array([ 1.00225064, 1.8159036 ])

     >>> SF = 1./990
     >>> x = phat.isf(SF)
     >>> x
-    5.8114656626008818
+    3.3323076459875312

     # CI for x
     >>> Lx = phat.profile(i=0, x=x, link=phat.dist.link)
     >>> Lx.get_bounds(alpha=0.2)
-    array([ 4.91172017, 7.08081584])
+    array([ 2.52211661, 4.98664787])

     # CI for logSF=log(SF)
-    >>> Lsf = phat.profile(i=0, logSF=log(SF), link=phat.dist.link)
+    >>> logSF = log(SF)
+    >>> Lsf = phat.profile(i=0, logSF=logSF, link=phat.dist.link, pmin=logSF-10,pmax=logSF+5)
     >>> Lsf.get_bounds(alpha=0.2)
-    array([-8.37580767, -5.66897775])
+    array([-10.87488318, -4.36225468])
     '''
+    pass

-if __name__ == "__main__":
+if __name__ == '__main__':
     import doctest
     doctest.testmod()