@@ -1450,7 +1450,7 @@ class Kernel(object):
                 break
             else:
                 ai = bi
-        y = np.asarray([fun(j) for j in x])
+        #y = np.asarray([fun(j) for j in x])
         #pylab.figure(1)
         #pylab.plot(x,y)
         #pylab.show()
@@ -1459,9 +1459,6 @@ class Kernel(object):
         try:
             t_star = optimize.brentq(fun, a=ai, b=bi)
         except:
-            # try:
-            #    t_star = optimize.bisect(fun, a=ai, b=bi+1)
-            # except:
             t_star = 0.28*N**(-2./5)
             warnings.warn('Failure in obtaining smoothing parameter')
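A minimal standalone sketch of the bracket-then-solve pattern the two hunks
above touch, assuming scipy is available. `solve_t_star` and its `fun`
argument are hypothetical stand-ins for the ISJ fixed-point equation that
Kernel.hisj actually solves; only the 0.28*N**(-2/5) fallback mirrors the
patched code:

    import warnings
    from scipy import optimize

    def solve_t_star(fun, x, N):
        # Scan the grid until fun changes sign, so [ai, bi] brackets a root.
        ai = bi = x[0]
        for bi in x[1:]:
            if fun(ai) * fun(bi) <= 0:
                break
            ai = bi
        try:
            # brentq requires fun(ai) and fun(bi) to have opposite signs.
            return optimize.brentq(fun, a=ai, b=bi)
        except Exception:
            # No usable bracket: fall back to the asymptotic rate, as in the patch.
            warnings.warn('Failure in obtaining smoothing parameter')
            return 0.28 * N ** (-2. / 5)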
@@ -2635,68 +2632,19 @@ def kde_demo3():
     pylab.figure(0)

-def kde_demo4(hs=None, fast=False):
-    '''
-    '''
-    N = 100
-    #ei = np.random.normal(loc=0, scale=0.075, size=(N,))
-    ei = np.array([-0.08508516, 0.10462496, 0.07694448, -0.03080661, 0.05777525,
-                   0.06096313, -0.16572389, 0.01838912, -0.06251845, -0.09186784,
-                   -0.04304887, -0.13365788, -0.0185279 , -0.07289167, 0.02319097,
-                   0.06887854, -0.08938374, -0.15181813, 0.03307712, 0.08523183,
-                   -0.0378058 , -0.06312874, 0.01485772, 0.06307944, -0.0632959 ,
-                   0.18963205, 0.0369126 , -0.01485447, 0.04037722, 0.0085057 ,
-                   -0.06912903, 0.02073998, 0.1174351 , 0.17599277, -0.06842139,
-                   0.12587608, 0.07698113, -0.0032394 , -0.12045792, -0.03132877,
-                   0.05047314, 0.02013453, 0.04080741, 0.00158392, 0.10237899,
-                   -0.09069682, 0.09242174, -0.15445323, 0.09190278, 0.07138498,
-                   0.03002497, 0.02495252, 0.01286942, 0.06449978, 0.03031802,
-                   0.11754861, -0.02322272, 0.00455867, -0.02132251, 0.09119446,
-                   -0.03210086, -0.06509545, 0.07306443, 0.04330647, 0.078111 ,
-                   -0.04146907, 0.05705476, 0.02492201, -0.03200572, -0.02859788,
-                   -0.05893749, 0.00089538, 0.0432551 , 0.04001474, 0.04888828,
-                   -0.17708392, 0.16478644, 0.1171006 , 0.11664846, 0.01410477,
-                   -0.12458953, -0.11692081, 0.0413047 , -0.09292439, -0.07042327,
-                   0.14119701, -0.05114335, 0.04994696, -0.09520663, 0.04829406,
-                   -0.01603065, -0.1933216 , 0.19352763, 0.11819496, 0.04567619,
-                   -0.08348306, 0.00812816, -0.00908206, 0.14528945, 0.02901065])
-    x = np.linspace(0, 1, N)
-
-    y0 = 2*np.exp(-x**2/(2*0.3**2))+3*np.exp(-(x-1)**2/(2*0.7**2))
-    y = y0 + ei
-    kreg = KRegression(x, y, p=0, hs=hs)
-    kreg.tkde.kernel.get_smooting = kreg.tkde.kernel.hste
-    if fast:
-        kreg.__call__ = kreg.eval_grid_fast
-
-    f = kreg(output='plot', title='Kernel regression', plotflag=1)
-    pylab.figure(0)
-    f.plot(label='p=0')
-
-    kreg.p=1
-    f1 = kreg(output='plot', title='Kernel regression', plotflag=1)
-    f1.plot(label='p=1')
-    pylab.plot(x,y,'.', x,y0, 'k')
-    pylab.legend()
-
-    pylab.show()
-
-    print(kreg.tkde.tkde.inv_hs)
-    print(kreg.tkde.tkde.hs)
-
-def kde_demo5(N=50):
+def kde_demo4(N=50):
     '''Demonstrate that the improved Sheather-Jones plug-in (hisj) is superior
-    for multimodal distributions
+    for 1D multimodal distributions

-    KDEDEMO5 shows that the improved Sheather-Jones plug-in smoothing is a better
+    KDEDEMO4 shows that the improved Sheather-Jones plug-in smoothing is better
     compared to normal reference rules (in this case the hns)
     '''
     import scipy.stats as st
-    data = np.hstack((st.norm.rvs(loc=5, scale=1, size=(N,)), st.norm.rvs(loc=-5, scale=1, size=(N,))))
+    data = np.hstack((st.norm.rvs(loc=5, scale=1, size=(N,)),
+                      st.norm.rvs(loc=-5, scale=1, size=(N,))))
     #x = np.linspace(1.5e-3, 5, 55)
@@ -2717,11 +2665,11 @@ def kde_demo5(N=50):
     pylab.plot(x + loc, st.norm.pdf(x, 0, scale=1)/2, 'k:', label='True density')
     pylab.legend()

-def kde_demo6(N=500):
+def kde_demo5(N=500):
     '''Demonstrate that the improved Sheather-Jones plug-in (hisj) is superior
-    for multimodal distributions
+    for 2D multimodal distributions

-    KDEDEMO5 shows that the improved Sheather-Jones plug-in smoothing is a better
+    KDEDEMO5 shows that the improved Sheather-Jones plug-in smoothing is better
     compared to normal reference rules (in this case the hns)
     '''
     import scipy.stats as st
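The docstrings in the two renamed demos claim the ISJ plug-in beats normal
reference rules on multimodal data. A hedged illustration of why, using
scipy's gaussian_kde with two bandwidth factors rather than wafo's hns/hisj;
the factor-of-3 shrink is only an assumed stand-in for the smaller ISJ
bandwidth:

    import numpy as np
    import scipy.stats as st

    np.random.seed(0)
    data = np.hstack((st.norm.rvs(loc=5, size=250),
                      st.norm.rvs(loc=-5, size=250)))

    x = np.linspace(-10, 10, 201)
    truth = 0.5*(st.norm.pdf(x, 5) + st.norm.pdf(x, -5))

    wide = st.gaussian_kde(data, bw_method='silverman')      # normal reference
    narrow = st.gaussian_kde(data, bw_method=wide.factor/3)  # ISJ-like, smaller

    # The normal reference rule sees the large between-mode variance and picks
    # a bandwidth that smears the two modes together; the narrower choice
    # typically gives a smaller integrated absolute error on this sample.
    for name, kde in [('silverman', wide), ('narrow', narrow)]:
        iae = np.trapz(np.abs(kde(x) - truth), x)
        print(name, round(iae, 3))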
@@ -2742,11 +2690,64 @@ def kde_demo6(N=500):
     pylab.clf()
     f1.plot()
     pylab.plot(data[0], data[1], '.')

+def kreg_demo1(hs=None, fast=False, fun='hisj'):
+    '''
+    '''
+    N = 100
+    #ei = np.random.normal(loc=0, scale=0.075, size=(N,))
+    ei = np.array([-0.08508516, 0.10462496, 0.07694448, -0.03080661, 0.05777525,
+                   0.06096313, -0.16572389, 0.01838912, -0.06251845, -0.09186784,
+                   -0.04304887, -0.13365788, -0.0185279 , -0.07289167, 0.02319097,
+                   0.06887854, -0.08938374, -0.15181813, 0.03307712, 0.08523183,
+                   -0.0378058 , -0.06312874, 0.01485772, 0.06307944, -0.0632959 ,
+                   0.18963205, 0.0369126 , -0.01485447, 0.04037722, 0.0085057 ,
+                   -0.06912903, 0.02073998, 0.1174351 , 0.17599277, -0.06842139,
+                   0.12587608, 0.07698113, -0.0032394 , -0.12045792, -0.03132877,
+                   0.05047314, 0.02013453, 0.04080741, 0.00158392, 0.10237899,
+                   -0.09069682, 0.09242174, -0.15445323, 0.09190278, 0.07138498,
+                   0.03002497, 0.02495252, 0.01286942, 0.06449978, 0.03031802,
+                   0.11754861, -0.02322272, 0.00455867, -0.02132251, 0.09119446,
+                   -0.03210086, -0.06509545, 0.07306443, 0.04330647, 0.078111 ,
+                   -0.04146907, 0.05705476, 0.02492201, -0.03200572, -0.02859788,
+                   -0.05893749, 0.00089538, 0.0432551 , 0.04001474, 0.04888828,
+                   -0.17708392, 0.16478644, 0.1171006 , 0.11664846, 0.01410477,
+                   -0.12458953, -0.11692081, 0.0413047 , -0.09292439, -0.07042327,
+                   0.14119701, -0.05114335, 0.04994696, -0.09520663, 0.04829406,
+                   -0.01603065, -0.1933216 , 0.19352763, 0.11819496, 0.04567619,
+                   -0.08348306, 0.00812816, -0.00908206, 0.14528945, 0.02901065])
+    x = np.linspace(0, 1, N)
+
+    y0 = 2*np.exp(-x**2/(2*0.3**2))+3*np.exp(-(x-1)**2/(2*0.7**2))
+    y = y0 + ei
+    kernel = Kernel('gauss',fun=fun)
+    kreg = KRegression(x, y, p=0, hs=hs, kernel=kernel)
+    if fast:
+        kreg.__call__ = kreg.eval_grid_fast
+
+    f = kreg(output='plot', title='Kernel regression', plotflag=1)
+    pylab.figure(0)
+    f.plot(label='p=0')
+
+    kreg.p=1
+    f1 = kreg(output='plot', title='Kernel regression', plotflag=1)
+    f1.plot(label='p=1')
+    pylab.plot(x,y,'.', x,y0, 'k')
+    pylab.legend()
+
+    pylab.show()
+
+    print(kreg.tkde.tkde.inv_hs)
+    print(kreg.tkde.tkde.hs)
+
 def test_docstrings():
     import doctest
     doctest.testmod()

 if __name__ == '__main__':
     #test_docstrings()
-    kde_demo2()
+    #kde_demo2()
+    kreg_demo1()
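The added kreg_demo1 fits KRegression twice: p=0 (local constant) and p=1
(local linear). As a hedged sketch of what the p=0 fit computes, here is a
plain-numpy Nadaraya-Watson estimator with a Gaussian kernel on the same test
curve; `nadaraya_watson` is a hypothetical helper, not wafo's implementation,
and hs plays the role of the smoothing parameter the demo otherwise obtains
from hisj:

    import numpy as np

    def nadaraya_watson(x, y, grid, hs):
        # Gaussian kernel weights: one row per grid point, one column per datum.
        w = np.exp(-0.5*((grid[:, None] - x[None, :])/hs)**2)
        # Local constant fit: kernel-weighted average of the responses.
        return (w*y).sum(axis=1)/w.sum(axis=1)

    x = np.linspace(0, 1, 100)
    y0 = 2*np.exp(-x**2/(2*0.3**2)) + 3*np.exp(-(x - 1)**2/(2*0.7**2))
    print(nadaraya_watson(x, y0, np.linspace(0, 1, 5), hs=0.05))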