Simplified _nlogps

master
pbrod 8 years ago
parent a983812217
commit 91c0a2fd78

@@ -1121,6 +1121,42 @@ class FitDistribution(rv_frozen):
     def _nnlf(self, theta, x):
         return self.dist._penalized_nnlf(theta, x, self._penalty)
+
+    def _log_dprb(self, x, args, scale, dist):
+        lowertail = True
+        if lowertail:
+            prb = np.hstack((0.0, dist.cdf(x, *args), 1.0))
+            dprb = np.diff(prb)
+        else:
+            prb = np.hstack((1.0, dist.sf(x, *args), 0.0))
+            dprb = -np.diff(prb)
+        log_dprb = log(dprb + _XMIN)
+
+        dx = np.diff(x, axis=0)
+        tie = dx == 0
+        if np.any(tie):
+            # TODO: implement this method for treating ties in data:
+            # Assume the measurement error is delta. Then compute
+            #     yL = F(xi - delta, theta)
+            #     yU = F(xi + delta, theta)
+            # and replace
+            #     logDj = log((yU - yL) / (r - 1)) for j = i+1, i+2, ..., i+r-1
+            # The following is OK when only minimization of T is wanted:
+            i_tie, = np.nonzero(tie)
+            tiedata = x[i_tie]
+            log_dprb[i_tie + 1] = log(dist._pdf(tiedata, *args)) - log(scale)
+        return log_dprb
+
+    def _compute_penalty(self, Nbad, finiteD):
+        nonfiniteD = 1 - finiteD
+        Nbad += np.sum(nonfiniteD, axis=0)
+        penalty = 0.0  # no penalty when all points are in-support and finite
+        if Nbad > 0 and self._penalty is None:
+            penalty = 100.0 * log(_XMAX) * Nbad
+        return penalty
     def _nlogps(self, theta, x):
         """ Moran's negative log Product Spacings statistic
@@ -1146,58 +1182,33 @@ class FitDistribution(rv_frozen):
product of spacings.", product of spacings.",
IMS Lecture Notes Monograph Series 2006, Vol. 52, pp. 272-283 IMS Lecture Notes Monograph Series 2006, Vol. 52, pp. 272-283
""" """
-        n = 2 if isinstance(self.dist, rv_continuous) else 1
-        try:
-            loc = theta[-n]
-            scale = theta[-1]
-            args = tuple(theta[:-n])
-        except IndexError:
-            raise ValueError("Not enough input arguments.")
-        if not isinstance(self.dist, rv_continuous):
-            scale = 1
-        if not self.dist._argcheck(*args) or scale <= 0:
-            return np.inf
+        def parse_theta(theta, dist):
+            n = 2 if isinstance(dist, rv_continuous) else 1
+            try:
+                loc = theta[-n]
+                scale = theta[-1]
+                args = tuple(theta[:-n])
+            except IndexError:
+                raise ValueError("Not enough input arguments.")
+            if not isinstance(dist, rv_continuous):
+                scale = 1
+            return args, loc, scale
+
         dist = self.dist
+        args, loc, scale = parse_theta(theta, dist)
+        if not dist._argcheck(*args) or scale <= 0:
+            return np.inf
         x = asarray((x - loc) / scale)
         cond0 = (x < dist.a) | (dist.b < x)
         Nbad = np.sum(cond0)
         if Nbad > 0:
             x = argsreduce(~cond0, x)[0]
-        lowertail = True
-        if lowertail:
-            prb = np.hstack((0.0, dist.cdf(x, *args), 1.0))
-            dprb = np.diff(prb)
-        else:
-            prb = np.hstack((1.0, dist.sf(x, *args), 0.0))
-            dprb = -np.diff(prb)
-        logD = log(dprb + _XMIN)
-        dx = np.diff(x, axis=0)
-        tie = (dx == 0)
-        if np.any(tie):
-            # TODO : implement this method for treating ties in data:
-            # Assume measuring error is delta. Then compute
-            # yL = F(xi - delta, theta)
-            # yU = F(xi + delta, theta)
-            # and replace
-            # logDj = log((yU-yL)/(r-1)) for j = i+1,i+2,...i+r-1
-            # The following is OK when only minimization of T is wanted
-            i_tie, = np.nonzero(tie)
-            tiedata = x[i_tie]
-            logD[i_tie + 1] = log(dist._pdf(tiedata, *args)) - log(scale)
-        finiteD = np.isfinite(logD)
-        nonfiniteD = 1 - finiteD
-        Nbad += np.sum(nonfiniteD, axis=0)
-        if Nbad > 0:
-            if self._penalty is None:
-                penalty = 100.0 * log(_XMAX) * Nbad
-            else:
-                penalty = 0.0
-            return -np.sum(logD[finiteD], axis=0) + penalty
-        return -np.sum(logD, axis=0)
+        log_dprb = self._log_dprb(x, args, scale, dist)
+        finiteD = np.isfinite(log_dprb)
+        penalty = self._compute_penalty(Nbad, finiteD)
+        return -np.sum(log_dprb[finiteD], axis=0) + penalty
     @staticmethod
     def _get_optimizer(kwds):
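
For reference, the quantity _nlogps minimizes is Moran's statistic T(theta) = -sum_j log(F(x_(j); theta) - F(x_(j-1); theta)) over the ordered sample; minimizing it is the maximum product of spacings method of Cheng & Stephens (1989) cited in the docstring. A self-contained sketch for a normal model, where the loc/scale parametrization and the Nelder-Mead optimizer are illustrative assumptions rather than the fitting pipeline above:

import numpy as np
from scipy import stats
from scipy.optimize import minimize

def moran_nlogps(theta, x):
    # Moran's statistic T(theta) = -sum(log(F(x_(j)) - F(x_(j-1)))) for a
    # normal model; loc/scale packing of theta is assumed for illustration.
    loc, scale = theta
    if scale <= 0:
        return np.inf
    xs = np.sort((np.asarray(x) - loc) / scale)
    prb = np.hstack((0.0, stats.norm.cdf(xs), 1.0))
    return -np.sum(np.log(np.diff(prb) + np.finfo(float).tiny))

x = stats.norm.rvs(loc=5.0, scale=2.0, size=200, random_state=1)
res = minimize(moran_nlogps, x0=[x.mean(), x.std()], args=(x,),
               method='Nelder-Mead')
print(res.x)  # maximum-product-of-spacings estimates of (loc, scale)
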
