Fixed more bugs in distributions.py

master
Per.Andreas.Brodtkorb 11 years ago
parent 6b88f2d4cc
commit 0bfe623f5c

@ -7,12 +7,13 @@ import sys
import fractions
import numpy as np
from numpy import (
abs, amax, any, logical_and, arange, linspace, atleast_1d, # atleast_2d,
array, asarray, broadcast_arrays, ceil, floor, frexp, hypot,
meshgrid,
abs, amax, any, logical_and, arange, linspace, atleast_1d,
array, asarray, ceil, floor, frexp, hypot,
sqrt, arctan2, sin, cos, exp, log, mod, diff, empty_like,
finfo, inf, pi, interp, isnan, isscalar, zeros, ones, linalg,
r_, sign, unique, hstack, vstack, nonzero, where, extract)
from scipy.special import gammaln
from scipy.special import gammaln, gamma, psi
from scipy.integrate import trapz, simps
import warnings
from plotbackend import plotbackend
@ -24,7 +25,8 @@ try:
except:
clib = None
floatinfo = finfo(float)
_TINY = np.finfo(float).tiny
_EPS = np.finfo(float).eps
__all__ = [
'is_numlike', 'JITImport', 'DotDict', 'Bunch', 'printf', 'sub_dict_select',
@ -1693,6 +1695,600 @@ def gravity(phi=45):
0.0000059 * sin(2 * phir) ** 2.)
def dea3(v0, v1, v2):
'''
Extrapolate a slowly convergent sequence
Parameters
----------
v0, v1, v2 : array-like
3 values of a convergent sequence to extrapolate
Returns
-------
result : array-like
extrapolated value
abserr : array-like
absolute error estimate
Description
-----------
DEA3 attempts to extrapolate nonlinearly to a better estimate
of the sequence's limiting value, thus improving the rate of
convergence. The routine is based on the epsilon algorithm of
P. Wynn, see [1]_.
Example
-------
# integrate sin(x) from 0 to pi/2
>>> import numpy as np
>>> import numdifftools as nd
>>> Ei = np.zeros(3)
>>> linfun = lambda k: np.linspace(0, np.pi/2., 2 ** (k + 5) + 1)
>>> for k in np.arange(3):
... x = linfun(k)
... Ei[k] = np.trapz(np.sin(x),x)
>>> [En, err] = nd.dea3(Ei[0], Ei[1], Ei[2])
>>> truErr = Ei-1.
>>> (truErr, err, En)
(array([ -2.00805680e-04, -5.01999079e-05, -1.25498825e-05]),
array([ 0.00020081]), array([ 1.]))
See also
--------
dea
Reference
---------
.. [1] C. Brezinski (1977)
"Acceleration de la convergence en analyse numerique",
"Lecture Notes in Math.", vol. 584,
Springer-Verlag, New York, 1977.
'''
E0, E1, E2 = np.atleast_1d(v0, v1, v2)
abs = np.abs # @ReservedAssignment
max = np.maximum # @ReservedAssignment
delta2, delta1 = E2 - E1, E1 - E0
err2, err1 = abs(delta2), abs(delta1)
tol2, tol1 = max(abs(E2), abs(E1)) * _EPS, max(abs(E1), abs(E0)) * _EPS
with warnings.catch_warnings():
warnings.simplefilter("ignore") # ignore division by zero and overflow
ss = 1.0 / delta2 - 1.0 / delta1
smallE2 = (abs(ss * E1) <= 1.0e-3).ravel()
result = 1.0 * E2
abserr = err1 + err2 + E2 * _EPS * 10.0
converged = (err1 <= tol1) & (err2 <= tol2).ravel() | smallE2
k4, = (1 - converged).nonzero()
if k4.size > 0:
result[k4] = E1[k4] + 1.0 / ss[k4]
abserr[k4] = err1[k4] + err2[k4] + abs(result[k4] - E2[k4])
return result, abserr
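For a quick feel of what dea3 buys, note that the epsilon step E1 + 1/(1/(E2-E1) - 1/(E1-E0)) is exact on geometric sequences; a minimal sketch, assuming nothing beyond the function above and plain floats:
# Hedged check: partial sums of sum(0.5**k) = 2 for k = 0, 1, 2
s0, s1, s2 = 1.0, 1.5, 1.75
limit, abserr = dea3(s0, s1, s2)
# limit -> array([ 2.]); abserr is a deliberately conservative bound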
def hyp2f1_taylor(a, b, c, z, tol=1e-13, itermax=500):
a, b, c, z = np.broadcast_arrays(*np.atleast_1d(a, b, c, z))
shape = a.shape
ak, bk, ck, zk = [d.ravel() for d in (a, b, c, z)]
ajm1 = np.ones(ak.shape)
bjm2 = 0.5 * np.ones(ak.shape)
bjm1 = np.ones(ak.shape)
hout = np.zeros(ak.shape)
k0 = np.arange(len(ak))
for j in range(0, itermax):
aj = ajm1 * (ak + j) * (bk + j) / (ck + j) * zk / (j + 1)
bj = bjm1 + aj
h, err = dea3(bjm2, bjm1, bj)
k = np.flatnonzero(err > tol * np.abs(h))
hout[k0] = h
if len(k) == 0:
break
k0 = k0[k]
ak, bk, ck, zk = ak[k], bk[k], ck[k], zk[k]
ajm1 = aj[k]
bjm2 = bjm1[k]
bjm1 = bj[k]
else:
warnings.warn(('Reached the maximum number of iterations (%d)!\n' +
'%d values did not converge! Max error=%g') %
(itermax, len(k), np.max(err)))
return hout.reshape(shape)
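hyp2f1_taylor sums the Gauss series with dea3 acceleration and a per-element convergence mask, so it needs |z| < 1. A minimal sketch checking it against the standard identity 2F1(1,1;2;z) = -log(1-z)/z (the identity, not this module, is the assumption here):
z = 0.5
h = hyp2f1_taylor(1.0, 1.0, 2.0, z)
assert np.allclose(h, -np.log(1 - z) / z)   # -log(0.5)/0.5 ~ 1.386294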
def hyp2f1(a, b, c, z, rho=0.5):
e1 = gammaln(a)
e2 = gammaln(b)
e3 = gammaln(c)
e4 = gammaln(b - a)
e5 = gammaln(a - b)
e6 = gammaln(c - a)
e7 = gammaln(c - b)
e8 = gammaln(c - a - b)
e9 = gammaln(a + b - c)
_cmab = c-a-b
#~(np.round(cmab) == cmab & cmab <= 0)
if abs(z) <= rho:
h = hyp2f1_taylor(a, b, c, z, 1e-15)
elif abs(1 - z) <= rho: # % Require that |arg(1-z)|<pi
h = exp(e3 + e8 - e6 - e7) * hyp2f1_taylor(a, b, a + b - c, 1 - z, 1e-15) \
+ (1 - z) ** (c - a - b) * exp(e3 + e9 - e1 - e2) \
* hyp2f1_taylor(c - a, c - b, c - a - b + 1, 1 - z, 1e-15)
elif abs(z / (z - 1)) <= rho:
h = (1 - z) ** (-a) \
* hyp2f1_taylor(a, c - b, c, (z / (z - 1)), 1e-15)
elif abs(1 / z) <= rho: # % Require that |arg(z)|<pi and |arg(1-z)|<pi
h = (-z + 0j) ** (-a) * exp(e3 + e4 - e2 - e6) \
* hyp2f1_taylor(a, a - c + 1, a - b + 1, 1. / z, 1e-15) \
+ (-z + 0j) ** (-b) * exp(e3 + e5 - e1 - e7) \
* hyp2f1_taylor(b - c + 1, b, b - a + 1, (1. / z), 1e-15)
elif abs(1. / (1 - z)) <= rho: # % Require that |arg(1-z)|<pi
h = (1 - z) ** (-a) * exp(e3 + e4 - e2 - e6) \
* hyp2f1_taylor(a, c - b, a - b + 1, (1. / (1 - z)), 1e-15)\
+ (1 - z) ** (-b) * exp(e3 + e5 - e1 - e7) \
* hyp2f1_taylor(b, c - a, b - a + 1, (1. / (1 - z)), 1e-15)
elif abs(1 - 1 / z) < rho: # % Require that |arg(z)|<pi and |arg(1-z)|<pi
h = z ** (-a) * exp(e3 + e8 - e6 - e7) \
* hyp2f1_taylor(a, a - c + 1, a + b - c + 1, (1 - 1 / z), 1e-15) \
+ z ** (a - c) * (1 - z) ** (c - a - b) * exp(e3 + e9 - e1 - e2) \
* hyp2f1_taylor(c - a, 1 - a, c - a - b + 1, (1 - 1 / z), 1e-15)
else:
warnings.warn('Another method is needed')
return h
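The elementary special cases listed in the commented hypgf block further down give cheap spot checks. A hedged sketch using atan(x) = x*F(1/2, 1, 3/2, -x^2), with z = -0.9 chosen so the |z/(z-1)| <= rho transformation branch above is exercised rather than the direct Taylor branch:
x = np.sqrt(0.9)
h = hyp2f1(0.5, 1.0, 1.5, -x ** 2)
assert np.allclose(h, np.arctan(x) / x)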
def hyp2f1_wrong(a, b, c, z, tol=1e-13, itermax=500):
ajm1 = 0
bjm1 = 1
cjm1 = 1
xjm1 = np.ones(np.shape(c + a * b * z))
xjm2 = 2 * np.ones(xjm1.shape)
for j in range(1, itermax):
aj = (ajm1 + bjm1) * j * (c + j - 1)
bj = bjm1 * (a + j - 1) * (b + j - 1) * z
cj = cjm1 * j * (c + j - 1)
if np.any((aj == np.inf) | (bj == np.inf) | (cj == np.inf)):
break
xj = (aj + bj) / cj
h, err = dea3(xjm2, xjm1, xj)
if np.all(err <= tol * np.abs(h)) and j > 10:
break
xjm2 = xjm1
xjm1 = xj
else:
warnings.warn('Reached the iteration limit (%d)' % itermax)
return h
def hygfz(A, B, C, Z):
''' Return hypergeometric function for a complex argument, F(a,b,c,z)
Parameters
----------
a, b, c :
parameters where c != 0, -1, -2, ...
z : complex
argument
'''
X = np.real(Z)
Y = np.imag(Z)
EPS = 1.0e-15
L0 = C == np.round(C) and C < 0.0e0
L1 = abs(1.0 - X) < EPS and Y == 0.0 and C - A - B <= 0.0
L2 = abs(Z + 1.0) < EPS and abs(C - A + B - 1.0) < EPS
L3 = A == np.round(A) and A < 0.0
L4 = B == np.round(B) and B < 0.0
L5 = C - A == np.round(C - A) and C - A <= 0.0
L6 = C - B == np.round(C - B) and C - B <= 0.0
AA = A
BB = B
A0 = abs(Z)
if (A0 > 0.95):
EPS = 1.0e-8
PI = 3.141592653589793
EL = .5772156649015329
if (L0 or L1):
# 'The hypergeometric series is divergent'
return np.inf
NM = 0
if (A0 == 0.0 or A == 0.0 or B == 0.0):
ZHF = 1.0
elif (Z == 1.0 and C - A - B > 0.0):
GC = gamma(C)
GCAB = gamma(C - A - B)
GCA = gamma(C - A)
GCB = gamma(C - B)
ZHF = GC * GCAB / (GCA * GCB)
elif L2:
G0 = sqrt(PI) * 2.0 ** (-A)
G1 = gamma(C)
G2 = gamma(1.0 + A / 2.0 - B)
G3 = gamma(0.5 + 0.5 * A)
ZHF = G0 * G1 / (G2 * G3)
elif L3 or L4:
if (L3):
NM = int(np.round(abs(A)))
if (L4):
NM = int(np.round(abs(B)))
ZHF = 1.0
ZR = 1.0
for K in range(NM):
ZR = ZR * (A + K) * (B + K) / ((K + 1.) * (C + K)) * Z
ZHF = ZHF + ZR
elif L5 or L6:
if (L5):
NM = int(np.round(abs(C - A)))
if (L6):
NM = int(np.round(abs(C - B)))
ZHF = 1.0 + 0j
ZR = 1.0 + 0j
for K in range(NM):
ZR *= (C - A + K) * (C - B + K) / ((K + 1.) * (C + K)) * Z
ZHF = ZHF + ZR
ZHF = (1.0 - Z) ** (C - A - B) * ZHF
elif (A0 <= 1.0):
if (X < 0.0):
Z1 = Z / (Z - 1.0)
if (C > A and B < A and B > 0.0):
A = BB
B = AA
ZC0 = 1.0 / ((1.0 - Z) ** A)
ZHF = 1.0 + 0j
ZR0 = 1.0 + 0j
ZW = 0
for K in range(500):
ZR0 *= (A + K) * (C - B + K) / ((K + 1.0) * (C + K)) * Z1
ZHF += ZR0
if (abs(ZHF - ZW) < abs(ZHF) * EPS):
break
ZW = ZHF
ZHF = ZC0 * ZHF
elif (A0 >= 0.90):
ZW = 0.0
GM = 0.0
MCAB = np.round(C - A - B)
if (abs(C - A - B - MCAB) < EPS):
M = int(np.round(C - A - B))
GA = gamma(A)
GB = gamma(B)
GC = gamma(C)
GAM = gamma(A + M)
GBM = gamma(B + M)
PA = psi(A)
PB = psi(B)
if (M != 0):
GM = 1.0
for j in range(1, abs(M)):
GM *= j
RM = 1.0
for j in range(1, abs(M) + 1): # DO 35 J=1,abs(M)
RM *= j
ZF0 = 1.0
ZR0 = 1.0
ZR1 = 1.0
SP0 = 0.0
SP = 0.0
if (M >= 0):
ZC0 = GM * GC / (GAM * GBM)
ZC1 = -GC * (Z - 1.0) ** M / (GA * GB * RM)
for K in range(1, M):
ZR0 = ZR0 * \
(A + K - 1.) * (B + K - 1.) / \
(K * (K - M)) * (1. - Z)
ZF0 = ZF0 + ZR0
for K in range(M):
SP0 = SP0 + 1.0 / \
(A + K) + 1.0 / (B + K) - 1. / (K + 1.)
ZF1 = PA + PB + SP0 + 2.0 * EL + np.log(1.0 - Z)
for K in range(1, 501):
SP = SP + \
(1.0 - A) / (K * (A + K - 1.0)) + (
1.0 - B) / (K * (B + K - 1.0))
SM = 0.0
for J in range(1, M):
SM += (1.0 - A) / (
(J + K) * (A + J + K - 1.0)) + 1.0 / (B + J + K - 1.0)
ZP = PA + PB + 2.0 * EL + SP + SM + np.log(1.0 - Z)
ZR1 = ZR1 * \
(A + M + K - 1.0) * (B + M + K - 1.0) / (
K * (M + K)) * (1.0 - Z)
ZF1 = ZF1 + ZR1 * ZP
if (abs(ZF1 - ZW) < abs(ZF1) * EPS):
break
ZW = ZF1
ZHF = ZF0 * ZC0 + ZF1 * ZC1
elif (M < 0):
M = -M
ZC0 = GM * GC / (GA * GB * (1.0 - Z) ** M)
ZC1 = -(-1) ** M * GC / (GAM * GBM * RM)
for K in range(1, M):
ZR0 = ZR0 * \
(A - M + K - 1.0) * (B - M + K - 1.0) / (
K * (K - M)) * (1.0 - Z)
ZF0 = ZF0 + ZR0
for K in range(1, M + 1):
SP0 = SP0 + 1.0 / K
ZF1 = PA + PB - SP0 + 2.0 * EL + np.log(1.0 - Z)
for K in range(1, 501):
SP = SP + \
(1.0 - A) / (K * (A + K - 1.0)) + (
1.0 - B) / (K * (B + K - 1.0))
SM = 0.0
for J in range(1, M + 1):
SM = SM + 1.0 / (J + K)
ZP = PA + PB + 2.0 * EL + SP - SM + np.log(1.0 - Z)
ZR1 = ZR1 * \
(A + K - 1.) * (B + K - 1.) / \
(K * (M + K)) * (1. - Z)
ZF1 = ZF1 + ZR1 * ZP
if (abs(ZF1 - ZW) < abs(ZF1) * EPS):
break
ZW = ZF1
ZHF = ZF0 * ZC0 + ZF1 * ZC1
else:
GA = gamma(A)
GB = gamma(B)
GC = gamma(C)
GCA = gamma(C - A)
GCB = gamma(C - B)
GCAB = gamma(C - A - B)
GABC = gamma(A + B - C)
ZC0 = GC * GCAB / (GCA * GCB)
ZC1 = GC * GABC / (GA * GB) * (1.0 - Z) ** (C - A - B)
ZHF = 0 + 0j
ZR0 = ZC0
ZR1 = ZC1
for K in range(1, 501):
ZR0 = ZR0 * \
(A + K - 1.) * (B + K - 1.) / \
(K * (A + B - C + K)) * (1. - Z)
ZR1 = ZR1 * \
(C - A + K - 1.0) * (C - B + K - 1.0) / (
K * (C - A - B + K)) * (1.0 - Z)
ZHF = ZHF + ZR0 + ZR1
if (abs(ZHF - ZW) < abs(ZHF) * EPS):
break
ZW = ZHF
ZHF = ZHF + ZC0 + ZC1
else:
ZW = 0.0
Z00 = 1.0 #+ 0j
if (C - A < A and C - B < B):
Z00 = (1.0 - Z) ** (C - A - B)
A = C - A
B = C - B
ZHF = 1.0
ZR = 1.0
for K in range(1, 501):
ZR = ZR * \
(A + K - 1.0) * (B + K - 1.0) / (K * (C + K - 1.0)) * Z
ZHF = ZHF + ZR
if (abs(ZHF - ZW) <= abs(ZHF) * EPS):
break
ZW = ZHF
ZHF = Z00 * ZHF
elif (A0 > 1.0):
MAB = np.round(A - B)
if (abs(A - B - MAB) < EPS and A0 <= 1.1):
B = B + EPS
if (abs(A - B - MAB) > EPS):
GA = gamma(A)
GB = gamma(B)
GC = gamma(C)
GAB = gamma(A - B)
GBA = gamma(B - A)
GCA = gamma(C - A)
GCB = gamma(C - B)
ZC0 = GC * GBA / (GCA * GB * (-Z) ** A)
ZC1 = GC * GAB / (GCB * GA * (-Z) ** B)
ZR0 = ZC0
ZR1 = ZC1
ZHF = 0.0 + 0j
for K in range(1, 501):
ZR0 = ZR0 * (A + K - 1.0) * (A - C + K) / ((A - B + K) * K * Z)
ZR1 = ZR1 * (B + K - 1.0) * (B - C + K) / ((B - A + K) * K * Z)
ZHF = ZHF + ZR0 + ZR1
if (abs((ZHF - ZW) / ZHF) <= EPS):
break
ZW = ZHF
ZHF = ZHF + ZC0 + ZC1
else:
if (A - B < 0.0):
A = BB
B = AA
CA = C - A
CB = C - B
NCA = np.round(CA)
NCB = np.round(CB)
if (abs(CA - NCA) < EPS or abs(CB - NCB) < EPS):
C = C + EPS
GA = gamma(A)
GC = gamma(C)
GCB = gamma(C - B)
PA = psi(A)
PCA = psi(C - A)
PAC = psi(A - C)
MAB = int(np.round(A - B + EPS))
ZC0 = GC / (GA * (-Z) ** B)
GM = gamma(A - B)
ZF0 = GM / GCB * ZC0
ZR = ZC0
for K in range(1, MAB):
ZR = ZR * (B + K - 1.0) / (K * Z)
T0 = A - B - K
G0 = gamma(T0)
GCBK = gamma(C - B - K)
ZF0 = ZF0 + ZR * G0 / GCBK
if (MAB == 0):
ZF0 = 0.0 + 0j
ZC1 = GC / (GA * GCB * (-Z) ** A)
SP = -2.0 * EL - PA - PCA
for J in range(1, MAB + 1):
SP = SP + 1.0 / J
ZP0 = SP + np.log(-Z)
SQ = 1.0
for J in range(1, MAB + 1):
SQ = SQ * (B + J - 1.0) * (B - C + J) / J
ZF1 = (SQ * ZP0) * ZC1
ZR = ZC1
RK1 = 1.0
SJ1 = 0.0
W0 = 0.0
for K in range(1, 10001):
ZR = ZR / Z
RK1 = RK1 * (B + K - 1.0) * (B - C + K) / (K * K)
RK2 = RK1
for J in range(K + 1, K + MAB + 1):
RK2 = RK2 * (B + J - 1.0) * (B - C + J) / J
SJ1 = SJ1 + \
(A - 1.0) / (K * (A + K - 1.0)) + \
(A - C - 1.0) / (K * (A - C + K - 1.0))
SJ2 = SJ1
for J in range(K + 1, K + MAB + 1):
SJ2 = SJ2 + 1.0 / J
ZP = -2.0 * EL - PA - PAC + SJ2 - 1.0 / \
(K + A - C) - PI / np.tan(PI * (K + A - C)) + np.log(-Z)
ZF1 = ZF1 + RK2 * ZR * ZP
WS = abs(ZF1)
if (abs((WS - W0) / WS) < EPS):
break
W0 = WS
ZHF = ZF0 + ZF1
A = AA
B = BB
if (K > 150):
warnings.warn('Warning! You should check the accuracy')
return ZHF
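As a spot check of the terminating-series branch (L3/L4 above): the call hygfz(-1, -4, 1, .9) used in test_hyp2f1 below is just the polynomial 1 + 4z, since the series for a negative-integer a stops after the linear term:
# F(-1, -4; 1; z) = 1 + (-1)(-4)/1 * z = 1 + 4*z
assert np.allclose(hygfz(-1, -4, 1, 0.9), 1 + 4 * 0.9)   # = 4.6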
# def hypgf(a, b, c, x, abseps=0, releps=1e-13, kmax=10000):
# '''HYPGF Hypergeometric function F(a,b,c,x)
#
# CALL: [y ,abserr] = hypgf(a,b,c,x,abseps,releps)
#
# y = F(a,b,c,x)
# abserr = absolute error estimate
# a,b,c,x = input parameters
# abseps = requested absolute error
# releps = requested relative error
#
# HYPGF calculates one solution to Gauss's hypergeometric differential
# equation:
#
# x*(1-x)Y''(x)+[c-(a+b+1)*x]*Y'(x)-a*b*Y(x) = 0
# where
# F(a,b,c,x) = Y1(x) = 1 + a*b*x/c + a*(a+1)*b*(b+1)*x^2/(c*(c+1))+....
#
#
# Many elementary functions are special cases of F(a,b,c,x):
# 1/(1-x) = F(1,1,1,x) = F(1,b,b,x) = F(a,1,a,x)
# (1+x)^n = F(-n,b,b,-x)
# atan(x) = x*F(.5,1,1.5,-x^2)
# asin(x) = x*F(.5,.5,1.5,x^2)
# log(x) = x*F(1,1,2,-x)
# log(1+x)-log(1-x) = 2*x*F(.5,1,1.5,x^2)
#
# NOTE: only real x, abs(x) < 1 and c~=0,-1,-2,... are allowed.
#
# Examples:
# x = linspace(-.99,.99)';
# [Sn1,err1] = hypgf(1,1,1,x)
# plot(x,abs(Sn1-1./(1-x)),'b',x,err1,'r'),set(gca,'yscale','log')
# [Sn2,err2] = hypgf(.5,.5,1.5,x.^2);
# plot(x,abs(x.*Sn2-asin(x)),'b',x,abs(x.*err2),'r'),set(gca,'yscale','log')
#
#
# Reference:
# ---------
# Kreyszig, Erwin (1988)
# Advanced engineering mathematics
# John Wiley & Sons, sixth edition, pp 204.
# '''
# csize = common_shape(x, a, b, c)
# kmin = 2
# fsum = np.zeros(csize)
# delta = np.zeros(csize)
# err = np.zeros(csize)
#
# ok = ~((np.round(c) == c & c <= 0) | np.abs(x) > 1)
# if np.any(~ok):
# warnings.warn('HYPGF', 'Illegal input: c = 0,-1,-2,... or abs(x)>1')
# fsum[~ok] = np.NaN
# err[~ok] = np.NaN
#
# k0=find(ok & abs(x)==1);
# if any(k0)
# cmab = c(k0)-a(k0)-b(k0);
# fsum(k0) = exp(gammaln(c(k0))+gammaln(cmab)-...
# gammaln(c(k0)-a(k0))-gammaln(c(k0)-b(k0)));
# err(k0) = eps;
# k00 = find(real(cmab)<=0);
# if any(k00)
# err(k0(k00)) = nan;
# fsum(k0(k00)) = nan;
# end
# end
# k=find(ok & abs(x)<1);
# if any(k),
# delta(k) = ones(size(k));
# fsum(k) = delta(k);
#
# k1 = k;
# E = cell(1,3);
# E{3} = fsum(k);
# converge = 'n';
# for ix=0:Kmax-1,
# delta(k1) = delta(k1).*((a(k1)+ix)./(ix+1)).*((b(k1)+ix)./(c(k1)+ ix)).*x(k1);
# fsum(k1) = fsum(k1)+delta(k1);
#
# E(1:2) = E(2:3);
# E{3} = fsum(k1);
#
# if ix>Kmin
# if useDEA,
# [Sn, err(k1)] = dea3(E{:});
# k00 = find((abs(err(k1))) <= max(absEps,abs(relEps.*fsum(k1))));
# if any(k00)
# fsum(k1(k00)) = Sn(k00);
# end
# if (ix==Kmax-1)
# fsum(k1) = Sn;
# end
# k0 = (find((abs(err(k1))) > max(absEps,abs(relEps.*fsum(k1)))));
# if any(k0),% compute more terms
# %nk=length(k0);%# of values we have to compute again
# E{2} = E{2}(k0);
# E{3} = E{3}(k0);
# else
# converge='y';
# break;
# end
# else
# err(k1) = 10*abs(delta(k1));
# k0 = (find((abs(err(k1))) > max(absEps,abs(relEps.* ...
# fsum(k1)))));
# if any(k0),% compute more terms
# %nk=length(k0);%# of values we have to compute again
# else
# converge='y';
# break;
# end
# end
# k1 = k1(k0);
# end
# end
# if ~strncmpi(converge,'y',1)
# disp(sprintf('#%d values did not converge',length(k1)))
# end
# end
# %ix
# return
def nextpow2(x):
'''
Return next higher power of 2
@ -1761,8 +2357,6 @@ def _discretize_linear(fun, a, b, tol=0.005, n=5):
'''
Automatic discretization of function, linear gridding
'''
tiny = floatinfo.tiny
x = linspace(a, b, n)
y = fun(x)
@ -1777,7 +2371,7 @@ def _discretize_linear(fun, a, b, tol=0.005, n=5):
x = linspace(a, b, n)
y = fun(x)
y00 = interp(x, x0, y0)
err = 0.5 * amax(abs((y00 - y) / (abs(y00 + y) + tiny)))
err = 0.5 * amax(abs((y00 - y) / (abs(y00 + y) + _TINY)))
return x, y
@ -1785,7 +2379,6 @@ def _discretize_adaptive(fun, a, b, tol=0.005, n=5):
'''
Automatic discretization of function, adaptive gridding.
'''
tiny = floatinfo.tiny
n += (mod(n, 2) == 0) # make sure n is odd
x = linspace(a, b, n)
fx = fun(x)
@ -1807,7 +2400,7 @@ def _discretize_adaptive(fun, a, b, tol=0.005, n=5):
fy = fun(y)
fy0 = interp(y, x, fx)
erri = 0.5 * (abs((fy0 - fy) / (abs(fy0 + fy) + tiny)))
erri = 0.5 * (abs((fy0 - fy) / (abs(fy0 + fy) + _TINY)))
err = erri.max()
@ -1867,125 +2460,6 @@ def cart2polar(x, y, z=None):
return t, r, z
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from one or more coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : 'xy' or 'ij' (optional)
cartesian ('xy', default) or matrix ('ij') indexing of output
sparse : True or False (default) (optional)
If True a sparse grid is returned in order to conserve memory.
copy : True (default) or False (optional)
If False a view into the original arrays are returned in order to
conserve memory
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> x = np.linspace(0,1,3) # coordinates along x axis
>>> y = np.linspace(0,1,2) # coordinates along y axis
>>> xv, yv = meshgrid(x,y) # extend x and y for a 2D xy grid
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x,y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
>>> meshgrid(x,y,sparse=True,indexing='ij') # change to matrix indexing
[array([[ 0. ],
[ 0.5],
[ 1. ]]), array([[ 0., 1.]])]
>>> meshgrid(x,y,indexing='ij')
[array([[ 0. , 0. ],
[ 0.5, 0.5],
[ 1. , 1. ]]), array([[ 0., 1.],
[ 0., 1.],
[ 0., 1.]])]
>>> meshgrid(0,1,5) # just a 3D point
[array([[[0]]]), array([[[1]]]), array([[[5]]])]
>>> map(np.squeeze,meshgrid(0,1,5)) # just a 3D point
[array(0), array(1), array(5)]
>>> meshgrid(3)
array([3])
>>> meshgrid(y) # 1D grid y is just returned
array([ 0., 1.])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2+yy**2)/(xx**2+yy**2)
"""
copy_ = kwargs.get('copy', True)
args = atleast_1d(*xi)
if not isinstance(args, list):
if args.size > 0:
return args.copy() if copy_ else args
else:
raise TypeError('meshgrid() take 1 or more arguments (0 given)')
sparse = kwargs.get('sparse', False)
indexing = kwargs.get('indexing', 'xy') # 'ij'
ndim = len(args)
s0 = (1,) * ndim
output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(args)]
shape = [x.size for x in output]
if indexing == 'xy':
# switch first and second axis
output[0].shape = (1, -1) + (1,) * (ndim - 2)
output[1].shape = (-1, 1) + (1,) * (ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return broadcast_arrays(*output)
def ndgrid(*args, **kwargs):
"""
Same as calling meshgrid with indexing='ij' (see meshgrid for
@ -2059,8 +2533,7 @@ def trangood(x, f, min_n=None, min_x=None, max_x=None, max_n=inf):
xn = xo[-1]
x0 = xo[0]
L = float(xn - x0)
eps = floatinfo.eps
if ((nf < min_n) or (max_n < nf) or any(abs(ddx) > 10 * eps * (L))):
if ((nf < min_n) or (max_n < nf) or any(abs(ddx) > 10 * _EPS * (L))):
# % pab 07.01.2001: Always choose the stepsize df so that
# % it is an exactly representable number.
# % This is important when calculating numerical derivatives and is
@ -2140,8 +2613,6 @@ def tranproc(x, f, x0, *xi):
--------
trangood.
"""
eps = floatinfo.eps
xo, fo, x0 = atleast_1d(x, f, x0)
xi = atleast_1d(*xi)
if not isinstance(xi, list):
@ -2165,7 +2636,7 @@ def tranproc(x, f, x0, *xi):
if N > 0:
y = [y0]
hn = xo[1] - xo[0]
if hn ** N < sqrt(eps):
if hn ** N < sqrt(_EPS):
msg = ('Numerical problems may occur for the derivatives in ' +
'tranproc.\nThe sampling of the transformation may be too small.')
warnings.warn(msg)
@ -2602,5 +3073,33 @@ def test_docstrings():
import doctest
doctest.testmod()
def test_hyp2f1():
# 1/(1-x) = F(1,1,1,x) = F(1,b,b,x) = F(a,1,a,x)
# (1+x)^n = F(-n,b,b,-x)
# atan(x) = x*F(.5,1,1.5,-x^2)
# asin(x) = x*F(.5,.5,1.5,x^2)
# log(x) = x*F(1,1,2,-x)
# log(1+x)-log(1-x) = 2*x*F(.5,1,1.5,x^2)
x = linspace(0., .7, 20)
y = hyp2f1_taylor(-1, -4, 1, .9)
y2 = hygfz(-1, -4, 1, .9)
y3 = hygfz(5, -300, 10, 0.5)
y4 = hyp2f1_taylor(5, -300, 10, 0.5)
#y = hyp2f1(0.1, 0.2, 0.3, 0.5)
#y = hyp2f1(1, 1.5, 3, -4 +3j)
#y = hyp2f1(5, 7.5, 2.5, 5)
# fun = lambda x : 1./(1-x)
# x = .99
# y = hyp2f1(1,1,1,x)
# print(y-fun(x))
#
plt = plotbackend
plt.interactive(False)
plt.semilogy(x, np.abs(y - 1. / (1 - x)) + 1e-20, 'r')
plt.show()
if __name__ == "__main__":
test_docstrings()
#test_docstrings()
test_hyp2f1()

@ -28,7 +28,8 @@ import warnings
import numpy as np
from numpy import (inf, pi, zeros, ones, sqrt, where, log, exp, cos, sin, arcsin, mod, interp, # @UnresolvedImport
linspace, arange, sort, all, abs, vstack, hstack, atleast_1d, sign, expm1, #@UnresolvedImport
#@UnresolvedImport
linspace, arange, sort, all, abs, vstack, hstack, atleast_1d, sign, expm1,
finfo, polyfit, r_, nonzero, cumsum, ravel, size, isnan, nan, ceil, diff, array) # @UnresolvedImport
from numpy.fft import fft
from numpy.random import randn
@ -53,10 +54,13 @@ _wafospec = JITImport('wafo.spectrum')
__all__ = ['TimeSeries', 'LevelCrossings', 'CyclePairs', 'TurningPoints',
'sensortypeid', 'sensortype']
def _invchi2(q, df):
return special.chdtri(df, q)
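scipy.special.chdtri(df, q) inverts the chi-square survival function, so _invchi2(q, df) returns the x with P(chi2_df > x) = q; tospecdata below uses it to turn the window's equivalent degrees of freedom v into confidence-interval factors. A hedged sketch (the alpha and v values are illustrative):
alpha, v = 0.05, 10
ci = [v / _invchi2(1 - alpha / 2, v), v / _invchi2(alpha / 2, v)]
# the two factors bracket 1.0; scaling the spectral estimate by them
# gives a pointwise 95% chi^2 interval, as in tospecdata below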
class LevelCrossings(PlotData):
'''
Container class for Level crossing data objects in WAFO
@ -80,6 +84,7 @@ class LevelCrossings(PlotData):
>>> lc = mm.level_crossings()
>>> h2 = lc.plot()
'''
def __init__(self, *args, **kwds):
options = dict(title='Level crossing spectrum',
xlab='Levels', ylab='Count',
@ -99,11 +104,14 @@ class LevelCrossings(PlotData):
logcros = where(self.data == 0.0, inf, -log(self.data))
logcmin = logcros[icmax]
logcros = sqrt(2 * abs(logcros - logcmin))
logcros[0:icmax + 1] = 2 * logcros[icmax] - logcros[0:icmax + 1]
logcros[0:icmax + 1] = 2 * logcros[
icmax] - logcros[0:icmax + 1]
ncr = 10
p = polyfit(self.args[ncr:-ncr], logcros[ncr:-ncr], 1) #least square fit
# least-squares fit
p = polyfit(self.args[ncr:-ncr], logcros[ncr:-ncr], 1)
if self.sigma is None:
self.sigma = 1.0 / p[0] #estimated standard deviation of x
# estimated standard deviation of x
self.sigma = 1.0 / p[0]
if self.mean is None:
self.mean = -p[1] / p[0] # self.args[icmax]
cmax = self.data[icmax]
@ -184,7 +192,6 @@ class LevelCrossings(PlotData):
Preprint 2000:82, Mathematical statistics, Chalmers, pp. 18.
'''
i_max = self.data.argmax()
c_max = self.data[i_max]
# Maximum of lc
@ -199,10 +206,12 @@ class LevelCrossings(PlotData):
u_max = self.args[i.max()]
lcf, lcx = self.data, self.args
# Extrapolate LC for high levels
[lc_High, phat_high] = self._extrapolate(lcx, lcf, u_max, u_max - lc_max, method, dist);
[lc_High, phat_high] = self._extrapolate(
lcx, lcf, u_max, u_max - lc_max, method, dist)
#
# # Extrapolate LC for low levels
[lcEst1, phat_low] = self._extrapolate(-lcx[::-1], lcf[::-1], -u_min, lc_max - u_min, method, dist)
# Extrapolate LC for low levels
[lcEst1, phat_low] = self._extrapolate(
-lcx[::-1], lcf[::-1], -u_min, lc_max - u_min, method, dist)
lc_Low = lcEst1[::-1, :] # [-lcEst1[::-1, 0], lcEst1[::-1, 1::]]
lc_Low[:, 0] *= -1
# Est.Low = Est1;
@ -218,7 +227,8 @@ class LevelCrossings(PlotData):
lc_out.phat_high = phat_high
lc_out.phat_low = phat_low
return lc_out
##
#
def _extrapolate(self, lcx, lcf, u, offset, method, dist):
# Extrapolate the level crossing spectra for high levels
@ -236,7 +246,6 @@ class LevelCrossings(PlotData):
x.append(ones(ni - nim1) * xk)
nim1 = ni
x = np.hstack(x) - u
df = 0.01
@ -250,7 +259,7 @@ class LevelCrossings(PlotData):
covar = phat.par_cov[::2, ::2]
# Calculate 90 # confidence region, an ellipse, for (k,s)
D, B = np.linalg.eig(covar);
D, B = np.linalg.eig(covar)
b = phat.par[::2]
if b[0] > 0:
phat.upperlimit = u + b[1] / b[0]
@ -258,10 +267,13 @@ class LevelCrossings(PlotData):
r = sqrt(-2 * log(1 - 90. / 100.))  # 90% confidence sphere
Nc = 16 + 1
ang = linspace(0, 2 * pi, Nc)
c0 = np.vstack((r * sqrt(D[0]) * sin(ang), r * sqrt(D[1]) * cos(ang))) # 90# Circle
# 90% circle
c0 = np.vstack(
(r * sqrt(D[0]) * sin(ang), r * sqrt(D[1]) * cos(ang)))
# plot(c0(1,:),c0(2,:))
c1 = np.dot(B, c0) + b[:,None] #* ones((1, len(c0))) # Transform to ellipse for (k,s)
#* ones((1, len(c0))) # Transform to ellipse for (k,s)
c1 = np.dot(B, c0) + b[:, None]
# plot(c1(1,:),c1(2,:)), hold on
# Calculate conf.int for lcu
@ -300,7 +312,7 @@ class LevelCrossings(PlotData):
raise ValueError()
return lcEst, phat
## End extrapolate
# End extrapolate
def _make_increasing(self, f, t=None):
# Makes the signal f strictly increasing.
@ -435,40 +447,36 @@ class LevelCrossings(PlotData):
g = lc2.trdata()[0]
f = g.gauss2dat(Z)
G = TrData(f, u)
process = G.dat2gauss(L)
return np.vstack((arange(len(process)), process)).T
##
##
## %Check the result without reference to getrfc:
#
#
# %Check the result without reference to getrfc:
## LCe = dat2lc(process)
## max(lc(:,2))
## max(LCe(:,2))
##
## clf
## plot(lc(:,1),lc(:,2)/max(lc(:,2)))
## hold on
## plot(LCe(:,1),LCe(:,2)/max(LCe(:,2)),'-.')
# max(lc(:,2))
# max(LCe(:,2))
#
# clf
# plot(lc(:,1),lc(:,2)/max(lc(:,2)))
# hold on
# plot(LCe(:,1),LCe(:,2)/max(LCe(:,2)),'-.')
## title('Relative crossing intensity')
##
## %% Plot made by the function funplot_4, JE 970707
## %param = [min(process(:,2)) max(process(:,2)) 100]
## %plot(lc(:,1),lc(:,2)/max(lc(:,2)))
## %hold on
## %plot(levels(param),mu/max(mu),'--')
## %hold off
## %title('Crossing intensity')
## %watstamp
##
## % Temporarily
## %funplot_4(lc,param,mu)
#
# %% Plot made by the function funplot_4, JE 970707
# %param = [min(process(:,2)) max(process(:,2)) 100]
# %plot(lc(:,1),lc(:,2)/max(lc(:,2)))
# %hold on
# %plot(levels(param),mu/max(mu),'--')
# %hold off
# %title('Crossing intensity')
# %watstamp
#
# % Temporarily
# %funplot_4(lc,param,mu)
def trdata(self, mean=None, sigma=None, **options):
'''
Estimate transformation, g, from observed crossing intensity, version2.
@ -593,8 +601,8 @@ class LevelCrossings(PlotData):
if ng == 1:
gvar = opt.gvar * ones(ncr)
else:
gvar = interp1d(linspace(0, 1, ng) , opt.gvar, kind='linear')(linspace(0, 1, ncr))
gvar = interp1d(linspace(0, 1, ng), opt.gvar, kind='linear')(
linspace(0, 1, ncr))
uu = linspace(*param)
g1 = sigma * uu + mean
@ -613,7 +621,6 @@ class LevelCrossings(PlotData):
else:
lc22 = (lc22 + 0.5) / (lc22[-1] + cor2 + 1)
lc11 = (lc1 - mean) / sigma
lc22 = invnorm(lc22) # - ymean
@ -626,7 +633,8 @@ class LevelCrossings(PlotData):
# to be linear outside the edges or choosing a lower value for csm2.
inds = slice(Ne, ncr - Ne) # indices to points we are smoothing over
slc22 = SmoothSpline(lc11[inds], lc22[inds], opt.gsm, opt.linextrap, gvar[inds])(uu)
slc22 = SmoothSpline(
lc11[inds], lc22[inds], opt.gsm, opt.linextrap, gvar[inds])(uu)
g = TrData(slc22.copy(), g1.copy(), mean=mean, sigma=sigma)
@ -641,7 +649,8 @@ class LevelCrossings(PlotData):
eps = finfo(float).eps
dy[dy > 0] = eps
gvar = -(hstack((dy, 0)) + hstack((0, dy))) / 2 + eps
g.data = SmoothSpline(g.args, g.data, 1, opt.linextrap, ix * gvar)(g.args)
g.data = SmoothSpline(
g.args, g.data, 1, opt.linextrap, ix * gvar)(g.args)
else:
break
@ -650,6 +659,8 @@ class LevelCrossings(PlotData):
g2.plot()
return g, g2
def test_levelcrossings_extrapolate():
import wafo.data
#import wafo.objects as wo
@ -663,7 +674,9 @@ def test_levelcrossings_extrapolate():
s = x[:, 1].std()
lc_gpd = lc.extrapolate(-2 * s, 2 * s, dist='rayleigh') # @UnusedVariable
class CyclePairs(PlotData):
'''
Container class for Cycle Pairs data objects in WAFO
@ -683,6 +696,7 @@ class CyclePairs(PlotData):
>>> mm = tp.cycle_pairs()
>>> h1 = mm.plot(marker='x')
'''
def __init__(self, *args, **kwds):
self.kind = kwds.pop('kind', 'min2max')
self.sigma = kwds.pop('sigma', None)
@ -824,14 +838,14 @@ class CyclePairs(PlotData):
#[xx nx]=max(extr(:,1))
nx = extr[0].argmax() + 1
levels = extr[0, 0:nx]
if defnr == 2: ## This are upcrossings + maxima
if defnr == 2:  # These are upcrossings + maxima
dcount = cumsum(extr[1, 0:nx]) + extr[2, 0:nx] - extr[3, 0:nx]
elif defnr == 4: # # This are upcrossings + minima
elif defnr == 4:  # These are upcrossings + minima
dcount = cumsum(extr[1, 0:nx])
dcount[nx - 1] = dcount[nx - 2]
elif defnr == 1: ## This are only upcrossings
elif defnr == 1:  # These are only upcrossings
dcount = cumsum(extr[1, 0:nx]) - extr[3, 0:nx]
elif defnr == 3: ## This are upcrossings + minima + maxima
elif defnr == 3:  # These are upcrossings + minima + maxima
dcount = cumsum(extr[1, 0:nx]) + extr[2, 0:nx]
ylab = 'Count'
if intensity:
@ -839,7 +853,9 @@ class CyclePairs(PlotData):
ylab = 'Intensity [count/sec]'
return LevelCrossings(dcount, levels, mean=self.mean, sigma=self.sigma, ylab=ylab, intensity=intensity)
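For orientation: the defnr cases above count, per level, upcrossings alone (1) or upcrossings plus maxima and/or minima (2-4) from the cumulated cycle matrix. A usage sketch following the docstring examples in this file (same sea-data/ts/tp/mm chain):
import wafo.data
import wafo.objects as wo
ts = wo.mat2timeseries(wafo.data.sea())
tp = ts.turning_points()
mm = tp.cycle_pairs()
lc = mm.level_crossings()   # LevelCrossings built from the counts above
h = lc.plot()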
class TurningPoints(PlotData):
'''
Container class for Turning Points data objects in WAFO
@ -858,6 +874,7 @@ class TurningPoints(PlotData):
>>> tp = ts.turning_points()
>>> h1 = tp.plot(marker='x')
'''
def __init__(self, *args, **kwds):
self.name_ = kwds.pop('name', 'WAFO TurningPoints Object')
self.sigma = kwds.pop('sigma', None)
@ -1029,7 +1046,9 @@ def mat2timeseries(x):
"""
return TimeSeries(x[:, 1::], x[:, 0].ravel())
class TimeSeries(PlotData):
'''
Container class for 1D TimeSeries data objects in WAFO
@ -1064,6 +1083,7 @@ class TimeSeries(PlotData):
>>> h2 = lc.plot()
'''
def __init__(self, *args, **kwds):
self.name_ = kwds.pop('name', 'WAFO TimeSeries Object')
self.sensortypes = kwds.pop('sensortypes', ['n', ])
@ -1075,7 +1095,6 @@ class TimeSeries(PlotData):
n = len(self.data)
self.args = range(0, n)
def sampling_period(self):
'''
Returns sampling interval
@ -1176,8 +1195,10 @@ class TimeSeries(PlotData):
t = linspace(0, lag * dt, lag + 1)
#cumsum = np.cumsum
acf = _wafocov.CovData1D(R[lags], t)
acf.sigma = sqrt(r_[ 0, r0 ** 2 , r0 ** 2 + 2 * cumsum(R[1:] ** 2)] / Ncens)
acf.children = [PlotData(-2. * acf.sigma[lags], t), PlotData(2. * acf.sigma[lags], t)]
acf.sigma = sqrt(
r_[0, r0 ** 2, r0 ** 2 + 2 * cumsum(R[1:] ** 2)] / Ncens)
acf.children = [
PlotData(-2. * acf.sigma[lags], t), PlotData(2. * acf.sigma[lags], t)]
acf.plot_args_children = ['r:']
acf.norm = norm
return acf
@ -1219,7 +1240,8 @@ class TimeSeries(PlotData):
"""
dt = self.sampling_period()
#fs = 1. / (2 * dt)
yy = self.data.ravel() if tr is None else tr.dat2gauss(self.data.ravel())
yy = self.data.ravel() if tr is None else tr.dat2gauss(
self.data.ravel())
yy = detrend(yy) if hasattr(detrend, '__call__') else yy
S, f = psd(yy, Fs=1. / dt, NFFT=L, detrend=detrend, window=window,
@ -1227,6 +1249,7 @@ class TimeSeries(PlotData):
fact = 2.0 * pi
w = fact * f
return _wafospec.SpecData1D(S / fact, w)
def tospecdata(self, L=None, tr=None, method='cov', detrend=detrend_mean, window=parzen, noverlap=0, ftype='w', alpha=None):
'''
Estimate one-sided spectral density from data.
@ -1291,18 +1314,22 @@ class TimeSeries(PlotData):
#% Initialize constants
#%~~~~~~~~~~~~~~~~~~~~~
nugget = 1e-12
rate = 2; #% interpolationrate for frequency
rate = 2
# % interpolation rate for frequency
wdef = 1; #% 1=parzen window 2=hanning window, 3= bartlett window
wdef = 1
# % 1=parzen window, 2=hanning window, 3=bartlett window
dt = self.sampling_period()
#yy = self.data if tr is None else tr.dat2gauss(self.data)
yy = self.data.ravel() if tr is None else tr.dat2gauss(self.data.ravel())
yy = self.data.ravel() if tr is None else tr.dat2gauss(
self.data.ravel())
yy = detrend(yy) if hasattr(detrend, '__call__') else yy
n = len(yy)
L = min(L, n);
L = min(L, n)
max_L = min(300, n); #% maximum lag if L is undetermined
max_L = min(300, n)
# % maximum lag if L is undetermined
estimate_L = L is None
if estimate_L:
L = min(n - 2, int(4. / 3 * max_L + 0.5))
@ -1312,22 +1339,32 @@ class TimeSeries(PlotData):
R = tsy.tocovdata()
if estimate_L:
# finding where ACF is less than 2 st. deviations.
L = max_L + 2 - (np.abs(R.data[max_L::-1]) > 2 * R.sigma[max_L::-1]).argmax() # a better L value
if wdef == 1: # modify L so that hanning and Parzen give appr. the same result
# a better L value
L = max_L + 2 - \
(np.abs(R.data[max_L::-1]) > 2 * R.sigma[
max_L::-1]).argmax()
# modify L so that hanning and Parzen give appr. the same
# result
if wdef == 1:
L = min(int(4 * L / 3), n - 2)
print('The default L is set to %d' % L)
try:
win = window(2 * L - 1)
wname = window.__name__
if wname == 'parzen':
v = int(3.71 * n / L) # degrees of freedom used in chi^2 distribution
# degrees of freedom used in chi^2 distribution
v = int(3.71 * n / L)
Be = 2 * pi * 1.33 / (L * dt) # % bandwidth (rad/sec)
elif wname == 'hanning':
v = int(2.67 * n / L); # degrees of freedom used in chi^2 distribution
Be = 2 * pi / (L * dt); # % bandwidth (rad/sec)
# degrees of freedom used in chi^2 distribution
v = int(2.67 * n / L)
Be = 2 * pi / (L * dt)
# % bandwidth (rad/sec)
elif wname == 'bartlett':
v = int(3 * n / L); # degrees of freedom used in chi^2 distribution
Be = 2 * pi * 1.33 / (L * dt); # bandwidth (rad/sec)
# degrees of freedom used in chi^2 distribution
v = int(3 * n / L)
Be = 2 * pi * 1.33 / (L * dt)
# bandwidth (rad/sec)
except:
wname = None
win = window
@ -1337,7 +1374,8 @@ class TimeSeries(PlotData):
if method == 'psd':
nfft = 2 ** nextpow2(L)
pad_to = rate * nfft # Interpolate the spectrum with rate
S, f = psd(yy, Fs=1. / dt, NFFT=nfft, detrend=detrend, window=window(nfft),
S, f = psd(
yy, Fs=1. / dt, NFFT=nfft, detrend=detrend, window=window(nfft),
noverlap=noverlap, pad_to=pad_to, scale_by_freq=True)
fact = 2.0 * pi
w = fact * f
@ -1360,7 +1398,8 @@ class TimeSeries(PlotData):
if alpha is not None:
#% Confidence interval constants
spec.CI = [v / _invchi2(1 - alpha / 2 , v), v / _invchi2(alpha / 2 , v)];
spec.CI = [
v / _invchi2(1 - alpha / 2, v), v / _invchi2(alpha / 2, v)]
spec.tr = tr
spec.L = L
@ -1374,9 +1413,6 @@ class TimeSeries(PlotData):
# S.S = zeros(nf+1,m-1);
return spec
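Pulling the spectral pieces together: the window fixes the equivalent degrees of freedom (v = 3.71*n/L for parzen, 2.67*n/L for hanning, 3*n/L for bartlett, per the comments above), and alpha turns those into spec.CI via _invchi2. A hedged usage sketch mirroring main() at the bottom of this file:
import wafo.data
import wafo.objects
ts = wafo.objects.mat2timeseries(wafo.data.sea())
S = ts.tospecdata(alpha=0.05)   # defaults: method='cov', parzen window
# S.CI holds the chi^2 interval factors; S.L is the (auto-)estimated lag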
def _trdata_cdf(self, **options):
'''
Estimate transformation, g, from observed marginal CDF.
@ -1404,7 +1440,8 @@ class TimeSeries(PlotData):
sigma = self.data.std()
cdf = edf(self.data.ravel())
opt = DotDict(chkder=True, plotflag=False, gsm=0.05, param=[-5, 5, 513],
opt = DotDict(
chkder=True, plotflag=False, gsm=0.05, param=[-5, 5, 513],
delay=2, linextrap=True, ntr=1000, ne=7, gvar=1)
opt.update(options)
Ne = opt.ne
@ -1416,14 +1453,14 @@ class TimeSeries(PlotData):
Ne = 0
uu = linspace(*opt.param)
ncr = len(cdf.data);
ncr = len(cdf.data)
ng = len(np.atleast_1d(opt.gvar))
if ng == 1:
gvar = opt.gvar * ones(ncr)
else:
opt.gvar = np.atleast_1d(opt.gvar)
gvar = interp(linspace(0, 1, ncr), linspace(0, 1, ng), opt.gvar.ravel())
gvar = interp(
linspace(0, 1, ncr), linspace(0, 1, ng), opt.gvar.ravel())
ind = np.flatnonzero(diff(cdf.args) > 0) # remove equal points
nd = len(ind)
@ -1431,7 +1468,8 @@ class TimeSeries(PlotData):
tmp = invnorm(cdf.data[ind])
x = sigma * uu + mean
pp_tr = SmoothSpline(cdf.args[ind1], tmp[Ne:nd - Ne], p=opt.gsm, lin_extrap=opt.linextrap, var=gvar[ind1])
pp_tr = SmoothSpline(
cdf.args[ind1], tmp[Ne:nd - Ne], p=opt.gsm, lin_extrap=opt.linextrap, var=gvar[ind1])
# g(:,2) = smooth(Fx(ind1,1),tmp(Ne+1:end-Ne),opt.gsm,g(:,1),def,gvar);
tr = TrData(pp_tr(x), x, mean=mean, sigma=sigma)
tr_emp = TrData(tmp, cdf.args[ind], mean=mean, sigma=sigma)
@ -1442,8 +1480,11 @@ class TimeSeries(PlotData):
dy = diff(tr.data)
if (dy <= 0).any():
dy[dy > 0] = floatinfo.eps
gvar = -(np.hstack((dy, 0)) + np.hstack((0, dy))) / 2 + floatinfo.eps
pp_tr = SmoothSpline(cdf.args[ind1], tmp[Ne:nd - Ne], p=1, lin_extrap=opt.linextrap, var=ix * gvar)
gvar = - \
(np.hstack((dy, 0)) + np.hstack((0, dy))) / \
2 + floatinfo.eps
pp_tr = SmoothSpline(cdf.args[ind1], tmp[
Ne:nd - Ne], p=1, lin_extrap=opt.linextrap, var=ix * gvar)
tr = TrData(pp_tr(x), x, mean=mean, sigma=sigma)
else:
break
@ -1563,11 +1604,9 @@ class TimeSeries(PlotData):
in Proceedings of 9th ISOPE Conference, Vol III, pp 66-73
'''
# opt = troptset('plotflag','off','csm',.95,'gsm',.05,....
# 'param',[-5 5 513],'delay',2,'linextrap','on','ne',7,...
# 'cvar',1,'gvar',1,'multip',0);
opt = DotDict(chkder=True, plotflag=False, csm=.95, gsm=.05,
param=[-5, 5, 513], delay=2, ntr=1000, linextrap=True, ne=7, cvar=1, gvar=1,
multip=False, crossdef='uM')
@ -1590,7 +1629,7 @@ class TimeSeries(PlotData):
ga1 = skew(self.data)
ga2 = kurtosis(self.data, fisher=True) # kurt(xx(n+1:end))-3;
up = min(4 * (4 * ga1 / 3) ** 2, 13)
lo = (ga1 ** 2) * 3 / 2;
lo = (ga1 ** 2) * 3 / 2
kurt1 = min(up, max(ga2, lo)) + 3
return TrHermite(mean=ma, var=sa ** 2, skew=ga1, kurt=kurt1)
elif method[0] == 'o':
@ -1680,6 +1719,7 @@ class TimeSeries(PlotData):
mean = self.data.mean()
sigma = self.data.std()
return TurningPoints(self.data[ind], t, mean=mean, sigma=sigma)
def wave_parameters(self, rate=1):
'''
Returns several wave parameters from data.
@ -1754,7 +1794,8 @@ class TimeSeries(PlotData):
Tcf = tc_t[1::2] - tu[:-1]
Tcf[(Tcf == 0)] = dT # avoiding division by zero
Tcb = td[1:] - tc_t[1::2]
Tcb[(Tcb == 0)] = dT; #% avoiding division by zero
Tcb[(Tcb == 0)] = dT
# % avoiding division by zero
return dict(Ac=Ac, At=At, Hu=Hu, Hd=Hd, Tu=Tu, Td=Td, Tcf=Tcf, Tcb=Tcb)
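The returned dict keys follow the definitions above (Ac/At crest and trough amplitudes, Hu/Hd up- and down-crossing wave heights, Tu/Td periods, Tcf/Tcb crest-front and crest-back periods). A short sketch, with ts as in the earlier examples:
wp = ts.wave_parameters()
Ac, Tcf = wp['Ac'], wp['Tcf']   # crest amplitudes, crest-front periods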
def wave_height_steepness(self, method=1, rate=1, g=None):
@ -1849,7 +1890,8 @@ class TimeSeries(PlotData):
# time between crest and zero-downcrossing [s]
td = ecross(ti, xi, z_ind[2::2], v=0)
Tcb = td - tc_t[1::2]
Tcb[(Tcb == 0)] = dT; #% avoiding division by zero
Tcb[(Tcb == 0)] = dT
# % avoiding division by zero
if method == 0:
# max(Vcf, Vcr) and the corresponding wave height Hd or Hu in H
@ -1866,26 +1908,32 @@ class TimeSeries(PlotData):
# Zero-upcrossing wave height [m]
H = Ac + At[1:] # Hu
S = Ac / Tcb
elif method == 2: #crest front steepness in S and the wave height Hd in H.
# crest front steepness in S and the wave height Hd in H.
elif method == 2:
H = Ac + At[:-1] # Hd
Td = diff(ecross(ti, xi, z_ind[::2], v=0))
S = 2 * pi * Ac / Td / Tcf / g
elif method == -2: # crest back steepness in S and the wave height Hu in H.
# crest back steepness in S and the wave height Hu in H.
elif method == -2:
H = Ac + At[1:]
Tu = diff(ecross(ti, xi, z_ind[1::2], v=0))
S = 2 * pi * Ac / Tu / Tcb / g
elif method == 3: # total steepness in S and the wave height Hd in H
# for zero-doewncrossing waves.
H = Ac + At[:-1]
Td = diff(ecross(ti, xi , z_ind[::2], v=0))# Period zero-downcrossing waves
# Period zero-downcrossing waves
Td = diff(ecross(ti, xi, z_ind[::2], v=0))
S = 2 * pi * H / Td ** 2 / g
elif method == -3: # total steepness in S and the wave height Hu in H for
# total steepness in S and the wave height Hu in H for
elif method == -3:
# zero-upcrossing waves.
H = Ac + At[1:]
Tu = diff(ecross(ti, xi, z_ind[1::2], v=0))# Period zero-upcrossing waves
# Period zero-upcrossing waves
Tu = diff(ecross(ti, xi, z_ind[1::2], v=0))
S = 2 * pi * H / Tu ** 2 / g
return S, H
def wave_periods(self, vh=None, pdef='d2d', wdef=None, index=None, rate=1):
"""
Return sequence of wave periods/lengths from data.
@ -1959,22 +2007,22 @@ class TimeSeries(PlotData):
findcross, perioddef
"""
##% This is a more flexible version than the dat2hwa or tp2wa routines.
##% There is a secret option: if pdef='all' the function returns
##% all the waveperiods 'd2t', 't2u', 'u2c' and 'c2d' in sequence.
##% It is up to the user to extract the right waveperiods.
##% If the first is a down-crossing then the first is a 'd2t' waveperiod.
##% If the first is a up-crossing then the first is a 'u2c' waveperiod.
##%
##% Example:
##% [T ind]=dat2wa(x,0,'all') %returns all waveperiods
##% nn = length(T)
##% % want to extract all t2u waveperiods
##% if x(ind(1),2)>0 % if first is down-crossing
##% Tt2u=T(2:4:nn)
##% else % first is up-crossing
##% Tt2u=T(4:4:nn)
##% end
# % This is a more flexible version than the dat2hwa or tp2wa routines.
# % There is a secret option: if pdef='all' the function returns
# % all the waveperiods 'd2t', 't2u', 'u2c' and 'c2d' in sequence.
# % It is up to the user to extract the right waveperiods.
# % If the first is a down-crossing then the first is a 'd2t' waveperiod.
# % If the first is a up-crossing then the first is a 'u2c' waveperiod.
# %
# % Example:
# % [T ind]=dat2wa(x,0,'all') %returns all waveperiods
# % nn = length(T)
# % % want to extract all t2u waveperiods
# % if x(ind(1),2)>0 % if first is down-crossing
# % Tt2u=T(2:4:nn)
# % else % first is up-crossing
# % Tt2u=T(4:4:nn)
# % end
if rate > 1: # % interpolate with spline
n = ceil(self.data.size * rate)
@ -1984,7 +2032,6 @@ class TimeSeries(PlotData):
x = self.data
ti = self.args
if vh is None:
if pdef[0] in ('m', 'M'):
vh = 0
@ -1993,7 +2040,6 @@ class TimeSeries(PlotData):
vh = x.mean()
print(' The level l is set to: %g' % vh)
if index is None:
if pdef in ('m2m', 'm2M', 'M2m', 'M2M'):
index = findtp(x, vh, wdef)
@ -2003,7 +2049,8 @@ class TimeSeries(PlotData):
index = findtc(x, vh, wdef)[0]
elif pdef in ('d2t', 't2u', 'u2c', 'c2d', 'all'):
index, v_ind = findtc(x, vh, wdef)
index = sort(r_[index, v_ind]) #% sorting crossings and tp in sequence
#% sorting crossings and tp in sequence
index = sort(r_[index, v_ind])
else:
raise ValueError('Unknown pdef option!')
@ -2062,6 +2109,7 @@ class TimeSeries(PlotData):
def reconstruct(self):
# TODO: finish reconstruct
pass
def plot_wave(self, sym1='k.', ts=None, sym2='k+', nfig=None, nsub=None,
sigma=None, vfact=3):
'''
@ -2117,7 +2165,8 @@ class TimeSeries(PlotData):
sigma = xn[indg].std()
if nsub is None:
nsub = int(len(xn2) / (2 * nw)) + 1 # about Nw mdc waves in each plot
# about Nw mdc waves in each plot
nsub = int(len(xn2) / (2 * nw)) + 1
if nfig is None:
nfig = int(ceil(nsub / 6))
nsub = min(6, int(ceil(nsub / nfig)))
@ -2130,7 +2179,6 @@ class TimeSeries(PlotData):
else:
vscale = array([-1, 1]) * vfact * sigma # @UnusedVariable
XlblTxt = 'Time [sec]'
dT = 1
timespan = tn[ind[-1]] - tn[ind[0]]
@ -2171,7 +2219,6 @@ class TimeSeries(PlotData):
return figs
def plot_sp_wave(self, wave_idx_, *args, **kwds):
"""
Plot specified wave(s) from timeseries
@ -2200,7 +2247,8 @@ class TimeSeries(PlotData):
wave_idx = atleast_1d(wave_idx_).flatten()
tz_idx = kwds.pop('tz_idx', None)
if tz_idx is None:
unused_tc_ind, tz_idx = findtc(self.data, 0, 'tw') # finding trough to trough waves
# finding trough to trough waves
unused_tc_ind, tz_idx = findtc(self.data, 0, 'tw')
dw = nonzero(abs(diff(wave_idx)) > 1)[0]
Nsub = dw.size + 1
@ -2210,7 +2258,8 @@ class TimeSeries(PlotData):
Nwp[Nsub - 1] = wave_idx[-1] - wave_idx[dw[-1]] + 1
wave_idx[dw[-1] + 1:] = -2
for ix in range(Nsub - 2, 1, -2):
Nwp[ix] = wave_idx[dw[ix] - 1] - wave_idx[dw[ix - 1]] + 1 # # of waves pr subplot
# number of waves per subplot
Nwp[ix] = wave_idx[dw[ix] - 1] - wave_idx[dw[ix - 1]] + 1
wave_idx[dw[ix - 1] + 1:dw[ix]] = -2
Nwp[0] = wave_idx[dw[0] - 1] - wave_idx[0] + 1
@ -2228,8 +2277,9 @@ class TimeSeries(PlotData):
figs.append(plotbackend.figure())
for ix in range(Nsub):
plotbackend.subplot(Nsub, 1, mod(ix, Nsub) + 1)
ind = r_[tz_idx[2 * wave_idx[ix] - 1]:tz_idx[2 * wave_idx[ix] + 2 * Nwp[ix] - 1]]
## indices to wave
ind = r_[tz_idx[2 * wave_idx[ix] - 1]:tz_idx[
2 * wave_idx[ix] + 2 * Nwp[ix] - 1]]
# indices to wave
plotbackend.plot(self.args[ind], self.data[ind], *args, **kwds)
plotbackend.hold('on')
xi = [self.args[ind[0]], self.args[ind[-1]]]
@ -2238,7 +2288,8 @@ class TimeSeries(PlotData):
if Nwp[ix] == 1:
plotbackend.ylabel('Wave %d' % wave_idx[ix])
else:
plotbackend.ylabel('Wave %d - %d' % (wave_idx[ix], wave_idx[ix] + Nwp[ix] - 1))
plotbackend.ylabel(
'Wave %d - %d' % (wave_idx[ix], wave_idx[ix] + Nwp[ix] - 1))
plotbackend.xlabel('Time [sec]')
# wafostamp
@ -2286,9 +2337,9 @@ class TimeSeries(PlotData):
# tran
# '''
# ak, bk, sak, sbk = np.atleast_1d(a, b, sign(sa), sign(sb))
# # old call
# #return exp(ak-bk)*(1+sak*exp(-2*ak))/(1+sbk*exp(-2*bk))
# # TODO: Does not always handle division by zero correctly
# old call
# return exp(ak-bk)*(1+sak*exp(-2*ak))/(1+sbk*exp(-2*bk))
# TODO: Does not always handle division by zero correctly
#
# signRatio = np.where(sak * ak < 0, sak, 1)
# signRatio = np.where(sbk * bk < 0, sbk * signRatio, signRatio)
@ -2548,7 +2599,7 @@ class TimeSeries(PlotData):
# kw, unusedkw2 = w2k(w, 0, self.h) #wave number as function of angular frequency
#
# w, theta, kw = np.atleast_1d(w, theta, kw)
# # make sure they have the correct orientation
# make sure they have the correct orientation
# theta.shape = (-1, 1)
# kw.shape = (-1,)
# w.shape = (-1,)
@ -2556,8 +2607,8 @@ class TimeSeries(PlotData):
# tran_fun = self._tran_dict[self.sensortype]
# Hw, Gwt = tran_fun(w, theta, kw)
#
# # New call to avoid singularities. pab 07.11.2000
# # Set Hw to 0 for expressions w*hyperbolic_ratio(z*k,h*k,1,-1)= 0*inf
# New call to avoid singularities. pab 07.11.2000
# Set Hw to 0 for expressions w*hyperbolic_ratio(z*k,h*k,1,-1)= 0*inf
# ind = np.flatnonzero(1 - np.isfinite(Hw))
# Hw.flat[ind] = 0
#
@ -2568,23 +2619,23 @@ class TimeSeries(PlotData):
# Hw[:, k0] = -Hw[:, k0]
#
# if self.igam == 2:
# #pab 09 Oct.2002: bug fix
# # Changing igam by 2 should affect the directional result in the same way that changing eta by -eta!
# pab 09 Oct.2002: bug fix
# Changing igam by 2 should affect the directional result in the same way that changing eta by -eta!
# Gwt = -Gwt
# return Hw, Gwt
# __call__ = tran
##---Private member methods
# ---Private member methods
# def _get_ee_cthxy(self, theta, kw):
# # convert from angle in degrees to radians
# convert from angle in degrees to radians
# bet = self.bet
# thxr = self.thetax * pi / 180
# thyr = self.thetay * pi / 180
#
# cthx = bet * cos(theta - thxr + pi / 2)
# #cthy = cos(theta-thyr-pi/2)
# cthy = cos(theta-thyr-pi/2)
# cthy = bet * sin(theta - thyr)
#
# # Compute location complex exponential
# Compute location complex exponential
# x, y, unused_z = list(self.pos)
# ee = exp((1j * (x * cthx + y * cthy)) * kw) # exp(i*k(w)*(x*cos(theta)+y*sin(theta)) size Nt X Nf
# return ee, cthx, cthy
@ -2600,14 +2651,14 @@ class TimeSeries(PlotData):
# zk = kw * z # z measured positive upward from sea floor
# return zk
#
# #--- Surface elevation ---
# --- Surface elevation ---
# def _n(self, w, theta, kw):
# '''n = Eta = wave profile
# '''
# ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
# return np.ones_like(w), ee
#
# #---- Vertical surface velocity and acceleration-----
# ---- Vertical surface velocity and acceleration-----
# def _n_t(self, w, theta, kw):
# ''' n_t = Eta_t '''
# ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -2617,7 +2668,7 @@ class TimeSeries(PlotData):
# ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
# return w ** 2, -ee
#
# #--- Surface slopes ---
# --- Surface slopes ---
# def _n_x(self, w, theta, kw):
# ''' n_x = Eta_x = x-slope'''
# ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -2627,7 +2678,7 @@ class TimeSeries(PlotData):
# ee, unused_cthx, cthy = self._get_ee_cthxy(theta, kw)
# return kw, 1j * cthy * ee
#
# #--- Surface curvatures ---
# --- Surface curvatures ---
# def _n_xx(self, w, theta, kw):
# ''' n_xx = Eta_xx = Surface curvature (x-dir)'''
# ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -2641,7 +2692,7 @@ class TimeSeries(PlotData):
# ee, cthx, cthy = self._get_ee_cthxy(theta, kw)
# return kw ** 2, -cthx * cthy * ee
#
# #--- Pressure---
# --- Pressure---
# def _p(self, w, theta, kw):
# ''' pressure fluctuations'''
# ee, unused_cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -2649,7 +2700,7 @@ class TimeSeries(PlotData):
# zk = self._get_zk(kw)
# return self.rho * self.g * hyperbolic_ratio(zk, hk, 1, 1), ee #hyperbolic_ratio = cosh(zk)/cosh(hk)
#
# #---- Water particle velocities ---
# ---- Water particle velocities ---
# def _u(self, w, theta, kw):
# ''' U = x-velocity'''
# ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -2669,7 +2720,7 @@ class TimeSeries(PlotData):
# zk = self._get_zk(kw)
# return w * hyperbolic_ratio(zk, hk, -1, -1), -1j * ee # w*sinh(zk)/sinh(hk), -?
#
# #---- Water particle acceleration ---
# ---- Water particle acceleration ---
# def _u_t(self, w, theta, kw):
# ''' U_t = x-acceleration'''
# ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -2690,7 +2741,7 @@ class TimeSeries(PlotData):
# zk = self._get_zk(kw)
# return (w ** 2) * hyperbolic_ratio(zk, hk, -1, -1), -ee # w*sinh(zk)/sinh(hk), ?
#
# #---- Water particle displacement ---
# ---- Water particle displacement ---
# def _x_p(self, w, theta, kw):
# ''' X_p = x-displacement'''
# ee, cthx, unused_cthy = self._get_ee_cthxy(theta, kw)
@ -2765,7 +2816,7 @@ class TimeSeries(PlotData):
# '''
#
#
# # Assume seastate with jonswap spectrum:
# Assume seastate with jonswap spectrum:
#
# Tp = 4 * np.sqrt(Hm0)
# gam = jonswap_peakfact(Hm0, Tp)
@ -2776,13 +2827,13 @@ class TimeSeries(PlotData):
# hk = kw * h
# zk1 = kw * z
# zk = hk + zk1 # z measured positive upward from mean water level (default)
# #zk = hk-zk1; % z measured positive downward from mean water level
# #zk1 = -zk1;
# #zk = zk1; % z measured positive upward from sea floor
# zk = hk-zk1; % z measured positive downward from mean water level
# zk1 = -zk1;
# zk = zk1; % z measured positive upward from sea floor
#
# # cosh(zk)/cosh(hk) approx exp(zk) for large h
# # hyperbolic_ratio(zk,hk,1,1) = cosh(zk)/cosh(hk)
# # pr = np.where(np.pi < hk, np.exp(zk1), hyperbolic_ratio(zk, hk, 1, 1))
# cosh(zk)/cosh(hk) approx exp(zk) for large h
# hyperbolic_ratio(zk,hk,1,1) = cosh(zk)/cosh(hk)
# pr = np.where(np.pi < hk, np.exp(zk1), hyperbolic_ratio(zk, hk, 1, 1))
# pr = hyperbolic_ratio(zk, hk, 1, 1)
# pressure = (rho * g * Hm0 / 2) * pr
#
@ -2793,6 +2844,7 @@ class TimeSeries(PlotData):
#
# return pressure
def main():
import wafo
ts = wafo.objects.mat2timeseries(wafo.data.sea())
@ -2802,8 +2854,6 @@ def main():
lc.plot()
T = ts.wave_periods(vh=0.0, pdef='c2c') # @UnusedVariable
# main()
import wafo.spectrum.models as sm
Sj = sm.Jonswap()
@ -2824,7 +2874,6 @@ def main():
# Plot 2 objects in one call
d2 = PlotData(np.sin(x), x, xlab='x', ylab='sin', title='sinus')
d0 = d2.copy()
d0.data = d0.data * 0.9
d1 = d2.copy()
@ -2835,6 +2884,7 @@ def main():
d2.plot()
print 'Done'
def test_docstrings():
import doctest
doctest.testmod()
@ -2848,4 +2898,3 @@ if __name__ == '__main__':
# doctest.testmod()
# else:
# main()

@ -1,5 +1,5 @@
GFORTRAN module version '4' created from intmodule.f on Sat May 05 23:15:41 2012
MD5:eb0327a40d874f78d04c89aa93e323f2 -- If you edit this, you'll get what you deserve.
GFORTRAN module version '4' created from intmodule.f on Fri Apr 05 14:43:34 2013
MD5:99db0c86db329df2a1ee0bbf67b9ec99 -- If you edit this, you'll get what you deserve.
(() () () () () () () () () () () () () () () () () () () () () () () ()
() () ())
@ -17,30 +17,30 @@ MD5:eb0327a40d874f78d04c89aa93e323f2 -- If you edit this, you'll get what you de
(2 'krobov' 'krobovmod' 'krobov' 1 ((PROCEDURE UNKNOWN-INTENT
MODULE-PROC DECL UNKNOWN 0 0 SUBROUTINE GENERIC) (UNKNOWN 0 0 0 UNKNOWN
()) 3 0 (4 5 6 7 8 9 10 11 12) () 0 () () () 0 0)
7 'functn' '' 'functn' 3 ((PROCEDURE UNKNOWN-INTENT UNKNOWN-PROC BODY
UNKNOWN 0 0 DUMMY FUNCTION ALWAYS_EXPLICIT) (REAL 8 0 0 REAL ()) 13 0 (
14 15) () 7 () () () 0 0)
5 'minvls' '' 'minvls' 3 ((VARIABLE INOUT UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
4 'ndim' '' 'ndim' 3 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
8 'abseps' '' 'abseps' 3 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
6 'maxvls' '' 'maxvls' 3 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
9 'releps' '' 'releps' 3 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
11 'finest' '' 'finest' 3 ((VARIABLE OUT UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
12 'inform' '' 'inform' 3 ((VARIABLE OUT UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
5 'minvls' '' 'minvls' 3 ((VARIABLE INOUT UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
10 'abserr' '' 'abserr' 3 ((VARIABLE OUT UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
11 'finest' '' 'finest' 3 ((VARIABLE OUT UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
4 'ndim' '' 'ndim' 3 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
9 'releps' '' 'releps' 3 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
6 'maxvls' '' 'maxvls' 3 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
14 'n' '' 'n' 13 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY) (
INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
7 'functn' '' 'functn' 3 ((PROCEDURE UNKNOWN-INTENT UNKNOWN-PROC BODY
UNKNOWN 0 0 DUMMY FUNCTION ALWAYS_EXPLICIT) (REAL 8 0 0 REAL ()) 13 0 (
14 15) () 7 () () () 0 0)
15 'z' '' 'z' 13 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DIMENSION DUMMY) (REAL 8 0 0 REAL ()) 0 0 () (1 ASSUMED_SHAPE (CONSTANT
(INTEGER 4 0 0 INTEGER ()) 0 '1') ()) 0 () () () 0 0)
14 'n' '' 'n' 13 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY) (
INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
)
('krobov' 0 2)

@ -1,5 +1,5 @@
GFORTRAN module version '4' created from intmodule.f on Sat May 05 23:15:40 2012
MD5:f628260304c0d5215e1ef95941599430 -- If you edit this, you'll get what you deserve.
GFORTRAN module version '4' created from intmodule.f on Fri Apr 05 14:43:34 2013
MD5:c88c5a15c480306fb971bd1e5ced587e -- If you edit this, you'll get what you deserve.
(() () () () () () () () () () () () () () () () () () () () () () () ()
() () ())
@ -17,6 +17,14 @@ MD5:f628260304c0d5215e1ef95941599430 -- If you edit this, you'll get what you de
(2 'ranmc' 'rcrudemod' 'ranmc' 1 ((PROCEDURE UNKNOWN-INTENT MODULE-PROC
DECL UNKNOWN 0 0 SUBROUTINE GENERIC) (UNKNOWN 0 0 0 UNKNOWN ()) 3 0 (4 5
6 7 8 9 10 11) () 0 () () () 0 0)
8 'releps' '' 'releps' 3 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN
UNKNOWN 0 0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
9 'error' '' 'error' 3 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN
UNKNOWN 0 0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
10 'value' '' 'value' 3 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN
UNKNOWN 0 0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
11 'inform' '' 'inform' 3 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN
UNKNOWN 0 0 DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
4 'n' '' 'n' 3 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
5 'maxpts' '' 'maxpts' 3 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN
@ -26,14 +34,6 @@ UNKNOWN 0 0 DUMMY FUNCTION ALWAYS_EXPLICIT) (REAL 8 0 0 REAL ()) 12 0 (
13 14) () 6 () () () 0 0)
7 'abseps' '' 'abseps' 3 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN
UNKNOWN 0 0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
8 'releps' '' 'releps' 3 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN
UNKNOWN 0 0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
9 'error' '' 'error' 3 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN
UNKNOWN 0 0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
10 'value' '' 'value' 3 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN
UNKNOWN 0 0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
11 'inform' '' 'inform' 3 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN
UNKNOWN 0 0 DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
13 'n' '' 'n' 12 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY) (
INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
14 'z' '' 'z' 12 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0

@ -1,5 +1,5 @@
GFORTRAN module version '4' created from rind71mod.f on Mon Feb 18 02:58:35 2013
MD5:520dd65f929350d1434842f22f38b888 -- If you edit this, you'll get what you deserve.
GFORTRAN module version '4' created from rind71mod.f on Fri Apr 05 14:43:37 2013
MD5:c5460e9301460ce17aef8031cd82ad57 -- If you edit this, you'll get what you deserve.
(() () () () () () () () () () () () () () () () () () () () () () ()
() () () ())
@ -27,16 +27,26 @@ UNKNOWN ()) 10 0 (11 12 13 14 15 16 17 18) () 0 () () () 0 0)
4 'setdata' 'rind71mod' 'setdata' 1 ((PROCEDURE UNKNOWN-INTENT
MODULE-PROC DECL UNKNOWN 0 0 SUBROUTINE GENERIC) (UNKNOWN 0 0 0 UNKNOWN
()) 19 0 (20 21 22 23 24 25 26 27 28) () 0 () () () 0 0)
25 'dnit' '' 'dnit' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
27 'dnint' '' 'dnint' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
28 'dxsplt' '' 'dxsplt' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
20 'method' '' 'method' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
23 'dreps' '' 'dreps' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
24 'deps2' '' 'deps2' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
25 'dnit' '' 'dnit' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
26 'dxc' '' 'dxc' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
27 'dnint' '' 'dnint' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
21 'scale' '' 'scale' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
22 'depss' '' 'depss' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
9 'speed' '' 'speed' 8 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
28 'dxsplt' '' 'dxsplt' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
7 'array' '' 'array' 6 ((VARIABLE UNKNOWN-INTENT UNKNOWN-PROC UNKNOWN
UNKNOWN 0 0 DIMENSION DUMMY) (REAL 8 0 0 REAL ()) 0 0 () (2
ASSUMED_SHAPE (CONSTANT (INTEGER 4 0 0 INTEGER ()) 0 '1') () (CONSTANT (
@ -68,16 +78,6 @@ DIMENSION DUMMY) (REAL 8 0 0 REAL ()) 0 0 () (2 ASSUMED_SHAPE (CONSTANT
DIMENSION DUMMY) (REAL 8 0 0 REAL ()) 0 0 () (2 ASSUMED_SHAPE (CONSTANT
(INTEGER 4 0 0 INTEGER ()) 0 '1') () (CONSTANT (INTEGER 4 0 0 INTEGER ())
0 '1') ()) 0 () () () 0 0)
9 'speed' '' 'speed' 8 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
20 'method' '' 'method' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
21 'scale' '' 'scale' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
22 'depss' '' 'depss' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
23 'dreps' '' 'dreps' 19 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
)
('echo' 0 2 'initdata' 0 3 'rind71' 0 5 'setdata' 0 4)

@ -1,5 +1,5 @@
GFORTRAN module version '4' created from rindmod.f on Sat May 05 23:15:44 2012
MD5:27b48943ab247880a4203cf14574fba3 -- If you edit this, you'll get what you deserve.
GFORTRAN module version '4' created from rindmod.f on Fri Apr 05 14:43:35 2013
MD5:dcdbb9dedca21469ecd6ba2a3e2bf880 -- If you edit this, you'll get what you deserve.
(() () () () () () () () () () () () () () () () () () () () () () ()
() () () ())
@ -52,12 +52,6 @@ UNKNOWN ()) 15 0 (16 17 18 19 20 21 22 23 24 25 26) () 0 () () () 0 0)
MODULE-PROC DECL UNKNOWN 0 0 SUBROUTINE GENERIC ALWAYS_EXPLICIT) (
UNKNOWN 0 0 0 UNKNOWN ()) 27 0 (28 29 30 31 32 33 34 35 36 37) () 0 () ()
() 0 0)
28 'method' '' 'method' 27 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 OPTIONAL DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
29 'xcscale' '' 'xcscale' 27 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN
0 0 OPTIONAL DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
30 'abseps' '' 'abseps' 27 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 OPTIONAL DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
31 'releps' '' 'releps' 27 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 OPTIONAL DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
32 'coveps' '' 'coveps' 27 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0
@ -72,6 +66,12 @@ OPTIONAL DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
0 0 OPTIONAL DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
37 'nc1c2' '' 'nc1c2' 27 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
OPTIONAL DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
28 'method' '' 'method' 27 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 OPTIONAL DUMMY) (INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
29 'xcscale' '' 'xcscale' 27 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN
0 0 OPTIONAL DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
30 'abseps' '' 'abseps' 27 ((VARIABLE IN UNKNOWN-PROC UNKNOWN UNKNOWN 0
0 OPTIONAL DUMMY) (REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
16 'vals' '' 'vals' 15 ((VARIABLE OUT UNKNOWN-PROC UNKNOWN UNKNOWN 0 0
DIMENSION DUMMY) (REAL 8 0 0 REAL ()) 0 0 () (1 ASSUMED_SHAPE (CONSTANT
(INTEGER 4 0 0 INTEGER ()) 0 '1') ()) 0 () () () 0 0)

@ -1,5 +1,5 @@
GFORTRAN module version '4' created from swapmod.f on Sat May 05 23:15:42 2012
MD5:52275e19413dc7ab9d6082dbb7b7af80 -- If you edit this, you'll get what you deserve.
GFORTRAN module version '4' created from swapmod.f on Fri Apr 05 14:43:34 2013
MD5:d3f134c81002cd5f6cec09ebff3e336f -- If you edit this, you'll get what you deserve.
(() () () () () () () () () () () () () () () () () () () () () () () ()
() () ())
@ -25,6 +25,12 @@ DECL UNKNOWN 0 0 SUBROUTINE) (UNKNOWN 0 0 0 UNKNOWN ()) 11 0 (12 13) ()
0 () () () 0 0)
14 'swapmod' 'swapmod' 'swapmod' 1 ((MODULE UNKNOWN-INTENT UNKNOWN-PROC
UNKNOWN UNKNOWN 0 0) (UNKNOWN 0 0 0 UNKNOWN ()) 0 0 () () 0 () () () 0 0)
12 'a' '' 'a' 11 ((VARIABLE INOUT UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY)
(REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
13 'b' '' 'b' 11 ((VARIABLE INOUT UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY)
(REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
9 'a' '' 'a' 8 ((VARIABLE INOUT UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY)
(INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
10 'b' '' 'b' 8 ((VARIABLE INOUT UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY)
(INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
6 'a' '' 'a' 5 ((VARIABLE INOUT UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY)
@ -33,12 +39,6 @@ UNKNOWN UNKNOWN 0 0) (UNKNOWN 0 0 0 UNKNOWN ()) 0 0 () () 0 () () () 0 0)
7 'b' '' 'b' 5 ((VARIABLE INOUT UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY)
(CHARACTER 1 0 0 CHARACTER ((CONSTANT (INTEGER 4 0 0 INTEGER ()) 0 '1')))
0 0 () () 0 () () () 0 0)
12 'a' '' 'a' 11 ((VARIABLE INOUT UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY)
(REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
13 'b' '' 'b' 11 ((VARIABLE INOUT UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY)
(REAL 8 0 0 REAL ()) 0 0 () () 0 () () () 0 0)
9 'a' '' 'a' 8 ((VARIABLE INOUT UNKNOWN-PROC UNKNOWN UNKNOWN 0 0 DUMMY)
(INTEGER 4 0 0 INTEGER ()) 0 0 () () 0 () () () 0 0)
)
('swap_c' 0 2 'swap_i' 0 3 'swap_r' 0 4 'swapmod' 0 14)

@ -0,0 +1,402 @@
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.lib.six import callable
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for a set of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
.. versionadded:: 0.11.0
Parameters
----------
x : array_like
A sequence of values to be binned.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this raises an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), array([1, 2, 1, 2, 3]))
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, xy = binned_statistic_dd([x], values, statistic,
bins, range)
return medians, edges[0], xy
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None):
"""
Compute a bidimensional binned statistic for a set of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
.. versionadded:: 0.11.0
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (M,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this raises an error.
bins : int or [int, int] or array-like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx=ny=bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edges = y_edges = bins),
* the bin edges in each dimension (x_edges, y_edges = bins).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin
xedges : (nx + 1) ndarray
The bin edges along the first dimension.
yedges : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as `values`.
See Also
--------
numpy.histogram2d, binned_statistic, binned_statistic_dd
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, xy = binned_statistic_dd([x, y], values, statistic,
bins, range)
return medians, edges[0], edges[1], xy
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
.. versionadded:: 0.11.0
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as x.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this raises an error.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each bin.
edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
numpy.histogramdd, binned_statistic, binned_statistic_2d
"""
if isinstance(statistic, str):
if statistic not in ['mean', 'median', 'count', 'sum', 'std']:
raise ValueError('unrecognized statistic "%s"' % statistic)
elif callable(statistic):
pass
else:
raise ValueError("statistic not understood")
# This code is based on np.histogramdd
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D * [None]
dedges = D * [None]
try:
M = len(bins)
if M != D:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = D * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(0), float))
smax = np.atleast_1d(np.array(sample.max(0), float))
else:
smin = np.zeros(D)
smax = np.zeros(D)
for i in np.arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in np.arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in np.arange(D):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in np.arange(D):
Ncount[i] = np.digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in np.arange(D):
# Rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal)
== np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
xy = np.zeros(N, int)
for i in np.arange(0, D - 1):
xy += Ncount[ni[i]] * nbin[ni[i + 1:]].prod()
xy += Ncount[ni[-1]]
result = np.empty(nbin.prod(), float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
a = flatcount.nonzero()
result[a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
flatsum2 = np.bincount(xy, values ** 2)
a = flatcount.nonzero()
result[a] = np.sqrt(flatsum2[a] / flatcount[a]
- (flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(xy, None)
a = np.arange(len(flatcount))
result[a] = flatcount
elif statistic == 'sum':
result.fill(0)
flatsum = np.bincount(xy, values)
a = np.arange(len(flatsum))
result[a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(xy):
result[i] = np.median(values[xy == i])
elif callable(statistic):
with warnings.catch_warnings():
# Numpy generates warnings for mean/std/... with an empty list
warnings.filterwarnings('ignore', category=RuntimeWarning)
old = np.seterr(invalid='ignore')
try:
null = statistic([])
except:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(xy):
result[i] = statistic(values[xy == i])
# Shape into a proper matrix
result = result.reshape(np.sort(nbin))
for i in np.arange(nbin.size):
j = ni.argsort()[i]
result = result.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D * [slice(1, -1)]
result = result[core]
if (result.shape != nbin - 2).any():
raise RuntimeError('Internal Shape Error')
return result, edges, xy
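A minimal usage sketch of the 1-D wrapper above; the data values are made up for illustration and `binned_statistic` is the function defined in this file:

import numpy as np

x = np.array([1.0, 1.5, 2.0, 2.5, 4.0])
values = np.arange(5.0)                       # [0, 1, 2, 3, 4]
stat, edges, binnumber = binned_statistic(x, values, statistic='mean',
                                          bins=3)
# edges -> [1., 2., 3., 4.]; the point on the rightmost edge is counted
# in the last bin, so stat -> [0.5, 2.5, 4.0] and binnumber -> [1, 1, 2, 2, 3]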

@ -0,0 +1,24 @@
"""
Statistics-related constants.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
# The smallest representable positive number such that 1.0 + _EPS != 1.0.
_EPS = np.finfo(float).eps
# The largest [in magnitude] usable floating value.
_XMAX = np.finfo(float).machar.xmax
# The smallest [in magnitude] usable floating value.
_XMIN = np.finfo(float).machar.xmin
# -special.psi(1)
_EULER = 0.577215664901532860606512090082402431042
# special.zeta(3, 1) Apery's constant
_ZETA3 = 1.202056903159594285399738161511449990765
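A quick self-check, assuming scipy is available, that the hard-coded values agree with the special-function definitions quoted in the comments above:

from scipy import special

# Euler-Mascheroni constant is -psi(1)
assert abs(_EULER + special.psi(1)) < 1e-14
# Apery's constant is zeta(3, 1)
assert abs(_ZETA3 - special.zeta(3, 1)) < 1e-14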

File diff suppressed because it is too large

@ -23,6 +23,7 @@ __all__ = [
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
@ -40,6 +41,7 @@ class binom_gen(rv_discrete):
%(example)s
"""
def _rvs(self, n, p):
return mtrand.binomial(n, p, self._size)
@ -87,6 +89,7 @@ binom = binom_gen(name='binom')
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
@ -105,6 +108,7 @@ class bernoulli_gen(binom_gen):
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
@ -136,6 +140,7 @@ bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
@ -153,6 +158,7 @@ class nbinom_gen(rv_discrete):
%(example)s
"""
def _rvs(self, n, p):
return mtrand.negative_binomial(n, p, self._size)
@ -193,6 +199,7 @@ nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
@ -210,6 +217,7 @@ class geom_gen(rv_discrete):
%(example)s
"""
def _rvs(self, p):
return mtrand.geometric(p, size=self._size)
@ -249,6 +257,7 @@ geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
@ -298,6 +307,7 @@ class hypergeom_gen(rv_discrete):
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return mtrand.hypergeometric(n, M - n, N, size=self._size)
@ -311,9 +321,9 @@ class hypergeom_gen(rv_discrete):
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
return gamln(good + 1) - gamln(good - k + 1) - gamln(k + 1) + \
gamln(bad + 1) - gamln(bad - N + k + 1) - gamln(N - k + 1) - \
gamln(tot + 1) + gamln(tot - N + 1) + gamln(N + 1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
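The reformatted `_logpmf` above is log(C(good, k) * C(bad, N - k) / C(tot, N)) expanded with `gamln` for numerical stability. A sketch, with parameter values chosen only for illustration, that the expansion matches the pmf:

import numpy as np
from scipy.special import gammaln as gamln
from scipy.stats import hypergeom

M, n, N, k = 20, 7, 12, 4          # population, good items, draws, hits
tot, good = M, n
bad = tot - good
logpmf = (gamln(good + 1) - gamln(good - k + 1) - gamln(k + 1) +
          gamln(bad + 1) - gamln(bad - N + k + 1) - gamln(N - k + 1) -
          gamln(tot + 1) + gamln(tot - N + 1) + gamln(N + 1))
assert np.isclose(np.exp(logpmf), hypergeom.pmf(k, M, n, N))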
@ -329,7 +339,8 @@ class hypergeom_gen(rv_discrete):
mu = N * p
var = m * n * N * (M - N) * 1.0 / (M * M * (M - 1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g1 = (m - n) * (M - 2 * N) / (M - 2.0) * \
sqrt((M - 1.0) / (m * n * N * (M - N)))
g2 = M * (M + 1) - 6. * N * (M - N) - 6. * n * m
g2 *= (M - 1) * M * M
@ -361,6 +372,7 @@ hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
@ -378,6 +390,7 @@ class logser_gen(rv_discrete):
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
@ -407,6 +420,7 @@ logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
@ -424,6 +438,7 @@ class poisson_gen(rv_discrete):
%(example)s
"""
def _rvs(self, mu):
return mtrand.poisson(mu, self._size)
@ -458,6 +473,7 @@ poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
@ -475,6 +491,7 @@ class planck_gen(rv_discrete):
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
@ -516,6 +533,7 @@ planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
@ -533,6 +551,7 @@ class boltzmann_gen(rv_discrete):
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (expm1(-lambda_)) / (expm1(-lambda_ * N))
return fact * exp(-lambda_ * k)
@ -557,7 +576,8 @@ class boltzmann_gen(rv_discrete):
trm2 = (z * trm ** 2 - N * N * zN)
g1 = z * (1 + z) * trm ** 3 - N ** 3 * zN * (1 + zN)
g1 = g1 / trm2 ** (1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = z * (1 + 4 * z + z * z) * \
trm ** 4 - N ** 4 * zN * (1 + 4 * zN + zN * zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
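Closed-form moments like the `g1`/`g2` expressions above are easy to get wrong when re-wrapped, so a direct-summation sketch (parameter values assumed for illustration) of the kind used elsewhere in the test suite:

import numpy as np
from scipy.stats import boltzmann

lambda_, N = 0.5, 10
k = np.arange(N)                      # support is k = 0, ..., N - 1
pmf = boltzmann.pmf(k, lambda_, N)
assert np.isclose(pmf.sum(), 1.0)     # normalization
assert np.isclose((k * pmf).sum(), boltzmann.mean(lambda_, N))
assert np.isclose((k ** 2 * pmf).sum() - (k * pmf).sum() ** 2,
                  boltzmann.var(lambda_, N))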
@ -565,6 +585,7 @@ boltzmann = boltzmann_gen(name='boltzmann',
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
@ -585,6 +606,7 @@ class randint_gen(rv_discrete):
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
@ -628,6 +650,7 @@ randint = randint_gen(name='randint', longname='A discrete uniform '
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
@ -645,6 +668,7 @@ class zipf_gen(rv_discrete):
%(example)s
"""
def _rvs(self, a):
return mtrand.zipf(a, size=self._size)
@ -664,6 +688,7 @@ zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
@ -681,6 +706,7 @@ class dlaplace_gen(rv_discrete):
%(example)s
"""
def _pmf(self, k, a):
return tanh(a / 2.0) * exp(-a * abs(k))
@ -710,6 +736,7 @@ dlaplace = dlaplace_gen(a=-np.inf,
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
@ -735,6 +762,7 @@ class skellam_gen(rv_discrete):
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return mtrand.poisson(mu1, n) - mtrand.poisson(mu2, n)

@ -455,7 +455,7 @@ class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.dist = dist
args, loc, scale = dist._parse_args(*args, **kwds)
if len(args) == dist.numargs - 2: # isinstance(dist, rv_continuous):
if isinstance(dist, rv_continuous):
self.par = args + (loc, scale)
else: # rv_discrete
self.par = args + (loc,)
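The fix replaces a fragile arity test with an explicit type test. A sketch, using scipy.stats equivalents chosen arbitrarily, of the two packing conventions the branch now distinguishes:

from scipy.stats import gamma, poisson

# Continuous: par = args + (loc, scale), e.g. (shape, loc, scale).
frozen_cont = gamma(2.0, loc=1.0, scale=3.0)
# Discrete: par = args + (loc,); there is no scale parameter.
frozen_disc = poisson(4.5, loc=1)
# The old test len(args) == dist.numargs - 2 mis-classifies a one-shape
# continuous law such as gamma (1 != -1), silently dropping its scale.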

@ -0,0 +1,15 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 06 16:02:47 2011
@author: pab
"""
import numpy as np
import wafo.kdetools as wk
n = 100
x = np.sort(5 * np.random.rand(1, n) - 2.5, axis=-1).ravel()
y = (np.cos(x) > 2 * np.random.rand(n) - 1).ravel()  # noisy binary response
kreg = wk.KRegression(x, y)
f = kreg(output='plotobj', title='Kernel regression', plotflag=1)
f.plot()

@ -8,23 +8,25 @@ import math
import warnings
import numpy as np
from numpy import (isscalar, r_, log, sum, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d, sqrt, ceil,
floor, array, poly1d, compress, not_equal, pi, exp, ravel, angle)
from numpy import (isscalar, r_, log, sum, around, unique, asarray, zeros,
arange, sort, amin, amax, any, atleast_1d, sqrt, ceil,
floor, array, poly1d, compress, not_equal, pi, exp, ravel,
angle)
from numpy.testing.decorators import setastest
from scipy.lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from . import distributions
from ._distn_infrastructure import rv_generic
from wafo.stats import statlib
from wafo.stats import stats
from wafo.stats.stats import find_repeats
from wafo.stats import distributions
from wafo.stats._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max',
'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon',
@ -78,7 +80,8 @@ def bayes_mvs(data, alpha=0.90):
"""
res = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha)
raise ValueError(
"0 < alpha < 1 is required, but alpha=%s was given." % alpha)
return tuple((x.mean(), x.interval(alpha)) for x in res)
@ -135,7 +138,8 @@ def mvsdist(data):
C = x.var()
if (n > 1000): # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C/(2.*n)))
sdist = distributions.norm(
loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
else:
nm1 = n - 1
@ -418,7 +422,7 @@ def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
osr = sort(x)
if fit or (plot is not None):
# perform a linear fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
slope, intercept, r, _prob, _sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope * osm + intercept, 'r-')
@ -470,10 +474,11 @@ def ppcc_max(x, brack=(0.0,1.0), dist='tukeylambda'):
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
r, _prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
return optimize.brent(tempfunc, brack=brack,
args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
@ -488,7 +493,7 @@ def ppcc_plot(x,a,b,dist='tukeylambda', plot=None, N=80):
ppcc = svals * 0.0
k = 0
for sval in svals:
r1,r2 = probplot(x,sval,dist=dist,fit=1)
_r1, r2 = probplot(x, sval, dist=dist, fit=1)
ppcc[k] = r2[-1]
k += 1
if plot is not None:
@ -719,7 +724,7 @@ def boxcox(x, lmbda=None, alpha=None):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
return special.boxcox(x, lmbda) # @UndefinedVariable
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
@ -810,7 +815,7 @@ def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
r, _prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
@ -1111,12 +1116,12 @@ def anderson(x,dist='norm'):
critical = around(_Avals_logistic / (1.0 + 0.25 / N), 3)
else: # (dist == 'gumbel') or (dist == 'extreme1'):
# the following is incorrect, see ticket:1097
## def fixedsolve(th,xj,N):
# def fixedsolve(th,xj,N):
## val = stats.sum(xj)*1.0/N
## tmp = exp(-xj/th)
## term = sum(xj*tmp,axis=0)
## term /= sum(tmp,axis=0)
## return val - term
# return val - term
## s = optimize.fixed_point(fixedsolve, 1.0, args=(x,N),xtol=1e-5)
## xbar = -s*log(sum(exp(-x/s),axis=0)*1.0/N)
xbar, s = distributions.gumbel_l.fit(x)
@ -1186,7 +1191,7 @@ def ansari(x,y):
if repeats and ((m < 55) or (n < 55)):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n,m)
astart, a1, _ifault = statlib.gscale(n, m)
ind = AB - astart
total = sum(a1, axis=0)
if ind < len(a1) / 2.0:
@ -1214,9 +1219,11 @@ def ansari(x,y):
# compute sum(tj * rj**2,axis=0)
fac = sum(symrank ** 2, axis=0)
if N % 2: # N odd
varAB = m*n*(16*N*fac-(N+1)**4)/(16.0 * N**2 * (N-1))
varAB = m * n * \
(16 * N * fac - (N + 1) ** 4) / (16.0 * N ** 2 * (N - 1))
else: # N even
varAB = m*n*(16*fac-N*(N+2)**2)/(16.0 * N * (N-1))
varAB = m * n * \
(16 * fac - N * (N + 2) ** 2) / (16.0 * N * (N - 1))
z = (AB - mnAB) / sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AB, pval
@ -1262,7 +1269,8 @@ def bartlett(*args):
Ntot = sum(Ni, axis=0)
spsq = sum((Ni - 1) * ssq, axis=0) / (1.0 * (Ntot - k))
numer = (Ntot * 1.0 - k) * log(spsq) - sum((Ni - 1.0) * log(ssq), axis=0)
denom = 1.0 + (1.0/(3*(k-1)))*((sum(1.0/(Ni-1.0),axis=0))-1.0/(Ntot-k))
denom = 1.0 + (1.0 / (3 * (k - 1))) * \
((sum(1.0 / (Ni - 1.0), axis=0)) - 1.0 / (Ntot - k))
T = numer / denom
pval = distributions.chi2.sf(T, k - 1) # 1 - cdf
return T, pval
@ -1320,7 +1328,8 @@ def levene(*args,**kwds):
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword argument '%s'" % kw)
raise TypeError(
"levene() got an unexpected keyword argument '%s'" % kw)
if kw == 'center':
center = value
else:
@ -1341,7 +1350,8 @@ def levene(*args,**kwds):
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut) for arg in args)
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
@ -1429,11 +1439,13 @@ def binom_test(x,n=None,p=0.5):
elif (x < p * n):
i = np.arange(np.ceil(p * n), n + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d * rerr, axis=0)
pval = distributions.binom.cdf(x,n,p) + distributions.binom.sf(n-y,n,p)
pval = distributions.binom.cdf(
x, n, p) + distributions.binom.sf(n - y, n, p)
else:
i = np.arange(np.floor(p * n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d * rerr, axis=0)
pval = distributions.binom.cdf(y-1,n,p) + distributions.binom.sf(x-1,n,p)
pval = distributions.binom.cdf(
y - 1, n, p) + distributions.binom.sf(x - 1, n, p)
return min(1.0, pval)
@ -1498,7 +1510,8 @@ def fligner(*args,**kwds):
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword argument '%s'" % kw)
raise TypeError(
"fligner() got an unexpected keyword argument '%s'" % kw)
if kw == 'center':
center = value
else:
@ -1618,7 +1631,8 @@ def mood(x, y, axis=0):
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
res_shape = tuple([x.shape[ax]
for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
@ -1731,11 +1745,13 @@ def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
d = x - y
if zero_method == "wilcox":
d = compress(not_equal(d, 0), d, axis=-1) # Keep all non-zero differences
# Keep all non-zero differences
d = compress(not_equal(d, 0), d, axis=-1)
count = len(d)
if (count < 10):
warnings.warn("Warning: sample size too small for normal approximation.")
warnings.warn(
"Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = sum((d > 0) * r, axis=0)
r_minus = sum((d < 0) * r, axis=0)
@ -1752,7 +1768,7 @@ def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
_replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
@ -1913,7 +1929,7 @@ def circstd(samples, high=2*pi, low=0, axis=None):
# Tests to include (from R) -- some of these already in stats.
########
#
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial

@ -185,7 +185,7 @@ import numpy as np
from . import futil
from . import distributions
try:
from ._rank import rankdata, tiecorrect
from scipy.stats._rank import rankdata, tiecorrect
except:
rankdata = tiecorrect = None
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode',
@ -260,9 +260,9 @@ def find_repeats(arr):
v1, v2, n = futil.dfreps(arr)
return v1[:n], v2[:n]
#######
### NAN friendly functions
########
#
# NAN friendly functions
#
def nanmean(x, axis=0):
@ -451,9 +451,9 @@ def nanmedian(x, axis=0):
return x
#####################################
######## CENTRAL TENDENCY ########
#####################################
#
# CENTRAL TENDENCY ########
#
def gmean(a, axis=0, dtype=None):
@ -498,7 +498,8 @@ def gmean(a, axis=0, dtype=None):
arrays automatically mask any non-finite values.
"""
if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it
# if not an ndarray object attempt to convert it
if not isinstance(a, np.ndarray):
log_a = np.log(np.array(a, dtype=dtype))
elif dtype: # Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
@ -563,7 +564,8 @@ def hmean(a, axis=0, dtype=None):
size = a.shape[axis]
return size / np.sum(1.0 / a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
raise ValueError(
"Harmonic mean only defined if all elements greater than zero")
def mode(a, axis=0):
@ -885,9 +887,9 @@ def tsem(a, limits=None, inclusive=(True, True)):
return sd / np.sqrt(am.count())
#####################################
############ MOMENTS #############
#####################################
#
# MOMENTS #############
#
def moment(a, moment=1, axis=0):
"""
@ -1059,7 +1061,9 @@ def kurtosis(a, axis=0, fisher=True, bias=True):
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
nval = 1.0 / \
(n - 2) / (n - 3) * \
((n * n - 1.0) * m4 / m2 ** 2.0 - 3 * (n - 1) ** 2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
@ -1116,9 +1120,9 @@ def describe(a, axis=0):
kurt = kurtosis(a, axis)
return n, mm, m, v, sk, kurt
#####################################
######## NORMALITY TESTS ##########
#####################################
#
# NORMALITY TESTS ##########
#
def skewtest(a, axis=0):
@ -1207,15 +1211,18 @@ def kurtosistest(a, axis=0):
int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0 * (n - 1) / (n + 1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
varb2 = 24.0 * n * \
(n - 2) * (n - 3) / ((n + 1) * (n + 1) * (n + 3) * (n + 5))
x = (b2 - E) / np.sqrt(varb2)
sqrtbeta1 = 6.0 * (n * n - 5 * n + 2) / ((n + 7) * (n + 9)) * np.sqrt((6.0 * (n + 3) * (n + 5)) /
(n * (n - 2) * (n - 3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
A = 6.0 + 8.0 / sqrtbeta1 * \
(2.0 / sqrtbeta1 + np.sqrt(1 + 4.0 / (sqrtbeta1 ** 2)))
term1 = 1 - 2 / (9.0 * A)
denom = 1 + x * np.sqrt(2 / (A - 4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom,1/3.0))
term2 = np.where(
denom < 0, term1, np.power((1 - 2.0 / A) / denom, 1 / 3.0))
Z = (term1 - term2) / np.sqrt(2 / (9.0 * A))
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
@ -1261,8 +1268,8 @@ def normaltest(a, axis=0):
"""
a, axis = _chk_asarray(a, axis)
s,p = skewtest(a,axis)
k,p = kurtosistest(a,axis)
s, _p = skewtest(a, axis)
k, _p = kurtosistest(a, axis)
k2 = s * s + k * k
return k2, chisqprob(k2, 2)
@ -1315,7 +1322,8 @@ def jarque_bera(x):
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
skewness = (1 / n * np.sum(diffx ** 3)) / \
(1 / n * np.sum(diffx ** 2)) ** (3 / 2.)
kurtosis = (1 / n * np.sum(diffx ** 4)) / (1 / n * np.sum(diffx ** 2)) ** 2
jb_value = n / 6 * (skewness ** 2 + (kurtosis - 3) ** 2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
@ -1323,9 +1331,9 @@ def jarque_bera(x):
return jb_value, p
#####################################
###### FREQUENCY FUNCTIONS #######
#####################################
#
# FREQUENCY FUNCTIONS #######
#
def itemfreq(a):
"""
@ -1600,7 +1608,8 @@ def histogram2(a, bins):
return n[1:] - n[:-1]
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
def histogram(a, numbins=10, defaultlimits=None, weights=None,
printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
@ -1764,9 +1773,9 @@ def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
return h, l, b, e
#####################################
###### VARIABILITY FUNCTIONS #####
#####################################
#
# VARIABILITY FUNCTIONS #####
#
def obrientransform(*args):
"""
@ -2044,9 +2053,9 @@ def zmap(scores, compare, axis=0, ddof=0):
return (scores - mns) / sstd
#####################################
####### TRIMMING FUNCTIONS #######
#####################################
#
# TRIMMING FUNCTIONS #######
#
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
@ -2497,7 +2506,8 @@ def fisher_exact(table, alternative='two-sided'):
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
# int32 is not enough for the algorithm
c = np.asarray(table, dtype=np.int64)
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
@ -3021,9 +3031,9 @@ def linregress(x, y=None):
return slope, intercept, r, prob, sterrest
#####################################
##### INFERENTIAL STATISTICS #####
#####################################
#
# INFERENTIAL STATISTICS #####
#
def ttest_1samp(a, popmean, axis=0):
"""
@ -3095,7 +3105,8 @@ def ttest_1samp(a, popmean, axis=0):
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
# use np.abs to get upper tail
prob = distributions.t.sf(np.abs(t), df) * 2
if t.ndim == 0:
t = t[()]
@ -3206,7 +3217,8 @@ def ttest_ind(a, b, axis=0, equal_var=True):
else:
vn1 = v1 / n1
vn2 = v2 / n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
df = ((vn1 + vn2) ** 2) / \
((vn1 ** 2) / (n1 - 1) + (vn2 ** 2) / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
@ -3861,8 +3873,8 @@ def ks_2samp(data1, data2):
"""
data1, data2 = map(asarray, (data1, data2))
n1 = data1.shape[0]
n2 = data2.shape[0]
#n1 = data1.shape[0]
#n2 = data2.shape[0]
n1 = len(data1)
n2 = len(data2)
data1 = np.sort(data1)
@ -3917,7 +3929,8 @@ def mannwhitneyu(x, y, use_continuity=True):
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx,axis=0) # calc U for x
# calc U for x
u1 = n1 * n2 + (n1 * (n1 + 1)) / 2.0 - np.sum(rankx, axis=0)
u2 = n1 * n2 - u1 # remainder is U for y
bigu = max(u1, u2)
smallu = min(u1, u2)
@ -3930,7 +3943,8 @@ def mannwhitneyu(x, y, use_continuity=True):
# normal approximation for prob calc with continuity correction
z = abs((bigu - 0.5 - n1 * n2 / 2.0) / sd)
else:
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
# normal approximation for prob calc
z = abs((bigu - n1 * n2 / 2.0) / sd)
return smallu, distributions.norm.sf(z) # (1.0 - zprob(z))
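As a reading aid for the rank computation above, a small sketch with made-up samples verifying the identity u1 + u2 == n1 * n2 that the 'remainder is U for y' comment relies on:

import numpy as np
from scipy.stats import rankdata

x = np.array([1.0, 2.5, 3.1, 4.8])
y = np.array([0.9, 2.0, 5.5])
n1, n2 = len(x), len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[:n1]                               # ranks of the x sample
u1 = n1 * n2 + n1 * (n1 + 1) / 2.0 - rankx.sum()  # U for x
u2 = n1 * n2 - u1                                 # remainder is U for y
assert u1 + u2 == n1 * n2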
@ -4080,7 +4094,8 @@ def friedmanchisquare(*args):
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
raise ValueError(
'\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
for i in range(1, k):
@ -4096,7 +4111,7 @@ def friedmanchisquare(*args):
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
_replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t * t - 1)
c = 1 - ties / float(k * (k * k - 1) * n)
@ -4106,9 +4121,9 @@ def friedmanchisquare(*args):
return chisq, chisqprob(chisq, k - 1)
#####################################
#### PROBABILITY CALCULATIONS ####
#####################################
#
# PROBABILITY CALCULATIONS ####
#
zprob = special.ndtr
@ -4169,9 +4184,9 @@ def betai(a, b, x):
return special.betainc(a, b, x)
#####################################
####### ANOVA CALCULATIONS #######
#####################################
#
# ANOVA CALCULATIONS #######
#
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivarite data, per
@ -4185,7 +4200,8 @@ def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
if (a - 1) ** 2 + (b - 1) ** 2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
q = np.sqrt(
((a - 1) ** 2 * (b - 1) ** 2 - 2) / ((a - 1) ** 2 + (b - 1) ** 2 - 5))
n_um = (1 - lmbda ** (1.0 / q)) * (a - 1) * (b - 1)
d_en = lmbda ** (1.0 / q) / (n_um * q - 0.5 * (a - 1) * (b - 1) + 1)
return n_um / d_en
@ -4251,9 +4267,9 @@ def f_value_multivariate(ER, EF, dfnum, dfden):
return n_um / d_en
#####################################
####### SUPPORT FUNCTIONS ########
#####################################
#
# SUPPORT FUNCTIONS ########
#
def ss(a, axis=0):
"""

@ -77,7 +77,8 @@ def check_skew_expect(distfn, arg, m, v, s, msg):
def check_kurt_expect(distfn, arg, m, v, k, msg):
if np.isfinite(k):
m4e = distfn.expect(lambda x: np.power(x - m, 4), arg)
npt.assert_allclose(m4e, (k + 3.) * np.power(v, 2), atol=1e-5, rtol=1e-5,
npt.assert_allclose(
m4e, (k + 3.) * np.power(v, 2), atol=1e-5, rtol=1e-5,
err_msg=msg + ' - kurtosis')
else:
npt.assert_(np.isnan(k))
@ -115,7 +116,7 @@ def check_edge_support(distfn, args):
def check_named_args(distfn, x, shape_args, defaults, meths):
## Check calling w/ named arguments.
# Check calling w/ named arguments.
# check consistency of shapes, numargs and _parse signature
signature = inspect.getargspec(distfn._parse_args)
@ -123,7 +124,8 @@ def check_named_args(distfn, x, shape_args, defaults, meths):
npt.assert_(signature.keywords is None)
npt.assert_(signature.defaults == defaults)
shape_argnames = signature.args[1:-len(defaults)] # self, a, b, loc=0, scale=1
# self, a, b, loc=0, scale=1
shape_argnames = signature.args[1:-len(defaults)]
if distfn.shapes:
shapes_ = distfn.shapes.replace(',', ' ').split()
else:

@ -1,5 +1,4 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, run_module_suite
from scipy.stats import \
@ -235,4 +234,5 @@ class TestBinnedStatistic(object):
if __name__ == "__main__":
#unittest.main()
run_module_suite()

@ -6,8 +6,9 @@ import numpy as np
import numpy.testing as npt
from scipy import integrate
from scipy import stats
from common_tests import (check_normalization, check_moment, check_mean_expect,
from wafo import stats
from wafo.stats.tests.common_tests import (check_normalization, check_moment,
check_mean_expect,
check_var_expect, check_skew_expect, check_kurt_expect,
check_entropy, check_private_entropy, NUMPY_BELOW_1_7,
check_edge_support, check_named_args)
@ -181,7 +182,8 @@ def _silence_fp_errors(func):
def test_cont_basic():
# this test skips slow distributions
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
# warnings.filterwarnings('ignore',
# category=integrate.IntegrationWarning)
for distname, arg in distcont[:]:
if distname in distslow:
continue
@ -233,7 +235,8 @@ def test_cont_basic():
def test_cont_basic_slow():
# same as above for slow distributions
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
# warnings.filterwarnings('ignore',
# category=integrate.IntegrationWarning)
for distname, arg in distcont[:]:
if distname not in distslow:
continue
@ -284,7 +287,8 @@ def test_cont_basic_slow():
@npt.dec.slow
def test_moments():
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
# warnings.filterwarnings('ignore',
# category=integrate.IntegrationWarning)
knf = npt.dec.knownfailureif
fail_normalization = set(['vonmises', 'ksone'])
fail_higher = set(['vonmises', 'ksone', 'ncf'])
@ -329,7 +333,8 @@ def check_sample_mean(sm,v,n, popmean):
def check_sample_var(sv, n, popvar):
# two-sided chisquare test for sample variance equal to hypothesized variance
# two-sided chisquare test for sample variance equal to hypothesized
# variance
df = n - 1
chi2 = (n - 1) * sv / float(popvar)
pval = stats.chisqprob(chi2, df) * 2
@ -360,15 +365,16 @@ def check_pdf(distfn, arg, msg):
eps = 1e-6
pdfv = distfn.pdf(median, *arg)
if (pdfv < 1e-4) or (pdfv > 1e4):
# avoid checking a case where pdf is close to zero or huge (singularity)
# avoid checking a case where pdf is close to zero or huge
# (singularity)
median = median + 0.1
pdfv = distfn.pdf(median, *arg)
cdfdiff = (distfn.cdf(median + eps, *arg) -
distfn.cdf(median - eps, *arg)) / eps / 2.0
# replace with better diff and better test (more points),
# actually, this works pretty well
npt.assert_almost_equal(pdfv, cdfdiff,
decimal=DECIMAL, err_msg=msg + ' - cdf-pdf relationship')
npt.assert_almost_equal(pdfv, cdfdiff, decimal=DECIMAL,
err_msg=msg + ' - cdf-pdf relationship')
def check_pdf_logpdf(distfn, args, msg):
@ -379,7 +385,8 @@ def check_pdf_logpdf(distfn, args, msg):
logpdf = distfn.logpdf(vals, *args)
pdf = pdf[pdf != 0]
logpdf = logpdf[np.isfinite(logpdf)]
npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg + " - logpdf-log(pdf) relationship")
npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7,
err_msg=msg + " - logpdf-log(pdf) relationship")
def check_sf_logsf(distfn, args, msg):
@ -390,7 +397,8 @@ def check_sf_logsf(distfn, args, msg):
logsf = distfn.logsf(vals, *args)
sf = sf[sf != 0]
logsf = logsf[np.isfinite(logsf)]
npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg + " - logsf-log(sf) relationship")
npt.assert_almost_equal(np.log(sf), logsf, decimal=7,
err_msg=msg + " - logsf-log(sf) relationship")
def check_cdf_logcdf(distfn, args, msg):
@ -401,7 +409,8 @@ def check_cdf_logcdf(distfn, args, msg):
logcdf = distfn.logcdf(vals, *args)
cdf = cdf[cdf != 0]
logcdf = logcdf[np.isfinite(logcdf)]
npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg + " - logcdf-log(cdf) relationship")
npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7,
err_msg=msg + " - logcdf-log(cdf) relationship")
def check_distribution_rvs(dist, args, alpha, rvs):
@ -417,6 +426,7 @@ def check_distribution_rvs(dist, args, alpha, rvs):
def check_vecentropy(distfn, args):
npt.assert_equal(distfn.vecentropy(*args), distfn._entropy(*args))
@npt.dec.skipif(NUMPY_BELOW_1_7)
def check_loc_scale(distfn, arg, m, v, msg):
loc, scale = 10.0, 10.0

@ -3,11 +3,12 @@ from __future__ import division, print_function, absolute_import
import numpy.testing as npt
import numpy as np
try:
from scipy.lib.six import xrange
from wafo.stats.six import xrange
except:
pass
from scipy import stats
from .common_tests import (check_normalization, check_moment, check_mean_expect,
from wafo import stats
from wafo.stats.tests.common_tests import (check_normalization, check_moment,
check_mean_expect,
check_var_expect, check_skew_expect, check_kurt_expect,
check_entropy, check_private_entropy, check_edge_support,
check_named_args)
@ -39,7 +40,7 @@ def test_discrete_basic():
np.random.seed(9765456)
rvs = distfn.rvs(size=2000, *arg)
supp = np.unique(rvs)
m, v = distfn.stats(*arg)
#_m, v = distfn.stats(*arg)
yield check_cdf_ppf, distfn, arg, supp, distname + ' cdf_ppf'
yield check_pmf_cdf, distfn, arg, distname
@ -184,9 +185,9 @@ def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
histsupp[0] = distfn.a
# find sample frequencies and perform chisquare test
freq,hsupp = np.histogram(rvs,histsupp)
cdfs = distfn.cdf(distsupp,*arg)
(chis,pval) = stats.chisquare(np.array(freq),n*distmass)
freq, _hsupp = np.histogram(rvs, histsupp)
#cdfs = distfn.cdf(distsupp, *arg)
(_chis, pval) = stats.chisquare(np.array(freq), n * distmass)
npt.assert_(pval > alpha, 'chisquare - test for %s'
' at arg = %s with pval = %s' % (msg, str(arg), str(pval)))

@ -2,23 +2,24 @@
"""
from __future__ import division, print_function, absolute_import
#import unittest
import warnings
import re
import sys
from numpy.testing import (TestCase, run_module_suite, assert_equal,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_array_equal, assert_almost_equal,
assert_array_almost_equal,
assert_allclose, assert_, assert_raises, rand, dec)
from nose import SkipTest
import numpy
import numpy as np
from numpy import typecodes, array
from scipy.lib._version import NumpyVersion
#from scipy.lib._version import NumpyVersion
from scipy import special
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import wafo.stats as stats
from wafo.stats._distn_infrastructure import argsreduce
from scipy.special import xlogy
@ -109,6 +110,7 @@ def test_vonmises_line_support():
class TestRandInt(TestCase):
def test_rvs(self):
vals = stats.randint.rvs(5, 30, size=100)
assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
@ -131,12 +133,14 @@ class TestRandInt(TestCase):
def test_cdf(self):
x = numpy.r_[0:36:100j]
k = numpy.floor(x)
out = numpy.select([k >= 30,k >= 5],[1.0,(k-5.0+1)/(30-5.0)],0)
out = numpy.select(
[k >= 30, k >= 5], [1.0, (k - 5.0 + 1) / (30 - 5.0)], 0)
vals = stats.randint.cdf(x, 5, 30)
assert_array_almost_equal(vals, out, decimal=12)
class TestBinom(TestCase):
def test_rvs(self):
vals = stats.binom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
@ -173,6 +177,7 @@ class TestBinom(TestCase):
class TestBernoulli(TestCase):
def test_rvs(self):
vals = stats.bernoulli.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
@ -201,6 +206,7 @@ class TestBernoulli(TestCase):
class TestNBinom(TestCase):
def test_rvs(self):
vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
@ -219,6 +225,7 @@ class TestNBinom(TestCase):
class TestGeom(TestCase):
def test_rvs(self):
vals = stats.geom.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
@ -261,14 +268,17 @@ class TestGeom(TestCase):
class TestTruncnorm(TestCase):
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5,0,1e-4,0.5, 1-1e-4,1,2], -1., 1.,
vals = stats.truncnorm.ppf(
[-0.5, 0, 1e-4, 0.5, 1 - 1e-4, 1, 2], -1., 1.,
loc=[3] * 7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5,0,1e-4,0.5, 1-1e-4,1,2], -1., 1.,
vals = stats.truncnorm.isf(
[-0.5, 0, 1e-4, 0.5, 1 - 1e-4, 1, 2], -1., 1.,
loc=[3] * 7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
@ -298,6 +308,7 @@ class TestTruncnorm(TestCase):
class TestHypergeom(TestCase):
def test_rvs(self):
vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
assert_(numpy.all(vals >= 0) &
@ -329,7 +340,8 @@ class TestHypergeom(TestCase):
quantile = 2e4
res = []
for eaten in fruits_eaten:
res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten))
res.append(
stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten))
expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
8.265601e-11, 0.1237904, 1])
assert_allclose(res, expected, atol=0, rtol=5e-7)
@ -372,6 +384,7 @@ class TestLoggamma(TestCase):
class TestLogser(TestCase):
def test_rvs(self):
vals = stats.logser.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 1))
@ -385,6 +398,7 @@ class TestLogser(TestCase):
class TestPareto(TestCase):
def test_stats(self):
# Check the stats() method with some simple values. Also check
# that the calculations do not trigger RuntimeWarnings.
@ -436,17 +450,20 @@ class TestPareto(TestCase):
m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
assert_allclose(m, 4.0 / 3.0)
assert_allclose(v, 4.0 / 18.0)
assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
assert_allclose(
s, 2 * (1 + 4.0) / (4.0 - 3) * np.sqrt((4.0 - 2) / 4.0))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
assert_allclose(m, 4.5 / 3.5)
assert_allclose(v, 4.5 / (3.5 * 3.5 * 2.5))
assert_allclose(s, (2 * 5.5 / 1.5) * np.sqrt(2.5 / 4.5))
assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
assert_allclose(
k, 6 * (4.5 ** 3 + 4.5 ** 2 - 6 * 4.5 - 2) / (4.5 * 1.5 * 0.5))
class TestPearson3(TestCase):
def test_rvs(self):
vals = stats.pearson3.rvs(0.1, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
@ -480,6 +497,7 @@ class TestPearson3(TestCase):
class TestPoisson(TestCase):
def test_rvs(self):
vals = stats.poisson.rvs(0.5, size=(2, 50))
assert_(numpy.all(vals >= 0))
@ -498,6 +516,7 @@ class TestPoisson(TestCase):
class TestZipf(TestCase):
def test_rvs(self):
vals = stats.zipf.rvs(1.5, size=(2, 50))
assert_(numpy.all(vals >= 1))
@ -520,6 +539,7 @@ class TestZipf(TestCase):
class TestDLaplace(TestCase):
def test_rvs(self):
vals = stats.dlaplace.rvs(1.5, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
@ -531,7 +551,6 @@ class TestDLaplace(TestCase):
assert_(val.dtype.char in typecodes['AllInteger'])
assert_(stats.dlaplace.rvs(0.8) is not None)
def test_stats(self):
# compare the explicit formulas w/ direct summation using pmf
a = 1.
@ -554,8 +573,9 @@ class TestDLaplace(TestCase):
class TestInvGamma(TestCase):
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
"assert_* funcs broken with inf/nan")
# @dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
# "assert_* funcs broken with inf/nan")
def test_invgamma_inf_gh_1866(self):
# invgamma's moments are only finite for a>n
# specific numbers checked w/ boost 1.54
@ -563,7 +583,8 @@ class TestInvGamma(TestCase):
warnings.simplefilter('error', RuntimeWarning)
mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
assert_allclose(mvsk,
[0.05461496450, 0.0001723162534, 1.020362676, 2.055616582])
[0.05461496450, 0.0001723162534,
1.020362676, 2.055616582])
a = [1.1, 3.1, 5.6]
mvsk = stats.invgamma.stats(a=a, moments='mvsk')
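The pattern of nan entries in the expected values follows from the rule in the comment: the n-th moment of invgamma(a) is finite only for a > n. A minimal sketch of that boundary behaviour (assuming scipy.stats, where undefined higher moments are reported as nan):

    import numpy as np
    from scipy import stats

    # a = 3.5: mean (a > 1), variance (a > 2) and skewness (a > 3) exist,
    # but the kurtosis needs a > 4
    m, v, s, k = stats.invgamma.stats(a=3.5, moments='mvsk')
    assert np.isfinite(m) and np.isfinite(v) and np.isfinite(s)
    assert np.isnan(k)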
@ -576,6 +597,7 @@ class TestInvGamma(TestCase):
class TestF(TestCase):
def test_f_moments(self):
# n-th moment of F distributions is only finite for n < dfd / 2
m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
@ -590,10 +612,10 @@ class TestF(TestCase):
warnings.simplefilter('error', RuntimeWarning)
stats.f.stats(dfn=[11] * 4, dfd=[2, 4, 6, 8], moments='mvsk')
@dec.knownfailureif(True, 'f stats does not properly broadcast')
#@dec.knownfailureif(True, 'f stats does not properly broadcast')
def test_stats_broadcast(self):
# stats do not fully broadcast just yet
mv = stats.f.stats(dfn=11, dfd=[11, 12])
_mv = stats.f.stats(dfn=11, dfd=[11, 12])
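The dfd = 6.5 case above sits just between the boundaries: the n-th moment needs dfd > 2n, so mean, variance and skewness (dfd > 6) exist while the kurtosis (dfd > 8) does not. A hedged spot-check:

    import numpy as np
    from scipy import stats

    m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
    assert np.isfinite(m) and np.isfinite(v) and np.isfinite(s)
    assert not np.isfinite(k)   # fourth moment undefined for dfd <= 8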
def test_rvgeneric_std():
@ -602,6 +624,7 @@ def test_rvgeneric_std():
class TestRvDiscrete(TestCase):
def test_rvs(self):
states = [-1, 0, 1, 2, 3, 4]
probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
@ -630,6 +653,7 @@ class TestRvDiscrete(TestCase):
class TestExpon(TestCase):
def test_zero(self):
assert_equal(stats.expon.pdf(0), 1)
@ -639,6 +663,7 @@ class TestExpon(TestCase):
class TestGenExpon(TestCase):
def test_pdf_unity_area(self):
from scipy.integrate import simps
# PDF should integrate to one
@ -653,12 +678,15 @@ class TestGenExpon(TestCase):
class TestExponpow(TestCase):
def test_tail(self):
assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8), 5)
assert_almost_equal(
stats.exponpow.isf(stats.exponpow.sf(5, .8), .8), 5)
class TestSkellam(TestCase):
def test_pmf(self):
# comparison to R
k = numpy.arange(-10, 15)
@ -703,6 +731,7 @@ class TestSkellam(TestCase):
class TestLognorm(TestCase):
def test_pdf(self):
# Regression test for Ticket #1471: avoid nan with 0/0 situation
with np.errstate(divide='ignore'):
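At x = 0 the naive lognorm density divides by x while the Gaussian factor tends to zero, a 0/0-style expression; the guarded evaluation should return the limit 0 rather than nan. Roughly (assuming scipy.stats):

    import numpy as np
    from scipy import stats

    with np.errstate(divide='ignore'):
        pdf = stats.lognorm.pdf([0, 0.5, 1], 0.9)
    assert pdf[0] == 0.0 and np.all(np.isfinite(pdf))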
@ -711,6 +740,7 @@ class TestLognorm(TestCase):
class TestBeta(TestCase):
def test_logpdf(self):
# Regression test for Ticket #1326: avoid nan with 0*log(0) situation
logpdf = stats.beta.logpdf(0, 1, 0.5)
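With a = 1 the term (a - 1)*log(x) in the beta log-density becomes 0*log(0) at x = 0, which naively evaluates to nan; the fix pins it to the finite limit. Since B(1, 0.5) = 2, the density at 0 is 0.5, so a hedged check is:

    import numpy as np
    from scipy import stats

    logpdf = stats.beta.logpdf(0, 1, 0.5)
    np.testing.assert_allclose(logpdf, np.log(0.5))   # finite, not nan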
@ -727,6 +757,7 @@ class TestBeta(TestCase):
class TestBetaPrime(TestCase):
def test_logpdf(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
@ -736,6 +767,7 @@ class TestBetaPrime(TestCase):
class TestGamma(TestCase):
def test_pdf(self):
# a few test cases to compare with R
pdf = stats.gamma.pdf(90, 394, scale=1. / 5)
@ -753,18 +785,23 @@ class TestGamma(TestCase):
class TestChi2(TestCase):
# regression tests after precision improvements, ticket:1041, not verified
def test_precision(self):
assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003, 14)
assert_almost_equal(
stats.chi2.pdf(1000, 1000), 8.919133934753128e-003, 14)
assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778, 14)
class TestArrayArgument(TestCase): # test for ticket:992
def test_noexception(self):
rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5), size=(10,5))
rvs = stats.norm.rvs(
loc=(np.arange(5)), scale=np.ones(5), size=(10, 5))
assert_equal(rvs.shape, (10, 5))
class TestDocstring(TestCase):
def test_docstrings(self):
# See ticket #761
if stats.rayleigh.__doc__ is not None:
@ -779,6 +816,7 @@ class TestDocstring(TestCase):
class TestEntropy(TestCase):
def test_entropy_positive(self):
# See ticket #497
pk = [0.5, 0.2, 0.3]
@ -810,8 +848,8 @@ class TestEntropy(TestCase):
assert_array_almost_equal(stats.entropy(pk, qk),
[0.1933259, 0.18609809])
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
"assert_* funcs broken with inf/nan")
# @dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
# "assert_* funcs broken with inf/nan")
def test_entropy_2d_zero(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
@ -986,7 +1024,7 @@ class TestFitMethod(object):
assert_equal(a, 2)
assert_equal(loc, 0)
assert_equal(scale, 1)
da, db = mlefunc(a, b, x)
_da, db = mlefunc(a, b, x)
assert_allclose(db, 0, atol=1e-5)
# Same floc and fscale values as above, but reverse the data
@ -1015,10 +1053,11 @@ class TestFitMethod(object):
class TestFrozen(TestCase):
# Test that a frozen distribution gives the same results as the original object.
#
# Test that a frozen distribution gives the same results as the original
# object.
# Only tested for the normal distribution (with loc and scale specified)
# and for the gamma distribution (with a shape parameter specified).
def test_norm(self):
dist = stats.norm
frozen = stats.norm(loc=10.0, scale=3.0)
@ -1137,6 +1176,7 @@ class TestExpect(TestCase):
#
# Uses normal distribution and beta distribution for finite bounds, and
# hypergeom for discrete distribution with finite support
def test_norm(self):
v = stats.norm.expect(lambda x: (x - 5) * (x - 5), loc=5, scale=2)
assert_almost_equal(v, 4, decimal=14)
@ -1155,7 +1195,8 @@ class TestExpect(TestCase):
def test_beta(self):
# case with finite support interval
v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10,5),
v = stats.beta.expect(
lambda x: (x - 19 / 3.) * (x - 19 / 3.), args=(10, 5),
loc=5, scale=2)
assert_almost_equal(v, 1. / 18., decimal=13)
@ -1185,7 +1226,8 @@ class TestExpect(TestCase):
assert_almost_equal(v, v_true, decimal=14)
# with bounds, bounds equal to shifted support
v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
v_bounds = stats.hypergeom.expect(
lambda x: (x - 9.) ** 2, args=(20, 10, 8),
loc=5., lb=5, ub=13)
assert_almost_equal(v_bounds, v_true, decimal=14)
@ -1237,6 +1279,7 @@ class TestExpect(TestCase):
class TestNct(TestCase):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
# For negative values c and for c=0 results of rv.cdf(0) below were nan
@ -1246,7 +1289,8 @@ class TestNct(TestCase):
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
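The magic constant follows from the structure of the noncentral t: T = (Z + nc)/sqrt(V/df) is non-positive exactly when Z <= -nc, so cdf(0) = norm.cdf(-nc) regardless of df; with nc = -1 that is norm.cdf(1) ~= 0.841344746. A sketch:

    from scipy import stats

    rv = stats.nct(5, -1)   # df = 5 is an arbitrary choice here
    assert abs(rv.cdf(0) - stats.norm.cdf(1.0)) < 1e-10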
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4,7)[:,None], np.linspace(0.1, 1, 4))
res = stats.nct.pdf(
5, np.arange(4, 7)[:, None], np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
@ -1272,6 +1316,7 @@ class TestNct(TestCase):
class TestRice(TestCase):
def test_rice_zero_b(self):
# rice distribution should work with b=0, cf gh-2164
x = [0.2, 1., 5.]
@ -1300,13 +1345,15 @@ class TestRice(TestCase):
class TestErlang(TestCase):
def test_erlang_runtimewarning(self):
# erlang should generate a RuntimeWarning if a non-integer
# shape parameter is used.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
# The non-integer shape parameter 1.3 should trigger a RuntimeWarning
# The non-integer shape parameter 1.3 should trigger a
# RuntimeWarning
assert_raises(RuntimeWarning,
stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)
@ -1320,6 +1367,7 @@ class TestErlang(TestCase):
class TestRdist(TestCase):
@dec.slow
def test_rdist_cdf_gh1285(self):
# check workaround in rdist._cdf for issue gh-1285.
@ -1336,14 +1384,15 @@ def test_540_567():
assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,
scale=0.204423758009),0.98353464004309321,
decimal=10, err_msg='test_540_567')
scale=0.204423758009),
0.98353464004309321, decimal=10,
err_msg='test_540_567')
def test_regression_ticket_1316():
# The following was raising an exception, because _construct_default_doc()
# did not handle the default keyword extradoc=None. See ticket #1316.
g = stats._continuous_distns.gamma_gen(name='gamma')
_g = stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
@ -1352,7 +1401,8 @@ def test_regression_ticket_1326():
def test_regression_tukey_lambda():
# Make sure that Tukey-Lambda distribution correctly handles non-positive lambdas.
# Make sure that Tukey-Lambda distribution correctly handles non-positive
# lambdas.
x = np.linspace(-5.0, 5.0, 101)
olderr = np.seterr(divide='ignore')
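A convenient anchor for the non-positive branch is lam = 0, where the Tukey-Lambda quantile function log(p/(1 - p)) is exactly the logistic one. A hedged sketch of that identity:

    import numpy as np
    from scipy import stats

    x = np.linspace(-5.0, 5.0, 101)
    np.testing.assert_allclose(stats.tukeylambda.pdf(x, 0.0),
                               stats.logistic.pdf(x))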
@ -1622,7 +1672,8 @@ def test_stats_shapes_argcheck():
mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
assert_equal(mv2_augmented, mv3)
mv3 = stats.lognorm.stats([2, 2.4, -1]) # -1 is not a legal shape parameter
# -1 is not a legal shape parameter
mv3 = stats.lognorm.stats([2, 2.4, -1])
mv2 = stats.lognorm.stats([2, 2.4])
mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
assert_equal(mv2_augmented, mv3)
@ -1632,19 +1683,22 @@ def test_stats_shapes_argcheck():
# anyway, so some distributions may or may not fail.
## Test subclassing distributions w/ explicit shapes
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, a):
return 42
class _distr2_gen(stats.rv_continuous):
def _cdf(self, x, a):
return 42 * a + x
class _distr3_gen(stats.rv_continuous):
def _pdf(self, x, a, b):
return a + b
@ -1656,6 +1710,7 @@ class _distr3_gen(stats.rv_continuous):
class _distr6_gen(stats.rv_continuous):
# Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
def _pdf(self, x, a, b):
return a * x + b
@ -1717,6 +1772,7 @@ class TestSubclassingExplicitShapes(TestCase):
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
@ -1726,6 +1782,7 @@ class TestSubclassingExplicitShapes(TestCase):
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
@ -1736,6 +1793,7 @@ class TestSubclassingExplicitShapes(TestCase):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
@ -1749,6 +1807,7 @@ class TestSubclassingExplicitShapes(TestCase):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
@ -1763,9 +1822,11 @@ class TestSubclassingExplicitShapes(TestCase):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling" is
# ignoring *args and looking for ``extra_kwarg`` and using that.
# _pdf should handle *args, **kwargs itself. Here "handling"
# is ignoring *args and looking for ``extra_kwarg`` and using
# that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
@ -1775,6 +1836,7 @@ class TestSubclassingExplicitShapes(TestCase):
def shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
@ -1827,6 +1889,7 @@ class TestSubclassingNoShapes(TestCase):
def test_defaults_raise(self):
# default arguments should raise
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a=42):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@ -1834,6 +1897,7 @@ class TestSubclassingNoShapes(TestCase):
def test_starargs_raise(self):
# without explicit shapes, *args are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, *args):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@ -1841,6 +1905,7 @@ class TestSubclassingNoShapes(TestCase):
def test_kwargs_raise(self):
# without explicit shapes, **kwargs are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, **kwargs):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@ -1862,4 +1927,5 @@ def test_infinite_input():
if __name__ == "__main__":
#unittest.main()
run_module_suite()

@ -7,7 +7,7 @@ from numpy.testing import dec
from wafo import stats
from .test_continuous_basic import distcont
from wafo.stats.tests.test_continuous_basic import distcont
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values don't differ by too much
@ -45,7 +45,7 @@ skip_fit = [
def test_cont_fit():
# this tests the closeness of the estimated parameters to the true
# parameters with fit method of continuous distributions
# Note: is slow, some distributions don't converge with sample size <= 10000
# Note: slow, some distributions don't converge with sample size <= 10000
for distname, arg in distcont:
if distname not in skip_fit:
@ -62,13 +62,15 @@ def check_cont_fit(distname,arg):
pass
if xfail:
msg = "Fitting %s doesn't work reliably yet" % distname
msg += " [Set environment variable SCIPY_XFAIL=1 to run this test nevertheless.]"
msg += " [Set environment variable SCIPY_XFAIL=1 to run this " + \
"test nevertheless.]"
dec.knownfailureif(True, msg)(lambda: None)()
distfn = getattr(stats, distname)
truearg = np.hstack([arg, [0.0, 1.0]])
diffthreshold = np.max(np.vstack([truearg*thresh_percent,
diffthreshold = np.max(np.vstack([
truearg * thresh_percent,
np.ones(distfn.numargs + 2) * thresh_min]), 0)
for fit_size in fit_sizes:
@ -77,12 +79,16 @@ def check_cont_fit(distname,arg):
with np.errstate(all='ignore'):
rvs = distfn.rvs(size=fit_size, *arg)
est = distfn.fit(rvs) # start with default values
#phat = distfn.fit2(rvs)
phat = distfn.fit2(rvs, method='mps')
est = phat.par
#est = distfn.fit(rvs) # start with default values
diff = est - truearg
# threshold for location
diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,thresh_min])
diffthreshold[-2] = np.max([np.abs(rvs.mean()) * thresh_percent,
thresh_min])
if np.any(np.isnan(est)):
raise AssertionError('nan returned in fit')
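fit2(method='mps') replaces plain maximum likelihood with maximum product of spacings: the fitted parameters maximize the sum of log-spacings of the cdf over the ordered sample, which stays well behaved even for distributions whose likelihood is unbounded. A rough sketch of that objective (the helper name here is illustrative, not wafo's internals):

    import numpy as np
    from scipy import stats

    def neg_log_spacing(params, data, dist):
        # spacings D_i = F(x_(i)) - F(x_(i-1)) over the sorted sample,
        # with F(x_(0)) = 0 and F(x_(n+1)) = 1 appended; MPS maximizes
        # sum(log(D_i)), so we return the negative for minimization
        cdf = dist.cdf(np.sort(data), *params)
        spacings = np.diff(np.r_[0.0, cdf, 1.0])
        return -np.sum(np.log(np.clip(spacings, 1e-100, 1.0)))

    data = stats.gumbel_r.rvs(loc=0.0, scale=2.0, size=1000)
    # the objective should be smaller near the true (loc, scale)
    assert (neg_log_spacing([0.0, 2.0], data, stats.gumbel_r) <
            neg_log_spacing([3.0, 0.5], data, stats.gumbel_r))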

@ -9,10 +9,11 @@ import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (TestCase, run_module_suite, assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_raises, assert_, assert_allclose, assert_equal, dec)
assert_almost_equal, assert_array_less,
assert_array_almost_equal, assert_raises, assert_,
assert_allclose, assert_equal, dec)
from scipy import stats
from wafo import stats
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
@ -36,6 +37,7 @@ g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestShapiro(TestCase):
def test_basic(self):
x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
@ -57,24 +59,25 @@ class TestShapiro(TestCase):
class TestAnderson(TestCase):
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1)
A, crit, _sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A,crit,sig = stats.anderson(x2)
A, crit, _sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1,'expon')
A, crit, _sig = stats.anderson(x1, 'expon')
assert_array_less(A, crit[-2:])
olderr = np.seterr(all='ignore')
try:
A,crit,sig = stats.anderson(x2,'expon')
A, crit, _sig = stats.anderson(x2, 'expon')
finally:
np.seterr(**olderr)
assert_(A > crit[-1])
@ -95,12 +98,13 @@ class TestAnsari(TestCase):
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104, 100,
96, 108, 103, 104, 114, 114, 113, 108, 106, 99))
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="Ties preclude use of exact statistic.")
message="Ties preclude use of exact " +
"statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W, 185.5, 11)
@ -141,7 +145,8 @@ class TestLevene(TestCase):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', proportiontocut=0.0)
W2, pval2 = stats.levene(
g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
@ -152,8 +157,10 @@ class TestLevene(TestCase):
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed', proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed', proportiontocut=0.125)
W0, _pval0 = stats.levene(x, y, center='trimmed',
proportiontocut=0.125)
W1, pval1 = stats.levene(
x2, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
@ -230,13 +237,15 @@ class TestFligner(TestCase):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1, x1 ** 2),
(3.2282229927203536, 0.072379187848207877), 11)
(3.2282229927203536, 0.072379187848207877),
11)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')
Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed', proportiontocut=0.0)
Xsq2, pval2 = stats.fligner(
g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
@ -244,7 +253,8 @@ class TestFligner(TestCase):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed', proportiontocut=0.125)
Xsq1, pval1 = stats.fligner(
x, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
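The two calls agree because proportiontocut=0.125 on these 8-point samples trims 0.125 * 8 = 1 observation from each tail, and since x and y are listed in sorted order that is exactly the x[1:-1], y[1:-1] slice fed to center='mean'.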
@ -279,11 +289,13 @@ class TestFligner(TestCase):
class TestMood(TestCase):
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1 ** 2),
(-1.3830857299399906, 0.16663858066771478), 11)
(-1.3830857299399906, 0.16663858066771478),
11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
@ -375,7 +387,8 @@ class TestMood(TestCase):
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is less than 3
# Raise ValueError when the sum of the lengths of the args is less than
# 3
assert_raises(ValueError, stats.mood, [1], [])
@ -393,7 +406,7 @@ class TestProbplot(TestCase):
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
_res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
@ -410,7 +423,7 @@ class TestProbplot(TestCase):
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(), fit=False)
_osm, _osr = stats.probplot(x, sparams=(), fit=False)
def test_dist_keyword(self):
np.random.seed(12345)
@ -424,7 +437,9 @@ class TestProbplot(TestCase):
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist(object):
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
@ -555,7 +570,7 @@ class TestBoxcox(TestCase):
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1) ** (-lmbda)
xt, maxlog = stats.boxcox(x_inv)
_xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
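The -1/lmbda target can be derived directly: raising x_inv = (x*lmbda + 1)**(-lmbda) to the power -1/lmbda recovers x*lmbda + 1, an affine function of the normal sample x, so the Box-Cox profile likelihood peaks near lambda = -1/lmbda. In code:

    import numpy as np

    lmbda = 2.5
    x = np.random.normal(loc=10, size=1000)
    x_inv = (x * lmbda + 1) ** (-lmbda)
    # the round trip recovers an affine (hence still normal) variable
    np.testing.assert_allclose(x_inv ** (-1 / lmbda), x * lmbda + 1)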
@ -586,6 +601,7 @@ class TestBoxcox(TestCase):
class TestBoxcoxNormmax(TestCase):
def setUp(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
@ -608,6 +624,7 @@ class TestBoxcoxNormmax(TestCase):
class TestBoxcoxNormplot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
@ -645,6 +662,7 @@ class TestBoxcoxNormplot(TestCase):
class TestCircFuncs(TestCase):
def test_circfuncs(self):
x = np.array([355, 5, 2, 359, 10, 350])
M = stats.circmean(x, high=360)

@ -11,11 +11,11 @@ import numpy
import numpy as np
import scipy.linalg
import scipy.stats._multivariate
from scipy.stats import multivariate_normal
from scipy.stats import norm
#import wafo.stats._multivariate
from wafo.stats import multivariate_normal
from wafo.stats import norm
from scipy.stats._multivariate import _psd_pinv_decomposed_log_pdet
from wafo.stats._multivariate import _psd_pinv_decomposed_log_pdet
from scipy.integrate import romb
@ -70,7 +70,7 @@ def test_large_pseudo_determinant():
#assert_allclose(np.linalg.slogdet(cov[:npos, :npos]), (1, large_total_log))
# Check the pseudo-determinant.
U, log_pdet = scipy.stats._multivariate._psd_pinv_decomposed_log_pdet(cov)
U, log_pdet = _psd_pinv_decomposed_log_pdet(cov)
assert_allclose(log_pdet, large_total_log)

@ -4,7 +4,7 @@ import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_array_equal
from scipy.stats import rankdata, tiecorrect
from wafo.stats import rankdata, tiecorrect
class TestTieCorrect(TestCase):

@ -1,51 +1,44 @@
import numpy as np  # @UnusedImport
from numpy import cos, exp, linspace, pi, sin, diff, arange, ones  # @UnusedImport
from numpy.random import randn  # @UnusedImport
from wafo.data import sea  # @UnusedImport
from wafo.misc import (JITImport, Bunch, detrendma, DotDict, findcross, ecross, findextrema,  # @UnusedImport
    findrfc, rfcfilter, findtp, findtc, findoutliers,  # @UnusedImport
    common_shape, argsreduce, stirlerr, getshipchar, betaloge,  # @UnusedImport
    gravity, nextpow2, discretize, polar2cart,
    cart2polar, meshgrid, tranproc)  # @UnusedImport
from numpy.testing import (run_module_suite, assert_equal, assert_almost_equal,
    assert_array_equal, assert_array_almost_equal)
import numpy as np
from numpy import array, cos, exp, linspace, pi, sin, diff, arange, ones
from wafo.data import sea
from wafo.misc import (JITImport, Bunch, detrendma, DotDict, findcross, ecross,
    findextrema, findrfc, rfcfilter, findtp, findtc,
    findoutliers, common_shape, argsreduce, stirlerr,
    getshipchar, betaloge, hygfz,
    gravity, nextpow2, discretize, polar2cart,
    cart2polar, tranproc)
def test_JITImport():
'''
>>> np = JITImport('numpy')
>>> np.exp(0)==1.0
True
'''
np = JITImport('numpy')
assert_equal(1.0, np.exp(0))
def test_bunch():
'''
>>> d = Bunch(test1=1,test2=3)
>>> d.test1; d.test2
1
3
'''
d = Bunch(test1=1, test2=3)
assert_equal(1, d.test1)
assert_equal(3, d.test2)
def test_dotdict():
'''
>>> d = DotDict(test1=1,test2=3)
>>> d.test1; d.test2
1
3
'''
d = DotDict(test1=1, test2=3)
assert_equal(1, d.test1)
assert_equal(3, d.test2)
def test_detrendma():
'''
>>> x = linspace(0,1,200)
>>> y = exp(x)+0.1*cos(20*2*pi*x)
>>> y0 = detrendma(y,20); tr = y-y0
>>> y0,tr
(array([ -1.05815186e-02, -2.48280355e-02, -7.01800760e-02,
x = linspace(0, 1, 200)
y = exp(x) + 0.1 * cos(20 * 2 * pi * x)
y0 = detrendma(y, 20)
tr = y - y0
assert_array_almost_equal(
y0,
array(
[-1.05815186e-02, -2.48280355e-02, -7.01800760e-02,
-1.27193089e-01, -1.71915213e-01, -1.85125121e-01,
-1.59745361e-01, -1.03571981e-01, -3.62676515e-02,
1.82219951e-02, 4.09039083e-02, 2.50630186e-02,
@ -111,7 +104,9 @@ def test_detrendma():
2.43802139e-01, 2.39414013e-01, 2.03257341e-01,
1.54325635e-01, 1.16564992e-01, 1.09638547e-01,
1.41342814e-01, 2.04600808e-01, 2.80191671e-01,
3.44164010e-01, 3.77073744e-01]), array([
3.44164010e-01, 3.77073744e-01
]))
assert_array_almost_equal(tr, array([
1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152,
1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152,
1.11058152, 1.11058152, 1.11058152, 1.11058152, 1.11058152,
@ -152,45 +147,37 @@ def test_detrendma():
2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808,
2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808,
2.44120808, 2.44120808, 2.44120808, 2.44120808, 2.44120808]))
'''
def test_findcross_and_ecross():
'''
>>> findcross([0, 0, 1, -1, 1],0)
array([1, 2, 3])
>>> findcross([0, 1, -1, 1],0)
array([0, 1, 2])
>>> t = linspace(0,7*pi,250)
>>> x = sin(t)
>>> ind = findcross(x,0.75)
>>> ind
array([ 9, 25, 80, 97, 151, 168, 223, 239])
>>> t0 = ecross(t,x,ind,0.75)
>>> t0
array([ 0.84910514, 2.2933879 , 7.13205663, 8.57630119,
13.41484739, 14.85909194, 19.69776067, 21.14204343])
'''
assert_array_equal(findcross([0, 0, 1, -1, 1], 0), np.array([1, 2, 3]))
assert_array_equal(findcross([0, 1, -1, 1], 0), np.array([0, 1, 2]))
t = linspace(0, 7 * pi, 250)
x = sin(t)
ind = findcross(x, 0.75)
assert_array_equal(ind, np.array([9, 25, 80, 97, 151, 168, 223, 239]))
t0 = ecross(t, x, ind, 0.75)
assert_array_almost_equal(t0, np.array([0.84910514, 2.2933879, 7.13205663,
8.57630119, 13.41484739, 14.85909194,
19.69776067, 21.14204343]))
def test_findextrema():
'''
>>> t = linspace(0,7*pi,250)
>>> x = sin(t)
>>> ind = findextrema(x)
>>> ind
array([ 18, 53, 89, 125, 160, 196, 231])
'''
t = linspace(0, 7 * pi, 250)
x = sin(t)
ind = findextrema(x)
assert_array_almost_equal(ind, np.array([18, 53, 89, 125, 160, 196, 231]))
def test_findrfc():
'''
>>> t = linspace(0,7*pi,250)
>>> x = sin(t)+0.1*sin(50*t)
>>> ind = findextrema(x)
>>> ind
array([ 1, 3, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21,
t = linspace(0, 7 * pi, 250)
x = sin(t) + 0.1 * sin(50 * t)
ind = findextrema(x)
assert_array_almost_equal(
ind,
np.array(
[1, 3, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21,
23, 25, 26, 28, 29, 31, 33, 35, 36, 38, 39, 41, 43,
45, 46, 48, 50, 51, 53, 55, 56, 58, 60, 61, 63, 65,
67, 68, 70, 71, 73, 75, 77, 78, 80, 81, 83, 85, 87,
@ -201,35 +188,42 @@ def test_findrfc():
176, 177, 179, 181, 183, 184, 186, 187, 189, 191, 193, 194, 196,
198, 199, 201, 203, 205, 206, 208, 209, 211, 213, 215, 216, 218,
219, 221, 223, 225, 226, 228, 230, 231, 233, 235, 237, 238, 240,
241, 243, 245, 247, 248])
>>> ti, tp = t[ind], x[ind]
>>> ind1 = findrfc(tp,0.3)
>>> ind1
array([ 0, 9, 32, 53, 74, 95, 116, 137])
>>> tp[ind1]
array([-0.00743352, 1.08753972, -1.07206545, 1.09550837, -1.07940458,
1.07849396, -1.0995006 , 1.08094452])
'''
241, 243, 245, 247, 248]))
_ti, tp = t[ind], x[ind]
ind1 = findrfc(tp, 0.3)
assert_array_almost_equal(
ind1,
np.array([0, 9, 32, 53, 74, 95, 116, 137]))
assert_array_almost_equal(
tp[ind1],
np.array(
[-0.00743352, 1.08753972, -1.07206545, 1.09550837, -1.07940458,
1.07849396, -1.0995006, 1.08094452]))
def test_rfcfilter():
'''
# 1. Filtered signal y is the turning points of x.
>>> x = sea()
>>> y = rfcfilter(x[:,1], h=0, method=1)
>>> y[0:5]
array([-1.2004945 , 0.83950546, -0.09049454, -0.02049454, -0.09049454])
x = sea()
y = rfcfilter(x[:, 1], h=0, method=1)
assert_array_almost_equal(
y[0:5],
np.array([-1.2004945, 0.83950546, -0.09049454,
-0.02049454, -0.09049454]))
# 2. This removes all rainflow cycles with range less than 0.5.
>>> y1 = rfcfilter(x[:,1], h=0.5)
>>> y1[0:5]
array([-1.2004945 , 0.83950546, -0.43049454, 0.34950546, -0.51049454])
>>> t = linspace(0,7*pi,250)
>>> x = sin(t)+0.1*sin(50*t)
>>> ind = findextrema(x)
>>> ind
array([ 1, 3, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21,
y1 = rfcfilter(x[:, 1], h=0.5)
assert_array_almost_equal(
y1[0:5],
np.array([-1.2004945, 0.83950546, -0.43049454,
0.34950546, -0.51049454]))
t = linspace(0, 7 * pi, 250)
x = sin(t) + 0.1 * sin(50 * t)
ind = findextrema(x)
assert_array_almost_equal(
ind,
np.array(
[1, 3, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21,
23, 25, 26, 28, 29, 31, 33, 35, 36, 38, 39, 41, 43,
45, 46, 48, 50, 51, 53, 55, 56, 58, 60, 61, 63, 65,
67, 68, 70, 71, 73, 75, 77, 78, 80, 81, 83, 85, 87,
@ -240,284 +234,237 @@ def test_rfcfilter():
176, 177, 179, 181, 183, 184, 186, 187, 189, 191, 193, 194, 196,
198, 199, 201, 203, 205, 206, 208, 209, 211, 213, 215, 216, 218,
219, 221, 223, 225, 226, 228, 230, 231, 233, 235, 237, 238, 240,
241, 243, 245, 247, 248])
>>> ti, tp = t[ind], x[ind]
>>> tp03 = rfcfilter(tp,0.3)
>>> tp03
array([-0.00743352, 1.08753972, -1.07206545, 1.09550837, -1.07940458,
1.07849396, -1.0995006 , 1.08094452, 0.11983423])
'''
241, 243, 245, 247, 248]))
_ti, tp = t[ind], x[ind]
tp03 = rfcfilter(tp, 0.3)
assert_array_almost_equal(
tp03,
np.array(
[-0.00743352, 1.08753972, -1.07206545, 1.09550837, -1.07940458,
1.07849396, -1.0995006, 1.08094452, 0.11983423]))
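The two h values above encode the filter's contract: h=0 keeps every turning point, while h>0 removes all rainflow cycles of range below h, so every remaining range exceeds the threshold. A hedged check of the second property:

    import numpy as np
    from wafo.data import sea
    from wafo.misc import rfcfilter

    y1 = rfcfilter(sea()[:, 1], h=0.5)
    assert np.abs(np.diff(y1)).min() > 0.5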
def test_findtp():
'''
>>> import numpy as np
>>> x = sea()
>>> x1 = x[0:200,:]
>>> itp = findtp(x1[:,1],0,'Mw')
>>> itph = findtp(x1[:,1],0.3,'Mw')
>>> itp
array([ 11, 21, 22, 24, 26, 28, 31, 39, 43, 45, 47, 51, 56,
x = sea()
x1 = x[0:200, :]
itp = findtp(x1[:, 1], 0, 'Mw')
itph = findtp(x1[:, 1], 0.3, 'Mw')
assert_array_almost_equal(
itp,
np.array(
[11, 21, 22, 24, 26, 28, 31, 39, 43, 45, 47, 51, 56,
64, 70, 78, 82, 84, 89, 94, 101, 108, 119, 131, 141, 148,
149, 150, 159, 173, 184, 190, 199])
>>> itph
array([ 11, 28, 31, 39, 47, 51, 56, 64, 70, 78, 89, 94, 101,
108, 119, 131, 141, 148, 159, 173, 184, 190, 199])
'''
149, 150, 159, 173, 184, 190, 199]))
assert_array_almost_equal(
itph,
np.array(
[11, 28, 31, 39, 47, 51, 56, 64, 70, 78, 89, 94, 101,
108, 119, 131, 141, 148, 159, 173, 184, 190, 199]))
def test_findtc():
'''
>>> x = sea()
>>> x1 = x[0:200,:]
>>> itc, iv = findtc(x1[:,1],0,'dw')
>>> itc
array([ 28, 31, 39, 56, 64, 69, 78, 82, 83, 89, 94, 101, 108,
119, 131, 140, 148, 159, 173, 184])
>>> iv
array([ 19, 29, 34, 53, 60, 67, 76, 81, 82, 84, 90, 99, 103,
112, 127, 137, 143, 154, 166, 180, 185])
'''
x = sea()
x1 = x[0:200, :]
itc, iv = findtc(x1[:, 1], 0, 'dw')
assert_array_almost_equal(
itc,
np.array(
[28, 31, 39, 56, 64, 69, 78, 82, 83, 89, 94, 101, 108,
119, 131, 140, 148, 159, 173, 184]))
assert_array_almost_equal(
iv,
np.array(
[19, 29, 34, 53, 60, 67, 76, 81, 82, 84, 90, 99, 103,
112, 127, 137, 143, 154, 166, 180, 185]))
def test_findoutliers():
'''
>>> xx = sea()
>>> dt = diff(xx[:2,0])
>>> dcrit = 5*dt
>>> ddcrit = 9.81/2*dt*dt
>>> zcrit = 0
>>> [inds, indg] = findoutliers(xx[:,1],zcrit,dcrit,ddcrit,verbose=True)
Found 0 spurious positive jumps of Dx
Found 0 spurious negative jumps of Dx
Found 37 spurious positive jumps of D^2x
Found 200 spurious negative jumps of D^2x
Found 244 consecutive equal values
Found the total of 1152 spurious points
>>> inds
array([ 6, 7, 8, ..., 9509, 9510, 9511])
>>> indg
array([ 0, 1, 2, ..., 9521, 9522, 9523])
'''
xx = sea()
dt = diff(xx[:2, 0])
dcrit = 5 * dt
ddcrit = 9.81 / 2 * dt * dt
zcrit = 0
[inds, indg] = findoutliers(xx[:, 1], zcrit, dcrit, ddcrit, verbose=False)
assert_array_almost_equal(inds[np.r_[0, 1, 2, -3, -2, -1]],
np.array([6, 7, 8, 9509, 9510, 9511]))
assert_array_almost_equal(indg[np.r_[0, 1, 2, -3, -2, -1]],
np.array([0, 1, 2, 9521, 9522, 9523]))
def test_hygfz():
#y = hyp2f1_taylor(-1, -4, 1, .9)
assert_equal(4.6, hygfz(-1, -4, 1, .9))
assert_almost_equal(1.0464328112173522, hygfz(0.1, 0.2, 0.3, 0.5))
assert_almost_equal(1.2027034401166194, hygfz(0.1, 0.2, 0.3, 0.95))
#assert_equal(1.661006238211309e-07, hygfz(5, -300, 10, 0.5))
assert_equal(0.118311386286, hygfz(0.5, -99.0, 1.5, 0.5625))
assert_equal(0.0965606007742, hygfz(0.5, -149.0, 1.5, 0.5625))
assert_equal(0.49234384000963544+0.60513406166123973j, hygfz(1, 1, 4, 3+4j))
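For real arguments inside the unit disk, hygfz can be cross-checked against scipy.special.hyp2f1, which evaluates the same Gauss hypergeometric function 2F1(a, b; c; z); hygfz's extra value is its support for complex z. A hedged sketch:

    from scipy.special import hyp2f1
    from wafo.misc import hygfz

    for z in (0.5, 0.5625, 0.95):
        assert abs(hygfz(0.1, 0.2, 0.3, z) - hyp2f1(0.1, 0.2, 0.3, z)) < 1e-9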
def test_common_shape():
'''
>>> import numpy as np
>>> A = np.ones((4,1))
>>> B = 2
>>> C = np.ones((1,5))*5
>>> common_shape(A,B,C)
(4, 5)
>>> common_shape(A,B,C,shape=(3,4,1))
(3, 4, 5)
>>> A = np.ones((4,1))
>>> B = 2
>>> C = np.ones((1,5))*5
>>> common_shape(A,B,C)
(4, 5)
>>> common_shape(A,B,C,shape=(3,4,1))
(3, 4, 5)
'''
A = np.ones((4, 1))
B = 2
C = np.ones((1, 5)) * 5
assert_array_equal(common_shape(A, B, C), (4, 5))
assert_array_equal(common_shape(A, B, C, shape=(3, 4, 1)), (3, 4, 5))
A = np.ones((4, 1))
B = 2
C = np.ones((1, 5)) * 5
assert_array_equal(common_shape(A, B, C), (4, 5))
assert_array_equal(common_shape(A, B, C, shape=(3, 4, 1)), (3, 4, 5))
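common_shape follows numpy's broadcasting rules, so the expected tuple can be cross-checked with np.broadcast:

    import numpy as np

    A = np.ones((4, 1))
    C = np.ones((1, 5)) * 5
    assert np.broadcast(A, 2, C).shape == (4, 5)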
def test_argsreduce():
'''
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = linspace(0,19,20).reshape((4,5))
>>> B = 2
>>> C = range(5)
>>> cond = np.ones(A.shape)
>>> [A1,B1,C1] = argsreduce(cond,A,B,C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2,B2,C2] = argsreduce(cond,A,B,C)
>>> B2.shape
(15,)
>>> A2;B2;C2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 15.,
16., 17., 18., 19.])
array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
'''
A = linspace(0, 19, 20).reshape((4, 5))
B = 2
C = range(5)
cond = np.ones(A.shape)
[_A1, B1, _C1] = argsreduce(cond, A, B, C)
assert_equal(B1.shape, (20,))
cond[2, :] = 0
[A2, B2, C2] = argsreduce(cond, A, B, C)
assert_equal(B2.shape, (15,))
assert_array_equal(A2,
np.array([0., 1., 2., 3., 4., 5., 6., 7.,
8., 9., 15., 16., 17., 18., 19.]))
assert_array_equal(
B2, np.array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
assert_array_equal(
C2, np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]))
def test_stirlerr():
'''
>>> stirlerr(range(5))
array([ inf, 0.08106147, 0.0413407 , 0.02767793, 0.02079067])
'''
assert_array_almost_equal(stirlerr(range(5)),
np.array([np.inf, 0.08106147, 0.0413407, 0.02767793,
0.02079067]))
def test_getshipchar():
'''
>>> sc = getshipchar(10,'service_speed')
>>> names = ['beam', 'beamSTD', 'draught',
... 'draughtSTD', 'length', 'lengthSTD',
... 'max_deadweight', 'max_deadweightSTD', 'propeller_diameter',
... 'propeller_diameterSTD', 'service_speed', 'service_speedSTD']
>>> for name in names: print( '%s : %g' % (name, sc[name]))
beam : 29
beamSTD : 2.9
draught : 9.6
draughtSTD : 2.112
length : 216
lengthSTD : 2.01131
max_deadweight : 30969
max_deadweightSTD : 3096.9
propeller_diameter : 6.76117
propeller_diameterSTD : 0.20267
service_speed : 10
service_speedSTD : 0
'''
sc = getshipchar(10, 'service_speed')
true_sc = dict(beam=29,
beamSTD=2.9,
draught=9.6,
draughtSTD=2.112,
length=216,
lengthSTD=2.011309883194276,
max_deadweight=30969,
max_deadweightSTD=3096.9,
propeller_diameter=6.761165385916601,
propeller_diameterSTD=0.20267047566705432,
service_speed=10,
service_speedSTD=0)
for name, val in true_sc.iteritems():
assert_almost_equal(val, sc[name])
def test_betaloge():
'''
>>> betaloge(3, arange(4))
array([ inf, -1.09861229, -2.48490665, -3.40119738])
'''
assert_array_almost_equal(betaloge(3, arange(4)),
np.array([np.inf, -1.09861229, -2.48490665, -3.40119738]))
def test_gravity():
'''
>>> phi = linspace(0,45,5)
>>> gravity(phi)
array([ 9.78049 , 9.78245014, 9.78803583, 9.79640552, 9.80629387])
'''
phi = linspace(0, 45, 5)
assert_array_almost_equal(gravity(phi),
np.array([9.78049, 9.78245014, 9.78803583,
9.79640552, 9.80629387]))
def test_nextpow2():
'''
>>> nextpow2(10)
4
>>> nextpow2(np.arange(5))
3
'''
assert_equal(nextpow2(10), 4)
assert_equal(nextpow2(np.arange(5)), 3)
def test_discretize():
'''
>>> x, y = discretize(np.cos,0,np.pi)
>>> x; y
array([ 0. , 0.19634954, 0.39269908, 0.58904862, 0.78539816,
x, y = discretize(np.cos, 0, np.pi)
assert_array_almost_equal(
x,
np.array(
[0., 0.19634954, 0.39269908, 0.58904862, 0.78539816,
0.9817477, 1.17809725, 1.37444679, 1.57079633, 1.76714587,
1.96349541, 2.15984495, 2.35619449, 2.55254403, 2.74889357,
2.94524311, 3.14159265])
array([ 1.00000000e+00, 9.80785280e-01, 9.23879533e-01,
2.94524311, 3.14159265]))
assert_array_almost_equal(
y, np.array([1.00000000e+00, 9.80785280e-01,
9.23879533e-01,
8.31469612e-01, 7.07106781e-01, 5.55570233e-01,
3.82683432e-01, 1.95090322e-01, 6.12323400e-17,
-1.95090322e-01, -3.82683432e-01, -5.55570233e-01,
-7.07106781e-01, -8.31469612e-01, -9.23879533e-01,
-9.80785280e-01, -1.00000000e+00])
'''
-9.80785280e-01, -1.00000000e+00]))
def test_discretize_adaptive():
'''
>>> x, y = discretize(np.cos,0,np.pi, method='adaptive')
>>> x; y
array([ 0. , 0.19634954, 0.39269908, 0.58904862, 0.78539816,
x, y = discretize(np.cos, 0, np.pi, method='adaptive')
assert_array_almost_equal(
x,
np.array(
[0., 0.19634954, 0.39269908, 0.58904862, 0.78539816,
0.9817477, 1.17809725, 1.37444679, 1.57079633, 1.76714587,
1.96349541, 2.15984495, 2.35619449, 2.55254403, 2.74889357,
2.94524311, 3.14159265])
array([ 1.00000000e+00, 9.80785280e-01, 9.23879533e-01,
2.94524311, 3.14159265]))
assert_array_almost_equal(
y,
np.array(
[1.00000000e+00, 9.80785280e-01, 9.23879533e-01,
8.31469612e-01, 7.07106781e-01, 5.55570233e-01,
3.82683432e-01, 1.95090322e-01, 6.12323400e-17,
-1.95090322e-01, -3.82683432e-01, -5.55570233e-01,
-7.07106781e-01, -8.31469612e-01, -9.23879533e-01,
-9.80785280e-01, -1.00000000e+00])
'''
-9.80785280e-01, -1.00000000e+00]))
def test_pol2cart_n_cart2pol():
'''
>>> r = 5
>>> t = linspace(0,pi,20)
>>> x, y = polar2cart(t,r)
>>> x; y
array([ 5. , 4.93180652, 4.72908621, 4.39736876, 3.94570255,
def test_polar2cart_n_cart2polar():
r = 5
t = linspace(0, pi, 20)
x, y = polar2cart(t, r)
assert_array_almost_equal(
x,
np.array(
[5., 4.93180652, 4.72908621, 4.39736876, 3.94570255,
3.38640786, 2.73474079, 2.00847712, 1.22742744, 0.41289673,
-0.41289673, -1.22742744, -2.00847712, -2.73474079, -3.38640786,
-3.94570255, -4.39736876, -4.72908621, -4.93180652, -5. ])
array([ 0.00000000e+00, 8.22972951e-01, 1.62349735e+00,
-3.94570255, -4.39736876, -4.72908621, -4.93180652, -5.]))
assert_array_almost_equal(
y,
np.array(
[0.00000000e+00, 8.22972951e-01, 1.62349735e+00,
2.37973697e+00, 3.07106356e+00, 3.67861955e+00,
4.18583239e+00, 4.57886663e+00, 4.84700133e+00,
4.98292247e+00, 4.98292247e+00, 4.84700133e+00,
4.57886663e+00, 4.18583239e+00, 3.67861955e+00,
3.07106356e+00, 2.37973697e+00, 1.62349735e+00,
8.22972951e-01, 6.12323400e-16])
>>> ti, ri = cart2polar(x,y)
>>> ti;ri
array([ 0. , 0.16534698, 0.33069396, 0.49604095, 0.66138793,
8.22972951e-01, 6.12323400e-16]))
ti, ri = cart2polar(x, y)
assert_array_almost_equal(
ti,
np.array(
[0., 0.16534698, 0.33069396, 0.49604095, 0.66138793,
0.82673491, 0.99208189, 1.15742887, 1.32277585, 1.48812284,
1.65346982, 1.8188168, 1.98416378, 2.14951076, 2.31485774,
2.48020473, 2.64555171, 2.81089869, 2.97624567, 3.14159265])
array([ 5., 5., 5., 5., 5., 5., 5., 5., 5., 5., 5., 5., 5.,
5., 5., 5., 5., 5., 5., 5.])
'''
def test_meshgrid():
'''
>>> x = np.linspace(0,1,3) # coordinates along x axis
>>> y = np.linspace(0,1,2) # coordinates along y axis
>>> xv, yv = meshgrid(x,y) # extend x and y for a 2D xy grid
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x,y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
>>> meshgrid(x,y,sparse=True,indexing='ij') # change to matrix indexing
[array([[ 0. ],
[ 0.5],
[ 1. ]]), array([[ 0., 1.]])]
>>> meshgrid(x,y,indexing='ij')
[array([[ 0. , 0. ],
[ 0.5, 0.5],
[ 1. , 1. ]]), array([[ 0., 1.],
[ 0., 1.],
[ 0., 1.]])]
>>> meshgrid(0,1,5) # just a 3D point
[array([[[0]]]), array([[[1]]]), array([[[5]]])]
>>> map(np.squeeze,meshgrid(0,1,5)) # just a 3D point
[array(0), array(1), array(5)]
>>> meshgrid(3)
array([3])
>>> meshgrid(y) # 1D grid y is just returned
array([ 0., 1.])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2+yy**2)/(xx**2+yy**2)
'''
2.48020473, 2.64555171, 2.81089869, 2.97624567, 3.14159265]))
assert_array_almost_equal(
ri,
np.array(
[5., 5., 5., 5., 5., 5., 5., 5., 5., 5., 5., 5., 5.,
5., 5., 5., 5., 5., 5., 5.]))
def test_tranproc():
'''
>>> import wafo.transform.models as wtm
>>> tr = wtm.TrHermite()
>>> x = linspace(-5,5,501)
>>> g = tr(x)
>>> y0, y1 = tranproc(x, g, range(5), ones(5))
>>> y0;y1
array([ 0.02659612, 1.00115284, 1.92872532, 2.81453257, 3.66292878])
array([ 1.00005295, 0.9501118 , 0.90589954, 0.86643821, 0.83096482])
'''
import wafo.transform.models as wtm
tr = wtm.TrHermite()
x = linspace(-5, 5, 501)
g = tr(x)
y0, y1 = tranproc(x, g, range(5), ones(5))
assert_array_almost_equal(
y0,
np.array([0.02659612, 1.00115284, 1.92872532,
2.81453257, 3.66292878]))
assert_array_almost_equal(
y1,
np.array([1.00005295, 0.9501118, 0.90589954,
0.86643821, 0.83096482]))
if __name__ == '__main__':
import doctest
doctest.testmod()
run_module_suite()
