pep8 + updated wafo.stats packages
parent 629ed411c9
commit d308357c5b
@@ -1,26 +1,27 @@
|
||||
from __future__ import division, print_function, absolute_import
|
||||
|
||||
from info import __doc__
|
||||
import misc
|
||||
import data
|
||||
import demos
|
||||
import kdetools
|
||||
import objects
|
||||
import spectrum
|
||||
import transform
|
||||
import definitions
|
||||
import polynomial
|
||||
import stats
|
||||
import interpolate
|
||||
import dctpack
|
||||
from .info import __doc__
|
||||
from . import misc
|
||||
from . import data
|
||||
from . import demos
|
||||
from . import kdetools
|
||||
from . import objects
|
||||
from . import spectrum
|
||||
from . import transform
|
||||
from . import definitions
|
||||
from . import polynomial
|
||||
from . import stats
|
||||
from . import interpolate
|
||||
from . import dctpack
|
||||
try:
|
||||
import fig
|
||||
from . import fig
|
||||
except ImportError:
|
||||
print 'fig import only supported on Windows'
|
||||
print('fig import only supported on Windows')
|
||||
|
||||
try:
|
||||
from wafo.version import version as __version__
|
||||
except ImportError:
|
||||
__version__='nobuilt'
|
||||
|
||||
__version__ = 'nobuilt'
|
||||
|
||||
from numpy.testing import Tester
|
||||
test = Tester().test
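The change above replaces the old implicit relative imports (bare "import misc") with explicit relative imports ("from . import misc"), which is what keeps the package importable under Python 3. A runnable sketch of the difference, using throwaway names (demo_pkg, misc) that are not part of WAFO:

import os
import sys
import tempfile

# build a tiny throwaway package on disk
root = tempfile.mkdtemp()
pkg_dir = os.path.join(root, 'demo_pkg')
os.mkdir(pkg_dir)
with open(os.path.join(pkg_dir, 'misc.py'), 'w') as f:
    f.write('answer = 42\n')
with open(os.path.join(pkg_dir, '__init__.py'), 'w') as f:
    f.write('from __future__ import absolute_import\n'
            'from . import misc   # explicit relative import: Python 2 and 3\n'
            '# import misc        # implicit relative import: Python 2 only\n')

sys.path.insert(0, root)
import demo_pkg
print(demo_pkg.misc.answer)   # -> 42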
|
@@ -1,98 +1,104 @@
|
||||
'''
|
||||
Module extending the bit-operator capabilities of numpy
|
||||
'''
|
||||
|
||||
from numpy import (bitwise_and, bitwise_or, #@UnresolvedImport
|
||||
bitwise_not, binary_repr, #@UnresolvedImport @UnusedImport
|
||||
bitwise_xor, where, arange) #@UnresolvedImport @UnusedImport
|
||||
#import numpy as np
|
||||
__all__ = ['bitwise_and', 'bitwise_or', 'bitwise_not', 'binary_repr',
|
||||
'bitwise_xor', 'getbit', 'setbit', 'getbits', 'setbits']
|
||||
|
||||
def getbit(i, bit):
|
||||
"""
|
||||
Get bit at specified position
|
||||
|
||||
Parameters
|
||||
----------
|
||||
i : array-like of uints, longs
|
||||
value from which to extract the bit.
|
||||
bit : array-like of ints or longs
|
||||
bit position between 0 and the number of bits in the uint class.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> binary_repr(13)
|
||||
'1101'
|
||||
>>> getbit(13,np.arange(3,-1,-1))
|
||||
array([1, 1, 0, 1])
|
||||
>>> getbit(5, np.r_[0:4])
|
||||
array([1, 0, 1, 0])
|
||||
"""
|
||||
return bitwise_and(i, 1 << bit) >> bit
|
||||
|
||||
def getbits(i, numbits=8):
|
||||
"""
|
||||
Returns bits of i in a list
|
||||
"""
|
||||
return getbit(i, arange(0, numbits))
|
||||
|
||||
def setbit(i, bit, value=1):
|
||||
"""
|
||||
Set bit at specified position
|
||||
|
||||
Parameters
|
||||
----------
|
||||
i : array-like of uints, longs
|
||||
value in which to set the bit.
|
||||
bit : array-like of ints or longs
|
||||
bit position between 0 and the number of bits in the uint class.
|
||||
value : array-like of 0 or 1
|
||||
value to set the bit to.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Set the fifth bit in the five-bit binary representation of 9 (01001)
|
||||
yields 25 (11001)
|
||||
>>> setbit(9,4)
|
||||
array(25)
|
||||
"""
|
||||
val1 = 1 << bit
|
||||
val0 = bitwise_not(val1)
|
||||
return where((value==0) & (i==i) & (bit==bit), bitwise_and(i, val0),
|
||||
bitwise_or(i, val1))
|
||||
|
||||
def setbits(bitlist):
|
||||
"""
|
||||
Set bits of val to values in bitlist
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> setbits([1,1])
|
||||
3
|
||||
>>> setbits([1,0])
|
||||
1
|
||||
"""
|
||||
# return bitlist[7]<<7 | bitlist[6]<<6 | bitlist[5]<<5 | bitlist[4]<<4 | \
|
||||
# bitlist[3]<<3 | bitlist[2]<<2 | bitlist[1]<<1 | bitlist[0]
|
||||
val = 0
|
||||
for i, j in enumerate(bitlist):
|
||||
val |= j << i
|
||||
return val
|
||||
|
||||
def test_docstrings():
|
||||
import doctest
|
||||
doctest.testmod()
|
||||
if __name__ == '__main__':
|
||||
test_docstrings()
|
||||
|
||||
# t = set(np.arange(8),1,1)
|
||||
# t=get(0x84,np.arange(0,8))
|
||||
# t=getbyte(0x84)
|
||||
# t=get(0x84,[0, 1, 2, 3, 4, 5, 6, 7])
|
||||
# t=get(0x20, 6)
|
||||
# bit = [0 for i in range(8)]
|
||||
# bit[7]=1
|
||||
# t = setbits(bit)
|
||||
# print(hex(t))
|
||||
'''
|
||||
Module extending the bit-operator capabilities of numpy
|
||||
'''
|
||||
|
||||
from numpy import (bitwise_and, bitwise_or,
|
||||
bitwise_not, binary_repr, # @UnusedImport
|
||||
bitwise_xor, where, arange) # @UnusedImport
|
||||
__all__ = ['bitwise_and', 'bitwise_or', 'bitwise_not', 'binary_repr',
|
||||
'bitwise_xor', 'getbit', 'setbit', 'getbits', 'setbits']
|
||||
|
||||
|
||||
def getbit(i, bit):
|
||||
"""
|
||||
Get bit at specified position
|
||||
|
||||
Parameters
|
||||
----------
|
||||
i : array-like of uints, longs
|
||||
value from which to extract the bit.
|
||||
bit : array-like of ints or longs
|
||||
bit position between 0 and the number of bits in the uint class.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> binary_repr(13)
|
||||
'1101'
|
||||
>>> getbit(13,np.arange(3,-1,-1))
|
||||
array([1, 1, 0, 1])
|
||||
>>> getbit(5, np.r_[0:4])
|
||||
array([1, 0, 1, 0])
|
||||
"""
|
||||
return bitwise_and(i, 1 << bit) >> bit
|
||||
|
||||
|
||||
def getbits(i, numbits=8):
|
||||
"""
|
||||
Returns bits of i in a list
|
||||
"""
|
||||
return getbit(i, arange(0, numbits))
|
||||
|
||||
|
||||
def setbit(i, bit, value=1):
|
||||
"""
|
||||
Set bit at specified position
|
||||
|
||||
Parameters
|
||||
----------
|
||||
i : array-like of uints, longs
|
||||
value in which to set the bit.
|
||||
bit : array-like of ints or longs
|
||||
bit position between 0 and the number of bits in the uint class.
|
||||
value : array-like of 0 or 1
|
||||
value to set the bit to.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Set the fifth bit in the five-bit binary representation of 9 (01001)
|
||||
yields 25 (11001)
|
||||
>>> setbit(9,4)
|
||||
array(25)
|
||||
"""
|
||||
val1 = 1 << bit
|
||||
val0 = bitwise_not(val1)
|
||||
return where((value == 0) & (i == i) & (bit == bit), bitwise_and(i, val0),
|
||||
bitwise_or(i, val1))
|
||||
|
||||
|
||||
def setbits(bitlist):
|
||||
"""
|
||||
Set bits of val to values in bitlist
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> setbits([1,1])
|
||||
3
|
||||
>>> setbits([1,0])
|
||||
1
|
||||
"""
|
||||
# return bitlist[7]<<7 | bitlist[6]<<6 | bitlist[5]<<5 | bitlist[4]<<4 | \
|
||||
# bitlist[3]<<3 | bitlist[2]<<2 | bitlist[1]<<1 | bitlist[0]
|
||||
val = 0
|
||||
for i, j in enumerate(bitlist):
|
||||
val |= j << i
|
||||
return val
|
||||
|
||||
|
||||
def test_docstrings():
|
||||
import doctest
|
||||
print('Testing docstrings in %s' % __file__)
|
||||
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_docstrings()
|
||||
|
||||
# t = set(np.arange(8),1,1)
|
||||
# t=get(0x84,np.arange(0,8))
|
||||
# t=getbyte(0x84)
|
||||
# t=get(0x84,[0, 1, 2, 3, 4, 5, 6, 7])
|
||||
# t=get(0x20, 6)
|
||||
# bit = [0 for i in range(8)]
|
||||
# bit[7]=1
|
||||
# t = setbits(bit)
|
||||
# print(hex(t))
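A brief usage sketch of the helpers above (the import path wafo.bitwise is an assumption; the diff does not show the file name). getbits returns the bits least-significant first and setbits reassembles them:

from wafo.bitwise import getbit, getbits, setbit, setbits  # assumed path

bits = getbits(0x84)              # array([0, 0, 1, 0, 0, 0, 0, 1]), LSB first
value = setbits(bits.tolist())    # reassembled back to 0x84 (132)
flipped = setbit(value, 0, 1)     # turn on the least significant bit -> 0x85
assert getbit(flipped, 0) == 1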
|
||||
|
File diff suppressed because it is too large
@@ -1,296 +1,305 @@
|
||||
"""
|
||||
WAFO definitions and nomenclature
|
||||
|
||||
crossings :
|
||||
cycle_pairs :
|
||||
turning_points :
|
||||
wave_amplitudes :
|
||||
wave_periods :
|
||||
waves :
|
||||
|
||||
Examples
|
||||
--------
|
||||
In order to view the documentation do the following in an ipython window:
|
||||
|
||||
>>> import wafo.definitions as wd
|
||||
>>> wd.crossings()
|
||||
|
||||
or
|
||||
>>> wd.crossings?
|
||||
|
||||
|
||||
"""
|
||||
def wave_amplitudes():
|
||||
r"""
|
||||
Wave amplitudes and heights definitions and nomenclature
|
||||
|
||||
Definition of wave amplitudes and wave heights
|
||||
---------------------------------------------
|
||||
|
||||
<----- Direction of wave propagation
|
||||
|
||||
|
||||
|..............c_..........|
|
||||
| /| \ |
|
||||
Hd | _/ | \ | Hu
|
||||
M | / | \ |
|
||||
/ \ | M / Ac | \_ | c_
|
||||
F \ | / \m/ | \ | / \
|
||||
------d----|---u------------------d---|---u----d------ level v
|
||||
\ | /| \ | / \L
|
||||
\_ | / | At \_|_/
|
||||
\|/..| t
|
||||
t
|
||||
|
||||
Parameters
|
||||
----------
|
||||
Ac : crest amplitude
|
||||
At : trough amplitude
|
||||
Hd : wave height as defined for down crossing waves
|
||||
Hu : wave height as defined for up crossing waves
|
||||
|
||||
See also
|
||||
--------
|
||||
waves, crossings, turning_points
|
||||
"""
|
||||
print(wave_amplitudes.__doc__)
|
||||
|
||||
def crossings():
|
||||
r"""
|
||||
Level v crossing definitions and nomenclature
|
||||
|
||||
Definition of level v crossings
|
||||
-------------------------------
|
||||
M
|
||||
. . M M
|
||||
. . . . . .
|
||||
F d . . L
|
||||
-----------------------u-------d-------o----------------- level v
|
||||
. . . . u
|
||||
. m
|
||||
m
|
||||
|
||||
Let the letters 'm', 'M', 'F', 'L','d' and 'u' in the
|
||||
figure above denote local minimum, maximum, first value, last
|
||||
value, down- and up-crossing, respectively. The remaining
|
||||
sampled values are indicated with a '.'. Values that are identical
|
||||
with v, but do not cross the level are indicated with the letter 'o'.
|
||||
We have a level up-crossing at index, k, if
|
||||
|
||||
x(k) < v and v < x(k+1)
|
||||
or if
|
||||
x(k) == v and v < x(k+1) and x(r) < v for some di < r <= k-1
|
||||
|
||||
where di is the index to the previous downcrossing.
|
||||
Similarly there is a level down-crossing at index, k, if
|
||||
|
||||
x(k) > v and v > x(k+1)
|
||||
or if
|
||||
x(k) == v and v > x(k+1) and x(r) > v for some ui < r <= k-1
|
||||
|
||||
where ui is the index to the previous upcrossing.
|
||||
|
||||
The first (F) value is an up crossing if x(1) = v and x(2) > v.
|
||||
Similarly, it is a down crossing if x(1) = v and x(2) < v.
|
||||
|
||||
See also
|
||||
--------
|
||||
wave_periods, waves, turning_points, findcross, findtp
|
||||
"""
|
||||
print(crossings.__doc__)
|
||||
|
||||
def cycle_pairs():
|
||||
r"""
|
||||
Cycle pairs definitions and nomenclature
|
||||
|
||||
Definition of Max2min and min2Max cycle pair
|
||||
--------------------------------------------
|
||||
A min2Max cycle pair (mM) is defined as the pair of a minimum
|
||||
and the following Maximum. Similarly a Max2min cycle pair (Mm)
|
||||
is defined as the pair of a Maximum and the following minimum.
|
||||
(all turning points possibly rainflow filtered before pairing into cycles.)
|
||||
|
||||
See also
|
||||
--------
|
||||
turning_points
|
||||
"""
|
||||
print(cycle_pairs.__doc__)
|
||||
|
||||
def wave_periods():
|
||||
r"""
|
||||
Wave periods (lengths) definitions and nomenclature
|
||||
|
||||
Definition of wave periods (lengths)
|
||||
------------------------------------
|
||||
|
||||
|
||||
<----- Direction of wave propagation
|
||||
|
||||
<-------Tu--------->
|
||||
: :
|
||||
<---Tc-----> :
|
||||
: : : <------Tcc---->
|
||||
M : c : : : :
|
||||
/ \ : M / \_ : : c_ c
|
||||
F \ :/ \m/ \: :/ \ / \
|
||||
------d--------u----------d-------u----d--------u---d-------- level v
|
||||
\ / \ / :\_ _/: :\_ L
|
||||
\_ / \_t_/ : \t_/ : : \m/
|
||||
\t/ : : : :
|
||||
: : <---Tt---> :
|
||||
<--------Ttt-------> : :
|
||||
<-----Td----->
|
||||
Tu = Up crossing period
|
||||
Td = Down crossing period
|
||||
Tc = Crest period, i.e., period between up crossing and
|
||||
the next down crossing
|
||||
Tt = Trough period, i.e., period between down crossing and
|
||||
the next up crossing
|
||||
Ttt = Trough2trough period
|
||||
Tcc = Crest2crest period
|
||||
|
||||
|
||||
<----- Direction of wave propagation
|
||||
|
||||
<--Tcf-> Tuc
|
||||
: : <-Tcb-> <->
|
||||
M : c : : : :
|
||||
/ \ : M / \_ c_ : : c
|
||||
F \ :/ \m/ \ / \___: :/ \
|
||||
------d---------u----------d---------u-------d--------u---d------ level v
|
||||
:\_ / \ __/: \_ _/ \_ L
|
||||
: \_ / \_t_/ : \t_/ \m/
|
||||
: \t/ : :
|
||||
: : : :
|
||||
<-Ttf-> <-Ttb->
|
||||
|
||||
|
||||
Tcf = Crest front period, i.e., period between up crossing and crest
|
||||
Tcb = Crest back period, i.e., period between crest and down crossing
|
||||
Ttf = Trough front period, i.e., period between down crossing and trough
|
||||
Ttb = Trough back period, i.e., period between trough and up crossing
|
||||
Also note that Tcf and Ttf can also be abbreviated by their crossing
|
||||
marker, e.g. Tuc (u2c) and Tdt (d2t), respectively. Similar applies
|
||||
to all the other wave periods and wave lengths.
|
||||
|
||||
(The nomenclature for wave length is similar, just substitute T and
|
||||
period with L and length, respectively)
|
||||
|
||||
<----- Direction of wave propagation
|
||||
|
||||
<--TMm-->
|
||||
<-TmM-> : :
|
||||
M : : M :
|
||||
/ \ : M /:\_ : M_ M
|
||||
F \ : / \m/ : \ : /: \ / \
|
||||
\ : / : \ : / : \ / \
|
||||
\ : / : \ : / : \_ _/ \_ L
|
||||
\_ : / : \_m_/ : \m_/ \m/
|
||||
\m/ : : : :
|
||||
<-----TMM-----> <----Tmm----->
|
||||
|
||||
|
||||
TmM = Period between minimum and the following Maximum
|
||||
TMm = Period between Maximum and the following minimum
|
||||
TMM = Period between Maximum and the following Maximum
|
||||
Tmm = Period between minimum and the following minimum
|
||||
|
||||
See also
|
||||
--------
|
||||
waves,
|
||||
wave_amplitudes,
|
||||
crossings,
|
||||
turning_points
|
||||
"""
|
||||
print(wave_periods.__doc__)
|
||||
def turning_points():
|
||||
r"""
|
||||
Turning points definitions and nomenclature
|
||||
|
||||
Definition of turningpoints
|
||||
---------------------------
|
||||
<----- Direction of wave propagation
|
||||
|
||||
M M
|
||||
/ \ .... M /:\_ M_ M
|
||||
F \ | / \m/ : \ /: \ / \
|
||||
\ h | / : \ / : \ / \
|
||||
\ | / : \ / : \_ _/ \_ L
|
||||
\_ | / : \_m_/ : \m_/ \m/
|
||||
\m/ : : : :
|
||||
<------Mw-----> <-----mw----->
|
||||
|
||||
Local minima and maxima are indicated with the
|
||||
letters 'm' or 'M'. Turning points in this connection are all
|
||||
local max (M) and min (m) and the last (L) value and the
|
||||
first (F) value if the first local extremum is a max.
|
||||
|
||||
(This choice is made in order to get the exact up-crossing intensity
|
||||
from rfc by mm2lc(tp2mm(rfc)) )
|
||||
|
||||
|
||||
See also
|
||||
--------
|
||||
waves,
|
||||
crossings,
|
||||
cycle_pairs
|
||||
findtp
|
||||
|
||||
"""
|
||||
print(turning_points.__doc__)
|
||||
def waves():
|
||||
r"""
|
||||
Wave definitions and nomenclature
|
||||
|
||||
Definition of trough and crest
|
||||
------------------------------
|
||||
A trough (t) is defined as the global minimum between a
|
||||
level v down-crossing (d) and the next up-crossing (u)
|
||||
and a crest (c) is defined as the global maximum between a
|
||||
level v up-crossing and the following down-crossing.
|
||||
|
||||
Definition of down- and up -crossing waves
|
||||
------------------------------------------
|
||||
A level v-down-crossing wave (dw) is a wave from a
|
||||
down-crossing to the following down-crossing.
|
||||
Similarly, a level v-up-crossing wave (uw) is a wave from an up-crossing
|
||||
to the next up-crossing.
|
||||
|
||||
Definition of trough and crest waves
|
||||
------------------------------------
|
||||
A trough-to-trough wave (tw) is a wave from a trough (t) to the
|
||||
following trough. The crest-to-crest wave (cw) is defined similarly.
|
||||
|
||||
|
||||
Definition of min2min and Max2Max wave
|
||||
--------------------------------------
|
||||
A min2min wave (mw) is defined starting from a minimum (m) and
|
||||
ending in the following minimum.
|
||||
Similarly a Max2Max wave (Mw) is thus a wave from a maximum (M)
|
||||
to the next maximum (all waves optionally rainflow filtered).
|
||||
|
||||
<----- Direction of wave propagation
|
||||
|
||||
|
||||
<------Mw-----> <----mw---->
|
||||
M : : c :
|
||||
/ \ M : / \_ : c_ c
|
||||
F \ / \m/ \ : /: \ /:\
|
||||
------d--------u----------d-------u----d--------u---d------ level v
|
||||
\ /: \ : /: : :\_ _/ : :\_ L
|
||||
\_ / : \_t_/ : : : \t_/ : : \m/
|
||||
\t/ <-------uw---------> : <-----dw----->
|
||||
: : : :
|
||||
<--------tw--------> <------cw----->
|
||||
|
||||
(F=first value and L=last value).
|
||||
|
||||
See also
|
||||
--------
|
||||
turning_points,
|
||||
crossings,
|
||||
wave_periods
|
||||
findtc,
|
||||
findcross
|
||||
"""
|
||||
print(waves.__doc__)
|
||||
"""
|
||||
WAFO definitions and nomenclature
|
||||
|
||||
crossings :
|
||||
cycle_pairs :
|
||||
turning_points :
|
||||
wave_amplitudes :
|
||||
wave_periods :
|
||||
waves :
|
||||
|
||||
Examples
|
||||
--------
|
||||
In order to view the documentation do the following in an ipython window:
|
||||
|
||||
>>> import wafo.definitions as wd
|
||||
>>> wd.crossings()
|
||||
|
||||
or
|
||||
>>> wd.crossings?
|
||||
|
||||
|
||||
"""
|
||||
|
||||
|
||||
def wave_amplitudes():
|
||||
r"""
|
||||
Wave amplitudes and heights definitions and nomenclature
|
||||
|
||||
Definition of wave amplitudes and wave heights
|
||||
---------------------------------------------
|
||||
|
||||
<----- Direction of wave propagation
|
||||
|
||||
|
||||
|..............c_..........|
|
||||
| /| \ |
|
||||
Hd | _/ | \ | Hu
|
||||
M | / | \ |
|
||||
/ \ | M / Ac | \_ | c_
|
||||
F \ | / \m/ | \ | / \
|
||||
------d----|---u------------------d---|---u----d------ level v
|
||||
\ | /| \ | / \L
|
||||
\_ | / | At \_|_/
|
||||
\|/..| t
|
||||
t
|
||||
|
||||
Parameters
|
||||
----------
|
||||
Ac : crest amplitude
|
||||
At : trough amplitude
|
||||
Hd : wave height as defined for down crossing waves
|
||||
Hu : wave height as defined for up crossing waves
|
||||
|
||||
See also
|
||||
--------
|
||||
waves, crossings, turning_points
|
||||
"""
|
||||
print(wave_amplitudes.__doc__)
|
||||
|
||||
|
||||
def crossings():
|
||||
r"""
|
||||
Level v crossing definitions and nomenclature
|
||||
|
||||
Definition of level v crossings
|
||||
-------------------------------
|
||||
M
|
||||
. . M M
|
||||
. . . . . .
|
||||
F d . . L
|
||||
-----------------------u-------d-------o----------------- level v
|
||||
. . . . u
|
||||
. m
|
||||
m
|
||||
|
||||
Let the letters 'm', 'M', 'F', 'L','d' and 'u' in the
|
||||
figure above denote local minimum, maximum, first value, last
|
||||
value, down- and up-crossing, respectively. The remaining
|
||||
sampled values are indicated with a '.'. Values that are identical
|
||||
with v, but do not cross the level are indicated with the letter 'o'.
|
||||
We have a level up-crossing at index, k, if
|
||||
|
||||
x(k) < v and v < x(k+1)
|
||||
or if
|
||||
x(k) == v and v < x(k+1) and x(r) < v for some di < r <= k-1
|
||||
|
||||
where di is the index to the previous downcrossing.
|
||||
Similarly there is a level down-crossing at index, k, if
|
||||
|
||||
x(k) > v and v > x(k+1)
|
||||
or if
|
||||
x(k) == v and v > x(k+1) and x(r) > v for some ui < r <= k-1
|
||||
|
||||
where ui is the index to the previous upcrossing.
|
||||
|
||||
The first (F) value is an up crossing if x(1) = v and x(2) > v.
|
||||
Similarly, it is a down crossing if x(1) = v and x(2) < v.
|
||||
|
||||
See also
|
||||
--------
|
||||
wave_periods, waves, turning_points, findcross, findtp
|
||||
"""
|
||||
print(crossings.__doc__)
|
||||
|
||||
|
||||
def cycle_pairs():
|
||||
r"""
|
||||
Cycle pairs definitions and nomenclature
|
||||
|
||||
Definition of Max2min and min2Max cycle pair
|
||||
--------------------------------------------
|
||||
A min2Max cycle pair (mM) is defined as the pair of a minimum
|
||||
and the following Maximum. Similarly a Max2min cycle pair (Mm)
|
||||
is defined as the pair of a Maximum and the following minimum.
|
||||
(all turning points possibly rainflow filtered before pairing into cycles.)
|
||||
|
||||
See also
|
||||
--------
|
||||
turning_points
|
||||
"""
|
||||
print(cycle_pairs.__doc__)
|
||||
|
||||
|
||||
def wave_periods():
|
||||
r"""
|
||||
Wave periods (lengths) definitions and nomenclature
|
||||
|
||||
Definition of wave periods (lengths)
|
||||
------------------------------------
|
||||
|
||||
|
||||
<----- Direction of wave propagation
|
||||
|
||||
<-------Tu--------->
|
||||
: :
|
||||
<---Tc-----> :
|
||||
: : : <------Tcc---->
|
||||
M : c : : : :
|
||||
/ \ : M / \_ : : c_ c
|
||||
F \ :/ \m/ \: :/ \ / \
|
||||
------d--------u----------d-------u----d--------u---d-------- level v
|
||||
\ / \ / :\_ _/: :\_ L
|
||||
\_ / \_t_/ : \t_/ : : \m/
|
||||
\t/ : : : :
|
||||
: : <---Tt---> :
|
||||
<--------Ttt-------> : :
|
||||
<-----Td----->
|
||||
Tu = Up crossing period
|
||||
Td = Down crossing period
|
||||
Tc = Crest period, i.e., period between up crossing and
|
||||
the next down crossing
|
||||
Tt = Trough period, i.e., period between down crossing and
|
||||
the next up crossing
|
||||
Ttt = Trough2trough period
|
||||
Tcc = Crest2crest period
|
||||
|
||||
|
||||
<----- Direction of wave propagation
|
||||
|
||||
<--Tcf-> Tuc
|
||||
: : <-Tcb-> <->
|
||||
M : c : : : :
|
||||
/ \ : M / \_ c_ : : c
|
||||
F \ :/ \m/ \ / \___: :/ \
|
||||
------d---------u----------d---------u-------d--------u---d------ level v
|
||||
:\_ / \ __/: \_ _/ \_ L
|
||||
: \_ / \_t_/ : \t_/ \m/
|
||||
: \t/ : :
|
||||
: : : :
|
||||
<-Ttf-> <-Ttb->
|
||||
|
||||
|
||||
Tcf = Crest front period, i.e., period between up crossing and crest
|
||||
Tcb = Crest back period, i.e., period between crest and down crossing
|
||||
Ttf = Trough front period, i.e., period between down crossing and trough
|
||||
Ttb = Trough back period, i.e., period between trough and up crossing
|
||||
Also note that Tcf and Ttf can also be abbreviated by their crossing
|
||||
marker, e.g. Tuc (u2c) and Tdt (d2t), respectively. Similar applies
|
||||
to all the other wave periods and wave lengths.
|
||||
|
||||
(The nomenclature for wave length is similar, just substitute T and
|
||||
period with L and length, respectively)
|
||||
|
||||
<----- Direction of wave propagation
|
||||
|
||||
<--TMm-->
|
||||
<-TmM-> : :
|
||||
M : : M :
|
||||
/ \ : M /:\_ : M_ M
|
||||
F \ : / \m/ : \ : /: \ / \
|
||||
\ : / : \ : / : \ / \
|
||||
\ : / : \ : / : \_ _/ \_ L
|
||||
\_ : / : \_m_/ : \m_/ \m/
|
||||
\m/ : : : :
|
||||
<-----TMM-----> <----Tmm----->
|
||||
|
||||
|
||||
TmM = Period between minimum and the following Maximum
|
||||
TMm = Period between Maximum and the following minimum
|
||||
TMM = Period between Maximum and the following Maximum
|
||||
Tmm = Period between minimum and the following minimum
|
||||
|
||||
See also
|
||||
--------
|
||||
waves,
|
||||
wave_amplitudes,
|
||||
crossings,
|
||||
turning_points
|
||||
"""
|
||||
print(wave_periods.__doc__)
|
||||
|
||||
|
||||
def turning_points():
|
||||
r"""
|
||||
Turning points definitions and nomenclature
|
||||
|
||||
Definition of turningpoints
|
||||
---------------------------
|
||||
<----- Direction of wave propagation
|
||||
|
||||
M M
|
||||
/ \ .... M /:\_ M_ M
|
||||
F \ | / \m/ : \ /: \ / \
|
||||
\ h | / : \ / : \ / \
|
||||
\ | / : \ / : \_ _/ \_ L
|
||||
\_ | / : \_m_/ : \m_/ \m/
|
||||
\m/ : : : :
|
||||
<------Mw-----> <-----mw----->
|
||||
|
||||
Local minima and maxima are indicated with the
|
||||
letters 'm' or 'M'. Turning points in this connection are all
|
||||
local max (M) and min (m) and the last (L) value and the
|
||||
first (F) value if the first local extremum is a max.
|
||||
|
||||
(This choice is made in order to get the exact up-crossing intensity
|
||||
from rfc by mm2lc(tp2mm(rfc)) )
|
||||
|
||||
|
||||
See also
|
||||
--------
|
||||
waves,
|
||||
crossings,
|
||||
cycle_pairs
|
||||
findtp
|
||||
|
||||
"""
|
||||
print(turning_points.__doc__)
|
||||
|
||||
|
||||
def waves():
|
||||
r"""
|
||||
Wave definitions and nomenclature
|
||||
|
||||
Definition of trough and crest
|
||||
------------------------------
|
||||
A trough (t) is defined as the global minimum between a
|
||||
level v down-crossing (d) and the next up-crossing (u)
|
||||
and a crest (c) is defined as the global maximum between a
|
||||
level v up-crossing and the following down-crossing.
|
||||
|
||||
Definition of down- and up -crossing waves
|
||||
------------------------------------------
|
||||
A level v-down-crossing wave (dw) is a wave from a
|
||||
down-crossing to the following down-crossing.
|
||||
Similarly, a level v-up-crossing wave (uw) is a wave from an up-crossing
|
||||
to the next up-crossing.
|
||||
|
||||
Definition of trough and crest waves
|
||||
------------------------------------
|
||||
A trough-to-trough wave (tw) is a wave from a trough (t) to the
|
||||
following trough. The crest-to-crest wave (cw) is defined similarly.
|
||||
|
||||
|
||||
Definition of min2min and Max2Max wave
|
||||
--------------------------------------
|
||||
A min2min wave (mw) is defined starting from a minimum (m) and
|
||||
ending in the following minimum.
|
||||
Similarly a Max2Max wave (Mw) is thus a wave from a maximum (M)
|
||||
to the next maximum (all waves optionally rainflow filtered).
|
||||
|
||||
<----- Direction of wave propagation
|
||||
|
||||
|
||||
<------Mw-----> <----mw---->
|
||||
M : : c :
|
||||
/ \ M : / \_ : c_ c
|
||||
F \ / \m/ \ : /: \ /:\
|
||||
------d--------u----------d-------u----d--------u---d------ level v
|
||||
\ /: \ : /: : :\_ _/ : :\_ L
|
||||
\_ / : \_t_/ : : : \t_/ : : \m/
|
||||
\t/ <-------uw---------> : <-----dw----->
|
||||
: : : :
|
||||
<--------tw--------> <------cw----->
|
||||
|
||||
(F=first value and L=last value).
|
||||
|
||||
See also
|
||||
--------
|
||||
turning_points,
|
||||
crossings,
|
||||
wave_periods
|
||||
findtc,
|
||||
findcross
|
||||
"""
|
||||
print(waves.__doc__)
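The level-crossing definition quoted in crossings() maps almost directly onto numpy. A simplified sketch (illustration only, not part of wafo): it handles only the strict inequalities and ignores the special case of samples exactly equal to v, which the full definition resolves via the previous down-/up-crossing index.

import numpy as np

def simple_crossings(x, v=0.0):
    '''Indices k with a strict level-v up- or down-crossing between k and k+1.'''
    x = np.asarray(x, dtype=float)
    up = np.flatnonzero((x[:-1] < v) & (x[1:] > v))    # x(k) < v and v < x(k+1)
    down = np.flatnonzero((x[:-1] > v) & (x[1:] < v))  # x(k) > v and v > x(k+1)
    return up, down

t = np.linspace(0, 4 * np.pi, 500)
up, down = simple_crossings(np.sin(t), v=0.0)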
|
||||
|
@@ -1,132 +1,138 @@
|
||||
'''
|
||||
Created on 20. jan. 2011
|
||||
|
||||
@author: pab
|
||||
'''
|
||||
import numpy as np
|
||||
from numpy import exp
|
||||
from wafo.misc import meshgrid
|
||||
__all__ = ['peaks', 'humps', 'magic']
|
||||
|
||||
def magic(n):
|
||||
'''
|
||||
Return a magic square of order n, for any n > 2.
|
||||
|
||||
A magic square has the property that the sum of every row and column,
|
||||
as well as both diagonals, is the same number.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> magic(3)
|
||||
array([[8, 1, 6],
|
||||
[3, 5, 7],
|
||||
[4, 9, 2]])
|
||||
|
||||
>>> magic(4)
|
||||
array([[16, 2, 3, 13],
|
||||
[ 5, 11, 10, 8],
|
||||
[ 9, 7, 6, 12],
|
||||
[ 4, 14, 15, 1]])
|
||||
|
||||
>>> magic(6)
|
||||
array([[35, 1, 6, 26, 19, 24],
|
||||
[ 3, 32, 7, 21, 23, 25],
|
||||
[31, 9, 2, 22, 27, 20],
|
||||
[ 8, 28, 33, 17, 10, 15],
|
||||
[30, 5, 34, 12, 14, 16],
|
||||
[ 4, 36, 29, 13, 18, 11]])
|
||||
'''
|
||||
if (n<3):
|
||||
raise ValueError('n must be greater than 2.')
|
||||
|
||||
if np.mod(n,2)==1: # odd order
|
||||
ix = np.arange(n) + 1
|
||||
J, I = np.meshgrid(ix, ix)
|
||||
A = np.mod(I + J - (n + 3) / 2, n)
|
||||
B = np.mod(I + 2 * J - 2, n)
|
||||
M = n * A + B + 1
|
||||
elif np.mod(n,4)==0: # doubly even order
|
||||
M = np.arange(1,n*n+1).reshape(n,n)
|
||||
ix = np.mod(np.arange(n) + 1,4)//2
|
||||
J, I = np.meshgrid(ix, ix)
|
||||
iz = np.flatnonzero(I==J)
|
||||
M.put(iz, n*n+1-M.flat[iz])
|
||||
else: # singly even order
|
||||
p = n//2
|
||||
M0 = magic(p)
|
||||
|
||||
M = np.hstack((np.vstack((M0, M0+3*p*p)),np.vstack((M0+2*p*p, M0+p*p))))
|
||||
|
||||
if n>2:
|
||||
k = (n-2)//4
|
||||
Jvec = np.hstack((np.arange(k), np.arange(n-k+1, n)))
|
||||
for i in range(p):
|
||||
for j in Jvec:
|
||||
temp = M[i][j]
|
||||
M[i][j]=M[i+p][j]
|
||||
M[i+p][j] = temp
|
||||
|
||||
i=k
|
||||
j=0
|
||||
temp = M[i][j];
|
||||
M[i][j] = M[i+p][j]
|
||||
M[i+p][j] = temp;
|
||||
|
||||
j=i
|
||||
temp=M[i+p][j]
|
||||
M[i+p][j]=M[i][j]
|
||||
M[i][j]=temp
|
||||
|
||||
return M
|
||||
|
||||
def peaks(x=None, y=None, n=51):
|
||||
'''
|
||||
Return the well-known MATLAB (R) peaks function
|
||||
evaluated in the [-3,3] x,y range
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> x,y,z = peaks()
|
||||
|
||||
h = plt.contourf(x,y,z)
|
||||
|
||||
'''
|
||||
if x is None:
|
||||
x = np.linspace(-3, 3, n)
|
||||
if y is None:
|
||||
y = np.linspace(-3, 3, n)
|
||||
|
||||
[x1, y1] = meshgrid(x, y)
|
||||
|
||||
z = (3 * (1 - x1) ** 2 * exp(-(x1 ** 2) - (y1 + 1) ** 2)
|
||||
- 10 * (x1 / 5 - x1 ** 3 - y1 ** 5) * exp(-x1 ** 2 - y1 ** 2)
|
||||
- 1. / 3 * exp(-(x1 + 1) ** 2 - y1 ** 2))
|
||||
|
||||
return x1, y1, z
|
||||
|
||||
def humps(x=None):
|
||||
'''
|
||||
Computes a function that has three roots, and some humps.
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> x = np.linspace(0,1)
|
||||
>>> y = humps(x)
|
||||
|
||||
h = plt.plot(x,y)
|
||||
'''
|
||||
if x is None:
|
||||
y = np.linspace(0, 1)
|
||||
else:
|
||||
y = np.asarray(x)
|
||||
|
||||
return 1.0 / ((y - 0.3) ** 2 + 0.01) + 1.0 / ((y - 0.9) ** 2 + 0.04) + 2 * y - 5.2
|
||||
|
||||
def test_docstrings():
|
||||
import doctest
|
||||
doctest.testmod()
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_docstrings()
|
||||
'''
|
||||
Created on 20. jan. 2011
|
||||
|
||||
@author: pab
|
||||
'''
|
||||
import numpy as np
|
||||
from numpy import exp, meshgrid
|
||||
__all__ = ['peaks', 'humps', 'magic']
|
||||
|
||||
|
||||
def magic(n):
|
||||
'''
|
||||
Return a magic square of order n, for any n > 2.
|
||||
|
||||
A magic square has the property that the sum of every row and column,
|
||||
as well as both diagonals, is the same number.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> magic(3)
|
||||
array([[8, 1, 6],
|
||||
[3, 5, 7],
|
||||
[4, 9, 2]])
|
||||
|
||||
>>> magic(4)
|
||||
array([[16, 2, 3, 13],
|
||||
[ 5, 11, 10, 8],
|
||||
[ 9, 7, 6, 12],
|
||||
[ 4, 14, 15, 1]])
|
||||
|
||||
>>> magic(6)
|
||||
array([[35, 1, 6, 26, 19, 24],
|
||||
[ 3, 32, 7, 21, 23, 25],
|
||||
[31, 9, 2, 22, 27, 20],
|
||||
[ 8, 28, 33, 17, 10, 15],
|
||||
[30, 5, 34, 12, 14, 16],
|
||||
[ 4, 36, 29, 13, 18, 11]])
|
||||
'''
|
||||
if (n < 3):
|
||||
raise ValueError('n must be greater than 2.')
|
||||
|
||||
if np.mod(n, 2) == 1: # odd order
|
||||
ix = np.arange(n) + 1
|
||||
J, I = np.meshgrid(ix, ix)
|
||||
A = np.mod(I + J - (n + 3) / 2, n)
|
||||
B = np.mod(I + 2 * J - 2, n)
|
||||
M = n * A + B + 1
|
||||
elif np.mod(n, 4) == 0: # doubly even order
|
||||
M = np.arange(1, n * n + 1).reshape(n, n)
|
||||
ix = np.mod(np.arange(n) + 1, 4) // 2
|
||||
J, I = np.meshgrid(ix, ix)
|
||||
iz = np.flatnonzero(I == J)
|
||||
M.put(iz, n * n + 1 - M.flat[iz])
|
||||
else: # singly even order
|
||||
p = n // 2
|
||||
M0 = magic(p)
|
||||
|
||||
M = np.hstack((np.vstack((M0, M0 + 3 * p * p)),
|
||||
np.vstack((M0 + 2 * p * p, M0 + p * p))))
|
||||
|
||||
if n > 2:
|
||||
k = (n - 2) // 4
|
||||
Jvec = np.hstack((np.arange(k), np.arange(n - k + 1, n)))
|
||||
for i in range(p):
|
||||
for j in Jvec:
|
||||
temp = M[i][j]
|
||||
M[i][j] = M[i + p][j]
|
||||
M[i + p][j] = temp
|
||||
|
||||
i = k
|
||||
j = 0
|
||||
temp = M[i][j]
|
||||
M[i][j] = M[i + p][j]
|
||||
M[i + p][j] = temp
|
||||
|
||||
j = i
|
||||
temp = M[i + p][j]
|
||||
M[i + p][j] = M[i][j]
|
||||
M[i][j] = temp
|
||||
|
||||
return M
|
||||
|
||||
|
||||
def peaks(x=None, y=None, n=51):
|
||||
'''
|
||||
Return the well-known MATLAB (R) peaks function
|
||||
evaluated in the [-3,3] x,y range
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> x,y,z = peaks()
|
||||
|
||||
h = plt.contourf(x,y,z)
|
||||
|
||||
'''
|
||||
if x is None:
|
||||
x = np.linspace(-3, 3, n)
|
||||
if y is None:
|
||||
y = np.linspace(-3, 3, n)
|
||||
|
||||
[x1, y1] = meshgrid(x, y)
|
||||
|
||||
z = (3 * (1 - x1) ** 2 * exp(-(x1 ** 2) - (y1 + 1) ** 2)
|
||||
- 10 * (x1 / 5 - x1 ** 3 - y1 ** 5) * exp(-x1 ** 2 - y1 ** 2)
|
||||
- 1. / 3 * exp(-(x1 + 1) ** 2 - y1 ** 2))
|
||||
|
||||
return x1, y1, z
|
||||
|
||||
|
||||
def humps(x=None):
|
||||
'''
|
||||
Computes a function that has three roots, and some humps.
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> x = np.linspace(0,1)
|
||||
>>> y = humps(x)
|
||||
|
||||
h = plt.plot(x,y)
|
||||
'''
|
||||
if x is None:
|
||||
y = np.linspace(0, 1)
|
||||
else:
|
||||
y = np.asarray(x)
|
||||
|
||||
return 1.0 / ((y - 0.3) ** 2 + 0.01) + 1.0 / ((y - 0.9) ** 2 + 0.04) + \
|
||||
2 * y - 5.2
|
||||
|
||||
|
||||
def test_docstrings():
|
||||
import doctest
|
||||
print('Testing docstrings in %s' % __file__)
|
||||
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_docstrings()
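The magic-square property stated in the docstring (equal row, column and diagonal sums) can be spot-checked against the doctest outputs above. A small verification sketch; the import path wafo.demos is assumed:

import numpy as np
from wafo.demos import magic   # assumed import path

for n in (3, 4, 6):
    M = np.asarray(magic(n))
    target = n * (n * n + 1) // 2              # the magic constant
    assert (M.sum(axis=0) == target).all()     # every column
    assert (M.sum(axis=1) == target).all()     # every row
    assert M.trace() == target                 # main diagonal
    assert np.fliplr(M).trace() == target      # anti-diagonal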
|
||||
|
@@ -1,282 +1,267 @@
|
||||
'''
|
||||
Created on 20. jan. 2011
|
||||
|
||||
@author: pab
|
||||
|
||||
license BSD
|
||||
'''
|
||||
from __future__ import division
|
||||
import warnings
|
||||
import numpy as np
|
||||
from wafo.plotbackend import plotbackend
|
||||
from matplotlib import mlab
|
||||
__all__ = ['cltext', 'epcolor', 'tallibing', 'test_docstrings']
|
||||
|
||||
_TALLIBING_GID = 'TALLIBING'
|
||||
_CLTEXT_GID = 'CLTEXT'
|
||||
|
||||
def _matchfun(x, gidtxt):
|
||||
if hasattr(x, 'get_gid'):
|
||||
return x.get_gid() == gidtxt
|
||||
return False
|
||||
|
||||
def delete_text_object(gidtxt, figure=None, axis=None, verbose=False):
|
||||
'''
|
||||
Delete all text objects matching gidtxt, if any exist
|
||||
|
||||
Parameters
|
||||
----------
|
||||
gidtxt : string
|
||||
|
||||
figure, axis : objects
|
||||
current figure and current axis, respectively.
|
||||
verbose : bool
|
||||
If true print warnings when trying to delete non-existent objects
|
||||
'''
|
||||
if figure is None:
|
||||
figure = plotbackend.gcf()
|
||||
if axis is None:
|
||||
axis = figure.gca()
|
||||
lmatchfun = lambda x : _matchfun(x, gidtxt)
|
||||
objs = axis.findobj(lmatchfun)
|
||||
for obj in objs:
|
||||
try:
|
||||
axis.texts.remove(obj)
|
||||
except:
|
||||
if verbose:
|
||||
warnings.warn('Tried to delete a non-existing %s from axis' % gidtxt)
|
||||
objs = figure.findobj(lmatchfun)
|
||||
for obj in objs:
|
||||
try:
|
||||
figure.texts.remove(obj)
|
||||
except:
|
||||
if verbose:
|
||||
warnings.warn('Tried to delete a non-existing %s from figure' % gidtxt)
|
||||
|
||||
def cltext(levels, percent=False, n=4, xs=0.036, ys=0.94, zs=0, figure=None, axis=None):
|
||||
'''
|
||||
Places contour level text in the current window
|
||||
|
||||
Parameters
|
||||
----------
|
||||
levels : vector
|
||||
contour levels or the corresponding percent which the
|
||||
contour line encloses
|
||||
percent : bool
|
||||
False if levels are the actual contour levels (default)
|
||||
True if levels are the corresponding percent which the
|
||||
contour line encloses
|
||||
n : integer
|
||||
maximum N digits of precision (default 4)
|
||||
figure, axis : objects
|
||||
current figure and current axis, respectively.
|
||||
default figure = plotbackend.gcf(),
|
||||
axis = plotbackend.gca()
|
||||
|
||||
Returns
|
||||
-------
|
||||
h = handles to the text objects.
|
||||
|
||||
|
||||
Notes
|
||||
-----
|
||||
CLTEXT creates text objects in the current figure and prints
|
||||
"Level curves at:" if percent is False and
|
||||
"Level curves enclosing:" otherwise
|
||||
and the contour levels or percent.
|
||||
|
||||
The handles to the lines of text may also be found by
|
||||
h = findobj(gcf,'gid','CLTEXT','type','text');
|
||||
h = findobj(gca,'gid','CLTEXT','type','text');
|
||||
To make the text objects follow the data in the axes set the units
|
||||
for the text objects 'data' by
|
||||
set(h,'unit','data')
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import wafo.graphutil as wg
|
||||
>>> import wafo.demos as wd
|
||||
>>> import pylab as plt
|
||||
>>> x,y,z = wd.peaks();
|
||||
>>> h = plt.contour(x,y,z)
|
||||
>>> h = wg.cltext(h.levels)
|
||||
>>> plt.show()
|
||||
'''
|
||||
# TODO : Make it work like legend does (but without the box): include position options etc...
|
||||
if figure is None:
|
||||
figure = plotbackend.gcf()
|
||||
if axis is None:
|
||||
axis = figure.gca()
|
||||
|
||||
clevels = np.atleast_1d(levels)
|
||||
|
||||
|
||||
axpos = axis.get_position()
|
||||
xint = axpos.intervalx
|
||||
yint = axpos.intervaly
|
||||
|
||||
xss = xint[0] + xs * (xint[1] - xint[0])
|
||||
yss = yint[0] + ys * (yint[1] - yint[0])
|
||||
|
||||
# delete cltext object if it exists
|
||||
delete_text_object(_CLTEXT_GID, axis=axis)
|
||||
|
||||
charHeight = 1.0 / 33.0
|
||||
delta_y = charHeight
|
||||
|
||||
if percent:
|
||||
titletxt = 'Level curves enclosing:';
|
||||
else:
|
||||
titletxt = 'Level curves at:';
|
||||
|
||||
format_ = '%0.' + ('%d' % n) + 'g\n'
|
||||
|
||||
cltxt = ''.join([format_ % level for level in clevels.tolist()])
|
||||
|
||||
titleProp = dict(gid=_CLTEXT_GID, horizontalalignment='left',
|
||||
verticalalignment='center', fontweight='bold', axes=axis) #
|
||||
|
||||
ha1 = figure.text(xss, yss, titletxt, **titleProp)
|
||||
|
||||
yss -= delta_y;
|
||||
txtProp = dict(gid=_CLTEXT_GID, horizontalalignment='left',
|
||||
verticalalignment='top', axes=axis)
|
||||
|
||||
ha2 = figure.text(xss, yss, cltxt, **txtProp)
|
||||
plotbackend.draw_if_interactive()
|
||||
return ha1, ha2
|
||||
|
||||
def tallibing(x, y, n, **kwds):
|
||||
'''
|
||||
TALLIBING Display numbers on field-plot
|
||||
|
||||
CALL h=tallibing(x,y,n,size,color)
|
||||
|
||||
x,y = position matrices
|
||||
n = the corresponding matrix of the values to be written
|
||||
(non-integers are rounded)
|
||||
size = font size (optional) (default=8)
|
||||
color = color of text (optional) (default='white')
|
||||
h = column-vector of handles to TEXT objects
|
||||
|
||||
TALLIBING writes the numbers in a 2D array as text at the positions
|
||||
given by the x and y coordinate matrices.
|
||||
When plotting binned results, the number of datapoints in each
|
||||
bin can be written on the bins in the plot.
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> import wafo.graphutil as wg
|
||||
>>> import wafo.demos as wd
|
||||
>>> [x,y,z] = wd.peaks(n=20)
|
||||
>>> h0 = wg.epcolor(x,y,z)
|
||||
>>> h1 = wg.tallibing(x,y,z)
|
||||
|
||||
pcolor(x,y,z); shading interp;
|
||||
|
||||
See also
|
||||
--------
|
||||
text
|
||||
'''
|
||||
|
||||
axis = kwds.pop('axis',None)
|
||||
if axis is None:
|
||||
axis = plotbackend.gca()
|
||||
|
||||
x, y, n = np.atleast_1d(x, y, n)
|
||||
if mlab.isvector(x) or mlab.isvector(y):
|
||||
x, y = np.meshgrid(x,y)
|
||||
|
||||
x = x.ravel()
|
||||
y = y.ravel()
|
||||
n = n.ravel()
|
||||
n = np.round(n)
|
||||
|
||||
# delete tallibing object if it exists
|
||||
delete_text_object(_TALLIBING_GID, axis=axis)
|
||||
|
||||
txtProp = dict(gid=_TALLIBING_GID, size=8, color='w', horizontalalignment='center',
|
||||
verticalalignment='center', fontweight='demi', axes=axis)
|
||||
|
||||
txtProp.update(**kwds)
|
||||
h = []
|
||||
for xi,yi, ni in zip(x,y,n):
|
||||
if ni:
|
||||
h.append(axis.text(xi, yi, str(ni), **txtProp))
|
||||
plotbackend.draw_if_interactive()
|
||||
return h
|
||||
|
||||
def epcolor(*args, **kwds):
|
||||
'''
|
||||
Pseudocolor (checkerboard) plot with mid-bin positioning.
|
||||
|
||||
h = epcolor(x,y,data)
|
||||
|
||||
|
||||
[x,y]= the axes corresponding to the data-positions. Vectors or
|
||||
matrices. If omitted, giving only data-matrix as inargument, the
|
||||
matrix-indices are used as axes.
|
||||
data = data-matrix
|
||||
|
||||
EPCOLOR makes a checkerboard plot where the data-point positions are in
|
||||
the middle of the bins instead of in the corners, and the last column
|
||||
and row of data are used.
|
||||
|
||||
|
||||
Example:
|
||||
>>> import wafo.demos as wd
|
||||
>>> import wafo.graphutil as wg
|
||||
>>> x, y, z = wd.peaks(n=20)
|
||||
>>> h = wg.epcolor(x,y,z)
|
||||
|
||||
See also
|
||||
--------
|
||||
pylab.pcolor
|
||||
'''
|
||||
axis = kwds.pop('axis',None)
|
||||
if axis is None:
|
||||
axis = plotbackend.gca()
|
||||
midbin = kwds.pop('midbin', True)
|
||||
if not midbin:
|
||||
ret = axis.pcolor(*args,**kwds)
|
||||
plotbackend.draw_if_interactive()
|
||||
return ret
|
||||
|
||||
nargin = len(args)
|
||||
data = np.atleast_2d(args[-1]).copy()
|
||||
M, N = data.shape
|
||||
if nargin==1:
|
||||
x = np.arange(N)
|
||||
y = np.arange(M)
|
||||
elif nargin==3:
|
||||
x, y = np.atleast_1d(*args[:-1])
|
||||
if min(x.shape)!=1:
|
||||
x = x[0]
|
||||
if min(y.shape)!=1:
|
||||
y = y[:,0]
|
||||
else:
|
||||
raise ValueError('pcolor takes 3 or 1 inarguments! (x,y,data) or (data)')
|
||||
|
||||
xx = _findbins(x)
|
||||
yy = _findbins(y)
|
||||
ret = axis.pcolor(xx, yy, data, **kwds)
|
||||
plotbackend.draw_if_interactive()
|
||||
return ret
|
||||
|
||||
|
||||
def _findbins(x):
|
||||
''' Return points half way between all values of X _and_ outside the
|
||||
endpoints. The outer limits have same distance from X's endpoints as
|
||||
the limits just inside.
|
||||
'''
|
||||
dx = np.diff(x) * 0.5
|
||||
dx = np.hstack((dx, dx[-1]))
|
||||
return np.hstack((x[0] - dx[0], x + dx))
|
||||
|
||||
|
||||
def test_docstrings():
|
||||
import doctest
|
||||
doctest.testmod()
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_docstrings()
|
||||
'''
|
||||
Created on 20. jan. 2011
|
||||
|
||||
@author: pab
|
||||
|
||||
license BSD
|
||||
'''
|
||||
from __future__ import division
|
||||
import warnings
|
||||
import numpy as np
|
||||
from wafo.plotbackend import plotbackend
|
||||
from matplotlib import mlab
|
||||
__all__ = ['cltext', 'tallibing', 'test_docstrings']
|
||||
|
||||
_TALLIBING_GID = 'TALLIBING'
|
||||
_CLTEXT_GID = 'CLTEXT'
|
||||
|
||||
|
||||
def _matchfun(x, gidtxt):
|
||||
if hasattr(x, 'get_gid'):
|
||||
return x.get_gid() == gidtxt
|
||||
return False
|
||||
|
||||
|
||||
def delete_text_object(gidtxt, figure=None, axis=None, verbose=False):
|
||||
'''
|
||||
Delete all text objects matching gidtxt, if any exist
|
||||
|
||||
Parameters
|
||||
----------
|
||||
gidtxt : string
|
||||
|
||||
figure, axis : objects
|
||||
current figure and current axis, respectively.
|
||||
verbose : bool
|
||||
If true print warnings when trying to delete non-existent objects
|
||||
'''
|
||||
if figure is None:
|
||||
figure = plotbackend.gcf()
|
||||
if axis is None:
|
||||
axis = figure.gca()
|
||||
lmatchfun = lambda x: _matchfun(x, gidtxt)
|
||||
objs = axis.findobj(lmatchfun)
|
||||
for obj in objs:
|
||||
try:
|
||||
axis.texts.remove(obj)
|
||||
except:
|
||||
if verbose:
|
||||
warnings.warn(
|
||||
'Tried to delete a non-existing %s from axis' % gidtxt)
|
||||
objs = figure.findobj(lmatchfun)
|
||||
for obj in objs:
|
||||
try:
|
||||
figure.texts.remove(obj)
|
||||
except:
|
||||
if verbose:
|
||||
warnings.warn(
|
||||
'Tried to delete a non-existing %s from figure' % gidtxt)
|
||||
|
||||
|
||||
def cltext(levels, percent=False, n=4, xs=0.036, ys=0.94, zs=0, figure=None,
|
||||
axis=None):
|
||||
'''
|
||||
Places contour level text in the current window
|
||||
|
||||
Parameters
|
||||
----------
|
||||
levels : vector
|
||||
contour levels or the corresponding percent which the
|
||||
contour line encloses
|
||||
percent : bool
|
||||
False if levels are the actual contour levels (default)
|
||||
True if levels are the corresponding percent which the
|
||||
contour line encloses
|
||||
n : integer
|
||||
maximum N digits of precision (default 4)
|
||||
figure, axis : objects
|
||||
current figure and current axis, respectively.
|
||||
default figure = plotbackend.gcf(),
|
||||
axis = plotbackend.gca()
|
||||
|
||||
Returns
|
||||
-------
|
||||
h = handles to the text objects.
|
||||
|
||||
|
||||
Notes
|
||||
-----
|
||||
CLTEXT creates text objects in the current figure and prints
|
||||
"Level curves at:" if percent is False and
|
||||
"Level curves enclosing:" otherwise
|
||||
and the contour levels or percent.
|
||||
|
||||
The handles to the lines of text may also be found by
|
||||
h = findobj(gcf,'gid','CLTEXT','type','text');
|
||||
h = findobj(gca,'gid','CLTEXT','type','text');
|
||||
To make the text objects follow the data in the axes set the units
|
||||
for the text objects 'data' by
|
||||
set(h,'unit','data')
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import wafo.graphutil as wg
|
||||
>>> import wafo.demos as wd
|
||||
>>> import pylab as plt
|
||||
>>> x,y,z = wd.peaks();
|
||||
>>> h = plt.contour(x,y,z)
|
||||
>>> h = wg.cltext(h.levels)
|
||||
>>> plt.show()
|
||||
'''
|
||||
# TODO : Make it work like legend does (but without the box): include
|
||||
# position options etc...
|
||||
if figure is None:
|
||||
figure = plotbackend.gcf()
|
||||
if axis is None:
|
||||
axis = figure.gca()
|
||||
|
||||
clevels = np.atleast_1d(levels)
|
||||
|
||||
axpos = axis.get_position()
|
||||
xint = axpos.intervalx
|
||||
yint = axpos.intervaly
|
||||
|
||||
xss = xint[0] + xs * (xint[1] - xint[0])
|
||||
yss = yint[0] + ys * (yint[1] - yint[0])
|
||||
|
||||
# delete cltext object if it exists
|
||||
delete_text_object(_CLTEXT_GID, axis=axis)
|
||||
|
||||
charHeight = 1.0 / 33.0
|
||||
delta_y = charHeight
|
||||
|
||||
if percent:
|
||||
titletxt = 'Level curves enclosing:'
|
||||
else:
|
||||
titletxt = 'Level curves at:'
|
||||
|
||||
format_ = '%0.' + ('%d' % n) + 'g\n'
|
||||
|
||||
cltxt = ''.join([format_ % level for level in clevels.tolist()])
|
||||
|
||||
titleProp = dict(gid=_CLTEXT_GID, horizontalalignment='left',
|
||||
verticalalignment='center', fontweight='bold', axes=axis)
|
||||
|
||||
ha1 = figure.text(xss, yss, titletxt, **titleProp)
|
||||
|
||||
yss -= delta_y
|
||||
txtProp = dict(gid=_CLTEXT_GID, horizontalalignment='left',
|
||||
verticalalignment='top', axes=axis)
|
||||
|
||||
ha2 = figure.text(xss, yss, cltxt, **txtProp)
|
||||
plotbackend.draw_if_interactive()
|
||||
return ha1, ha2
|
||||
|
||||
|
||||
def tallibing(*args, **kwds):
|
||||
'''
|
||||
TALLIBING Display numbers on field-plot
|
||||
|
||||
CALL h=tallibing(x,y,n,size,color)
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x, y : array
|
||||
position matrices
|
||||
n : array
|
||||
corresponding matrix of the values to be written
|
||||
(non-integers are rounded)
|
||||
mid_points : bool (default True)
|
||||
data-point-positions are in the middle of bins instead of the corners
|
||||
size : int, (default=8)
|
||||
font size (optional)
|
||||
color : str, (default='white')
|
||||
color of text (optional)
|
||||
|
||||
Returns
|
||||
-------
|
||||
h : list
|
||||
handles to TEXT objects
|
||||
|
||||
TALLIBING writes the numbers in a 2D array as text at the positions
|
||||
given by the x and y coordinate matrices.
|
||||
When plotting binned results, the number of datapoints in each
|
||||
bin can be written on the bins in the plot.
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> import wafo.graphutil as wg
|
||||
>>> import wafo.demos as wd
|
||||
>>> [x,y,z] = wd.peaks(n=20)
|
||||
>>> h0 = wg.pcolor(x,y,z)
|
||||
>>> h1 = wg.tallibing(x,y,z)
|
||||
|
||||
See also
|
||||
--------
|
||||
text
|
||||
'''
|
||||
|
||||
axis = kwds.pop('axis', None)
|
||||
if axis is None:
|
||||
axis = plotbackend.gca()
|
||||
|
||||
x, y, n = _parse_data(*args, **kwds)
|
||||
if mlab.isvector(x) or mlab.isvector(y):
|
||||
x, y = np.meshgrid(x, y)
|
||||
|
||||
n = np.round(n)
|
||||
|
||||
# delete tallibing object if it exists
|
||||
delete_text_object(_TALLIBING_GID, axis=axis)
|
||||
|
||||
txtProp = dict(gid=_TALLIBING_GID, size=8, color='w',
|
||||
horizontalalignment='center',
|
||||
verticalalignment='center', fontweight='demi', axes=axis)
|
||||
|
||||
txtProp.update(**kwds)
|
||||
h = []
|
||||
for xi, yi, ni in zip(x.ravel(), y.ravel(), n.ravel()):
|
||||
if ni:
|
||||
h.append(axis.text(xi, yi, str(ni), **txtProp))
|
||||
plotbackend.draw_if_interactive()
|
||||
return h
|
||||
|
||||
|
||||
def _parse_data(*args, **kwds):
|
||||
nargin = len(args)
|
||||
data = np.atleast_2d(args[-1]).copy()
|
||||
M, N = data.shape
|
||||
if nargin == 1:
|
||||
x = np.arange(N)
|
||||
y = np.arange(M)
|
||||
elif nargin == 3:
|
||||
x, y = np.atleast_1d(*args[:-1])
|
||||
if min(x.shape) != 1:
|
||||
x = x[0]
|
||||
if min(y.shape) != 1:
|
||||
y = y[:, 0]
|
||||
else:
|
||||
raise ValueError(
|
||||
'Requires 3 or 1 input arguments: (x, y, data) or (data)')
|
||||
if kwds.pop('mid_point', True):
|
||||
xx = _find_mid_points(x)
|
||||
yy = _find_mid_points(y)
|
||||
return xx, yy, data
|
||||
return x, y, data
|
||||
|
||||
pcolor = plotbackend.pcolor
|
||||
pcolormesh = plotbackend.pcolormesh
|
||||
|
||||
|
||||
def _find_mid_points(x):
|
||||
''' Return points half way between all values of X and outside the
|
||||
endpoints. The outer limits have same distance from X's endpoints as
|
||||
the limits just inside.
|
||||
'''
|
||||
dx = np.diff(x) * 0.5
|
||||
dx = np.hstack((dx, dx[-1]))
|
||||
return x + dx
|
||||
|
||||
|
||||
def test_docstrings():
|
||||
import doctest
|
||||
print('Testing docstrings in %s' % __file__)
|
||||
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_docstrings()
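A worked example of what the mid-point helper computes (illustration only): each coordinate is shifted half a bin forward, so the pcolor cells end up centred on the data points.

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
dx = np.diff(x) * 0.5             # [0.5, 0.5, 0.5]
dx = np.hstack((dx, dx[-1]))      # repeat the last half-spacing
print(x + dx)                     # [0.5  1.5  2.5  3.5]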
|
||||
|
@@ -1,136 +0,0 @@
|
||||
import numpy as np
|
||||
|
||||
|
||||
def meshgrid(*xi, **kwargs):
|
||||
"""
|
||||
Return coordinate matrices from one or more coordinate vectors.
|
||||
|
||||
Make N-D coordinate arrays for vectorized evaluations of
|
||||
N-D scalar/vector fields over N-D grids, given
|
||||
one-dimensional coordinate arrays x1, x2,..., xn.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x1, x2,..., xn : array_like
|
||||
1-D arrays representing the coordinates of a grid.
|
||||
indexing : 'xy' or 'ij' (optional)
|
||||
cartesian ('xy', default) or matrix ('ij') indexing of output
|
||||
sparse : True or False (default) (optional)
|
||||
If True a sparse grid is returned in order to conserve memory.
|
||||
copy : True (default) or False (optional)
|
||||
If False a view into the original arrays are returned in order to
|
||||
conserve memory. Please note that sparse=False, copy=False will likely
|
||||
return non-contiguous arrays. Furthermore, more than one element of a
|
||||
broadcasted array may refer to a single memory location. If you
|
||||
need to write to the arrays, make copies first.
|
||||
|
||||
Returns
|
||||
-------
|
||||
X1, X2,..., XN : ndarray
|
||||
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
|
||||
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
|
||||
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
|
||||
with the elements of `xi` repeated to fill the matrix along
|
||||
the first dimension for `x1`, the second for `x2` and so on.
|
||||
|
||||
Notes
|
||||
-----
|
||||
This function supports both indexing conventions through the indexing
|
||||
keyword argument. Giving the string 'ij' returns a meshgrid with matrix
|
||||
indexing, while 'xy' returns a meshgrid with Cartesian indexing. The
|
||||
difference is illustrated by the following code snippet:
|
||||
|
||||
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
|
||||
for i in range(nx):
|
||||
for j in range(ny):
|
||||
# treat xv[i,j], yv[i,j]
|
||||
|
||||
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
|
||||
for i in range(nx):
|
||||
for j in range(ny):
|
||||
# treat xv[j,i], yv[j,i]
|
||||
|
||||
See Also
|
||||
--------
|
||||
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
|
||||
using indexing notation.
|
||||
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
|
||||
using indexing notation.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> nx, ny = (3, 2)
|
||||
>>> x = np.linspace(0, 1, nx)
|
||||
>>> y = np.linspace(0, 1, ny)
|
||||
>>> xv, yv = meshgrid(x, y)
|
||||
>>> xv
|
||||
array([[ 0. , 0.5, 1. ],
|
||||
[ 0. , 0.5, 1. ]])
|
||||
>>> yv
|
||||
array([[ 0., 0., 0.],
|
||||
[ 1., 1., 1.]])
|
||||
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
|
||||
>>> xv
|
||||
array([[ 0. , 0.5, 1. ]])
|
||||
>>> yv
|
||||
array([[ 0.],
|
||||
[ 1.]])
|
||||
|
||||
`meshgrid` is very useful to evaluate functions on a grid.
|
||||
|
||||
>>> x = np.arange(-5, 5, 0.1)
|
||||
>>> y = np.arange(-5, 5, 0.1)
|
||||
>>> xx, yy = meshgrid(x, y, sparse=True)
|
||||
>>> z = np.sin(xx**2+yy**2)/(xx**2+yy**2)
|
||||
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> h = plt.contourf(x,y,z)
|
||||
"""
|
||||
copy_ = kwargs.get('copy', True)
|
||||
args = np.atleast_1d(*xi)
|
||||
ndim = len(args)
|
||||
|
||||
if not isinstance(args, list) or ndim < 2:
|
||||
raise TypeError(
|
||||
'meshgrid() takes 2 or more arguments (%d given)' % int(ndim > 0))
|
||||
|
||||
sparse = kwargs.get('sparse', False)
|
||||
indexing = kwargs.get('indexing', 'xy')
|
||||
|
||||
s0 = (1,) * ndim
|
||||
output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::])
|
||||
for i, x in enumerate(args)]
|
||||
|
||||
shape = [x.size for x in output]
|
||||
|
||||
if indexing == 'xy':
|
||||
# switch first and second axis
|
||||
output[0].shape = (1, -1) + (1,) * (ndim - 2)
|
||||
output[1].shape = (-1, 1) + (1,) * (ndim - 2)
|
||||
shape[0], shape[1] = shape[1], shape[0]
|
||||
|
||||
if sparse:
|
||||
if copy_:
|
||||
return [x.copy() for x in output]
|
||||
else:
|
||||
return output
|
||||
else:
|
||||
# Return the full N-D matrix (not only the 1-D vector)
|
||||
if copy_:
|
||||
mult_fact = np.ones(shape, dtype=int)
|
||||
return [x * mult_fact for x in output]
|
||||
else:
|
||||
return np.broadcast_arrays(*output)
|
||||
|
||||
|
||||
def ndgrid(*args, **kwargs):
|
||||
"""
|
||||
Same as calling meshgrid with indexing='ij' (see meshgrid for
|
||||
documentation).
|
||||
"""
|
||||
kwargs['indexing'] = 'ij'
|
||||
return meshgrid(*args, **kwargs)
|
||||
|
||||
if __name__ == '__main__':
|
||||
import doctest
|
||||
doctest.testmod()
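The deleted helper duplicates what numpy's own meshgrid offers since version 1.7, which is presumably why the other modules in this commit import meshgrid from numpy directly (see the demos.py hunk above). A short comparison sketch (numpy >= 1.7 assumed):

import numpy as np

x = np.linspace(0, 1, 3)
y = np.linspace(0, 1, 2)
xv, yv = np.meshgrid(x, y)                   # 'xy' indexing: shapes (2, 3)
xi, yi = np.meshgrid(x, y, indexing='ij')    # 'ij' indexing: shapes (3, 2)
xs, ys = np.meshgrid(x, y, sparse=True)      # sparse: shapes (1, 3) and (2, 1)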
|
File diff suppressed because it is too large
@@ -1,132 +1,144 @@
|
||||
from operator import itemgetter as _itemgetter
|
||||
from keyword import iskeyword as _iskeyword
|
||||
import sys as _sys
|
||||
|
||||
def namedtuple(typename, field_names, verbose=False):
|
||||
"""Returns a new subclass of tuple with named fields.
|
||||
|
||||
>>> Point = namedtuple('Point', 'x y')
|
||||
>>> Point.__doc__ # docstring for the new class
|
||||
'Point(x, y)'
|
||||
>>> p = Point(11, y=22) # instantiate with positional args or keywords
|
||||
>>> p[0] + p[1] # indexable like a plain tuple
|
||||
33
|
||||
>>> x, y = p # unpack like a regular tuple
|
||||
>>> x, y
|
||||
(11, 22)
|
||||
>>> p.x + p.y # fields also accessible by name
|
||||
33
|
||||
>>> d = p._asdict() # convert to a dictionary
|
||||
>>> d['x']
|
||||
11
|
||||
>>> Point(**d) # convert from a dictionary
|
||||
Point(x=11, y=22)
|
||||
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
|
||||
Point(x=100, y=22)
|
||||
|
||||
"""
|
||||
|
||||
# Parse and validate the field names. Validation serves two purposes,
|
||||
# generating informative error messages and preventing template injection attacks.
|
||||
if isinstance(field_names, basestring):
|
||||
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
|
||||
field_names = tuple(field_names)
|
||||
for name in (typename,) + field_names:
|
||||
if not min(c.isalnum() or c=='_' for c in name):
|
||||
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
|
||||
if _iskeyword(name):
|
||||
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
|
||||
if name[0].isdigit():
|
||||
raise ValueError('Type names and field names cannot start with a number: %r' % name)
|
||||
seen_names = set()
|
||||
for name in field_names:
|
||||
if name.startswith('_'):
|
||||
raise ValueError('Field names cannot start with an underscore: %r' % name)
|
||||
if name in seen_names:
|
||||
raise ValueError('Encountered duplicate field name: %r' % name)
|
||||
seen_names.add(name)
|
||||
|
||||
# Create and fill-in the class template
|
||||
numfields = len(field_names)
|
||||
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
|
||||
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
|
||||
dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
|
||||
template = '''class %(typename)s(tuple):
|
||||
'%(typename)s(%(argtxt)s)' \n
|
||||
__slots__ = () \n
|
||||
_fields = %(field_names)r \n
|
||||
def __new__(cls, %(argtxt)s):
|
||||
return tuple.__new__(cls, (%(argtxt)s)) \n
|
||||
@classmethod
|
||||
def _make(cls, iterable, new=tuple.__new__, len=len):
|
||||
'Make a new %(typename)s object from a sequence or iterable'
|
||||
result = new(cls, iterable)
|
||||
if len(result) != %(numfields)d:
|
||||
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
|
||||
return result \n
|
||||
def __repr__(self):
|
||||
return '%(typename)s(%(reprtxt)s)' %% self \n
|
||||
def _asdict(t):
|
||||
'Return a new dict which maps field names to their values'
|
||||
return {%(dicttxt)s} \n
|
||||
def _replace(self, **kwds):
|
||||
'Return a new %(typename)s object replacing specified fields with new values'
|
||||
result = self._make(map(kwds.pop, %(field_names)r, self))
|
||||
if kwds:
|
||||
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
|
||||
return result \n\n''' % locals()
|
||||
for i, name in enumerate(field_names):
|
||||
template += ' %s = property(itemgetter(%d))\n' % (name, i)
|
||||
if verbose:
|
||||
print template
|
||||
|
||||
# Execute the template string in a temporary namespace
|
||||
namespace = dict(itemgetter=_itemgetter)
|
||||
try:
|
||||
exec template in namespace
|
||||
except SyntaxError, e:
|
||||
raise SyntaxError(e.message + ':\n' + template)
|
||||
result = namespace[typename]
|
||||
|
||||
# For pickling to work, the __module__ variable needs to be set to the frame
|
||||
# where the named tuple is created.  Bypass this step in environments where
|
||||
# sys._getframe is not defined (Jython for example).
|
||||
if hasattr(_sys, '_getframe'):
|
||||
result.__module__ = _sys._getframe(1).f_globals['__name__']
|
||||
|
||||
return result
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# verify that instances can be pickled
|
||||
from cPickle import loads, dumps
|
||||
Point = namedtuple('Point', 'x, y', True)
|
||||
p = Point(x=10, y=20)
|
||||
assert p == loads(dumps(p))
|
||||
|
||||
# test and demonstrate ability to override methods
|
||||
class Point(namedtuple('Point', 'x y')):
|
||||
@property
|
||||
def hypot(self):
|
||||
return (self.x ** 2 + self.y ** 2) ** 0.5
|
||||
def __str__(self):
|
||||
return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
|
||||
|
||||
for p in Point(3,4), Point(14,5), Point(9./7,6):
|
||||
print p
|
||||
|
||||
class Point(namedtuple('Point', 'x y')):
|
||||
'Point class with optimized _make() and _replace() without error-checking'
|
||||
_make = classmethod(tuple.__new__)
|
||||
def _replace(self, _map=map, **kwds):
|
||||
return self._make(_map(kwds.get, ('x', 'y'), self))
|
||||
|
||||
print Point(11, 22)._replace(x=100)
|
||||
|
||||
import doctest
|
||||
TestResults = namedtuple('TestResults', 'failed attempted')
|
||||
print TestResults(*doctest.testmod())
|
||||
from operator import itemgetter as _itemgetter
|
||||
from keyword import iskeyword as _iskeyword
|
||||
import sys as _sys
|
||||
|
||||
|
||||
def namedtuple(typename, field_names, verbose=False):
|
||||
"""Returns a new subclass of tuple with named fields.
|
||||
|
||||
>>> Point = namedtuple('Point', 'x y')
|
||||
>>> Point.__doc__ # docstring for the new class
|
||||
'Point(x, y)'
|
||||
>>> p = Point(11, y=22) # instantiate with positional args or keywords
|
||||
>>> p[0] + p[1] # indexable like a plain tuple
|
||||
33
|
||||
>>> x, y = p # unpack like a regular tuple
|
||||
>>> x, y
|
||||
(11, 22)
|
||||
>>> p.x + p.y                       # fields also accessible by name
|
||||
33
|
||||
>>> d = p._asdict() # convert to a dictionary
|
||||
>>> d['x']
|
||||
11
|
||||
>>> Point(**d) # convert from a dictionary
|
||||
Point(x=11, y=22)
|
||||
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
|
||||
Point(x=100, y=22)
|
||||
|
||||
"""
|
||||
|
||||
# Parse and validate the field names. Validation serves two purposes,
|
||||
# generating informative error messages and preventing template injection
|
||||
# attacks.
|
||||
if isinstance(field_names, basestring):
|
||||
# names separated by whitespace and/or commas
|
||||
field_names = field_names.replace(',', ' ').split()
|
||||
field_names = tuple(field_names)
|
||||
for name in (typename,) + field_names:
|
||||
if not min(c.isalnum() or c == '_' for c in name):
|
||||
raise ValueError(
|
||||
'Type names and field names can only contain alphanumeric ' +
|
||||
'characters and underscores: %r' % name)
|
||||
if _iskeyword(name):
|
||||
raise ValueError(
|
||||
'Type names and field names cannot be a keyword: %r' % name)
|
||||
if name[0].isdigit():
|
||||
raise ValueError('Type names and field names cannot start ' +
|
||||
'with a number: %r' % name)
|
||||
seen_names = set()
|
||||
for name in field_names:
|
||||
if name.startswith('_'):
|
||||
raise ValueError(
|
||||
'Field names cannot start with an underscore: %r' % name)
|
||||
if name in seen_names:
|
||||
raise ValueError('Encountered duplicate field name: %r' % name)
|
||||
seen_names.add(name)
|
||||
|
||||
# Create and fill-in the class template
|
||||
numfields = len(field_names)
|
||||
# tuple repr without parens or quotes
|
||||
argtxt = repr(field_names).replace("'", "")[1:-1]
|
||||
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
|
||||
dicttxt = ', '.join('%r: t[%d]' % (name, pos)
|
||||
for pos, name in enumerate(field_names))
|
||||
template = '''class %(typename)s(tuple):
|
||||
'%(typename)s(%(argtxt)s)' \n
|
||||
__slots__ = () \n
|
||||
_fields = %(field_names)r \n
|
||||
def __new__(cls, %(argtxt)s):
|
||||
return tuple.__new__(cls, (%(argtxt)s)) \n
|
||||
@classmethod
|
||||
def _make(cls, iterable, new=tuple.__new__, len=len):
|
||||
'Make a new %(typename)s object from a sequence or iterable'
|
||||
result = new(cls, iterable)
|
||||
if len(result) != %(numfields)d:
|
||||
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
|
||||
return result \n
|
||||
def __repr__(self):
|
||||
return '%(typename)s(%(reprtxt)s)' %% self \n
|
||||
def _asdict(t):
|
||||
'Return a new dict which maps field names to their values'
|
||||
return {%(dicttxt)s} \n
|
||||
def _replace(self, **kwds):
|
||||
'Return a new %(typename)s object replacing specified fields with new values'
|
||||
result = self._make(map(kwds.pop, %(field_names)r, self))
|
||||
if kwds:
|
||||
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
|
||||
return result \n\n''' % locals()
|
||||
for i, name in enumerate(field_names):
|
||||
template += ' %s = property(itemgetter(%d))\n' % (name, i)
|
||||
if verbose:
|
||||
print template
|
||||
|
||||
# Execute the template string in a temporary namespace
|
||||
namespace = dict(itemgetter=_itemgetter)
|
||||
try:
|
||||
exec template in namespace
|
||||
except SyntaxError, e:
|
||||
raise SyntaxError(e.message + ':\n' + template)
|
||||
result = namespace[typename]
|
||||
|
||||
# For pickling to work, the __module__ variable needs to be set to the
|
||||
# frame where the named tuple is created. Bypass this step in environments
|
||||
# where sys._getframe is not defined (Jython for example).
|
||||
if hasattr(_sys, '_getframe'):
|
||||
result.__module__ = _sys._getframe(1).f_globals['__name__']
|
||||
|
||||
return result
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# verify that instances can be pickled
|
||||
from cPickle import loads, dumps
|
||||
Point = namedtuple('Point', 'x, y', True)
|
||||
p = Point(x=10, y=20)
|
||||
assert p == loads(dumps(p))
|
||||
|
||||
# test and demonstrate ability to override methods
|
||||
class Point(namedtuple('Point', 'x y')):
|
||||
|
||||
@property
|
||||
def hypot(self):
|
||||
return (self.x ** 2 + self.y ** 2) ** 0.5
|
||||
|
||||
def __str__(self):
|
||||
return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y,
|
||||
self.hypot)
|
||||
|
||||
for p in Point(3, 4), Point(14, 5), Point(9. / 7, 6):
|
||||
print p
|
||||
|
||||
class Point(namedtuple('Point', 'x y')):
|
||||
'''Point class with optimized _make() and _replace()
|
||||
without error-checking
|
||||
'''
|
||||
_make = classmethod(tuple.__new__)
|
||||
|
||||
def _replace(self, _map=map, **kwds):
|
||||
return self._make(_map(kwds.get, ('x', 'y'), self))
|
||||
|
||||
print Point(11, 22)._replace(x=100)
|
||||
|
||||
import doctest
|
||||
TestResults = namedtuple('TestResults', 'failed attempted')
|
||||
print TestResults(*doctest.testmod())
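The comment above about setting __module__ via sys._getframe is what makes the generated classes picklable: pickle locates a class by module and name. A minimal sketch of that behaviour, using the stdlib collections.namedtuple as a stand-in for the recipe in this file (the stand-in is an assumption; the rest mirrors the doctest above):

import pickle
from collections import namedtuple   # stdlib equivalent of the recipe above

Point = namedtuple('Point', 'x y')    # __module__ is set to the caller's module
p = Point(1, 2)
# The round trip only works because pickle can find Point by module and name.
assert pickle.loads(pickle.dumps(p)) == p
print(p._asdict())                    # field -> value mapping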
|
||||
|
@@ -1,303 +1,320 @@
|
||||
'''
|
||||
Created on 15. des. 2009
|
||||
|
||||
@author: pab
|
||||
'''
|
||||
#import os
|
||||
#import sys
|
||||
#import win32com
|
||||
#from win32com.client.selecttlb import EnumTlbs
|
||||
#typelib_mso = None
|
||||
#typelib_msppt = None
|
||||
#for typelib in EnumTlbs():
|
||||
# d = typelib.desc.split(' ')
|
||||
# if d[0] == 'Microsoft' and d[1] == 'Office' and d[3] == 'Object' and d[4] == 'Library':
|
||||
# typelib_mso = typelib
|
||||
# if d[0] == 'Microsoft' and d[1] == 'PowerPoint' and d[3] == 'Object' and d[4] == 'Library':
|
||||
# typelib_msppt = typelib
|
||||
#if hasattr(sys, 'frozen'): # If we're an .exe file
|
||||
# win32com.__gen_path__ = os.path.dirname(sys.executable)
|
||||
## win32com.__gen_path__ = os.environ['TEMP']
|
||||
#if win32com.client.gencache.is_readonly:
|
||||
# win32com.client.gencache.is_readonly = False
|
||||
# win32com.client.gencache.Rebuild()
|
||||
#MSPPT = win32com.client.gencache.EnsureModule(typelib_msppt.clsid, typelib_msppt.lcid,
|
||||
# int(typelib_msppt.major), int(typelib_msppt.minor))
|
||||
#MSO = win32com.client.gencache.EnsureModule(typelib_mso.clsid, typelib_mso.lcid,
|
||||
# int(typelib_mso.major), int(typelib_mso.minor))
|
||||
import os
|
||||
import warnings
|
||||
import win32com.client
|
||||
import MSO
|
||||
import MSPPT
|
||||
from PIL import Image #@UnresolvedImport
|
||||
|
||||
g = globals()
|
||||
for c in dir(MSO.constants):
|
||||
g[c] = getattr(MSO.constants, c)
|
||||
for c in dir(MSPPT.constants):
|
||||
g[c] = getattr(MSPPT.constants, c)
|
||||
|
||||
class Powerpoint(object):
|
||||
def __init__(self, file_name=''):
|
||||
|
||||
self.application = win32com.client.Dispatch("Powerpoint.Application")
|
||||
#self.application.Visible = True
|
||||
self._visible = self.application.Visible
|
||||
if file_name:
|
||||
self.presentation = self.application.Presentations.Open(file_name)
|
||||
else:
|
||||
self.presentation = self.application.Presentations.Add()
|
||||
self.num_slides = 0
|
||||
# default picture width and height
|
||||
self.default_width = 500
|
||||
self.default_height = 400
|
||||
self.title_font = 'Arial' #'Boopee'
|
||||
self.title_size = 36
|
||||
self.text_font = 'Arial' #'Boopee'
|
||||
self.text_size = 20
|
||||
self.footer = ''
|
||||
|
||||
def set_footer(self):
|
||||
'''
|
||||
Set Footer in SlideMaster and NotesMaster
|
||||
'''
|
||||
if self.footer:
|
||||
if self.presentation.HasTitleMaster:
|
||||
TMHF = self.presentation.TitleMaster.HeadersFooters
|
||||
TMHF.Footer.Text = self.footer
|
||||
TMHF.Footer.Visible = True
|
||||
|
||||
SMHF = self.presentation.SlideMaster.HeadersFooters
|
||||
SMHF.Footer.Text = self.footer
|
||||
SMHF.Footer.Visible = True
|
||||
SMHF.SlideNumber.Visible= True
|
||||
NMHF = self.presentation.NotesMaster.HeadersFooters
|
||||
NMHF.Footer.Text = self.footer
|
||||
NMHF.SlideNumber.Visible= True
|
||||
for slide in self.presentation.Slides:
|
||||
shapes = slide.Shapes
|
||||
for shape in shapes:
|
||||
if shape.Name=='Footer':
|
||||
footer = shape
|
||||
break
|
||||
else:
|
||||
footer = shapes.AddTextbox(msoTextOrientationHorizontal, Left=0, Top=510, Width=720, Height=28.875) #@UndefinedVariable
|
||||
footer.Name = 'Footer'
|
||||
footer.TextFrame.TextRange.Text = self.footer
|
||||
|
||||
|
||||
def add_title_slide(self, title, subtitle=''):
|
||||
self.num_slides +=1
|
||||
slide = self.presentation.Slides.Add(self.num_slides, MSPPT.constants.ppLayoutTitle)
|
||||
|
||||
unused_title_id, unused_textbox_id = 1, 2
|
||||
for id_, title1 in enumerate([title, subtitle]):
|
||||
titlerange = slide.Shapes(id_+1).TextFrame.TextRange
|
||||
titlerange.Text = title1
|
||||
titlerange.Font.Name = self.title_font
|
||||
titlerange.Font.Size = self.title_size-id_*12 if self.title_size>22 else self.title_size
|
||||
|
||||
def add_slide(self, title='', texts='', notes='', image_file='',
|
||||
maxlevel=None, left=220, width=-1, height=-1):
|
||||
self.num_slides +=1
|
||||
slide = self.presentation.Slides.Add(self.num_slides, MSPPT.constants.ppLayoutText)
|
||||
|
||||
self.add2slide(slide, title, texts, notes, image_file, maxlevel, left, width, height)
|
||||
return slide
|
||||
|
||||
def add2slide(self, slide, title='', texts='', notes='', image_file='',
|
||||
maxlevel=None, left=220, width=-1, height=-1, keep_aspect=True):
|
||||
title_id, textbox_id = 1, 2
|
||||
if title:
|
||||
titlerange = slide.Shapes(title_id).TextFrame.TextRange
|
||||
titlerange.Font.Name = self.title_font
|
||||
titlerange.Text = title
|
||||
titlerange.Font.Size = self.title_size
|
||||
|
||||
if texts != '' and texts != ['']:
|
||||
#textrange = slide.Shapes(textbox_id).TextFrame.TextRange
|
||||
self._add_text(slide, textbox_id, texts, maxlevel)
|
||||
|
||||
if image_file != '' and image_file != ['']:
|
||||
if keep_aspect:
|
||||
im = Image.open(image_file)
|
||||
t_w, t_h = im.size
|
||||
if height<=0 and width<=0:
|
||||
if t_w*self.default_height < t_h*self.default_width:
|
||||
height = self.default_height
|
||||
else:
|
||||
width = self.default_width
|
||||
if height<=0 and width:
|
||||
height = t_h * width / t_w
|
||||
elif height and width <=0:
|
||||
width = t_w * height / t_h
|
||||
|
||||
slide.Shapes.AddPicture(FileName=image_file, LinkToFile=False,
|
||||
SaveWithDocument=True,
|
||||
Left=left, Top=110,
|
||||
Width=width, Height=height) #400)
|
||||
if notes != '' and notes != ['']:
|
||||
notespage = slide.NotesPage #.Shapes(2).TextFrame.TextRange
|
||||
self._add_text(notespage, 2, notes)
|
||||
return slide
|
||||
|
||||
def _add_text(self, page, id, txt, maxlevel=None): #@ReservedAssignment
|
||||
page.Shapes(id).TextFrame.TextRange.Font.Name = self.text_font
|
||||
|
||||
if isinstance(txt, dict):
|
||||
self._add_text_from_dict(page, id, txt, 1, maxlevel)
|
||||
elif isinstance(txt, (list, tuple)):
|
||||
self._add_text_from_list(page, id, txt, maxlevel)
|
||||
else:
|
||||
unused_tr = page.Shapes(id).TextFrame.TextRange.InsertAfter(txt)
|
||||
unused_temp = page.Shapes(id).TextFrame.TextRange.InsertAfter('\r')
|
||||
|
||||
page.Shapes(id).TextFrame.TextRange.Font.Size = self.text_size
|
||||
|
||||
def _add_text_from_dict(self, page, id, txt_dict, level, maxlevel=None): #@ReservedAssignment
|
||||
if maxlevel is None or level<=maxlevel:
|
||||
for name, subdict in txt_dict.iteritems():
|
||||
tr = page.Shapes(id).TextFrame.TextRange.InsertAfter(name)
|
||||
unused_temp = page.Shapes(id).TextFrame.TextRange.InsertAfter('\r')
|
||||
tr.IndentLevel = level
|
||||
self._add_text_from_dict(page, id, subdict, min(level+1,5), maxlevel)
|
||||
|
||||
def _add_text_from_list(self, page, id, txt_list, maxlevel=None): #@ReservedAssignment
|
||||
for txt in txt_list:
|
||||
level = 1
|
||||
while isinstance(txt, (list, tuple)):
|
||||
txt = txt[0]
|
||||
level += 1
|
||||
if maxlevel is None or level<=maxlevel:
|
||||
tr = page.Shapes(id).TextFrame.TextRange.InsertAfter(txt)
|
||||
unused_temp = page.Shapes(id).TextFrame.TextRange.InsertAfter('\r')
|
||||
tr.IndentLevel = level
|
||||
|
||||
|
||||
def save(self, fullfile=''):
|
||||
if fullfile:
|
||||
self.presentation.SaveAs(FileName=fullfile)
|
||||
else:
|
||||
self.presentation.Save()
|
||||
|
||||
|
||||
def quit(self): #@ReservedAssignment
|
||||
if self._visible:
|
||||
self.presentation.Close()
|
||||
else:
|
||||
self.application.Quit()
|
||||
|
||||
def quit_only_if_hidden(self):
|
||||
if not self._visible:
|
||||
self.application.Quit()
|
||||
|
||||
def test_powerpoint():
|
||||
# Make powerpoint
|
||||
|
||||
ppt = Powerpoint()
|
||||
#time.
|
||||
ppt.footer='This is the footer'
|
||||
ppt.add_title_slide('Title', 'Per A.')
|
||||
ppt.add_slide(title='alsfkasldk', texts='asdflaf', notes='asdfas')
|
||||
ppt.set_footer()
|
||||
|
||||
def make_ppt():
|
||||
application = win32com.client.Dispatch("Powerpoint.Application")
|
||||
application.Visible = True
|
||||
presentation = application.Presentations.Add()
|
||||
slide1 = presentation.Slides.Add(1, MSPPT.constants.ppLayoutText)
|
||||
|
||||
|
||||
# title = slide1.Shapes.AddTextBox(Type=msoTextOrientationHorizontal,Left=50, Top=10, Width=620, Height=70)
|
||||
# title.TextFrame.TextRange.Text = 'Overskrift'
|
||||
|
||||
|
||||
title_id, textbox_id = 1,2
|
||||
slide1.Shapes(title_id).TextFrame.TextRange.Text = 'Overskrift'
|
||||
#slide1.Shapes(title_id).TextFrame.Width = 190
|
||||
|
||||
|
||||
slide1.Shapes(textbox_id).TextFrame.TextRange.InsertAfter('Test')
|
||||
unused_tr = slide1.Shapes(textbox_id).TextFrame.TextRange.InsertAfter('\r')
|
||||
slide1.Shapes(textbox_id).TextFrame.TextRange.IndentLevel = 1
|
||||
tr = slide1.Shapes(textbox_id).TextFrame.TextRange.InsertAfter('tests')
|
||||
unused_tr0 = slide1.Shapes(textbox_id).TextFrame.TextRange.InsertAfter('\r')
|
||||
tr.IndentLevel=2
|
||||
tr1 = slide1.Shapes(textbox_id).TextFrame.TextRange.InsertAfter('test3')
|
||||
tr1.IndentLevel=3
|
||||
#slide1.Shapes(textbox_id).TextFrame.TextRange.Text = 'Test \r test2'
|
||||
|
||||
# textbox = slide1.Shapes.AddTextBox(Type=msoTextOrientationHorizontal,Left=30, Top=100, Width=190, Height=400)
|
||||
# textbox.TextFrame.TextRange.Text = 'Test \r test2'
|
||||
#picbox = slide1.Shapes(picb_id)
|
||||
|
||||
filename = r'c:\temp\data1_report1_and_2_Tr120_1.png'
|
||||
slide1.Shapes.AddPicture(FileName=filename, LinkToFile=False,
|
||||
SaveWithDocument=True,
|
||||
Left=220, Top=100, Width=500, Height=420)
|
||||
|
||||
slide1.NotesPage.Shapes(2).TextFrame.TextRange.Text = 'test'
|
||||
|
||||
|
||||
|
||||
# for shape in slide1.Shapes:
|
||||
# shape.TextFrame.TextRange.Text = 'Test \r test2'
|
||||
#slide1.Shapes.Titles.TextFrames.TestRange.Text
|
||||
# shape = slide1.Shapes.AddShape(msoShapeRectangle, 300, 100, 400, 400)
|
||||
# shape.TextFrame.TextRange.Text = 'Test \n test2'
|
||||
# shape.TextFrame.TextRange.Font.Size = 12
|
||||
|
||||
#
|
||||
# app = wx.PySimpleApp()
|
||||
# dialog = wx.FileDialog(None, 'Choose image file', defaultDir=os.getcwd(),
|
||||
# wildcard='*.*',
|
||||
# style=wx.OPEN | wx.CHANGE_DIR | wx.MULTIPLE)
|
||||
#
|
||||
# if dialog.ShowModal() == wx.ID_OK:
|
||||
# files_or_paths = dialog.GetPaths()
|
||||
# for filename in files_or_paths:
|
||||
# slide1.Shapes.AddPicture(FileName=filename, LinkToFile=False,
|
||||
# SaveWithDocument=True,
|
||||
# Left=100, Top=100, Width=200, Height=200)
|
||||
# dialog.Destroy()
|
||||
#presentation.Save()
|
||||
#application.Quit()
|
||||
def rename_ppt():
|
||||
root = r'C:/pab/tsm_opeval/analysis_tsmps_aco_v2008b/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_mag_v2008b/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_mag_v2010a/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_aco_v2010a/plots'
|
||||
#filename = r'mag_sweep_best_tsmps_ship_eff0-10.ppt'
|
||||
filenames = os.listdir(root)
|
||||
prefix = 'TSMPSv2008b_'
|
||||
#prefix = 'TSMPSv2010a_'
|
||||
for filename in filenames:
|
||||
if filename.endswith('.ppt'):
|
||||
try:
|
||||
ppt = Powerpoint(os.path.join(root,filename))
|
||||
ppt.footer = prefix + filename
|
||||
ppt.set_footer()
|
||||
ppt.save(os.path.join(root, ppt.footer))
|
||||
except:
|
||||
warnings.warn('Unable to load %s' % filename)
|
||||
def load_file_into_ppt():
|
||||
root = r'C:/pab/tsm_opeval/analysis_tsmps_aco_v2008b/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_mag_v2008b/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_mag_v2010a/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_aco_v2010a/plots'
|
||||
#filename = r'mag_sweep_best_tsmps_ship_eff0-10.ppt'
|
||||
filenames = os.listdir(root)
|
||||
prefix = 'TSMPSv2008b_'
|
||||
#prefix = 'TSMPSv2010a_'
|
||||
for filename in filenames:
|
||||
if filename.startswith(prefix) and filename.endswith('.ppt'):
|
||||
try:
|
||||
unused_ppt = Powerpoint(os.path.join(root,filename))
|
||||
except:
|
||||
warnings.warn('Unable to load %s' % filename)
|
||||
if __name__ == '__main__':
|
||||
#make_ppt()
|
||||
#test_powerpoint()
|
||||
#load_file_into_ppt()
|
||||
rename_ppt()
|
||||
'''
|
||||
Created on 15. des. 2009
|
||||
|
||||
@author: pab
|
||||
'''
|
||||
#import os
|
||||
#import sys
|
||||
#import win32com
|
||||
#from win32com.client.selecttlb import EnumTlbs
|
||||
#typelib_mso = None
|
||||
#typelib_msppt = None
|
||||
# for typelib in EnumTlbs():
|
||||
# d = typelib.desc.split(' ')
|
||||
# if d[0] == 'Microsoft' and d[1] == 'Office' and d[3] == 'Object' \
|
||||
# and d[4] == 'Library':
|
||||
# typelib_mso = typelib
|
||||
# if d[0] == 'Microsoft' and d[1] == 'PowerPoint' and d[3] == 'Object' \
|
||||
# and d[4] == 'Library':
|
||||
# typelib_msppt = typelib
|
||||
# if hasattr(sys, 'frozen'): # If we're an .exe file
|
||||
# win32com.__gen_path__ = os.path.dirname(sys.executable)
|
||||
## win32com.__gen_path__ = os.environ['TEMP']
|
||||
# if win32com.client.gencache.is_readonly:
|
||||
# win32com.client.gencache.is_readonly = False
|
||||
# win32com.client.gencache.Rebuild()
|
||||
# MSPPT = win32com.client.gencache.EnsureModule(typelib_msppt.clsid,
|
||||
# typelib_msppt.lcid,
|
||||
# int(typelib_msppt.major),
|
||||
# int(typelib_msppt.minor))
|
||||
# MSO = win32com.client.gencache.EnsureModule(typelib_mso.clsid,
|
||||
# typelib_mso.lcid,
|
||||
# int(typelib_mso.major), int(typelib_mso.minor))
|
||||
import os
|
||||
import warnings
|
||||
import win32com.client
|
||||
import MSO
|
||||
import MSPPT
|
||||
from PIL import Image # @UnresolvedImport
|
||||
|
||||
g = globals()
|
||||
for c in dir(MSO.constants):
|
||||
g[c] = getattr(MSO.constants, c)
|
||||
for c in dir(MSPPT.constants):
|
||||
g[c] = getattr(MSPPT.constants, c)
|
||||
|
||||
|
||||
class Powerpoint(object):
|
||||
|
||||
def __init__(self, file_name=''):
|
||||
|
||||
self.application = win32com.client.Dispatch("Powerpoint.Application")
|
||||
#self.application.Visible = True
|
||||
self._visible = self.application.Visible
|
||||
if file_name:
|
||||
self.presentation = self.application.Presentations.Open(file_name)
|
||||
else:
|
||||
self.presentation = self.application.Presentations.Add()
|
||||
self.num_slides = 0
|
||||
# default picture width and height
|
||||
self.default_width = 500
|
||||
self.default_height = 400
|
||||
self.title_font = 'Arial' # 'Boopee'
|
||||
self.title_size = 36
|
||||
self.text_font = 'Arial' # 'Boopee'
|
||||
self.text_size = 20
|
||||
self.footer = ''
|
||||
|
||||
def set_footer(self):
|
||||
'''
|
||||
Set Footer in SlideMaster and NotesMaster
|
||||
'''
|
||||
if self.footer:
|
||||
if self.presentation.HasTitleMaster:
|
||||
TMHF = self.presentation.TitleMaster.HeadersFooters
|
||||
TMHF.Footer.Text = self.footer
|
||||
TMHF.Footer.Visible = True
|
||||
|
||||
SMHF = self.presentation.SlideMaster.HeadersFooters
|
||||
SMHF.Footer.Text = self.footer
|
||||
SMHF.Footer.Visible = True
|
||||
SMHF.SlideNumber.Visible = True
|
||||
NMHF = self.presentation.NotesMaster.HeadersFooters
|
||||
NMHF.Footer.Text = self.footer
|
||||
NMHF.SlideNumber.Visible = True
|
||||
for slide in self.presentation.Slides:
|
||||
shapes = slide.Shapes
|
||||
for shape in shapes:
|
||||
if shape.Name == 'Footer':
|
||||
footer = shape
|
||||
break
|
||||
else:
|
||||
footer = shapes.AddTextbox(
|
||||
msoTextOrientationHorizontal, # @UndefinedVariable
|
||||
Left=0, Top=510, Width=720, Height=28.875)
|
||||
footer.Name = 'Footer'
|
||||
footer.TextFrame.TextRange.Text = self.footer
|
||||
|
||||
def add_title_slide(self, title, subtitle=''):
|
||||
self.num_slides += 1
|
||||
slide = self.presentation.Slides.Add(
|
||||
self.num_slides, MSPPT.constants.ppLayoutTitle)
|
||||
|
||||
unused_title_id, unused_textbox_id = 1, 2
|
||||
for id_, title1 in enumerate([title, subtitle]):
|
||||
titlerange = slide.Shapes(id_ + 1).TextFrame.TextRange
|
||||
titlerange.Text = title1
|
||||
titlerange.Font.Name = self.title_font
|
||||
titlerange.Font.Size = self.title_size - id_ * \
|
||||
12 if self.title_size > 22 else self.title_size
|
||||
|
||||
def add_slide(self, title='', texts='', notes='', image_file='',
|
||||
maxlevel=None, left=220, width=-1, height=-1):
|
||||
self.num_slides += 1
|
||||
slide = self.presentation.Slides.Add(
|
||||
self.num_slides, MSPPT.constants.ppLayoutText)
|
||||
|
||||
self.add2slide(slide, title, texts, notes, image_file, maxlevel, left,
|
||||
width, height)
|
||||
return slide
|
||||
|
||||
def add2slide(self, slide, title='', texts='', notes='', image_file='',
|
||||
maxlevel=None, left=220, width=-1, height=-1,
|
||||
keep_aspect=True):
|
||||
title_id, textbox_id = 1, 2
|
||||
if title:
|
||||
titlerange = slide.Shapes(title_id).TextFrame.TextRange
|
||||
titlerange.Font.Name = self.title_font
|
||||
titlerange.Text = title
|
||||
titlerange.Font.Size = self.title_size
|
||||
|
||||
if texts != '' and texts != ['']:
|
||||
#textrange = slide.Shapes(textbox_id).TextFrame.TextRange
|
||||
self._add_text(slide, textbox_id, texts, maxlevel)
|
||||
|
||||
if image_file != '' and image_file != ['']:
|
||||
if keep_aspect:
|
||||
im = Image.open(image_file)
|
||||
t_w, t_h = im.size
|
||||
if height <= 0 and width <= 0:
|
||||
if t_w * self.default_height < t_h * self.default_width:
|
||||
height = self.default_height
|
||||
else:
|
||||
width = self.default_width
|
||||
if height <= 0 and width:
|
||||
height = t_h * width / t_w
|
||||
elif height and width <= 0:
|
||||
width = t_w * height / t_h
|
||||
|
||||
slide.Shapes.AddPicture(FileName=image_file, LinkToFile=False,
|
||||
SaveWithDocument=True,
|
||||
Left=left, Top=110,
|
||||
Width=width, Height=height) # 400)
|
||||
if notes != '' and notes != ['']:
|
||||
notespage = slide.NotesPage # .Shapes(2).TextFrame.TextRange
|
||||
self._add_text(notespage, 2, notes)
|
||||
return slide
|
||||
|
||||
def _add_text(self, page, id, txt, maxlevel=None): # @ReservedAssignment
|
||||
page.Shapes(id).TextFrame.TextRange.Font.Name = self.text_font
|
||||
|
||||
if isinstance(txt, dict):
|
||||
self._add_text_from_dict(page, id, txt, 1, maxlevel)
|
||||
elif isinstance(txt, (list, tuple)):
|
||||
self._add_text_from_list(page, id, txt, maxlevel)
|
||||
else:
|
||||
unused_tr = page.Shapes(id).TextFrame.TextRange.InsertAfter(txt)
|
||||
unused_temp = page.Shapes(id).TextFrame.TextRange.InsertAfter('\r')
|
||||
|
||||
page.Shapes(id).TextFrame.TextRange.Font.Size = self.text_size
|
||||
|
||||
def _add_text_from_dict(self, page, id, txt_dict, # @ReservedAssignment
|
||||
level, maxlevel=None):
|
||||
if maxlevel is None or level <= maxlevel:
|
||||
for name, subdict in txt_dict.iteritems():
|
||||
tr = page.Shapes(id).TextFrame.TextRange.InsertAfter(name)
|
||||
unused_temp = page.Shapes(
|
||||
id).TextFrame.TextRange.InsertAfter('\r')
|
||||
tr.IndentLevel = level
|
||||
self._add_text_from_dict(
|
||||
page, id, subdict, min(level + 1, 5), maxlevel)
|
||||
|
||||
def _add_text_from_list(self, page, id, # @ReservedAssignment
|
||||
txt_list, maxlevel=None):
|
||||
for txt in txt_list:
|
||||
level = 1
|
||||
while isinstance(txt, (list, tuple)):
|
||||
txt = txt[0]
|
||||
level += 1
|
||||
if maxlevel is None or level <= maxlevel:
|
||||
tr = page.Shapes(id).TextFrame.TextRange.InsertAfter(txt)
|
||||
unused_temp = page.Shapes(
|
||||
id).TextFrame.TextRange.InsertAfter('\r')
|
||||
tr.IndentLevel = level
|
||||
|
||||
def save(self, fullfile=''):
|
||||
if fullfile:
|
||||
self.presentation.SaveAs(FileName=fullfile)
|
||||
else:
|
||||
self.presentation.Save()
|
||||
|
||||
def quit(self): # @ReservedAssignment
|
||||
if self._visible:
|
||||
self.presentation.Close()
|
||||
else:
|
||||
self.application.Quit()
|
||||
|
||||
def quit_only_if_hidden(self):
|
||||
if not self._visible:
|
||||
self.application.Quit()
|
||||
|
||||
|
||||
def test_powerpoint():
|
||||
# Make powerpoint
|
||||
|
||||
ppt = Powerpoint()
|
||||
# time.
|
||||
ppt.footer = 'This is the footer'
|
||||
ppt.add_title_slide('Title', 'Per A.')
|
||||
ppt.add_slide(title='alsfkasldk', texts='asdflaf', notes='asdfas')
|
||||
ppt.set_footer()
|
||||
|
||||
|
||||
def make_ppt():
|
||||
application = win32com.client.Dispatch("Powerpoint.Application")
|
||||
application.Visible = True
|
||||
presentation = application.Presentations.Add()
|
||||
slide1 = presentation.Slides.Add(1, MSPPT.constants.ppLayoutText)
|
||||
|
||||
# title = slide1.Shapes.AddTextBox(Type=msoTextOrientationHorizontal,
|
||||
# Left=50, Top=10, Width=620, Height=70)
|
||||
# title.TextFrame.TextRange.Text = 'Overskrift'
|
||||
title_id, textbox_id = 1, 2
|
||||
slide1.Shapes(title_id).TextFrame.TextRange.Text = 'Overskrift'
|
||||
#slide1.Shapes(title_id).TextFrame.Width = 190
|
||||
|
||||
slide1.Shapes(textbox_id).TextFrame.TextRange.InsertAfter('Test')
|
||||
unused_tr = slide1.Shapes(textbox_id).TextFrame.TextRange.InsertAfter('\r')
|
||||
slide1.Shapes(textbox_id).TextFrame.TextRange.IndentLevel = 1
|
||||
tr = slide1.Shapes(textbox_id).TextFrame.TextRange.InsertAfter('tests')
|
||||
unused_tr0 = slide1.Shapes(
|
||||
textbox_id).TextFrame.TextRange.InsertAfter('\r')
|
||||
tr.IndentLevel = 2
|
||||
tr1 = slide1.Shapes(textbox_id).TextFrame.TextRange.InsertAfter('test3')
|
||||
tr1.IndentLevel = 3
|
||||
#slide1.Shapes(textbox_id).TextFrame.TextRange.Text = 'Test \r test2'
|
||||
|
||||
# textbox = slide1.Shapes.AddTextBox(Type=msoTextOrientationHorizontal,
|
||||
# Left=30, Top=100, Width=190, Height=400)
|
||||
# textbox.TextFrame.TextRange.Text = 'Test \r test2'
|
||||
#picbox = slide1.Shapes(picb_id)
|
||||
|
||||
filename = r'c:\temp\data1_report1_and_2_Tr120_1.png'
|
||||
slide1.Shapes.AddPicture(FileName=filename, LinkToFile=False,
|
||||
SaveWithDocument=True,
|
||||
Left=220, Top=100, Width=500, Height=420)
|
||||
|
||||
slide1.NotesPage.Shapes(2).TextFrame.TextRange.Text = 'test'
|
||||
|
||||
|
||||
# for shape in slide1.Shapes:
|
||||
# shape.TextFrame.TextRange.Text = 'Test \r test2'
|
||||
# slide1.Shapes.Titles.TextFrames.TestRange.Text
|
||||
# shape = slide1.Shapes.AddShape(msoShapeRectangle, 300, 100, 400, 400)
|
||||
# shape.TextFrame.TextRange.Text = 'Test \n test2'
|
||||
# shape.TextFrame.TextRange.Font.Size = 12
|
||||
#
|
||||
# app = wx.PySimpleApp()
|
||||
# dialog = wx.FileDialog(None, 'Choose image file', defaultDir=os.getcwd(),
|
||||
# wildcard='*.*',
|
||||
# style=wx.OPEN | wx.CHANGE_DIR | wx.MULTIPLE)
|
||||
#
|
||||
# if dialog.ShowModal() == wx.ID_OK:
|
||||
# files_or_paths = dialog.GetPaths()
|
||||
# for filename in files_or_paths:
|
||||
# slide1.Shapes.AddPicture(FileName=filename, LinkToFile=False,
|
||||
# SaveWithDocument=True,
|
||||
# Left=100, Top=100, Width=200, Height=200)
|
||||
# dialog.Destroy()
|
||||
# presentation.Save()
|
||||
# application.Quit()
|
||||
def rename_ppt():
|
||||
root = r'C:/pab/tsm_opeval/analysis_tsmps_aco_v2008b/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_mag_v2008b/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_mag_v2010a/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_aco_v2010a/plots'
|
||||
#filename = r'mag_sweep_best_tsmps_ship_eff0-10.ppt'
|
||||
filenames = os.listdir(root)
|
||||
prefix = 'TSMPSv2008b_'
|
||||
#prefix = 'TSMPSv2010a_'
|
||||
for filename in filenames:
|
||||
if filename.endswith('.ppt'):
|
||||
try:
|
||||
ppt = Powerpoint(os.path.join(root, filename))
|
||||
ppt.footer = prefix + filename
|
||||
ppt.set_footer()
|
||||
ppt.save(os.path.join(root, ppt.footer))
|
||||
except:
|
||||
warnings.warn('Unable to load %s' % filename)
|
||||
|
||||
|
||||
def load_file_into_ppt():
|
||||
root = r'C:/pab/tsm_opeval/analysis_tsmps_aco_v2008b/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_mag_v2008b/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_mag_v2010a/plots'
|
||||
# root = r'C:/pab/tsm_opeval/analysis_tsmps_aco_v2010a/plots'
|
||||
#filename = r'mag_sweep_best_tsmps_ship_eff0-10.ppt'
|
||||
filenames = os.listdir(root)
|
||||
prefix = 'TSMPSv2008b_'
|
||||
#prefix = 'TSMPSv2010a_'
|
||||
for filename in filenames:
|
||||
if filename.startswith(prefix) and filename.endswith('.ppt'):
|
||||
try:
|
||||
unused_ppt = Powerpoint(os.path.join(root, filename))
|
||||
except:
|
||||
warnings.warn('Unable to load %s' % filename)
|
||||
if __name__ == '__main__':
|
||||
# make_ppt()
|
||||
# test_powerpoint()
|
||||
# load_file_into_ppt()
|
||||
rename_ppt()
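The keep_aspect branch of add2slide scales a picture into the default 500x400 box while preserving its aspect ratio. A standalone sketch of that rule (the function name and the sample image size are illustrative, not part of wafo):

def fit_to_box(t_w, t_h, default_width=500, default_height=400):
    # Cap the dimension that would overflow first, then derive the other
    # from the image's aspect ratio (mirrors the height<=0 / width<=0 logic above).
    if t_w * default_height < t_h * default_width:   # relatively tall image
        height = default_height
        width = t_w * height / t_h
    else:                                             # relatively wide image
        width = default_width
        height = t_h * width / t_w
    return width, height

print(fit_to_box(800, 600))   # -> (500, 375.0): width capped, height follows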
|
||||
|
@@ -1,76 +1,133 @@
|
||||
import unittest
|
||||
import numpy as np
|
||||
from wafo.spectrum.models import (Bretschneider, Jonswap, OchiHubble, Tmaspec,
|
||||
Torsethaugen, McCormick, Wallop)
|
||||
|
||||
|
||||
def test_bretschneider():
|
||||
S = Bretschneider(Hm0=6.5, Tp=10)
|
||||
vals = S((0, 1, 2, 3))
|
||||
true_vals = np.array([0., 1.69350993, 0.06352698, 0.00844783])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
|
||||
|
||||
def test_if_jonswap_with_gamma_one_equals_bretschneider():
|
||||
S = Jonswap(Hm0=7, Tp=11, gamma=1)
|
||||
vals = S((0, 1, 2, 3))
|
||||
true_vals = np.array([0., 1.42694133, 0.05051648, 0.00669692])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
|
||||
w = np.linspace(0, 5)
|
||||
S2 = Bretschneider(Hm0=7, Tp=11)
|
||||
# JONSWAP with gamma=1 should be equal to Bretschneider:
|
||||
assert(np.all(np.abs(S(w) - S2(w)) < 1.e-7))
|
||||
|
||||
|
||||
def test_tmaspec():
|
||||
S = Tmaspec(Hm0=7, Tp=11, gamma=1, h=10)
|
||||
vals = S((0, 1, 2, 3))
|
||||
true_vals = np.array([0., 0.70106233, 0.05022433, 0.00669692])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
|
||||
|
||||
def test_torsethaugen():
|
||||
|
||||
S = Torsethaugen(Hm0=7, Tp=11, gamma=1, h=10)
|
||||
vals = S((0, 1, 2, 3))
|
||||
true_vals = np.array([0., 1.19989709, 0.05819794, 0.0093541])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
|
||||
vals = S.wind(range(4))
|
||||
true_vals = np.array([0., 1.13560528, 0.05529849, 0.00888989])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
vals = S.swell(range(4))
|
||||
true_vals = np.array([0., 0.0642918, 0.00289946, 0.00046421])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
|
||||
|
||||
def test_ochihubble():
|
||||
|
||||
S = OchiHubble(par=2)
|
||||
vals = S(range(4))
|
||||
true_vals = np.array([0., 0.90155636, 0.04185445, 0.00583207])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
|
||||
|
||||
def test_mccormick():
|
||||
|
||||
S = McCormick(Hm0=6.5, Tp=10)
|
||||
vals = S(range(4))
|
||||
true_vals = np.array([0., 1.87865908, 0.15050447, 0.02994663])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
|
||||
|
||||
def test_wallop():
|
||||
|
||||
S = Wallop(Hm0=6.5, Tp=10)
|
||||
vals = S(range(4))
|
||||
true_vals = np.array([0.00000000e+00, 9.36921871e-01, 2.76991078e-03,
|
||||
7.72996150e-05])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
from wafo.spectrum.models import (Bretschneider, Jonswap, OchiHubble, Tmaspec,
|
||||
Torsethaugen, McCormick, Wallop, Spreading)
|
||||
|
||||
|
||||
class TestCase(unittest.TestCase):
|
||||
def assertListAlmostEqual(self, list1, list2, places=None, msg=None):
|
||||
self.assertEqual(len(list1), len(list2))
|
||||
for a, b in zip(list1, list2):
|
||||
self.assertAlmostEqual(a, b, places, msg)
|
||||
|
||||
|
||||
class TestSpectra(TestCase):
|
||||
def test_bretschneider(self):
|
||||
S = Bretschneider(Hm0=6.5, Tp=10)
|
||||
vals = S((0, 1, 2, 3)).tolist()
|
||||
true_vals = [0., 1.69350993, 0.06352698, 0.00844783]
|
||||
self.assertListAlmostEqual(vals, true_vals)
|
||||
|
||||
def test_if_jonswap_with_gamma_one_equals_bretschneider(self):
|
||||
S = Jonswap(Hm0=7, Tp=11, gamma=1)
|
||||
vals = S((0, 1, 2, 3))
|
||||
true_vals = np.array([0., 1.42694133, 0.05051648, 0.00669692])
|
||||
self.assertListAlmostEqual(vals, true_vals)
|
||||
w = np.linspace(0, 5)
|
||||
S2 = Bretschneider(Hm0=7, Tp=11)
|
||||
# JONSWAP with gamma=1 should be equal to Bretschneider:
|
||||
self.assertListAlmostEqual(S(w), S2(w))
|
||||
|
||||
def test_tmaspec(self):
|
||||
S = Tmaspec(Hm0=7, Tp=11, gamma=1, h=10)
|
||||
vals = S((0, 1, 2, 3))
|
||||
true_vals = np.array([0., 0.70106233, 0.05022433, 0.00669692])
|
||||
self.assertListAlmostEqual(vals, true_vals)
|
||||
|
||||
def test_torsethaugen(self):
|
||||
S = Torsethaugen(Hm0=7, Tp=11, gamma=1, h=10)
|
||||
vals = S((0, 1, 2, 3))
|
||||
true_vals = np.array([0., 1.19989709, 0.05819794, 0.0093541])
|
||||
self.assertListAlmostEqual(vals, true_vals)
|
||||
|
||||
vals = S.wind(range(4))
|
||||
true_vals = np.array([0., 1.13560528, 0.05529849, 0.00888989])
|
||||
self.assertListAlmostEqual(vals, true_vals)
|
||||
|
||||
vals = S.swell(range(4))
|
||||
true_vals = np.array([0., 0.0642918, 0.00289946, 0.00046421])
|
||||
self.assertListAlmostEqual(vals, true_vals)
|
||||
|
||||
def test_ochihubble(self):
|
||||
|
||||
S = OchiHubble(par=2)
|
||||
vals = S(range(4))
|
||||
true_vals = np.array([0., 0.90155636, 0.04185445, 0.00583207])
|
||||
self.assertListAlmostEqual(vals, true_vals)
|
||||
|
||||
def test_mccormick(self):
|
||||
|
||||
S = McCormick(Hm0=6.5, Tp=10)
|
||||
vals = S(range(4))
|
||||
true_vals = np.array([0., 1.87865908, 0.15050447, 0.02994663])
|
||||
self.assertListAlmostEqual(vals, true_vals)
|
||||
|
||||
def test_wallop(self):
|
||||
S = Wallop(Hm0=6.5, Tp=10)
|
||||
vals = S(range(4))
|
||||
true_vals = np.array([0.00000000e+00, 9.36921871e-01, 2.76991078e-03,
|
||||
7.72996150e-05])
|
||||
self.assertListAlmostEqual(vals, true_vals)
|
||||
|
||||
|
||||
class TestSpreading(TestCase):
|
||||
def test_cos2s(self):
|
||||
theta = np.linspace(0, 2 * np.pi)
|
||||
d = Spreading(type='cos2s')
|
||||
dvals = [[1.10168934e+00],
|
||||
[1.03576796e+00],
|
||||
[8.60302298e-01],
|
||||
[6.30309013e-01],
|
||||
[4.06280137e-01],
|
||||
[2.29514882e-01],
|
||||
[1.13052757e-01],
|
||||
[4.82339343e-02],
|
||||
[1.76754409e-02],
|
||||
[5.50490020e-03],
|
||||
[1.43800617e-03],
|
||||
[3.09907242e-04],
|
||||
[5.39672445e-05],
|
||||
[7.39553743e-06],
|
||||
[7.70796579e-07],
|
||||
[5.84247670e-08],
|
||||
[3.03264905e-09],
|
||||
[9.91950201e-11],
|
||||
[1.81442131e-12],
|
||||
[1.55028269e-14],
|
||||
[4.63223469e-17],
|
||||
[2.90526245e-20],
|
||||
[1.35842977e-24],
|
||||
[3.26077455e-31],
|
||||
[1.65021852e-45],
|
||||
[1.65021852e-45],
|
||||
[3.26077455e-31],
|
||||
[1.35842977e-24],
|
||||
[2.90526245e-20],
|
||||
[4.63223469e-17],
|
||||
[1.55028269e-14],
|
||||
[1.81442131e-12],
|
||||
[9.91950201e-11],
|
||||
[3.03264905e-09],
|
||||
[5.84247670e-08],
|
||||
[7.70796579e-07],
|
||||
[7.39553743e-06],
|
||||
[5.39672445e-05],
|
||||
[3.09907242e-04],
|
||||
[1.43800617e-03],
|
||||
[5.50490020e-03],
|
||||
[1.76754409e-02],
|
||||
[4.82339343e-02],
|
||||
[1.13052757e-01],
|
||||
[2.29514882e-01],
|
||||
[4.06280137e-01],
|
||||
[6.30309013e-01],
|
||||
[8.60302298e-01],
|
||||
[1.03576796e+00],
|
||||
[1.10168934e+00]]
|
||||
|
||||
self.assertListAlmostEqual(d(theta)[0], dvals)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# main()
|
||||
import nose
|
||||
nose.run()
|
||||
#test_tmaspec()
|
||||
unittest.main()
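For reference, the hand-rolled assertListAlmostEqual check can also be phrased with numpy.testing.assert_allclose; a hedged equivalent of the Bretschneider test above (requires wafo to be importable; the tolerance mirrors the original absolute-difference bound):

import numpy as np
from wafo.spectrum.models import Bretschneider

# Same check as TestSpectra.test_bretschneider, expressed with numpy's helper.
S = Bretschneider(Hm0=6.5, Tp=10)
np.testing.assert_allclose(S((0, 1, 2, 3)),
                           [0., 1.69350993, 0.06352698, 0.00844783],
                           rtol=0, atol=1e-7)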
|
||||
|
@@ -0,0 +1,116 @@
|
||||
"""
|
||||
Sane parameters for stats.distributions.
|
||||
"""
|
||||
|
||||
distcont = [
|
||||
['alpha', (3.5704770516650459,)],
|
||||
['anglit', ()],
|
||||
['arcsine', ()],
|
||||
['beta', (2.3098496451481823, 0.62687954300963677)],
|
||||
['betaprime', (5, 6)],
|
||||
['bradford', (0.29891359763170633,)],
|
||||
['burr', (10.5, 4.3)],
|
||||
['cauchy', ()],
|
||||
['chi', (78,)],
|
||||
['chi2', (55,)],
|
||||
['cosine', ()],
|
||||
['dgamma', (1.1023326088288166,)],
|
||||
['dweibull', (2.0685080649914673,)],
|
||||
['erlang', (10,)],
|
||||
['expon', ()],
|
||||
['exponpow', (2.697119160358469,)],
|
||||
['exponweib', (2.8923945291034436, 1.9505288745913174)],
|
||||
['f', (29, 18)],
|
||||
['fatiguelife', (29,)], # correction numargs = 1
|
||||
['fisk', (3.0857548622253179,)],
|
||||
['foldcauchy', (4.7164673455831894,)],
|
||||
['foldnorm', (1.9521253373555869,)],
|
||||
['frechet_l', (3.6279911255583239,)],
|
||||
['frechet_r', (1.8928171603534227,)],
|
||||
['gamma', (1.9932305483800778,)],
|
||||
['gausshyper', (13.763771604130699, 3.1189636648681431,
|
||||
2.5145980350183019, 5.1811649903971615)], # veryslow
|
||||
['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)],
|
||||
['genextreme', (-0.1,)],
|
||||
['gengamma', (4.4162385429431925, 3.1193091679242761)],
|
||||
['genhalflogistic', (0.77274727809929322,)],
|
||||
['genlogistic', (0.41192440799679475,)],
|
||||
['genpareto', (0.1,)], # use case with finite moments
|
||||
['gilbrat', ()],
|
||||
['gompertz', (0.94743713075105251,)],
|
||||
['gumbel_l', ()],
|
||||
['gumbel_r', ()],
|
||||
['halfcauchy', ()],
|
||||
['halflogistic', ()],
|
||||
['halfnorm', ()],
|
||||
['hypsecant', ()],
|
||||
['invgamma', (4.0668996136993067,)],
|
||||
['invgauss', (0.14546264555347513,)],
|
||||
['invweibull', (10.58,)],
|
||||
['johnsonsb', (4.3172675099141058, 3.1837781130785063)],
|
||||
['johnsonsu', (2.554395574161155, 2.2482281679651965)],
|
||||
['ksone', (1000,)], # replace 22 by 100 to avoid failing range, ticket 956
|
||||
['kstwobign', ()],
|
||||
['laplace', ()],
|
||||
['levy', ()],
|
||||
['levy_l', ()],
|
||||
['levy_stable', (0.35667405469844993,
|
||||
-0.67450531578494011)], # NotImplementedError
|
||||
# rvs not tested
|
||||
['loggamma', (0.41411931826052117,)],
|
||||
['logistic', ()],
|
||||
['loglaplace', (3.2505926592051435,)],
|
||||
['lognorm', (0.95368226960575331,)],
|
||||
['lomax', (1.8771398388773268,)],
|
||||
['maxwell', ()],
|
||||
['mielke', (10.4, 3.6)],
|
||||
['nakagami', (4.9673794866666237,)],
|
||||
['ncf', (27, 27, 0.41578441799226107)],
|
||||
['nct', (14, 0.24045031331198066)],
|
||||
['ncx2', (21, 1.0560465975116415)],
|
||||
['norm', ()],
|
||||
['pareto', (2.621716532144454,)],
|
||||
['pearson3', (0.1,)],
|
||||
['powerlaw', (1.6591133289905851,)],
|
||||
['powerlognorm', (2.1413923530064087, 0.44639540782048337)],
|
||||
['powernorm', (4.4453652254590779,)],
|
||||
['rayleigh', ()],
|
||||
['rdist', (0.9,)], # feels also slow
|
||||
['recipinvgauss', (0.63004267809369119,)],
|
||||
['reciprocal', (0.0062309367010521255, 1.0062309367010522)],
|
||||
['rice', (0.7749725210111873,)],
|
||||
['semicircular', ()],
|
||||
['t', (2.7433514990818093,)],
|
||||
['triang', (0.15785029824528218,)],
|
||||
['truncexpon', (4.6907725456810478,)],
|
||||
['truncnorm', (-1.0978730080013919, 2.7306754109031979)],
|
||||
['truncnorm', (0.1, 2.)],
|
||||
['tukeylambda', (3.1321477856738267,)],
|
||||
['uniform', ()],
|
||||
['vonmises', (3.9939042581071398,)],
|
||||
['vonmises_line', (3.9939042581071398,)],
|
||||
['wald', ()],
|
||||
['weibull_max', (2.8687961709100187,)],
|
||||
['weibull_min', (1.7866166930421596,)],
|
||||
['wrapcauchy', (0.031071279018614728,)]]
|
||||
|
||||
|
||||
distdiscrete = [
|
||||
['bernoulli',(0.3,)],
|
||||
['binom', (5, 0.4)],
|
||||
['boltzmann',(1.4, 19)],
|
||||
['dlaplace', (0.8,)], # 0.5
|
||||
['geom', (0.5,)],
|
||||
['hypergeom',(30, 12, 6)],
|
||||
['hypergeom',(21,3,12)], # numpy.random (3,18,12) numpy ticket:921
|
||||
['hypergeom',(21,18,11)], # numpy.random (18,3,11) numpy ticket:921
|
||||
['logser', (0.6,)], # reenabled, numpy ticket:921
|
||||
['nbinom', (5, 0.5)],
|
||||
['nbinom', (0.4, 0.4)], # from tickets: 583
|
||||
['planck', (0.51,)], # 4.1
|
||||
['poisson', (0.6,)],
|
||||
['randint', (7, 31)],
|
||||
['skellam', (15, 8)],
|
||||
['zipf', (6.5,)]
|
||||
]
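A hedged sketch of how parameter lists like these are typically consumed; the scipy.stats lookup is an assumption on my part, since the file itself only defines the lists:

# Illustrative only: freeze a few of the continuous distributions listed above
# (assumes distcont from this module is in scope).
from scipy import stats

for name, shapes in distcont[:3]:
    frozen = getattr(stats, name)(*shapes)   # freeze with the "sane" parameters
    print(name, frozen.ppf(0.5))             # the median is always computable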
|
||||
|
@@ -0,0 +1,54 @@
|
||||
"""Functions copypasted from newer versions of numpy.
|
||||
|
||||
"""
|
||||
from __future__ import division, print_function, absolute_import
|
||||
|
||||
import warnings
|
||||
|
||||
import numpy as np
|
||||
|
||||
from scipy.lib._version import NumpyVersion
|
||||
|
||||
if NumpyVersion(np.__version__) > '1.7.0.dev':
|
||||
_assert_warns = np.testing.assert_warns
|
||||
else:
|
||||
def _assert_warns(warning_class, func, *args, **kw):
|
||||
r"""
|
||||
Fail unless the given callable throws the specified warning.
|
||||
|
||||
This definition is copypasted from numpy 1.9.0.dev.
|
||||
The version in earlier numpy returns None.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
warning_class : class
|
||||
The class defining the warning that `func` is expected to throw.
|
||||
func : callable
|
||||
The callable to test.
|
||||
*args : Arguments
|
||||
Arguments passed to `func`.
|
||||
**kwargs : Kwargs
|
||||
Keyword arguments passed to `func`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
The value returned by `func`.
|
||||
|
||||
"""
|
||||
with warnings.catch_warnings(record=True) as l:
|
||||
warnings.simplefilter('always')
|
||||
result = func(*args, **kw)
|
||||
if not len(l) > 0:
|
||||
raise AssertionError("No warning raised when calling %s"
|
||||
% func.__name__)
|
||||
if not l[0].category is warning_class:
|
||||
raise AssertionError("First warning for %s is not a "
|
||||
"%s( is %s)" % (func.__name__, warning_class, l[0]))
|
||||
return result
|
||||
|
||||
|
||||
if NumpyVersion(np.__version__) >= '1.6.0':
|
||||
count_nonzero = np.count_nonzero
|
||||
else:
|
||||
def count_nonzero(a):
|
||||
return (a != 0).sum()
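A usage sketch for the _assert_warns fallback defined above; the noisy helper function and its warning are made up for illustration:

import warnings

def _noisy():
    warnings.warn("will change in a future release", DeprecationWarning)
    return 42

# _assert_warns returns the callable's result and raises AssertionError
# if no warning (or a warning of the wrong class) was emitted.
assert _assert_warns(DeprecationWarning, _noisy) == 42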
|
@@ -1,209 +1,221 @@
|
||||
'''
|
||||
'''
|
||||
from __future__ import division
|
||||
#import numpy as np
|
||||
from numpy import trapz, sqrt, linspace #@UnresolvedImport
|
||||
|
||||
from wafo.wafodata import PlotData
|
||||
from wafo.misc import tranproc #, trangood
|
||||
|
||||
__all__ = ['TrData', 'TrCommon']
|
||||
|
||||
class TrCommon(object):
|
||||
"""
|
||||
<generic> transformation model, g.
|
||||
|
||||
Information about the moments of the process can be obtained from site-specific
data, laboratory measurements or by resorting to theoretical models.
|
||||
|
||||
Assumption
|
||||
----------
|
||||
The Gaussian process, Y, distributed N(0,1) is related to the
|
||||
non-Gaussian process, X, by Y = g(X).
|
||||
|
||||
Methods
|
||||
-------
|
||||
dist2gauss : Returns a measure of departure from the Gaussian model, i.e.,
|
||||
int (g(x)-xn)**2 dx where int. limits are given by X.
|
||||
dat2gauss : Transform non-linear data to Gaussian scale
|
||||
gauss2dat : Transform Gaussian data to non-linear scale
|
||||
|
||||
Member variables
|
||||
----------------
|
||||
mean, sigma, skew, kurt : real, scalar
|
||||
mean, standard-deviation, skewness and kurtosis, respectively, of the
|
||||
non-Gaussian process. Default mean=0, sigma=1, skew=0.16, kurt=3.04.
|
||||
skew=kurt-3=0 for a Gaussian process.
|
||||
"""
|
||||
|
||||
def __init__(self, mean=0.0, var=1.0, skew=0.16, kurt=3.04, *args, **kwds):
|
||||
sigma = kwds.get('sigma',None)
|
||||
if sigma is None:
|
||||
sigma = sqrt(var)
|
||||
self.mean = mean
|
||||
self.sigma = sigma
|
||||
self.skew = skew
|
||||
self.kurt = kurt
|
||||
# Mean and std in the Gaussian world:
|
||||
self.ymean = kwds.get('ymean', 0e0)
|
||||
self.ysigma = kwds.get('ysigma', 1e0)
|
||||
|
||||
def __call__(self, x, *xi):
|
||||
return self._dat2gauss(x, *xi)
|
||||
|
||||
def dist2gauss(self, x=None, xnmin=-5, xnmax=5, n=513):
|
||||
"""
|
||||
Return a measure of departure from the Gaussian model.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : vector (default sigma*linspace(xnmin,xnmax,n)+mean)
|
||||
xnmin : real, scalar
|
||||
minimum on normalized scale
|
||||
xnmax : real, scalar
|
||||
maximum on normalized scale
|
||||
n : integer, scalar
|
||||
number of evaluation points
|
||||
|
||||
|
||||
Returns
|
||||
-------
|
||||
t0 : real, scalar
|
||||
a measure of departure from the Gaussian model calculated as
|
||||
trapz((xn-g(x))**2., xn) where int. limits is given by X.
|
||||
"""
|
||||
if x is None:
|
||||
xn = linspace(xnmin, xnmax, n)
|
||||
x = self.sigma*xn+self.mean
|
||||
else:
|
||||
xn = (x-self.mean)/self.sigma
|
||||
|
||||
yn = (self._dat2gauss(x)-self.ymean)/self.ysigma
|
||||
t0 = trapz((xn-yn)**2., xn)
|
||||
return t0
|
||||
|
||||
def gauss2dat(self, y, *yi):
|
||||
"""
|
||||
Transforms Gaussian data, y, to non-linear scale.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
y, y1,..., yn : array-like
|
||||
input vectors with Gaussian data values, where yi is the i'th time
|
||||
derivative of y. (n<=4)
|
||||
Returns
|
||||
-------
|
||||
x, x1,...,xn : array-like
|
||||
transformed data to a non-linear scale
|
||||
|
||||
See also
|
||||
--------
|
||||
dat2gauss
|
||||
tranproc
|
||||
"""
|
||||
return self._gauss2dat(y, *yi)
|
||||
def _gauss2dat(self, y, *yi):
|
||||
pass
|
||||
def dat2gauss(self, x, *xi):
|
||||
"""
|
||||
Transforms non-linear data, x, to Gaussian scale.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x, x1,...,xn : array-like
|
||||
input vectors with non-linear data values, where xi is the i'th time
|
||||
derivative of x. (n<=4)
|
||||
Returns
|
||||
-------
|
||||
y, y1,...,yn : array-like
|
||||
transformed data to a Gaussian scale
|
||||
|
||||
See also
|
||||
--------
|
||||
gauss2dat
|
||||
tranproc.
|
||||
"""
|
||||
return self._dat2gauss(x, *xi)
|
||||
def _dat2gauss(self, x, *xi):
|
||||
pass
|
||||
|
||||
class TrData(PlotData, TrCommon):
|
||||
__doc__ = TrCommon.__doc__.split('mean')[0].replace('<generic>','Data' #@ReservedAssignment
|
||||
) + """
|
||||
data : array-like
|
||||
Gaussian values, Y
|
||||
args : array-like
|
||||
non-Gaussian values, X
|
||||
ymean, ysigma : real, scalars (default ymean=0, ysigma=1)
|
||||
mean and standard-deviation, respectively, of the process in Gaussian world.
|
||||
mean, sigma : real, scalars
|
||||
mean and standard-deviation, respectively, of the non-Gaussian process.
|
||||
Default:
|
||||
mean = self.gauss2dat(ymean),
|
||||
sigma = (self.gauss2dat(ysigma)-self.gauss2dat(-ysigma))/2
|
||||
|
||||
Example
|
||||
-------
|
||||
Construct a linear transformation model
|
||||
>>> import numpy as np
|
||||
>>> import wafo.transform as wt
|
||||
>>> sigma = 5; mean = 1
|
||||
>>> u = np.linspace(-5,5); x = sigma*u+mean; y = u
|
||||
>>> g = wt.TrData(y,x)
|
||||
>>> g.mean
|
||||
array([ 1.])
|
||||
>>> g.sigma
|
||||
array([ 5.])
|
||||
|
||||
>>> g = wt.TrData(y,x,mean=1,sigma=5)
|
||||
>>> g.mean
|
||||
1
|
||||
>>> g.sigma
|
||||
5
|
||||
>>> g.dat2gauss(1,2,3)
|
||||
[array([ 0.]), array([ 0.4]), array([ 0.6])]
|
||||
|
||||
Check that the departure from a Gaussian model is zero
|
||||
>>> g.dist2gauss() < 1e-16
|
||||
True
|
||||
"""
|
||||
def __init__(self, *args, **kwds):
|
||||
options = dict(title='Transform',
|
||||
xlab='x', ylab='g(x)',
|
||||
plot_args=['r'],
|
||||
plot_args_children=['g--'],)
|
||||
options.update(**kwds)
|
||||
super(TrData, self).__init__(*args, **options)
|
||||
self.ymean = kwds.get('ymean', 0e0)
|
||||
self.ysigma = kwds.get('ysigma', 1e0)
|
||||
self.mean = kwds.get('mean', None)
|
||||
self.sigma = kwds.get('sigma', None)
|
||||
|
||||
if self.mean is None:
|
||||
#self.mean = np.mean(self.args) #
|
||||
self.mean = self.gauss2dat(self.ymean)
|
||||
if self.sigma is None:
|
||||
yp = self.ymean+self.ysigma
|
||||
ym = self.ymean-self.ysigma
|
||||
self.sigma = (self.gauss2dat(yp)-self.gauss2dat(ym))/2.
|
||||
|
||||
self.children = [PlotData((self.args-self.mean)/self.sigma, self.args)]
|
||||
|
||||
def trdata(self):
|
||||
return self
|
||||
|
||||
def _gauss2dat(self, y, *yi):
|
||||
return tranproc(self.data, self.args, y, *yi)
|
||||
|
||||
def _dat2gauss(self, x, *xi):
|
||||
return tranproc(self.args, self.data, x, *xi)
|
||||
|
||||
def main():
|
||||
pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
if True: #False : #
|
||||
import doctest
|
||||
doctest.testmod()
|
||||
else:
|
||||
main()
|
||||
'''
|
||||
'''
|
||||
from __future__ import division
|
||||
#import numpy as np
|
||||
from numpy import trapz, sqrt, linspace # @UnresolvedImport
|
||||
|
||||
from wafo.containers import PlotData
|
||||
from wafo.misc import tranproc # , trangood
|
||||
|
||||
__all__ = ['TrData', 'TrCommon']
|
||||
|
||||
|
||||
class TrCommon(object):
|
||||
|
||||
"""
|
||||
<generic> transformation model, g.
|
||||
|
||||
Information about the moments of the process can be obtained from site-specific
data, laboratory measurements or by resorting to theoretical models.
|
||||
|
||||
Assumption
|
||||
----------
|
||||
The Gaussian process, Y, distributed N(0,1) is related to the
|
||||
non-Gaussian process, X, by Y = g(X).
|
||||
|
||||
Methods
|
||||
-------
|
||||
dist2gauss : Returns a measure of departure from the Gaussian model, i.e.,
|
||||
int (g(x)-xn)**2 dx where int. limits are given by X.
|
||||
dat2gauss : Transform non-linear data to Gaussian scale
|
||||
gauss2dat : Transform Gaussian data to non-linear scale
|
||||
|
||||
Member variables
|
||||
----------------
|
||||
mean, sigma, skew, kurt : real, scalar
|
||||
mean, standard-deviation, skewness and kurtosis, respectively, of the
|
||||
non-Gaussian process. Default mean=0, sigma=1, skew=0.16, kurt=3.04.
|
||||
skew=kurt-3=0 for a Gaussian process.
|
||||
"""
|
||||
|
||||
def __init__(self, mean=0.0, var=1.0, skew=0.16, kurt=3.04, *args, **kwds):
|
||||
sigma = kwds.get('sigma', None)
|
||||
if sigma is None:
|
||||
sigma = sqrt(var)
|
||||
self.mean = mean
|
||||
self.sigma = sigma
|
||||
self.skew = skew
|
||||
self.kurt = kurt
|
||||
# Mean and std in the Gaussian world:
|
||||
self.ymean = kwds.get('ymean', 0e0)
|
||||
self.ysigma = kwds.get('ysigma', 1e0)
|
||||
|
||||
def __call__(self, x, *xi):
|
||||
return self._dat2gauss(x, *xi)
|
||||
|
||||
def dist2gauss(self, x=None, xnmin=-5, xnmax=5, n=513):
|
||||
"""
|
||||
Return a measure of departure from the Gaussian model.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : vector (default sigma*linspace(xnmin,xnmax,n)+mean)
|
||||
xnmin : real, scalar
|
||||
minimum on normalized scale
|
||||
xnmax : real, scalar
|
||||
maximum on normalized scale
|
||||
n : integer, scalar
|
||||
number of evaluation points
|
||||
|
||||
|
||||
Returns
|
||||
-------
|
||||
t0 : real, scalar
|
||||
a measure of departure from the Gaussian model calculated as
|
||||
trapz((xn-g(x))**2., xn) where int. limits is given by X.
|
||||
"""
|
||||
if x is None:
|
||||
xn = linspace(xnmin, xnmax, n)
|
||||
x = self.sigma * xn + self.mean
|
||||
else:
|
||||
xn = (x - self.mean) / self.sigma
|
||||
|
||||
yn = (self._dat2gauss(x) - self.ymean) / self.ysigma
|
||||
t0 = trapz((xn - yn) ** 2., xn)
|
||||
return t0
|
||||
|
||||
def gauss2dat(self, y, *yi):
|
||||
"""
|
||||
Transforms Gaussian data, y, to non-linear scale.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
y, y1,..., yn : array-like
|
||||
input vectors with Gaussian data values, where yi is the i'th time
|
||||
derivative of y. (n<=4)
|
||||
Returns
|
||||
-------
|
||||
x, x1,...,xn : array-like
|
||||
transformed data to a non-linear scale
|
||||
|
||||
See also
|
||||
--------
|
||||
dat2gauss
|
||||
tranproc
|
||||
"""
|
||||
return self._gauss2dat(y, *yi)
|
||||
|
||||
def _gauss2dat(self, y, *yi):
|
||||
pass
|
||||
|
||||
def dat2gauss(self, x, *xi):
|
||||
"""
|
||||
Transforms non-linear data, x, to Gaussian scale.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x, x1,...,xn : array-like
|
||||
input vectors with non-linear data values, where xi is the i'th
|
||||
time derivative of x. (n<=4)
|
||||
Returns
|
||||
-------
|
||||
y, y1,...,yn : array-like
|
||||
transformed data to a Gaussian scale
|
||||
|
||||
See also
|
||||
--------
|
||||
gauss2dat
|
||||
tranproc.
|
||||
"""
|
||||
return self._dat2gauss(x, *xi)
|
||||
|
||||
def _dat2gauss(self, x, *xi):
|
||||
pass
|
||||
|
||||
|
||||
class TrData(PlotData, TrCommon):
|
||||
__doc__ = TrCommon.__doc__.split('mean')[0].replace('<generic>',
|
||||
'Data') + """
|
||||
data : array-like
|
||||
Gaussian values, Y
|
||||
args : array-like
|
||||
non-Gaussian values, X
|
||||
ymean, ysigma : real, scalars (default ymean=0, ysigma=1)
|
||||
mean and standard-deviation, respectively, of the process in Gaussian
|
||||
world.
|
||||
mean, sigma : real, scalars
|
||||
mean and standard-deviation, respectively, of the non-Gaussian process.
|
||||
Default:
|
||||
mean = self.gauss2dat(ymean),
|
||||
sigma = (self.gauss2dat(ysigma)-self.gauss2dat(-ysigma))/2
|
||||
|
||||
Example
|
||||
-------
|
||||
Construct a linear transformation model
|
||||
>>> import numpy as np
|
||||
>>> import wafo.transform as wt
|
||||
>>> sigma = 5; mean = 1
|
||||
>>> u = np.linspace(-5,5); x = sigma*u+mean; y = u
|
||||
>>> g = wt.TrData(y,x)
|
||||
>>> g.mean
|
||||
array([ 1.])
|
||||
>>> g.sigma
|
||||
array([ 5.])
|
||||
|
||||
>>> g = wt.TrData(y,x,mean=1,sigma=5)
|
||||
>>> g.mean
|
||||
1
|
||||
>>> g.sigma
|
||||
5
|
||||
>>> g.dat2gauss(1,2,3)
|
||||
[array([ 0.]), array([ 0.4]), array([ 0.6])]
|
||||
|
||||
Check that the departure from a Gaussian model is zero
|
||||
>>> g.dist2gauss() < 1e-16
|
||||
True
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwds):
|
||||
options = dict(title='Transform',
|
||||
xlab='x', ylab='g(x)',
|
||||
plot_args=['r'],
|
||||
plot_args_children=['g--'],)
|
||||
options.update(**kwds)
|
||||
super(TrData, self).__init__(*args, **options)
|
||||
self.ymean = kwds.get('ymean', 0e0)
|
||||
self.ysigma = kwds.get('ysigma', 1e0)
|
||||
self.mean = kwds.get('mean', None)
|
||||
self.sigma = kwds.get('sigma', None)
|
||||
|
||||
if self.mean is None:
|
||||
#self.mean = np.mean(self.args) #
|
||||
self.mean = self.gauss2dat(self.ymean)
|
||||
if self.sigma is None:
|
||||
yp = self.ymean + self.ysigma
|
||||
ym = self.ymean - self.ysigma
|
||||
self.sigma = (self.gauss2dat(yp) - self.gauss2dat(ym)) / 2.
|
||||
|
||||
self.children = [
|
||||
PlotData((self.args - self.mean) / self.sigma, self.args)]
|
||||
|
||||
def trdata(self):
|
||||
return self
|
||||
|
||||
def _gauss2dat(self, y, *yi):
|
||||
return tranproc(self.data, self.args, y, *yi)
|
||||
|
||||
def _dat2gauss(self, x, *xi):
|
||||
return tranproc(self.args, self.data, x, *xi)
|
||||
|
||||
class EstimateTransform(object):
|
||||
pass
|
||||
|
||||
def main():
|
||||
pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
if True:  # set to False to run main() instead of the doctests
|
||||
import doctest
|
||||
doctest.testmod()
|
||||
else:
|
||||
main()
|
||||
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,206 +1,210 @@
|
||||
"""
|
||||
Dispersion relation module
|
||||
--------------------------
|
||||
k2w - Translates from wave number to frequency
|
||||
w2k - Translates from frequency to wave number
|
||||
"""
|
||||
import warnings
|
||||
#import numpy as np
|
||||
from numpy import (atleast_1d, sqrt, ones_like, zeros_like, arctan2, where, tanh, any, #@UnresolvedImport
|
||||
sin, cos, sign, inf, flatnonzero, finfo, cosh, abs) #@UnresolvedImport
|
||||
|
||||
__all__ = ['k2w', 'w2k']
|
||||
|
||||
def k2w(k1, k2=0e0, h=inf, g=9.81, u1=0e0, u2=0e0):
|
||||
''' Translates from wave number to frequency
|
||||
using the dispersion relation
|
||||
|
||||
Parameters
|
||||
----------
|
||||
k1 : array-like
|
||||
wave numbers [rad/m].
|
||||
k2 : array-like, optional
|
||||
second dimension wave number
|
||||
h : real scalar, optional
|
||||
water depth [m].
|
||||
g : real scalar, optional
|
||||
acceleration of gravity, see gravity
|
||||
u1, u2 : real scalars, optional
|
||||
current velocity [m/s] along dimension 1 and 2.
|
||||
note: when u1!=0 | u2!=0 then theta is not calculated correctly
|
||||
|
||||
Returns
|
||||
-------
|
||||
w : ndarray
|
||||
angular frequency [rad/s].
|
||||
theta : ndarray
|
||||
direction [rad].
|
||||
|
||||
Dispersion relation
|
||||
-------------------
|
||||
w = sqrt(g*K*tanh(K*h)) ( 0 < w < inf)
|
||||
theta = arctan2(k2,k1) (-pi < theta < pi)
|
||||
where
|
||||
K = sqrt(k1**2+k2**2)
|
||||
|
||||
The shape of w and theta is the common shape of k1 and k2 according to the
|
||||
numpy broadcasting rules.
|
||||
|
||||
See also
|
||||
--------
|
||||
w2k
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> from numpy import arange
|
||||
>>> import wafo.spectrum.dispersion_relation as wsd
|
||||
>>> wsd.k2w(arange(0.01,.5,0.2))[0]
|
||||
array([ 0.3132092 , 1.43530485, 2.00551739])
|
||||
>>> wsd.k2w(arange(0.01,.5,0.2),h=20)[0]
|
||||
array([ 0.13914927, 1.43498213, 2.00551724])
|
||||
'''
|
||||
|
||||
k1i, k2i, hi, gi, u1i, u2i = atleast_1d(k1, k2, h, g, u1, u2)
|
||||
|
||||
if k1i.size == 0:
|
||||
return zeros_like(k1i)
|
||||
ku1 = k1i*u1i
|
||||
ku2 = k2i*u2i
|
||||
|
||||
theta = arctan2(k2, k1)
|
||||
|
||||
k = sqrt(k1i**2+k2i**2)
|
||||
w = where(k>0, ku1+ku2+sqrt(gi*k*tanh(k*hi)), 0.0)
|
||||
|
||||
cond = (w<0)
|
||||
if any(cond):
|
||||
txt0 = '''
|
||||
Waves and current are in opposite directions
|
||||
making some of the frequencies negative.
|
||||
Here we are forcing the negative frequencies to zero.
|
||||
'''
|
||||
warnings.warn(txt0)
|
||||
w = where(cond, 0.0, w) # force w to zero
|
||||
|
||||
return w, theta
|
||||
|
||||
def w2k(w, theta=0.0, h=inf, g=9.81, count_limit=100):
|
||||
'''
|
||||
Translates from frequency to wave number
|
||||
using the dispersion relation
|
||||
|
||||
Parameters
|
||||
----------
|
||||
w : array-like
|
||||
angular frequency [rad/s].
|
||||
theta : array-like, optional
|
||||
direction [rad].
|
||||
h : real scalar, optional
|
||||
water depth [m].
|
||||
g : real scalar or array-like of size 2.
|
||||
constant of gravity [m/s**2] or 3D normalizing constant
|
||||
|
||||
Returns
|
||||
-------
|
||||
k1, k2 : ndarray
|
||||
wave numbers [rad/m] along dimension 1 and 2.
|
||||
|
||||
Description
|
||||
-----------
|
||||
Uses Newton Raphson method to find the wave number k in the dispersion relation
|
||||
w**2= g*k*tanh(k*h).
|
||||
The solution k(w) => k1 = k(w)*cos(theta)
|
||||
k2 = k(w)*sin(theta)
|
||||
The size of k1,k2 is the common shape of w and theta according to numpy
|
||||
broadcasting rules. If w or theta is scalar it functions as a constant
|
||||
matrix of the same shape as the other.
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> import pylab as plb
|
||||
>>> import wafo.spectrum.dispersion_relation as wsd
|
||||
>>> w = plb.linspace(0,3);
|
||||
>>> h = plb.plot(w,w2k(w)[0])
|
||||
>>> wsd.w2k(range(4))[0]
|
||||
array([ 0. , 0.1019368 , 0.4077472 , 0.91743119])
|
||||
>>> wsd.w2k(range(4),h=20)[0]
|
||||
array([ 0. , 0.10503601, 0.40774726, 0.91743119])
|
||||
|
||||
>>> plb.close('all')
|
||||
|
||||
See also
|
||||
--------
|
||||
k2w
|
||||
'''
|
||||
wi, th, hi, gi = atleast_1d(w, theta, h, g)
|
||||
|
||||
if wi.size == 0:
|
||||
return zeros_like(wi)
|
||||
|
||||
k = 1.0*sign(wi)*wi**2.0 / gi[0] # deep water
|
||||
if (hi > 10. ** 25).all():
|
||||
k2 = k*sin(th)*gi[0]/gi[-1] #size np x nf
|
||||
k1 = k*cos(th)
|
||||
return k1, k2
|
||||
|
||||
|
||||
if gi.size > 1:
|
||||
txt0 = '''
|
||||
Finite depth in combination with 3D normalization (len(g)=2) is not implemented yet.
|
||||
'''
|
||||
raise ValueError(txt0)
|
||||
|
||||
|
||||
find = flatnonzero
|
||||
eps = finfo(float).eps
|
||||
|
||||
oshape = k.shape
|
||||
wi, k, hi = wi.ravel(), k.ravel(), hi.ravel()
|
||||
|
||||
# Newton's Method
|
||||
# Permit no more than count_limit iterations.
|
||||
hi = hi * ones_like(k)
|
||||
hn = zeros_like(k)
|
||||
ix = find((wi<0) | (0<wi))
|
||||
|
||||
# Break out of the iteration loop for three reasons:
|
||||
# 1) the last update is very small (compared to x)
|
||||
# 2) the last update is very small (compared to sqrt(eps))
|
||||
# 3) There are more than 100 iterations. This should NEVER happen.
|
||||
count = 0
|
||||
while (ix.size>0 and count < count_limit):
|
||||
ki = k[ix]
|
||||
kh = ki * hi[ix]
|
||||
hn[ix] = (ki*tanh(kh)-wi[ix]**2.0/gi)/(tanh(kh)+kh/(cosh(kh)**2.0))
|
||||
knew = ki - hn[ix]
|
||||
# Make sure that the current guess is not zero.
|
||||
# When Newton's Method suggests steps that lead to zero guesses
|
||||
# take a step 9/10ths of the way to zero:
|
||||
ksmall = find(abs(knew)==0)
|
||||
if ksmall.size>0:
|
||||
knew[ksmall] = ki[ksmall] / 10.0
|
||||
hn[ix[ksmall]] = ki[ksmall]-knew[ksmall]
|
||||
|
||||
k[ix] = knew
|
||||
# disp(['Iteration ',num2str(count),' Number of points left: ' num2str(length(ix)) ]),
|
||||
|
||||
ix = find((abs(hn) > sqrt(eps)*abs(k)) * abs(hn) > sqrt(eps))
|
||||
count += 1
|
||||
|
||||
if count == count_limit:
|
||||
txt1 = ''' W2K did not converge.
|
||||
The maximum error in the last step was: %13.8f''' % max(hn[ix])
|
||||
warnings.warn(txt1)
|
||||
|
||||
k.shape = oshape
|
||||
|
||||
k2 = k*sin(th)
|
||||
k1 = k*cos(th)
|
||||
return k1, k2
|
||||
|
||||
def main():
|
||||
import doctest
|
||||
doctest.testmod()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
"""
|
||||
Dispersion relation module
|
||||
--------------------------
|
||||
k2w - Translates from wave number to frequency
|
||||
w2k - Translates from frequency to wave number
|
||||
"""
|
||||
import warnings
|
||||
#import numpy as np
|
||||
from numpy import (atleast_1d, sqrt, ones_like, zeros_like, arctan2, where,
|
||||
tanh, any, sin, cos, sign, inf,
|
||||
flatnonzero, finfo, cosh, abs)
|
||||
|
||||
__all__ = ['k2w', 'w2k']
|
||||
|
||||
|
||||
def k2w(k1, k2=0e0, h=inf, g=9.81, u1=0e0, u2=0e0):
|
||||
''' Translates from wave number to frequency
|
||||
using the dispersion relation
|
||||
|
||||
Parameters
|
||||
----------
|
||||
k1 : array-like
|
||||
wave numbers [rad/m].
|
||||
k2 : array-like, optional
|
||||
second dimension wave number
|
||||
h : real scalar, optional
|
||||
water depth [m].
|
||||
g : real scalar, optional
|
||||
acceleration of gravity, see gravity
|
||||
u1, u2 : real scalars, optional
|
||||
current velocity [m/s] along dimension 1 and 2.
|
||||
note: when u1 != 0 or u2 != 0, theta is not calculated correctly
|
||||
|
||||
Returns
|
||||
-------
|
||||
w : ndarray
|
||||
angular frequency [rad/s].
|
||||
theta : ndarray
|
||||
direction [rad].
|
||||
|
||||
Dispersion relation
|
||||
-------------------
|
||||
w = sqrt(g*K*tanh(K*h)) ( 0 < w < inf)
|
||||
theta = arctan2(k2,k1) (-pi < theta < pi)
|
||||
where
|
||||
K = sqrt(k1**2+k2**2)
|
||||
|
||||
The shape of w and theta is the common shape of k1 and k2 according to the
|
||||
numpy broadcasting rules.
|
||||
|
||||
See also
|
||||
--------
|
||||
w2k
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> from numpy import arange
|
||||
>>> import wafo.wave_theory.dispersion_relation as wsd
|
||||
>>> wsd.k2w(arange(0.01,.5,0.2))[0]
|
||||
array([ 0.3132092 , 1.43530485, 2.00551739])
|
||||
>>> wsd.k2w(arange(0.01,.5,0.2),h=20)[0]
|
||||
array([ 0.13914927, 1.43498213, 2.00551724])
|
||||
'''
|
||||
|
||||
k1i, k2i, hi, gi, u1i, u2i = atleast_1d(k1, k2, h, g, u1, u2)
|
||||
|
||||
if k1i.size == 0:
|
||||
return zeros_like(k1i)
|
||||
ku1 = k1i * u1i
|
||||
ku2 = k2i * u2i
|
||||
|
||||
theta = arctan2(k2, k1)
|
||||
|
||||
k = sqrt(k1i ** 2 + k2i ** 2)
|
||||
w = where(k > 0, ku1 + ku2 + sqrt(gi * k * tanh(k * hi)), 0.0)
|
||||
|
||||
cond = (w < 0)
|
||||
if any(cond):
|
||||
txt0 = '''
|
||||
Waves and current are in opposite directions
|
||||
making some of the frequencies negative.
|
||||
Here we are forcing the negative frequencies to zero.
|
||||
'''
|
||||
warnings.warn(txt0)
|
||||
w = where(cond, 0.0, w) # force w to zero
|
||||
|
||||
return w, theta
|
||||
|
||||
|
||||
def w2k(w, theta=0.0, h=inf, g=9.81, count_limit=100):
|
||||
'''
|
||||
Translates from frequency to wave number
|
||||
using the dispersion relation
|
||||
|
||||
Parameters
|
||||
----------
|
||||
w : array-like
|
||||
angular frequency [rad/s].
|
||||
theta : array-like, optional
|
||||
direction [rad].
|
||||
h : real scalar, optional
|
||||
water depth [m].
|
||||
g : real scalar or array-like of size 2.
|
||||
constant of gravity [m/s**2] or 3D normalizing constant
|
||||
|
||||
Returns
|
||||
-------
|
||||
k1, k2 : ndarray
|
||||
wave numbers [rad/m] along dimension 1 and 2.
|
||||
|
||||
Description
|
||||
-----------
|
||||
Uses the Newton-Raphson method to find the wave number k in the dispersion
|
||||
relation
|
||||
w**2= g*k*tanh(k*h).
|
||||
The solution k(w) => k1 = k(w)*cos(theta)
|
||||
k2 = k(w)*sin(theta)
|
||||
The size of k1,k2 is the common shape of w and theta according to numpy
|
||||
broadcasting rules. If w or theta is scalar, it is broadcast to the
|
||||
shape of the other.
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> import pylab as plb
|
||||
>>> import wafo.wave_theory.dispersion_relation as wsd
|
||||
>>> w = plb.linspace(0,3);
|
||||
>>> h = plb.plot(w,w2k(w)[0])
|
||||
>>> wsd.w2k(range(4))[0]
|
||||
array([ 0. , 0.1019368 , 0.4077472 , 0.91743119])
|
||||
>>> wsd.w2k(range(4),h=20)[0]
|
||||
array([ 0. , 0.10503601, 0.40774726, 0.91743119])
|
||||
|
||||
>>> plb.close('all')
|
||||
|
||||
See also
|
||||
--------
|
||||
k2w
|
||||
'''
|
||||
wi, th, hi, gi = atleast_1d(w, theta, h, g)
|
||||
|
||||
if wi.size == 0:
|
||||
return zeros_like(wi)
|
||||
|
||||
k = 1.0 * sign(wi) * wi ** 2.0 / gi[0] # deep water
|
||||
if (hi > 10. ** 25).all():
|
||||
k2 = k * sin(th) * gi[0] / gi[-1] # size np x nf
|
||||
k1 = k * cos(th)
|
||||
return k1, k2
|
||||
|
||||
if gi.size > 1:
|
||||
raise ValueError('Finite depth in combination with 3D normalization' +
|
||||
' (len(g)=2) is not implemented yet.')
|
||||
|
||||
find = flatnonzero
|
||||
eps = finfo(float).eps
|
||||
|
||||
oshape = k.shape
|
||||
wi, k, hi = wi.ravel(), k.ravel(), hi.ravel()
|
||||
|
||||
# Newton's Method
|
||||
# Permit no more than count_limit iterations.
|
||||
hi = hi * ones_like(k)
|
||||
hn = zeros_like(k)
|
||||
ix = find((wi < 0) | (0 < wi))
|
||||
|
||||
# Break out of the iteration loop for three reasons:
|
||||
# 1) the last update is very small (compared to x)
|
||||
# 2) the last update is very small (compared to sqrt(eps))
|
||||
# 3) There are more than 100 iterations. This should NEVER happen.
|
||||
count = 0
|
||||
while (ix.size > 0 and count < count_limit):
|
||||
ki = k[ix]
|
||||
kh = ki * hi[ix]
|
||||
hn[ix] = (ki * tanh(kh) - wi[ix] ** 2.0 / gi) / \
|
||||
(tanh(kh) + kh / (cosh(kh) ** 2.0))
|
||||
knew = ki - hn[ix]
|
||||
# Make sure that the current guess is not zero.
|
||||
# When Newton's Method suggests steps that lead to zero guesses
|
||||
# take a step 9/10ths of the way to zero:
|
||||
ksmall = find(abs(knew) == 0)
|
||||
if ksmall.size > 0:
|
||||
knew[ksmall] = ki[ksmall] / 10.0
|
||||
hn[ix[ksmall]] = ki[ksmall] - knew[ksmall]
|
||||
|
||||
k[ix] = knew
|
||||
# disp(['Iteration ',num2str(count),' Number of points left: '
|
||||
# num2str(length(ix)) ]),
|
||||
|
||||
ix = find((abs(hn) > sqrt(eps) * abs(k)) & (abs(hn) > sqrt(eps)))
|
||||
count += 1
|
||||
|
||||
if count == count_limit:
|
||||
warnings.warn('W2K did not converge. The maximum error in the ' +
|
||||
'last step was: %13.8f' % max(hn[ix]))
|
||||
|
||||
k.shape = oshape
|
||||
|
||||
k2 = k * sin(th)
|
||||
k1 = k * cos(th)
|
||||
return k1, k2
|
||||
|
||||
|
||||
def test_docstrings():
|
||||
import doctest
|
||||
print('Testing docstrings in %s' % __file__)
|
||||
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_docstrings()
|
||||
|
@ -1,31 +1,35 @@
|
||||
'''
|
||||
Created on 19 July 2010
|
||||
|
||||
@author: pab
|
||||
'''
|
||||
import numpy as np
|
||||
from wafo.wave_theory.dispersion_relation import w2k,k2w #@UnusedImport
|
||||
|
||||
def test_k2w_infinite_water_depth():
|
||||
vals = k2w(np.arange(0.01,.5,0.2))[0]
|
||||
true_vals = np.array([ 0.3132092 , 1.43530485, 2.00551739])
|
||||
assert((np.abs(vals-true_vals)<1e-7).all())
|
||||
|
||||
def test_k2w_finite_water_depth():
|
||||
vals = k2w(np.arange(0.01,.5,0.2),h=20)[0]
|
||||
true_vals = np.array([ 0.13914927, 1.43498213, 2.00551724])
|
||||
assert((np.abs(vals-true_vals)<1e-7).all())
|
||||
|
||||
def test_w2k_infinite_water_depth():
|
||||
vals = w2k(range(4))[0]
|
||||
true_vals = np.array([ 0. , 0.1019368 , 0.4077472 , 0.91743119])
|
||||
assert((np.abs(vals-true_vals)<1e-7).all())
|
||||
|
||||
def test_w2k_finite_water_depth():
|
||||
vals = w2k(range(4),h=20)[0]
|
||||
true_vals = np.array([ 0. , 0.10503601, 0.40774726, 0.91743119])
|
||||
assert((np.abs(vals-true_vals)<1e-7).all())
|
||||
|
||||
if __name__ == '__main__':
|
||||
import nose
|
||||
nose.run()
|
||||
'''
|
||||
Created on 19 July 2010
|
||||
|
||||
@author: pab
|
||||
'''
|
||||
import numpy as np
|
||||
from wafo.wave_theory.dispersion_relation import w2k, k2w # @UnusedImport
|
||||
|
||||
|
||||
def test_k2w_infinite_water_depth():
|
||||
vals = k2w(np.arange(0.01, .5, 0.2))[0]
|
||||
true_vals = np.array([0.3132092, 1.43530485, 2.00551739])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
|
||||
|
||||
def test_k2w_finite_water_depth():
|
||||
vals = k2w(np.arange(0.01, .5, 0.2), h=20)[0]
|
||||
true_vals = np.array([0.13914927, 1.43498213, 2.00551724])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
|
||||
|
||||
def test_w2k_infinite_water_depth():
|
||||
vals = w2k(range(4))[0]
|
||||
true_vals = np.array([0., 0.1019368, 0.4077472, 0.91743119])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
|
||||
|
||||
def test_w2k_finite_water_depth():
|
||||
vals = w2k(range(4), h=20)[0]
|
||||
true_vals = np.array([0., 0.10503601, 0.40774726, 0.91743119])
|
||||
assert((np.abs(vals - true_vals) < 1e-7).all())
|
||||
|
||||
if __name__ == '__main__':
|
||||
import nose
|
||||
nose.run()
|
||||
|