Using Minobe-san's theory of estimation errors for spectral density from last time, I built a module (psd_withError) that extends the matplotlib.mlab.psd function used previously.
When used stand-alone (the part under if __name__ == '__main__':), the program checks that it reproduces Minobe-san's script. The results do not come out identical, because
- Minobe-san's script divides by the number of data points after the fft and before squaring,
- matplotlib's psd function multiplies by 2 when taking the one-sided spectrum, and
- matplotlib's psd function compensates for the amplitude reduction caused by applying the data taper.
If you want identical results, you can get them by uncommenting the commented-out line in the program.
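As a quick check on those three differences, here is a small sketch of my own (not part of the original module; it assumes a recent matplotlib in which mlab.psd returns a 1-D array). With a flat window and scale_by_freq=False, the only remaining difference is the factor of 2 on the interior one-sided frequencies:

import numpy as np
import matplotlib.mlab as mlab

x = np.random.randn(256)
N = len(x)

# "divide by N after the fft, then square", as in Minobe-san's script
raw = np.abs(np.fft.fft(x)/N)[:N//2 + 1]**2

# mlab.psd with a flat window; scale_by_freq=False disables the 1/Fs scaling
Pxx, freqs = mlab.psd(x, NFFT=N, Fs=1, window=mlab.window_none,
                      scale_by_freq=False)
print(np.allclose(Pxx[1:-1], 2.*raw[1:-1]))   # expected: True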
psd_withError.py
import numpy as np
from numpy.random import randn
import scipy.signal as signal
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
#------------------------------------------
def psd_withError(x, NFFT=256, Fs=2, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, smooth=None, Confidencelevel=0.9):
"""
Extention of matplotlib.mlab.psd
The power spectral density with upper and lower limits within confidence level
You should not use Weltch's method here, instead you can use smoother in frequency domain with *smooth*
Same input as matplotlib.mlab.psd except the following new inputs
*smooth*:
smoothing window in frequency domain
for example, 1-2-1 filter is [1,2,1]
default is None
*Confidencelevel*
Confidence level to estimate upper and lower estimates.
Default is 0.9
Returns the tuple (*Pxx*, *freqs*,*upper*,*lower*).
upper and lower are limits of psd within confidence level.
"""
    Pxxtemp, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq)
    # Uncomment the next line if you want to reproduce Minobe-san's result:
    #Pxxtemp = Pxxtemp/float(NFFT)/2.*((np.abs(window)**2).mean())
#smoothing
smooth=np.asarray(smooth)
if smooth is not None:
avfnc=smooth/float(sum(smooth))
Pxx=np.convolve(Pxxtemp[:,0],avfnc,mode="same")
else:
Pxx=Pxxtemp[:,0]
avfnc=np.asarray([1.])
    # estimate upper and lower limits with the equivalent degrees of freedom
    if pad_to is None:
        pad_to = NFFT
    if np.iterable(window):
        assert(len(window) == NFFT)
        windowVals = window
    else:
        windowVals = window(np.ones((NFFT,), x.dtype))
    # equivalent degrees of freedom, accounting for frequency smoothing,
    # zero padding and the data taper
    edof = 1. + (1./np.sum(avfnc**2) - 1.)*float(NFFT)/float(pad_to)*np.mean(windowVals)
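    # Note (added explanation): a smoothed spectral estimate is approximately
    # chi-squared distributed with 2*edof degrees of freedom; the confidence
    # band below scales Pxx by ratios of chi-squared quantiles to the median.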
    a1 = (1. - Confidencelevel)/2.
    a2 = 1. - a1
    lower = Pxx*stats.chi2.ppf(a1, 2*edof)/stats.chi2.ppf(0.50, 2*edof)
    upper = Pxx*stats.chi2.ppf(a2, 2*edof)/stats.chi2.ppf(0.50, 2*edof)
    return Pxx, freqs, upper, lower
########################################################
if __name__ == '__main__':
    plt.figure()
    for npnl in range(4):
        tlng = 100; nsmpl = 1000
        if npnl == 0:
            nfft = 100
            taper = signal.boxcar(tlng)
            avfnc = [1, 1, 1, 1, 1]
            cttl = 'no padding, boxcar, 5-running'
        elif npnl == 1:
            nfft = 100
            taper = signal.hamming(tlng)
            avfnc = [1, 1, 1, 1, 1]
            cttl = 'no padding, hamming, 5-running'
        elif npnl == 2:
            nfft = 200
            taper = signal.boxcar(tlng)
            avfnc = [1, 1, 1, 1, 1]
            cttl = 'double padding, boxcar, 5-running'
        elif npnl == 3:
            nfft = 200
            taper = signal.hamming(tlng)
            avfnc = np.convolve([1, 2, 1], [1, 2, 1])
            cttl = 'double padding, hamming, double 1-2-1'
        tsrs = randn(tlng, nsmpl)
        ds_psd = np.zeros([nfft//2 + 1, nsmpl])
        upper_psd = np.zeros([nfft//2 + 1, nsmpl])
        lower_psd = np.zeros([nfft//2 + 1, nsmpl])
        for n in range(nsmpl):
            a, b, u, l = psd_withError(tsrs[:, n], NFFT=tlng, pad_to=nfft, Fs=1, window=taper, smooth=avfnc, Confidencelevel=0.9)
            ds_psd[:, n] = a[:]
            upper_psd[:, n] = u[:]
            lower_psd[:, n] = l[:]
        frq = b[:]
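        # Added note: the 5th/95th percentiles across the 1000 realizations
        # give an empirical 90% interval to compare against the chi-squared band.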
        # 90% confidence interval by Monte Carlo
        srt_psd = np.sort(ds_psd, axis=1)
        c = np.zeros([frq.size, 2])
        c[:, 0] = np.log10(srt_psd[:, int(nsmpl*0.05)])
        c[:, 1] = np.log10(srt_psd[:, int(nsmpl*0.95)])
        # estimate from the equivalent degrees of freedom
        ce = np.zeros([frq.size, 2])
        ce[:, 0] = np.log10(np.sort(upper_psd, axis=1)[:, int(nsmpl*0.5)])
        ce[:, 1] = np.log10(np.sort(lower_psd, axis=1)[:, int(nsmpl*0.5)])
        # plot
        plt.subplot(2, 2, npnl + 1)
        plt.plot(frq, c, 'b', frq, ce, 'r')
        plt.title(cttl)
        plt.xlabel('frq')
        plt.ylabel('psd')
        if npnl == 0:
            plt.legend(('Monte-Carlo', '', 'Theory'), loc='lower center', labelspacing=0.05)
    plt.subplots_adjust(wspace=0.6, hspace=0.4)
    plt.savefig("psd_withError.png")
    plt.show()
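To call the module from another script instead of running it stand-alone, use it just like mlab.psd plus the two new keywords. A minimal sketch (the input series and parameter values are placeholders; note NFFT equals the series length, since segment averaging is not accounted for in the error estimate):

import numpy as np
from psd_withError import psd_withError

x = np.random.randn(1024)    # placeholder time series
Pxx, freqs, upper, lower = psd_withError(x, NFFT=len(x), Fs=1.0,
                                         smooth=[1, 2, 1],
                                         Confidencelevel=0.95)
# upper and lower bracket Pxx at the 95% confidence level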