This page collects typical usage examples of Python's pylab.mean function. If you are wondering what pylab.mean does, how to call it, or what real-world usage looks like, the curated examples below may help.
Twenty code examples of the mean function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: data_to_ch
def data_to_ch(data):
    ch = {}
    for ch_ind in range(1, 97):
        ch[ch_ind] = {}
        ch[ch_ind]["bl"] = data[ch_ind]["blanks"]
        ch[ch_ind]["bl_mu"] = pl.mean(ch[ch_ind]["bl"])
        ch[ch_ind]["bl_sem"] = pl.std(ch[ch_ind]["bl"]) / pl.sqrt(len(ch[ch_ind]["bl"]))
        for ind in sorted(data[ch_ind].keys()):
            if ind != "blanks":
                k = ind[0]
                if k not in ch[ch_ind]:
                    ch[ch_ind][k] = {}
                    ch[ch_ind][k]["fr"] = []
                    ch[ch_ind][k]["fr_mu"] = []
                    ch[ch_ind][k]["fr_sem"] = []
                    ch[ch_ind][k]["pos_y"] = []
                    ch[ch_ind][k]["dprime"] = []
                ch[ch_ind][k]["fr"].append(data[ch_ind][ind]["on"])
                ch[ch_ind][k]["fr_mu"].append(pl.mean(data[ch_ind][ind]["on"]))
                ch[ch_ind][k]["fr_sem"].append(pl.std(data[ch_ind][ind]["on"]) / pl.sqrt(len(data[1][ind]["on"])))
                ch[ch_ind][k]["pos_y"].append(ind[2])
                # print ch[ch_ind][k]['pos_y']
                # print pl.std(data[ch_ind][ind]['on'])
                ch[ch_ind][k]["dprime"].append(
                    (pl.mean(data[ch_ind][ind]["on"]) - ch[ch_ind]["bl_mu"])
                    / ((pl.std(ch[ch_ind]["bl"]) + pl.std(data[ch_ind][ind]["on"])) / 2)
                )
    # print ch[ch_ind]['OSImage_5']['pos_y']
    return ch
Author: hahong, Project: array_proj, Lines: 29, Source: plot_RSVP_POS.py
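A minimal usage sketch of the same mean/SEM/d-prime pattern on made-up spike counts (the arrays below are invented for illustration; only pylab is required):

import pylab as pl

blanks = pl.array([2.0, 3.0, 1.0, 4.0, 2.0])   # baseline ("blank") responses, made-up data
on = pl.array([7.0, 9.0, 6.0, 8.0, 10.0])      # stimulus-on responses, made-up data

bl_mu = pl.mean(blanks)
bl_sem = pl.std(blanks) / pl.sqrt(len(blanks))
# d' as in the example above: difference of means divided by the average std
dprime = (pl.mean(on) - bl_mu) / ((pl.std(blanks) + pl.std(on)) / 2)
print(bl_mu, bl_sem, dprime)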
Example 2: broadgauss
def broadgauss(x, y, sigma):
    '''Gaussian function for broadening
    '''
    bla = True
    plot = False
    c = 299792458.
    if bla:
        print " sigma = ", round(sigma, 4), " km/s"
    sigma = sigma * 1.0e3/c * pl.mean(x)  # sigma in Å
    if bla:
        print " sigma = ", round(sigma, 3), " Å "
    xk = x - pl.mean(x)
    g = make_gauss(1, 0, sigma)
    yk = [g(i) for i in xk]
    if bla:
        print " Integral of the gaussian function: ", pl.trapz(yk, xk).__format__('5.3')
    if plot:
        pl.figure(2)
        pl.plot(xk, yk, '+-')
        pl.show()
    #if bla: print" size y:", y.size
    y = pl.convolve(y, yk, mode='same')
    #if bla: print" size y:", y.size
    return y/max(y)
Author: thibaultmerle, Project: pspec, Lines: 33, Source: pspec.py
Example 3: scatter_stats
def scatter_stats(db, s1, s2, f1=None, f2=None, **kwargs):
    if f1 == None:
        f1 = lambda x: x  # identity function
    if f2 == None:
        f2 = f1
    x = []
    xerr = []
    y = []
    yerr = []
    for k in db:
        x_k = [f1(x_ki) for x_ki in db[k].__getattribute__(s1).gettrace()]
        y_k = [f2(y_ki) for y_ki in db[k].__getattribute__(s2).gettrace()]
        x.append(pl.mean(x_k))
        xerr.append(pl.std(x_k))
        y.append(pl.mean(y_k))
        yerr.append(pl.std(y_k))
        pl.text(x[-1], y[-1], " %s" % k, fontsize=8, alpha=0.4, zorder=-1)
    default_args = {"fmt": "o", "ms": 10}
    default_args.update(kwargs)
    pl.errorbar(x, y, xerr=xerr, yerr=yerr, **default_args)
    pl.xlabel(s1)
    pl.ylabel(s2)
Author: aflaxman, Project: bednet_stock_and_flow, Lines: 30, Source: explore.py
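A self-contained sketch of the same plotting pattern, showing the mean of each group with its standard deviation as error bars (the groups dict below is invented; no PyMC trace database is needed):

import pylab as pl

groups = {"a": pl.randn(100) + 1.0, "b": pl.randn(100) + 2.0}   # made-up samples
x = [pl.mean(groups[k]) for k in sorted(groups)]
xerr = [pl.std(groups[k]) for k in sorted(groups)]
pl.errorbar(range(len(x)), x, yerr=xerr, fmt="o", ms=10)
pl.xticks(range(len(x)), sorted(groups))
pl.show()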
Example 4: compare_models
def compare_models(db, stoch="itn coverage", stat_func=None, plot_type="", **kwargs):
    if stat_func == None:
        stat_func = lambda x: x
    X = {}
    for k in sorted(db.keys()):
        c = k.split("_")[2]
        X[c] = []
    for k in sorted(db.keys()):
        c = k.split("_")[2]
        X[c].append([stat_func(x_ki) for x_ki in db[k].__getattribute__(stoch).gettrace()])
    x = pl.array([pl.mean(xc[0]) for xc in X.values()])
    xerr = pl.array([pl.std(xc[0]) for xc in X.values()])
    y = pl.array([pl.mean(xc[1]) for xc in X.values()])
    yerr = pl.array([pl.std(xc[1]) for xc in X.values()])
    if plot_type == "scatter":
        default_args = {"fmt": "o", "ms": 10}
        default_args.update(kwargs)
        for c in X.keys():
            pl.text(pl.mean(X[c][0]), pl.mean(X[c][1]), " %s" % c, fontsize=8, alpha=0.4, zorder=-1)
        pl.errorbar(x, y, xerr=xerr, yerr=yerr, **default_args)
        pl.xlabel("First Model")
        pl.ylabel("Second Model")
        pl.plot([0, 1], [0, 1], alpha=0.5, linestyle="--", color="k", linewidth=2)
    elif plot_type == "rel_diff":
        d1 = sorted(100 * (x - y) / x)
        d2 = sorted(100 * (xerr - yerr) / xerr)
        pl.subplot(2, 1, 1)
        pl.title("Percent Model 2 deviates from Model 1")
        pl.plot(d1, "o")
        pl.xlabel("Countries sorted by deviation in mean")
        pl.ylabel("deviation in mean (%)")
        pl.subplot(2, 1, 2)
        pl.plot(d2, "o")
        pl.xlabel("Countries sorted by deviation in std err")
        pl.ylabel("deviation in std err (%)")
    elif plot_type == "abs_diff":
        d1 = sorted(x - y)
        d2 = sorted(xerr - yerr)
        pl.subplot(2, 1, 1)
        pl.title("Percent Model 2 deviates from Model 1")
        pl.plot(d1, "o")
        pl.xlabel("Countries sorted by deviation in mean")
        pl.ylabel("deviation in mean")
        pl.subplot(2, 1, 2)
        pl.plot(d2, "o")
        pl.xlabel("Countries sorted by deviation in std err")
        pl.ylabel("deviation in std err")
    else:
        assert 0, "plot_type must be abs_diff, rel_diff, or scatter"
    return pl.array([x, y, xerr, yerr])
Author: aflaxman, Project: bednet_stock_and_flow, Lines: 60, Source: explore.py
Example 5: calZsocre
def calZsocre(self, core, surface, sampleSize):
    coreMean = mean(core)
    s = []
    for i in range(sampleSize):
        s.append(mean(sample(surface, len(core))))
    sig = sqrt(var(s))
    return (coreMean - mean(s)) / sig
Author: kumar-physics, Project: eppic-pred, Lines: 7, Source: analyzeSurface.py
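A hedged, self-contained version of the same resampling z-score: compare the mean of a small "core" set against the distribution of means of equally sized random samples drawn from a larger "surface" set (all values below are invented):

import pylab as pl
from random import sample

core = [5.1, 4.8, 5.3, 5.0]                  # made-up core values
surface = list(pl.randn(200) + 4.0)          # made-up surface values

means = [pl.mean(sample(surface, len(core))) for _ in range(1000)]
z = (pl.mean(core) - pl.mean(means)) / pl.sqrt(pl.var(means))
print(z)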
Example 6: flow_rate_hist
def flow_rate_hist(sheets):
    ant_rates = []
    weights = []
    for sheet in sheets:
        ants, seconds, weight = flow_rate(sheet)
        ant_rate = seconds / ants
        #ant_rate = ants / seconds
        ant_rates.append(ant_rate)
        weights.append(float(weight))
        #weights.append(seconds)
    weights = pylab.array(weights)
    weights /= sum(weights)
    #print "ants per second"
    print "seconds per ant"
    mu = pylab.mean(ant_rates)
    print "mean", pylab.mean(ant_rates)
    wmean = pylab.average(ant_rates, weights=weights)
    print "weighted mean", wmean
    print "median", pylab.median(ant_rates)
    print "std", pylab.std(ant_rates, ddof=1)
    ant_rates = pylab.array(ant_rates)
    werror = (ant_rates - mu) * weights
    print "weighted std", ((sum(werror ** 2))) ** 0.5
    print "weighted std 2", (pylab.average((ant_rates - mu)**2, weights=weights)) ** 0.5
    pylab.figure()
    pylab.hist(ant_rates)
    pylab.savefig('ant_flow_rates.pdf', format='pdf')
    pylab.close()
Author: arjunc12, Project: Ants, Lines: 30, Source: flow_rate.py
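The weighted statistics printed above can be checked on a toy array; this sketch uses invented rates and weights and mirrors the formulas in the example:

import pylab as pl

ant_rates = pl.array([1.5, 2.0, 2.5, 3.0])    # made-up seconds-per-ant values
weights = pl.array([10.0, 20.0, 5.0, 15.0])   # made-up weights
weights /= sum(weights)

mu = pl.mean(ant_rates)
wmean = pl.average(ant_rates, weights=weights)
# weighted std as computed in the example (deviations taken from the unweighted mean)
wstd = pl.average((ant_rates - mu) ** 2, weights=weights) ** 0.5
print(mu, wmean, wstd)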
Example 7: latent_simplex
def latent_simplex(X):
    """ TODO: describe this function"""
    N, T, J = X.shape
    alpha = []
    for t in range(T):
        alpha_t = []
        for j in range(J):
            mu_alpha_tj = pl.mean(X[:,t,j]) / pl.mean(X[:,t,:], 0).sum()
            alpha_t.append(mc.Normal('alpha_%d_%d'%(t,j), mu=0., tau=1., value=pl.log(mu_alpha_tj)))
        alpha.append(alpha_t)

    @mc.deterministic
    def pi(alpha=alpha):
        pi = pl.zeros((T, J))
        for t in range(T):
            pi[t] = pl.reshape(pl.exp(alpha[t]), J) / pl.sum(pl.exp(alpha[t]))
        return pi

    @mc.observed
    def X_obs(pi=pi, value=X.mean(0), sigma=X.std(0), pow=2):
        """ TODO: experiment with different values of pow, although
        pow=2 seems like a fine choice based on our limited
        experience."""
        return -((pl.absolute(pi - value) / sigma)**pow).sum()

    return vars()
Author: ldwyerlindgren, Project: pymc-cod-correct, Lines: 27, Source: models.py
Example 8: plot2
def plot2():
    import pylab as pl
    hs, ds = [], []
    for event, time in load():
        if event == main_start:
            start_time = time
        elif event == main_end:
            d0, h0 = days_hours(start_time)
            d1, h1 = days_hours(time)
            hs.append((h0, h1))
            ds.append((d0, d1))
            pl.plot([d0, d1], [h0, h1], 'b')
    ihs, fhs = zip(*hs)
    ids, fds = zip(*ds)
    pl.plot(ids, ihs, 'g')
    pl.plot([ids[0], ids[-1]], [pl.mean(ihs)] * 2, 'g--')
    pl.plot(fds, fhs, 'r')
    pl.plot([fds[0], fds[-1]], [pl.mean(fhs)] * 2, 'r--')
    f, i = pl.mean(fhs), pl.mean(ihs)
    pl.plot([fds[0], fds[-1]], [(f + i) / 2] * 2, 'b--')
    print i, f, f - i, (f + i) / 2
    std_i, std_f = pl.std(ihs), pl.std(fhs)
    print std_i, std_f
    pl.xlim(ids[0], fds[-1])
    pl.ylim(4, 28)
    pl.grid(True)
    pl.xlabel('Time [day]')
    pl.ylabel('Day interval [hours]')
    pl.show()
Author: maurob, Project: timestamp, Lines: 29, Source: timestamp.py
Example 9: build_moving5
def build_moving5(days, avg):
    moving5 = array(zeros(len(days)-4), dtype=float)
    cday = 1
    moving5[0] = pylab.mean(avg[0:4])
    for a in avg[5:]:
        moving5[cday] = pylab.mean(avg[cday:cday+4])
        cday += 1
    return moving5
Author: PeterGottesman, Project: eve-central.com, Lines: 8, Source: market_stat.py
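Note that avg[cday:cday+4] spans only four elements, so the window there is shorter than the function name suggests. A hedged sketch of a true 5-element moving mean with pylab.mean (data and window length invented):

import pylab as pl

avg = pl.array([10., 12., 11., 13., 14., 15., 13., 12.])   # made-up daily averages
window = 5
moving = pl.array([pl.mean(avg[i:i + window]) for i in range(len(avg) - window + 1)])
print(moving)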
Example 10: perlin_covariance_corr
def perlin_covariance_corr(delta, N=1000000, bound=1):
    ts = bound*pl.rand(N)
    tds = ts+delta
    ps = [p(t) for t in ts]
    pds = [p(td) for td in tds]
    #cov = pl.mean([pp*pd for pp,pd in zip(ps,pds)])
    cov = pl.mean([(pp-pd)**2 for pp,pd in zip(ps,pds)])
    corr = pl.mean([pp*pd for pp,pd in zip(ps,pds)])
    return cov, corr
Author: DiNAi, Project: hueperlin, Lines: 9, Source: perlin_experiments.py
Example 11: int_f
def int_f(a, fs=1.):
    """
    A fourier-based integrator.

    ===========
    Parameters:
    ===========
    a : *array* (1D)
        The array which should be integrated
    fs : *float*
        sampling time of the data

    ========
    Returns:
    ========
    y : *array* (1D)
        The integrated array
    """

    if False:
        # version with "mirrored" code
        xp = hstack([a, a[::-1]])
        int_fluc = int_f0(xp, float(fs))[:len(a)]
        baseline = mean(a) * arange(len(a)) / float(fs)
        return int_fluc + baseline - int_fluc[0]

    # old version
    baseline = mean(a) * arange(len(a)) / float(fs)
    int_fluc = int_f0(a, float(fs))
    return int_fluc + baseline - int_fluc[0]

    # old code - remove eventually (comment on 02/2014)
    # periodify
    if False:
        baseline = linspace(a[0], a[-1], len(a))
        a0 = a - baseline
        m = a0[-1] - a0[-2]
        b2 = linspace(0, -.5 * m, len(a))
        baseline -= b2
        a0 += b2
        a2 = hstack([a0, -1. * a0[1:][::-1]])  # "smooth" periodic signal
        dbase = baseline[1] - baseline[0]
        t_vec = arange(len(a)) / float(fs)
        baseint = baseline[0] * t_vec + .5 * dbase * t_vec ** 2
        # define frequencies
        T = len(a2) / float(fs)
        freqs = 1. / T * arange(len(a2))
        freqs[len(freqs) // 2 + 1:] -= float(fs)
        spec = fft.fft(a2)
        spec_i = zeros_like(spec, dtype=complex)
        spec_i[1:] = spec[1:] / (2j * pi * freqs[1:])
        res_int = fft.ifft(spec_i).real[:len(a0)] + baseint
        return res_int - res_int[0]
Author: MMaus, Project: mutils, Lines: 57, Source: fourier.py
Example 12: xyamb
def xyamb(xytab, qu, xyout=''):

    mytb = taskinit.tbtool()

    if not isinstance(qu, tuple):
        raise Exception, 'qu must be a tuple: (Q,U)'

    if xyout == '':
        xyout = xytab
    if xyout != xytab:
        os.system('cp -r ' + xytab + ' ' + xyout)

    QUexp = complex(qu[0], qu[1])
    print 'Expected QU = ', qu  # , ' (',pl.angle(QUexp)*180/pi,')'

    mytb.open(xyout, nomodify=False)

    QU = mytb.getkeyword('QU')['QU']
    P = pl.sqrt(QU[0,:]**2 + QU[1,:]**2)

    nspw = P.shape[0]
    for ispw in range(nspw):
        st = mytb.query('SPECTRAL_WINDOW_ID==' + str(ispw))
        if (st.nrows() > 0):
            q = QU[0, ispw]
            u = QU[1, ispw]
            qufound = complex(q, u)
            c = st.getcol('CPARAM')
            fl = st.getcol('FLAG')
            xyph0 = pl.angle(pl.mean(c[0,:,:][pl.logical_not(fl[0,:,:])]), True)
            print 'Spw = ' + str(ispw) + ': Found QU = ' + str(QU[:, ispw])  # +' ('+str(pl.angle(qufound)*180/pi)+')'
            #if ( (abs(q)>0.0 and abs(qu[0])>0.0 and (q/qu[0])<0.0) or
            #     (abs(u)>0.0 and abs(qu[1])>0.0 and (u/qu[1])<0.0) ):
            if (pl.absolute(pl.angle(qufound/QUexp)*180/pi) > 90.0):
                c[0,:,:] *= -1.0
                xyph1 = pl.angle(pl.mean(c[0,:,:][pl.logical_not(fl[0,:,:])]), True)
                st.putcol('CPARAM', c)
                QU[:, ispw] *= -1
                print ' ...CONVERTING X-Y phase from ' + str(xyph0) + ' to ' + str(xyph1) + ' deg'
            else:
                print ' ...KEEPING X-Y phase ' + str(xyph0) + ' deg'
            st.close()
    QUr = {}
    QUr['QU'] = QU
    mytb.putkeyword('QU', QUr)
    mytb.close()
    QUm = pl.mean(QU[:, P > 0], 1)
    QUe = pl.std(QU[:, P > 0], 1)
    Pm = pl.sqrt(QUm[0]**2 + QUm[1]**2)
    Xm = 0.5*atan2(QUm[1], QUm[0])*180/pi
    print 'Ambiguity resolved (spw mean): Q=', QUm[0], 'U=', QUm[1], '(rms=', QUe[0], QUe[1], ')', 'P=', Pm, 'X=', Xm
    stokes = [1.0, QUm[0], QUm[1], 0.0]
    print 'Returning the following Stokes vector: ' + str(stokes)
    return stokes
Author: schiebel, Project: casa, Lines: 57, Source: almapolhelpers.py
Example 13: correctBias
def correctBias(AllData):
    # correct for difficulty and plot each subject %correct vs confidence
    corrmatrix, confmatrix = returnConfMatrix(AllData)
    Qs, subjects = py.shape(corrmatrix)
    copts = [1, 2, 3, 4, 5]
    datamat = np.array(py.zeros([len(copts), subjects]))
    print(datamat)
    fig = py.figure()
    ax15 = fig.add_subplot(111)
    i = 0
    while i < subjects:
        c1, c2, c3, c4, c5 = [], [], [], [], []
        # get confidences for each subject
        j = 0
        while j < Qs:
            # get confidences and correct for each question
            if confmatrix[j][i] == 1:
                c1.append(corrmatrix[j][i])
            elif confmatrix[j][i] == 2:
                c2.append(corrmatrix[j][i])
            elif confmatrix[j][i] == 3:
                c3.append(corrmatrix[j][i])
            elif confmatrix[j][i] == 4:
                c4.append(corrmatrix[j][i])
            elif confmatrix[j][i] == 5:
                c5.append(corrmatrix[j][i])
            else:
                print('bad num encountered')
            j += 1
        print('i is %d' % i)
        minconf = ([py.mean(c1), py.mean(c2), py.mean(c3),
                    py.mean(c4), py.mean(c5)])
        pmin = 10
        for p in minconf:
            if p < pmin and p != 0 and math.isnan(p) is not True:
                pmin = p
        print(pmin)
        datamat[0][i] = py.mean(c1)/pmin
        datamat[1][i] = py.mean(c2)/pmin
        datamat[2][i] = py.mean(c3)/pmin
        datamat[3][i] = py.mean(c4)/pmin
        datamat[4][i] = py.mean(c5)/pmin
        # print(datamat)
        print(py.shape(datamat))
        print(len(datamat[:, i]))
        ax15.plot(range(1, 6), datamat[:, i], alpha=0.4, linewidth=4)
        i += 1
    ax15.set_ylabel('Modified Correct')
    ax15.set_xlabel('Confidence')
    ax15.set_title('All responses')
    ax15.set_xticks(np.arange(1, 6))
    ax15.set_xticklabels([1, 2, 3, 4, 5])
    ax15.set_xlim(0, 6)
Author: acsutt0n, Project: WisdomOfCrowd, Lines: 57, Source: showData.py
Example 14: nrms
def nrms(data_fit, data_true):
    """
    Normalized root mean square error.
    """
    # root mean square error
    rms = pl.mean(pl.norm(data_fit - data_true, axis=0))
    # normalization factor is the max - min magnitude, or 2 times max dist from mean
    norm_factor = 2*pl.norm(data_true - pl.mean(data_true, axis=1), axis=0).max()
    return (norm_factor - rms)/norm_factor
Author: syantek, Project: sysid, Lines: 10, Source: subspace.py
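The subtraction data_true - pl.mean(data_true, axis=1) only broadcasts if the row means keep their dimension (as with numpy matrix inputs), so the same idea is sketched here on a plain 1-D signal instead; the signal and noise level below are invented:

import pylab as pl

true = pl.sin(pl.linspace(0, 10, 200))       # made-up reference signal
fit = true + 0.05 * pl.randn(200)            # made-up "fitted" signal
rms = pl.sqrt(pl.mean((fit - true) ** 2))    # root mean square error
nrmse = rms / (true.max() - true.min())      # normalized by the signal range
print(rms, nrmse)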
Example 15: ttest
def ttest(X, Y):
    """
    Takes two lists of values, returns t value

    >>> ttest([2, 3, 7, 6, 10], [11,2,3,1,2])
    0.77459666924148329
    """
    if len(X) <= 1 or len(Y) <= 1: return 0.0
    return ((pylab.mean(X) - pylab.mean(Y))
            / stderr(X, Y))
Author: ronaldahmed, Project: robot-navigation, Lines: 10, Source: __init__.py
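stderr() is defined elsewhere in that module; the doctest value above is reproduced by the unpooled two-sample standard error, so a self-contained sketch (the standard-error formula is an assumption, not the project's code) looks like this:

import pylab as pl

X = [2, 3, 7, 6, 10]
Y = [11, 2, 3, 1, 2]
# assumed standard error: sqrt(var(X)/n_X + var(Y)/n_Y) with sample variances
se = pl.sqrt(pl.var(X, ddof=1) / len(X) + pl.var(Y, ddof=1) / len(Y))
t = (pl.mean(X) - pl.mean(Y)) / se
print(t)   # 0.7745966692414834, matching the doctest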
Example 16: DFA
def DFA(data, npoints=None, degree=1, use_median=False):
    """
    computes the detrended fluctuation analysis
    returns the fluctuation F and the corresponding window length L

    :args:
        data (n-by-1 array): the data from which to compute the DFA
        npoints (int): the number of points to evaluate; if omitted the log(n)
            will be used
        degree (int): degree of the polynomial to use for detrending
        use_median (bool): use median instead of mean fluctuation

    :returns:
        F, L: the fluctuation F as function of the window length L
    """
    # max window length: n/4

    # 0th: compute integral
    integral = cumsum(data - mean(data))

    # 1st: compute different window lengths
    n_samples = npoints if npoints is not None else int(log(len(data)))
    lengths = sort(array(list(set(
        logspace(2, log(len(data)/4.), n_samples, base=exp(1)).astype(int)
    ))))

    # print lengths
    all_flucs = []
    used_lengths = []
    for wlen in lengths:
        # compute the fluctuation of residuals from a linear fit
        # according to Kantz&Schreiber, ddof must be the degree of polynomial,
        # i.e. 1 (or 2, if mean also counts? -> see in book)
        curr_fluc = []
        # rrt = 0
        for startIdx in arange(0, len(integral), wlen):
            pt = integral[startIdx:startIdx + wlen]
            if len(pt) > 3*(degree+1):
                resids = pt - polyval(polyfit(arange(len(pt)), pt, degree),
                                      arange(len(pt)))
                # if abs(wlen - lengths[0]) < -1:
                #     print resids[:20]
                # elif rrt == 0:
                #     print "wlen", wlen, "l0", lengths[0]
                #     rrt += 1
                curr_fluc.append(std(resids, ddof=degree+1))
        if len(curr_fluc) > 0:
            if use_median:
                all_flucs.append(median(curr_fluc))
            else:
                all_flucs.append(mean(curr_fluc))
            used_lengths.append(wlen)
    return array(all_flucs), array(used_lengths)
Author: MMaus, Project: mutils, Lines: 54, Source: statistics.py
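A quick usage sketch, assuming the DFA function above is in scope (it relies on a star-import of pylab/numpy names): for white noise the fluctuation should scale roughly as L**0.5, so the log-log slope should come out near 0.5.

import pylab as pl

noise = pl.randn(10000)                         # white-noise test signal
F, L = DFA(noise, npoints=20)
alpha = pl.polyfit(pl.log(L), pl.log(F), 1)[0]  # scaling exponent estimate
print(alpha)                                    # expected to be close to 0.5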
Example 17: zoom
def zoom(beg, end, x1_plot, y1_plot, z_plot, x2_plot, y2_plot, t_plot, KOP_plot, radical):
    # resize sample according to zoom interval
    x1_plot = x1_plot[:, beg/0.05:end/0.05]
    x2_plot = x2_plot[:, beg/0.05:end/0.05]
    y1_plot = y1_plot[:, beg/0.05:end/0.05]
    y2_plot = y2_plot[:, beg/0.05:end/0.05]
    z_plot = z_plot[:, beg/0.05:end/0.05]
    t_plot = t_plot[beg/0.05:end/0.05]
    KOP_plot = KOP_plot[0, beg/0.05:end/0.05]  # 0 because only k is needed, not psi
    nbn1 = x1_plot.shape[0]
    nbn2 = x2_plot.shape[0]
    x1bar_plot = pb.zeros(x1_plot.shape[1])
    x2bar_plot = pb.zeros(x2_plot.shape[1])
    zbar_plot = pb.zeros(z_plot.shape[1])
    for i in range(x1bar_plot.size):
        x1bar_plot[i] = pb.mean(x1_plot[:, i])
        x2bar_plot[i] = pb.mean(x2_plot[:, i])
        zbar_plot[i] = pb.mean(z_plot[:, i])
    # plotting
    fig = pb.figure(figsize=(20, 10))
    pb.hold(True)
    ax1 = pb.subplot(5, 1, 1); ax1.hold(True); ax1.set_title("x1 (thick black=x1bar)")
    ax2 = pb.subplot(5, 1, 2); ax2.hold(True); ax2.set_title("x2 (thick black=x2bar)")
    ax3 = pb.subplot(5, 1, 3); ax3.hold(True); ax3.set_title("x1bar - x2bar")
    ax4 = pb.subplot(5, 1, 4); ax4.hold(True); ax4.set_title("Z (thick black=zbar)")
    ax5 = pb.subplot(5, 1, 5); ax5.hold(True); ax5.set_title("Amplitude of the Kuramoto Order parameter")
    for i in range(nbn1):
        # time series pop1
        ax1.plot(t_plot, x1_plot[i, :])  # i -> all neurons, 0 -> only neuron 0 ...
        # time series z
        ax4.plot(t_plot, z_plot[i, :], label=None)
    for j in range(nbn2):
        # time series pop2
        ax2.plot(t_plot, x2_plot[j, :])
    # draw time series
    ax1.plot(t_plot, x1bar_plot, 'black', linewidth=1.5)
    ax2.plot(t_plot, x2bar_plot, 'black', linewidth=1.5)
    ax3.plot(t_plot, x2bar_plot - x1bar_plot, label='x2bar - x1bar')
    ax3.legend(prop={'size': 10})
    ax4.plot(t_plot, zbar_plot, 'black', linewidth=2., label="zbar")
    ax4.legend(prop={'size': 10})
    ax5.plot(t_plot, KOP_plot[:])
    #ax5.legend(prop={'size':10})
    fig.savefig("epilepton" + radical + "_zoom.png", dpi=200)
Author: AlexBoro, Project: epilepton, Lines: 53, Source: figTool.py
Example 18: lsqReg
def lsqReg(X, Y):
    """
    Returns the least square fit of Y = a*X + b.
    """
    m_x = pylab.mean(X)
    m_y = pylab.mean(Y)
    m_x2 = pylab.mean(X*X)
    m_xy = pylab.mean(X*Y)
    a = (m_xy - m_x*m_y)/(m_x2 - m_x*m_x)
    b = m_y - a*m_x
    return a, b
Author: ashivni, Project: FuseNetwork, Lines: 13, Source: statUtils.py
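A quick sanity check, assuming lsqReg above is in scope: fit a noisy line with known slope and intercept (the values 2 and 1 below are arbitrary):

import pylab as pl

X = pl.linspace(0, 10, 100)
Y = 2.0 * X + 1.0 + 0.1 * pl.randn(100)    # made-up noisy line
a, b = lsqReg(X, Y)
print(a, b)   # should come out close to 2 and 1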
Example 19: sample
def sample(self, model, evidence):
    z = evidence['z']
    T = evidence['T']
    g = evidence['g']
    h = evidence['h']
    transition_var_g = evidence['transition_var_g']
    shot_id = evidence['shot_id']

    observation_var_g = model.known_params['observation_var_g']
    observation_var_h = model.known_params['observation_var_h']
    prior_mu_g = model.hyper_params['g']['mu']
    prior_cov_g = model.hyper_params['g']['cov']
    N = len(z)
    n = len(g)

    # Make g, h, and z vector valued to avoid ambiguity
    g = g.copy().reshape((n, 1))
    h = h.copy().reshape((n, 1))

    z_g = ma.asarray(nan + zeros((n, 1)))
    obs_cov = ma.asarray(inf + zeros((n, 1, 1)))
    for i in xrange(n):
        z_i = z[shot_id == i]
        T_i = T[shot_id == i]
        if 1 in T_i and 2 in T_i:
            # Sample mean and variance for multiple observations
            n_obs_g, n_obs_h = sum(T_i == 1), sum(T_i == 2)
            obs_cov_g, obs_cov_h = observation_var_g/n_obs_g, observation_var_h/n_obs_h
            z_g[i] = (mean(z_i[T_i == 1])/obs_cov_g + mean(z_i[T_i == 2] - h[i])/obs_cov_h)/(1/obs_cov_g + 1/obs_cov_h)
            obs_cov[i] = 1/(1/obs_cov_g + 1/obs_cov_h)
        elif 1 in T_i:
            n_obs_g = sum(T_i == 1)
            z_g[i] = mean(z_i[T_i == 1])
            obs_cov[i] = observation_var_g/n_obs_g
        elif 2 in T_i:
            n_obs_h = sum(T_i == 2)
            z_g[i] = mean(z_i[T_i == 2] - h[i])
            obs_cov[i] = observation_var_h/n_obs_h
    z_g[isnan(z_g)] = ma.masked
    obs_cov[isinf(obs_cov)] = ma.masked

    kalman = self._kalman
    kalman.initial_state_mean = array([prior_mu_g[0],])
    kalman.initial_state_covariance = array([prior_cov_g[0],])
    kalman.transition_matrices = eye(1)
    kalman.transition_covariance = array([transition_var_g,])
    kalman.observation_matrices = eye(1)
    kalman.observation_covariance = obs_cov
    sampled_g = forward_filter_backward_sample(kalman, z_g, prior_mu_g, prior_cov_g)
    return sampled_g.reshape((n,))
Author: bwallin, Project: thesis-code, Lines: 51, Source: model_simulation_eta.py
Example 20: est_dtlnorm
def est_dtlnorm(x, thres, opt_method):
    def cond_dtlnorm(par):
        return m_nl_dtlnorm(x=x, mu=par[0], sigma=par[1], thres=thres)

    if opt_method in ['L-BFGS-B', 'SLSQP', 'TNC']:
        est_par = minimize(x0=[mean(log(x)), std(log(x))],
                           fun=cond_dtlnorm,
                           method=opt_method,
                           bounds=[(log(thres[0]), log(thres[1])),
                                   (1e-16, Inf)]).x
    else:
        est_par = minimize(x0=[mean(log(x)), std(log(x))],
                           fun=cond_dtlnorm,
                           method=opt_method).x
    return est_par
Author: GongYiLiao, Project: Python_Daily, Lines: 14, Source: test_python_optimize.py
Note: The pylab.mean examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before reusing or redistributing the code. Do not reproduce this compilation without permission.