This article collects and summarizes typical usage examples of the theano.tensor.std function in Python. If you have been wondering what exactly std does, how to call it, or simply want to see it used in real code, the hand-picked examples below should help.
The following 20 code examples of the std function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
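Before the project examples, here is a minimal self-contained sketch of the basic call pattern (written for this article, not taken from any of the projects below): build a symbolic std expression, compile it with theano.function, and evaluate it on NumPy data.

import numpy as np
import theano
import theano.tensor as T

x = T.dmatrix('x')                       # symbolic 2-D float64 input
col_std = T.std(x, axis=0)               # per-column standard deviation
all_std = T.std(x)                       # standard deviation over all elements
f = theano.function([x], [col_std, all_std])

data = np.random.randn(100, 3)
per_column, overall = f(data)            # matches np.std(data, axis=0) and np.std(data)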
Example 1: _output

def _output(self, input, *args, **kwargs):
    input = self.input_layer.output()
    out = T.switch(T.gt(input, 0), 1, 0)
    if out.ndim > 2:
        std = T.std(out, axis=(0, 2, 3))
    else:
        std = T.std(out, axis=0)
    return T.concatenate([T.mean(std).reshape((1,)), T.mean(out).reshape((1,))])

Developer: rbn42, Project: LearningToDrive, Lines of code: 8, Source file: layers.py
Example 2: cross_correlation

def cross_correlation(x, y):
    x_mean = mean(x)
    y_mean = mean(y)
    x_stdev = std(x)
    y_stdev = std(y)
    y_dev = y - y_mean
    x_dev = x - x_mean
    return 1 - (mean(x_dev*y_dev / (x_stdev*y_stdev)))

Developer: marianocabezas, Project: cnn, Lines of code: 8, Source file: objective_functions.py
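A minimal usage sketch for cross_correlation (added for this article, not from the marianocabezas/cnn project); it assumes the bare mean and std calls above resolve to the functions imported from theano.tensor:

from theano.tensor import mean, std      # the bare mean/std used inside cross_correlation
import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
y = T.dvector('y')
loss = cross_correlation(x, y)           # symbolic scalar: 1 minus the normalized cross-correlation
f = theano.function([x, y], loss)
value = f(np.random.randn(50), np.random.randn(50))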
Example 3: __build_center

def __build_center(self):
    # We only want to compile our theano functions once
    imgv = T.dtensor3("imgv")
    # Get the mean
    u = T.mean(imgv, 0)
    # Get the standard deviation
    s = T.std(T.std(imgv, 0), 0)
    # Subtract our mean
    return function(inputs=[imgv], outputs=[(imgv - u) / s])

Developer: tkaplan, Project: MLTextParser, Lines of code: 9, Source file: ImgPreprocessing.py
Example 4: batch_normalize

def batch_normalize(Y):
    """
    Set columns of Y to zero mean and unit variance.
    """
    Y_zmuv = (Y - T.mean(Y, axis=0, keepdims=True)) / \
             T.std(Y, axis=0, keepdims=True)
    return Y_zmuv

Developer: Philip-Bachman, Project: ICML-2015, Lines of code: 7, Source file: OneStageModel.py
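A minimal usage sketch for batch_normalize (added for this article, not part of the ICML-2015 code): compile the expression and apply it to a design matrix.

import numpy as np
import theano
import theano.tensor as T

Y = T.dmatrix('Y')
normalize = theano.function([Y], batch_normalize(Y))
Y_zmuv = normalize(np.random.randn(64, 10))   # each column now has ~zero mean and ~unit variance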
Example 5: correlation

def correlation(input1, input2):
    n = T.shape(input1)
    n0 = n[0]
    n1 = n[1]
    s0 = T.std(input1, axis=1, keepdims=True)  #.reshape((n0,1)),reps=n1)
    s1 = T.std(input2, axis=1, keepdims=True)  #.reshape((n0,1)),reps=n1)
    m0 = T.mean(input1, axis=1, keepdims=True)
    m1 = T.mean(input2, axis=1, keepdims=True)
    corr = T.sum(((input1-m0)/s0)*((input2-m1)/s1), axis=1)/n1
    corr = (corr+np.float32(1.))/np.float32(2.)
    corr = T.reshape(corr, (n0,))
    return corr

Developer: yaliamit, Project: Compare, Lines of code: 16, Source file: run_compare.py
Example 6: _train_fprop

def _train_fprop(self, state_below):
    miu = state_below.mean(axis=0)
    std = T.std(state_below, axis=0)
    self.moving_mean += self.mem * miu + (1-self.mem) * self.moving_mean
    self.moving_std += self.mem * std + (1-self.mem) * self.moving_std
    Z = (state_below - self.moving_mean) / (self.moving_std + self.epsilon)
    return self.gamma * Z + self.beta

Developer: Modasshir, Project: Mozi, Lines of code: 7, Source file: normalization.py
Example 7: get_stats

def get_stats(input, stat=None):
    """
    Returns a dictionary mapping the name of the statistic to the result on the input.
    Currently gets mean, var, std, min, max, l1, l2.

    Parameters
    ----------
    input : tensor
        Theano tensor to grab stats for.

    Returns
    -------
    dict
        Dictionary of all the statistics expressions {string_name: theano expression}
    """
    stats = {
        'mean': T.mean(input),
        'var': T.var(input),
        'std': T.std(input),
        'min': T.min(input),
        'max': T.max(input),
        'l1': input.norm(L=1),
        'l2': input.norm(L=2),
        #'num_nonzero': T.sum(T.nonzero(input)),
    }
    stat_list = raise_to_list(stat)
    compiled_stats = {}
    if stat_list is None:
        return stats
    for stat in stat_list:
        if isinstance(stat, string_types) and stat in stats:
            compiled_stats.update({stat: stats[stat]})
    return compiled_stats

Developer: EqualInformation, Project: OpenDeep, Lines of code: 34, Source file: statistics.py
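A possible usage sketch for get_stats (hypothetical, added for this article; it assumes the module's helpers raise_to_list and string_types behave as the code above implies): request only the 'mean' and 'std' expressions and compile them.

import numpy as np
import theano
import theano.tensor as T

x = T.dmatrix('x')
exprs = get_stats(x, stat=['mean', 'std'])            # expected: {'mean': ..., 'std': ...}
f = theano.function([x], [exprs['mean'], exprs['std']])
mean_val, std_val = f(np.random.randn(20, 5))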
Example 8: _build_activation

def _build_activation(self, act=None):
    '''Given an activation description, return a callable that implements it.
    '''
    def compose(a, b):
        c = lambda z: b(a(z))
        c.__theanets_name__ = '%s(%s)' % (b.__theanets_name__, a.__theanets_name__)
        return c
    act = act or self.args.activation.lower()
    if '+' in act:
        return reduce(compose, (self._build_activation(a) for a in act.split('+')))
    options = {
        'tanh': TT.tanh,
        'linear': lambda z: z,
        'logistic': TT.nnet.sigmoid,
        'softplus': TT.nnet.softplus,
        # shorthands
        'relu': lambda z: TT.maximum(0, z),
        # modifiers
        'rect:max': lambda z: TT.minimum(1, z),
        'rect:min': lambda z: TT.maximum(0, z),
        # normalization
        'norm:dc': lambda z: (z.T - z.mean(axis=1)).T,
        'norm:max': lambda z: (z.T / TT.maximum(1e-10, abs(z).max(axis=1))).T,
        'norm:std': lambda z: (z.T / TT.maximum(1e-10, TT.std(z, axis=1))).T,
    }
    for k, v in options.iteritems():
        v.__theanets_name__ = k
    try:
        return options[act]
    except:
        raise KeyError('unknown --activation %s' % act)

Developer: ageek, Project: theano-nets, Lines of code: 34, Source file: main.py
Example 9: model

def model(self, X, w1, w2, w3, w4, w5, w6, w_o, p_drop_conv, p_drop_hidden):
    l1a = l.rectify(conv2d(X, w1, border_mode='valid') + self.b1)
    l1 = max_pool_2d(l1a, (2, 2), ignore_border=True)
    #l1 = l.dropout(l1, p_drop_conv)
    l2a = l.rectify(conv2d(l1, w2, border_mode='valid') + self.b2)
    l2 = max_pool_2d(l2a, (2, 2), ignore_border=True)
    #l2 = l.dropout(l2, p_drop_conv)
    l3 = l.rectify(conv2d(l2, w3, border_mode='valid') + self.b3)
    #l3 = l.dropout(l3a, p_drop_conv)
    l4a = l.rectify(conv2d(l3, w4, border_mode='valid') + self.b4)
    l4 = max_pool_2d(l4a, (2, 2), ignore_border=True)
    #l4 = T.flatten(l4, outdim=2)
    #l4 = l.dropout(l4, p_drop_conv)
    l5 = l.rectify(conv2d(l4, w5, border_mode='valid') + self.b5)
    #l5 = l.dropout(l5, p_drop_hidden)
    l6 = l.rectify(conv2d(l5, w6, border_mode='valid') + self.b6)
    #l6 = l.dropout(l6, p_drop_hidden)
    #l6 = self.bn(l6, self.g, self.b, self.m, self.v)
    l6 = conv2d(l6, w_o, border_mode='valid')
    #l6 = self.bn(l6, self.g, self.b, T.mean(l6, axis=1), T.std(l6, axis=1))
    l6 = T.flatten(l6, outdim=2)
    #l6 = ((l6 - T.mean(l6, axis=0))/T.std(l6, axis=0))*self.g + self.b  # self.bn(l6, self.g, self.b, T.mean(l6, axis=0), T.std(l6, axis=0))
    l6 = ((l6 - T.mean(l6, axis=0))/(T.std(l6, axis=0)+1e-4))*self.g + self.b
    pyx = T.nnet.softmax(l6)
    return l1, l2, l3, l4, l5, l6, pyx

Developer: chinnadhurai, Project: machine_vision_course, Lines of code: 30, Source file: conv_net.py
Example 10: collect_statistics

def collect_statistics(self, X):
    """Updates statistics of the data."""
    stat_mean = T.mean(X, axis=0)
    stat_std = T.std(X, axis=0)
    updates_stats = [(self.stat_mean, stat_mean), (self.stat_std, stat_std)]
    return updates_stats

Developer: Thelordofdream, Project: GRAN, Lines of code: 7, Source file: batch_norm_conv_layer.py
Example 11: setup_model

def setup_model():
    # shape: T x B x F
    input_ = T.tensor3('features')
    # shape: B
    target = T.lvector('targets')
    model = LSTMAttention(dim=500,
                          mlp_hidden_dims=[400, 4],
                          batch_size=100,
                          image_shape=(100, 100),
                          patch_shape=(28, 28),
                          weights_init=Glorot(),
                          biases_init=Constant(0))
    model.initialize()
    h, c, location, scale = model.apply(input_)
    classifier = MLP([Rectifier(), Softmax()], [500, 100, 10],
                     weights_init=Glorot(),
                     biases_init=Constant(0))
    model.h = h
    classifier.initialize()

    probabilities = classifier.apply(h[-1])
    cost = CategoricalCrossEntropy().apply(target, probabilities)
    error_rate = MisclassificationRate().apply(target, probabilities)

    location_x_avg = T.mean(location[:, 0])
    location_x_avg.name = 'location_x_avg'
    location_y_avg = T.mean(location[:, 1])
    location_y_avg.name = 'location_y_avg'
    scale_x_avg = T.mean(scale[:, 0])
    scale_x_avg.name = 'scale_x_avg'
    scale_y_avg = T.mean(scale[:, 1])
    scale_y_avg.name = 'scale_y_avg'

    location_x_std = T.std(location[:, 0])
    location_x_std.name = 'location_x_std'
    location_y_std = T.std(location[:, 1])
    location_y_std.name = 'location_y_std'
    scale_x_std = T.std(scale[:, 0])
    scale_x_std.name = 'scale_x_std'
    scale_y_std = T.std(scale[:, 1])
    scale_y_std.name = 'scale_y_std'

    monitorings = [error_rate,
                   location_x_avg, location_y_avg, scale_x_avg, scale_y_avg,
                   location_x_std, location_y_std, scale_x_std, scale_y_std]
    return cost, monitorings

Developer: mohammadpz, Project: LSTM-Attention, Lines of code: 47, Source file: main.py
Example 12: _layer_stats

def _layer_stats(self, state_below, layer_output):
    ls = super(PRELU, self)._layer_stats(state_below, layer_output)
    rlist = []
    rlist.append(('alpha_mean', T.mean(self.alpha)))
    rlist.append(('alpha_max', T.max(self.alpha)))
    rlist.append(('alpha_min', T.min(self.alpha)))
    rlist.append(('alpha_std', T.std(self.alpha)))
    return ls + rlist

Developer: hycis, Project: Pynet, Lines of code: 8, Source file: layer.py
Example 13: get_output_for

def get_output_for(self, input, **kwargs):
    input1 = input[0,]
    input2 = input[1,]
    n = self.input_shape
    #n0 = n[1]
    n1 = n[2]
    # tt = tuple([n0, 1])
    s0 = T.std(input1, axis=1, keepdims=True)
    s1 = T.std(input2, axis=1, keepdims=True)
    m0 = T.mean(input1, axis=1, keepdims=True)
    m1 = T.mean(input2, axis=1, keepdims=True)
    corr = T.sum(((input1-m0)/s0)*((input2-m1)/s1), axis=1)/n1
    corr = (corr+np.float32(1.))/np.float32(2.)
    return corr

Developer: yaliamit, Project: Compare, Lines of code: 17, Source file: corr_layer.py
Example 14: testFcn

def testFcn(self, massBinned, trainY, trainX):
    y = T.dvector('y')
    varBinned = T.ivector('var')
    baseHist = T.bincount(varBinned, 1-y) + 0.01
    selectedHist = T.bincount(varBinned, (1-y)*self.outLayer.P[T.arange(y.shape[0]), 1]) + 0.01
    print baseHist.eval({y: trainY, varBinned: massBinned}), selectedHist.eval({y: trainY, varBinned: massBinned, self.input: trainX})
    rTensor = T.std(selectedHist/baseHist)
    return (rTensor).eval({y: trainY, varBinned: massBinned, self.input: trainX})

Developer: sidnarayanan, Project: RelativisticML, Lines of code: 8, Source file: NeuralNet.py
Example 15: get_output_for

def get_output_for(self, input, **kwargs):
    output_shape = input.shape
    if input.ndim > 2:
        input = T.flatten(input, 2)
    if self.norm_type == "mean_var":
        input -= T.mean(input, axis=1, keepdims=True)
        input /= T.std(input, axis=1, keepdims=True)
    input = input.reshape(output_shape)
    return input

Developer: eglxiang, Project: xnn, Lines of code: 9, Source file: normalization.py
Example 16: batch_norm

def batch_norm(self, h, dim, use_shift=True, use_std=True):
    bn = (h - T.mean(h, axis=1, keepdims=True)) / (T.std(h, axis=1, keepdims=True) + numpy.float32(1e-10))
    if use_std:
        gamma = self.add_param(self.shared(numpy.zeros((dim,), 'float32') + numpy.float32(0.1), "%s_gamma" % h.name))
        bn *= gamma.dimshuffle('x', 'x', 0).repeat(h.shape[0], axis=0).repeat(h.shape[1], axis=1)
    if use_shift:
        beta = self.add_param(self.shared(numpy.zeros((dim,), 'float32'), "%s_beta" % h.name))
        bn += beta
    return bn

Developer: chagge, Project: returnn, Lines of code: 9, Source file: NetworkBaseLayer.py
Example 17: zScoreNormalization

def zScoreNormalization(self, X_data):
    f = function([], [T.mean(self.out, axis=0, dtype='float32'),
                      T.std(self.out, axis=0, dtype='float32')],
                 givens=[(self.X, X_data)])
    mean, std = f()
    std += (std < 1e-5)
    self.out = (self.out - mean) / std

Developer: crimsonlander, Project: nn, Lines of code: 9, Source file: nn.py
Example 18: get_output_for

def get_output_for(self, input, **kwargs):
    # compute featurewise mean and std for the minibatch
    orig_shape = input.shape
    temp = T.reshape(input, (-1, orig_shape[-1]))
    means = T.mean(input, 0, dtype=input.dtype)
    stds = T.std(input, 0)
    temp = (temp - means) / stds
    input = T.reshape(temp, orig_shape)
    return input

Developer: behtak, Project: ip-avsr, Lines of code: 9, Source file: layers.py
Example 19: output

def output(self, x):
    d_0 = global_theano_rand.binomial(x.shape, p=1-self.d_p_0, dtype=FLOATX)
    d_1 = global_theano_rand.binomial((x.shape[0], self.projection_dim), p=1-self.d_p_1, dtype=FLOATX)

    tl_raw = T.dot(x * d_0, self.W_tl)
    hl_raw = T.dot(x * d_0, self.W_hl)
    tl_mean = T.mean(tl_raw, axis=0)
    hl_mean = T.mean(hl_raw, axis=0)
    tl_std = T.std(tl_raw, axis=0)
    hl_std = T.std(hl_raw, axis=0)
    tl = (tl_raw - tl_mean) / (tl_std + self.epsilon)
    hl = (hl_raw - hl_mean) / (hl_std + self.epsilon)
    new_Mean_tl = self.tau * tl_mean + (1.0 - self.tau) * self.Mean_tl
    new_Mean_hl = self.tau * hl_mean + (1.0 - self.tau) * self.Mean_hl
    new_Std_tl = self.tau * tl_std + (1.0 - self.tau) * self.Std_tl
    new_Std_hl = self.tau * hl_std + (1.0 - self.tau) * self.Std_hl

    tr_raw = (tl * d_1).dot(self.W_tr)
    hr_raw = (hl * d_1).dot(self.W_hr)
    tr_mean = T.mean(tr_raw, axis=0)
    hr_mean = T.mean(hr_raw, axis=0)
    tr_std = T.std(tr_raw, axis=0)
    hr_std = T.std(hr_raw, axis=0)
    tr = (tr_raw - tr_mean) / (tr_std + self.epsilon)
    hr = (hr_raw - hr_mean) / (hr_std + self.epsilon)
    new_Mean_tr = self.tau * tr_mean + (1.0 - self.tau) * self.Mean_tr
    new_Mean_hr = self.tau * hr_mean + (1.0 - self.tau) * self.Mean_hr
    new_Std_tr = self.tau * tr_std + (1.0 - self.tau) * self.Std_tr
    new_Std_hr = self.tau * hr_std + (1.0 - self.tau) * self.Std_hr

    t = T.nnet.sigmoid(tr * self.S_t + self.B_t)
    h = self._act(hr * self.S_h + self.B_h)
    rv = h * t + x * (1 - t)

    self.register_training_updates((self.Mean_tl, new_Mean_tl),
                                   (self.Mean_hl, new_Mean_hl),
                                   (self.Mean_tr, new_Mean_tr),
                                   (self.Mean_hr, new_Mean_hr),
                                   (self.Std_tl, new_Std_tl),
                                   (self.Std_hl, new_Std_hl),
                                   (self.Std_tr, new_Std_tr),
                                   (self.Std_hr, new_Std_hr))
    return rv

Developer: Avmb, Project: lowrank-highwaynetwork, Lines of code: 44, Source file: highwaylrdropoutbn_layer.py
Example 20: __call__

def __call__(self, x, *args):
    if self.normalize:
        W = self.g.dimshuffle(0,'x','x','x') * \
            (self.W - self.W.mean(axis=[1,2,3]).dimshuffle(0,'x','x','x')) / \
            T.sqrt(T.sum(self.W**2, axis=[1,2,3])).dimshuffle(0,'x','x','x')
    else:
        W = self.W
    #print("conv call:", x, W, self.mode, self.stride)
    #print(x.tag.test_value.shape)
    #try:
    #    print(W.tag.test_value.shape)
    #except:
    #    print(W.get_value().shape)
    #print(self.mode)
    #print(self.stride)
    if self.cudnn:
        conv_out = dnn_conv(x, W, self.mode, self.stride)
    else:
        if self.mode == 'half' and 'cpu' in theano.config.device:
            fso = self.filter_shape[2] - 1
            nps = x.shape[2]
            conv_out = conv.conv2d(input=x, filters=W,
                                   filter_shape=self.filter_shape,
                                   border_mode='full',
                                   subsample=self.stride)[:, :, fso:nps+fso, fso:nps+fso]
        else:
            conv_out = conv.conv2d(
                input=x,
                filters=W,
                filter_shape=self.filter_shape,
                border_mode=self.mode,
                subsample=self.stride,
                #image_shape=self.image_shape if image_shape is None else image_shape
            )
    if self.normalize and not shared.isJustReloadingModel:
        mu = T.mean(conv_out, axis=[0,2,3]).eval({shared.init_tensor_x: shared.init_minibatch_x})
        sigma = T.std(conv_out, axis=[0,2,3]).eval({shared.init_tensor_x: shared.init_minibatch_x})
        print("normalizing:", mu.mean(), sigma.mean())
        self.g.set_value(1 / sigma)
        self.b.set_value(-mu / sigma)
    if hasattr(shared, 'preactivations'):
        shared.preactivations.append(conv_out)
    if 0:  # mean-norm
        conv_out = conv_out - conv_out.mean(axis=[0,2,3]).dimshuffle('x',0,'x','x')
    if self.use_bias:
        out = self.activation(conv_out + self.b.dimshuffle('x',0,'x','x'))
    else:
        out = self.activation(conv_out)
    #print("out:", out.tag.test_value.shape)
    return out

Developer: bengioe, Project: theano_tools, Lines of code: 56, Source file: deep.py
Note: The theano.tensor.std function examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce without permission.