This article collects typical usage examples of the Python function theano.tensor.mean. If you have been wondering what exactly the mean function does, how to use it, or want to see it in real code, the hand-picked examples below should help.
Twenty code examples of the mean function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
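Before diving into the project code, here is a minimal, self-contained sketch (not taken from any of the projects below) of what theano.tensor.mean computes for a 2-D tensor, with and without an axis argument:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')

mean_all = T.mean(x)           # grand mean over every element (a scalar)
mean_cols = T.mean(x, axis=0)  # mean of each column
mean_rows = T.mean(x, axis=1)  # mean of each row

f = theano.function([x], [mean_all, mean_cols, mean_rows])

data = np.arange(6, dtype=theano.config.floatX).reshape(2, 3)
total, per_col, per_row = f(data)
print(total)    # 2.5
print(per_col)  # [ 1.5  2.5  3.5]
print(per_row)  # [ 1.  4.]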
Example 1: get_cost_updates
def get_cost_updates(self, contraction_level, learning_rate, cost_measure="cross_entropy"):
    """ This function computes the cost and the updates for one training
    step of the cA """
    y = self.get_hidden_values(self.x)
    z = self.get_reconstructed_input(y)
    J = self.get_jacobian(y, self.W)
    if cost_measure == "cross_entropy":
        # self.L_rec = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
        self.L_rec = T.mean(-T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1))
    elif cost_measure == "euclidean":
        self.L_rec = T.mean(T.sum((self.x - z) ** 2, axis=1))
    # Compute the jacobian and average over the number of samples/minibatch
    self.L_jacob = T.mean(T.sum(J ** 2) / self.n_batchsize)
    cost = self.L_rec + contraction_level * self.L_jacob
    # compute the gradients of the cost of the `cA` with respect
    # to its parameters
    gparams = T.grad(cost, self.params)
    # generate the list of updates
    updates = []
    for param, gparam in zip(self.params, gparams):
        updates.append((param, param - learning_rate * gparam))
    return (cost, updates)
Author: LazyXuan | Project: DECRES | Lines: 28 | Source: cA.py
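The reduction pattern in Example 1 — a per-example sum along axis=1 followed by T.mean over the minibatch — can be tried in isolation. The sketch below uses made-up target and reconstruction matrices purely to illustrate the two cost measures:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')  # targets in (0, 1)
z = T.matrix('z')  # reconstructions in (0, 1)

# sum over features (axis=1), then average over the minibatch,
# mirroring the L_rec expressions above
cross_entropy = T.mean(-T.sum(x * T.log(z) + (1 - x) * T.log(1 - z), axis=1))
euclidean = T.mean(T.sum((x - z) ** 2, axis=1))

f = theano.function([x, z], [cross_entropy, euclidean])
xv = np.asarray([[0., 1.], [1., 0.]], dtype=theano.config.floatX)
zv = np.asarray([[0.1, 0.9], [0.8, 0.2]], dtype=theano.config.floatX)
print(f(xv, zv))  # roughly [0.33, 0.05]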
Example 2: _test_layer_stats
def _test_layer_stats(self, layer_output):
    """
    DESCRIPTION:
        This method is called every batch: the examples from the test or valid
        set are passed through, and the final result is the mean of the results
        from all the batches in an epoch of the test or valid set.
    PARAM:
        layer_output: the output from the layer
    RETURN:
        A list of tuples [('name_a', var_a), ('name_b', var_b)] where each var is a scalar
    """
    w_len = T.sqrt((self.W ** 2).sum(axis=0))
    max_length = T.max(w_len)
    mean_length = T.mean(w_len)
    min_length = T.min(w_len)
    return [('max_col_length', max_length),
            ('mean_col_length', mean_length),
            ('min_col_length', min_length),
            ('output_max', T.max(layer_output)),
            ('output_mean', T.mean(layer_output)),
            ('output_min', T.min(layer_output)),
            ('max_W', T.max(self.W)),
            ('mean_W', T.mean(self.W)),
            ('min_W', T.min(self.W)),
            ('max_b', T.max(self.b)),
            ('mean_b', T.mean(self.b)),
            ('min_b', T.min(self.b))]
Author: hycis | Project: smartNN | Lines: 29 | Source: layer.py
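A stripped-down version of Example 2's column-norm statistics, using a free-standing symbolic matrix in place of the layer's shared W (the input values are invented for illustration):

import numpy as np
import theano
import theano.tensor as T

W = T.matrix('W')
w_len = T.sqrt((W ** 2).sum(axis=0))  # L2 norm of each weight column
stats = theano.function([W], [T.max(w_len), T.mean(w_len), T.min(w_len)])

w = np.asarray([[3., 0.], [4., 1.]], dtype=theano.config.floatX)
print(stats(w))  # columns have norms 5 and 1 -> [5.0, 3.0, 1.0]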
Example 3: get_lossfun
def get_lossfun(self, l1, l2):
    """
    Generate a loss function.
    The default one is mean negative log-likelihood.
    :param l1: weight of the L1 term, None for no L1 term
    :param l2: weight of the L2 term, None for no L2 term
    """
    if self.ff_net.layers[-1].activation_name == 'softmax':
        q = -T.mean(  # minimize negative log-likelihood
            T.log(
                self.ff_net.get_learning_passthrough(self.x)
            )[T.arange(self.y.shape[0]), self.y]
        )
    else:
        q = T.mean(  # minimize error function
            (self.ff_net.get_learning_passthrough(self.x) - self.y) ** 2
        )
    try:
        if l1 is not None:
            q = q + self.ff_net.l1 * l1
    except AttributeError:
        pass
    try:
        if l2 is not None:
            q = q + self.ff_net.l2 * l2
    except AttributeError:
        pass
    return q
Author: piotrmaslanka | Project: nnetsys | Lines: 35 | Source: teacher.py
Example 4: get_monitoring_channels
def get_monitoring_channels(self, V):
    try:
        self.compile_mode()
        rval = {}
        # from_ip = self.inference_procedure.get_monitoring_channels(V, self)
        # rval.update(from_ip)
        if self.monitor_params:
            for param in self.get_params():
                rval[param.name + '_min'] = full_min(param)
                rval[param.name + '_mean'] = T.mean(param)
                rval[param.name + '_max'] = full_max(param)
                if 'W' in param.name:
                    norms = theano_norms(param)
                    rval[param.name + '_norms_min'] = T.min(norms)
                    rval[param.name + '_norms_mean'] = T.mean(norms)
                    rval[param.name + '_norms_max'] = T.max(norms)
        new_rval = {}
        for key in rval:
            new_rval[self.monitoring_channel_prefix + key] = rval[key]
        rval = new_rval
        return rval
    finally:
        self.deploy_mode()
Author: JasonBens | Project: pylearn | Lines: 33 | Source: dbm.py
Example 5: batchnorm
def batchnorm(X, rescale=None, reshift=None, u=None, s=None, e=1e-8):
    """
    batchnorm with support for not using scale and shift parameters
    as well as inference values (u and s) and partial batchnorm (via a)
    will detect and use convolutional or fully connected version
    """
    g = rescale
    b = reshift
    if X.ndim == 4:
        if u is not None and s is not None:
            # use normalization params given a priori
            b_u = u.dimshuffle('x', 0, 'x', 'x')
            b_s = s.dimshuffle('x', 0, 'x', 'x')
        else:
            # compute normalization params from input
            b_u = T.mean(X, axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
            b_s = T.mean(T.sqr(X - b_u), axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
        # batch normalize
        X = (X - b_u) / T.sqrt(b_s + e)
        if g is not None and b is not None:
            # apply rescale and reshift
            X = X * T.exp(0.2 * g.dimshuffle('x', 0, 'x', 'x')) + b.dimshuffle('x', 0, 'x', 'x')
    elif X.ndim == 2:
        if u is None and s is None:
            # compute normalization params from input
            u = T.mean(X, axis=0)
            s = T.mean(T.sqr(X - u), axis=0)
        # batch normalize
        X = (X - u) / T.sqrt(s + e)
        if g is not None and b is not None:
            # apply rescale and reshift
            X = X * T.exp(0.2 * g) + b
    else:
        raise NotImplementedError
    return X
Author: Philip-Bachman | Project: Sequential-Generation | Lines: 35 | Source: NetLayers.py
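A quick smoke test of the fully connected branch of the batchnorm helper above (this assumes the function is already in scope); with no rescale/reshift parameters it simply standardizes each feature column:

import numpy as np
import theano
import theano.tensor as T

X = T.matrix('X')
f = theano.function([X], batchnorm(X))

data = (np.random.randn(128, 16) * 3.0 + 5.0).astype(theano.config.floatX)
out = f(data)
print(out.mean(axis=0))  # approximately 0 per feature
print(out.std(axis=0))   # approximately 1 per feature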
Example 6: add_regularization
def add_regularization(self, layer):
    regularization = 0
    if self._recon_strategy == 'forward':
        input_x = layer.x
        recon_x = layer.reconstruct_x()
        input_y = layer.y
        recon_y = layer.reconstruct_y()
        regularization += Tensor.mean((abs(input_x - recon_x)).sum(axis=1, dtype=Tensor.config.floatX))
        regularization += Tensor.mean((abs(input_y - recon_y)).sum(axis=1, dtype=Tensor.config.floatX))
    elif self._recon_strategy == 'backward':
        input_x = layer.x
        recon_x = Tensor.dot(layer.output_forward_x, layer.Wx.T)
        input_y = layer.y
        recon_y = Tensor.dot(layer.output_forward_y, layer.Wy.T)
        regularization += Tensor.mean((abs(input_x - recon_x)).sum(axis=1, dtype=Tensor.config.floatX))
        regularization += Tensor.mean((abs(input_y - recon_y)).sum(axis=1, dtype=Tensor.config.floatX))
    return regularization
Author: aviveise | Project: double_encoder | Lines: 25 | Source: reconstruction_regularization.py
Example 7: cost_updates
def cost_updates(self, lr, data, k=1):
    ph_activation_scores = T.dot(data, self.W) + self.h_bias
    ph_activation_probs, ph_samples, ph_updates = self.h.sample(ph_activation_scores)
    chain_start = ph_samples
    [nv_activation_scores, nv_activation_probs, nv_samples,
     nh_activation_scores, nh_activation_probs, nh_samples], updates = \
        theano.scan(
            self.gibbs_hvh,
            outputs_info=[None, None, None, None, None, chain_start],
            n_steps=k
        )
    chain_end = nv_samples[-1]
    cost = T.mean(self.free_energy(data)) \
        - T.mean(self.free_energy(chain_end)) \
        + self.regularisation()
    gparams = T.grad(cost, self.tunables, consider_constant=[chain_end])
    alpha = T.cast(self.momentum, dtype=theano.config.floatX)
    updates = [
        (param, param - (alpha * prev_chg + gparam * lr))
        for gparam, param, prev_chg in zip(gparams, self.tunables, self.deltas)
    ] + [
        (prev_chg, alpha * prev_chg + gparam * lr)
        for prev_chg, gparam in zip(self.deltas, gparams)
    ]  # + ph_updates + nv_updates + nh_updates
    monitoring_cost = self.reconstruction_cost(updates, nv_activation_scores[-1], data)
    return monitoring_cost, updates
Author: parasitew | Project: python-dbn | Lines: 32 | Source: rbm.py
Example 8: __init__
def __init__(self, fin, f1, nin1, f2, nin2, f3, nin3, expand, h1, outputs,
             lr, C, pDropConv=0.2, pDropHidden=0.5):
    # hyperparameters
    self.lr = lr
    self.C = C
    self.pDropConv = pDropConv
    self.pDropHidden = pDropHidden
    # collect all parameters to be optimized: connection weights and biases
    self.params = []
    self.paramsNIN = []
    self.paramsConv = []
    # convolution layers: w = (maps in this layer, maps in previous layer,
    # kernel rows, kernel cols), b = (maps in this layer,)
    self.paramsNIN.append(layerNINParams((f1, fin, nin1, 3, 3), expand))
    self.paramsNIN.append(layerNINParams((f2, f1 * expand, nin2, 3, 3), expand))
    self.paramsNIN.append(layerNINParams((f3, f2 * expand, nin3, 3, 3), expand))
    # global average pooling layers
    self.paramsConv.append(layerConvParams((h1, f3 * expand, 1, 1)))
    self.paramsConv.append(layerConvParams((outputs, h1, 1, 1)))
    self.params = self.paramsNIN + self.paramsConv
    # define Theano symbolic variables and build the Theano expressions
    self.X = T.tensor4('X')
    self.Y = T.matrix('Y')
    # training-set cost function
    YDropProb = model(self.X, self.params, pDropConv, pDropHidden)
    self.trNeqs = basicUtils.neqs(YDropProb, self.Y)
    trCrossEntropy = categorical_crossentropy(YDropProb, self.Y)
    self.trCost = T.mean(trCrossEntropy) + C * basicUtils.regularizer(flatten(self.params))
    # validation/test-set cost function
    YFullProb = model(self.X, self.params, 0., 0.)
    self.vateNeqs = basicUtils.neqs(YFullProb, self.Y)
    self.YPred = T.argmax(YFullProb, axis=1)
    vateCrossEntropy = categorical_crossentropy(YFullProb, self.Y)
    self.vateCost = T.mean(vateCrossEntropy) + C * basicUtils.regularizer(flatten(self.params))
Author: ifenghao | Project: myDeepLearning | Lines: 35 | Source: rowfccolfcv1.py
Example 9: forward
def forward(self, input_org, train=True, update_batch_stat=True, finetune=False):
    print "Layer/BatchNormalization"
    ldim, cdim, rdim = self._internal_shape(input_org)
    input = input_org.reshape((ldim, cdim, rdim))
    if train:
        mean = T.mean(input, axis=(0, 2), keepdims=True)
        var = T.mean((input - mean) ** 2, axis=(0, 2), keepdims=True)
        if update_batch_stat:
            finetune_N = theano.clone(self.finetune_N, share_inputs=False)
            if finetune:
                finetune_N.default_update = finetune_N + 1
                ratio = T.cast(1 - 1.0 / (finetune_N + 1), theano.config.floatX)
            else:
                finetune_N.default_update = 0
                ratio = self.moving_avg_ratio
            m = ldim * rdim
            scale = T.cast(m / (m - 1.0), theano.config.floatX)
            est_mean = theano.clone(self.est_mean, share_inputs=False)
            est_var = theano.clone(self.est_var, share_inputs=False)
            est_mean.default_update = T.cast(ratio * self.est_mean + (1 - ratio) * mean, theano.config.floatX)
            est_var.default_update = T.cast(ratio * self.est_var + (1 - ratio) * scale * var, theano.config.floatX)
            mean += 0 * est_mean
            var += 0 * est_var
        output = self._pbc(self.gamma) * (input - self._pbc(mean)) \
            / T.sqrt(1e-6 + self._pbc(var)) + self._pbc(self.beta)
    else:
        output = self._pbc(self.gamma) * (input - self._pbc(self.est_mean)) \
            / T.sqrt(1e-6 + self._pbc(self.est_var)) + self._pbc(self.beta)
    return output.reshape(input_org.shape)
Author: ilovecv | Project: vat | Lines: 32 | Source: batch_normalization.py
Example 10: test_minres_with_jacobi
def test_minres_with_jacobi():
    vv = theano.shared(v, name='v')
    gg = theano.shared(g, name='g')
    hh = theano.shared(h, name='h')
    dw = T.dot(v.T, g) / M
    dv = T.dot(g.T, h) / M
    da = T.mean(v, axis=0)
    db = T.mean(g, axis=0)
    dc = T.mean(h, axis=0)
    Ldiag_terms = natural.generic_compute_L_diag([vv, gg, hh])
    Ms = [Ldiag_term + 0.1 for Ldiag_term in Ldiag_terms]
    newgrads = minres.minres(
        lambda xw, xv, xa, xb, xc: natural.compute_Lx(vv, gg, hh, xw, xv, xa, xb, xc),
        [dw, dv, da, db, dc],
        rtol=1e-5,
        damp=0.,
        maxiter=10000,
        Ms=Ms,
        profile=0)[0]
    f = theano.function([], newgrads)
    [new_dw, new_dv, new_da, new_db, new_dc] = f()
    numpy.testing.assert_almost_equal(Linv_x_w, new_dw, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_v, new_dv, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_a, new_da, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_b, new_db, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_c, new_dc, decimal=1)
Author: gdesjardins | Project: DBM | Lines: 29 | Source: test_natural.py
Example 11: test_linearcg
def test_linearcg():
    vv = theano.shared(v, name='v')
    gg = theano.shared(g, name='g')
    hh = theano.shared(h, name='h')
    dw = T.dot(v.T, g) / M
    dv = T.dot(g.T, h) / M
    da = T.mean(v, axis=0)
    db = T.mean(g, axis=0)
    dc = T.mean(h, axis=0)
    newgrads = lincg.linear_cg(
        lambda xw, xv, xa, xb, xc: natural.compute_Lx(vv, gg, hh, xw, xv, xa, xb, xc),
        [dw, dv, da, db, dc],
        rtol=1e-5,
        maxiter=30,
        damp=0.,
        floatX=floatX,
        profile=0)
    f = theano.function([], newgrads)
    [new_dw, new_dv, new_da, new_db, new_dc] = f()
    numpy.testing.assert_almost_equal(Linv_x_w, new_dw, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_v, new_dv, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_a, new_da, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_b, new_db, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_c, new_dc, decimal=1)
Author: gdesjardins | Project: DBM | Lines: 26 | Source: test_natural.py
Example 12: test_minres_with_xinit
def test_minres_with_xinit():
    rng = numpy.random.RandomState(123412)
    vv = theano.shared(v, name='v')
    gg = theano.shared(g, name='g')
    hh = theano.shared(h, name='h')
    dw = T.dot(v.T, g) / M
    dv = T.dot(g.T, h) / M
    da = T.mean(v, axis=0)
    db = T.mean(g, axis=0)
    dc = T.mean(h, axis=0)
    xinit = [rng.rand(N0, N1),
             rng.rand(N1, N2),
             rng.rand(N0),
             rng.rand(N1),
             rng.rand(N2)]
    xinit = [xi.astype(floatX) for xi in xinit]
    newgrads = minres.minres(
        lambda xw, xv, xa, xb, xc: natural.compute_Lx(vv, gg, hh, xw, xv, xa, xb, xc),
        [dw, dv, da, db, dc],
        rtol=1e-5,
        damp=0.,
        maxiter=10000,
        xinit=xinit,
        profile=0)[0]
    f = theano.function([], newgrads)
    [new_dw, new_dv, new_da, new_db, new_dc] = f()
    numpy.testing.assert_almost_equal(Linv_x_w, new_dw, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_v, new_dv, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_a, new_da, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_b, new_db, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_c, new_dc, decimal=1)
Author: gdesjardins | Project: DBM | Lines: 35 | Source: test_natural.py
Example 13: plotUpdate
def plotUpdate(self, updates):
    '''
    >>>get update info of each layer
    >>>type updates: dict
    >>>para updates: update dictionary
    '''
    maxdict = T.zeros(shape=(self.deep * 2 + 1,))
    mindict = T.zeros(shape=(self.deep * 2 + 1,))
    meandict = T.zeros(shape=(self.deep * 2 + 1,))
    for i in xrange(self.deep):
        updw = updates[self.layers[i].w] - self.layers[i].w
        maxdict = T.set_subtensor(maxdict[2 * i], T.max(updw))
        mindict = T.set_subtensor(mindict[2 * i], T.min(updw))
        meandict = T.set_subtensor(meandict[2 * i], T.mean(updw))
        updb = updates[self.layers[i].b] - self.layers[i].b
        maxdict = T.set_subtensor(maxdict[2 * i + 1], T.max(updb))
        mindict = T.set_subtensor(mindict[2 * i + 1], T.min(updb))
        meandict = T.set_subtensor(meandict[2 * i + 1], T.mean(updb))
    updw = updates[self.classifier.w] - self.classifier.w
    maxdict = T.set_subtensor(maxdict[self.deep * 2], T.max(updw))
    mindict = T.set_subtensor(mindict[self.deep * 2], T.min(updw))
    meandict = T.set_subtensor(meandict[self.deep * 2], T.mean(updw))
    return [maxdict, mindict, meandict]
Author: wolfhu | Project: RCNNSentence | Lines: 25 | Source: dcnnModel.py
Example 14: negative_log_likelihood
def negative_log_likelihood(self, y):
    """ Return the mean of the negative log-likelihood of the prediction
    of this model under a given target distribution.

    .. math::

        \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
        \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
            \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
        \ell (\theta=\{W,b\}, \mathcal{D})

    :type y: theano.tensor.TensorType
    :param y: corresponds to a vector that gives for each example the
              correct label

    Note: we use the mean instead of the sum so that
    the learning rate is less dependent on the batch size
    """
    # y.shape[0] is (symbolically) the number of rows in y, i.e., the
    # number of examples (call it n) in the minibatch.
    # T.arange(y.shape[0]) is a symbolic vector which will contain
    # [0, 1, 2, ..., n-1]. T.log(self.p_y_given_x) is a matrix of
    # log-probabilities (call it LP) with one row per example and one
    # column per class. LP[T.arange(y.shape[0]), y] is a vector v
    # containing [LP[0, y[0]], LP[1, y[1]], ..., LP[n-1, y[n-1]]], and
    # T.mean(LP[T.arange(y.shape[0]), y]) is the mean (across minibatch
    # examples) of the elements in v, i.e., the mean log-likelihood
    # across the minibatch.
    if self.is_binary:
        # binary targets: average over the whole log-probability matrix
        return -T.mean(T.log(self.p_y_given_x))
    return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
Author: caglar | Project: prmlp | Lines: 29 | Source: prmlp.py
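The indexing trick spelled out in Example 14's comments can be checked on its own; this sketch feeds concrete probabilities instead of a trained model's p_y_given_x:

import numpy as np
import theano
import theano.tensor as T

p_y_given_x = T.matrix('p_y_given_x')  # one row per example, one column per class
y = T.ivector('y')                     # integer class labels

# pick out log P(Y = y_i | x_i) for each row i, then average over the minibatch
nll = -T.mean(T.log(p_y_given_x)[T.arange(y.shape[0]), y])

f = theano.function([p_y_given_x, y], nll)
probs = np.asarray([[0.7, 0.2, 0.1],
                    [0.1, 0.8, 0.1]], dtype=theano.config.floatX)
labels = np.asarray([0, 1], dtype='int32')
print(f(probs, labels))  # -(log 0.7 + log 0.8) / 2, roughly 0.290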
Example 15: stddev_bias
def stddev_bias(x, eps, axis=0):
    mu = T.mean(x + eps, axis=axis)
    mu.name = "std_mean"
    var = T.mean((x - mu) ** 2 + eps)
    var.name = "std_variance"
    stddev = T.sqrt(var)
    return stddev
Author: LeonBai | Project: lisa_emotiw-1 | Lines: 7 | Source: utils.py
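A small sanity check of stddev_bias above (again assuming the function is in scope). Note that var is averaged over all elements rather than along axis, so the result is a single scalar:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
f = theano.function([x], stddev_bias(x, eps=1e-8))

data = np.random.randn(1000, 4).astype(theano.config.floatX)
print(f(data))  # close to 1.0 for standard-normal input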
Example 16: get_cost_updates
def get_cost_updates(self, contraction_level, learning_rate):
    """ This function computes the cost and the updates for one training
    step of the cA """
    y = self.get_hidden_values(self.x)
    z = self.get_reconstructed_input(y)
    J = self.get_jacobian(y, self.W)
    # note: we sum over the size of a datapoint; if we are using
    # minibatches, L will be a vector, with one entry per
    # example in the minibatch
    self.L_rec = - T.sum(self.x * T.log(z) +
                         (1 - self.x) * T.log(1 - z),
                         axis=1)
    # Compute the jacobian and average over the number of samples/minibatch
    self.L_jacob = T.sum(J ** 2) // self.n_batchsize
    # note: L is now a vector, where each element is the
    # cross-entropy cost of the reconstruction of the
    # corresponding example of the minibatch. We need to
    # compute the average of all these to get the cost of
    # the minibatch
    cost = T.mean(self.L_rec) + contraction_level * T.mean(self.L_jacob)
    # compute the gradients of the cost of the `cA` with respect
    # to its parameters
    gparams = T.grad(cost, self.params)
    # generate the list of updates
    updates = []
    for param, gparam in zip(self.params, gparams):
        updates.append((param, param - learning_rate * gparam))
    return (cost, updates)
Author: 2php | Project: DeepLearningTutorials | Lines: 33 | Source: cA.py
Example 17: error_classification
def error_classification(self, target):
    output, updates = theano.scan(fn=lambda a: T.nnet.softmax(a),
                                  sequences=[self.output])
    y = T.mean(output, 0)
    self.y_pred = T.argmax(y, axis=1)
    label = T.argmax(target, axis=1)
    return T.mean(T.neq(self.y_pred, label))
Author: futoshi-futami | Project: GP-and-GPLVM | Lines: 7 | Source: kernel_layer_rff.py
Example 18: create_learn_function
def create_learn_function(self):
    losses = sum([c.get_lin_losses() for c in self.clauses if True or c.has_free_argument], [])
    ws = []
    if any([p.arity == 1 for p in self.predicates.values()]):
        ws += [l.sum_layer.w for l in cortex.hidden_x.layers]
        ws += [l.sum_layer.b for l in cortex.hidden_x.layers]
    if any([p.arity == 2 for p in self.predicates.values()]):
        ws += [l.sum_layer.w for l in cortex.hidden_xy.layers]
        ws += [l.sum_layer.b for l in cortex.hidden_xy.layers]
    for p in self.predicates.values():
        ws.append(p.out_layer.w)
        ws.append(p.out_layer.b)
    alpha = theano.tensor.fscalar()
    regularisation = alpha * tensor.mean([tensor.mean(w ** 2) for w in ws])
    ws += [self.constant_representations]
    do_update = theano.tensor.bscalar()
    rp = net3.Momentum(ws, do_update * (tensor.mean(losses) + regularisation))
    updates, lr = rp.get_updates()
    self.rp = rp
    self.learn_function = function([lr, alpha, do_update], tensor.mean(losses),
                                   updates=updates, on_unused_input="ignore")
Author: wuhu | Project: pll | Lines: 26 | Source: pl4.py
Example 19: compile
def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
    self.optimizer = optimizers.get(optimizer)
    self.loss = objectives.get(loss)
    weighted_loss = weighted_objective(objectives.get(loss))
    # input of model
    self.X_train = self.get_input(train=True)
    self.X_test = self.get_input(train=False)
    self.y_train = self.get_output(train=True)
    self.y_test = self.get_output(train=False)
    # target of model
    self.y = T.zeros_like(self.y_train)
    self.weights = T.ones_like(self.y_train)
    train_loss = weighted_loss(self.y, self.y_train, self.weights)
    test_loss = weighted_loss(self.y, self.y_test, self.weights)
    train_loss.name = 'train_loss'
    test_loss.name = 'test_loss'
    self.y.name = 'y'
    if class_mode == "categorical":
        train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
        test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))
    elif class_mode == "binary":
        train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
        test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
    else:
        raise Exception("Invalid class mode:" + str(class_mode))
    self.class_mode = class_mode
    self.theano_mode = theano_mode
    for r in self.regularizers:
        train_loss = r(train_loss)
    updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
    if type(self.X_train) == list:
        train_ins = self.X_train + [self.y, self.weights]
        test_ins = self.X_test + [self.y, self.weights]
        predict_ins = self.X_test
    else:
        train_ins = [self.X_train, self.y, self.weights]
        test_ins = [self.X_test, self.y, self.weights]
        predict_ins = [self.X_test]
    self._train = theano.function(train_ins, train_loss,
                                  updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
                                           updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._predict = theano.function(predict_ins, self.y_test,
                                    allow_input_downcast=True, mode=theano_mode)
    self._test = theano.function(test_ins, test_loss,
                                 allow_input_downcast=True, mode=theano_mode)
    self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
                                          allow_input_downcast=True, mode=theano_mode)
Author: 0xa-saline | Project: CAPTCHA-breaking | Lines: 60 | Source: models.py
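The categorical accuracy expression used in compile above can be reproduced with free-standing variables; the one-hot targets and probabilities below are invented for illustration:

import numpy as np
import theano
import theano.tensor as T

y_true = T.matrix('y_true')  # one-hot targets
y_pred = T.matrix('y_pred')  # predicted class probabilities

# compare argmax indices; T.eq yields a 0/1 vector and T.mean turns it
# into the fraction of correct predictions
accuracy = T.mean(T.eq(T.argmax(y_true, axis=-1), T.argmax(y_pred, axis=-1)))

f = theano.function([y_true, y_pred], accuracy)
t = np.asarray([[1, 0], [0, 1], [1, 0]], dtype=theano.config.floatX)
p = np.asarray([[0.9, 0.1], [0.2, 0.8], [0.4, 0.6]], dtype=theano.config.floatX)
print(f(t, p))  # 2 of 3 rows agree -> ~0.667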
Example 20: finetune_cost_updates
def finetune_cost_updates(self, center, mu, learning_rate):
    """ This function computes the cost and the updates. """
    # note: we sum over the size of a datapoint; if we are using
    # minibatches, L will be a vector, with one entry per
    # example in the minibatch
    network_output = self.get_output()
    temp = T.pow(center - network_output, 2)
    L = T.sum(temp, axis=1)
    # Add the network reconstruction error
    z = self.get_network_reconst()
    reconst_err = T.sum(T.pow(self.x - z, 2), axis=1)
    L = self.beta * L + self.lbd * reconst_err
    cost1 = T.mean(L)
    cost2 = self.lbd * T.mean(reconst_err)
    cost3 = cost1 - cost2
    # compute the gradients of the cost of the `dA` with respect
    # to its parameters
    gparams = T.grad(cost1, self.params)
    # generate the list of updates
    updates = []
    grad_values = []
    param_norm = []
    for param, delta, gparam in zip(self.params, self.delta, gparams):
        updates.append((delta, mu * delta - learning_rate * gparam))
        updates.append((param, param + mu * mu * delta - (1 + mu) * learning_rate * gparam))
        grad_values.append(gparam.norm(L=2))
        param_norm.append(param.norm(L=2))
    grad_ = T.stack(*grad_values)
    param_ = T.stack(*param_norm)
    return ((cost1, cost2, cost3, grad_, param_), updates)
Author: WenjunJiang | Project: DCN | Lines: 35 | Source: multi_layer_km.py
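The momentum update pair in Example 20 (a velocity delta plus a look-ahead parameter step) can be isolated into a toy update. This is only a sketch with an externally supplied gradient, not the author's training loop:

import numpy as np
import theano
import theano.tensor as T

w = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='w')
delta = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='delta')
g = T.vector('g')  # gradient fed in from outside, for illustration
lr, mu = 0.1, 0.9

# both updates read the *old* delta, exactly as in finetune_cost_updates
updates = [
    (delta, mu * delta - lr * g),
    (w, w + mu * mu * delta - (1 + mu) * lr * g),
]
step = theano.function([g], [], updates=updates)

step(np.ones(3, dtype=theano.config.floatX))
print(w.get_value())  # first step moves each entry by -(1 + mu) * lr = -0.19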
Note: the theano.tensor.mean function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.