This article collects typical usage examples of the Python function theano.tensor.argmax. If you have been wondering what argmax does, how to call it, or where it shows up in real code, the hand-picked examples below should help.
The 20 argmax code examples shown below are sorted by popularity by default. You can upvote the ones you find useful; your votes help the site recommend better Python examples.
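Before diving into the project code, here is a minimal standalone sketch (not taken from any of the projects below) of what T.argmax builds: a symbolic graph node that, once compiled, returns the index of the maximum value along a given axis.

import numpy as np
import theano
import theano.tensor as T

x = T.dmatrix('x')                              # symbolic 2-D input
f = theano.function([x], T.argmax(x, axis=1))   # index of the row-wise maximum

probs = np.array([[0.1, 0.7, 0.2],
                  [0.5, 0.3, 0.2]])
print(f(probs))                                 # -> [1 0]

With axis omitted (axis=None), T.argmax flattens the input and returns a single index, mirroring numpy.argmax.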
Example 1: __init__
def __init__(self, input, input_dim, hidden_dim, output_dim,
             activation=T.tanh, init='uniform', inner_init='orthonormal',
             mini_batch=False, params=None):
    self.activation = activation
    self.mini_batch = mini_batch
    if mini_batch:
        input = input.dimshuffle(1, 0, 2)
    if params is None:
        self.W = theano.shared(value=get(identifier=init, shape=(input_dim, hidden_dim)),
                               name='W',
                               borrow=True)
        self.U = theano.shared(value=get(identifier=inner_init, shape=(hidden_dim, hidden_dim)),
                               name='U',
                               borrow=True)
        self.V = theano.shared(value=get(identifier=init, shape=(hidden_dim, output_dim)),
                               name='V',
                               borrow=True)
        self.bh = theano.shared(value=get(identifier='zero', shape=(hidden_dim, )),
                                name='bh',
                                borrow=True)
        self.by = theano.shared(value=get(identifier='zero', shape=(output_dim, )),
                                name='by',
                                borrow=True)
    else:
        self.W, self.U, self.V, self.bh, self.by = params

    self.h0 = theano.shared(value=get(identifier='zero', shape=(hidden_dim, )), name='h0', borrow=True)
    self.params = [self.W, self.U, self.V, self.bh, self.by]

    if mini_batch:
        def recurrence(x_t, h_tm_prev):
            h_t = activation(T.dot(x_t, self.W) +
                             T.dot(h_tm_prev, self.U) + self.bh)
            y_t = T.nnet.softmax(T.dot(h_t, self.V) + self.by)
            return h_t, y_t

        [self.h_t, self.y_t], _ = theano.scan(
            recurrence,
            sequences=input,
            outputs_info=[T.alloc(self.h0, input.shape[1], hidden_dim), None]
        )
        self.h_t = self.h_t.dimshuffle(1, 0, 2)
        self.y_t = self.y_t.dimshuffle(1, 0, 2)
        self.y = T.argmax(self.y_t, axis=2)
    else:
        def recurrence(x_t, h_tm_prev):
            h_t = activation(T.dot(x_t, self.W) +
                             T.dot(h_tm_prev, self.U) + self.bh)
            y_t = T.nnet.softmax(T.dot(h_t, self.V) + self.by)
            return h_t, y_t[0]

        [self.h_t, self.y_t], _ = theano.scan(
            recurrence,
            sequences=input,
            outputs_info=[self.h0, None]
        )
        self.y = T.argmax(self.y_t, axis=1)
Developer: uyaseen | Project: theano-recurrence | Lines: 60 | Source: rnn.py
Example 2: get_monitoring_channels
def get_monitoring_channels(self, model, X, Y=None):
    rval = OrderedDict()
    history = model.mf(X, return_history=True)
    q = history[-1]

    if self.supervised:
        assert Y is not None
        Y_hat = q[-1]
        true = T.argmax(Y, axis=1)
        pred = T.argmax(Y_hat, axis=1)
        # true = Print('true')(true)
        # pred = Print('pred')(pred)
        wrong = T.neq(true, pred)
        err = T.cast(wrong.mean(), X.dtype)
        rval['misclass'] = err

        if len(model.hidden_layers) > 1:
            q = model.mf(X, Y=Y)
            pen = model.hidden_layers[-2].upward_state(q[-2])
            Y_recons = model.hidden_layers[-1].mf_update(state_below=pen)
            pred = T.argmax(Y_recons, axis=1)
            wrong = T.neq(true, pred)
            rval['recons_misclass'] = T.cast(wrong.mean(), X.dtype)

    return rval
Developer: dnouri | Project: pylearn2 | Lines: 30 | Source: dbm.py
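The argmax-then-neq pattern used above is the standard recipe for turning one-hot targets and predicted probabilities into a misclassification rate. A minimal self-contained sketch of just that pattern, with made-up data (not from pylearn2):

import numpy as np
import theano
import theano.tensor as T

Y = T.dmatrix('Y')          # one-hot targets, shape (batch, classes)
Y_hat = T.dmatrix('Y_hat')  # predicted class probabilities, same shape
wrong = T.neq(T.argmax(Y, axis=1), T.argmax(Y_hat, axis=1))
misclass = theano.function([Y, Y_hat], wrong.mean())

targets = np.eye(3)[[0, 1, 2]]              # three one-hot rows
preds = np.array([[0.8, 0.1, 0.1],
                  [0.2, 0.7, 0.1],
                  [0.6, 0.3, 0.1]])          # last row predicts the wrong class
print(misclass(targets, preds))              # -> 0.333...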
Example 3: compile
def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
    self.optimizer = optimizers.get(optimizer)
    self.loss = objectives.get(loss)
    weighted_loss = weighted_objective(objectives.get(loss))

    # input of model
    self.X_train = self.get_input(train=True)
    self.X_test = self.get_input(train=False)

    self.y_train = self.get_output(train=True)
    self.y_test = self.get_output(train=False)

    # target of model
    self.y = T.zeros_like(self.y_train)
    self.weights = T.ones_like(self.y_train)

    train_loss = weighted_loss(self.y, self.y_train, self.weights)
    test_loss = weighted_loss(self.y, self.y_test, self.weights)

    train_loss.name = 'train_loss'
    test_loss.name = 'test_loss'
    self.y.name = 'y'

    if class_mode == "categorical":
        train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
        test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))
    elif class_mode == "binary":
        train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
        test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
    else:
        raise Exception("Invalid class mode: " + str(class_mode))
    self.class_mode = class_mode
    self.theano_mode = theano_mode

    for r in self.regularizers:
        train_loss = r(train_loss)
    updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)

    if type(self.X_train) == list:
        train_ins = self.X_train + [self.y, self.weights]
        test_ins = self.X_test + [self.y, self.weights]
        predict_ins = self.X_test
    else:
        train_ins = [self.X_train, self.y, self.weights]
        test_ins = [self.X_test, self.y, self.weights]
        predict_ins = [self.X_test]

    self._train = theano.function(train_ins, train_loss,
                                  updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
                                           updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._predict = theano.function(predict_ins, self.y_test,
                                    allow_input_downcast=True, mode=theano_mode)
    self._test = theano.function(test_ins, test_loss,
                                 allow_input_downcast=True, mode=theano_mode)
    self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
                                          allow_input_downcast=True, mode=theano_mode)
Developer: 0xa-saline | Project: CAPTCHA-breaking | Lines: 60 | Source: models.py
Example 4: __call__
def __call__(self, model, X, Y):
    y_hat = model.fprop(X)
    y_hat = T.argmax(y_hat, axis=1)
    y = T.argmax(Y, axis=1)
    misclass = T.neq(y, y_hat).mean()
    misclass = T.cast(misclass, config.floatX)
    return misclass
Developer: sdmassey27 | Project: pylearn2 | Lines: 7 | Source: misclassification_nodropout.py
Example 5: get_classification_accuracy
def get_classification_accuracy(self, model, minibatch, target):
    patches = []
    patches.append(minibatch[:, :42, :42])
    patches.append(minibatch[:, 6:, :42])
    patches.append(minibatch[:, 6:, 6:])
    patches.append(minibatch[:, :42, 6:])
    patches.append(minibatch[:, 3:45, 3:45])
    """for i in xrange(5):
        mirror_patch = []
        for j in xrange(42):
            mirror_patch.append(patches[i][:,:,42-(j+1):42-j])
        patches.append(T.concatenate(mirror_patch,axis=2))"""
    """for patch in patches:
        Y_list.append(model.fprop(patch, apply_dropout=False))
    Y = T.mean(T.stack(Y_list), axis=(1,2))"""
    Y = model.fprop(patches[-1], apply_dropout=False)
    i = 1
    for patch in patches[:-1]:
        Y = Y + model.fprop(patch, apply_dropout=False)
        i += 1
    print i
    Y = Y / float(i)
    return T.mean(T.cast(T.eq(T.argmax(Y, axis=1),
                              T.argmax(target, axis=1)), dtype='int32'),
                  dtype=config.floatX)
Developer: nicholas-leonard | Project: ift6266 | Lines: 28 | Source: main2.py
Example 6: accuracy_metric
def accuracy_metric(y_pred, y_true, void_labels, one_hot=False):
    assert (y_pred.ndim == 2) or (y_pred.ndim == 1)

    # y_pred to indices
    if y_pred.ndim == 2:
        y_pred = T.argmax(y_pred, axis=1)
    if one_hot:
        y_true = T.argmax(y_true, axis=1)

    # Compute accuracy
    acc = T.eq(y_pred, y_true).astype(_FLOATX)

    # Create mask
    mask = T.ones_like(y_true, dtype=_FLOATX)
    for el in void_labels:
        indices = T.eq(y_true, el).nonzero()
        if any(indices):
            mask = T.set_subtensor(mask[indices], 0.)

    # Apply mask
    acc *= mask
    acc = T.sum(acc) / T.sum(mask)
    return acc
Developer: lisa-lab | Project: DeepLearningTutorials | Lines: 26 | Source: train_unet.py
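As a quick numeric illustration of the masking logic above, here is a standalone sketch with a hypothetical void label of 255 and _FLOATX replaced by an explicit 'float64'; the void pixel is excluded from both the numerator and the denominator:

import numpy as np
import theano
import theano.tensor as T

y_pred = T.ivector('y_pred')   # already argmax-ed predictions
y_true = T.ivector('y_true')
void = 255                     # hypothetical void label

acc = T.eq(y_pred, y_true).astype('float64')
mask = T.ones_like(y_true, dtype='float64')
mask = T.set_subtensor(mask[T.eq(y_true, void).nonzero()], 0.)
masked_acc = T.sum(acc * mask) / T.sum(mask)

f = theano.function([y_pred, y_true], masked_acc)
print(f(np.array([0, 1, 2, 1], dtype='int32'),
        np.array([0, 1, 255, 0], dtype='int32')))  # void pixel ignored -> 2/3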
Example 7: get_monitoring_channels
def get_monitoring_channels(self, model, data, **kwargs):
    X_pure, Y_pure = data
    X_pure.tag.test_value = numpy.random.random(size=[5, 784]).astype('float32')
    Y_pure.tag.test_value = numpy.random.randint(10, size=[5, 1]).astype('int64')
    rval = OrderedDict()

    g = model.compressor
    d = model.discriminator

    yhat_pure = T.argmax(d.fprop(X_pure), axis=1).dimshuffle(0, 'x')
    yhat_reconstructed = T.argmax(d.fprop(g.reconstruct(X_pure)), axis=1).dimshuffle(0, 'x')

    rval['conviction_pure'] = T.cast(T.eq(yhat_pure, 10).mean(), 'float32')
    rval['accuracy_pure'] = T.cast(T.eq(yhat_pure, Y_pure).mean(), 'float32')
    rval['inaccuracy_pure'] = 1 - rval['conviction_pure'] - rval['accuracy_pure']

    rval['conviction_fake'] = T.cast(T.eq(yhat_reconstructed, 10).mean(), 'float32')
    rval['accuracy_fake'] = T.cast(T.eq(yhat_reconstructed, Y_pure).mean(), 'float32')
    rval['inaccuracy_fake'] = 1 - rval['conviction_fake'] - rval['accuracy_fake']

    rval['discernment_pure'] = rval['accuracy_pure'] + rval['inaccuracy_pure']
    rval['discernment_fake'] = rval['conviction_fake']
    rval['discernment'] = 0.5 * (rval['discernment_pure'] + rval['discernment_fake'])

    # y = T.alloc(0., m, 1)
    d_obj, g_obj = self.get_objectives(model, data)
    rval['objective_d'] = d_obj
    rval['objective_g'] = g_obj

    # monitor probability of true
    # rval['now_train_compressor'] = self.now_train_compressor
    return rval
Developer: vinmisra | Project: adversary-compress | Lines: 33 | Source: CAN.py
Example 8: init_model
def init_model(self):
    print('Initializing model...')
    ra_input_var = T.tensor3('raw_audio_input')
    mc_input_var = T.tensor3('melody_contour_input')
    target_var = T.imatrix('targets')
    network = self.build_network(ra_input_var, mc_input_var)
    prediction = layers.get_output(network)
    prediction = T.clip(prediction, 1e-7, 1.0 - 1e-7)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.sgd(loss, params, learning_rate=0.02)

    test_prediction = layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), T.argmax(target_var, axis=1)),
                      dtype=theano.config.floatX)

    print('Building functions...')
    self.train_fn = theano.function([ra_input_var, mc_input_var, target_var],
                                    [loss, prediction],
                                    updates=updates,
                                    on_unused_input='ignore')
    self.val_fn = theano.function([ra_input_var, mc_input_var, target_var],
                                  [test_loss, test_acc, test_prediction],
                                  on_unused_input='ignore')
    self.run_fn = theano.function([ra_input_var, mc_input_var],
                                  [prediction],
                                  on_unused_input='ignore')
Developer: srviest | Project: SoloLa- | Lines: 31 | Source: models.py
Example 9: construct_common_graph
def construct_common_graph(situation, args, outputs, dummy_states, Wy, by, y):
    ytilde = T.dot(outputs["h"], Wy) + by
    yhat = softmax_lastaxis(ytilde)

    errors = T.neq(T.argmax(y, axis=y.ndim - 1),
                   T.argmax(yhat, axis=yhat.ndim - 1))
    cross_entropies = crossentropy_lastaxes(yhat, y)

    error_rate = errors.mean().copy(name="error_rate")
    cross_entropy = cross_entropies.mean().copy(name="cross_entropy")
    cost = cross_entropy.copy(name="cost")

    graph = ComputationGraph([cost, cross_entropy, error_rate])

    state_grads = dict((k, T.grad(cost, v))
                       for k, v in dummy_states.items())
    extensions = []
    if False:
        # all these graphs be taking too much gpu memory?
        extensions.append(
            DumpVariables("%s_hiddens" % situation, graph.inputs,
                          [v.copy(name="%s%s" % (k, suffix))
                           for suffix, things in [("", outputs), ("_grad", state_grads)]
                           for k, v in things.items()],
                          batch=next(get_stream(which_set="train",
                                                batch_size=args.batch_size,
                                                num_examples=args.batch_size,
                                                length=args.length)
                                     .get_epoch_iterator(as_dict=True)),
                          before_training=True, every_n_epochs=10))
    return graph, extensions
Developer: EricDoug | Project: recurrent-batch-normalization | Lines: 32 | Source: penntreebank.py
Example 10: get_cost_test
def get_cost_test(self, inputs):
    image_input, label_input = inputs
    prob_ys_given_x = self.classifier.get_output_for(self.classifier_helper.get_output_for(image_input))
    cost_test = objectives.categorical_crossentropy(prob_ys_given_x, label_input)
    cost_acc = T.eq(T.argmax(prob_ys_given_x, axis=1), T.argmax(label_input, axis=1))
    return cost_test.mean(), cost_acc.mean()
Developer: wead-hsu | Project: semi-vae | Lines: 7 | Source: semi_vae.py
Example 11: __theano__softmax
def __theano__softmax(self, inp, dim=None, predict=False, issequence=False):
    if dim is None:
        assert issequence, "Data dimensionality could not be parsed."
        dim = 2

    # FFD for dimensions 1 and 2
    if dim == 1 or dim == 2:
        # Using the numerically stable implementation (along the channel axis):
        ex = T.exp(inp - T.max(inp, axis=1, keepdims=True))
        y = ex / T.sum(ex, axis=1, keepdims=True)
        # Predicted class indices (argmax) for prediction
        if predict:
            y = T.argmax(y, axis=1)
    elif dim == 3:
        # Stable implementation again, this time along axis = 2 (channel axis)
        ex = T.exp(inp - T.max(inp, axis=2, keepdims=True))
        y = ex / T.sum(ex, axis=2, keepdims=True)
        # Predicted class indices (argmax) for prediction
        if predict:
            y = T.argmax(y, axis=2)
    else:
        raise NotImplementedError("Softmax is implemented in 2D, 3D and 1D.")

    return y
Developer: abailoni | Project: greedy_CNN | Lines: 29 | Source: backend.py
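Because softmax is strictly monotonic within each row, taking the argmax of the softmax output gives the same indices as taking the argmax of the raw input, so for pure prediction the exponentials are not strictly needed. A standalone sketch verifying this equivalence:

import numpy as np
import theano
import theano.tensor as T

inp = T.dmatrix('inp')
ex = T.exp(inp - T.max(inp, axis=1, keepdims=True))   # numerically stable softmax
sm = ex / T.sum(ex, axis=1, keepdims=True)
f = theano.function([inp], [T.argmax(sm, axis=1), T.argmax(inp, axis=1)])

logits = np.random.randn(4, 5)
a, b = f(logits)
assert (a == b).all()   # softmax never changes the argmax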
Example 12: train_model
def train_model(model, dataset):
    # train the lstm on our dataset!
    # let's monitor the error %
    # output is in shape (n_timesteps, n_sequences, data_dim)
    # calculate the mean prediction error over timesteps and batches
    predictions = T.argmax(model.get_outputs(), axis=2)
    actual = T.argmax(model.get_targets()[0].dimshuffle(1, 0, 2), axis=2)
    char_error = T.mean(T.neq(predictions, actual))

    # optimizer - RMSProp generally good for recurrent nets, lr taken from Karpathy's char-rnn project.
    # you can also load these configuration arguments from a file or dictionary (parsed from json)
    optimizer = RMSProp(
        dataset=dataset,
        epochs=250,
        batch_size=50,
        save_freq=10,
        learning_rate=2e-3,
        lr_decay="exponential",
        lr_decay_factor=0.97,
        decay=0.95,
        grad_clip=None,
        hard_clip=False
    )

    # monitors
    char_errors = Monitor(name='char_error', expression=char_error, train=True, valid=True, test=True)

    model.train(optimizer=optimizer, monitor_channels=[char_errors])
Developer: mbeissinger | Project: ner | Lines: 28 | Source: example_production_steps.py
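The only twist for sequence models like the one above is the extra time axis: with outputs shaped (n_timesteps, n_sequences, data_dim), axis=2 collapses the class dimension and leaves a (time, batch) matrix of predicted symbol indices. A standalone shape sketch:

import numpy as np
import theano
import theano.tensor as T

out = T.dtensor3('out')                     # (n_timesteps, n_sequences, data_dim)
pred = theano.function([out], T.argmax(out, axis=2))

batch = np.random.rand(7, 4, 30)            # 7 steps, 4 sequences, 30 symbols
print(pred(batch).shape)                    # -> (7, 4)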
Example 13: compile
def compile(self, optimizer, loss, class_mode='categorical'):
    self.optimizer = optimizer
    self.loss = objectives.get(loss)

    self.X_train = self.get_input()      # symbolic variable
    self.y_train = self.get_output()     # symbolic variable
    self.y = T.zeros_like(self.y_train)  # symbolic variable

    train_loss = self.loss(self.y, self.y_train)

    if class_mode == 'categorical':
        train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
    elif class_mode == 'binary':
        train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
    else:
        raise Exception("Invalid class mode: " + str(class_mode))
    self.class_mode = class_mode

    # updates = self.optimizer.get_updates(train_loss, self.params)
    self.grad = T.grad(cost=train_loss, wrt=self.params, disconnected_inputs='raise')
    updates = []
    for p, g in zip(self.params, self.grad):
        updates.append((p, p - random.uniform(-0.3, 1)))

    if type(self.X_train) == list:
        train_ins = self.X_train + [self.y]
    else:
        train_ins = [self.X_train, self.y]

    self._train = theano.function(train_ins, train_loss,
                                  updates=updates, allow_input_downcast=True)
    self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
                                           updates=updates, allow_input_downcast=True)
Developer: punitshah11 | Project: diabetic_retinopathy | Lines: 34 | Source: core.py
Example 14: create_iter_functions
def create_iter_functions(data, output_layer):
    X_batch = T.matrix('x')
    Y_batch = T.ivector('y')
    trans = T.matrix('trans')
    transmap = T.ivector('transmap')
    objective = lasagne.objectives.Objective(output_layer, loss_function=lasagne.objectives.categorical_crossentropy)
    all_params = lasagne.layers.get_all_params(output_layer)
    loss_train = objective.get_loss(X_batch, target=Y_batch)

    pred48 = T.argmax(T.dot(lasagne.layers.get_output(output_layer, X_batch, deterministic=True), trans), axis=1)
    pred1943 = T.argmax(lasagne.layers.get_output(output_layer, X_batch, deterministic=True), axis=1)
    accuracy48 = T.mean(T.eq(pred48, transmap[Y_batch]), dtype=theano.config.floatX)
    accuracy1943 = T.mean(T.eq(pred1943, Y_batch), dtype=theano.config.floatX)

    updates = lasagne.updates.rmsprop(loss_train, all_params, LEARNING_RATE)

    iter_train = theano.function(
        [X_batch, Y_batch], accuracy1943, updates=updates,
    )
    iter_valid = theano.function(
        [X_batch, Y_batch], accuracy48,
        givens={
            trans: data['trans'],
            transmap: data['transmap']
        }
    )
    return {"train": iter_train, "valid": iter_valid}
Developer: We-can-apply-GPU | Project: aMeLiDoSu-Final | Lines: 33 | Source: train.py
Example 15: learningstep_m1
def learningstep_m1(self, Y, L, M, W, epsilon):
    """Perform a single learning step.

    This is a faster learning step for the case of
    mini-batch-size = 1.

    Keyword arguments:
    the keyword arguments must be the same as given in
    self.input_parameters(mode) for mode='train'.
    """
    # Input integration:
    I = T.dot(T.log(W), Y)
    # Recurrent term:
    vM = theano.ifelse.ifelse(
        T.eq(L, -1),  # if no label is provided
        T.sum(M, axis=0),
        M[L, :]
    )
    # Numeric trick to prevent overflow in the exp-function:
    max_exponent = 88. - T.log(I.shape[0]).astype('float32')
    scale = theano.ifelse.ifelse(T.gt(I[T.argmax(I)], max_exponent),
                                 I[T.argmax(I)] - max_exponent, 0.)
    # Activation: recurrent softmax with overflow protection
    s = vM * T.exp(I - scale) / T.sum(vM * T.exp(I - scale))
    s.name = 's_%d.%d[t]' % (self._nmultilayer, self._nlayer)
    # Weight update
    W_new = W + epsilon * (T.outer(s, Y) - s[:, np.newaxis] * W)
    W_new.name = 'W_%d.%d[t]' % (self._nmultilayer, self._nlayer)
    return s, W_new
Developer: smajida | Project: NeSi | Lines: 29 | Source: poisson_theano_scan.py
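Note that I[T.argmax(I)] in the overflow guard above is simply the maximum of the vector; T.max(I) computes the same value in a single graph node. A tiny standalone sketch confirming the equivalence:

import numpy as np
import theano
import theano.tensor as T

I = T.dvector('I')
f = theano.function([I], [I[T.argmax(I)], T.max(I)])

v = np.array([0.3, 2.5, -1.0])
a, b = f(v)
assert a == b == 2.5   # indexing by argmax recovers the max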
Example 16: test
def test(self):
    pred_batch = share(np.reshape(np.array([0, 0.2, 0.8, 0, 0.6, 0.4]), (2, 3)))
    tg_batch = share(np.reshape(np.array([0, 0, 1, 0, 0, 1]), (2, 3)))
    a = T.argmax(pred_batch, axis=1)
    b = T.argmax(tg_batch, axis=1)
    weights = 1 + 10 * (self.volumes[a] / self.volumes[b]) * (self.n / self.m)
    return -T.mean(weights * T.log(T.sum(pred_batch * tg_batch, axis=1)))
Developer: adbrebs | Project: spynet | Lines: 7 | Source: cost_function.py
Example 17: jaccard_metric
def jaccard_metric(y_pred, y_true, n_classes, one_hot=False):
    assert (y_pred.ndim == 2) or (y_pred.ndim == 1)

    # y_pred to indices
    if y_pred.ndim == 2:
        y_pred = T.argmax(y_pred, axis=1)
    if one_hot:
        y_true = T.argmax(y_true, axis=1)

    # Compute confusion matrix
    # cm = T.nnet.confusion_matrix(y_pred, y_true)
    cm = T.zeros((n_classes, n_classes))
    for i in range(n_classes):
        for j in range(n_classes):
            cm = T.set_subtensor(
                cm[i, j], T.sum(T.eq(y_pred, i) * T.eq(y_true, j)))

    # Compute Jaccard Index
    TP_perclass = T.cast(cm.diagonal(), _FLOATX)
    FP_perclass = cm.sum(1) - TP_perclass
    FN_perclass = cm.sum(0) - TP_perclass

    num = TP_perclass
    denom = TP_perclass + FP_perclass + FN_perclass

    return T.stack([num, denom], axis=0)
Developer: lisa-lab | Project: DeepLearningTutorials | Lines: 28 | Source: train_unet.py
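The function returns the per-class numerator and denominator separately so they can be accumulated across batches before dividing. A small sketch, with made-up accumulated counts, of the final reduction a caller would presumably perform:

import numpy as np

# num_denom: accumulated output of jaccard_metric over all batches,
# shape (2, n_classes): row 0 = TP, row 1 = TP + FP + FN
num_denom = np.array([[50., 30., 20.],
                      [60., 50., 40.]])
jaccard_perclass = num_denom[0] / num_denom[1]
mean_jaccard = jaccard_perclass.mean()
print(jaccard_perclass, mean_jaccard)   # -> [0.833 0.6 0.5] 0.644...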
Example 18: trainer
def trainer(X, Y, alpha, lr, predictions, updates, data, labels):
    data = U.create_shared(data, dtype=np.int8)
    labels = U.create_shared(labels, dtype=np.int8)
    index_start = T.lscalar('start')
    index_end = T.lscalar('end')
    print "Compiling function..."
    train_model = theano.function(
        inputs=[index_start, index_end, alpha, lr],
        outputs=T.mean(T.neq(T.argmax(predictions, axis=1), Y)),
        updates=updates,
        givens={
            X: data[index_start:index_end],
            Y: labels[index_start:index_end]
        }
    )
    test_model = theano.function(
        inputs=[index_start, index_end],
        outputs=T.mean(T.neq(T.argmax(predictions, axis=1), Y)),
        givens={
            X: data[index_start:index_end],
            Y: labels[index_start:index_end]
        }
    )
    print "Done."
    return train_model, test_model
Developer: Niyikiza | Project: rnn-experiment | Lines: 25 | Source: genchar_mult.py
Example 19: nll_simple
def nll_simple(Y, Y_hat,
               cost_mask=None,
               cost_ent_mask=None,
               cost_ent_desc_mask=None):
    probs = Y_hat
    pred = TT.argmax(probs, axis=1).reshape(Y.shape)
    errors = TT.neq(pred, Y)

    ent_errors = None
    if cost_ent_mask is not None:
        pred_ent = TT.argmax(probs * cost_ent_mask.dimshuffle('x', 0),
                             axis=1).reshape(Y.shape)
        ent_errors = TT.neq(pred_ent, Y).mean()

    ent_desc_errors = None
    if cost_ent_desc_mask is not None:
        pred_desc_ent = TT.argmax(probs * cost_ent_desc_mask,
                                  axis=1).reshape(Y.shape)
        ent_desc_errors = TT.neq(pred_desc_ent, Y).mean()

    LL = TT.log(_grab_probs(probs, Y) + 1e-8).reshape(Y.shape)
    if cost_mask is not None:
        total = cost_mask * LL
        errors = cost_mask * errors
        ncosts = TT.sum(cost_mask)
        mean_errors = TT.sum(errors) / ncosts
        ave = -TT.sum(total) / Y.shape[1]
    else:
        mean_errors = TT.mean(errors)
        ave = -TT.sum(LL) / Y.shape[0]

    return ave, mean_errors, ent_errors, ent_desc_errors
Developer: BKJackson | Project: Attentive_reader | Lines: 32 | Source: costs.py
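Multiplying the probabilities by a 0/1 mask before argmax, as in the cost_ent_mask branch above, restricts the prediction to the unmasked classes. A standalone sketch with a hypothetical mask:

import numpy as np
import theano
import theano.tensor as T

probs = T.dmatrix('probs')
mask = T.dvector('mask')                  # 1. for allowed classes, 0. otherwise
restricted = T.argmax(probs * mask.dimshuffle('x', 0), axis=1)
f = theano.function([probs, mask], restricted)

p = np.array([[0.6, 0.3, 0.1]])
m = np.array([0., 1., 1.])                # class 0 is not allowed
print(f(p, m))                            # -> [1], not the unmasked argmax 0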
Example 20: init_process
def init_process(model, gaussian, delta, fn_type):
    print("Building model and compiling functions...")
    # Prepare Theano variables for inputs and targets
    import theano.tensor as T
    input_var_list = [T.tensor4('inputs{}'.format(i))
                      for i in range(scales)]
    target_var = T.imatrix('targets')

    # Create network model
    if model == 'jy':
        print('Building JY CNN...')
        network = JY_cnn(input_var_list, gaussian, delta)
        learning_rate = 0.006
    # elif model == 'fcrnn':
    #     print('Building FCRNN...')
    #     network = FCRNN(input_var_list, delta)
    #     learning_rate = 0.0005

    print('defining loss function')
    prediction = lasagne.layers.get_output(network)
    prediction = T.clip(prediction, 1e-7, 1.0 - 1e-7)
    loss = lasagne.objectives.binary_crossentropy(prediction, target_var)
    loss = loss.mean()

    print('defining update')
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=learning_rate, momentum=0.9)
    # updates = lasagne.updates.adagrad(loss, params, learning_rate=learning_rate)

    print('defining testing method')
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_prediction = T.clip(test_prediction, 1e-7, 1.0 - 1e-7)

    # frame prediction
    layer_list = lasagne.layers.get_all_layers(network)
    gauss_layer = layer_list[-3]
    pre_gauss_layer = layer_list[-4] if gaussian else layer_list[-3]
    gauss_pred = lasagne.layers.get_output(gauss_layer, deterministic=True)
    pre_gauss_pred = lasagne.layers.get_output(pre_gauss_layer, deterministic=True)

    test_loss = lasagne.objectives.binary_crossentropy(test_prediction, target_var)
    test_loss = test_loss.mean()
    test_pred_result = T.argmax(test_prediction, axis=1)
    target_result = T.argmax(target_var, axis=1)
    test_acc = T.mean(T.eq(test_pred_result, target_result),
                      dtype=theano.config.floatX)

    if fn_type == 'train':
        print('compiling training function')
        func = theano.function(input_var_list + [target_var],
                               [loss, prediction, gauss_pred, pre_gauss_pred], updates=updates)
    elif fn_type == 'val' or fn_type == 'test':
        print('compiling validation and testing function')
        func = theano.function(input_var_list + [target_var],
                               [test_loss, test_acc, test_pred_result, test_prediction, gauss_pred, pre_gauss_pred])

    return func, network
Developer: tweihaha | Project: aed-by-cnn | Lines: 60 | Source: aed_class_run.py
Note: The theano.tensor.argmax examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution or use must follow the corresponding project's license. Please do not repost without permission.