本文整理汇总了Python中tensorflow.clip_by_global_norm函数的典型用法代码示例。如果您正苦于以下问题:Python clip_by_global_norm函数的具体用法?Python clip_by_global_norm怎么用?Python clip_by_global_norm使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了clip_by_global_norm函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: _add_shared_train_op
def _add_shared_train_op(self):
    """Sets self._train_op, the op to run for training."""
    hps = self._hps
    # Pick the loss to minimize for the current training mode; when coverage
    # is enabled, the coverage-augmented total loss supersedes the base loss.
    if hps.rl_training or hps.ac_training:
        loss_to_minimize = self._reinforce_shared_loss
        if hps.coverage:
            loss_to_minimize = self._reinforce_cov_total_loss
    else:
        loss_to_minimize = self._pgen_loss
        if hps.coverage:
            loss_to_minimize = self._pointer_cov_total_loss

    # Gradients of the trainable variables w.r.t. the selected loss.
    trainable = tf.trainable_variables()
    raw_grads = tf.gradients(
        loss_to_minimize, trainable,
        aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)

    # Clip the gradients by global norm on the designated GPU.
    with tf.device("/gpu:{}".format(hps.gpu_num)):
        grads, global_norm = tf.clip_by_global_norm(raw_grads, hps.max_grad_norm)

    # Track the pre-clip global gradient norm in TensorBoard.
    tf.summary.scalar('global_norm', global_norm)

    # Apply the clipped gradients with Adagrad.
    optimizer = tf.train.AdagradOptimizer(
        hps.lr, initial_accumulator_value=hps.adagrad_init_acc)
    with tf.device("/gpu:{}".format(hps.gpu_num)):
        self._shared_train_op = optimizer.apply_gradients(
            zip(grads, trainable),
            global_step=self.global_step,
            name='train_step')
开发者ID:sra4077,项目名称:RLSeq2Seq,代码行数:26,代码来源:model.py
示例2: optimizer
def optimizer(someloss):
    """Build an Adam train op for `someloss` with global-norm gradient clipping.

    Args:
        someloss: Scalar loss tensor to minimize.

    Returns:
        The op that applies the clipped gradients (and increments the step).
    """
    # Bug fix: the step counter must be non-trainable, otherwise it appears
    # in tf.trainable_variables() and other optimizers may try to update it.
    global_step = tf.Variable(0, trainable=False)
    adam = tf.train.AdamOptimizer(learning_rate=0.001)
    gradients, variables = zip(*adam.compute_gradients(someloss))
    # Rescale all gradients jointly so their global norm is at most 1.25.
    gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
    return adam.apply_gradients(zip(gradients, variables),
                                global_step=global_step)
开发者ID:AlexMoreo,项目名称:tensorflow-Tex2Vis,代码行数:7,代码来源:lstm_text2vis.py
示例3: __init__
def __init__(self, is_training, config):
    """Build a bag-of-embeddings binary sentiment classifier graph."""
    self.batch_size = batch_size = config.batch_size
    size = config.hidden_size
    self.max_len = max_len = config.max_len
    vocab_size = config.vocab_size

    self._input_data = tf.placeholder(tf.int32, [batch_size, config.max_len])
    self._targets = tf.placeholder(tf.int32, [batch_size])

    # Encode each sequence as the sum of its word embeddings (order-free).
    embedding = tf.get_variable("embedding", [vocab_size, size])
    inputs = tf.nn.embedding_lookup(embedding, self._input_data)
    output = tf.reduce_sum(inputs, 1)

    # Two-way softmax classifier on top of the summed embeddings.
    softmax_w = tf.get_variable("softmax_w", [size, 2])
    softmax_b = tf.get_variable("softmax_b", [2])
    logits = tf.matmul(output, softmax_w) + softmax_b
    prediction = tf.nn.softmax(logits)
    self._prediction = prediction

    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, self._targets)
    self._cost = cost = tf.reduce_sum(loss) / batch_size

    # Inference-only graphs skip the training ops entirely.
    if not is_training:
        return

    self._lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                      config.max_grad_norm)
    # NOTE(review): `self.lr` presumably resolves to a property defined
    # elsewhere in the class, backed by self._lr — confirm.
    optimizer = tf.train.GradientDescentOptimizer(self.lr)
    self._train_op = optimizer.apply_gradients(zip(grads, tvars))
开发者ID:cbienpourtoi,项目名称:test-tensorflow-sentiments,代码行数:33,代码来源:sentiment_model_3.py
示例4: __init__
def __init__(self, loss, global_step, optimizer, learning_rate, clip_gradients=5.0):
    """Build a trainer part of graph.

    Args:
        loss: Tensor that evaluates to model's loss.
        global_step: Tensor with global step of the model.
        optimizer: Name of the optimizer class (SGD, Adam, Adagrad) or the
            optimizer class itself.
        learning_rate: Initial value for the learning-rate variable.
        clip_gradients: Global-norm clipping threshold; <= 0 disables clipping.
    """
    self.loss = loss
    self.global_step = global_step
    self._learning_rate = tf.get_variable(
        "learning_rate",
        [],
        initializer=tf.constant_initializer(learning_rate))
    params = tf.trainable_variables()
    self.gradients = tf.gradients(loss, params)
    if clip_gradients > 0.0:
        # Jointly rescale all gradients so their global norm is bounded.
        self.gradients, self.gradients_norm = tf.clip_by_global_norm(
            self.gradients, clip_gradients)
    grads_and_vars = zip(self.gradients, params)
    if isinstance(optimizer, str):
        self._optimizer = OPTIMIZER_CLS_NAMES[
            optimizer](self._learning_rate)
    else:
        # Bug fix: the attribute is self._learning_rate; `self.learning_rate`
        # does not exist and raised AttributeError on this branch.
        self._optimizer = optimizer(self._learning_rate)
    self.trainer = self._optimizer.apply_gradients(grads_and_vars,
                                                   global_step=global_step,
                                                   name="train")
    # Get all initializers for all trainable variables.
    self._initializers = tf.initialize_all_variables()
开发者ID:tsipporah,项目名称:skflow,代码行数:30,代码来源:trainer.py
示例5: create_gen_train_op
def create_gen_train_op(hparams, learning_rate, gen_loss, global_step, mode):
    """Create Generator train op (clipped gradients over 'gen*' variables)."""
    del hparams  # unused
    with tf.name_scope('train_generator'):
        # Select the generator optimizer from command-line flags.
        if FLAGS.generator_optimizer == 'sgd':
            gen_optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        elif FLAGS.generator_optimizer == 'adam':
            gen_optimizer = tf.train.AdamOptimizer(learning_rate)
        else:
            raise NotImplementedError

        # Only variables whose op name starts with 'gen' belong to the
        # generator sub-network.
        gen_vars = [v for v in tf.trainable_variables()
                    if v.op.name.startswith('gen')]
        print('Optimizing Generator vars.')
        for v in gen_vars:
            print(v)

        # MAXIMIZE is implemented by differentiating the negated loss.
        if mode == 'MINIMIZE':
            gen_grads = tf.gradients(gen_loss, gen_vars)
        elif mode == 'MAXIMIZE':
            gen_grads = tf.gradients(-gen_loss, gen_vars)
        else:
            raise ValueError("Must be one of 'MINIMIZE' or 'MAXIMIZE'")

        gen_grads_clipped, _ = tf.clip_by_global_norm(gen_grads,
                                                      FLAGS.grad_clipping)
        gen_train_op = gen_optimizer.apply_gradients(
            zip(gen_grads_clipped, gen_vars), global_step=global_step)
        return gen_train_op, gen_grads_clipped, gen_vars
开发者ID:ALISCIFP,项目名称:models,代码行数:27,代码来源:model_optimization.py
示例6: apply_gradients
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Applying gradients and tune hyperparams with YellowFin.

    Args:
        grads_and_vars: List of (gradient, variable) pairs as returned by
            compute_gradients().
        global_step: Optional Variable to increment by one after the
            variables have been updated.
        name: Optional name for the returned operation. Default to the
            name passed to the Optimizer constructor.

    Returns:
        A group of operations: the momentum variable update, the YellowFin
        measurement ops (curvature, variance, distance), the single-step and
        lr/mu tuning ops, and the step increment.
    """
    # Keep only pairs that actually carry a gradient.
    kept_pairs = [(g, v) for g, v in grads_and_vars if g is not None]
    self._grad, self._vars = zip(*kept_pairs)

    # Variable update with momentum, with optional global-norm clipping.
    with tf.variable_scope("apply_updates"):
        if self._clip_thresh_var is not None:
            self._grad, _ = tf.clip_by_global_norm(
                self._grad, self._clip_thresh_var)
        apply_grad_op = self._momentum_optimizer.apply_gradients(
            zip(self._grad, self._vars),
            global_step=global_step,
            name=name)

    # Begin lr and mu tuning. The dependency ideally only needs to be on
    # the clipped gradients (self._grad), but control_dependencies does not
    # support IndexedSlices for sparse gradients, so we depend on the whole
    # apply op — slightly less parallel, but correct.
    with tf.variable_scope("prepare_yellowFin_variables"):
        with tf.control_dependencies([apply_grad_op]):
            prepare_variables_op = self._prepare_variables()

    with tf.variable_scope("yellowfin"):
        with tf.control_dependencies([prepare_variables_op]):
            yellowfin_op = self._yellowfin()

    # Advance the YellowFin step counter once tuning is done.
    with tf.control_dependencies([yellowfin_op]):
        self._increment_step_op = tf.assign_add(self._step, 1).op

    return tf.group(apply_grad_op,
                    prepare_variables_op,
                    yellowfin_op,
                    self._increment_step_op)
开发者ID:qixiuai,项目名称:tensor2tensor,代码行数:60,代码来源:yellowfin.py
示例7: build_rmsprop_optimizer
def build_rmsprop_optimizer(self, learning_rate, rmsprop_decay, rmsprop_constant, gradient_clip, version):
    """Build a train op using either TF's plain SGD step on RMS-normalized
    gradients ('rmsprop') or Graves' RMSProp variant ('graves_rmsprop').

    Args:
        learning_rate: Step size for the underlying GradientDescentOptimizer.
        rmsprop_decay: Exponential decay rate for the running averages.
        rmsprop_constant: Epsilon added inside the sqrt for stability.
        gradient_clip: Global-norm clip threshold; <= 0 disables clipping.
        version: 'rmsprop' or 'graves_rmsprop'.

    Returns:
        The training op (grouped with the running-average updates for
        'graves_rmsprop').
    """
    with tf.name_scope('rmsprop'):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        grads_and_vars = optimizer.compute_gradients(self.loss)
        grads = [gv[0] for gv in grads_and_vars]
        params = [gv[1] for gv in grads_and_vars]
        if gradient_clip > 0:
            # Bug fix: tf.clip_by_global_norm returns (clipped_list, norm);
            # the original assigned the whole tuple to `grads`, breaking
            # every later use of `grads` as a list of tensors.
            grads, _ = tf.clip_by_global_norm(grads, gradient_clip)
        if version == 'rmsprop':
            return optimizer.apply_gradients(zip(grads, params))
        elif version == 'graves_rmsprop':
            # Running averages of the gradients and their squares.
            square_grads = [tf.square(grad) for grad in grads]
            avg_grads = [tf.Variable(tf.ones(var.get_shape())) for var in params]
            avg_square_grads = [tf.Variable(tf.ones(var.get_shape())) for var in params]
            update_avg_grads = [
                grad_pair[0].assign((rmsprop_decay * grad_pair[0]) + ((1 - rmsprop_decay) * grad_pair[1]))
                for grad_pair in zip(avg_grads, grads)]
            update_avg_square_grads = [
                grad_pair[0].assign((rmsprop_decay * grad_pair[0]) + ((1 - rmsprop_decay) * tf.square(grad_pair[1])))
                for grad_pair in zip(avg_square_grads, grads)]
            avg_grad_updates = update_avg_grads + update_avg_square_grads
            # RMS normalization: sqrt(E[g^2] - E[g]^2 + eps).
            rms = [tf.sqrt(avg_grad_pair[1] - tf.square(avg_grad_pair[0]) + rmsprop_constant)
                   for avg_grad_pair in zip(avg_grads, avg_square_grads)]
            rms_updates = [grad_rms_pair[0] / grad_rms_pair[1] for grad_rms_pair in zip(grads, rms)]
            train = optimizer.apply_gradients(zip(rms_updates, params))
            return tf.group(train, tf.group(*avg_grad_updates))
开发者ID:hihell,项目名称:deep_rl_ale,代码行数:34,代码来源:parallel_q_network.py
示例8: make_train_op
def make_train_op(local_net, global_net):
    """
    Use gradients from local network to update the global network.

    The optimizer expects matched (gradient, variable) pairs. Here the
    gradients come from the local (worker) network while the variables
    belong to the shared global network, so we pair
    [local_g1, local_g2, ...] with [global_v1, global_v2, ...].
    """
    # Gradients computed on the local network.
    worker_grads, _ = zip(*local_net.grads_and_vars)
    # Clip gradients to avoid large values.
    worker_grads, _ = tf.clip_by_global_norm(worker_grads, 5.0)
    # Variables of the shared global network.
    _, shared_vars = zip(*global_net.grads_and_vars)
    # Pair local gradients with global variables.
    pairs = list(zip(worker_grads, shared_vars))
    # Gradient descent step: var <- var - learning_rate * grad.
    return global_net.optimizer.apply_gradients(
        pairs,
        global_step=tf.train.get_global_step())
开发者ID:lazyprogrammer,项目名称:machine_learning_examples,代码行数:31,代码来源:worker.py
示例9: __init__
def __init__(self, model, optimizer, learning_rate, clip_gradients=5.0):
    """Build a trainer part of graph.

    Args:
        model: Model object, that has loss and global_step attributes.
        optimizer: Name of the optimizer class (SGD, Adam, Adagrad) or the
            optimizer class itself.
        learning_rate: Initial value for the learning-rate variable.
        clip_gradients: Global-norm clipping threshold; <= 0 disables clipping.
    """
    self.model = model
    self._learning_rate = tf.get_variable(
        "learning_rate",
        [],
        initializer=tf.constant_initializer(learning_rate))
    params = tf.trainable_variables()
    self.gradients = tf.gradients(model.loss, params)
    if clip_gradients > 0.0:
        # Jointly rescale all gradients so their global norm is bounded.
        self.gradients, self.gradients_norm = tf.clip_by_global_norm(
            self.gradients, clip_gradients)
    grads_and_vars = zip(self.gradients, params)
    if isinstance(optimizer, str):
        self._optimizer = OPTIMIZER_CLS_NAMES[optimizer](self._learning_rate)
    else:
        # Bug fix: the attribute is self._learning_rate; `self.learning_rate`
        # does not exist and raised AttributeError on this branch.
        self._optimizer = optimizer(self._learning_rate)
    self.trainer = self._optimizer.apply_gradients(grads_and_vars,
                                                   global_step=model.global_step,
                                                   name="train")
    # Get all initializers for all trainable variables.
    self._initializers = tf.initialize_all_variables()
开发者ID:linhvannguyen,项目名称:skflow,代码行数:27,代码来源:trainer.py
示例10: defineTensorGradientDescent
def defineTensorGradientDescent(self):
    """Wire up Adam training with globally-clipped gradients of self.cost."""
    self._learningRate = tf.Variable(0.0, trainable=False)
    trainableVariables = tf.trainable_variables()
    clippedGrads, _ = tf.clip_by_global_norm(
        tf.gradients(self.cost, trainableVariables),
        self.config.max_grad_norm)
    # NOTE(review): `self.learningRate` presumably is a property defined
    # elsewhere in the class, backed by self._learningRate — confirm.
    optimizer = tf.train.AdamOptimizer(self.learningRate)
    self._tensorGradientDescentTrainingOperation = optimizer.apply_gradients(
        zip(clippedGrads, trainableVariables))
开发者ID:killianlevacher,项目名称:TrumpBSQuoteRNNGenerator,代码行数:7,代码来源:RNN_Model.py
示例11: __init__
def __init__(self, vocab_size, size, num_layers, max_gradient_norm, batch_size,
             learning_rate, learning_rate_decay_factor, dropout,
             forward_only=False):
    """Build an encoder-decoder correction model; adds Adam training ops
    with globally clipped gradients unless forward_only is set."""
    self.size = size
    self.vocab_size = vocab_size
    self.batch_size = batch_size
    self.num_layers = num_layers
    self.keep_prob = 1.0 - dropout

    # The learning rate lives in the graph so it can be decayed via an op.
    self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
    self.learning_rate_decay_op = self.learning_rate.assign(
        self.learning_rate * learning_rate_decay_factor)
    self.global_step = tf.Variable(0, trainable=False)

    # Time-major [time, batch] token and mask placeholders.
    self.source_tokens = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="source_tokens")
    self.target_tokens = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="target_tokens")
    self.source_mask = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="source_mask")
    self.target_mask = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="target_mask")
    # Per-example lengths derived from the masks.
    self.source_length = tf.reduce_sum(self.source_mask, reduction_indices=0)
    self.target_length = tf.reduce_sum(self.target_mask, reduction_indices=0)

    # Assemble the model graph.
    self.setup_embeddings()
    self.setup_encoder()
    self.setup_decoder()
    self.setup_loss()

    params = tf.trainable_variables()
    if not forward_only:
        opt = tf.train.AdamOptimizer(self.learning_rate)
        gradients = tf.gradients(self.losses, params)
        clipped_gradients, norm = tf.clip_by_global_norm(gradients, max_gradient_norm)
        self.gradient_norms = norm
        self.updates = opt.apply_gradients(
            zip(clipped_gradients, params), global_step=self.global_step)

    self.saver = tf.train.Saver(tf.all_variables())
开发者ID:nipengmath,项目名称:nlc,代码行数:35,代码来源:nlc_model.py
示例12: training
def training(hypes, loss, global_step, learning_rate, opt=None):
    """Sets up the training Ops.

    Creates an optimizer and applies the (optionally clipped) gradients to
    all trainable variables. The Op returned by this function is what must
    be passed to the `sess.run()` call to cause the model to train.

    Args:
        hypes: Hyperparameter dict; reads hypes['solver'] and
            hypes['clip_norm'], and records the optimizer and global step.
        loss: Dict of loss tensors, from loss(); uses loss['total_loss'].
        global_step: Integer Variable counting the number of training steps
            processed.
        learning_rate: The learning rate to use for gradient descent.
        opt: Optional pre-built optimizer; when None one is constructed from
            hypes['solver']['opt'].

    Returns:
        train_op: The Op for training.
    """
    sol = hypes["solver"]
    hypes['tensors'] = {}
    hypes['tensors']['global_step'] = global_step
    total_loss = loss['total_loss']
    with tf.name_scope('training'):
        if opt is None:
            if sol['opt'] == 'RMS':
                opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate,
                                                decay=0.9,
                                                epsilon=sol['epsilon'])
            elif sol['opt'] == 'Adam':
                opt = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                             epsilon=sol['adam_eps'])
            elif sol['opt'] == 'SGD':
                lr = learning_rate
                opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
            else:
                raise ValueError('Unrecognized opt type')
        hypes['opt'] = opt

        grads_and_vars = opt.compute_gradients(total_loss)
        if hypes['clip_norm'] > 0:
            grads, tvars = zip(*grads_and_vars)
            clip_norm = hypes["clip_norm"]
            clipped_grads, norm = tf.clip_by_global_norm(grads, clip_norm)
            # Bug fix: materialize as a list. On Python 3 the zip iterator
            # would be consumed by a first apply_gradients call, leaving
            # nothing for the real one below.
            grads_and_vars = list(zip(clipped_grads, tvars))

        # Bug fix: the original created an extra, never-run apply_gradients
        # op before this one; only the op gated on the batch-norm update ops
        # is needed and returned.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = opt.apply_gradients(grads_and_vars,
                                           global_step=global_step)

    return train_op
开发者ID:new-2017,项目名称:KittiSeg,代码行数:60,代码来源:generic_optimizer.py
示例13: fit
def fit(self, data_function):
    """Train the stacked-LSTM classifier on batches from data_function.train."""
    with tf.Graph().as_default(), tf.Session() as sess:
        n, s, p = data_function.train.X.shape
        X_pl = tf.placeholder(tf.float32, [self.batch_size, s, p])
        Y_pl = tf.placeholder(tf.float32, [self.batch_size, p])

        # Stack num_layers LSTM cells; unstack the time axis into a list of
        # (batch, features) tensors as the old rnn.rnn API expects.
        lstm_cell = rnn_cell.BasicLSTMCell(self.hidden_size)
        cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * self.num_layers)
        step_inputs = [X_pl[:, i, :] for i in xrange(s)]
        outputs, _ = rnn.rnn(cell, step_inputs, dtype=tf.float32)

        # Softmax classifier on the final timestep's output.
        softmax_w = tf.get_variable("softmax_w", [self.hidden_size, p])
        softmax_b = tf.get_variable("softmax_b", [p])
        logits = tf.matmul(outputs[-1], softmax_w) + softmax_b
        loss = loss_dict['ce'](logits, Y_pl)

        tvars = tf.trainable_variables()
        print([i.get_shape() for i in tvars])
        grads, _ = tf.clip_by_global_norm(
            tf.gradients(loss, tvars), self.max_grad_norm)
        optimizer = tf.train.AdamOptimizer()
        train_op = optimizer.apply_gradients(zip(grads, tvars))
        initializer = tf.random_uniform_initializer(-self.init_scale,
                                                    self.init_scale)
        tf.initialize_all_variables().run()

        # Training loop with periodic progress reporting.
        for step in xrange(self.n_step):
            batch_xs, batch_ys = data_function.train.next_batch(
                self.batch_size)
            _, loss_value = sess.run([train_op, loss],
                                     feed_dict={X_pl: batch_xs, Y_pl: batch_ys})
            if step % 100 == 0:
                PrintMessage(data_function.train.epochs_completed,
                             loss_value, 0, 0)
开发者ID:hduongtrong,项目名称:ScikitFlow,代码行数:33,代码来源:rnn.py
示例14: create_optimizer
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
    """Creates an optimizer training op (linear decay + warmup + AdamW)."""
    global_step = tf.train.get_or_create_global_step()

    # Linear decay of the learning rate from init_lr down to 0.
    learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
    learning_rate = tf.train.polynomial_decay(
        learning_rate,
        global_step,
        num_train_steps,
        end_learning_rate=0.0,
        power=1.0,
        cycle=False)

    # Linear warmup: while global_step < num_warmup_steps, the learning
    # rate is `global_step / num_warmup_steps * init_lr`.
    if num_warmup_steps:
        steps_int = tf.cast(global_step, tf.int32)
        warmup_int = tf.constant(num_warmup_steps, dtype=tf.int32)
        steps_float = tf.cast(steps_int, tf.float32)
        warmup_float = tf.cast(warmup_int, tf.float32)
        warmup_fraction_done = steps_float / warmup_float
        warmup_lr = init_lr * warmup_fraction_done
        is_warmup = tf.cast(steps_int < warmup_int, tf.float32)
        learning_rate = (
            (1.0 - is_warmup) * learning_rate + is_warmup * warmup_lr)

    # AdamW is recommended for fine-tuning since the model was pre-trained
    # with it (note the Adam m/v slots are NOT restored from init_checkpoint).
    optimizer = AdamWeightDecayOptimizer(
        learning_rate=learning_rate,
        weight_decay_rate=0.01,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])

    if use_tpu:
        optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)

    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    # Matches pre-training: clip the global gradient norm to 1.0.
    (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
    train_op = optimizer.apply_gradients(
        zip(grads, tvars), global_step=global_step)

    # apply_gradients normally bumps the global step, but
    # AdamWeightDecayOptimizer doesn't — so do it explicitly. If you swap
    # in a different optimizer, you should probably take this out.
    new_global_step = global_step + 1
    train_op = tf.group(train_op, [global_step.assign(new_global_step)])
    return train_op
开发者ID:arnaudvl,项目名称:bert,代码行数:60,代码来源:optimization.py
示例15: training_ops
def training_ops(self, loss):
    """Return an op that applies globally-clipped (norm 5.0) gradients of `loss`."""
    opt = self.get_optimizer()
    variables = tf.trainable_variables()
    clipped, _ = tf.clip_by_global_norm(tf.gradients(loss, variables), 5.0)
    return opt.apply_gradients(zip(clipped, variables),
                               global_step=self.global_step)
开发者ID:JiweiHe,项目名称:models,代码行数:7,代码来源:model.py
示例16: __init__
def __init__(self,
             length_batch,
             features_batch,
             labels_batch):
    """LSTM sequence tagger over 26 classes; padding positions are masked
    out of both the loss and the accuracy."""
    self.labels_flat = tf.reshape(labels_batch, [-1])
    self.labels_one_hot = tf.one_hot(labels_batch, 26)
    self.labels_one_hot_flat = tf.reshape(self.labels_one_hot, [-1, 26])

    # Run a 128-unit LSTM over the (batch, time, features) input.
    self.lstm = tf.nn.rnn_cell.BasicLSTMCell(128)
    self.lstm_outputs, _ = tf.nn.dynamic_rnn(
        self.lstm, features_batch, sequence_length=length_batch,
        time_major=False, dtype=tf.float32)
    self.flat_lstm_outputs = tf.reshape(self.lstm_outputs, [-1, 128])
    self.outputs = tflearn.fully_connected(self.flat_lstm_outputs, 26)

    # Mask out padding (label 0) when averaging the per-position losses.
    self.losses = tf.nn.softmax_cross_entropy_with_logits(
        self.outputs, self.labels_one_hot_flat)
    self.mask = tf.to_float(tf.sign(self.labels_flat))
    self.masked_losses = self.mask * self.losses
    self.mean_loss = tf.reduce_sum(self.masked_losses / tf.reduce_sum(self.mask))

    # Masked accuracy over non-padding positions.
    self.predictions = tf.argmax(self.outputs, 1)
    self.accurate = tf.equal(self.predictions, self.labels_flat)
    self.accuracy = (tf.reduce_sum(tf.to_float(self.accurate) * self.mask)
                     / tf.reduce_sum(self.mask))

    # SGD with globally clipped gradients.
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(self.mean_loss, tvars), 5.0)
    self.train = tf.train.GradientDescentOptimizer(0.1).apply_gradients(
        zip(grads, tvars))
开发者ID:andreasjansson,项目名称:cr,代码行数:28,代码来源:basic_lstm.py
示例17: train_neural_network
def train_neural_network():
    """Train the RNN poetry model, decaying the LR each epoch and
    checkpointing every 7 epochs."""
    logits, last_state, _, _, _ = neural_network()
    targets = tf.reshape(output_targets, [-1])
    loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
        [logits], [targets], [tf.ones_like(targets, dtype=tf.float32)],
        len(words))
    cost = tf.reduce_mean(loss)  # average loss over all positions
    learning_rate = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    # Gradient clipping keeps weight updates in a reasonable range; overly
    # large per-step updates easily lead to loss divergence.
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), 5)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.apply_gradients(zip(grads, tvars))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        for epoch in range(50):
            # Exponentially decay the learning rate each epoch.
            sess.run(tf.assign(learning_rate, 0.002 * (0.97 ** epoch)))
            n = 0
            for batche in range(n_chunk):
                train_loss, _, _ = sess.run(
                    [cost, last_state, train_op],
                    feed_dict={input_data: x_batches[n],
                               output_targets: y_batches[n]})
                n += 1
                print(epoch, batche, train_loss)
            if epoch % 7 == 0:
                saver.save(sess, 'poetry.module', global_step=epoch)
开发者ID:veyvin,项目名称:tensorflow-learn,代码行数:28,代码来源:train.py
示例18: get_graph
def get_graph(self, tensor_input=0):
    """Assemble the LSTM graph: cell parameters, persistent state, the
    output classifier, loss, and a clipped-gradient SGD optimizer.

    Args:
        tensor_input: Input tensor fed to the LSTM cell. The default 0 is a
            sentinel; callers normally pass a real tensor.
    """
    # Cell parameters: one weight matrix and bias covering all 4 LSTM gates.
    self.wCells = tf.Variable(tf.truncated_normal(
        [self.nbInputs + self.nbCells, self.nbCells * 4], -0.1, 0.1))
    self.bCells = tf.Variable(tf.zeros([1, self.nbCells * 4]))

    # Persistent (non-trainable) cell output and state carried across steps.
    saved_output = tf.Variable(tf.truncated_normal(
        [self.batchSize, self.nbCells], -0.1, 0.1), trainable=False)
    saved_state = tf.Variable(tf.truncated_normal(
        [self.batchSize, self.nbCells], -0.1, 0.1), trainable=False)

    # Output classifier parameters and label placeholder.
    wClassif = tf.Variable(tf.truncated_normal(
        [self.nbCells, self.nbOutputs], -0.1, 0.1))
    bClassif = tf.Variable(tf.zeros([self.nbOutputs]))
    self.train_labels = tf.placeholder(tf.float32, shape=[1, self.nbOutputs])

    # Propagate the input through the LSTM cell once.
    saved_output, saved_state = self.lstm_cell(
        tensor_input, saved_output, saved_state)

    # Classifier on the cell output.
    self.logits = tf.nn.xw_plus_b(saved_output, wClassif, bClassif)
    self.loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            self.logits, self.train_labels))

    # Predictions.
    self.train_prediction = tf.nn.softmax(self.logits)

    # Optimizer: SGD with exponential LR decay and global-norm clipping.
    global_step = tf.Variable(0)
    self.learning_rate = tf.train.exponential_decay(
        50.0, global_step, 5000, 0.8, staircase=True)
    self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
    gradients, variables = zip(*self.optimizer.compute_gradients(self.loss))
    gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
    self.optimizer = self.optimizer.apply_gradients(
        zip(gradients, variables), global_step=global_step)
开发者ID:marc-moreaux,项目名称:perso,代码行数:60,代码来源:tf_lstm.py
示例19: build_training
def build_training(self):
    """Create the Adam optimizer, the clipped-gradient train op, and a
    TensorBoard summary for the global gradient norm."""
    print(' Building training')
    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.AdamOptimizer(self.learning_rate)

    # Gradient clipping by global norm. NOTE: this is the correct but
    # slower variant; tf.clip_by_norm() per tensor would be faster (see the
    # tf.clip_by_global_norm() documentation).
    grads_and_vars = optimizer.compute_gradients(self.loss)
    gradients, variables = zip(*grads_and_vars)  # unzip list of tuples
    clipped_gradients, global_norm = tf.clip_by_global_norm(
        gradients, self.clip_norm)
    clipped_grads_and_vars = zip(clipped_gradients, variables)

    # TensorBoard scalar for the pre-clip global gradient norm.
    tf.scalar_summary('train/global gradient norm', global_norm)

    # Training op that applies the clipped gradients.
    self.train_op = optimizer.apply_gradients(
        clipped_grads_and_vars, global_step=self.global_step)
开发者ID:Styrke,项目名称:master-code,代码行数:27,代码来源:model.py
示例20: _update_network
def _update_network(self, trainer):
    """Define the A3C losses, local-network gradients, and the op that
    applies those (clipped) gradients to the shared global network."""
    self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
    self.actions_onehot = tf.one_hot(
        self.actions, self.a_dim, dtype=tf.float32)
    self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)
    self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)

    # Probability assigned by the policy to the action actually taken.
    self.outputs = tf.reduce_sum(
        self.policy * self.actions_onehot, [1])

    # Value regression loss.
    self.value_loss = 0.5 * tf.reduce_sum(tf.square(
        self.target_v - tf.reshape(self.value, [-1])))
    # Higher entropy -> lower loss -> encourages exploration.
    self.entropy = -tf.reduce_sum(self.policy * tf.log(self.policy))
    # Policy-gradient loss weighted by the advantages.
    self.policy_loss = -tf.reduce_sum(
        tf.log(self.outputs) * self.advantages)
    self.loss = (0.5 * self.value_loss
                 + self.policy_loss
                 - 0.01 * self.entropy)

    # Gradients w.r.t. the local (worker) variables.
    local_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
    self.gradients = tf.gradients(self.loss, local_vars)
    self.var_norms = tf.global_norm(local_vars)
    # grads[i] * clip_norm / max(global_norm, clip_norm)
    grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, 40.0)

    # Apply the local gradients to the shared global network's variables.
    global_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
    self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
开发者ID:Funitus,项目名称:reinforce_py,代码行数:35,代码来源:net.py
注:本文中的tensorflow.clip_by_global_norm函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论