Python tensorflow.trainable_variables function code examples


This article collects typical usage examples of the tensorflow.trainable_variables function in Python. If you are wondering what exactly trainable_variables does, how to call it, or what real code that uses it looks like, the hand-picked examples below may help.



A total of 20 code examples of the trainable_variables function are shown below, sorted by popularity by default.
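
Before the examples, here is a minimal, self-contained sketch (assuming TensorFlow 1.x graph mode; the variable names are illustrative and not taken from any example below) showing what tf.trainable_variables() returns and how it is typically passed to tf.gradients or an optimizer:

import tensorflow as tf  # assumes TensorFlow 1.x (graph mode)

# A tiny graph with two trainable variables (illustrative names).
x = tf.placeholder(tf.float32, [None, 3])
w = tf.get_variable("w", shape=[3, 1])   # trainable=True by default
b = tf.get_variable("b", shape=[1])      # trainable=True by default
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) + b))

# tf.trainable_variables() returns every variable created with trainable=True,
# in creation order; it is the default var_list for optimizers and tf.gradients.
params = tf.trainable_variables()
grads = tf.gradients(loss, params)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print([v.name for v in params])      # e.g. ['w:0', 'b:0']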

Example 1: __init__

 def __init__(self,learning_rate, cost,feed,sess,m,comm,size,rank):
     self.Y=[]
     self.S=[]
     self.YS=[]
     self.cost=cost
     self.sess=sess
     self.NumIter=0
     self.m=m
     self.counter=0
     self.gradientEval=0
     self.functionEval=0
     self.last_func=0
     self.innerEval=0
     self.HessianEval=0
     self.last_z1=0.01
     self.memorySize=0
     self.rank=rank
     self.comm=comm
     self.size=size
     v=[]
     self.assign_placeholders=[]
     assign_op=[]
     for t in tf.trainable_variables():
         v.append(sess.run(t))
         self.assign_placeholders.append(tf.placeholder(shape=v[-1].shape,dtype="float32"))
         assign_op.append(t.assign(self.assign_placeholders[-1]))
     self.assign=tf.group(*assign_op)
     self.var=np.array(v)
     # self.var=np.load('var.npy')
     np.save('var.npy',self.var)
     comm.scatter(['Init' for i in range(size)],root=rank)
     self.gradient=tf.gradients(cost,tf.trainable_variables(),gate_gradients=True)
     self.learningRate=learning_rate
     self.old_grad=None
Author: Vendea | Project: summer-research-2016 | Lines: 34 | Source: lbfgs_optimizer.py


Example 2: train

def train(lr, total_loss, global_step):
    # Variables that affect learning rate.

    # Compute gradients.
    #with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)

    # Add histograms for gradients.
    for i, (grad, var) in enumerate(grads):
        if grad is not None:
            tf.histogram_summary(var.op.name + '/gradients', grad)
            grads[i] = (tf.clip_by_norm(grad, 5), var)

    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')

    return train_op
Author: danfeiX | Project: drl | Lines: 29 | Source: dqn.py


Example 3: __init__

    def __init__(self, sess, state_dim, action_dim, learning_rate, tau, num_actor_vars):
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.learning_rate = learning_rate
        self.tau = tau

        # Create the critic network
        self.inputs, self.action, self.out = self.create_critic_network()

        self.network_params = tf.trainable_variables()[num_actor_vars:]

        # Target Network
        self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
        
        self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]

        # Op for periodically updating target network with online network weights with regularization
        self.update_target_network_params = \
            [self.target_network_params[i].assign(tf.mul(self.network_params[i], self.tau) + tf.mul(self.target_network_params[i], 1. - self.tau))
                for i in range(len(self.target_network_params))]
    
        # Network target (y_i)
        self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])

        # Define loss and optimization Op
        self.loss = tflearn.mean_square(self.predicted_q_value, self.out)
        self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

        # Get the gradient of the net w.r.t. the action
        self.action_grads = tf.gradients(self.out, self.action)
Author: ataitler | Project: DQN | Lines: 31 | Source: ddpg.py
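
The target-update op above uses tf.mul, which later TensorFlow 1.x releases renamed to tf.multiply. A hedged, standalone sketch of the same soft target update (tau blending, with illustrative variable names) might look like this:

import tensorflow as tf  # assumes TensorFlow 1.x (graph mode)

tau = 0.001

# Illustrative online/target parameter pair; in the example above these come
# from slicing tf.trainable_variables() after each network has been built.
online_w = tf.get_variable("online_w", shape=[4, 4])
target_w = tf.get_variable("target_w", shape=[4, 4])

# target <- tau * online + (1 - tau) * target  (tf.multiply replaces tf.mul)
update_target = target_w.assign(
    tf.multiply(online_w, tau) + tf.multiply(target_w, 1.0 - tau))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update_target)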


Example 4: build_gen_graph

    def build_gen_graph(self):
        # forward pass through generator
        # returns a (batch_size, sequence_length, input_dim) for generated
        self.generated, self.timestep_probs, self.predicted_rewards = self.generate()

        # get the predictions from the discriminator
        # returns a (batch_size, 1) output
        self.gen_scores = self.discriminate(self.generated, reuse=False)

        # formulate the policy gradient loss
        self.gen_train_loss_out, self.baseline_loss = self.gen_train_loss(self.gen_scores,
             self.predicted_rewards)

        # get generative parameters and baseline params
        self.g_params = [p for p in tf.trainable_variables() if 'g' in p.name and 'b' not in p.name]
        self.b_params = [p for p in tf.trainable_variables() if 'b' in p.name]

        # create the gen train op
        self.gen_optimize_rewards(self.gen_train_loss_out)

        # create the baseline train op
        if self.opts.with_baseline:
            self.optimize_baseline(self.baseline_loss)

        # initialize all variable and prep to save model
        tf.initialize_all_variables().run()
Author: wulfebw | Project: adversarial_rl | Lines: 26 | Source: discrete_rgan.py


Example 5: train

def train(total_loss, global_step):
  """Train CIFAR-10 model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.
  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  tf.scalar_summary('learning_rate', lr)

  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)

  # Compute gradients.
  with tf.control_dependencies([loss_averages_op]):
    # opt = tf.train.GradientDescentOptimizer(lr)
    opt = tf.train.AdamOptimizer(learning_rate=0.0001,
                                       beta1=0.9,
                                       beta2=0.999,
                                       epsilon=1e-08,
                                       use_locking=False,
                                       name='Adam')#.minimize(loss,global_step=batch)
    grads = opt.compute_gradients(total_loss)

  # Apply gradients.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.histogram_summary(var.op.name, var)

  # Add histograms for gradients.
  for grad, var in grads:
    if grad is not None:
      tf.histogram_summary(var.op.name + '/gradients', grad)

  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())

  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')

  return train_op
Author: Thrasi | Project: thesis-project | Lines: 60 | Source: cifar10.py


Example 6: __init__

    def __init__(self,
                 sess,
                 state_dim,
                 action_dim,
                 action_high,
                 action_low,
                 learning_rate,
                 grad_norm_clip,
                 tau,
                 batch_size,
                 name=None):
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.a_high = action_high
        self.a_low = action_low
        self.learning_rate = learning_rate
        self.grad_norm_clip = grad_norm_clip
        self.tau = tau
        self.batch_size = batch_size

        # create networks
        net_name = 'actor' if name is None else name
        with tf.variable_scope(net_name):
            self.obs, self.action = self.create_actor_network()
        self.params = tf.trainable_variables(scope=net_name)
        with tf.variable_scope(net_name + '_target'):
            self.target_obs, self.target_action = self.create_actor_network()
        self.target_params = tf.trainable_variables(scope=net_name + '_target')

        # create ops
        (self.update_target_op,
         self.action_gradient,
         self.train_op) = self.create_actor_ops()
Author: zhang01GA | Project: cloudml-samples | Lines: 34 | Source: actor_critic.py


Example 7: run_model

  def run_model(self, train_config, eval_config):
    with tf.Graph().as_default() as g:
      train_model = base_model(params=train_config, mode="train", hvd=None)
      train_model.compile()
      eval_model = base_model(params=eval_config, mode="eval", hvd=None)
      eval_model.compile(force_var_reuse=True)

      train(train_model, eval_model)
      saver = tf.train.Saver()
      checkpoint = tf.train.latest_checkpoint(train_model.params['logdir'])
      with self.test_session(g, use_gpu=True) as sess:
        saver.restore(sess, checkpoint)
        sess.run([train_model.get_data_layer(i).iterator.initializer
                  for i in range(train_model.num_gpus)])
        sess.run([eval_model.get_data_layer(i).iterator.initializer
                  for i in range(eval_model.num_gpus)])

        weights = sess.run(tf.trainable_variables())
        loss = sess.run(train_model.loss)
        eval_losses = sess.run(eval_model.eval_losses)
        eval_loss = np.mean(eval_losses)
        weights_new = sess.run(tf.trainable_variables())

        # check that the weights have not changed from just computing the loss
        for w, w_new in zip(weights, weights_new):
          npt.assert_allclose(w, w_new)
      eval_dict = evaluate(eval_model, checkpoint)
    return loss, eval_loss, eval_dict
Author: fotwo | Project: OpenSeq2Seq | Lines: 28 | Source: speech2text_test.py


Example 8: testCustomGetter

  def testCustomGetter(self):
    custom_getter = snt.custom_getters.Context(snt.custom_getters.stop_gradient)
    module = snt.nets.ConvNet2D(output_channels=self.output_channels,
                                kernel_shapes=self.kernel_shapes,
                                rates=self.rates,
                                strides=self.strides,
                                paddings=self.paddings,
                                custom_getter=custom_getter)

    input_shape = [10, 100, 100, 3]
    input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)

    if tf.executing_eagerly():
      with tf.GradientTape() as tape0:
        out0 = module(input_to_net)
      with tf.GradientTape() as tape1:
        with custom_getter:
          out1 = module(input_to_net)
      all_vars = tf.trainable_variables()
      out0_grads = tape0.gradient(out0, all_vars)
      out1_grads = tape1.gradient(out1, all_vars)

    else:
      out0 = module(input_to_net)
      with custom_getter:
        out1 = module(input_to_net)
      all_vars = tf.trainable_variables()
      out0_grads = tf.gradients(out0, all_vars)
      out1_grads = tf.gradients(out1, all_vars)

    for grad in out0_grads:
      self.assertNotEqual(None, grad)
    self.assertEqual([None] * len(out1_grads), out1_grads)
Author: ccchang0111 | Project: sonnet | Lines: 33 | Source: convnet_test.py


Example 9: __init__

    def __init__(self, actions, name=NAME, learning_rate=1e-4,  x_dim=210, y_dim=160, eps_start=1.0, eps_decay=0.0000001, eps_end=0.1, num_channels=3, should_train=True, from_checkpoint=None, player_id=1):
        Agent.__init__(self, name=name, actions=[])
        self.learning_rate = learning_rate
        self.x_dim, self.y_dim = x_dim, y_dim
        self.actions, self.num_actions = actions, len(actions)
        self.hidden_layers = [32, 32]
        self.num_channels = num_channels
        self.eps_start, self.epsilon_decay, self.epsilon_end = eps_start, eps_decay, eps_end
        self.should_train = should_train
        self.reset()

        # Parameters for updating target network.
        tau = 0.001

        # TODO: Update to support player_id > 2.
        # NOTE: This is a bit of a hack to update the variables in the target
        # network. It can be fixed by using scope and Tensorflow 1.4 which takes
        # a scope argument in tf.trainable_variables().
        if player_id == 2:
            vs = tf.trainable_variables()
            self.target_ops = update_target_graph(vs[len(vs)//2:], tau)
        else:
            self.target_ops = update_target_graph(tf.trainable_variables(), tau)

        # Load model from a checkpoint
        if not (from_checkpoint is None):
            self.saver.restore(self.sess, from_checkpoint)
            print('Restored model from checkpoint: {}'.format(from_checkpoint))
Author: david-abel | Project: simple_rl | Lines: 28 | Source: DQNAgentClass.py
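
The NOTE in this example mentions that tf.trainable_variables() accepts a scope argument from TensorFlow 1.4 onward, which removes the need to slice the global variable list by index. A minimal sketch of that alternative (the scope names are illustrative) could be:

import tensorflow as tf  # assumes TensorFlow >= 1.4 (graph mode)

# Illustrative per-player networks; a real agent would build its layers here.
with tf.variable_scope("player_1"):
    tf.get_variable("w", shape=[4, 2])
with tf.variable_scope("player_2"):
    tf.get_variable("w", shape=[4, 2])

# The scope argument filters by name prefix, so there is no need to split
# tf.trainable_variables() in half as the hack above does.
p1_vars = tf.trainable_variables(scope="player_1")
p2_vars = tf.trainable_variables(scope="player_2")
print([v.name for v in p1_vars])   # ['player_1/w:0']
print([v.name for v in p2_vars])   # ['player_2/w:0']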


Example 10: optim

def optim(loss, **kwargs):
    r"""Applies gradients to variables.

    Args:
        loss: A 0-D `Tensor` containing the value to minimize.
        kwargs:
          optim: A name for optimizer. 'MaxProp' (default), 'AdaMax', 'Adam', or 'sgd'.
          lr: A Python Scalar (optional). Learning rate. Default is .001.
          beta1: A Python Scalar (optional). Default is .9.
          beta2: A Python Scalar (optional). Default is .99.
          category: A string or string list. Specifies the variables that should be trained (optional).
            A trainable variable's value is updated only if its name starts with `category`.
            Default is '', which means all trainable variables are updated.
    """
    opt = Opt(kwargs)
    # opt += Opt(optim='MaxProp', lr=0.001, beta1=0.9, beta2=0.99, category='')

    # default training options
    opt += Opt(optim='MaxProp', lr=0.001, beta1=0.9, beta2=0.99, category='')

    # select optimizer
    # if opt.optim == 'MaxProp':
        # optim = tf.sg_optimize.MaxPropOptimizer(learning_rate=opt.lr, beta2=opt.beta2)
    # elif opt.optim == 'AdaMax':
        # optim = tf.sg_optimize.AdaMaxOptimizer(learning_rate=opt.lr, beta1=opt.beta1, beta2=opt.beta2)
    # elif opt.optim == 'Adam':
    if opt.optim == 'Adam':
        optim = tf.train.AdamOptimizer(learning_rate=opt.lr, beta1=opt.beta1, beta2=opt.beta2)
    else:
        optim = tf.train.GradientDescentOptimizer(learning_rate=opt.lr)

    # get trainable variables
    if isinstance(opt.category, (tuple, list)):
        var_list = []
        for cat in opt.category:
            var_list.extend([t for t in tf.trainable_variables() if t.name.startswith(cat)])
    else:
        var_list = [t for t in tf.trainable_variables() if t.name.startswith(opt.category)]

    # calc gradient
    gradient = optim.compute_gradients(loss, var_list=var_list)

    # add summary
    for v, g in zip(var_list, gradient):
        # exclude batch normal statics
        if 'mean' not in v.name and 'variance' not in v.name \
                and 'beta' not in v.name and 'gamma' not in v.name:
                prefix = ''
                # summary name
                name = prefix + ''.join(v.name.split(':')[:-1])
                # summary statistics
                # noinspection PyBroadException
                try:
                    tf.summary.scalar(name + '/grad', tf.global_norm([g]))
                    tf.summary.histogram(name + '/grad-h', g)
                except:
                    pass
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # gradient update op
    return optim.apply_gradients(gradient, global_step=global_step), global_step
Author: SiyuanWei | Project: tensorflow-101 | Lines: 60 | Source: optimizer.py
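
Example 10 selects its var_list by keeping only the trainable variables whose names start with a given category prefix. A small standalone sketch of that filtering pattern (the scope and prefix names are illustrative) follows:

import tensorflow as tf  # assumes TensorFlow 1.x (graph mode)

with tf.variable_scope("generator"):
    g_w = tf.get_variable("w", shape=[8, 8])
with tf.variable_scope("discriminator"):
    d_w = tf.get_variable("w", shape=[8, 8])

loss = tf.reduce_sum(g_w) + tf.reduce_sum(d_w)

# Keep only the variables whose name starts with the chosen prefix, so the
# optimizer updates just that sub-network and leaves the rest untouched.
var_list = [v for v in tf.trainable_variables()
            if v.name.startswith("generator")]

train_op = tf.train.GradientDescentOptimizer(0.001).minimize(
    loss, var_list=var_list)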


Example 11: __init__

	def __init__(self, num_actions, num_states, num_trainable_vars):		
		self._num_actions = num_actions
		self._num_states = num_states
	
		# Input (not the cell state)
		self.state = tf.placeholder(tf.float32, [1,num_states])

		# Weights for policy output layer
		self.W_fc1 = self.init_torch_matrix([rnn_size, num_actions])
		self.b_fc1 = self.init_torch_vector([num_actions], rnn_size)
		
		# Weights for value output layer
		self.W_fc2 = self.init_torch_matrix([rnn_size, 1])
		self.b_fc2 = self.init_torch_vector([1], rnn_size)	
		
		rnn_cell = tf.nn.rnn_cell.BasicRNNCell(rnn_size, activation=tf.identity) ### Use LSTM
		### Dropout?
		self.cell = tf.nn.rnn_cell.MultiRNNCell([rnn_cell] * num_rnn_layers)

		self.rnn_state = self.cell.zero_state(1, tf.float32)
		output, rnn_state_out = self.cell(self.state, self.rnn_state)
		
		self.rnn_state_out = rnn_state_out
	
		# policy (output)
		self.pi = tf.nn.softmax(tf.matmul(output, self.W_fc1) + self.b_fc1)

		# value - linear output layer
		self.v = tf.matmul(output, self.W_fc2) + self.b_fc2
		
		if num_trainable_vars[0] == None:
			num_trainable_vars[0] = len(tf.trainable_variables())
		
		self.trainable_vars = tf.trainable_variables()[-num_trainable_vars[0]:]
Author: cjratcliff | Project: async_deep_reinforce | Lines: 34 | Source: ac_network.py


Example 12: assign_w2v_pretrained_vectors

def assign_w2v_pretrained_vectors(session, word2vec_model, embedding_key, vocab_path, vocab_size, id_to_check):
    embedding_variable = [v for v in tf.trainable_variables() if embedding_key in v.name]
    if len(embedding_variable) != 1:
        print("Word vector variable not found or too many. key: " + embedding_key)
        print("Existing embedding trainable variables:")
        print([v.name for v in tf.trainable_variables() if "embedding" in v.name])
        sys.exit(1)

    embedding_variable = embedding_variable[0]
    vectors = embedding_variable.eval()

    with gfile.GFile(vocab_path, mode="r") as vocab_file:
        counter = 0
        while counter < vocab_size:
            vocab_w = vocab_file.readline().replace("\n", "")
            # For each word in the vocabulary, check whether a w2v vector exists and inject it;
            # otherwise leave the randomly initialised value unchanged.
            if vocab_w and word2vec_model.__contains__(vocab_w):
                w2w_word_vector = word2vec_model.get_vector(vocab_w)
                vectors[counter] = w2w_word_vector
            if counter == id_to_check:
                print(vectors[counter])
            counter += 1
    print("Reinitialising embeddings with pretrained")
    session.run(tf.assign(embedding_variable, vectors))
Author: jonathanmanfield | Project: deepreferendum | Lines: 25 | Source: embeddings_utils.py


Example 13: __init__

    def __init__(self, max_gradient, batch_size, time_steps, vocabulary_size, hidden_units, layers):
        self.max_gradient = max_gradient
        self.layers = layers
        # Add vocabulary slots of out of vocabulary (index 1) and padding (index 0).
        vocabulary_size += 2

        with tf.name_scope("Parameters"):
            self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
            self.keep_probability = tf.placeholder(tf.float32, name="keep_probability")

        with tf.name_scope("Input"):
            self.input = tf.placeholder(tf.int64, shape=(batch_size, time_steps), name="input")
            self.targets = tf.placeholder(tf.int64, shape=(batch_size, time_steps), name="targets")
            self.init = tf.placeholder(tf.float32, shape=(), name="init")

        with tf.name_scope("Embedding"):
            self.embedding = tf.Variable(tf.random_uniform((vocabulary_size, hidden_units), -self.init, self.init),
                                         dtype=tf.float32,
                                         name="embedding")
            self.embedded_input = tf.nn.embedding_lookup(self.embedding, self.input, name="embedded_input")

        with tf.name_scope("RNN"):
            cell = tf.nn.rnn_cell.LSTMCell(hidden_units)
            cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.keep_probability)
            rnn_layers = tf.nn.rnn_cell.MultiRNNCell([cell] * layers)
            self.reset_state = rnn_layers.zero_state(batch_size, dtype=tf.float32)
            self.state = tf.placeholder(tf.float32, self.reset_state.get_shape(), "state")
            self.outputs, self.next_state = tf.nn.dynamic_rnn(rnn_layers, self.embedded_input, time_major=True,
                                                              initial_state=self.state)

        with tf.name_scope("Cost"):
            # Concatenate all the batches into a single row.
            self.flattened_outputs = tf.reshape(tf.concat(1, self.outputs), (-1, hidden_units),
                                                name="flattened_outputs")
            # Project the outputs onto the vocabulary.
            self.w = tf.get_variable("w", (hidden_units, vocabulary_size))
            self.b = tf.get_variable("b", vocabulary_size)
            self.predicted = tf.matmul(self.flattened_outputs, self.w) + self.b
            # Compare predictions to labels.
            self.loss = tf.nn.seq2seq.sequence_loss_by_example([self.predicted], [tf.concat(-1, self.targets)],
                                                               [tf.ones(batch_size * time_steps)])
            self.cost = tf.div(tf.reduce_sum(self.loss), batch_size, name="cost")

        with tf.name_scope("Train"):
            self.validation_perplexity = tf.Variable(dtype=tf.float32, initial_value=float("inf"), trainable=False,
                                                     name="validation_perplexity")
            tf.scalar_summary(self.validation_perplexity.op.name, self.validation_perplexity)
            self.training_epoch_perplexity = tf.Variable(dtype=tf.float32, initial_value=float("inf"), trainable=False,
                                                         name="training_epoch_perplexity")
            tf.scalar_summary(self.training_epoch_perplexity.op.name, self.training_epoch_perplexity)
            self.iteration = tf.Variable(0, dtype=tf.int64, name="iteration", trainable=False)
            self.gradients, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tf.trainable_variables()),
                                                       max_gradient, name="clip_gradients")
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
            self.train_step = optimizer.apply_gradients(zip(self.gradients, tf.trainable_variables()),
                                                        name="train_step",
                                                        global_step=self.iteration)

        self.initialize = tf.initialize_all_variables()
        self.summary = tf.merge_all_summaries()
Author: wpm | Project: tfrnnlm | Lines: 60 | Source: rnn.py


Example 14: testFunctionalDenseTwiceReuse

 def testFunctionalDenseTwiceReuse(self):
   inputs = tf.random_uniform((5, 3), seed=1)
   core_layers.dense(inputs, 2, name='my_dense')
   vars1 = tf.trainable_variables()
   core_layers.dense(inputs, 2, name='my_dense', reuse=True)
   vars2 = tf.trainable_variables()
   self.assertEqual(vars1, vars2)
Author: Hwhitetooth | Project: tensorflow | Lines: 7 | Source: core_test.py


Example 15: evaluate

def evaluate():
    with tf.Graph().as_default():
        # Load the test data
        images, labels = data_inputs.inputs('data/train_kirin_norm_32.tfrecords')
        logits = model.inference(images)

        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        
        variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay)
        variables_to_restore = {}
        for v in tf.trainable_variables():
            if v in tf.trainable_variables():
                restore_name = variable_averages.average_name(v)
            else:
                restore_name = v.op.name
            variables_to_restore[restore_name] = v
        saver = tf.train.Saver(variables_to_restore)
        summary_op = tf.merge_all_summaries()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, graph_def=graph_def)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Author: pmnyc | Project: Machine_Learning_Test_Repository | Lines: 27 | Source: eval.py


Example 16: train

    def train(self, total_loss):
        loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
        losses = tf.get_collection('losses')
        loss_averages_op = loss_averages.apply(losses + [total_loss])

        for l in losses + [total_loss]:
            tf.scalar_summary(l.op.name + ' (raw)', l)

        # Apply gradients, and add histograms
        with tf.control_dependencies([loss_averages_op]):
            opt = tf.train.AdamOptimizer()
            grads = opt.compute_gradients(total_loss)
        apply_gradient_op = opt.apply_gradients(grads)
        for var in tf.trainable_variables():
            tf.histogram_summary(var.op.name, var)
        for grad, var in grads:
            if grad is not None:
                tf.histogram_summary(var.op.name + '/gradients', grad)

        # Track the moving averages of all trainable variables
        variable_averages = tf.train.ExponentialMovingAverage(Recognizer.MOVING_AVERAGE_DECAY)
        variables_averages_op = variable_averages.apply(tf.trainable_variables())

        with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
            train_op = tf.no_op(name='train')
        return train_op
Author: wolfinwool | Project: tf-face-recognizer | Lines: 26 | Source: recognizer.py


Example 17: train

def train(total_loss,global_step) :
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
        global_step,
        decay_steps,
        LEARNING_RATE_DECAY_FACTOR,
        staircase = True)
    tf.scalar_summary('learning_rate',lr)

    loss_averages_op = _add_loss_summaries(total_loss)

    with tf.control_dependencies([loss_averages_op]) :
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(total_loss)

    apply_gradient_op = opt.apply_gradients(grads,global_step = global_step)

    for var in tf.trainable_variables() :
        tf.histogram_summary(var.op.name,var)

    for grad,var in grads :
        if grad is not None :
            tf.histogram_summary(var.op.name + '/gradients',grad)

    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY,global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op,variables_averages_op]) :
        train_op = tf.no_op(name = 'train')

    return train_op
Author: fenss | Project: tat_algorithm | Lines: 34 | Source: cifar10_ex.py


Example 18: _CheckDecay

  def _CheckDecay(self, ema, actual_decay, dim):
    tens = _Repeat(10.0, dim)
    thirties = _Repeat(30.0, dim)
    var0 = tf.Variable(tens, name="v0")
    var1 = tf.Variable(thirties, name="v1")
    tf.initialize_all_variables().run()
    # Note that tensor2 is not a Variable but just a plain Tensor resulting
    # from the sum operation.
    tensor2 = var0 + var1
    update = ema.apply([var0, var1, tensor2])
    avg0 = ema.average(var0)
    avg1 = ema.average(var1)
    avg2 = ema.average(tensor2)

    self.assertItemsEqual([var0, var1], tf.moving_average_variables())

    self.assertFalse(avg0 in tf.trainable_variables())
    self.assertFalse(avg1 in tf.trainable_variables())
    self.assertFalse(avg2 in tf.trainable_variables())
    tf.initialize_all_variables().run()

    self.assertEqual("v0/ExponentialMovingAverage:0", avg0.name)
    self.assertEqual("v1/ExponentialMovingAverage:0", avg1.name)
    self.assertEqual("add/ExponentialMovingAverage:0", avg2.name)

    # Check initial values.
    self.assertAllClose(tens, var0.eval())
    self.assertAllClose(thirties, var1.eval())
    self.assertAllClose(_Repeat(10.0 + 30.0, dim), tensor2.eval())

    # Check that averages are initialized correctly.
    self.assertAllClose(tens, avg0.eval())
    self.assertAllClose(thirties, avg1.eval())
    # Note that averages of Tensor's initialize to zeros_like since no value
    # of the Tensor is known because the Op has not been run (yet).
    self.assertAllClose(_Repeat(0.0, dim), avg2.eval())

    # Update the averages and check.
    update.run()
    dk = actual_decay

    expected = _Repeat(10.0 * dk + 10.0 * (1 - dk), dim)
    self.assertAllClose(expected, avg0.eval())
    expected = _Repeat(30.0 * dk + 30.0 * (1 - dk), dim)
    self.assertAllClose(expected, avg1.eval())
    expected = _Repeat(0.0 * dk + (10.0 + 30.0) * (1 - dk), dim)
    self.assertAllClose(expected, avg2.eval())

    # Again, update the averages and check.
    update.run()
    expected = _Repeat((10.0 * dk + 10.0 * (1 - dk)) * dk + 10.0 * (1 - dk),
                       dim)
    self.assertAllClose(expected, avg0.eval())
    expected = _Repeat((30.0 * dk + 30.0 * (1 - dk)) * dk + 30.0 * (1 - dk),
                       dim)
    self.assertAllClose(expected, avg1.eval())
    expected = _Repeat(((0.0 * dk + (10.0 + 30.0) * (1 - dk)) * dk +
                        (10.0 + 30.0) * (1 - dk)),
                       dim)
    self.assertAllClose(expected, avg2.eval())
Author: 13683116633 | Project: tensorflow | Lines: 60 | Source: moving_averages_test.py


Example 19: testCompatibleNames

  def testCompatibleNames(self):
    with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()):
      cell = tf.nn.rnn_cell.LSTMCell(10)
      pcell = tf.nn.rnn_cell.LSTMCell(10, use_peepholes=True)
      inputs = [tf.zeros([4, 5])] * 6
      tf.nn.rnn(cell, inputs, dtype=tf.float32, scope="basic")
      tf.nn.rnn(pcell, inputs, dtype=tf.float32, scope="peephole")
      basic_names = {v.name: v.get_shape() for v in tf.trainable_variables()}

    with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()):
      cell = tf.contrib.rnn.LSTMBlockCell(10, use_compatible_names=True)
      pcell = tf.contrib.rnn.LSTMBlockCell(
          10, use_peephole=True, use_compatible_names=True)
      inputs = [tf.zeros([4, 5])] * 6
      tf.nn.rnn(cell, inputs, dtype=tf.float32, scope="basic")
      tf.nn.rnn(pcell, inputs, dtype=tf.float32, scope="peephole")
      block_names = {v.name: v.get_shape() for v in tf.trainable_variables()}

    with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()):
      cell = tf.contrib.rnn.LSTMBlockFusedCell(10)
      pcell = tf.contrib.rnn.LSTMBlockFusedCell(10, use_peephole=True)
      inputs = [tf.zeros([4, 5])] * 6
      cell(inputs, dtype=tf.float32, scope="basic/LSTMCell")
      pcell(inputs, dtype=tf.float32, scope="peephole/LSTMCell")
      fused_names = {v.name: v.get_shape() for v in tf.trainable_variables()}

    self.assertEqual(basic_names, block_names)
    self.assertEqual(basic_names, fused_names)
Author: brchiu | Project: tensorflow | Lines: 28 | Source: lstm_ops_test.py


Example 20: build_model

def build_model(x, y_, n_workers, is_chief):
    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, 60000 / BATCH_SIZE, LEARNING_RATE_DECAY)
   
    # Use tf.train.SyncReplicasOptimizer to implement synchronous updates.
    opt = tf.train.SyncReplicasOptimizer(
        tf.train.GradientDescentOptimizer(learning_rate),
        replicas_to_aggregate=n_workers,
        total_num_replicas=n_workers)

    train_op = opt.minimize(loss, global_step=global_step)     
    if is_chief:
        variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
        variables_averages_op = variable_averages.apply(tf.trainable_variables())
        with tf.control_dependencies([variables_averages_op, train_op]):
            train_op = tf.no_op()

    return global_step, loss, train_op, opt
Author: Ding-Ye | Project: tensorflow-tutorial | Lines: 28 | Source: 5.+同步更新模式样例程序.py



Note: The tensorflow.trainable_variables examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.

