Python tensorflow.no_op Function Code Examples


This article collects typical usage examples of the tensorflow.no_op function in Python. If you are wondering what tf.no_op does, how to call it, or what it looks like in real code, the curated examples below should help.



The following presents 19 code examples of the no_op function, collected from open-source projects and ordered roughly by popularity.
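Before the collected examples, here is a minimal, self-contained sketch of the most common tf.no_op pattern, assuming TensorFlow 1.x graph mode (tf.global_variables_initializer is the later name for the tf.initialize_all_variables call used in several examples below): a do-nothing op that, combined with tf.control_dependencies, acts as a single handle for a group of side-effecting ops.

import tensorflow as tf

# Two stateful updates we want to trigger together.
counter = tf.Variable(0, trainable=False)
total = tf.Variable(0.0, trainable=False)
inc_counter = counter.assign_add(1)
inc_total = total.assign_add(2.5)

# tf.no_op() does nothing on its own; under control_dependencies it
# becomes a single op whose execution forces both updates to run first.
with tf.control_dependencies([inc_counter, inc_total]):
    update_step = tf.no_op(name="update_step")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update_step)
    print(sess.run([counter, total]))  # [1, 2.5]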

Example 1: build_eval_graph

    def build_eval_graph(self):
        # Keep track of the totals while running through the batch data
        self.total_loss = tf.Variable(0.0, trainable=False, collections=[])
        self.total_correct = tf.Variable(0.0, trainable=False, collections=[])
        self.example_count = tf.Variable(0.0, trainable=False, collections=[])

        # Calculates the means
        self.mean_loss = self.total_loss / self.example_count
        self.accuracy = self.total_correct / self.example_count

        # Operations to update the stateful variables
        inc_total_loss = self.total_loss.assign_add(self.model.total_loss)
        inc_total_correct = self.total_correct.assign_add(
            tf.reduce_sum(tf.cast(self.model.correct_predictions, "float")))
        inc_example_count = self.example_count.assign_add(self.model.batch_size)

        # Operation to reset all the stateful vars. Should be called before starting a data set evaluation.
        with tf.control_dependencies(
                [self.total_loss.initializer, self.total_correct.initializer, self.example_count.initializer]):
            self.eval_reset = tf.no_op()

        # Operation to modify the stateful variables with data from one batch
        # Should be called for each batch in the evaluation set
        with tf.control_dependencies([inc_total_loss, inc_total_correct, inc_example_count]):
            self.eval_step = tf.no_op()

        # Summaries
        summary_mean_loss = tf.scalar_summary("mean_loss", self.mean_loss)
        summary_acc = tf.scalar_summary("accuracy", self.accuracy)
        self.summaries = tf.merge_summary([summary_mean_loss, summary_acc])
Developer: alphawolfxiaoliu, Project: tf-models, Lines: 30, Source: rnn_classifier.py
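The accumulate/reset pattern above generalizes well. The following self-contained miniature (hypothetical names, TF 1.x) reproduces it: because the accumulator variables are created with collections=[], the global initializer ignores them, and eval_reset runs their initializers explicitly.

import tensorflow as tf

# batch_loss stands in for the model's per-batch loss tensor.
batch_loss = tf.placeholder(tf.float32)
total_loss = tf.Variable(0.0, trainable=False, collections=[])
example_count = tf.Variable(0.0, trainable=False, collections=[])

inc_loss = total_loss.assign_add(batch_loss)
inc_count = example_count.assign_add(1.0)
mean_loss = total_loss / example_count

# eval_reset re-initializes the accumulators; eval_step folds in one batch.
with tf.control_dependencies([total_loss.initializer, example_count.initializer]):
    eval_reset = tf.no_op()
with tf.control_dependencies([inc_loss, inc_count]):
    eval_step = tf.no_op()

with tf.Session() as sess:
    sess.run(eval_reset)
    for loss in [2.0, 4.0]:
        sess.run(eval_step, feed_dict={batch_loss: loss})
    print(sess.run(mean_loss))  # 3.0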


Example 2: get_run_op

def get_run_op():
  # Create an optimizer that performs gradient descent.
  #opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
  slice_size = FLAGS.batch_size / FLAGS.num_cuts
  print('Slice size:{}'.format(slice_size))
  data = None
  label = None
  last_fc = [tf.no_op()]
  with tf.device('/gpu:0'):
    data = tf.get_variable(
        name = 'data',
        shape=[slice_size, FLAGS.hidden_size],
        trainable=False)
    '''
    label = tf.get_variable(
        name = 'label',
        shape = [slice_size, FLAGS.hidden_size],
        trainable=False)
    with tf.variable_scope('fc_in'):
      weight_in = tf.zeros([1000, FLAGS.hidden_size])
      for k in xrange(FLAGS.num_cuts):
        with tf.control_dependencies([last_fc[-1]]):
            last_fc.append(tf.matmul(data[k+1], weight_in))
    '''
  for i in xrange(FLAGS.num_cuts):
    last_fc.append(data)
  for i in xrange(FLAGS.num_layers):
    dev = '/gpu:%d' % (i * FLAGS.num_gpus / FLAGS.num_layers)
    with tf.device(dev), scopes.arg_scope([variables.variable], device=dev):
      tmp_fc = [tf.no_op()]
      with tf.variable_scope('fc%d' % i):
        w = tf.get_variable(
            name='w',
            shape=[FLAGS.hidden_size, FLAGS.hidden_size],
            trainable=True)
        for k in xrange(FLAGS.num_cuts):
          with tf.control_dependencies([tmp_fc[-1]]):
            tmp_fc.append(tf.matmul(last_fc[k+1], w))
      last_fc = tmp_fc
      if i == FLAGS.num_layers - 1:
        with tf.control_dependencies(last_fc):
          train_op = tf.no_op()
  '''
  with tf.device('/gpu:%d' % (FLAGS.num_gpus - 1)):
    tmp_fc = [tf.no_op()]
    with tf.variable_scope('fc_out'):
      weight_out = tf.zeros([FLAGS.hidden_size, 1000])
      for k in xrange(FLAGS.num_cuts):
        with tf.control_dependencies([tmp_fc[-1]]):
          tmp_fc.append(tf.matmul(last_fc[k+1], weight_out))
    last_fc = tmp_fc
  loss = tf.nn.softmax_cross_entropy_with_logits(last_fc, label, name='xentropy')
  grads = opt.compute_gradients(loss)
  apply_gradient_op = opt.apply_gradients(grads)

  train_op = tf.group(apply_gradient_op)
  '''
  init_op = tf.initialize_all_variables()

  return init_op, train_op
Developer: houcy, Project: models, Lines: 60, Source: pipelining.py
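One detail worth noting in this example: the dependency chains are seeded with a bare tf.no_op() (last_fc = [tf.no_op()], tmp_fc = [tf.no_op()]) so that chain[-1] is always a valid dependency even before any real op has been appended. A stripped-down sketch of that sentinel pattern, with hypothetical names:

import tensorflow as tf

# Seed the chain with a no_op sentinel so chain[-1] always exists.
chain = [tf.no_op()]
x = tf.constant(1.0)
for _ in range(3):
    # Each multiply is serialized behind the previous element of the chain.
    with tf.control_dependencies([chain[-1]]):
        x = x * 2.0
        chain.append(x)

with tf.Session() as sess:
    print(sess.run(x))  # 8.0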


Example 3: main

def main(unused_args):

    config = get_config(FLAGS.model_size)
    eval_config = get_config(FLAGS.model_size)
    saved_model_path = FLAGS.model_path
    weights_dir = FLAGS.weights_dir
    verbose = FLAGS.verbose
    debug = FLAGS.debug


    if weights_dir is not None:
        if not os.path.exists(weights_dir):
            os.mkdir(weights_dir)
    if not debug:
        raw_data = reader.ptb_raw_data(FLAGS.data_path, "ptb.train.txt", "ptb.valid.txt", "ptb.test.txt")
    else:
        raw_data = reader.ptb_raw_data(FLAGS.data_path, "emma.txt", "emma.val.txt", "emma.test.txt")

    # load up PTB data
    train_data, val_data, test_data, vocab, word_to_id = raw_data

    with tf.Graph().as_default(), tf.Session() as session:
        initialiser = tf.random_uniform_initializer(-config.init_scale, config.init_scale)

        with tf.variable_scope('model', reuse=None, initializer=initialiser):
            m = ACTModel(config,is_training=True)

            # if we have a saved/pre-trained model, load it.
            if saved_model_path is not None:
                saveload.main(saved_model_path, session)

        with tf.variable_scope("model", reuse=True):
            m_val = ACTModel(config, is_training=False)
            m_test = ACTModel(eval_config,is_training=False)

        tf.initialize_all_variables().run()

        print("starting training")
        for i in range(config.max_max_epoch):

            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            session.run(tf.assign(m.lr, config.learning_rate * lr_decay))
            train_loss = run_epoch(session, m, train_data, m.train_op, verbose=True)
            valid_loss = run_epoch(session, m_val, val_data, tf.no_op())

            if verbose:
                print("Epoch: {} Learning rate: {}".format(i + 1, session.run(m.lr)))
                print("Epoch: {} Train Loss: {}".format(i + 1, train_loss))
                print("Epoch: %d Valid Loss: %.3f" % (i + 1, valid_loss))

            # save weights in a pickled dictionary format
            if weights_dir is not None:
                date = "{:%m.%d.%H.%M}".format(datetime.now())
                saveload.main(weights_dir + "/Epoch_{:02}Train_{:0.3f}Val_{:0.3f}date{}.pkl"
                              .format(i+1,train_loss,valid_loss, date), session)


        test_loss = run_epoch(session, m_test, test_data, tf.no_op())
    if verbose:
        print("Test Perplexity: %.3f" % test_loss)
Developer: kastnerkyle, Project: act-tensorflow, Lines: 60, Source: ACT_Training.py


Example 4: testQueueRunnerSerializationRoundTrip

    def testQueueRunnerSerializationRoundTrip(self):
        graph = tf.Graph()
        with graph.as_default():
            queue = tf.FIFOQueue(10, tf.float32, name="queue")
            enqueue_op = tf.no_op(name="enqueue")
            close_op = tf.no_op(name="close")
            cancel_op = tf.no_op(name="cancel")
            qr0 = tf.train.QueueRunner(
                queue,
                [enqueue_op],
                close_op,
                cancel_op,
                queue_closed_exception_types=(tf.errors.OutOfRangeError, tf.errors.CancelledError),
            )
            qr0_proto = tf.train.QueueRunner.to_proto(qr0)
            qr0_recon = tf.train.QueueRunner.from_proto(qr0_proto)
            self.assertEqual("queue", qr0_recon.queue.name)
            self.assertEqual(1, len(qr0_recon.enqueue_ops))
            self.assertEqual(enqueue_op, qr0_recon.enqueue_ops[0])
            self.assertEqual(close_op, qr0_recon.close_op)
            self.assertEqual(cancel_op, qr0_recon.cancel_op)
            self.assertEqual(
                (tf.errors.OutOfRangeError, tf.errors.CancelledError), qr0_recon.queue_closed_exception_types
            )

            # Assert we reconstruct an OutOfRangeError for QueueRunners
            # created before QueueRunnerDef had a queue_closed_exception_types field.
            del qr0_proto.queue_closed_exception_types[:]
            qr0_legacy_recon = tf.train.QueueRunner.from_proto(qr0_proto)
            self.assertEqual("queue", qr0_legacy_recon.queue.name)
            self.assertEqual(1, len(qr0_legacy_recon.enqueue_ops))
            self.assertEqual(enqueue_op, qr0_legacy_recon.enqueue_ops[0])
            self.assertEqual(close_op, qr0_legacy_recon.close_op)
            self.assertEqual(cancel_op, qr0_legacy_recon.cancel_op)
            self.assertEqual((tf.errors.OutOfRangeError,), qr0_legacy_recon.queue_closed_exception_types)
Developer: botonchou, Project: tensorflow, Lines: 35, Source: queue_runner_test.py
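Here the no_ops are given explicit names so they can stand in for real enqueue/close/cancel ops during serialization. A quick sketch of why the names matter (TF 1.x; graph.get_operation_by_name is the standard lookup):

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    tf.no_op(name="enqueue")

# A named no_op can be recovered from the graph later, which is what
# makes it usable as a serializable placeholder in tests like this one.
op = graph.get_operation_by_name("enqueue")
print(op.name, op.type)  # enqueue NoOp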


Example 5: main

def main(_):
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")

    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = get_config()
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = PTBModel(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config)
            mtest = PTBModel(is_training=False, config=eval_config)

        tf.initialize_all_variables().run()

        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session, m, train_data, m.train_op,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
            valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
            print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

        test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
        print("Test Perplexity: %.3f" % test_perplexity)
Developer: kkboy123, Project: Tensorflow001, Lines: 35, Source: ptb_word_lm.py


Example 6: main

def main(_):
  assert(FLAGS.num_gpus > 1)
  slice_size = int(FLAGS.hidden_size / FLAGS.num_gpus)
  feature_size = slice_size * FLAGS.num_gpus
  print("Slice size: {} Feature size: {}".format(slice_size, feature_size))
  weight_shape = [slice_size, feature_size]

  # create graph
  weights, grads = make_weights(weight_shape)
  ff_deps = [[tf.no_op() for j in range(FLAGS.num_gpus)] for i in range(FLAGS.num_layers)]
  bp_deps = [[tf.no_op() for j in range(FLAGS.num_gpus)] for i in range(FLAGS.num_layers)]
  for i in range(FLAGS.num_cuts):
    with tf.name_scope('data_cut%d' % i):
      data = make_data(FLAGS.batch_size / FLAGS.num_cuts, slice_size)
    with tf.name_scope('model_cut%d' % i):
      ff_deps, bp_deps = ff_bp(data, weights, grads, ff_deps, bp_deps)

  # create session
  sess = tf.Session()
  # init variables
  print('Initialize Variables')
  sess.run(tf.initialize_all_variables())
  print('Initialize Done')
  # run
  merged = tf.merge_all_summaries()
  writer = tf.train.SummaryWriter(log_dir, sess.graph)
  grads_flatten = sum(grads, [])
  with tf.control_dependencies(grads_flatten):
    train_op = tf.no_op()
  time_tensorflow_run(sess, train_op, 'Training')
Developer: jermainewang, Project: models, Lines: 30, Source: modelpar_pipeline.py


Example 7: run_epoch

    def run_epoch(self, session, x, y=None, train_op=None, shuffle=True, verbose=10):
        dp=self.config.dropout
        predictions=self.predictions
        loss=self.loss
        if not train_op:
            train_op=tf.no_op()
            dp=1
        if y is None:
            loss=tf.no_op()

        total_steps=sum(1 for x in data_iterator(x, y, self.config.batch_size))
        total_loss=[]
        total_pred=[]

        for step, (_x, _y) in enumerate(data_iterator(x, y, self.config.batch_size, shuffle)):
            feed={self.input_placeholder: _x,
                  self.dropout_placeholder: dp}
            if _y is not None:
                feed[self.labels_placeholder]=_y
            
            _pred, _loss, _=session.run([predictions, loss, train_op], feed_dict=feed)
            total_pred.append(_pred)
            if y is not None:
                total_loss.append(_loss)
            if verbose and step % verbose==0:
                sys.stdout.write('\r{} / {} : loss = {}'.format(
                    step, total_steps, np.mean(total_loss)))
                sys.stdout.flush()
        if verbose:
            sys.stdout.write('\r')
            sys.stdout.flush()

        assert np.vstack(total_pred).reshape([-1]).shape[0]==x.shape[0], 'pred and x not equal size'
        return np.vstack(total_pred).reshape([-1]), np.mean(total_loss)
Developer: buyijie, Project: bybolove, Lines: 34, Source: nn.py
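The trick that makes this work is that Session.run returns None for an Operation and a value for a Tensor, so tf.no_op() can be passed anywhere an optional train op (or, here, even the loss) is expected. A minimal illustration:

import tensorflow as tf

x = tf.constant(3.0)
train_op = tf.no_op()  # evaluation mode: nothing to train

with tf.Session() as sess:
    # Operations come back as None; tensors come back as values.
    value, nothing = sess.run([x, train_op])
    print(value, nothing)  # 3.0 None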


Example 8: main

def main(_):

  filename = "Data11-17.txt"
  vectors_data1,labels_data1 = read_data.read_data(filename)
  filename = "valid18-20.txt"
  vectors_data2,labels_data2 = read_data.read_data(filename)
  filename = "Data21-25.txt"
  vectors_data3,labels_data3 = read_data.read_data(filename)

  vectors_data = np.vstack((vectors_data1,vectors_data2,vectors_data3))
  print(vectors_data.shape)
  labels_data = np.vstack((np.reshape(labels_data1,(len(labels_data1),1)),
    np.reshape(labels_data2,(len(labels_data2),1)),
      np.reshape(labels_data3,(len(labels_data3),1))))
  labels_data = np.reshape(labels_data,-1)
  print(labels_data.shape)

  filename = "Data4-10.txt"
  validation_data,vlabels_data = read_data.read_data(filename)
  filename = "Data26-29.txt"
  test_data,tlabels_data = read_data.read_data(filename)
  test_data = test_data[0:8000,]
  tlabels_data = tlabels_data[0:8000,]

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default(), tf.Session() as session:

    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, config=config)
      mtest = PTBModel(is_training=False, config=eval_config)
    
    
    tf.initialize_all_variables().run()

    summary_writer = tf.train.SummaryWriter("train/lstm3s",session.graph)

    for i in range(config.max_max_epoch):
      lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      m.assign_lr(session, config.learning_rate * lr_decay)

      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))

      train_perplexity = run_epoch(session, m, vectors_data, labels_data, m.train_op,summary_writer, 
                                   verbose=True)
      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))

      valid_perplexity = run_epoch(session, mvalid, validation_data, vlabels_data, tf.no_op(),summary_writer)
      print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

    test_perplexity = run_epoch(session, mtest, test_data, tlabels_data, tf.no_op(),summary_writer)
    print("Test Perplexity: %.3f" % test_perplexity)
Developer: veralily, Project: data-mining, Lines: 59, Source: lstm1.py


Example 9: main

def main(config_size='small', num_epochs=10):
    
    def get_config(config_size):
        config_size = config_size.lower()
        if config_size == 'small':
            return c.SmallConfig()
        elif config_size == 'medium':
            return c.MediumConfig()
        elif config_size == 'large':
            return c.LargeConfig()
        else:
            raise ValueError('Unknown config size {} (small, medium, large)'.format(config_size))

    def run_epoch(session, m, data, eval_op, verbose=False):
        """Runs the model on the given data."""
        epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
        print(epoch_size)
        start_time = time.time()
        costs = 0.0
        iters = 0
        state = m.initial_state.eval()
        for step, (x, y) in enumerate(seq_iterator(data, m.batch_size, m.num_steps)):
            cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                             {m.input_data: x, m.targets: y, m.initial_state: state})
            costs += cost
            iters += m.num_steps

            print_interval = 20
            if verbose and epoch_size > print_interval \
                    and step % (epoch_size // print_interval) == print_interval:
                print("%.3f mse: %.8f speed: %.0f ips" % (step * 1.0 / epoch_size, costs / iters,
                     iters * m.batch_size / (time.time() - start_time)))
        return costs / (iters if iters > 0 else 1)

    with tf.Graph().as_default(), tf.Session() as session:
        config = get_config(config_size)
        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = StockLSTM(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mtest = StockLSTM(is_training=False, config=config)

        tf.initialize_all_variables().run()

        train_data, valid_data, test_data = get_data()

        for epoch in xrange(num_epochs):
            lr_decay = config.lr_decay ** max(epoch - num_epochs, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)
            cur_lr = session.run(m.lr)

            mse = run_epoch(session, m, train_data, m.train_op, verbose=True)
            vmse = run_epoch(session, mtest, valid_data, tf.no_op())
            print("Epoch: %d - learning rate: %.3f - train mse: %.3f - test mse: %.3f" %
                  (epoch, cur_lr, mse, vmse))

        tmse = run_epoch(session, mtest, test_data, tf.no_op())
        print("Test mse: %.3f" % tmse)
Developer: alexleech, Project: thesis, Lines: 58, Source: train_stock_lstm.py


Example 10: main

def main(unused_args):
  if not FLAGS.data_path:
    raise ValueError("Must specify --data_path to PTB data directory")

  if not FLAGS.save_path:
    raise ValueError("Must specify --save_path to model directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, config=config)
      mtest = PTBModel(is_training=False, config=eval_config)

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()

    ckpt=tf.train.get_checkpoint_state(FLAGS.save_path)
    if (ckpt):
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        saver.restore(session, ckpt.model_checkpoint_path)
    else:
        print("Created model with fresh parameters.")
        tf.initialize_all_variables().run()

    if not FLAGS.testonly:

        for i in range(config.max_max_epoch):
        
           lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
           m.assign_lr(session, config.learning_rate * lr_decay)

           print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
           train_perplexity = run_epoch(session, m, train_data, m.train_op,
                                   verbose=True)
           print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
      
           save_path = saver.save(session, FLAGS.save_path+'/model.ckpt',i)
           print("Model saved in: %s" % save_path)
      
           valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
           print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

    else:
         print("Running only a perplexity test")

    test_perplexity = run_epoch(session, mtest, test_data, tf.no_op(),verbose=True)
    print("Test Perplexity: %.3f" % test_perplexity)
Developer: hlt-mt, Project: tensorflow, Lines: 58, Source: mf_ptb_word_lm.py


Example 11: train

def train():
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to data directory")

  raw_data = reader.raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _, word_to_id = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, is_testing=False, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, is_testing=False, config=config)
      mtest = PTBModel(is_training=False, is_testing=True, config=eval_config)

    # tf.initialize_all_variables().run()
    if not os.path.exists(FLAGS.train_path):
      os.makedirs(FLAGS.train_path)
      
    session.run(tf.initialize_all_variables())
    ckpt = tf.train.get_checkpoint_state(FLAGS.train_path)
    if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
      print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
      m.saver.restore(session, ckpt.model_checkpoint_path)
    else:
      print("Created model with fresh parameters.")
      session.run(tf.initialize_all_variables())

    valid_perplexity_old = 1000000000000000000

    for i in range(config.max_max_epoch):
      [train_data, valid_data, test_data] = reader.split_data(raw_data)

      lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      m.assign_lr(session, config.learning_rate * lr_decay)

      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity = run_epoch(session, m, train_data, m.train_op, verbose=True)
      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
      valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
      print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

      if valid_perplexity > valid_perplexity_old:
        break

      checkpoint_path = os.path.join(FLAGS.train_path, "translate.ckpt")
      m.saver.save(session, checkpoint_path, global_step=i)

      valid_perplexity_old = valid_perplexity

    test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
    print("Test Perplexity: %.3f" % test_perplexity)
Developer: jiamenguk, Project: word_encoding_rnn_lms, Lines: 58, Source: ptb_word_lm.py


Example 12: main

def main(_):
    ##### Configure these based on current situation. #####
    preload_model = False   # Shall we preload preloaded_epoch or train it from scratch?
    preloaded_epoch = 0     # The epoch to load (if required). Counting from 0.
    #######################################################
    if preload_model:
        load_model_file = "model{}.ckpt".format(preloaded_epoch)
        preloaded_epoch += 1
    else:
        preloaded_epoch = 0
    
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")

    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = get_config()
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = PTBModel(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config)
            mtest = PTBModel(is_training=False, config=eval_config)

        tf.initialize_all_variables().run()

        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()
        if(preload_model):
            saver.restore(session, load_model_file)

        for i in range(preloaded_epoch, config.max_max_epoch):
            # Some simple learning rate scheduling. :-)
            if(i>3):
                config.learning_rate = 0.1
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session, m, train_data, m.train_op,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
            valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
            print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
            # Save the variables to disk.
            save_path = saver.save(session, "model{}.ckpt".format(i))
            print("Model saved in file: %s" % save_path)

        test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
        print("Test Perplexity: %.3f" % test_perplexity)
Developer: powion, Project: storybot, Lines: 57, Source: ptb_word_lm.py


Example 13: apply_gradients

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    var_list = [ v for _,v in grads_and_vars]
    d_vars = []
    g_vars = []
    for grad,var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
        elif var in self.gan.g_vars():
            g_vars += [var]
        else:
            raise("Couldn't find var in g_vars or d_vars")
    w = [tf.Variable(self.config.start_at or 0.0), tf.Variable(self.config.start_at or 0.0)]

    Vidv = [self.gan.trainer.d_loss, self.gan.trainer.g_loss]
    #Vsoc = [1/2. * self.gan.trainer.d_loss + 1/2.* self.gan.trainer.g_loss, -1/2. * self.gan.trainer.d_loss - 1/2.* self.gan.trainer.g_loss]
    Vsoc = [1/2. * self.gan.trainer.d_loss + 1/2.* self.gan.trainer.g_loss, 1/2. * self.gan.trainer.d_loss + 1/2.* self.gan.trainer.g_loss]

    wlr = self.config.w_learn_rate or 0.01
    wt1 = [w[0] + wlr * (Vidv[0] - Vsoc[0]), w[1] + wlr * (Vidv[1] - Vsoc[1])]
    def clamped(net):
        return tf.maximum(self.config.min or 0., tf.minimum(net, self.config.max or 1.))

    self._prepare()

    wt1 = [clamped(wt1[0]),clamped(wt1[1])]
    self.gan.add_metric('wt0', wt1[0])
    self.gan.add_metric('wt1', wt1[1])
    op1 = tf.group(*[tf.assign(w, v) for w,v in zip(w, wt1)]) # store variables

    with tf.get_default_graph().control_dependencies([op1]):
        Vi = [(1. - w[0]) * Vidv[0] + w[0] * Vsoc[0],
              (1. - w[1]) * Vidv[1] + w[1] * Vsoc[1]]
        if self.config.reverse_w:
            Vi = [(w[0]) * Vidv[0] + (1.0-w[0]) * Vsoc[0],
                  (w[1]) * Vidv[1] + (1.0-w[1]) * Vsoc[1]]
        self.gan.add_metric('w0', w[0])
        self.gan.add_metric('w1', w[1])

        new_grads = tf.gradients(Vi[0], d_vars) + tf.gradients(Vi[1], g_vars)
        self.gan.trainer.d_loss = Vi[0]
        self.gan.trainer.g_loss = Vi[1]
        new_grads_and_vars = list(zip(new_grads, var_list)).copy()
        op3 = self.optimizer.apply_gradients(new_grads_and_vars.copy(), global_step=global_step, name=name)
        with tf.get_default_graph().control_dependencies([op3]):
            if(self.config.w_l1):
                # return to selfish state
                wt1 = [wt1[0] + self.config.w_l1 * ((self.config.l1_default or 0.0)-wt1[0]),
                       wt1[1] + self.config.w_l1 * ((self.config.l1_default or 0.0)-wt1[1])]
                op4 = tf.group(*[tf.assign(w, v) for w,v in zip(w, wt1)]) # store variables
                with tf.get_default_graph().control_dependencies([op4]):
                    self.gan.add_metric('l1w0', w[0])
                    self.gan.add_metric('l1w1', w[1])
                    return tf.no_op()

            else:
                return tf.no_op()
Developer: 255BITS, Project: hyperchamber-gan, Lines: 56, Source: social_optimizer.py
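The custom optimizer ends by returning tf.no_op() from apply_gradients: all of the real work (op1, op3, op4) is already wired in via nested control_dependencies, so the returned no_op is just the handle callers run to trigger the whole chain. A reduced sketch of that shape, with hypothetical names:

import tensorflow as tf

v = tf.Variable(1.0)
update = v.assign_add(1.0)

def apply_gradients_like():
    # The real work hangs off the dependency; the caller only sees a no_op.
    with tf.get_default_graph().control_dependencies([update]):
        return tf.no_op()

step = apply_gradients_like()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(step)
    print(sess.run(v))  # 2.0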


Example 14: main

def main(_):
    if not FLAGS.data_path:
        #raise ValueError("Must set --data_path to PTB data directory")
        pass

    train_data, valid_data, test_data = imdb_data.load_data()
    word2id, id2word = imdb_data.load_dict_imdb()

    accsTrain = []
    accsTest = []


    config = Config()
    eval_config = Config()
    eval_config.batch_size = 1
    
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = SentimentModel(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = SentimentModel(is_training=False, config=config)
            mtest = SentimentModel(is_training=False, config=eval_config)

        tf.initialize_all_variables().run()

        print("Starting")
        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity, accTrain = run_epoch(session, m, train_data, m.train_op, id2word,
                                       verbose=True)
            accsTrain.append(accTrain)
            
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
            valid_perplexity, crap = run_epoch(session, mvalid, valid_data, tf.no_op(),
                                         id2word)
            print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

            test_perplexity, accTest = run_epoch(session, mtest, test_data, tf.no_op(),id2word)
            accsTest.append(accTest)
            print("Test Perplexity: %.3f" % test_perplexity)
        
    plt.figure()
    plt.plot(accsTrain, label="train")
    plt.plot(accsTest, label="test")
    plt.show()
    plt.close()    
Developer: cbienpourtoi, Project: test-tensorflow-sentiments, Lines: 51, Source: sentiment_model_2.py


Example 15: _cached_copy

  def _cached_copy(self, var, name, pass_through=False):
    """Helper function to create a worker cached copy of a Variable.

    This assigns the var (either a single Variable or a list of Variables) to
    local transient cache Variable(s). Note that if var is a list of Variables,
    the assignment is done sequentially to minimize the memory overheads.
    Also note that if pass_through is set to True, this does not create new
    Variables but simply returns the input.

    Args:
      var: A Variable or a list of Variables to cache.
      name: name of cached Variable.
      pass_through: when set to True, this simply passes the var back
        through an identity operator and does not actually create a cache.

    Returns:
      Tuple consisting of following three entries:
      cache: the new transient Variable or list of transient Variables
        corresponding one-to-one with var.
      cache_init: op to initialize the Variable or the list of Variables.
      cache_reset: op to reset the Variable or the list of Variables to some
        default value.
    """
    if var is None:
      return None, None, None
    elif pass_through:
      cache = var
      cache_init = tf.no_op()
      cache_reset = tf.no_op()
    elif isinstance(var, tf.Variable):
      cache = WALSModel._transient_var(name=name)
      with ops.colocate_with(cache):
        cache_init = tf.assign(cache, var, validate_shape=False)
        cache_reset = tf.assign(cache, 1.0, validate_shape=False)
    else:
      assert isinstance(var, list)
      assert var
      cache = [WALSModel._transient_var(name='%s_shard_%d' % (name, i))
               for i in xrange(len(var))]
      reset_ops = []
      for i, c in enumerate(cache):
        with ops.colocate_with(c):
          if i == 0:
            cache_init = tf.assign(c, var[i], validate_shape=False)
          else:
            with ops.control_dependencies([cache_init]):
              cache_init = tf.assign(c, var[i], validate_shape=False)
          reset_ops.append(tf.assign(c, 1.0, validate_shape=False))
      cache_reset = tf.group(*reset_ops)

    return cache, cache_init, cache_reset
Developer: brchiu, Project: tensorflow, Lines: 51, Source: factorization_ops.py
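In the pass_through branch the helper returns tf.no_op() for both cache_init and cache_reset, so calling code can run those ops unconditionally without a Python-side branch. Roughly, under assumed names:

import tensorflow as tf

def make_cache_ops(pass_through):
    # Hypothetical reduction of _cached_copy's control flow.
    if pass_through:
        return tf.no_op(), tf.no_op()   # safe to run; they do nothing
    v = tf.Variable(0.0)
    return v.initializer, v.assign(1.0)

init_op, reset_op = make_cache_ops(pass_through=True)
with tf.Session() as sess:
    sess.run(init_op)   # harmless either way
    sess.run(reset_op)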


Example 16: main

def main(_):
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to data directory")

    config = get_config()
    eval_config = get_config()
    # eval_config.batch_size = 1
    # eval_config.num_steps = 1

    raw_data, vocab_size = reader.converted_data(FLAGS.data_path, max_len=config.num_steps, min_nwords=200)
    config.vocab_size = vocab_size
    eval_config.vocab_size = vocab_size

    train_data, valid_data, test_data = reader.split_rawdata(raw_data)

    sess = tf.InteractiveSession()

    if os.path.exists(FLAGS.log_dir):
        shutil.rmtree(FLAGS.log_dir)
    writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph_def)

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = GenderModel(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = GenderModel(is_training=False, config=config)
            mtest = GenderModel(is_training=False, config=eval_config)

        tf.initialize_all_variables().run()

        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_error, train_acc, summary = run_epoch(session, m, train_data, m.train_op, verbose=True)
            writer.add_summary(summary, i)
            print("Epoch: %d Train xentorpy: %.3f" % (i + 1, train_error))
            print("Epoch: %d Train accuracy: %.3f" % (i + 1, train_acc))

            valid_error, valid_acc, summary = run_epoch(session, mvalid, valid_data, tf.no_op())
            print("Epoch: %d Validation xentropy: %.3f" % (i + 1, valid_error))
            print("Epoch: %d Validation accuracy: %.3f" % (i + 1, valid_acc))

        test_err, test_acc, summary = run_epoch(session, mtest, test_data, tf.no_op())
        print("Test Accuracy %.3f" % test_acc)
Developer: bigsea-t, Project: gender-classification, Lines: 48, Source: gender.py


Example 17: model_fn

 def model_fn(features, targets):
   # dummy variable:
   _ = tf.Variable([0.])
   _ = targets
   predictions = features["x"]
   loss = tf.constant([2.])
   return predictions, loss, tf.no_op()
Developer: perhapszzy, Project: tensorflow, Lines: 7, Source: estimators_test.py


Example 18: test_dequeue

 def test_dequeue(self):
   p = plan.TrainPlan()
   p.compiler = block_compiler.Compiler().compile(blocks.Scalar())
   p.is_chief_trainer = True
   p.batch_size = 3
   p.batches_per_epoch = 2
   p.queue_capacity = 12
   p.num_dequeuers = 1
   p.ps_tasks = 1
   q = p._create_queue(0)
   p._setup_dequeuing([q])
   input_batch = list(p.compiler.build_loom_inputs([7])) * 3
   q_enqueue = q.enqueue_many([input_batch * 4])
   p.losses['foo'], = p.compiler.output_tensors
   p.train_op = tf.no_op()
   p.finalize_stats()
   p.logdir = self.get_temp_dir()
   p.epochs = 2
   p.print_file = six.StringIO()
   init_op = tf.global_variables_initializer()
   sv = p.create_supervisor()
   with self.test_session() as sess:
     sess.run(init_op)
     sess.run(q_enqueue)
     p.run(sv, sess)
   expected = '\n'.join(['running train',
                         'train_size: 6',
                         'epoch:    1 train[loss: 7.000e+00]',
                         'epoch:    2 train[loss: 7.000e+00]',
                         'final model saved in file: %s' % p.logdir])
   log_str = p.print_file.getvalue()
   self.assertIn(expected, log_str)
Developer: wangbosdqd, Project: fold, Lines: 32, Source: plan_test.py


Example 19: moving_average

def moving_average(value, window):
    value = tf.to_float(value)
    shape = value.get_shape()

    queue_init = tf.zeros(tf.TensorShape(window).concatenate(shape))
    total_init = tf.zeros(shape)
    num_init = tf.constant(0, dtype=tf.float32)

    queue = tf.FIFOQueue(window, [tf.float32], shapes=[shape])
    total = tf.Variable(total_init, trainable=False)
    num = tf.Variable(num_init, trainable=False)

    init = tf.cond(
        tf.equal(queue.size(), 0),
        lambda: tf.group(
            queue.enqueue_many(queue_init),
            total.assign(total_init),
            num.assign(num_init)),
        lambda: tf.no_op())

    with tf.control_dependencies([init]):
        total_ = total + value - queue.dequeue()
        num_ = num + 1
        value_averaged = total_ / (tf.minimum(num_, window) + EPSILON)

        with tf.control_dependencies([queue.enqueue([value]), total.assign(total_), num.assign(num_)]):
            return tf.identity(value_averaged)
Developer: stmharry, Project: DeepBox, Lines: 27, Source: util.py
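tf.cond requires both a true and a false branch of the same kind; here lambda: tf.no_op() supplies the "do nothing" side opposite a tf.group of initialization ops, with both branches returning operations rather than tensors. A pared-down version of that conditional-reset shape (this relies on tf.cond accepting op-returning branches, as the code above does in its TF 1.x generation):

import tensorflow as tf

flag = tf.placeholder(tf.bool)
v = tf.Variable(5.0)

# Reset v only when flag is True; otherwise do nothing.
maybe_reset = tf.cond(flag,
                      lambda: tf.group(v.assign(0.0)),
                      lambda: tf.no_op())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(maybe_reset, feed_dict={flag: False})
    print(sess.run(v))  # 5.0
    sess.run(maybe_reset, feed_dict={flag: True})
    print(sess.run(v))  # 0.0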

