Python tensorflow.merge_summary Function Code Examples


This article collects typical usage examples of the tensorflow.merge_summary function in Python. If you are wondering how merge_summary works, how to call it, or what real-world usage looks like, the hand-picked code examples below may help.



The following presents 20 code examples of the merge_summary function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
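
Before the examples, here is a minimal, self-contained sketch of the pattern they all share, written against the pre-1.0 TensorFlow API these snippets use (the tensor name and log directory are invented for illustration):

import tensorflow as tf

# A hypothetical scalar to track; any tensor in the graph works.
loss = tf.constant(0.5, name="loss")

# Build individual summary ops, then merge them so that a single
# sess.run() fetch yields one serialized Summary protobuf.
loss_summ = tf.scalar_summary("loss", loss)
hist_summ = tf.histogram_summary("loss_hist", loss)
merged = tf.merge_summary([loss_summ, hist_summ])

with tf.Session() as sess:
    writer = tf.train.SummaryWriter("/tmp/merge_summary_demo", sess.graph)
    summary_str = sess.run(merged)
    writer.add_summary(summary_str, global_step=0)
    writer.close()

Every example below is a variation on this: build per-tensor summaries (scalar_summary, histogram_summary, image_summary, audio_summary), merge them with merge_summary, and periodically run the merged op and pass the result to a SummaryWriter.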

Example 1: __init__

    def __init__(self, decoder, l2_regularization):
        self.decoder = decoder

        self.copy_target_plc = [tf.placeholder(tf.int64, shape=[None]) for _ in decoder.copynet_logits]
        self.copy_w_plc = [tf.placeholder(tf.float32, shape=[None]) for _ in decoder.copynet_logits]

        copy_costs_in_time = [tf.nn.sparse_softmax_cross_entropy_with_logits(l, t) * w \
                for w, l, t in zip(self.copy_w_plc, decoder.copynet_logits, self.copy_target_plc)]

        copy_cost = sum([tf.reduce_sum(c) for c in copy_costs_in_time])
        tf.scalar_summary('train_copy_cost', copy_cost, collections=["summary_train"])
        tf.scalar_summary('val_copy_cost', copy_cost, collections=["summary_val"])

        with tf.variable_scope("l2_regularization"):
            l2_value = sum([tf.reduce_sum(v ** 2) for v in tf.trainable_variables()])
            if l2_regularization > 0:
                l2_cost = l2_regularization * l2_value
            else:
                l2_cost = 0.0

            tf.scalar_summary('train_l2_cost', l2_value, collections=["summary_train"])

        optimizer = tf.train.AdamOptimizer(1e-4)
        gradients = optimizer.compute_gradients(decoder.cost + copy_cost + l2_cost)
        #for (g, v) in gradients:
        #    if g is not None:
        #        tf.histogram_summary('gr_' + v.name, g, collections=["summary_gradients"])
        self.optimize_op = optimizer.apply_gradients(gradients, global_step=decoder.learning_step)
        #self.summary_gradients = tf.merge_summary(tf.get_collection("summary_gradients"))
        self.summary_train = tf.merge_summary(tf.get_collection("summary_train"))
        self.summary_val = tf.merge_summary(tf.get_collection("summary_val"))
Author: alvaz16, Project: neuralmonkey, Lines: 31, Source: copy_net_trainer.py
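
Example 1 registers its summaries under custom collection keys ("summary_train", "summary_val") instead of the default graph collection, then merges each collection separately so training and validation summaries can be fetched independently. A minimal sketch of that pattern (pre-1.0 API, tensor names invented):

import tensorflow as tf

x = tf.constant(1.0, name="x")

# collections=[...] registers the op under the given keys instead of
# the default tf.GraphKeys.SUMMARIES collection.
tf.scalar_summary("x_train", x, collections=["summary_train"])
tf.scalar_summary("x_val", x, collections=["summary_val"])

# Merge each collection into its own fetchable summary op.
summary_train = tf.merge_summary(tf.get_collection("summary_train"))
summary_val = tf.merge_summary(tf.get_collection("summary_val"))

Examples 4 and 11 below use the same trick, with tf.GraphKeys.SUMMARIES as one of the keys.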


Example 2: train

    def train(self, eval_on_test=False):
        """ Train model and save it to file.

        Train model with given hidden layers. Training data is created
        by prepare_training_data(), which must be called before this function.
        """
        tf.reset_default_graph()
        with tf.Session() as sess:
            feature_data = tf.placeholder("float", [None, self.num_predictors])
            labels = tf.placeholder("float", [None, self.num_classes])

            layers = [self.num_predictors] + self.hidden_layers + [self.num_classes]
            model = self.inference(feature_data, layers)
            cost, cost_summary_op = self.loss(model, labels)
            training_op = self.training(cost, learning_rate=0.0001)

            correct_prediction = tf.equal(tf.argmax(model, 1), tf.argmax(labels, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

            # Merge all variable summaries and save the results to log file
            # summary_op = tf.merge_all_summaries()
            accuracy_op_train = tf.scalar_summary("Accuracy on Train", accuracy)
            summary_op_train = tf.merge_summary([cost_summary_op, accuracy_op_train])
            if eval_on_test:
                accuracy_op_test = tf.scalar_summary("Accuracy on Test", accuracy)
                summary_op_test = tf.merge_summary([accuracy_op_test])

            summary_writer = tf.train.SummaryWriter(self.log_dir + self.model_name, sess.graph)

            train_dict = {
                feature_data: self.training_predictors_tf.values,
                labels: self.training_classes_tf.values.reshape(len(self.training_classes_tf.values), self.num_classes)}

            if eval_on_test:
                test_dict = {
                    feature_data: self.test_predictors_tf.values,
                    labels: self.test_classes_tf.values.reshape(len(self.test_classes_tf.values), self.num_classes)}

            init = tf.initialize_all_variables()
            sess.run(init)

            for i in range(1, self.max_iteration):
                sess.run(training_op, feed_dict=train_dict)

                # Write summary to log
                if i % 100 == 0:
                    summary_str = sess.run(summary_op_train, feed_dict=train_dict)
                    summary_writer.add_summary(summary_str, i)
                    if eval_on_test:
                        summary_str = sess.run(summary_op_test, feed_dict=test_dict)
                        summary_writer.add_summary(summary_str, i)
                    summary_writer.flush()

                # Print current accuracy to console
                if i % 5000 == 0:
                    print(i, sess.run(accuracy, feed_dict=train_dict))

            # Save trained parameters
            saver = tf.train.Saver()
            saver.save(sess, self.model_filename)
Author: kanoh-k, Project: pred225, Lines: 60, Source: model.py


Example 3: summary

    def summary(self):
      # Keep track of gradient values and sparsity (optional)
      grad_summaries = []
      for grad, var in self.grads_and_vars:
        if grad is not None:
          grad_hist_summary = tf.histogram_summary(var.op.name + '/gradients/hist', grad)
          sparsity_summary = tf.scalar_summary(var.op.name + '/gradients/sparsity', tf.nn.zero_fraction(grad))
          grad_summaries.append(grad_hist_summary)
          grad_summaries.append(sparsity_summary)

      grad_summaries_merged = tf.merge_summary(grad_summaries)

      # Output directory for models and summaries
      timestamp = str(int(time.time()))
      print("Writing to %s\n" % config.out_dir)

      # Summaries for loss and accuracy
      loss_summary = tf.scalar_summary("loss", self.loss)
      acc_summary = tf.scalar_summary("accuracy", self.accuracy)

      # Train Summaries
      self.train_summary_op = tf.merge_summary([loss_summary, acc_summary, grad_summaries_merged])
      train_summary_dir = os.path.join(config.out_dir, "summaries", "train")
      self.train_summary_writer = tf.train.SummaryWriter(train_summary_dir, self.sess.graph_def)

      # Dev summaries
      self.val_summary_op = tf.merge_summary([loss_summary, acc_summary])
      val_summary_dir = os.path.join(config.out_dir, "summaries", "val")
      self.val_summary_writer = tf.train.SummaryWriter(val_summary_dir, self.sess.graph_def)
Author: ioanachelu, Project: char-sent-classif, Lines: 29, Source: model.py


Example 4: create_summaries

 def create_summaries(self):
     tf.scalar_summary("eval_cost", self.eval_cost,
                       collections=[EVAL_SUMMARIES_COLLECTION])
     tf.scalar_summary("eval_accuracy", self.eval_accuracy,
                       collections=[EVAL_SUMMARIES_COLLECTION])
     self.summaries = tf.merge_summary(
         tf.get_collection(tf.GraphKeys.SUMMARIES))
     self.eval_summaries = tf.merge_summary(tf.get_collection(
         EVAL_SUMMARIES_COLLECTION))
Author: rsepassi, Project: tf-play, Lines: 9, Source: train.py


Example 5: define_summaries

    def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hist_sum = tf.merge_summary(all_sum['hist'])
Author: Soledad89, Project: StackGAN, Lines: 14, Source: trainer.py


Example 6: __init__

    def __init__(self, args, test):

        self.test = test
        self.reward = 0
        self.step_count = 0
        self.loss = 0.0
        self.loss_count = 0
        self.games = 0
        self.q_values = 0.0
        self.q_count = 0
        self.current_score = 0
        self.max_score = -1000000000
        self.min_score = 1000000000
        self.recording_frequency = args.recording_frequency

        with tf.device('/cpu:0'):
            self.spg = tf.placeholder(tf.float32, shape=[],
                                      name="score_per_game")
            self.mean_q = tf.placeholder(tf.float32, shape=[])
            self.total_gp = tf.placeholder(tf.float32, shape=[])
            self.max_r = tf.placeholder(tf.float32, shape=[])
            self.min_r = tf.placeholder(tf.float32, shape=[])
            self.time = tf.placeholder(tf.float32, shape=[])

            self.spg_summ = tf.scalar_summary('score_per_game', self.spg)
            self.q_summ = tf.scalar_summary('q_values', self.mean_q)
            self.gp_summ = tf.scalar_summary('steps_per_game', self.total_gp)
            self.max_summ = tf.scalar_summary('maximum_score', self.max_r)
            self.min_summ = tf.scalar_summary('minimum_score', self.min_r)
            self.time_summ = tf.scalar_summary('steps_per_second', self.time)

            if not test:
                self.mean_l = tf.placeholder(tf.float32, shape=[], name='loss')
                self.l_summ = tf.scalar_summary('loss', self.mean_l)
                self.summary_op = tf.merge_summary(
                    [self.spg_summ, self.q_summ, self.gp_summ, self.l_summ,
                     self.max_summ, self.min_summ, self.time_summ])
                self.path = (args.save_path + '/records/' + args.game + '/'
                             + args.agent_type + '/' + args.agent_name + '/train')
            else:
                self.summary_op = tf.merge_summary(
                    [self.spg_summ, self.q_summ, self.gp_summ, self.max_summ,
                     self.min_summ, self.time_summ])
                self.path = (args.save_path + '/records/' + args.game + '/'
                             + args.agent_type + '/' + args.agent_name + '/test')

            # self.summary_op = tf.merge_all_summaries()
            self.sess = tf.Session()
            self.summary_writer = tf.train.SummaryWriter(self.path)
            self.start_time = time.time()
Author: michalsustr, Project: ML_workshop, Lines: 50, Source: record_stats.py


Example 7: get_stat

def get_stat():
    fields = ['loss', 'acc']

    stat = {}
    for phase in data._PHASES:
        if phase == data._TRAIN:
            iteration = sum([len(file) for file in files[data._TRAIN]]) / _BATCH_SIZE
        elif phase == data._VAL:
            iteration = sum([len(file) for file in files[data._VAL]]) / _BATCH_SIZE

        raw_averages = {field: (net[field], util.moving_average(net[field], iteration)) for field in fields}

        display = {}
        display.update({'%s_raw' % field: raw_averages[field][0] for field in fields})
        display.update({'%s_avg' % field: raw_averages[field][1] for field in fields})

        summaries = []
        summaries += [tf.scalar_summary('%s_%s_raw' % (data._NAME[phase], field), raw_averages[field][0]) for field in fields]
        summaries += [tf.scalar_summary('%s_%s_avg' % (data._NAME[phase], field), raw_averages[field][1]) for field in fields]
        summary = tf.merge_summary(summaries)

        stat[phase] = dict(
            iteration=iteration,
            display=display,
            summary=summary)

    return stat
Author: stmharry, Project: BBQNet, Lines: 27, Source: bbqnet.py


Example 8: train

 def train(self, x_train, y_train, x_test, y_test, n_epoch=10):
     """Train the cnn."""
     self.session = tf.Session()
     with self.session.as_default():
         optimizer = tf.train.AdamOptimizer(1e-3)
         grad_vars = optimizer.compute_gradients(self.loss)
         train_op = optimizer.apply_gradients(grad_vars)
         # summaries
         acc_summary = tf.scalar_summary('accuracy', self.accuracy)
         loss_summary = tf.scalar_summary('loss', self.loss)
         summary_op = tf.merge_summary([acc_summary, loss_summary])
         summary_dir = os.path.join('cnn_logs', 'summaries')
         summary_writer = tf.train.SummaryWriter(summary_dir, self.session.graph)
         # Init session
         self.session.run(tf.initialize_all_variables())
         # Create the batch iterator
         batches = batch_iterator(list(zip(x_train, y_train)), 64, n_epoch)
         # Train loop
         i = 0
         for batch in batches:
             x_batch, y_batch = zip(*batch)
             # train step
             feed_dict = {self.x: x_batch, self.y_: y_batch, self.keep_prob: 0.5}
             _, summaries, loss, accuracy = self.session.run([train_op, summary_op, self.loss, self.accuracy], feed_dict)
             time = datetime.datetime.now().isoformat()
             i += 1
             print("%s : step %s || loss %s , acc %s" % (time, i, loss, accuracy))
             summary_writer.add_summary(summaries, i)
             # Evaluation on test set every 100 steps
             if i % 100 == 0:
                 print("\nEvaluation on test-set")
                 feed_dict = {self.x: x_test, self.y_: y_test, self.keep_prob: 1.0}
                 # Evaluate only: running train_op here would also train on the test set
                 loss, accuracy = self.session.run([self.loss, self.accuracy], feed_dict)
                 print("%s : step %s || loss %s , acc %s" % (time, i, loss, accuracy))
                 print("")
Author: Nlte, Project: text_classification_sentiment, Lines: 35, Source: cnn0.py


Example 9: setup_summaries

def setup_summaries(sess, env_id, args):
    ROOT_LOG_DIR = constants.LOG_FILE #os.getcwd() + "/tf-log/"
    TODAY_LOG_DIR = ROOT_LOG_DIR + "/" + datetime.now().date().isoformat()

    LOG_DIR = TODAY_LOG_DIR + "/" + datetime.now().time().replace(second=0, microsecond=0).isoformat()[0:-3].replace(':', '.')

    LOG_DIR += " %s" % env_id #env.spec.id # args.gym_env
    LOG_DIR += " lr=%f" % args.initial_learning_rate
    LOG_DIR += " hs=%s" % args.hidden_sizes
    LOG_DIR += " lstms=%s " % args.lstm_sizes

    if len(args.tag) > 0:
        LOG_DIR += " -- %s" % args.tag


    score_input = tf.placeholder(tf.float32,name="score_input")
    score_input_avg = tf.placeholder(tf.float32,name="score_input_avg")
    score_smooth = tf.Variable(dtype=tf.float32, initial_value=0, name="score_avg")
    score_smooth_assign_op = tf.assign(score_smooth, score_input * 0.01 + score_smooth * 0.99)

    score_summary_op = [tf.merge_summary([
            tf.scalar_summary("score", score_input),
            tf.scalar_summary("score_avg", score_input_avg),
            tf.scalar_summary("score_smooth", score_smooth),
        ]),
        score_smooth_assign_op]

    from collections import deque

    moving_avg_scores = deque(maxlen=100)


    # summary_op = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter(LOG_DIR, sess.graph_def)

    print("logs written to: %s " % LOG_DIR)
    print("tensorboard --logdir=%s" % LOG_DIR)

    # v1
    def _record_score_fn(sess, summary_writer, score, global_t):
        moving_avg_scores.append(score)
        score_avg = np.mean(moving_avg_scores)

        summary_str, _ = sess.run(score_summary_op, feed_dict={
            score_input: score,
            score_input_avg: score_avg
        })

        # print "record_score_fn:", summary_str
        summary_writer.add_summary(summary_str, global_t)

    return summary_writer, _record_score_fn
Author: dmacd, Project: async_deep_reinforce, Lines: 60, Source: summaries.py


Example 10: testMergeSummary

 def testMergeSummary(self):
   with self.test_session() as sess:
     const = tf.constant(10.0)
     summ1 = tf.histogram_summary("h", const, name="histo")
     summ2 = tf.scalar_summary("c", const, name="summ")
     merge = tf.merge_summary([summ1, summ2])
     value = sess.run(merge)
   self.assertEqual([], merge.get_shape())
   self.assertProtoEquals("""
     value {
       tag: "h"
       histo {
         min: 10.0
         max: 10.0
         num: 1.0
         sum: 10.0
         sum_squares: 100.0
         bucket_limit: 9.93809490288
         bucket_limit: 10.9319043932
         bucket_limit: 1.79769313486e+308
         bucket: 0.0
         bucket: 1.0
         bucket: 0.0
       }
     }
     value { tag: "c" simple_value: 10.0 }
   """, self._AsSummary(value))
Author: adeelzaman, Project: tensorflow, Lines: 27, Source: summary_ops_test.py


Example 11: testCanBeCalledMultipleTimes

    def testCanBeCalledMultipleTimes(self):
        batch_size = 20
        val_input_batch = [tf.zeros([2, 3, 4])]
        lbl_input_batch = tf.ones([], dtype=tf.int32)
        probs = np.array([0, 1, 0, 0, 0])
        batches = tf.contrib.training.stratified_sample(
            val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs
        )
        batches += tf.contrib.training.stratified_sample(
            val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs
        )
        batches += tf.contrib.training.stratified_sample_unknown_dist(
            val_input_batch, lbl_input_batch, probs, batch_size
        )
        batches += tf.contrib.training.stratified_sample_unknown_dist(
            val_input_batch, lbl_input_batch, probs, batch_size
        )
        summary_op = tf.merge_summary(tf.get_collection(tf.GraphKeys.SUMMARIES))

        with self.test_session() as sess:
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            sess.run(batches + (summary_op,))

            coord.request_stop()
            coord.join(threads)
Author: rhuangq, Project: tensorflow, Lines: 27, Source: sampling_ops_test.py


Example 12: testSummaries

  def testSummaries(self):
    with self.cached_session() as s:
      var = tf.Variable([1, 2, 3], dtype=tf.float32)
      s.run(tf.initialize_all_variables())
      x, y = np.meshgrid(np.linspace(-10, 10, 256), np.linspace(-10, 10, 256))
      image = np.sin(x**2 + y**2) / np.sqrt(x**2 + y**2) * .5 + .5
      image = image[None, :, :, None]

      # make a dummy sound
      freq = 440  # A = 440Hz
      sampling_frequency = 11000
      audio = np.sin(2 * np.pi * np.linspace(0, 1, sampling_frequency) * freq)
      audio = audio[None, :, None]
      test_dir = tempfile.mkdtemp()
      # test summaries
      writer = tf.train.SummaryWriter(test_dir)
      summaries = [
          tf.scalar_summary("scalar_var", var[0]),
          tf.scalar_summary("scalar_reduce_var", tf.reduce_sum(var)),
          tf.histogram_summary("var_histogram", var),
          tf.image_summary("sin_image", image),
          tf.audio_summary("sin_wave", audio, sampling_frequency),
      ]
      run_summaries = s.run(summaries)
      writer.add_summary(s.run(tf.merge_summary(inputs=run_summaries)))
      # This is redundant, but we want to be able to rewrite the command
      writer.add_summary(s.run(tf.merge_all_summaries()))
      writer.close()
      shutil.rmtree(test_dir)
Author: JonathanRaiman, Project: tensorflow, Lines: 29, Source: test_file_v0_11.py


Example 13: __setup_ops

 def __setup_ops(self):
     cross_entropy = -tf.reduce_sum(self.actual_class * tf.log(self.output))
     self.summary = tf.scalar_summary(self.label, cross_entropy)
     self.train_op = tf.train.AdamOptimizer(0.0001).minimize(cross_entropy)
     self.merge_summaries = tf.merge_summary([self.summary])
     correct_prediction = tf.equal(tf.argmax(self.output,1), tf.argmax(self.actual_class,1))
     self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
Author: unageanu, Project: jiji-with-tensorflow-example, Lines: 7, Source: model.py


Example 14: build_eval_graph

    def build_eval_graph(self):
        # Keep track of the totals while running through the batch data
        self.total_loss = tf.Variable(0.0, trainable=False, collections=[])
        self.total_correct = tf.Variable(0.0, trainable=False, collections=[])
        self.example_count = tf.Variable(0.0, trainable=False, collections=[])

        # Calculates the means
        self.mean_loss = self.total_loss / self.example_count
        self.accuracy = self.total_correct / self.example_count

        # Operations to modify the stateful variables
        inc_total_loss = self.total_loss.assign_add(self.model.total_loss)
        inc_total_correct = self.total_correct.assign_add(
            tf.reduce_sum(tf.cast(self.model.correct_predictions, "float")))
        inc_example_count = self.example_count.assign_add(self.model.batch_size)

        # Operation to reset all the stateful vars. Should be called before starting a data set evaluation.
        with tf.control_dependencies(
                [self.total_loss.initializer, self.total_correct.initializer, self.example_count.initializer]):
            self.eval_reset = tf.no_op()

        # Operation to modify the stateful variables with data from one batch
        # Should be called for each batch in the evaluation set
        with tf.control_dependencies([inc_total_loss, inc_total_correct, inc_example_count]):
            self.eval_step = tf.no_op()

        # Summaries
        summary_mean_loss = tf.scalar_summary("mean_loss", self.mean_loss)
        summary_acc = tf.scalar_summary("accuracy", self.accuracy)
        self.summaries = tf.merge_summary([summary_mean_loss, summary_acc])
Author: alphawolfxiaoliu, Project: tf-models, Lines: 30, Source: rnn_classifier.py


Example 15: prepare_loss

    def prepare_loss(self, entropy_beta):

        with tf.device(self._device), tf.name_scope(self.network_name):
            if self._continuous_mode:
                policy_loss, entropy, summaries = self._prepare_policy_loss_continuous(entropy_beta)
            else:
                policy_loss, entropy, summaries = self._prepare_policy_loss_discrete(entropy_beta)


            # R (input for value)
            self.r = tf.placeholder("float", [1],name="reward")
            # value loss (output)
            # (Learning rate for Critic is half of Actor's, so multiply by 0.5)
            value_loss = 0.5 * tf.nn.l2_loss(self.r - self.v)

            # gradients of policy and value are summed up
            self.total_loss = policy_loss + value_loss


            # todo: unclear if i really need these
            l = []
            l.extend(summaries)
            l += [tf.scalar_summary(["R"], self.r)]
            l += [tf.scalar_summary(["(R-V)"], self.td)]
            l += [tf.scalar_summary("V (loss eval)", tf.reduce_mean(self.v))] # tf.reshape(self.v, (1,)))]
            l += [tf.scalar_summary(["V (r-td)"], self.r - self.td)]
            l += [tf.scalar_summary("entropy", tf.reduce_mean(entropy))] # tf.reshape(entropy, (1,)))]
            l += [tf.scalar_summary("policy_loss", tf.reduce_mean(policy_loss))] # tf.reshape(policy_loss, (1,)))]    # TODO: HACK: when we do batch mode, will want a histogram and ditch the reshape, most likely?
            l += [tf.scalar_summary("value_loss", value_loss)]

            self.loss_summary_op = tf.merge_summary(l)
Author: dmacd, Project: async_deep_reinforce, Lines: 31, Source: lowdim_ac_network.py


Example 16: full_model

    def full_model(data):
        output_logits, queue_updates = predictor(data)
        output_logits = output_logits[:, :SIG_LEN-1, :]
        output_mean = tf.argmax(output_logits, dimension=2)

        targets = data[:, 1:]
        quantized_targets = quantizer(targets, QUANT_LOWER, QUANT_UPPER, QUANT_LEVELS)
        with tf.name_scope('error'):
            batch_error = tf.reduce_mean(tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(output_logits, quantized_targets), reduction_indices=[1]))

            error_summary = tf.scalar_summary('training error', (running_error + batch_error)/(num_runs + 1.0))
        output_plot = crappy_plot(output_mean, QUANT_LEVELS)
        target_plot = crappy_plot(quantized_targets, QUANT_LEVELS)

        M = tf.reduce_max(output_logits)
        m = tf.reduce_min(output_logits)
        scaled_logits = (output_logits-m)/(M-m)
        # image = draw_on(tf.transpose(scaled_logits, perm=[0, 2, 1])[:, :, :, None], target_plot, [1.0, 0.0, 0.0])
        # Casting is to work around some stupid tf bug; shouldn't be necessary
        output_probs = tf.reshape(tf.cast(tf.nn.softmax(tf.reshape(tf.cast(output_logits, tf.float64), [-1, QUANT_LEVELS])), tf.float32), [-1, SIG_LEN-1, QUANT_LEVELS])
        image = draw_on(tf.transpose(output_probs, perm=[0, 2, 1])[:, :, :, None], target_plot, [1.0, 0.0, 0.0])


        # image = draw_on(1.0, target_plot, [1.0, 0.0, 0.0])    # The first 1.0 starts with a white canvas
        # image = draw_on(image, output_plot, [0.0, 0.0, 1.0])

        sample_summary = tf.image_summary('posterior_sample', image, 5)
        summaries = tf.merge_summary([error_summary, sample_summary])
        return output_mean, queue_updates, batch_error, batch_error, summaries #+ 0.1*weight_decay
Author: NoahDStein, Project: NeuralNetSandbox, Lines: 29, Source: wavenet.py


Example 17: evaluate

def evaluate(dataset_path):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default(), tf.device('/cpu:0'):
    train_dir = Path(FLAGS.checkpoint_dir)
    reference_shape = mio.import_pickle(train_dir / 'reference_shape.pkl')
    
    images, gt_truth, inits, _ = data_provider.batch_inputs(
            [dataset_path], reference_shape,
            batch_size=FLAGS.batch_size, is_training=False)

    mirrored_images, _, mirrored_inits, shapes = data_provider.batch_inputs(
        [dataset_path], reference_shape,
        batch_size=FLAGS.batch_size, is_training=False, mirror_image=True)

    print('Loading model...')
    # Build a Graph that computes the logits predictions from the
    # inference model.
    with tf.device(FLAGS.device):
        patch_shape = (FLAGS.patch_size, FLAGS.patch_size)
        pred, _, _ = mdm_model.model(images, inits, patch_shape=patch_shape)

        tf.get_variable_scope().reuse_variables()

        pred_mirrored, _, _ = mdm_model.model(
            mirrored_images, mirrored_inits, patch_shape=patch_shape)

    pred_images, = tf.py_func(utils.batch_draw_landmarks,
            [images, pred], [tf.float32])
    gt_images, = tf.py_func(utils.batch_draw_landmarks,
            [images, gt_truth], [tf.float32])

    summaries = []
    summaries.append(tf.image_summary('images',
        tf.concat(2, [gt_images, pred_images]), max_images=5))
    
    avg_pred = pred + tf.py_func(flip_predictions, (pred_mirrored, shapes), (tf.float32, ))[0]
    avg_pred /= 2.

    # Calculate predictions.
    norm_error = mdm_model.normalized_rmse(avg_pred, gt_truth)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        mdm_train.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_summary(summaries)

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      _eval_once(saver, summary_writer, norm_error, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
Author: trigeorgis, Project: mdm, Lines: 59, Source: mdm_eval.py


Example 18: main

def main(graph_path, Model, stream, validstream, continue_training=False, 
        start_model=None, start_ind=0, save_every=1):
    """Run a complete training session. Will load a saved model to continue training
    if provided. After every epoch the current model will be saved, and the tensorboard
    will graph new data.
    """  
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-Config.init_scale,
                                                     Config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = Model(config=Config)

        tf.initialize_all_variables().run()
        saver = tf.train.Saver(max_to_keep=Config.num_models)
        if continue_training:
            print("Continuing training from saved model ",start_model)
            saver.restore(session,start_model)
        writer = tf.train.SummaryWriter(graph_path, max_queue=3) 
        last3 = []
        learning_rate = Config.learning_rate
        session.run(tf.assign(m.lr, learning_rate))
        tol = 0.001
        for i in range(start_ind, start_ind+Config.num_epochs):
            print("EPOCH: %s"%i)
            print("learning_rate: %s"%learning_rate)
            epoch_cost, median_cost, max_cost = m.run_epoch(session, stream.get_sents(), True)   
            print("Total cost for EPOCH: %s"%i)
            print(epoch_cost)
            print("Median cost: %s"%median_cost)
            print("Max cost: %s"%max_cost)
            accuracy = m.run_epoch(session, validstream.get_sents(), False)
            print("accuracy: %s"%accuracy)
            summ1 = tf.scalar_summary("epoch_cost", tf.constant(epoch_cost))
            summ2 = tf.scalar_summary("median_cost", tf.constant(median_cost))
            summ3 = tf.scalar_summary("max_cost", tf.constant(max_cost))
            summ4 = tf.scalar_summary("learning_rate", tf.constant(learning_rate))
            summ5 = tf.scalar_summary("accuracy", tf.constant(accuracy))
            merge = tf.merge_summary([summ1, summ2, summ3, summ4, summ5])
            writer.add_summary(merge.eval(), i)
            if i % save_every == 0:
                saver.save(session, model_dir + 'saved-lstm-model', global_step=i)
            if len(last3) == 3:
                h = max(last3)
                if last3[2] == h:
                    learning_rate = learning_rate/2
                    session.run(tf.assign(m.lr, learning_rate))
                elif last3[1] == h:
                    if (last3[1] - last3[2])/last3[1] < tol:
                        learning_rate = learning_rate/2
                        session.run(tf.assign(m.lr, learning_rate))
                else:
                    if (h - min(last3))/h < tol:
                        learning_rate = learning_rate/2
                        session.run(tf.assign(m.lr, learning_rate))
                last3 = last3[1:] + [median_cost]
            elif len(last3) < 3:
                last3 = last3 + [median_cost]
            else:
                raise Exception
Author: chrhansen, Project: USF, Lines: 59, Source: runner.py


Example 19: record_summary

def record_summary():
    w3_summary = tf.scalar_summary("weight 3", w3)
    w2_summary = tf.scalar_summary("weight 2", w2)
    w1_summary = tf.scalar_summary("weight 1", w1)
    w0_summary = tf.scalar_summary("weight 0", w0)
    loss_summary = tf.scalar_summary("loss", loss)
    m = tf.merge_summary([w3_summary, w2_summary, w1_summary, w0_summary, loss_summary]) 
    return m
Author: hylosy, Project: tensorflow-study, Lines: 8, Source: polynominal_fitting.py


Example 20: setup_validation_summary

def setup_validation_summary():
    acc = tf.placeholder(tf.float32)
    auc = tf.placeholder(tf.float32)
    valid_summaries = [
        tf.summary.scalar('validation/acc', acc),
        tf.summary.scalar('validation/auc', auc)
    ]
    return tf.merge_summary(valid_summaries), acc, auc
Author: alrojo, Project: EEG_DauwelsLab, Lines: 8, Source: mlp.py
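
A closing note on API versions: scalar_summary, histogram_summary, merge_summary, and merge_all_summaries belong to the pre-1.0 TensorFlow API; they were deprecated in 0.12 and removed in 1.0 in favor of the tf.summary namespace (Example 20 above already mixes the two styles). For readers on 1.x, a minimal sketch of the same merge pattern in the replacement API:

import tensorflow as tf

loss = tf.constant(0.5, name="loss")

# Post-1.0 equivalents: tf.summary.scalar / tf.summary.merge
# (or tf.summary.merge_all to gather every registered summary).
loss_summ = tf.summary.scalar("loss", loss)
merged = tf.summary.merge([loss_summ])

with tf.Session() as sess:
    writer = tf.summary.FileWriter("/tmp/merge_summary_demo_v1", sess.graph)
    writer.add_summary(sess.run(merged), global_step=0)
    writer.close()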



Note: The tensorflow.merge_summary examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many programmers; copyright belongs to the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.

