
Python tensorflow.initialize_all_variables Function Code Examples


This article collects typical usage examples of the tensorflow.initialize_all_variables function in Python. If you are unsure what initialize_all_variables does, how to call it, or want to see it used in real code, the curated examples below should help.



The sections below present 20 code examples of the initialize_all_variables function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
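For orientation before the examples: tf.initialize_all_variables() returns an op that, when run, assigns every variable its initial value, and it must run before any variable is read. Below is a minimal sketch of this pattern, assuming a pre-1.0 TensorFlow where the function still exists (from TensorFlow 0.12 onward it is deprecated in favor of tf.global_variables_initializer):

  import tensorflow as tf

  # Minimal sketch, assuming TensorFlow 0.x: variables must be explicitly
  # initialized before they can be read or updated in a session.
  w = tf.Variable(tf.zeros([2, 3]), name="w")
  b = tf.Variable(tf.ones([3]), name="b")

  with tf.Session() as sess:
      # Build the init op and run it once; all variables are usable afterwards.
      sess.run(tf.initialize_all_variables())
      print(sess.run(w + b))

Inside a default session, as in the test cases below, tf.initialize_all_variables().run() is equivalent: Operation.run() executes the op in that session.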

Example 1: testWhileUpdateVariable_6

  def testWhileUpdateVariable_6(self):
    with self.test_session():
      # Create some variables.
      var_a = tf.Variable(0, name="a")
      var_b = tf.Variable(0, name="b")
      c = tf.constant(0)
      tf.initialize_all_variables().run()

      # Loop condition
      def pred(i):
        return tf.less(i, 10)

      # Loop body
      def loop_body(i):
        asn1 = tf.assign_add(var_a, 1, name="a_add")
        with tf.control_dependencies([asn1]):
          asn2 = tf.assign_add(var_b, var_a, name="b_add")
        with tf.control_dependencies([asn2]):
          ni = tf.add(i, 1, name="i_add")
          return ni

      lpa = control_flow_ops.While(pred, loop_body, [c], 1, name="loop")

      self.assertEqual(0, var_b.eval())
      lpa.eval()  # Run the loop
      self.assertEqual(55, var_b.eval())
      self.assertEqual(10, var_a.eval())
Author: hypatiad | Project: tensorflow | Lines: 27 | Source: control_flow_ops_py_test.py


Example 2: testBlockGRUToGRUCellSingleStep

    def testBlockGRUToGRUCellSingleStep(self):
        with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
            batch_size = 4
            cell_size = 5
            input_size = 6

            seed = 1994
            initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=seed)

            # Inputs
            x = tf.zeros([batch_size, input_size])
            h = tf.zeros([batch_size, cell_size])

            # Values for the inputs.
            x_value = np.random.rand(batch_size, input_size)
            h_value = np.random.rand(batch_size, cell_size)

            # Output from the basic GRU cell implementation.
            with tf.variable_scope("basic", initializer=initializer):
                output = tf.nn.rnn_cell.GRUCell(cell_size)(x, h)
                sess.run([tf.initialize_all_variables()])
                basic_res = sess.run([output], {x: x_value, h: h_value})

            # Output from the block GRU cell implementation.
            with tf.variable_scope("block", initializer=initializer):
                output = gru_ops.GRUBlockCell(cell_size)(x, h)
                sess.run([tf.initialize_all_variables()])
                block_res = sess.run([output], {x: x_value, h: h_value})

            self.assertEqual(len(block_res), len(basic_res))
            for block, basic in zip(block_res, basic_res):
                self.assertAllClose(block, basic)
Author: damienmg | Project: tensorflow | Lines: 32 | Source: gru_ops_test.py


Example 3: testInt64

  def testInt64(self):
    save_path = os.path.join(self.get_temp_dir(), "int64")

    with self.test_session() as sess:
      # Build a graph with 1 node, and save and restore for them.
      v = tf.Variable(np.int64(15), name="v")
      save = tf.train.Saver({"v": v}, restore_sequentially=True)
      tf.initialize_all_variables().run()

      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)

    with self.test_session() as sess:
      v = tf.Variable(np.int64(-1), name="v")
      save = tf.train.Saver({"v": v})

      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v" in e.message):
        sess.run(v)

      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(np.int64(15), v.eval())
Author: ray2020 | Project: tensorflow | Lines: 26 | Source: saver_test.py


Example 4: main

def main(argv):
    mapDict = getKanjiMap()
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        #restore variables from training process
        saver = tf.train.Saver(loadParam)
        saver.restore(sess, MODEL_NAME)
        for argc in range(1,len(sys.argv)):
            fName = sys.argv[argc]
            if os.path.isfile(fName):
                img = cv2.imread(fName, 0)
                img = prepareImage(img)
                # Rescale so the image has zero mean and values in [-1, 1].
                img = (img - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
                img = img.reshape([1, IMAGE_SIZE, IMAGE_SIZE, 1])

                predictions = sess.run(
                    eval_prediction,
                    feed_dict={eval_data_node: img})

                labelID = np.argmax(predictions)
                print("labelID: %d; Recognized Kanji: %s" % (labelID, mapDict[str(labelID)]))
            else:
                print("%s does not exist\n" % (fName))
                continue
Author: shri95 | Project: kanji_recognition | Lines: 27 | Source: kanji_rec_test.py


Example 5: enlargeDataset

def enlargeDataset(images, byte_data, names, labels, is_hard):
    extendListEightTimes(labels)
    extendListEightTimes(names)
    extendListEightTimes(is_hard)
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        l = len(images)
        for j in range(7):
            print(l)
            train_data2 = []
            start = time.time()
            for i in range(l):
                imageTensor = tf.image.random_contrast(images[i], 0.2, 1.8)
                imageTensor = tf.image.random_flip_left_right(imageTensor)
                imageTensor = tf.image.random_flip_up_down(imageTensor)
                imageTensor = tf.image.random_brightness(imageTensor, max_delta=50 / 255.0)
                imageTensor = tf.image.random_saturation(imageTensor, 0.2, 1.8)
                train_data2.append(imageTensor)
            print(time.time() - start)
            start = time.time()
            train_data2 = sess.run(train_data2)
            print(type(train_data2))
            print('time2:', time.time() - start)
            print(train_data2[0][16])
            for i in range(l):
                byte_data.extend(train_data2[i].flatten())
    return byte_data, names, labels, is_hard
Author: jodik | Project: Bachelor-thesis | Lines: 27 | Source: creatingByteDataset.py


Example 6: applyOptimizer

  def applyOptimizer(self, opt, steps=5, is_sparse=False):
    if is_sparse:
      var0 = tf.Variable([[0.0], [0.0]])
      var1 = tf.Variable([[0.0], [0.0]])
      grads0 = tf.IndexedSlices(tf.constant([0.1], shape=[1, 1]),
                                tf.constant([0]),
                                tf.constant([2, 1]))
      grads1 = tf.IndexedSlices(tf.constant([0.02], shape=[1, 1]),
                                tf.constant([1]),
                                tf.constant([2, 1]))
    else:
      var0 = tf.Variable([0.0, 0.0])
      var1 = tf.Variable([0.0, 0.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])

    update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    tf.initialize_all_variables().run()

    sess = tf.get_default_session()
    v0_val, v1_val = sess.run([var0, var1])
    if is_sparse:
      self.assertAllClose([[0.0], [0.0]], v0_val)
      self.assertAllClose([[0.0], [0.0]], v1_val)
    else:
      self.assertAllClose([0.0, 0.0], v0_val)
      self.assertAllClose([0.0, 0.0], v1_val)

    # Run Ftrl for a few steps
    for _ in range(steps):
      update.run()

    v0_val, v1_val = sess.run([var0, var1])
    return v0_val, v1_val
Author: 13331151 | Project: tensorflow | Lines: 34 | Source: ftrl_test.py


Example 7: testFtrlWithL1

  def testFtrlWithL1(self):
    with self.test_session() as sess:
      var0 = tf.Variable([1.0, 2.0])
      var1 = tf.Variable([4.0, 3.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])

      opt = tf.train.FtrlOptimizer(3.0,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.001,
                                   l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      tf.initialize_all_variables().run()

      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([4.0, 3.0], v1_val)

      # Run 10 steps FTRL
      for _ in range(10):
        update.run()
      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose(np.array([-7.66718769, -10.91273689]),
                          v0_val)
      self.assertAllClose(np.array([-0.93460727, -1.86147261]),
                          v1_val)
Author: 13331151 | Project: tensorflow | Lines: 26 | Source: ftrl_test.py


Example 8: testImbalancedWithExampleWeights

    def testImbalancedWithExampleWeights(self):
        # Setup test data with 1 positive, and 1 negative example.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [3.0, 1.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(
                    symmetric_l2_regularization=1,
                    symmetric_l1_regularization=0,
                    num_table_shards=num_shards,
                    loss_type="logistic_loss",
                )

                lr = SdcaModel(examples, variables, options)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()

                self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
                self.assertAllClose(0.408044, loss.eval(), atol=0.012)
                predicted_labels = get_binary_predictions_for_logistic(predictions)
                self.assertAllEqual([0, 1], predicted_labels.eval())
                self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
Author: apollos | Project: tensorflow | Lines: 32 | Source: sdca_ops_test.py


Example 9: testInstancesOfOneClassOnly

    def testInstancesOfOneClassOnly(self):
        # Setup test data with 1 positive (ignored), and 1 negative example.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [0]}, 1),  # Shares gender with the instance above.
        ]
        example_weights = [1.0, 0.0]  # Second example "omitted" from training.
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(
                    symmetric_l2_regularization=1,
                    symmetric_l1_regularization=0,
                    num_table_shards=num_shards,
                    loss_type="logistic_loss",
                )

                lr = SdcaModel(examples, variables, options)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()
                self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
                self.assertAllClose(0.525457, loss.eval(), atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(predictions)
                self.assertAllEqual([0, 0], predicted_labels.eval())
                self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Author: apollos | Project: tensorflow | Lines: 31 | Source: sdca_ops_test.py


Example 10: testSparseBasic

 def testSparseBasic(self):
   for dtype in [tf.half, tf.float32, tf.float64]:
     with self.test_session():
       var0 = tf.Variable([[1.0], [2.0]], dtype=dtype)
       var1 = tf.Variable([[3.0], [4.0]], dtype=dtype)
       grads0 = tf.IndexedSlices(
           tf.constant([0.1], shape=[1, 1], dtype=dtype),
           tf.constant([0]),
           tf.constant([2, 1]))
       grads1 = tf.IndexedSlices(
           tf.constant([0.01], shape=[1, 1], dtype=dtype),
           tf.constant([1]),
           tf.constant([2, 1]))
       ada_opt = tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
       ada_update = ada_opt.apply_gradients(zip(
           [grads0, grads1], [var0, var1]))
       tf.initialize_all_variables().run()
       # Fetch params to validate initial values
       self.assertAllClose([[1.0], [2.0]], var0.eval())
       self.assertAllClose([[3.0], [4.0]], var1.eval())
       # Run 3 steps of Adagrad
       for _ in range(3):
         ada_update.run()
       # Validate updated params
       self.assertAllCloseAccordingToType(
           np.array([[-1.6026098728179932], [2.0]]), var0.eval())
       self.assertAllCloseAccordingToType(
           np.array([[3.0], [3.715679168701172]]), var1.eval())
Author: 0ruben | Project: tensorflow | Lines: 28 | Source: adagrad_test.py


Example 11: testLSTMBasicToBlockPeeping

  def testLSTMBasicToBlockPeeping(self):
    with self.test_session(use_gpu=self._use_gpu) as sess:
      batch_size = 2
      input_size = 3
      cell_size = 4
      sequence_length = 5

      inputs = []
      for _ in range(sequence_length):
        inp = tf.convert_to_tensor(
            np.random.randn(batch_size, input_size),
            dtype=tf.float32)
        inputs.append(inp)

      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=19890212)
      with tf.variable_scope("basic", initializer=initializer):
        cell = tf.nn.rnn_cell.LSTMCell(cell_size,
                                       use_peepholes=True,
                                       state_is_tuple=True)
        outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)

        sess.run([tf.initialize_all_variables()])
        basic_outputs = sess.run(outputs)
        basic_grads = sess.run(tf.gradients(outputs, inputs))
        basic_wgrads = sess.run(tf.gradients(outputs, tf.trainable_variables()))

      with tf.variable_scope("block", initializer=initializer):
        w = tf.get_variable("w",
                            shape=[input_size + cell_size, cell_size * 4],
                            dtype=tf.float32)
        b = tf.get_variable("b",
                            shape=[cell_size * 4],
                            dtype=tf.float32,
                            initializer=tf.zeros_initializer)

        wci = tf.get_variable("wci", shape=[cell_size], dtype=tf.float32)
        wcf = tf.get_variable("wcf", shape=[cell_size], dtype=tf.float32)
        wco = tf.get_variable("wco", shape=[cell_size], dtype=tf.float32)

        _, _, _, _, _, _, outputs = fused_lstm(
            tf.convert_to_tensor(sequence_length,
                                 dtype=tf.int64),
            inputs,
            w,
            b,
            wci=wci,
            wcf=wcf,
            wco=wco,
            cell_clip=0,
            use_peephole=True)

        sess.run([tf.initialize_all_variables()])
        block_outputs = sess.run(outputs)
        block_grads = sess.run(tf.gradients(outputs, inputs))
        block_wgrads = sess.run(tf.gradients(outputs, [w, b, wci, wcf, wco]))

      self.assertAllClose(basic_outputs, block_outputs)
      self.assertAllClose(basic_grads, block_grads)
      for basic, block in zip(basic_wgrads, block_wgrads):
        self.assertAllClose(basic, block, rtol=1e-2, atol=1e-2)
Author: 10imaging | Project: tensorflow | Lines: 60 | Source: lstm_ops_test.py


Example 12: testSharing

  def testSharing(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        ada_opt = tf.train.AdagradOptimizer(3.0)
        # Apply the optimizer twice.  Both applications will use
        # the same accums.
        ada_update1 = ada_opt.apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        ada_update2 = ada_opt.apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        self.assertEqual(["accumulator"], ada_opt.get_slot_names())
        slot0 = ada_opt.get_slot(var0, "accumulator")
        self.assertEquals(slot0.get_shape(), var0.get_shape())
        slot1 = ada_opt.get_slot(var1, "accumulator")
        self.assertEquals(slot1.get_shape(), var1.get_shape())
        tf.initialize_all_variables().run()

        # Fetch params to validate initial values.
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Mix the first and the second adagrad for 3 steps.
        ada_update1.run()
        ada_update2.run()
        ada_update1.run()
        # Validate updated params (the same as with only 1 Adagrad).
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]), var1.eval())
Author: 0ruben | Project: tensorflow | Lines: 33 | Source: adagrad_test.py


Example 13: train_model

def train_model(args):
	data_loader = InputHandler(args.data_dir, args.batch_size, args.result_length)
	args.vocabulary_size = data_loader.vocabulary_size

	# Save the original files, so that we can load the model when sampling
	with open(os.path.join(args.snapshots_dir, CONFIGURATION_FILE), 'wb') as f:
		cPickle.dump(args, f)
	with open(os.path.join(args.snapshots_dir, WORDS_VOCABULARY_FILE), 'wb') as f:
		cPickle.dump((data_loader.words, data_loader.vocabulary), f)

	model = RNNModel(args.rnn_size, args.network_depth, args.batch_size, args.result_length,
					 args.vocabulary_size, args.gradient)

	with tf.Session() as session:
		tf.initialize_all_variables().run()
		saver = tf.train.Saver(tf.all_variables())
		for e in range(args.num_epochs):
			session.run(tf.assign(model.lr, args.training_rate * (args.decay_rate ** e)))
			data_loader.set_batch_pointer_to_zero()
			state = model.initial_state.eval()

			for b in range(data_loader.num_batches):
				x, y = data_loader.get_next_batch()
				feed = {model.input_data: x, model.targets: y, model.initial_state: state}
				train_loss, state, _ = session.run([model.cost, model.final_state, model.train_op], feed)
				if (e * data_loader.num_batches + b) % args.snapshot == 0 \
						or (e==args.num_epochs-1 and b == data_loader.num_batches-1): # save for the last result
					snapshot_path = os.path.join(args.snapshots_dir, 'model.ckpt')
					saver.save(session, snapshot_path, global_step = e * data_loader.num_batches + b)
					print("Model snapshot was taken to {}".format(snapshot_path))
Author: lidanh | Project: game-of-thrones-tweets-generator | Lines: 30 | Source: train_rnn_model.py


Example 14: testBasicLSTMCell

 def testBasicLSTMCell(self):
   with self.test_session() as sess:
     with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
       x = tf.zeros([1, 2])
       m = tf.zeros([1, 8])
       g, out_m = tf.nn.rnn_cell.MultiRNNCell(
           [tf.nn.rnn_cell.BasicLSTMCell(2)] * 2)(x, m)
       sess.run([tf.initialize_all_variables()])
       res = sess.run([g, out_m], {x.name: np.array([[1., 1.]]),
                                   m.name: 0.1 * np.ones([1, 8])})
       self.assertEqual(len(res), 2)
       # The numbers in results were not hand-calculated; this is just a smoke test.
       self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
       expected_mem = np.array([[0.68967271, 0.68967271,
                                 0.44848421, 0.44848421,
                                 0.39897051, 0.39897051,
                                 0.24024698, 0.24024698]])
       self.assertAllClose(res[1], expected_mem)
     with tf.variable_scope("other", initializer=tf.constant_initializer(0.5)):
       x = tf.zeros([1, 3])  # Test BasicLSTMCell with input_size != num_units.
       m = tf.zeros([1, 4])
       g, out_m = tf.nn.rnn_cell.BasicLSTMCell(2, input_size=3)(x, m)
       sess.run([tf.initialize_all_variables()])
       res = sess.run([g, out_m], {x.name: np.array([[1., 1., 1.]]),
                                   m.name: 0.1 * np.ones([1, 4])})
       self.assertEqual(len(res), 2)
Author: 0-T-0 | Project: tensorflow | Lines: 26 | Source: rnn_cell_test.py


Example 15: testDenseFeaturesWeightedExamples

    def testDenseFeaturesWeightedExamples(self):
        with self._single_threaded_test_session():
            examples, variables = make_dense_examples_and_variables_dicts(
                dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]], weights=[3.0, 1.0], labels=[1.0, 0.0]
            )
            options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type="hinge_loss")
            model = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = model.predictions(examples)
            binary_predictions = get_binary_predictions_for_hinge(predictions)
            train_op = model.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
            # try to increase the margin from (1.0, 0.5). Due to regularization,
            # (1.0, -0.5) will be within the margin. For these points and example
            # weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
            # loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will be
            # correct, but the boundary will be much closer to the 2nd point than the
            # first one.
            self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
            self.assertAllEqual([1, 0], binary_predictions.eval())
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
            self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
Author: apollos | Project: tensorflow | Lines: 27 | Source: sdca_ops_test.py


Example 16: testSimple

    def testSimple(self):
        # Setup test data
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, -10.0),
            make_example_proto({"age": [1], "gender": [1]}, 14.0),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="squared_loss")

            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = lr.predictions(examples)
            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # Predictions should be 2/3 of label due to minimizing regularized loss:
            #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
            self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0], predictions.eval(), rtol=0.005)
            # Approximate gap should be very close to 0.0. (In fact, because the gap
            # is only approximate, it is likely that upon convergence the duality gap
            # can have a tiny negative value).
            self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), atol=1e-2)
Author: apollos | Project: tensorflow | Lines: 26 | Source: sdca_ops_test.py


Example 17: build_graph

  def build_graph(self):
    """Build the graph for the full model."""
    opts = self._options
    # The training data. A text file.
    (words, counts, words_per_epoch, self._epoch, self._words, examples,
     labels) = word2vec.skipgram(filename=opts.train_data,
                                 batch_size=opts.batch_size,
                                 window_size=opts.window_size,
                                 min_count=opts.min_count,
                                 subsample=opts.subsample)
    (opts.vocab_words, opts.vocab_counts,
     opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
    opts.vocab_size = len(opts.vocab_words)
    print("Data file: ", opts.train_data)
    print("Vocab size: ", opts.vocab_size - 1, " + UNK")
    print("Words per epoch: ", opts.words_per_epoch)
    self._examples = examples
    self._labels = labels
    self._id2word = opts.vocab_words
    for i, w in enumerate(self._id2word):
      self._word2id[w] = i
    true_logits, sampled_logits = self.forward(examples, labels)
    loss = self.nce_loss(true_logits, sampled_logits)
    tf.scalar_summary("NCE loss", loss)
    self._loss = loss
    self.optimize(loss)

    # Properly initialize all variables.
    tf.initialize_all_variables().run()

    self.saver = tf.train.Saver()
Author: debaratidas1994 | Project: tensorflow | Lines: 31 | Source: word2vec.py


Example 18: testL2Regularization

    def testL2Regularization(self):
        # Setup test data
        example_protos = [
            # 2 identical examples
            make_example_proto({"age": [0], "gender": [0]}, -10.0),
            make_example_proto({"age": [0], "gender": [0]}, -10.0),
            # 2 more identical examples
            make_example_proto({"age": [1], "gender": [1]}, 14.0),
            make_example_proto({"age": [1], "gender": [1]}, 14.0),
        ]
        example_weights = [1.0, 1.0, 1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=16, symmetric_l1_regularization=0, loss_type="squared_loss")

            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = lr.predictions(examples)

            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # Predictions should be 1/5 of label due to minimizing regularized loss:
            #   (label - 2 * weight)^2 + L2 * 16 * weight^2
            optimal1 = -10.0 / 5.0
            optimal2 = 14.0 / 5.0
            self.assertAllClose([optimal1, optimal1, optimal2, optimal2], predictions.eval(), rtol=0.01)
Author: apollos | Project: tensorflow | Lines: 29 | Source: sdca_ops_test.py


Example 19: testFtrlwithoutRegularization2

  def testFtrlwithoutRegularization2(self):
    with self.test_session() as sess:
      var0 = tf.Variable([1.0, 2.0])
      var1 = tf.Variable([4.0, 3.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])

      opt = tf.train.FtrlOptimizer(3.0,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      tf.initialize_all_variables().run()

      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([4.0, 3.0], v1_val)

      # Run 3 steps FTRL
      for _ in range(3):
        update.run()
      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose(np.array([-2.55607247, -3.98729396]),
                          v0_val)
      self.assertAllClose(np.array([-0.28232238, -0.56096673]),
                          v1_val)
Author: 13331151 | Project: tensorflow | Lines: 26 | Source: ftrl_test.py


Example 20: testL1Regularization

    def testL1Regularization(self):
        # Setup test data
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, -10.0),
            make_example_proto({"age": [1], "gender": [1]}, 14.0),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=4.0, loss_type="squared_loss")
            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            prediction = lr.predictions(examples)
            loss = lr.regularized_loss(examples)

            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # Predictions should be -4.0, 48/5 due to minimizing regularized loss:
            #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight
            self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)

            # Loss should be the sum of the regularized loss value from above per
            # example after plugging in the optimal weights.
            self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
Author: apollos | Project: tensorflow | Lines: 27 | Source: sdca_ops_test.py



Note: The tensorflow.initialize_all_variables examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers, and copyright remains with the original authors; consult each project's license before using or distributing the code. Do not reproduce this article without permission.

