
Python tensorflow.placeholder Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.placeholder function in Python. If you have been asking yourself what exactly tf.placeholder does, how to call it, or what real-world uses look like, the curated code examples below should help.



Twenty code examples of the placeholder function are shown below, sorted by popularity by default.
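
Before working through the examples, here is a minimal, self-contained sketch of the pattern they all share: declare a placeholder, build operations on top of it, and feed real data when the session runs. It assumes TensorFlow 1.x, where tf.placeholder is a top-level API (in TensorFlow 2.x it survives only as tf.compat.v1.placeholder with eager execution disabled):

import tensorflow as tf

# A placeholder is a graph node that holds no value until it is fed.
# shape=(None, 3) leaves the batch dimension flexible.
x = tf.placeholder(tf.float32, shape=(None, 3), name='x')
doubled_sum = tf.reduce_sum(x * 2.0, axis=1)

with tf.Session() as sess:
    # Every placeholder reachable from the fetched tensor must appear in
    # feed_dict; otherwise TensorFlow raises an error at run time.
    print(sess.run(doubled_sum, feed_dict={x: [[1.0, 2.0, 3.0]]}))  # [12.]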

Example 1: __init__

    def __init__(self, model_path = "models", threshold = [0.6, 0.7, 0.7], factor = 0.709, scale_factor = 1):
        '''
        :param face_rec_sess: FaceRecSession
        :param threshold: detection threshold
        :param factor: default 0.709 image pyramid -- magic number
        :param model_path:
        '''
        self.threshold = threshold
        self.factor = factor
        self.scale_factor = scale_factor
        with tf.Graph().as_default(), tf.device('/cpu:0'):
            print("Loading Face detection model")
            self.sess = tf.Session()
            if not model_path:
                model_path, _ = os.path.split(os.path.realpath(__file__))

            with tf.variable_scope('pnet'):
                data = tf.placeholder(tf.float32, (None, None, None, 3), 'input')
                pnet = PNet({'data': data})
                pnet.load(os.path.join(model_path, 'det1.npy'), self.sess)
            with tf.variable_scope('rnet'):
                data = tf.placeholder(tf.float32, (None, 24, 24, 3), 'input')
                rnet = RNet({'data': data})
                rnet.load(os.path.join(model_path, 'det2.npy'), self.sess)
            with tf.variable_scope('onet'):
                data = tf.placeholder(tf.float32, (None, 48, 48, 3), 'input')
                onet = ONet({'data': data})
                onet.load(os.path.join(model_path, 'det3.npy'), self.sess)

            self.pnet = lambda img: self.sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0': img})
            self.rnet = lambda img: self.sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0': img})
            self.onet = lambda img: self.sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'),
                                            feed_dict={'onet/input:0': img})
            print("Face detection model loaded")
Author: nxp-gf | Project: flask-facep-reg-v3 | Lines: 34 | Source: mtcnn_detect.py
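
A detail worth noting in this example: the lambdas feed and fetch tensors purely by their graph names ('pnet/input:0', 'pnet/prob1:0'), so no Python references to the placeholders need to be kept. A standalone illustration of the same mechanism (not from the original project):

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    tf.placeholder(tf.float32, shape=(), name='input')  # creates tensor 'input:0'

with tf.Session(graph=g) as sess:
    # Both the fetch and the feed_dict key are resolved by name in the graph.
    print(sess.run('input:0', feed_dict={'input:0': 3.0}))  # 3.0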


Example 2: add_placeholders

  def add_placeholders(self):
    """Generate placeholder variables to represent the input tensors

    These placeholders are used as inputs by the rest of the model building
    code and will be fed data during training.  Note that when "None" is in a
    placeholder's shape, it's flexible

    Adds following nodes to the computational graph

    input_placeholder: Input placeholder tensor of shape
                       (None, window_size), type tf.int32
    labels_placeholder: Labels placeholder tensor of shape
                        (None, label_size), type tf.float32
    dropout_placeholder: Dropout value placeholder (scalar),
                         type tf.float32

    Add these placeholders to self as the instance variables

      self.input_placeholder
      self.labels_placeholder
      self.dropout_placeholder

    (Don't change the variable names)
    """
    ### YOUR CODE HERE
    window_size = self.config.window_size
    self.input_placeholder = tf.placeholder(tf.int32, shape=(None, window_size))
    self.labels_placeholder = tf.placeholder(tf.float32, shape=(None, self.config.label_size))
    self.dropout_placeholder = tf.placeholder(tf.float32)
Author: tracholar | Project: cs224d | Lines: 29 | Source: q2_NER.py
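
As the docstring in this example notes, a None dimension stays flexible until run time. Below is a standalone, hedged illustration of that flexibility (the window size of 5 and the toy computation are made-up values, not part of the original cs224d project):

import numpy as np
import tensorflow as tf

# Stand-in for self.input_placeholder with window_size = 5.
input_ph = tf.placeholder(tf.int32, shape=(None, 5), name='input')
row_mean = tf.reduce_mean(tf.cast(input_ph, tf.float32), axis=1)

with tf.Session() as sess:
    # The same placeholder accepts batches of any size.
    print(sess.run(row_mean, feed_dict={input_ph: np.zeros((2, 5), dtype=np.int32)}))
    print(sess.run(row_mean, feed_dict={input_ph: np.ones((7, 5), dtype=np.int32)}))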


Example 3: __init__

    def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, nlstm=256, reuse=False):
        nenv = nbatch // nsteps
        self.pdtype = make_pdtype(ac_space)
        X, processed_x = observation_input(ob_space, nbatch)

        M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
        S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states
        with tf.variable_scope("model", reuse=reuse):
            h = nature_cnn(X)
            xs = batch_to_seq(h, nenv, nsteps)
            ms = batch_to_seq(M, nenv, nsteps)
            h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm)
            h5 = seq_to_batch(h5)
            vf = fc(h5, 'v', 1)
            self.pd, self.pi = self.pdtype.pdfromlatent(h5)

        v0 = vf[:, 0]
        a0 = self.pd.sample()
        neglogp0 = self.pd.neglogp(a0)
        self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)

        def step(ob, state, mask):
            return sess.run([a0, v0, snew, neglogp0], {X:ob, S:state, M:mask})

        def value(ob, state, mask):
            return sess.run(v0, {X:ob, S:state, M:mask})

        self.X = X
        self.M = M
        self.S = S
        self.vf = vf
        self.step = step
        self.value = value
Author: Divyankpandey | Project: baselines | Lines: 33 | Source: policies.py


Example 4: __init__

    def __init__(self):
        # Import data
        error = None
        for _ in range(10):
            try:
                self.mnist = input_data.read_data_sets(
                    "/tmp/tensorflow/mnist/input_data", one_hot=True)
                error = None
                break
            except Exception as e:
                error = e
                time.sleep(5)
        if error:
            raise ValueError("Failed to import data", error)

        # Set seed and build layers
        tf.set_random_seed(0)

        self.x = tf.placeholder(tf.float32, [None, 784], name="x")
        self.y_ = tf.placeholder(tf.float32, [None, 10], name="y_")
        y_conv, self.keep_prob = deepnn(self.x)

        # Need to define loss and optimizer attributes
        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                labels=self.y_, logits=y_conv))
        self.optimizer = tf.train.AdamOptimizer(1e-4)
        self.variables = ray_tf_utils.TensorFlowVariables(
            self.loss, tf.get_default_session())

        # For evaluating test accuracy
        correct_prediction = tf.equal(
            tf.argmax(y_conv, 1), tf.argmax(self.y_, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
Author: robertnishihara | Project: ray | Lines: 34 | Source: mnist_example.py


Example 5: build_graph

    def build_graph(self, nn_im_w, nn_im_h, num_colour_channels=3, weights=None, biases=None):
        num_outputs = 1 #ofc
        self.nn_im_w = nn_im_w
        self.nn_im_h = nn_im_h

        if weights is None:
            weights = [None, None, None, None, None]
        if biases is None:
            biases = [None, None, None, None, None]

        with tf.device('/cpu:0'):
            # Placeholder variables for the input image and output images
            self.x = tf.placeholder(tf.float32, shape=[None, nn_im_w*nn_im_h*3])
            self.y_ = tf.placeholder(tf.float32, shape=[None, num_outputs])
            self.threshold = tf.placeholder(tf.float32)

            # Build the convolutional and pooling layers
            conv1_output_channels = 32
            conv2_output_channels = 16
            conv3_output_channels = 8

            conv_layer_1_input = tf.reshape(self.x, [-1, nn_im_h, nn_im_w, num_colour_channels]) #The resized input image
            self.build_conv_layer(conv_layer_1_input, num_colour_channels, conv1_output_channels, initial_weights=weights[0], initial_biases=biases[0]) # layer 1
            self.build_conv_layer(self.layers[0][0], conv1_output_channels, conv2_output_channels, initial_weights=weights[1], initial_biases=biases[1])# layer 2
            self.build_conv_layer(self.layers[1][0], conv2_output_channels, conv3_output_channels, initial_weights=weights[2], initial_biases=biases[2])# layer 3

            # Build the fully connected layer
            convnet_output_w = nn_im_w//8
            convnet_output_h = nn_im_h//8

            fully_connected_layer_input = tf.reshape(self.layers[2][0], [-1, convnet_output_w * convnet_output_h * conv3_output_channels])
            self.build_fully_connected_layer(fully_connected_layer_input, convnet_output_w, convnet_output_h, conv3_output_channels, initial_weights=weights[3], initial_biases=biases[3])

            # The dropout stage and readout layer
            self.keep_prob, self.h_drop = self.dropout(self.layers[3][0])
            self.y_conv,_,_ = self.build_readout_layer(self.h_drop, num_outputs, initial_weights=weights[4], initial_biases=biases[4])

            self.mean_error =  tf.sqrt(tf.reduce_mean(tf.square(self.y_ - self.y_conv)))
            self.train_step = tf.train.AdamOptimizer(1e-4).minimize(self.mean_error)

            self.accuracy = (1.0 - tf.reduce_mean(tf.abs(self.y_ - tf.round(self.y_conv))))


            positive_examples = tf.greater_equal(self.y_, 0.5)
            negative_examples = tf.logical_not(positive_examples)
            positive_classifications = tf.greater_equal(self.y_conv, self.threshold)
            negative_classifications = tf.logical_not(positive_classifications)

            self.true_positive = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, positive_classifications),tf.int32)) # count the examples that are positive and classified as positive
            self.false_positive = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, positive_classifications),tf.int32)) # count the examples that are negative but classified as positive

            self.true_negative = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, negative_classifications),tf.int32)) # count the examples that are negative and classified as negative
            self.false_negative = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, negative_classifications),tf.int32)) # count the examples that are positive but classified as negative

            self.positive_count = tf.reduce_sum(tf.cast(positive_examples, tf.int32)) # count the examples that are positive
            self.negative_count = tf.reduce_sum(tf.cast(negative_examples, tf.int32)) # count the examples that are negative

            # Note: tf.pack was renamed to tf.stack in TensorFlow 1.0.
            self.confusion_matrix = tf.reshape(tf.pack([self.true_positive, self.false_positive, self.false_negative, self.true_negative]), [2,2])

        self.sess.run(tf.initialize_all_variables())
Author: JTKBowers | Project: CNN-people-detect | Lines: 60 | Source: Model.py


Example 6: ce

def ce(model, config, scope, connect, threshold = 1e-5):
	with tf.variable_scope(scope), tf.name_scope(scope):
		with tf.variable_scope('inputs'), tf.name_scope('inputs'):
			model['%s_in0length' %scope] = model['%s_out0length' %connect]
			model['%s_in1length' %scope] = model['%s_out1length' %connect]
			model['%s_in2length' %scope] = model['%s_out2length' %connect]
			model['%s_maxin2length' %scope] = model['%s_maxout2length' %connect]
			model['%s_inputs' %scope] = tf.clip_by_value(tf.nn.softmax(model['%s_outputs' %connect]), threshold, 1. - threshold, name = '%s_inputs' %scope)
			model['%s_out0length' %scope] = model['%s_in0length' %scope]
			model['%s_out1length' %scope] = model['%s_in1length' %scope]
			model['%s_out2length' %scope] = tf.placeholder(tf.int32, [model['%s_in0length' %scope]], '%s_out2length' %scope)
			model['%s_maxout2length' %scope] = model['%s_maxin2length' %scope]

		with tf.variable_scope('labels'), tf.name_scope('labels'):
			model['%s_labels_len' %scope] = tf.placeholder(tf.int32, [model['%s_in0length' %scope]], '%s_labels_len' %scope)
			model['%s_labels_ind' %scope] = tf.placeholder(tf.int64, [None, 2], '%s_labels_ind' %scope)
			model['%s_labels_val' %scope] = tf.placeholder(tf.int32, [None], '%s_labels_val' %scope)
			model['%s_labels_collapsed' %scope] = tf.sparse_to_dense(model['%s_labels_ind' %scope], [model['%s_maxin2length' %scope], model['%s_in0length' %scope]], model['%s_labels_val' %scope], -1, name = '%s_labels_collapsed' %scope)
			model['%s_labels' %scope] = tf.one_hot(model['%s_labels_collapsed' %scope], model['%s_out1length' %scope], name = '%s_labels' %scope)

		with tf.variable_scope('loss'), tf.name_scope('loss'):
			model['%s_loss' %scope] = tf.reduce_sum(-tf.multiply(model['%s_labels' %scope], tf.log(model['%s_inputs' %scope])), name = '%s_loss' %scope)

		with tf.variable_scope('outputs'), tf.name_scope('outputs'):
			model['%s_output' %scope] = model['%s_inputs' %scope]

	return model
Author: aaiijmrtt | Project: DEEPSPEECH | Lines: 27 | Source: ce.py


Example 7: __init__

    def __init__(self, encoders, vocabulary, data_id,
                 layers=[], activation=tf.tanh, dropout_keep_p=0.5, name='seq_classifier'):
        self.encoders = encoders
        self.vocabulary = vocabulary
        self.data_id = data_id
        self.layers = layers
        self.activation = activation
        self.dropout_keep_p = dropout_keep_p
        self.name = name
        self.max_output_len = 1

        with tf.variable_scope(name):
            self.learning_step = tf.Variable(0, name="learning_step", trainable=False)
            self.dropout_placeholder = tf.placeholder(tf.float32, name="dropout_plc")
            self.gt_inputs = [tf.placeholder(tf.int32, shape=[None], name="targets")]
            mlp_input = tf.concat(1, [enc.encoded for enc in encoders])
            mlp = MultilayerPerceptron(mlp_input, layers, self.dropout_placeholder, len(vocabulary))

            self.loss_with_gt_ins = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(mlp.logits, self.gt_inputs[0]))
            self.loss_with_decoded_ins = self.loss_with_gt_ins
            self.cost = self.loss_with_gt_ins

            self.decoded_seq = [mlp.classification]
            self.decoded_logits = [mlp.logits]

            tf.scalar_summary('val_optimization_cost', self.cost, collections=["summary_val"])
            tf.scalar_summary('train_optimization_cost', self.cost, collections=["summary_train"])
Author: archerbroler | Project: neuralmonkey | Lines: 28 | Source: sequence_classifier.py


Example 8: recover_feeling

def recover_feeling(checkpoint):
    # Set up variables
    in_sentence = tf.placeholder(tf.float32, [None, 140])
    
    weight = tf.Variable(tf.zeros([140, 6]))
    biases = tf.Variable(tf.zeros([6]))
    global_step = tf.Variable(0, name='global_step', trainable=False)

    #y = softmax(Wx + b)
    y = tf.nn.softmax(tf.matmul(in_sentence, weight) + biases)
    y_ = tf.placeholder(tf.float32, [None, 6])
    sess = tf.InteractiveSession()
    
    # Restore the model.
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint)
    
    # Model restored; load the vocabulary
    vocab, tmp_vocab = read_vocabulary('data/emotion/vocabulary.txt')
    
    # Convert some sentences into arrays of vectors
    vec_list1 = convert_sentence_to_vec_list('你今天感觉怎么样', vocab, 140)
    vec_list2 = convert_sentence_to_vec_list('高兴啊', vocab, 140)
    vec_list_final = [np.array(vec_list1), np.array(vec_list2)]
    print (vec_list_final)
    
    # Hand the data to the session for evaluation
    data = np.array(vec_list_final)
    result = sess.run(y, feed_dict={in_sentence: data})
    print (result)
Author: chibimiku | Project: InoueAoi | Lines: 30 | Source: train_emotion.py


Example 9: main

def main():
    sess = tf.Session()

    # 3-bit binary to decimal
    x = tf.placeholder(tf.float32, [None, 3])
    w = tf.Variable(tf.zeros([3, 8]))
    b = tf.Variable(tf.zeros([8]))
    y = tf.nn.softmax(tf.matmul(x, w) + b)

    y_ = tf.placeholder(tf.float32, [None, 8])
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.GradientDescentOptimizer(0.05).minimize(cross_entropy)

    sess.run(tf.initialize_all_variables())

    for i in range(1000):
        train_step.run({x: [[0, 0, 0]], y_: [[1, 0, 0, 0, 0, 0, 0, 0]]}, session=sess)
        train_step.run({x: [[1, 0, 0]], y_: [[0, 1, 0, 0, 0, 0, 0, 0]]}, session=sess)
        train_step.run({x: [[0, 1, 0]], y_: [[0, 0, 1, 0, 0, 0, 0, 0]]}, session=sess)
        train_step.run({x: [[1, 1, 0]], y_: [[0, 0, 0, 1, 0, 0, 0, 0]]}, session=sess)
        train_step.run({x: [[0, 0, 1]], y_: [[0, 0, 0, 0, 1, 0, 0, 0]]}, session=sess)
        train_step.run({x: [[1, 0, 1]], y_: [[0, 0, 0, 0, 0, 1, 0, 0]]}, session=sess)
        train_step.run({x: [[0, 1, 1]], y_: [[0, 0, 0, 0, 0, 0, 1, 0]]}, session=sess)
        train_step.run({x: [[1, 1, 1]], y_: [[0, 0, 0, 0, 0, 0, 0, 1]]}, session=sess)

    ## Average of whether the prediction closest to 1 is correct
    #correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    #accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print(sess.run(y, feed_dict={x: [[0, 0, 0]]}))
    print(sess.run(y, feed_dict={x: [[1, 0, 0]]}))
    print(sess.run(y, feed_dict={x: [[0, 1, 0]]}))
    print(sess.run(y, feed_dict={x: [[1, 1, 0]]}))
    print(sess.run(y, feed_dict={x: [[0, 0, 1]]}))
    return 0
Author: octaltree | Project: tensorFlowTest | Lines: 35 | Source: binary.py


Example 10: create_model

    def create_model(self):
        print "Setting up model",
        sys.stdout.flush()
        # placeholders for data + targets
        self._input_data = tf.placeholder(tf.int32, shape=(self.batch_size, self.num_steps))
        self._targets = tf.placeholder(tf.int32, [self.batch_size, self.num_steps])

        # set up lookup function
        self.embedding = tf.constant(self.saved_embedding,name="embedding")
        self.inputs = tf.nn.embedding_lookup(self.embedding, self._input_data)
        # lstm model
        self.lstm_cell = rnn_cell.BasicLSTMCell(self.lstm_size)
        self.cell = rnn_cell.MultiRNNCell([self.lstm_cell] * self.num_layers)


        self._initial_state = self.cell.zero_state(self.batch_size, tf.float32)

        from tensorflow.models.rnn import rnn
        self.inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, self.num_steps, self.inputs)]
        self.outputs, self.states = rnn.rnn(self.cell, self.inputs, initial_state=self._initial_state)

        self.output = tf.reshape(tf.concat(1, self.outputs), [-1, self.lstm_size])
        self.softmax_w = tf.get_variable("softmax_w", [self.lstm_size, self.vocab_size])
        self.softmax_b = tf.get_variable("softmax_b", [self.vocab_size])
        self.logits = tf.matmul(self.output, self.softmax_w) + self.softmax_b
        #print  "self.states.get_shape():",self.states.get_shape()
        #print  "tf.shape(self.states)",tf.shape(self.states)
        self._final_state = self.states
        self.saver = tf.train.Saver()
        
        #delete data to save memory if network is used for sampling only
        if self.only_for_sampling:
            del self.data
            
        print "done"
Author: wohnjayne | Project: what-will-you-publish-next | Lines: 35 | Source: lstm.py


Example 11: testModelWithBucketsScopeAndLoss

  def testModelWithBucketsScopeAndLoss(self):
    """Test that variable scope reuse is not reset after model_with_buckets."""
    classes = 10
    buckets = [(4, 4), (8, 8)]

    with self.test_session():
      # Here comes a sample Seq2Seq model using GRU cells.
      def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
        """Example sequence-to-sequence model that uses GRU cells."""
        def GRUSeq2Seq(enc_inp, dec_inp):
          cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2)
          return tf.nn.seq2seq.embedding_attention_seq2seq(
              enc_inp, dec_inp, cell, num_encoder_symbols=classes,
              num_decoder_symbols=classes, embedding_size=24)
        targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
        return tf.nn.seq2seq.model_with_buckets(
            enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
            per_example_loss=per_example_loss)

      # Now we construct the copy model.
      inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
      out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
      weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
      with tf.variable_scope("root"):
        _, losses1 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=False)
        # Now check that we did not accidentally set reuse.
        self.assertEqual(False, tf.get_variable_scope().reuse)
        # Construct one more model with per-example loss.
        tf.get_variable_scope().reuse_variables()
        _, losses2 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=True)
        # First loss is scalar, the second one is a 1-dimensional tensor.
        self.assertEqual([], losses1[0].get_shape().as_list())
        self.assertEqual([None], losses2[0].get_shape().as_list())
Author: AngleFork | Project: tensorflow | Lines: 33 | Source: seq2seq_test.py


Example 12: modelOpt

    def modelOpt(self):

        bn_params = {'decay': 0.999, 'center': True, 'scale': True, 'epsilon': 0.001, 'updates_collections': None,
                     'is_training': self.is_training}

        self.global_step = tf.Variable(0, trainable=False)
        self.learning_rate = layers.decayLearningRate(LEARNING_RATE, self.global_step, DECAY_STEPS, DECAY_RATE)

        self.towerLogits = tf.placeholder(dtype=tf.float32, shape=[None, self.dataset.frames, 64])
        self.Y = tf.placeholder(dtype=tf.int32, shape=[None])
        self.Yoh = layers.toOneHot(self.Y, self.dataset.num_classes)

        net_shape = self.towerLogits.get_shape()
        net = tf.reshape(self.towerLogits, [BATCH_SIZE, int(net_shape[1]) * int(net_shape[2])])

        layer_num = 1
        for fully_connected_num in [64]:
            net = layers.fc(net, fully_connected_num, name='temporal_FC{}'.format(layer_num),
                            weights_regularizer=layers.l2_regularizer(REGULARIZER_SCALE),
                            normalizer_fn=layers.batchNormalization, normalizer_params=bn_params)
            layer_num += 1

        self.logits = layers.fc(net, self.dataset.num_classes, activation_fn=None, name='logits')

        self.preds = layers.softmax(self.logits)

        cross_entropy_loss = layers.reduce_mean(layers.softmax_cross_entropy(logits=self.logits, labels=self.Yoh))
        regularization_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        self.loss = cross_entropy_loss + REGULARIZER_SCALE * tf.reduce_sum(regularization_loss)
        self.opt = layers.adam(self.learning_rate)
        self.train_op = self.opt.minimize(self.loss, global_step=self.global_step)

        self.accuracy, self.precision, self.recall = self.createSummaries(self.Yoh, self.preds, self.loss,
                                                                          self.learning_rate)
Author: Tiyanak | Project: lip-reading | Lines: 34 | Source: vgg16_time_pooling.py


Example 13: runNN

def runNN (train_x, train_y, test_x, test_y, numHidden):
	print "NN({})".format(numHidden)
	session = tf.InteractiveSession()

	x = tf.placeholder("float", shape=[None, train_x.shape[1]])
	y_ = tf.placeholder("float", shape=[None, 2])

	W1 = tf.Variable(tf.truncated_normal([train_x.shape[1],numHidden], stddev=0.01))
	b1 = tf.Variable(tf.truncated_normal([numHidden], stddev=0.01))
	W2 = tf.Variable(tf.truncated_normal([numHidden,2], stddev=0.01))
	b2 = tf.Variable(tf.truncated_normal([2], stddev=0.01))

	z = tf.nn.relu(tf.matmul(x,W1) + b1)
	y = tf.nn.softmax(tf.matmul(z,W2) + b2)

	cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y,1e-10,1.0)))
	#cross_entropy = -tf.reduce_sum(y_*tf.log(y))
	train_step = tf.train.MomentumOptimizer(learning_rate=.001, momentum=0.1).minimize(cross_entropy)
	#train_step = tf.train.AdamOptimizer(learning_rate=.01).minimize(cross_entropy)

	session.run(tf.initialize_all_variables())
	for i in range(NUM_EPOCHS):
		offset = i*BATCH_SIZE % (train_x.shape[0] - BATCH_SIZE)
		train_step.run({x: train_x[offset:offset+BATCH_SIZE, :], y_: makeLabels(train_y[offset:offset+BATCH_SIZE])})
		if i % 100 == 0:
			util.showProgress(cross_entropy, x, y, y_, test_x, test_y)
	session.close()
Author: jwhitehill | Project: MultiEnrollmentProjectWithDustin | Lines: 27 | Source: run_tensorflow.py


Example 14: init

def init():
    d_model = 512
    d_k = 64
    d_v = 64
    sequence_length = 6  # 5
    decoder_sent_length = 6
    h = 8
    batch_size = 4*32
    num_layer=6
    # 2.set Q,K,V
    vocab_size = 1000
    embed_size = d_model
    initializer = tf.random_normal_initializer(stddev=0.1)
    Embedding = tf.get_variable("Embedding_d", shape=[vocab_size, embed_size], initializer=initializer)
    decoder_input_x = tf.placeholder(tf.int32, [batch_size, decoder_sent_length], name="input_x")  # [4,10]
    print("1.decoder_input_x:", decoder_input_x)
    decoder_input_embedding = tf.nn.embedding_lookup(Embedding, decoder_input_x)  # [batch_size*sequence_length,embed_size]
    #Q = embedded_words  # [batch_size*sequence_length,embed_size]
    #K_s = embedded_words  # [batch_size*sequence_length,embed_size]
    #K_v_encoder = tf.placeholder(tf.float32, [batch_size,decoder_sent_length, d_model], name="input_x") #sequence_length
    Q = tf.placeholder(tf.float32, [batch_size,sequence_length, d_model], name="input_x")
    K_s=decoder_input_embedding
    K_v_encoder= tf.get_variable("v_variable",shape=[batch_size,decoder_sent_length, d_model],initializer=initializer) #tf.float32,
    print("2.output from encoder:",K_v_encoder)
    mask = get_mask(decoder_sent_length) #sequence_length
    decoder = Decoder(d_model, d_k, d_v, sequence_length, h, batch_size, Q, K_s, K_v_encoder,decoder_sent_length,mask=mask,num_layer=num_layer)
    return decoder,Q, K_s
Author: AmjadHisham | Project: text_classification | Lines: 27 | Source: a2_decoder.py


Example 15: create_network

    def create_network(self, name):
        with tf.variable_scope(name) as scope:

            inputs = tf.placeholder(fl32, [None, self.state_dim], 'inputs')
            actions = tf.placeholder(fl32, [None, self.action_dim], 'actions')

            with slim.arg_scope(
                [slim.fully_connected],
                activation_fn=relu,
                weights_initializer=uniform,
                weights_regularizer=None
            ):
                net = tf.concat(1, [inputs, actions])
                net = slim.fully_connected(net, 1024)

                res = net = slim.fully_connected(net, 128)
                net = slim.fully_connected(net, 256)
                net = slim.fully_connected(net, 128, activation_fn=None)
                net = relu(net+res)

                res = net = slim.fully_connected(net, 128)
                net = slim.fully_connected(net, 256)
                net = slim.fully_connected(net, 128, activation_fn=None)
                net = relu(net+res)

                res = net = slim.fully_connected(net, 128)
                net = slim.fully_connected(net, 256)
                net = slim.fully_connected(net, 128, activation_fn=None)
                net = relu(net+res)

                out = slim.fully_connected(net, 1, activation_fn=None)

        return (inputs, actions, out, scope.name)
Author: jpp46 | Project: CurrentProjects | Lines: 33 | Source: resnet.py


Example 16: __init__

    def __init__(self, data_dictionary):
        # init some parameters
        self.replay_buffer = []
        self.time_step = 0
        self.epsilon = INITIAL_EPSILON
        self.state_dim = data_dictionary["input"]
        self.action_dim = data_dictionary["action"]
        self.n_input = self.state_dim
        self.state_input = tf.placeholder("float", [None, self.n_input])
        self.y_input = tf.placeholder("float",[None, self.action_dim])
        self.create_pg_network(data_dictionary)
        self.create_training_method()

        # Init session
        self.session = tf.InteractiveSession()
        self.session.run(tf.initialize_all_variables())

        # loading networks
        self.saver = tf.train.Saver()
        checkpoint = tf.train.get_checkpoint_state("pg_saved_networks")
        if checkpoint and checkpoint.model_checkpoint_path:
            self.saver.restore(self.session, checkpoint.model_checkpoint_path)
            print "Successfully loaded:", checkpoint.model_checkpoint_path
        else:
            print "Could not find old network weights"

        global summary_writer
        summary_writer = tf.train.SummaryWriter('logs',graph=self.session.graph)
Author: helloyhan | Project: deep_trader | Lines: 28 | Source: pg_stock_model.py


Example 17: __init__

    def __init__(self,num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,
                 vocab_size,embed_size,is_training,initializer=tf.random_normal_initializer(stddev=0.1)):
        """init all hyperparameter here"""
        # set hyperparamter
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.sequence_length=sequence_length
        self.vocab_size=vocab_size
        self.embed_size=embed_size
        self.hidden_size=embed_size
        self.is_training=is_training
        self.learning_rate=learning_rate
        self.initializer=initializer
        self.num_sampled=20

        # add placeholder (X,label)
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")  # X
        self.input_y = tf.placeholder(tf.int32,[None], name="input_y")  # y [None,num_classes]
        self.dropout_keep_prob=tf.placeholder(tf.float32,name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step=tf.Variable(0,trainable=False,name="Epoch_Step")
        self.epoch_increment=tf.assign(self.epoch_step,tf.add(self.epoch_step,tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate

        self.instantiate_weights()
        self.logits = self.inference() #[None, self.label_size]. main computation graph is here.
        if not is_training:
            return
        self.loss_val = self.loss() #-->self.loss_nce()
        self.train_op = self.train()
        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")  # shape:[None,]
        correct_prediction = tf.equal(tf.cast(self.predictions,tf.int32), self.input_y) #tf.argmax(self.logits, 1)-->[batch_size]
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy") # shape=()
Author: brucexia6116 | Project: text_classification | Lines: 34 | Source: p8_TextRNN_model.py


Example 18: testMixtureOfEnqueueAndEnqueueMany

  def testMixtureOfEnqueueAndEnqueueMany(self):
    with self.test_session() as sess:
      q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((),))
      enqueue_placeholder = tf.placeholder(tf.int32, shape=())
      enqueue_op = q.enqueue((enqueue_placeholder,))
      enqueuemany_placeholder = tf.placeholder(
          tf.int32, shape=(None,))
      enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))

      dequeued_t = q.dequeue()
      close_op = q.close()

      def dequeue():
        for i in xrange(250):
          self.assertEqual(i, sess.run(dequeued_t))
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()

      elements_enqueued = 0
      while elements_enqueued < 250:
        # With equal probability, run Enqueue or enqueue_many.
        if random.random() > 0.5:
          enqueue_op.run({enqueue_placeholder: elements_enqueued})
          elements_enqueued += 1
        else:
          count = random.randint(0, min(20, 250 - elements_enqueued))
          range_to_enqueue = np.arange(elements_enqueued,
                                       elements_enqueued + count,
                                       dtype=np.int32)
          enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
          elements_enqueued += count

      close_op.run()
      dequeue_thread.join()
      self.assertEqual(0, q.size().eval())
Author: agouwin | Project: udacity_deep_learning_homework | Lines: 35 | Source: padding_fifo_queue_test.py


Example 19: testGradient

  def testGradient(self):
    with tf.Graph().as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None, 100], name="input")
      weights = tf.placeholder(tf.float32, shape=[100, 10], name="weights")
      biases = tf.placeholder(tf.float32, shape=[10], name="biases")
      activations = tf.nn.relu(tf.matmul(inputs, weights) + biases,
                               name="activations")
      loss = tf.reduce_mean(activations, name="loss")
    gdef = g.as_graph_def()

    with tf.Graph().as_default() as g:
      input_placeholder = tf.placeholder(tf.float32, shape=[32, 100])
      weights_var = tf.Variable(tf.truncated_normal([100, 10]), name="weights")
      biases_var = tf.Variable(tf.zeros(10), name="biases")
      activations, loss = tf.import_graph_def(
          gdef,
          input_map={"input:0": input_placeholder,
                     "weights:0": weights_var,
                     "biases:0": biases_var},
          return_elements=["activations:0", "loss:0"])
      self.assertEqual([32, 10], activations.get_shape())
      self.assertEqual([], loss.get_shape())
      weights_grad, biases_grad = tf.gradients(loss, [weights_var, biases_var])
      self.assertEqual([100, 10], weights_grad.get_shape())
      self.assertEqual([10], biases_grad.get_shape())
Author: yevgeniyfrenkel | Project: tensorflow | Lines: 25 | Source: importer_test.py


Example 20: main

def main(output_dir, summaries_every, num_steps):
  graph = tf.Graph()


  with graph.as_default():
    features = tf.placeholder(tf.float32, shape=[4, 2])
    labels = tf.placeholder(tf.int32, shape=[4])

    train_op, loss, gs, update_acc = make_graph(features, labels)
    init = tf.global_variables_initializer()
    init_local = tf.local_variables_initializer()
    summary_op = tf.summary.merge_all()


  writer = tf.summary.FileWriter(output_dir, graph=graph, flush_secs=1)

  with tf.Session(graph=graph) as sess:
    init.run()
    init_local.run()
    step = 0
    xy = np.array([
        [True, False],
        [True, True],
        [False, False],
        [False, True]
    ], dtype=np.float)
    y_ = np.array([True, False, False, True], dtype=np.int32)
    while step < num_steps:

      _, _, step, loss_value, summaries = sess.run(
          [train_op, update_acc, gs, loss, summary_op],
          feed_dict={features: xy, labels: y_}
      )
      if step % summaries_every == 0:
        writer.add_summary(summaries, global_step=step)
Author: amygdala | Project: tensorflow-workshop | Lines: 35 | Source: xor_summaries.py



Note: The tensorflow.placeholder function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.

