
Python layers.dropout Function Code Examples


This article collects typical usage examples of the tensorflow.contrib.layers.dropout function in Python. If you are wondering how the Python dropout function is used in practice, how to call it, or what real examples look like, the curated code samples below should help.



Twenty code examples of the dropout function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
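Before the individual examples, here is a minimal usage sketch of the function itself (the tensor shapes and names are illustrative placeholders, and tf.contrib only exists in TensorFlow 1.x):

import tensorflow as tf
from tensorflow.contrib import layers

x = tf.placeholder(tf.float32, [None, 128])          # illustrative input batch
is_training = tf.placeholder(tf.bool, name="is_training")

h = layers.fully_connected(x, 64)
# keep_prob is the probability of KEEPING a unit; dropout is applied only while
# is_training is True, otherwise the input is returned unchanged.
h_drop = layers.dropout(h, keep_prob=0.8, is_training=is_training)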

Example 1: model

def model():
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        X_expand = tf.expand_dims(X_pl, axis=2)
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [None,])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        cell_fw = tf.nn.rnn_cell.GRUCell(205)
        cell_bw = tf.nn.rnn_cell.GRUCell(205)
        seq_len = tf.reduce_sum(tf.ones(tf.shape(X_pl), dtype=tf.int32), axis=1)
        _, enc_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw,
            cell_bw=cell_bw, inputs=X_expand, sequence_length=seq_len,
            dtype=tf.float32)
        enc_states = tf.concat(1, enc_states)
        enc_states_drop = dropout(enc_states, is_training=is_training_pl) 
        l1 = fully_connected(enc_states_drop, 200, activation_fn=None)
        l1 = batch_norm(l1, is_training=is_training_pl)
        l1_relu = relu(l1)
        l1_dropout = dropout(l1_relu, is_training=is_training_pl)
        l2 = fully_connected(l1_dropout, 200, activation_fn=None)
        l2 = batch_norm(l2, is_training=is_training_pl)
        l2_relu = relu(l2)
        l_out = fully_connected(l2_relu, num_outputs=num_classes, activation_fn=None)
        l_out_softmax = tf.nn.softmax(l_out)
        tf.contrib.layers.summarize_variables()

    with tf.variable_scope('metrics'):
        loss = sparse_softmax_cross_entropy_with_logits(l_out, t_pl)
        print("loss", loss.get_shape())
        loss = tf.reduce_mean(loss)
        print("loss", loss.get_shape())
        tf.summary.scalar('train/loss', loss)
        argmax = tf.to_int32(tf.argmax(l_out, 1))
        print("argmax", argmax.get_shape())
        correct = tf.to_float(tf.equal(argmax, t_pl))
        print("correct,", correct.get_shape())
        accuracy = tf.reduce_mean(correct)
        print("accuracy", accuracy.get_shape())

    with tf.variable_scope('optimizer'):
        print("building optimizer ...")
        global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        grads_and_vars = optimizer.compute_gradients(loss)
        gradients, variables = zip(*grads_and_vars)
        clipped_gradients, global_norm = (
            tf.clip_by_global_norm(gradients, clip_norm))
        clipped_grads_and_vars = zip(clipped_gradients, variables)

        tf.summary.scalar('train/global_gradient_norm', global_norm)

        train_op = optimizer.apply_gradients(clipped_grads_and_vars, global_step=global_step)

    return X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss, accuracy, train_op, global_step
Author: alrojo, Project: EEG_DauwelsLab, Lines of code: 57, Source file: rnn_big2.py
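The is_training_pl placeholder returned by model() is what switches dropout (and batch norm) between training and evaluation behaviour. A minimal sketch of a driver loop for this graph, assuming a tf.Session named sess and illustrative NumPy batches x_batch / t_batch and x_val / t_val:

# Training step: dropout is active because is_training_pl is fed as True.
_, train_loss, train_acc = sess.run(
    [train_op, loss, accuracy],
    feed_dict={X_pl: x_batch, t_pl: t_batch, is_training_pl: True})

# Evaluation: dropout is disabled by feeding is_training_pl as False.
val_loss, val_acc = sess.run(
    [loss, accuracy],
    feed_dict={X_pl: x_val, t_pl: t_val, is_training_pl: False})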


Example 2: define_sequence_model

 def define_sequence_model(self):
     seed=12345
     np.random.seed(12345)
     layer_list=[]
     with self.graph.as_default() as g:
         utt_length=tf.placeholder(tf.int32,shape=(None))
         g.add_to_collection(name="utt_length",value=utt_length)
         with tf.name_scope("input"):
              input_layer=tf.placeholder(dtype=tf.float32,shape=(None,None,self.n_in),name="input_layer")
              if self.dropout_rate!=0.0:
                 print "Using dropout to avoid overfitting and the dropout rate is",self.dropout_rate
                 is_training_drop=tf.placeholder(dtype=tf.bool,shape=(),name="is_training_drop")
                 input_layer_drop=dropout(input_layer,self.dropout_rate,is_training=is_training_drop)
                 layer_list.append(input_layer_drop)
                 g.add_to_collection(name="is_training_drop",value=is_training_drop)
              else:
                 layer_list.append(input_layer)
         g.add_to_collection("input_layer",layer_list[0])
         with tf.name_scope("hidden_layer"):
            basic_cell=[]
            if "tanh" in self.hidden_layer_type:
                is_training_batch=tf.placeholder(dtype=tf.bool,shape=(),name="is_training_batch")
                bn_params={"is_training":is_training_batch,"decay":0.99,"updates_collections":None}
                g.add_to_collection("is_training_batch",is_training_batch)
            for i in range(len(self.hidden_layer_type)):
                if self.dropout_rate!=0.0:
                    if self.hidden_layer_type[i]=="tanh":
                        new_layer=fully_connected(layer_list[-1],self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,normalizer_params=bn_params)
                        new_layer_drop=dropout(new_layer,self.dropout_rate,is_training=is_training_drop)
                        layer_list.append(new_layer_drop)
                    if self.hidden_layer_type[i]=="lstm":
                        basic_cell.append(MyDropoutWrapper(BasicLSTMCell(num_units=self.hidden_layer_size[i]),self.dropout_rate,self.dropout_rate,is_training=is_training_drop))
                    if self.hidden_layer_type[i]=="gru":
                        basic_cell.append(MyDropoutWrapper(GRUCell(num_units=self.hidden_layer_size[i]),self.dropout_rate,self.dropout_rate,is_training=is_training_drop))
                else:
                    if self.hidden_layer_type[i]=="tanh":
                       new_layer=fully_connected(layer_list[-1],self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,normalizer_params=bn_params)
                       layer_list.append(new_layer)
                    if self.hidden_layer_type[i]=="lstm":
                       basic_cell.append(LayerNormBasicLSTMCell(num_units=self.hidden_layer_size[i]))
                    if self.hidden_layer_type[i]=="gru":
                       basic_cell.append(LayerNormGRUCell(num_units=self.hidden_layer_size[i]))
            multi_cell=MultiRNNCell(basic_cell)
            rnn_outputs,rnn_states=tf.nn.dynamic_rnn(multi_cell,layer_list[-1],dtype=tf.float32,sequence_length=utt_length)
            layer_list.append(rnn_outputs)
         with tf.name_scope("output_layer"):
              if self.output_type=="linear" :
                  output_layer=tf.layers.dense(rnn_outputs,self.n_out)
               #  stacked_rnn_outputs=tf.reshape(rnn_outputs,[-1,self.n_out])
               #  stacked_outputs=tf.layers.dense(stacked_rnn_outputs,self.n_out)
               #  output_layer=tf.reshape(stacked_outputs,[-1,utt_length,self.n_out])
              g.add_to_collection(name="output_layer",value=output_layer)
         with tf.name_scope("training_op"):
              if self.optimizer=="adam":
                  self.training_op=tf.train.AdamOptimizer()
Author: CSTR-Edinburgh, Project: merlin, Lines of code: 55, Source file: model.py
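Note that dropout(input_layer, self.dropout_rate, is_training=...) passes self.dropout_rate into the second positional parameter of tf.contrib.layers.dropout, which is keep_prob, i.e. the probability of keeping a unit rather than dropping it. A small sketch of the equivalence, with an assumed rate of 0.8 and illustrative placeholders standing in for the example's tensors:

import tensorflow as tf
from tensorflow.contrib.layers import dropout

input_layer = tf.placeholder(tf.float32, shape=(None, None, 100))   # stands in for the example's input
is_training_drop = tf.placeholder(tf.bool, shape=())

rate = 0.8   # assumed value
a = dropout(input_layer, rate, is_training=is_training_drop)             # positional: keep_prob=rate
b = dropout(input_layer, keep_prob=rate, is_training=is_training_drop)   # identical call
# Both keep each unit with probability 0.8 (and drop 20%); to drop 80% of
# units the value passed would have to be keep_prob=0.2.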


Example 3: _init_body

  def _init_body(self, scope):
    with tf.variable_scope(scope):

      word_level_inputs = tf.reshape(self.inputs_embedded, [
        self.document_size * self.sentence_size,
        self.word_size,
        self.embedding_size
      ])
      word_level_lengths = tf.reshape(
        self.word_lengths, [self.document_size * self.sentence_size])

      with tf.variable_scope('word') as scope:
        word_encoder_output, _ = bidirectional_rnn(
          self.word_cell, self.word_cell,
          word_level_inputs, word_level_lengths,
          scope=scope)

        with tf.variable_scope('attention') as scope:
          word_level_output = task_specific_attention(
            word_encoder_output,
            self.word_output_size,
            scope=scope)

        with tf.variable_scope('dropout'):
          word_level_output = layers.dropout(
            word_level_output, keep_prob=self.dropout_keep_proba,
            is_training=self.is_training,
          )

      # sentence_level

      sentence_inputs = tf.reshape(
        word_level_output, [self.document_size, self.sentence_size, self.word_output_size])

      with tf.variable_scope('sentence') as scope:
        sentence_encoder_output, _ = bidirectional_rnn(
          self.sentence_cell, self.sentence_cell, sentence_inputs, self.sentence_lengths, scope=scope)

        with tf.variable_scope('attention') as scope:
          sentence_level_output = task_specific_attention(
            sentence_encoder_output, self.sentence_output_size, scope=scope)

        with tf.variable_scope('dropout'):
          sentence_level_output = layers.dropout(
            sentence_level_output, keep_prob=self.dropout_keep_proba,
            is_training=self.is_training,
          )

      with tf.variable_scope('classifier'):
        self.logits = layers.fully_connected(
          sentence_level_output, self.classes, activation_fn=None)

        self.prediction = tf.argmax(self.logits, axis=-1)
Author: siddrtm, Project: hierarchical-attention-networks, Lines of code: 53, Source file: HAN_model.py


Example 4: dnn_logits_fn

 def dnn_logits_fn():
   """Builds the logits from the input layer."""
   previous_layer = input_layer
   for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
     with variable_scope.variable_scope(
         "hiddenlayer_%d" % layer_id,
         values=(previous_layer,)) as hidden_layer_scope:
       net = layers.fully_connected(
           previous_layer,
           num_hidden_units,
           activation_fn=dnn_activation_fn,
           variables_collections=[dnn_parent_scope],
           scope=hidden_layer_scope)
       if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
         net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
     _add_hidden_layer_summary(net, hidden_layer_scope.name)
     previous_layer = net
   with variable_scope.variable_scope(
       "logits", values=(previous_layer,)) as logits_scope:
     dnn_logits = layers.fully_connected(
         previous_layer,
         head.logits_dimension,
         activation_fn=None,
         variables_collections=[dnn_parent_scope],
         scope=logits_scope)
   _add_hidden_layer_summary(dnn_logits, logits_scope.name)
   return dnn_logits
Author: Ajaycs99, Project: tensorflow, Lines of code: 27, Source file: dnn_tree_combined_estimator.py
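The pattern keep_prob=(1.0 - dnn_dropout) guarded by a TRAIN-mode check recurs in several of the later examples: the estimator stores a drop fraction, while layers.dropout expects the keep probability. A condensed sketch of the same idea with hypothetical values:

import tensorflow as tf
from tensorflow.contrib import layers

dnn_dropout = 0.3                                  # hypothetical: drop 30% of units while training
is_train_mode = True                               # stands in for mode == model_fn.ModeKeys.TRAIN
net = tf.placeholder(tf.float32, [None, 256])      # illustrative hidden activations

if dnn_dropout is not None and is_train_mode:
    # Convert the drop fraction into the keep probability expected by layers.dropout.
    net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))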


Example 5: conv_model

def conv_model(X, Y_, mode):
    XX = tf.reshape(X, [-1, 28, 28, 1])
    biasInit = tf.constant_initializer(0.1, dtype=tf.float32)
    Y1 = layers.conv2d(XX,  num_outputs=6,  kernel_size=[6, 6], biases_initializer=biasInit)
    Y2 = layers.conv2d(Y1, num_outputs=12, kernel_size=[5, 5], stride=2, biases_initializer=biasInit)
    Y3 = layers.conv2d(Y2, num_outputs=24, kernel_size=[4, 4], stride=2, biases_initializer=biasInit)
    Y4 = layers.flatten(Y3)
    Y5 = layers.relu(Y4, 200, biases_initializer=biasInit)
    # to deactivate dropout on the dense layer, set keep_prob=1
    Y5d = layers.dropout(Y5, keep_prob=0.75, noise_shape=None, is_training=mode==learn.ModeKeys.TRAIN)
    Ylogits = layers.linear(Y5d, 10)
    predict = tf.nn.softmax(Ylogits)
    classes = tf.cast(tf.argmax(predict, 1), tf.uint8)

    loss = conv_model_loss(Ylogits, Y_, mode)
    train_op = conv_model_train_op(loss, mode)
    eval_metrics = conv_model_eval_metrics(classes, Y_, mode)

    return learn.ModelFnOps(
        mode=mode,
        # You can name the fields of your predictions dictionary as you like.
        predictions={"predictions": predict, "classes": classes},
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metrics
    )
Author: spwcd, Project: QTML, Lines of code: 26, Source file: task.py


Example 6: model_fn

def model_fn(x, target, mode, params):
    """Model function for Estimator."""

    y_ = tf.cast(target, tf.float32)

    x_image = tf.reshape(x, [-1, 28, 28, 1])

    # first convolutional layer
    h_conv1 = layers.convolution2d(x_image, 32, [5,5])
    h_pool1 = layers.max_pool2d(h_conv1, [2,2])

    # second convolutional layer
    h_conv2 = layers.convolution2d(h_pool1, 64, [5,5])
    h_pool2 = layers.max_pool2d(h_conv2, [2,2])

    # densely connected layer
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = layers.fully_connected(h_pool2_flat, 1024)
    h_fc1_drop = layers.dropout(
        h_fc1, keep_prob=params["dropout"],
        is_training=(mode == ModeKeys.TRAIN))

    # readout layer
    y_conv = layers.fully_connected(h_fc1_drop, 10, activation_fn=None)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(y_conv, y_))
    train_op = tf.contrib.layers.optimize_loss(
        loss=cross_entropy,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=params["learning_rate"],
        optimizer="Adam")

    predictions = tf.argmax(y_conv, 1)
    return predictions, cross_entropy, train_op
Author: ccortezb, Project: pipeline, Lines of code: 35, Source file: mnist_cnn_estim_layers.py


Example 7: _dnn_logits

 def _dnn_logits(self, features, is_training=False):
   net = layers.input_from_feature_columns(
       features,
       self._get_dnn_feature_columns(),
       weight_collections=[self._dnn_weight_collection])
   for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
     net = layers.legacy_fully_connected(
         net,
         num_hidden_units,
         activation_fn=self._dnn_activation_fn,
         weight_collections=[self._dnn_weight_collection],
         bias_collections=[self._dnn_weight_collection],
         name="hiddenlayer_%d" % layer_id)
     if self._dnn_dropout is not None and is_training:
       net = layers.dropout(
           net,
           keep_prob=(1.0 - self._dnn_dropout))
     self._add_hidden_layer_summary(net, "hiddenlayer_%d" % layer_id)
   logit = layers.legacy_fully_connected(
       net,
       self._num_label_columns(),
       weight_collections=[self._dnn_weight_collection],
       bias_collections=[self._dnn_weight_collection],
       name="dnn_logit")
   self._add_hidden_layer_summary(logit, "dnn_logit")
   return logit
Author: Ambier, Project: tensorflow, Lines of code: 26, Source file: dnn_linear_combined.py


Example 8: define_feedforward_model

 def define_feedforward_model(self):
     layer_list=[]
     with self.graph.as_default() as g:
         is_training_batch=tf.placeholder(tf.bool,shape=(),name="is_training_batch")
         bn_params={"is_training":is_training_batch,"decay":0.99,"updates_collections":None}
         g.add_to_collection("is_training_batch",is_training_batch)
         with tf.name_scope("input"):
             input_layer=tf.placeholder(dtype=tf.float32,shape=(None,self.n_in),name="input_layer")
             if self.dropout_rate!=0.0:
                print "Using dropout to avoid overfitting and the dropout rate is",self.dropout_rate
                is_training_drop=tf.placeholder(dtype=tf.bool,shape=(),name="is_training_drop")
                input_layer_drop=dropout(input_layer,self.dropout_rate,is_training=is_training_drop)
                layer_list.append(input_layer_drop)
                g.add_to_collection(name="is_training_drop",value=is_training_drop)
             else:
                layer_list.append(input_layer)
         g.add_to_collection("input_layer",layer_list[0])
         for i in range(len(self.hidden_layer_size)):
             with tf.name_scope("hidden_layer_"+str(i+1)):
               if self.dropout_rate!=0.0:
                   last_layer=layer_list[-1]
                   if self.hidden_layer_type[i]=="tanh":
                      new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,\
                                 normalizer_params=bn_params)
                   if self.hidden_layer_type[i]=="sigmoid":
                       new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.sigmoid,normalizer_fn=batch_norm,\
                                 normalizer_params=bn_params)
                   new_layer_drop=dropout(new_layer,self.dropout_rate,is_training=is_training_drop)
                   layer_list.append(new_layer_drop)
               else:
                   last_layer=layer_list[-1]
                   if self.hidden_layer_type[i]=="tanh":
                      new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,\
                                normalizer_params=bn_params)
                   if self.hidden_layer_type[i]=="sigmoid":
                      new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.sigmoid,normalizer_fn=batch_norm,\
                                normalizer_params=bn_params)
                   layer_list.append(new_layer)
         with tf.name_scope("output_layer"):
             if self.output_type=="linear":
                output_layer=fully_connected(layer_list[-1],self.n_out,activation_fn=None)
             if self.output_type=="tanh":
                output_layer=fully_connected(layer_list[-1],self.n_out,activation_fn=tf.nn.tanh)
             g.add_to_collection(name="output_layer",value=output_layer)
         with tf.name_scope("training_op"):
              if self.optimizer=="adam":
                 self.training_op=tf.train.AdamOptimizer()
Author: CSTR-Edinburgh, Project: merlin, Lines of code: 47, Source file: model.py


Example 9: build_model

  def build_model(self, features, feature_columns, is_training):
    """See base class."""
    self._feature_columns = feature_columns

    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas,
            min_slice_size=64 << 20))
    with variable_scope.variable_scope(
        self._scope + "/input_from_feature_columns",
        values=features.values(),
        partitioner=input_layer_partitioner) as scope:
      net = layers.input_from_feature_columns(
          features,
          self._get_feature_columns(),
          weight_collections=[self._scope],
          trainable=self._trainable,
          scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(self._hidden_units):
      with variable_scope.variable_scope(
          self._scope + "/hiddenlayer_%d" % layer_id,
          values=[net],
          partitioner=hidden_layer_partitioner) as scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=self._activation_fn,
            variables_collections=[self._scope],
            trainable=self._trainable,
            scope=scope)
        if self._dropout is not None and is_training:
          net = layers.dropout(
              net,
              keep_prob=(1.0 - self._dropout))
      self._add_hidden_layer_summary(net, scope.name)

    with variable_scope.variable_scope(
        self._scope + "/logits",
        values=[net],
        partitioner=hidden_layer_partitioner) as scope:
      logits = layers.fully_connected(
          net,
          self._num_label_columns,
          activation_fn=None,
          variables_collections=[self._scope],
          trainable=self._trainable,
          scope=scope)
    self._add_hidden_layer_summary(logits, "logits")
    return logits
Author: ComeOnGetMe, Project: tensorflow, Lines of code: 53, Source file: composable_model.py


Example 10: model

def model():
    tf.set_random_seed(1)
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [None,])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        X_bn = batch_norm(X_pl, is_training=is_training_pl)
        print("X_bn", X_bn.get_shape())
        l1 = fully_connected(X_pl, num_outputs=100, activation_fn=relu)#, normalizer_fn=batch_norm)
        print("l1", l1.get_shape())
        l1_drop = dropout(l1, is_training=is_training_pl)
        print("l1_drop", l1_drop.get_shape())
        l_out = fully_connected(l1_drop, num_outputs=num_classes, activation_fn=None)
        print("l_out", l_out.get_shape())
        l_out_softmax = tf.nn.softmax(l_out)
        tf.contrib.layers.summarize_variables()

    with tf.variable_scope('metrics'):
        loss = sparse_softmax_cross_entropy_with_logits(l_out, t_pl)
        print("loss", loss.get_shape())
        loss = tf.reduce_mean(loss)
        print("loss", loss.get_shape())
        tf.summary.scalar('train/loss', loss)
        argmax = tf.to_int32(tf.argmax(l_out, 1))
        print("argmax", argmax.get_shape())
        correct = tf.to_float(tf.equal(argmax, t_pl))
        print("correct,", correct.get_shape())
        accuracy = tf.reduce_mean(correct)
        print("accuracy", accuracy.get_shape())

    with tf.variable_scope('optimizer'):
        print("building optimizer ...")
        global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        grads_and_vars = optimizer.compute_gradients(loss)
        gradients, variables = zip(*grads_and_vars)
        clipped_gradients, global_norm = (
            tf.clip_by_global_norm(gradients, clip_norm))
        clipped_grads_and_vars = zip(clipped_gradients, variables)

        tf.summary.scalar('train/global_gradient_norm', global_norm)

        train_op = optimizer.apply_gradients(clipped_grads_and_vars, global_step=global_step)

    return X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss, accuracy, train_op, global_step
Author: alrojo, Project: EEG_DauwelsLab, Lines of code: 49, Source file: mlp.py


Example 11: general_module_end_operations

    def general_module_end_operations(self, tensor, dropout_on, strided_max_pool_on):
        """
        Common end of module operations.

        :param tensor: The tensor being processed.
        :type tensor: tf.Tensor
        :param dropout_on: Whether to include dropout or not.
        :type dropout_on: bool
        :param strided_max_pool_on: Whether to include a strided max pool at the end of the module.
        :type strided_max_pool_on: bool
        :return: The processed tensor.
        :rtype: tf.Tensor
        """
        if strided_max_pool_on:
            tensor = max_pool2d(tensor, kernel_size=3, stride=2, padding='VALID')
        if dropout_on:
            tensor = dropout(tensor, self.dropout_keep_probability_tensor)
        return tensor
Author: golmschenk, Project: go_net, Lines of code: 18, Source file: net.py


Example 12: conv_model

def conv_model(feature, target, mode):
  """2-layer convolution model."""
  # Convert the target to a one-hot tensor of shape (batch_size, 10) and
  # with a on-value of 1 for each one-hot vector of length 10.
  target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)

  # Reshape feature to 4d tensor with 2nd and 3rd dimensions being
  # image width and height final dimension being the number of color channels.
  feature = tf.reshape(feature, [-1, 28, 28, 1])

  # First conv layer will compute 32 features for each 5x5 patch
  with tf.variable_scope('conv_layer1'):
    h_conv1 = layers.convolution(feature, 32, kernel_size=[5, 5],
                                 activation_fn=tf.nn.relu)
    h_pool1 = max_pool_2x2(h_conv1)

  # Second conv layer will compute 64 features for each 5x5 patch.
  with tf.variable_scope('conv_layer2'):
    h_conv2 = layers.convolution(h_pool1, 64, kernel_size=[5, 5],
                                 activation_fn=tf.nn.relu)
    h_pool2 = max_pool_2x2(h_conv2)
    # reshape tensor into a batch of vectors
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

  # Densely connected layer with 1024 neurons.
  h_fc1 = layers.dropout(
      layers.fully_connected(
          h_pool2_flat, 1024, activation_fn=tf.nn.relu), keep_prob=0.5,
      is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)

  # Compute logits (1 per class) and compute loss.
  logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

  # Create a tensor for training op.
  train_op = layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(), optimizer='SGD',
      learning_rate=0.001)

  return tf.argmax(logits, 1), loss, train_op
Author: ComeOnGetMe, Project: tensorflow, Lines of code: 40, Source file: mnist.py


Example 13: build_model

  def build_model(self, features, feature_columns, is_training):
    """See base class."""
    features = self._get_feature_dict(features)
    self._feature_columns = feature_columns

    net = layers.input_from_feature_columns(
        features,
        self._get_feature_columns(),
        weight_collections=[self._weight_collection_name])
    for layer_id, num_hidden_units in enumerate(self._hidden_units):
      with variable_scope.variable_op_scope(
          [net], "hiddenlayer_%d" % layer_id,
          partitioner=partitioned_variables.min_max_variable_partitioner(
              max_partitions=self._config.num_ps_replicas)) as scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=self._activation_fn,
            variables_collections=[self._weight_collection_name],
            scope=scope)
        if self._dropout is not None and is_training:
          net = layers.dropout(
              net,
              keep_prob=(1.0 - self._dropout))
      self._add_hidden_layer_summary(net, scope.name)
    with variable_scope.variable_op_scope(
        [net], "dnn_logits",
        partitioner=partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._config.num_ps_replicas)) as scope:
      logits = layers.fully_connected(
          net,
          self._num_label_columns,
          activation_fn=None,
          variables_collections=[self._weight_collection_name],
          scope=scope)
    self._add_hidden_layer_summary(logits, "dnn_logits")
    return logits
Author: Brandon-Tai, Project: tensorflow, Lines of code: 37, Source file: dnn_linear_combined.py


Example 14: _dnn_logits

 def _dnn_logits(self, features, is_training=False):
     net = layers.input_from_feature_columns(
         features, self._get_dnn_feature_columns(), weight_collections=[self._dnn_weight_collection]
     )
     for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
         with variable_scope.variable_op_scope(
             [net],
             "hiddenlayer_%d" % layer_id,
             partitioner=partitioned_variables.min_max_variable_partitioner(
                 max_partitions=self._config.num_ps_replicas
             ),
         ) as scope:
             net = layers.fully_connected(
                 net,
                 num_hidden_units,
                 activation_fn=self._dnn_activation_fn,
                 variables_collections=[self._dnn_weight_collection],
                 scope=scope,
             )
             if self._dnn_dropout is not None and is_training:
                 net = layers.dropout(net, keep_prob=(1.0 - self._dnn_dropout))
         self._add_hidden_layer_summary(net, scope.name)
     with variable_scope.variable_op_scope(
         [net],
         "dnn_logit",
         partitioner=partitioned_variables.min_max_variable_partitioner(max_partitions=self._config.num_ps_replicas),
     ) as scope:
         logit = layers.fully_connected(
             net,
             self._target_column.num_label_columns,
             activation_fn=None,
             variables_collections=[self._dnn_weight_collection],
             scope=scope,
         )
     self._add_hidden_layer_summary(logit, "dnn_logit")
     return logit
Author: 285219011, Project: liuwenfeng, Lines of code: 36, Source file: dnn_linear_combined.py


Example 15: _dnn_model_fn

def _dnn_model_fn(features, labels, mode, params, config=None):
  """Deep Neural Net model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `_Head` instance.
      * hidden_units: List of hidden units per layer.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training. If `None`, will use the Adagrad
          optimizer with a default learning rate of 0.05.
      * activation_fn: Activation function applied to each layer. If `None`,
          will use `tf.nn.relu`.
      * dropout: When not `None`, the probability we will drop out a given
          coordinate.
      * gradient_clip_norm: A float > 0. If provided, gradients are
          clipped to their global norm with this clipping ratio.
      * embedding_lr_multipliers: Optional. A dictionary from
          `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
          multiply with learning rate for the embedding variables.
      * input_layer_min_slice_size: Optional. The min slice size of input layer
          partitions. If not provided, will use the default of 64M.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    predictions: A dict of `Tensor` objects.
    loss: A scalar containing the loss of the step.
    train_op: The op for training.
  """
  head = params["head"]
  hidden_units = params["hidden_units"]
  feature_columns = params["feature_columns"]
  optimizer = params.get("optimizer") or "Adagrad"
  activation_fn = params.get("activation_fn")
  dropout = params.get("dropout")
  gradient_clip_norm = params.get("gradient_clip_norm")
  input_layer_min_slice_size = (
      params.get("input_layer_min_slice_size") or 64 << 20)
  num_ps_replicas = config.num_ps_replicas if config else 0
  embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})

  features = _get_feature_dict(features)
  parent_scope = "dnn"

  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas)
  with variable_scope.variable_scope(
      parent_scope,
      values=tuple(six.itervalues(features)),
      partitioner=partitioner):
    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas,
            min_slice_size=input_layer_min_slice_size))
    with variable_scope.variable_scope(
        "input_from_feature_columns",
        values=tuple(six.itervalues(features)),
        partitioner=input_layer_partitioner) as input_layer_scope:
      if all([
          isinstance(fc, feature_column._FeatureColumn)  # pylint: disable=protected-access
          for fc in feature_columns
      ]):
        net = layers.input_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=feature_columns,
            weight_collections=[parent_scope],
            scope=input_layer_scope)
      else:
        net = fc_core.input_layer(
            features=features,
            feature_columns=feature_columns,
            weight_collections=[parent_scope])

    for layer_id, num_hidden_units in enumerate(hidden_units):
      with variable_scope.variable_scope(
          "hiddenlayer_%d" % layer_id,
          values=(net,)) as hidden_layer_scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=activation_fn,
            variables_collections=[parent_scope],
            scope=hidden_layer_scope)
        if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
          net = layers.dropout(net, keep_prob=(1.0 - dropout))
      _add_hidden_layer_summary(net, hidden_layer_scope.name)

    with variable_scope.variable_scope(
        "logits",
        values=(net,)) as logits_scope:
      logits = layers.fully_connected(
          net,
          head.logits_dimension,
#......... (the rest of the code is omitted here) .........
Author: AlbertXiebnu, Project: tensorflow, Lines of code: 101, Source file: dnn.py


Example 16: _dnn_tree_combined_model_fn

def _dnn_tree_combined_model_fn(
    features,
    labels,
    mode,
    head,
    dnn_hidden_units,
    dnn_feature_columns,
    tree_learner_config,
    num_trees,
    tree_examples_per_layer,
    config=None,
    dnn_optimizer="Adagrad",
    dnn_activation_fn=nn.relu,
    dnn_dropout=None,
    dnn_input_layer_partitioner=None,
    dnn_input_layer_to_tree=True,
    dnn_steps_to_train=10000,
    predict_with_tree_only=False,
    tree_feature_columns=None,
    tree_center_bias=False,
    dnn_to_tree_distillation_param=None,
    use_core_versions=False,
    output_type=model.ModelBuilderOutputType.MODEL_FN_OPS):
  """DNN and GBDT combined model_fn.

  Args:
    features: `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    head: A `Head` instance.
    dnn_hidden_units: List of hidden units per layer.
    dnn_feature_columns: An iterable containing all the feature columns
      used by the model's DNN.
    tree_learner_config: A config for the tree learner.
    num_trees: Number of trees to grow model to after training DNN.
    tree_examples_per_layer: Number of examples to accumulate before
      growing the tree a layer. This value has a big impact on model
      quality and should be set equal to the number of examples in
      training dataset if possible. It can also be a function that computes
      the number of examples based on the depth of the layer that's
      being built.
    config: `RunConfig` of the estimator.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN. If `None`, will use the Adagrad
      optimizer with default learning rate of 0.001.
    dnn_activation_fn: Activation function applied to each layer of the DNN.
      If `None`, will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability to drop out a given
      unit in the DNN.
    dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
      Defaults to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
    dnn_input_layer_to_tree: Whether to provide the DNN's input layer
    as a feature to the tree.
    dnn_steps_to_train: Number of steps to train dnn for before switching
      to gbdt.
    predict_with_tree_only: Whether to use only the tree model output as the
      final prediction.
    tree_feature_columns: An iterable containing all the feature columns
      used by the model's boosted trees. If dnn_input_layer_to_tree is
      set to True, these features are in addition to dnn_feature_columns.
    tree_center_bias: Whether a separate tree should be created for
      first fitting the bias.
    dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
      float defines the weight of the distillation loss, and the loss_fn, for
      computing distillation loss, takes dnn_logits, tree_logits and weight
      tensor. If the entire tuple is None, no distillation will be applied. If
      only the loss_fn is None, we will take the sigmoid/softmax cross entropy
      loss be default. When distillation is applied, `predict_with_tree_only`
      will be set to True.
    use_core_versions: Whether feature columns and loss are from the core (as
      opposed to contrib) version of tensorflow.

  Returns:
    A `ModelFnOps` object.
  Raises:
    ValueError: if inputs are not valid.
  """
  if not isinstance(features, dict):
    raise ValueError("features should be a dictionary of `Tensor`s. "
                     "Given type: {}".format(type(features)))

  if not dnn_feature_columns:
    raise ValueError("dnn_feature_columns must be specified")

  if dnn_to_tree_distillation_param:
    if not predict_with_tree_only:
      logging.warning("update predict_with_tree_only to True since distillation"
                      "is specified.")
      predict_with_tree_only = True

  # Build DNN Logits.
  dnn_parent_scope = "dnn"
  dnn_partitioner = dnn_input_layer_partitioner or (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=config.num_ps_replicas, min_slice_size=64 << 20))

  if (output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC and
      not use_core_versions):
    raise ValueError("You must use core versions with Estimator Spec")

#......... (the rest of the code is omitted here) .........
Author: StephenOman, Project: tensorflow, Lines of code: 101, Source file: dnn_tree_combined_estimator.py


Example 17: __init__

    def __init__(self, word_embeddings, setting):

        self.vocab_size = setting.vocab_size
        self.len_sentence= len_sentence = setting.len_sentence
        self.num_epochs = setting.num_epochs
        self.num_classes = num_classes =setting.num_classes
        self.cnn_size = setting.cnn_size
        self.num_layers = setting.num_layers
        self.pos_size = setting.pos_size
        self.pos_num = setting.pos_num
        self.word_embedding = setting.word_embedding
        self.lr = setting.lr


        word_embedding = tf.get_variable(initializer=word_embeddings, name='word_embedding')
        pos1_embedding = tf.get_variable('pos1_embedding', [self.pos_num, self.pos_size])
        pos2_embedding = tf.get_variable('pos2_embedding', [self.pos_num, self.pos_size])
        #relation_embedding = tf.get_variable('relation_embedding', [self.num_classes, self.cnn_size])

        self.input_word = tf.placeholder(dtype=tf.int32, shape=[None, len_sentence], name='input_word')
        self.input_pos1 = tf.placeholder(dtype=tf.int32, shape=[None, len_sentence], name='input_pos1')
        self.input_pos2 = tf.placeholder(dtype=tf.int32, shape=[None, len_sentence], name='input_pos2')
        self.input_y = tf.placeholder(dtype=tf.float32, shape=[None, num_classes], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32)


        self.input_word_ebd = tf.nn.embedding_lookup(word_embedding, self.input_word)
        self.input_pos1_ebd = tf.nn.embedding_lookup(pos1_embedding, self.input_pos1)
        self.input_pos2_ebd = tf.nn.embedding_lookup(pos2_embedding, self.input_pos2)


        self.inputs =  tf.concat(axis=2,values=[self.input_word_ebd,self.input_pos1_ebd,self.input_pos2_ebd])
        self.inputs = tf.reshape(self.inputs, [-1,self.len_sentence,self.word_embedding+self.pos_size*2,1] )


        
        conv = layers.conv2d(inputs =self.inputs ,num_outputs = self.cnn_size ,kernel_size = [3,60],stride=[1,60],padding='SAME')

     
        max_pool = layers.max_pool2d(conv,kernel_size = [70,1],stride=[1,1])
        self.sentence = tf.reshape(max_pool, [-1, self.cnn_size])

 
        tanh = tf.nn.tanh(self.sentence)
        drop = layers.dropout(tanh,keep_prob=self.keep_prob)

   
        self.outputs = layers.fully_connected(inputs = drop,num_outputs = self.num_classes,activation_fn = tf.nn.softmax)

        '''
        self.y_index =  tf.argmax(self.input_y,1,output_type=tf.int32)
        self.indexes = tf.range(0, tf.shape(self.outputs)[0]) * tf.shape(self.outputs)[1] + self.y_index
        self.responsible_outputs = - tf.reduce_mean(tf.log(tf.gather(tf.reshape(self.outputs, [-1]),self.indexes)))
        '''
        #loss
        self.cross_loss = -tf.reduce_mean( tf.log(tf.reduce_sum( self.input_y  * self.outputs ,axis=1)))
        self.reward =  tf.log(tf.reduce_sum( self.input_y  * self.outputs ,axis=1))

        self.l2_loss = tf.contrib.layers.apply_regularization(regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                                                              weights_list=tf.trainable_variables())

        self.final_loss = self.cross_loss + self.l2_loss


        #accuracy
        self.pred = tf.argmax(self.outputs,axis=1)
        self.pred_prob = tf.reduce_max(self.outputs,axis=1)

        self.y_label = tf.argmax(self.input_y,axis=1)
        self.accuracy = tf.reduce_mean(tf.cast( tf.equal(self.pred,self.y_label), 'float'))

        #minimize loss
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_op = optimizer.minimize(self.final_loss)


        self.tvars = tf.trainable_variables()

        # manual update parameters
        self.tvars_holders = []
        for idx, var in enumerate(self.tvars):
            placeholder = tf.placeholder(tf.float32, name=str(idx) + '_holder')
            self.tvars_holders.append(placeholder)

        self.update_tvar_holder = []
        for idx, var in enumerate(self.tvars):
            update_tvar = tf.assign(var, self.tvars_holders[idx])
            self.update_tvar_holder.append(update_tvar)
Author: cherry979988, Project: TensorFlow_RLRE, Lines of code: 88, Source file: cnnmodel.py
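Unlike the earlier examples, this model exposes keep_prob itself as a placeholder rather than a boolean is_training flag, so dropout is effectively switched off at test time by feeding 1.0. A sketch of how it would typically be fed, where the session, the model instance name and the batch arrays are illustrative only:

# Training step: keep 50% of the units (assumed value).
sess.run(model.train_op, feed_dict={model.input_word: word_batch,
                                    model.input_pos1: pos1_batch,
                                    model.input_pos2: pos2_batch,
                                    model.input_y: y_batch,
                                    model.keep_prob: 0.5})

# Evaluation: keep_prob=1.0 keeps every unit, so dropout becomes a no-op.
acc = sess.run(model.accuracy, feed_dict={model.input_word: word_eval,
                                          model.input_pos1: pos1_eval,
                                          model.input_pos2: pos2_eval,
                                          model.input_y: y_eval,
                                          model.keep_prob: 1.0})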



