
Python tensorflow.truncated_normal_initializer Code Examples


This article collects typical usage examples of the Python function tensorflow.truncated_normal_initializer. If you have been wondering what exactly truncated_normal_initializer does, how to call it, or what it looks like in real code, the curated examples here should help.



Below are 20 code examples of the truncated_normal_initializer function, sorted by popularity by default. You can upvote any example you like or find useful; your ratings help the system recommend better Python code examples.
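
Before the repository examples, here is a minimal, self-contained sketch of what the initializer does and how it is typically passed to tf.get_variable. It assumes the TensorFlow 1.x API (which all of the examples below target); the scope and variable names are illustrative only.

import numpy as np
import tensorflow as tf  # TensorFlow 1.x

# truncated_normal_initializer samples from N(mean, stddev^2) and re-draws any
# value that falls more than two standard deviations from the mean, so freshly
# initialized weights contain no extreme outliers.
init = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)

with tf.variable_scope('demo'):
    w = tf.get_variable('w', shape=[784, 256], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    values = sess.run(w)
    print(np.abs(values).max())  # bounded by 2 * stddev = 0.02

Most of the examples below follow exactly this pattern, varying only the stddev: roughly 0.01 to 0.1 for fully connected and softmax layers, and 0.02 for the GAN convolutions.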

Example 1: build_network

    def build_network(self, images, class_num, is_training=True, keep_prob=0.5, scope='Fast-RCNN'):

        self.conv1 = self.convLayer(images, 11, 11, 4, 4, 96, "conv1", "VALID")
        lrn1 = self.LRN(self.conv1, 2, 2e-05, 0.75, "norm1")
        self.pool1 = self.maxPoolLayer(lrn1, 3, 3, 2, 2, "pool1", "VALID")
        self.conv2 = self.convLayer(self.pool1, 5, 5, 1, 1, 256, "conv2", groups=2)
        lrn2 = self.LRN(self.conv2, 2, 2e-05, 0.75, "lrn2")
        self.pool2 = self.maxPoolLayer(lrn2, 3, 3, 2, 2, "pool2", "VALID")
        self.conv3 = self.convLayer(self.pool2, 3, 3, 1, 1, 384, "conv3")
        self.conv4 = self.convLayer(self.conv3, 3, 3, 1, 1, 384, "conv4", groups=2)
        self.conv5 = self.convLayer(self.conv4, 3, 3, 1, 1, 256, "conv5", groups=2)

        self.roi_pool6 = roi_pooling(self.conv5, self.rois, pool_height=6, pool_width=6)

        with slim.arg_scope([slim.fully_connected, slim.conv2d],
                            activation_fn=nn_ops.relu,
                            weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                            weights_regularizer=slim.l2_regularizer(0.0005)):
            flatten = slim.flatten(self.roi_pool6, scope='flat_32')
            self.fc1 = slim.fully_connected(flatten, 4096,  scope='fc_6')
            drop6 = slim.dropout(self.fc1, keep_prob=keep_prob, is_training=is_training, scope='dropout6')
            self.fc2 = slim.fully_connected(drop6, 4096, scope='fc_7')
            drop7 = slim.dropout(self.fc2, keep_prob=keep_prob, is_training=is_training, scope='dropout7')
            cls = slim.fully_connected(drop7, class_num, activation_fn=nn_ops.softmax, scope='fc_8')
            bbox = slim.fully_connected(drop7, (self.class_num - 1) * 4,
                                        weights_initializer=tf.truncated_normal_initializer(0.0, 0.001),
                                        activation_fn=None, scope='fc_9')
        return cls, bbox
Author: ausk, Project: Fast-RCNN, Lines: 28, Source: Net.py


Example 2: lstm_fs_

def lstm_fs_(xs, ys, batches, l, m, n):
    #(name, shape=None, initializer=None,dtype=tf.float32, var_type="variable")
    [Wf, Wi, WC, Wo] = map(lambda name: variable_on_cpu(name, shape=[m+n,m], initializer=tf.truncated_normal_initializer(stddev=1e-2)), ["Wf", "Wi", "WC", "Wo"])
    Wo1 = variable_on_cpu( "Wo1", shape=[m, n], initializer=tf.truncated_normal_initializer(stddev=1e-2))
    [bf, bi, bC, bo] = map(lambda name: variable_on_cpu(name, shape=[m], initializer=tf.truncated_normal_initializer(stddev=1e-2)), ["bf", "bi", "bC", "bo"])
    bo1 = variable_on_cpu( "bo1", shape=[n], initializer=tf.truncated_normal_initializer(stddev=1e-2))
    # C = variable_on_cpu("C", shape=[m], var_type="variable")
    # h = variable_on_cpu("h", shape=[m], var_type="variable")
    C = tf.zeros([batches, m])
    h = tf.zeros([batches, m])
    (outs, end) = scan(lambda mem, x: step_lstm1(x, mem, Wf, bf, Wi, bi, WC, bC, Wo, bo, Wo1, bo1), 
                       (C,h), xs, l)
    yhats = tf.stack(outs)  # tf.pack was renamed tf.stack in TensorFlow 1.0
    loss = cross_entropy(ys, yhats, t=1e-6)
    # note: this measures cross-entropy on the final step, not a true accuracy
    accuracy = cross_entropy(ys[-1], yhats[-1])
    #tf.nn.sparse_softmax_cross_entropy_with_logits(outs[-1], yhats[-1])
    return {"loss": loss, "inference": yhats, "accuracy": accuracy}
Author: holdenlee, Project: tensorflow_learning, Lines: 25, Source: lstm_model.py


Example 3: __init__

 def __init__(self, distinctTagNum, w2vPath, c2vPath, numHidden):
     self.distinctTagNum = distinctTagNum
     self.numHidden = numHidden
     self.w2v = self.load_w2v(w2vPath, FLAGS.embedding_word_size)
     self.c2v = self.load_w2v(c2vPath, FLAGS.embedding_char_size)
     self.words = tf.Variable(self.w2v, name="words")
     self.chars = tf.Variable(self.c2v, name="chars")
     with tf.variable_scope('Softmax') as scope:
         self.W = tf.get_variable(
             shape=[numHidden * 2, distinctTagNum],
             initializer=tf.truncated_normal_initializer(stddev=0.01),
             name="weights",
             regularizer=tf.contrib.layers.l2_regularizer(0.001))
         self.b = tf.Variable(tf.zeros([distinctTagNum]), name="bias")  # name belongs on the Variable, not on the zeros op
     with tf.variable_scope('CNN_Layer') as scope:
         self.filter = tf.get_variable(
             "filters_1",
             shape=[2, FLAGS.embedding_char_size, 1,
                    FLAGS.embedding_char_size],
             regularizer=tf.contrib.layers.l2_regularizer(0.0001),
             initializer=tf.truncated_normal_initializer(stddev=0.01),
             dtype=tf.float32)
     self.trains_params = None
     self.inp_w = tf.placeholder(tf.int32,
                                 shape=[None, FLAGS.max_sentence_len],
                                 name="input_words")
     self.inp_c = tf.placeholder(
         tf.int32,
         shape=[None, FLAGS.max_sentence_len * FLAGS.max_chars_per_word],
         name="input_chars")
     pass
Author: koth, Project: kcws, Lines: 31, Source: train_pos.py


Example 4: _shared_encoder_network

  def _shared_encoder_network(self):
    # config SSE network to be shared encoder mode
    # Build shared encoder
    with tf.variable_scope('shared_encoder'):
      # TODO: need to experiment with the forget gate and peepholes here
      if self.use_lstm:
        src_single_cell = tf.nn.rnn_cell.LSTMCell(self.src_cell_size, forget_bias=1.0, use_peepholes=False)
      else:
        src_single_cell = tf.nn.rnn_cell.GRUCell(self.src_cell_size)

      src_cell = src_single_cell
      if self.num_layers > 1:
        src_cell = tf.nn.rnn_cell.MultiRNNCell([src_single_cell] * self.num_layers)

      #compute source sequence related tensors
      src_output, _ = tf.nn.dynamic_rnn(src_cell, self.src_input_distributed, sequence_length=self._src_lens,
                                        dtype=tf.float32)
      src_last_output = self._last_relevant(src_output, self._src_lens)
      self.src_M = tf.get_variable('src_M', shape=[self.src_cell_size, self.seq_embed_size],
                                   initializer=tf.truncated_normal_initializer())
      # self.src_b = tf.get_variable('src_b', shape=[self.seq_embed_size])
      self.src_seq_embedding = tf.matmul(src_last_output, self.src_M)  # + self.src_b

      #declare tgt_M tensor before reuse them
      self.tgt_M = tf.get_variable('tgt_M', shape=[self.src_cell_size, self.seq_embed_size],
                                   initializer=tf.truncated_normal_initializer())
      # self.tgt_b = tf.get_variable('tgt_b', shape=[self.seq_embed_size])

    with tf.variable_scope('shared_encoder', reuse=True):
      #compute target sequence related tensors by reusing shared_encoder model
      tgt_output, _ = tf.nn.dynamic_rnn(src_cell, self.tgt_input_distributed, sequence_length=self._tgt_lens,
                                        dtype=tf.float32)
      tgt_last_output = self._last_relevant(tgt_output, self._tgt_lens)

      self.tgt_seq_embedding = tf.matmul(tgt_last_output, self.tgt_M)  # + self.tgt_b
Author: yaoyaowd, Project: tensorflow_demo, Lines: 35, Source: ebay_model.py


Example 5: model

def model(data, prev_outputs, image_size, n_channels, n_actions, n_prev_actions):
    kernel_defs = [(8, 16, 4), (2, 32, 1)]  # each conv layer, (patch_side, n_kernels, stride)
    fc_sizes = [256]
    n_input_kernels = n_channels
    for i, k in enumerate(kernel_defs):
        with tf.variable_scope("conv_%i" % i):
            kernel_shape = (k[0], k[0], n_input_kernels, k[1])
            data = conv_relu(data, kernel_shape, k[2])
            n_input_kernels = k[1]

    for i, n in enumerate(fc_sizes):
        with tf.variable_scope("fc_%i" % i):
            if i == 0:
                previous_n = kernel_defs[-1][1] * np.prod(image_size) // np.prod([k[2] for k in kernel_defs]) ** 2  # integer division so tf.reshape gets an int
                data = tf.reshape(data, [-1, previous_n])
                reshape_prev_outputs = tf.reshape(prev_outputs, [-1, n_actions * n_prev_actions])
                prev_outputs_weights = tf.get_variable("prev_outputs_weights", [n_actions * n_prev_actions, n],
                                                       initializer=tf.truncated_normal_initializer(mean=0., stddev=0.01/np.sqrt(n_prev_actions * n_actions)))
            else:
                previous_n = fc_sizes[i-1]
            weights = tf.get_variable("weights", [previous_n, n],
                                      initializer=tf.truncated_normal_initializer(mean=0., stddev=0.01 / np.sqrt(previous_n)))
            biases = tf.get_variable("biases", [n], initializer=tf.constant_initializer(0.0))
            relu_input = tf.matmul(data, weights) + biases
            if i == 0:
                relu_input += 0.1 * (previous_n / n_actions / n_prev_actions) * tf.matmul(reshape_prev_outputs, prev_outputs_weights)
            data = tf.nn.relu(relu_input)

    with tf.variable_scope("flat_out"):
        weights = tf.get_variable("weights", [fc_sizes[-1], n_actions],
                                  initializer=tf.truncated_normal_initializer(mean=0., stddev=0.01 / np.sqrt(fc_sizes[-1])))
        biases = tf.get_variable("biases", [n_actions], initializer=tf.constant_initializer(0.0))
        return tf.matmul(data, weights) + biases
Author: bibarz, Project: bibarz.github.io, Lines: 33, Source: lander.py


Example 6: testCheckInitializers

  def testCheckInitializers(self):
    initializers = {
        "key_a": tf.truncated_normal_initializer(mean=0, stddev=1),
        "key_c": tf.truncated_normal_initializer(mean=0, stddev=1),
    }
    keys = ["key_a", "key_b"]
    self.assertRaisesRegexp(KeyError,
                            "Invalid initializer keys.*",
                            snt.check_initializers,
                            initializers=initializers,
                            keys=keys)

    del initializers["key_c"]
    initializers["key_b"] = "not a function"
    self.assertRaisesRegexp(TypeError,
                            "Initializer for.*",
                            snt.check_initializers,
                            initializers=initializers,
                            keys=keys)

    initializers["key_b"] = {"key_c": "not a function"}
    self.assertRaisesRegexp(TypeError,
                            "Initializer for.*",
                            snt.check_initializers,
                            initializers=initializers,
                            keys=keys)

    initializers["key_b"] = {
        "key_c": tf.truncated_normal_initializer(mean=0, stddev=1),
        "key_d": tf.truncated_normal_initializer(mean=0, stddev=1),
    }
    snt.check_initializers(initializers=initializers, keys=keys)
Author: ccchang0111, Project: sonnet, Lines: 32, Source: util_test.py


Example 7: layers

def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    """
    Create the layers for a fully convolutional network.  Build skip-layers using the vgg layers.
    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output
    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output
    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output
    :param num_classes: Number of classes to classify
    :return: The Tensor for the last layer of output
    """
    # 1x1 convolution on layer7, then upsample by 2
    input = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, strides=(1, 1), padding='same',
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    output = tf.layers.conv2d_transpose(input, num_classes, 4, strides=(2, 2), padding='same',
                                        kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))

    # skip connection from layer4, then upsample by 2
    input = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, strides=(1, 1), padding='same',
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    input = tf.add(input, output)
    output = tf.layers.conv2d_transpose(input, num_classes, 4, strides=(2, 2), padding='same',
                                        kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))

    # skip connection from layer3, then upsample by 8
    input = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, strides=(1, 1), padding='same',
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    input = tf.add(input, output)
    nn_last_layer = tf.layers.conv2d_transpose(input, num_classes, 32, strides=(8, 8), padding='same',
                                               kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))

    return nn_last_layer
Author: jgliang74, Project: CarND-Semantic-Segmentation, Lines: 30, Source: main.py


Example 8: Discriminator

def Discriminator(image_Pattern, initial_Filter_Count = 64, attribute_Count = 10, reuse = False):
    with tf.variable_scope('discriminator', reuse=reuse):
        hidden_Activation = image_Pattern;
        for index in range(6):
            hidden_Activation = tf.nn.leaky_relu(
                tf.layers.conv2d(
                    inputs = hidden_Activation, 
                    filters = initial_Filter_Count * (2 ** index), 
                    kernel_size = 4,
                    strides = 2,
                    padding = "same",
                    kernel_initializer = tf.truncated_normal_initializer(stddev=0.02)
                    ),
                alpha=0.01,
                name="hidden_Layer{}".format(index)
                )

        output_Activation = tf.layers.conv2d(
            inputs = hidden_Activation, 
            filters = 1 + attribute_Count,
            kernel_size = hidden_Activation.get_shape()[1:3],
            strides = 1,
            padding = "valid",
            name = "output_Layer",
            use_bias = False,
            kernel_initializer = tf.truncated_normal_initializer(stddev=0.02)
            )

        discrimination_Logit, attribute_Logit = tf.split(
            tf.squeeze(output_Activation, axis=[1,2]),
            num_or_size_splits = [1, attribute_Count],
            axis = 1
            )

        return discrimination_Logit, attribute_Logit;
Author: CODEJIN, Project: GAN, Lines: 35, Source: Customized_Layers.py


Example 9: discriminator

def discriminator(images, reuse_variables=None):
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables) as scope:
        # First convolutional and pool layers
        # This finds 32 different 5 x 5 pixel features
        d_w1 = tf.get_variable('d_w1', [5, 5, 1, 32], initializer=tf.truncated_normal_initializer(stddev=0.02))
        d_b1 = tf.get_variable('d_b1', [32], initializer=tf.constant_initializer(0))
        d1 = tf.nn.conv2d(input=images, filter=d_w1, strides=[1, 1, 1, 1], padding='SAME')
        d1 = d1 + d_b1
        d1 = tf.nn.relu(d1)
        d1 = tf.nn.avg_pool(d1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        # Second convolutional and pool layers
        # This finds 64 different 5 x 5 pixel features
        d_w2 = tf.get_variable('d_w2', [5, 5, 32, 64], initializer=tf.truncated_normal_initializer(stddev=0.02))
        d_b2 = tf.get_variable('d_b2', [64], initializer=tf.constant_initializer(0))
        d2 = tf.nn.conv2d(input=d1, filter=d_w2, strides=[1, 1, 1, 1], padding='SAME')
        d2 = d2 + d_b2
        d2 = tf.nn.relu(d2)
        d2 = tf.nn.avg_pool(d2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        # First fully connected layer
        d_w3 = tf.get_variable('d_w3', [7 * 7 * 64, 1024], initializer=tf.truncated_normal_initializer(stddev=0.02))
        d_b3 = tf.get_variable('d_b3', [1024], initializer=tf.constant_initializer(0))
        d3 = tf.reshape(d2, [-1, 7 * 7 * 64])
        d3 = tf.matmul(d3, d_w3)
        d3 = d3 + d_b3
        d3 = tf.nn.relu(d3)

        # Second fully connected layer
        d_w4 = tf.get_variable('d_w4', [1024, 1], initializer=tf.truncated_normal_initializer(stddev=0.02))
        d_b4 = tf.get_variable('d_b4', [1], initializer=tf.constant_initializer(0))
        d4 = tf.matmul(d3, d_w4) + d_b4

        # d4 contains unscaled values
        return d4
Author: mm25712423, Project: Test, Lines: 35, Source: image_models.py


Example 10: inference

def inference(images):
    def _variable_with_weight_decay(name, shape, stddev, wd):
        var = tf.get_variable(name, shape=shape, initializer=tf.truncated_normal_initializer(stddev=stddev))
        if wd:
            weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')  # tf.mul was renamed tf.multiply in TF 1.0
            tf.add_to_collection('losses', weight_decay)
        return var

    with tf.variable_scope('conv1') as scope:
        kernel = tf.get_variable('weights', shape=[3, 3, 3, 32], initializer=tf.truncated_normal_initializer(stddev=1e-4))
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', shape=[32], initializer=tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope.name)
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

    with tf.variable_scope('conv2') as scope:
        kernel = tf.get_variable('weights', shape=[3, 3, 32, 64], initializer=tf.truncated_normal_initializer(stddev=1e-4))
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', shape=[64], initializer=tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope.name)
    pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')

    with tf.variable_scope('conv3') as scope:
        kernel = tf.get_variable('weights', shape=[3, 3, 64, 128], initializer=tf.truncated_normal_initializer(stddev=1e-4))
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', shape=[128], initializer=tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias, name=scope.name)
    pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')

    with tf.variable_scope('conv4') as scope:
        kernel = tf.get_variable('weights', shape=[3, 3, 128, 256], initializer=tf.truncated_normal_initializer(stddev=1e-4))
        conv = tf.nn.conv2d(pool3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', shape=[256], initializer=tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(bias, name=scope.name)
    pool4 = tf.nn.max_pool(conv4, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')

    with tf.variable_scope('fc5') as scope:
        dim = 1
        for d in pool4.get_shape()[1:].as_list():
            dim *= d
        reshape = tf.reshape(pool4, [BATCH_SIZE, dim])
        weights = _variable_with_weight_decay('weights', shape=[dim, 1024], stddev=0.05, wd=0.005)
        biases = tf.get_variable('biases', shape=[1024], initializer=tf.constant_initializer(0.1))
    fc5 = tf.nn.relu_layer(reshape, weights, biases, name=scope.name)

    with tf.variable_scope('fc6') as scope:
        weights = _variable_with_weight_decay('weights', shape=[1024, 256], stddev=0.05, wd=0.005)
        biases = tf.get_variable('biases', shape=[256], initializer=tf.constant_initializer(0.1))
    fc6 = tf.nn.relu_layer(fc5, weights, biases, name=scope.name)

    with tf.variable_scope('fc7') as scope:
        weights = _variable_with_weight_decay('weights', shape=[256, NUM_CLASSES], stddev=0.05, wd=0.005)
        biases = tf.get_variable('biases', shape=[NUM_CLASSES], initializer=tf.constant_initializer(0.1))
    fc7 = tf.nn.xw_plus_b(fc6, weights, biases, name=scope.name)

    return fc7
Author: nyakosuta, Project: tf-classifier, Lines: 60, Source: v2.py


Example 11: Discriminator_with_Vanilla

def Discriminator_with_Vanilla(input_Pattern, hidden_Unit_Size = 128, label_Unit_Size = 10, is_Training  = True, reuse = False):
    with tf.variable_scope('discriminator', reuse=reuse):
        hidden_Activation = tf.layers.dense(
            inputs = input_Pattern,
            units = hidden_Unit_Size,
            activation = tf.nn.relu,
            use_bias = True,
            kernel_initializer = tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer = tf.zeros_initializer(),
            name = "hidden"
            )
        discrimination_Logits = tf.layers.dense(
            inputs = hidden_Activation,
            units = 1,
            activation = None,
            use_bias = True,
            kernel_initializer = tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer = tf.zeros_initializer(),
            name = "discrimination"
            )
        discrimination_Activation = tf.nn.sigmoid(discrimination_Logits);

        label_Logits = tf.layers.dense(
            inputs = hidden_Activation,
            units = label_Unit_Size,
            activation = None,
            use_bias = True,
            kernel_initializer = tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer = tf.zeros_initializer(),
            name = "label"
            )
        label_Activation = tf.nn.softmax(label_Logits);

        return discrimination_Logits, label_Logits, discrimination_Activation, label_Activation;
Author: CODEJIN, Project: GAN, Lines: 34, Source: Customized_Layers.py


Example 12: Generator

def Generator(image_Pattern, is_Training = True, name = "generator", reuse = False):
    with tf.variable_scope(name, reuse=reuse):
        convolution_Activation = tf.nn.leaky_relu(
            tf.layers.conv2d(
                inputs = image_Pattern, 
                filters = 2 ** 6, 
                kernel_size = [4,4],
                strides = (2,2),
                padding = "same",
                use_bias = False,
                kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                )
            )

        for power in range(7, 10):
            convolution_Activation = tf.nn.leaky_relu(
                tf.layers.batch_normalization(
                    tf.layers.conv2d(
                        inputs = convolution_Activation, 
                        filters = 2 ** power, 
                        kernel_size = [4,4],
                        strides = (2,2),
                        padding = "same",
                        use_bias = False,
                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                        ),
                    training = is_Training
                    )
                )

        convolution_Transpose_Activation = convolution_Activation;
        for power in reversed(range(6, 9)):
            convolution_Transpose_Activation = tf.nn.leaky_relu(
                tf.layers.batch_normalization(
                    tf.layers.conv2d_transpose(
                        inputs = convolution_Transpose_Activation, 
                        filters = 2 ** power, 
                        kernel_size = [4,4],
                        strides = (2,2),
                        padding = "same",
                        use_bias = False,
                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                        ),
                    training = is_Training
                    )
                )

        generator_Logit = tf.layers.conv2d_transpose(
            inputs = convolution_Transpose_Activation, 
            filters = 3,    #RGB
            kernel_size = [4,4],
            strides = (2,2),
            padding = "same",
            use_bias = False,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
            )
        
        generator_Activation = tf.nn.tanh(generator_Logit);

        return generator_Logit, generator_Activation;
Author: CODEJIN, Project: GAN, Lines: 60, Source: Customized_Layers.py


Example 13: _build_network

  def _build_network(self, is_training=True):
    # select initializers
    if cfg.TRAIN.TRUNCATED:
      initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
      initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)
    else:
      initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
      initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)

    net_conv = self._image_to_head(is_training)
    with tf.variable_scope(self._scope, self._scope):
      # build the anchors for the image
      self._anchor_component()
      # region proposal network
      rois = self._region_proposal(net_conv, is_training, initializer)
      # region of interest pooling
      if cfg.POOLING_MODE == 'crop':
        pool5 = self._crop_pool_layer(net_conv, rois, "pool5")
      else:
        raise NotImplementedError

    fc7 = self._head_to_tail(pool5, is_training)
    with tf.variable_scope(self._scope, self._scope):
      # region classification
      cls_prob, bbox_pred = self._region_classification(fc7, is_training, 
                                                        initializer, initializer_bbox)

    self._score_summaries.update(self._predictions)

    return rois, cls_prob, bbox_pred
Author: StanislawAntol, Project: tf-faster-rcnn, Lines: 30, Source: network.py


Example 14: fc

    def fc(self, input, num_out, name, relu=True, trainable=True):
        with tf.variable_scope(name) as scope:
            # only use the first input
            if isinstance(input, tuple):
                input = input[0]

            input_shape = input.get_shape()
            if input_shape.ndims == 4:
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= d
                feed_in = tf.reshape(tf.transpose(input,[0,3,1,2]), [-1, dim])
            else:
                feed_in, dim = (input, int(input_shape[-1]))

            if name == 'bbox_pred':
                init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
                init_biases = tf.constant_initializer(0.0)
            else:
                init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
                init_biases = tf.constant_initializer(0.0)

            weights = self.make_var('weights', [dim, num_out], init_weights, trainable, \
                                    regularizer=self.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY))
            biases = self.make_var('biases', [num_out], init_biases, trainable)

            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
            fc = op(feed_in, weights, biases, name=scope.name)
            return fc
Author: beneo, Project: faster-CTPN, Lines: 29, Source: network.py


Example 15: residual_block

def residual_block(input_, dilation, kwidth, num_kernels=1,
                   bias_init=None, stddev=0.02, do_skip=True,
                   name='residual_block'):
    print('input shape to residual block: ', input_.get_shape())
    with tf.variable_scope(name):
        h_a = atrous_conv1d(input_, dilation, kwidth, num_kernels,
                            bias_init=bias_init, stddev=stddev)
        h = tf.tanh(h_a)
        # apply gated activation
        z_a = atrous_conv1d(input_, dilation, kwidth, num_kernels,
                            name='conv_gate', bias_init=bias_init,
                            stddev=stddev)
        z = tf.nn.sigmoid(z_a)
        print('gate shape: ', z.get_shape())
        # element-wise apply the gate
        gated_h = tf.multiply(z, h)  # tf.mul was renamed tf.multiply in TF 1.0
        print('gated h shape: ', gated_h.get_shape())
        # make the residual connection
        h_ = conv1d(gated_h, kwidth=1, num_kernels=1,
                    init=tf.truncated_normal_initializer(stddev=stddev),
                    name='residual_conv1')
        res = h_ + input_
        print('residual result: ', res.get_shape())
        if do_skip:
            # make the skip connection
            skip = conv1d(gated_h, kwidth=1, num_kernels=1,
                          init=tf.truncated_normal_initializer(stddev=stddev),
                          name='skip_conv1')
            return res, skip
        else:
            return res
Author: cc-cherie, Project: segan, Lines: 31, Source: ops.py


Example 16: bottleneck

def bottleneck(input_, feature_input, features, is_training, stride, name):
    f1, f2, f3 = features
    shortcut_input = input_
    with tf.variable_scope(name):
        conv1_weight = tf.get_variable(name='conv1', shape=[1, 1, feature_input, f1],
                                       initializer=tf.truncated_normal_initializer(stddev=0.01))
        input_ = tf.nn.conv2d(input_, conv1_weight, [1, 1, 1, 1], padding='SAME')
        input_ = tf.layers.batch_normalization(input_, training=is_training)
        input_ = tf.nn.relu(input_)

        conv2_weight = tf.get_variable(name='conv2', shape=[3, 3, f1, f2],
                                       initializer=tf.truncated_normal_initializer(stddev=0.01))
        input_ = tf.nn.conv2d(input_, conv2_weight, [1, stride, stride, 1], padding='SAME')
        input_ = tf.layers.batch_normalization(input_, training=is_training)
        input_ = tf.nn.relu(input_)

        conv3_weight = tf.get_variable(name='conv3', shape=[1, 1, f2, f3],
                                       initializer=tf.truncated_normal_initializer(stddev=0.01))
        input_ = tf.nn.conv2d(input_, conv3_weight, [1, 1, 1, 1], padding='SAME')
        input_ = tf.layers.batch_normalization(input_, training=is_training)
        if feature_input != f3:
            convs_weight = tf.get_variable(name='convs', shape=[1, 1, feature_input, f3],
                                           initializer=tf.truncated_normal_initializer(stddev=0.01))
            shortcut_input = tf.nn.conv2d(shortcut_input, convs_weight, [1, stride, stride, 1], padding='SAME')
            # batch-normalize only the projected shortcut; the identity shortcut passes through unchanged
            shortcut_input = tf.layers.batch_normalization(shortcut_input, training=is_training)
        input_ = tf.nn.relu(tf.add(shortcut_input, input_))
    return input_
Author: UGuess, Project: emotion_classifier, Lines: 27, Source: resnet_utils.py


Example 17: model

    def model(x_crop, y_, reuse):
        """ For more simplified CNN APIs, check tensorlayer.org """
        W_init = tf.truncated_normal_initializer(stddev=5e-2)
        W_init2 = tf.truncated_normal_initializer(stddev=0.04)
        b_init2 = tf.constant_initializer(value=0.1)
        with tf.variable_scope("model", reuse=reuse):
            net = tl.layers.InputLayer(x_crop, name='input')
            net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn1')
            net = tl.layers.SignLayer(net)
            net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
            net = tl.layers.LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
            net = tl.layers.BinaryConv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2')
            net = tl.layers.LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
            net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
            net = tl.layers.FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
            net = tl.layers.SignLayer(net)
            net = tl.layers.BinaryDenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
            net = tl.layers.SignLayer(net)
            net = tl.layers.BinaryDenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
            net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output')  # output: (batch_size, 10)
            y = net.outputs

            ce = tl.cost.cross_entropy(y, y_, name='cost')
            # L2 for the MLP, without this, the accuracy will be reduced by 15%.
            L2 = 0
            for p in tl.layers.get_variables_with_name('relu/W', True, True):
                L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
            cost = ce + L2

            # correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(y), 1), y_)
            correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
            acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

            return net, cost, acc
Author: dccforever, Project: tensorlayer, Lines: 34, Source: tutorial_binarynet_cifar10_tfrecord.py


Example 18: model_batch_norm

    def model_batch_norm(x_crop, y_, reuse, is_train):
        """ Batch normalization should be placed before rectifier. """
        W_init = tf.truncated_normal_initializer(stddev=5e-2)
        W_init2 = tf.truncated_normal_initializer(stddev=0.04)
        b_init2 = tf.constant_initializer(value=0.1)
        with tf.variable_scope("model", reuse=reuse):
            net = InputLayer(x_crop, name='input')

            net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn1')
            net = tl.layers.BatchNormLayer(net, is_train, act=tf.nn.relu, name='batch1')
            net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
            net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn2')
            net = tl.layers.BatchNormLayer(net, is_train, act=tf.nn.relu, name='batch2')
            net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
            net = tl.layers.FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
            net = tl.layers.DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
            net = tl.layers.DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
            net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output')  # output: (batch_size, 10)
            y = net.outputs

            ce = tl.cost.cross_entropy(y, y_, name='cost')
            # L2 for the MLP, without this, the accuracy will be reduced by 15%.
            L2 = 0
            for p in tl.layers.get_variables_with_name('relu/W', True, True):
                L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
            cost = ce + L2

            correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
            acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

            return net, cost, acc
Author: dccforever, Project: tensorlayer, Lines: 31, Source: tutorial_binarynet_cifar10_tfrecord.py


Example 19: discriminator

def discriminator(x_image, reuse=False):
    # get_variable(): get or create a variable instead of a direct call to tf.Variable
    if reuse:
        tf.get_variable_scope().reuse_variables()

    # Each layer's input is the previous layer's output (the first layer takes the 28x28 image).

    # First conv and pool layers; stddev is the standard deviation of the weight initializer.
    # W holds the layer's weights feeding into the next layer.
    W_conv1 = tf.get_variable('d_wconv1', [5, 5, 1, 8], initializer=tf.truncated_normal_initializer(stddev=0.02))
    b_conv1 = tf.get_variable('d_bconv1', [8], initializer=tf.constant_initializer(0))  # b is the bias term
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # z = W*x + b; ReLU is the activation
    h_pool1 = avg_pool_2x2(h_conv1)  # pooling

    # Second Conv and Pool Layers
    W_conv2 = tf.get_variable('d_wconv2', [5, 5, 8, 16], initializer=tf.truncated_normal_initializer(stddev=0.02))
    b_conv2 = tf.get_variable('d_bconv2', [16], initializer=tf.constant_initializer(0))
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = avg_pool_2x2(h_conv2)

    # First Fully Connected Layer
    W_fc1 = tf.get_variable('d_wfc1', [7 * 7 * 16, 32], initializer=tf.truncated_normal_initializer(stddev=0.02))
    b_fc1 = tf.get_variable('d_bfc1', [32], initializer=tf.constant_initializer(0))
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*16])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)  # matmul: matrix multiplication

    # Second Fully Connected Layer
    W_fc2 = tf.get_variable('d_wfc2', [32, 1], initializer=tf.truncated_normal_initializer(stddev=0.02))
    b_fc2 = tf.get_variable('d_bfc2', [1], initializer=tf.constant_initializer(0))

    # Final Layer
    y_conv = (tf.matmul(h_fc1, W_fc2) + b_fc2)
    return y_conv
Author: SebastianElvis, Project: ElvisProjs, Lines: 33, Source: gan_mnist.py


Example 20: add_model

	def add_model(self, input_data):
		with tf.variable_scope("FirstConv") as CLayer1:
			w_conv1 = tf.get_variable("w_conv1", (11, 11, 1, 32), initializer=tf.truncated_normal_initializer(stddev=0.1))
			b_conv1 = tf.get_variable("b_conv1", (32), initializer=tf.constant_initializer(0.1))
			conv1 =   tf.nn.conv2d(input_data, w_conv1, strides=[1, 1, 1, 1], padding='VALID')
			hconv1 =  tf.nn.relu(conv1 + b_conv1)
			h_pool1 = tf.nn.max_pool(hconv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
			with tf.variable_scope("SecondConv") as CLayer2:
				w_conv2 = tf.get_variable("w_conv2", (11, 11, 32, 64),
				                          initializer=tf.truncated_normal_initializer(stddev=0.1))
				# ... the rest of this example is truncated in the original source; the
				# initializer argument above is completed by analogy with w_conv1.
