This article collects and summarizes typical usage examples of the Python function tensorflow.contrib.slim.conv2d. If you have been wondering what exactly conv2d does, how to use it, and what real code that calls it looks like, the curated examples here may help.
20 code examples of the conv2d function are presented below, ordered by popularity by default.
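As a warm-up before the examples, here is a minimal sketch of a bare slim.conv2d call. It assumes a TensorFlow 1.x environment where tf.contrib.slim still exists (the contrib modules were removed in TensorFlow 2.x); the input shape is invented for illustration.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# NHWC input: a batch of eight 32x32 RGB images (shape chosen for illustration).
images = tf.placeholder(tf.float32, [8, 32, 32, 3], name='images')

# 16 output channels with a 3x3 kernel. slim.conv2d creates the weight and
# bias variables itself and defaults to stride 1, 'SAME' padding, and a ReLU
# activation, which is why so many of the examples below omit those arguments.
net = slim.conv2d(images, 16, [3, 3], scope='conv1')

print(net.get_shape())  # (8, 32, 32, 16); 'SAME' padding preserves H and W.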
Example 1: build_arch_baseline
def build_arch_baseline(input, is_train: bool, num_classes: int):
    bias_initializer = tf.truncated_normal_initializer(
        mean=0.0, stddev=0.01)  # tf.constant_initializer(0.0)
    # The paper did not mention any regularization; a common L2 regularizer on the weights is added here.
    weights_regularizer = tf.contrib.layers.l2_regularizer(5e-04)
    tf.logging.info('input shape: {}'.format(input.get_shape()))

    # weights_initializer=initializer,
    with slim.arg_scope([slim.conv2d, slim.fully_connected], trainable=is_train,
                        biases_initializer=bias_initializer, weights_regularizer=weights_regularizer):
        with tf.variable_scope('relu_conv1') as scope:
            output = slim.conv2d(input, num_outputs=32, kernel_size=[5, 5],
                                 stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)

        output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer1')
        tf.logging.info('output shape: {}'.format(output.get_shape()))

        with tf.variable_scope('relu_conv2') as scope:
            output = slim.conv2d(output, num_outputs=64, kernel_size=[5, 5],
                                 stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)

        output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer2')
        tf.logging.info('output shape: {}'.format(output.get_shape()))

        output = slim.flatten(output)
        output = slim.fully_connected(output, 1024, scope='relu_fc3', activation_fn=tf.nn.relu)
        tf.logging.info('output shape: {}'.format(output.get_shape()))

        output = slim.dropout(output, 0.5, scope='dp')
        output = slim.fully_connected(output, num_classes, scope='final_layer', activation_fn=None)
        tf.logging.info('output shape: {}'.format(output.get_shape()))
        return output
Author: lzqkean | Project: deep_learning | Lines: 32 | Source: capsnet_em.py
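A hedged usage sketch for the function above; the 28x28 grayscale input and 10 classes are assumptions chosen to look MNIST-like, not values taken from the original project.

inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
logits = build_arch_baseline(inputs, is_train=True, num_classes=10)
# Two SAME conv + 2x2 max-pool stages halve H and W twice: 28 -> 14 -> 7,
# so the flattened feature entering 'relu_fc3' has 7 * 7 * 64 = 3136 units.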
Example 2: create_inner_block
def create_inner_block(
        incoming, scope, nonlinearity=tf.nn.elu,
        weights_initializer=tf.truncated_normal_initializer(1e-3),
        bias_initializer=tf.zeros_initializer(), regularizer=None,
        increase_dim=False, summarize_activations=True):
    n = incoming.get_shape().as_list()[-1]
    stride = 1
    if increase_dim:
        n *= 2
        stride = 2

    incoming = slim.conv2d(
        incoming, n, [3, 3], stride, activation_fn=nonlinearity, padding="SAME",
        normalizer_fn=_batch_norm_fn, weights_initializer=weights_initializer,
        biases_initializer=bias_initializer, weights_regularizer=regularizer,
        scope=scope + "/1")
    if summarize_activations:
        tf.summary.histogram(incoming.name + "/activations", incoming)

    incoming = slim.dropout(incoming, keep_prob=0.6)

    incoming = slim.conv2d(
        incoming, n, [3, 3], 1, activation_fn=None, padding="SAME",
        normalizer_fn=None, weights_initializer=weights_initializer,
        biases_initializer=bias_initializer, weights_regularizer=regularizer,
        scope=scope + "/2")
    return incoming
Author: BenJamesbabala | Project: deep_sort | Lines: 27 | Source: generate_detections.py
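To make the increase_dim behaviour concrete, a small sketch; it assumes _batch_norm_fn is defined in the enclosing module (in the original repository it wraps slim.batch_norm), so only the shapes are the point here.

x = tf.placeholder(tf.float32, [None, 16, 16, 64])
# increase_dim=True doubles the channel count (64 -> 128) and halves the
# spatial resolution via the stride-2 first convolution.
y = create_inner_block(x, scope='block1', increase_dim=True)
print(y.get_shape())  # (?, 8, 8, 128)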
Example 3: create_test_network_7
def create_test_network_7():
    """Aligned network for test, with a control dependency.

    The graph is similar to create_test_network_1(), except that it includes an
    assert operation on the left branch.

    Returns:
      g: Tensorflow graph object (Graph proto).
    """
    g = ops.Graph()
    with g.as_default():
        # An 8x8 test image.
        x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1), name='input_image')
        # Left branch.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        l1_shape = array_ops.shape(l1)
        assert_op = control_flow_ops.Assert(
            gen_math_ops.equal(l1_shape[1], 2), [l1_shape], summarize=4)
        # Right branch.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
        l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
        l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
        # Addition.
        with ops.control_dependencies([assert_op]):
            nn.relu(l1 + l3, name='output')
    return g
Author: Ajaycs99 | Project: tensorflow | Lines: 26 | Source: receptive_field_test.py
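A note on what "aligned" means here: the left branch reaches a total stride of 4 with a single stride-4 convolution, while the right branch stacks two stride-2 convolutions for the same total stride; the explicit array_ops.pad on the right branch then shifts that branch's receptive field so both branches are centered on the same input locations before the addition, and the Assert pins the expected spatial size of l1 (8 / 4 = 2).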
Example 4: content_extractor
def content_extractor(self, images, reuse=False):
    # images: (batch, 32, 32, 3) or (batch, 32, 32, 1)
    if images.get_shape()[3] == 1:
        # For the MNIST dataset, replicate the grayscale image 3 times.
        images = tf.image.grayscale_to_rgb(images)

    with tf.variable_scope('content_extractor', reuse=reuse):
        with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
                            stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
            with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
                                activation_fn=tf.nn.relu,
                                is_training=(self.mode == 'train' or self.mode == 'pretrain')):
                net = slim.conv2d(images, 64, [3, 3], scope='conv1')    # (batch_size, 16, 16, 64)
                net = slim.batch_norm(net, scope='bn1')
                net = slim.conv2d(net, 128, [3, 3], scope='conv2')      # (batch_size, 8, 8, 128)
                net = slim.batch_norm(net, scope='bn2')
                net = slim.conv2d(net, 256, [3, 3], scope='conv3')      # (batch_size, 4, 4, 256)
                net = slim.batch_norm(net, scope='bn3')
                net = slim.conv2d(net, 128, [4, 4], padding='VALID', scope='conv4')  # (batch_size, 1, 1, 128)
                net = slim.batch_norm(net, activation_fn=tf.nn.tanh, scope='bn4')
                if self.mode == 'pretrain':
                    net = slim.conv2d(net, 10, [1, 1], padding='VALID', scope='out')
                    net = slim.flatten(net)
                return net
Author: ALISCIFP | Project: domain-transfer-network | Lines: 25 | Source: model.py
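The nested slim.arg_scope calls are doing most of the work in this example: they install call-time defaults (stride=2, 'SAME' padding, Xavier weights, and batch-norm hyperparameters) that every layer in the scope inherits, while 'conv4' shows that any individual call can still override a default (padding='VALID'). A minimal self-contained sketch of that mechanism, with shapes invented for illustration:

import tensorflow as tf
import tensorflow.contrib.slim as slim

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
with slim.arg_scope([slim.conv2d], stride=2, padding='SAME'):
    a = slim.conv2d(x, 64, [3, 3], scope='a')  # inherits stride=2, 'SAME' -> (?, 16, 16, 64)
    b = slim.conv2d(a, 64, [3, 3], padding='VALID', scope='b')  # overrides padding only -> (?, 7, 7, 64)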
Example 5: create_test_network_4
def create_test_network_4():
    """Misaligned network for test.

    The graph corresponds to a variation from the example from the second figure
    in go/cnn-rf-computation#arbitrary-computation-graphs. Layer 2 uses 'SAME'
    padding, which makes its padding dependent on the input image dimensionality.
    In this case, the effective padding will be undetermined, and the utility is
    not able to check the network alignment.

    Returns:
      g: Tensorflow graph object (Graph proto).
    """
    g = ops.Graph()
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = array_ops.placeholder(
            dtypes.float32, (None, None, None, 1), name='input_image')
        # Left branch.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch.
        l2 = slim.conv2d(x, 1, [3, 3], stride=2, scope='L2', padding='SAME')
        l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
        # Addition.
        nn.relu(l1 + l3, name='output')
    return g
Author: Ajaycs99 | Project: tensorflow | Lines: 25 | Source: receptive_field_test.py
Example 6: decoder
def decoder(self, latent_var, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('decoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.fully_connected(latent_var, 4096, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                net = tf.reshape(net, [-1, 4, 4, 256], name='Reshape')

                net = tf.image.resize_nearest_neighbor(net, size=(8, 8), name='Upsample_1')
                net = slim.conv2d(net, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1b')

                net = tf.image.resize_nearest_neighbor(net, size=(16, 16), name='Upsample_2')
                net = slim.conv2d(net, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2b')

                net = tf.image.resize_nearest_neighbor(net, size=(32, 32), name='Upsample_3')
                net = slim.conv2d(net, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3b')

                net = tf.image.resize_nearest_neighbor(net, size=(64, 64), name='Upsample_4')
                net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=None, scope='Conv2d_4c')
    return net
Author: NickyGeorge | Project: facenet | Lines: 32 | Source: dfc_vae_resnet.py
Example 7: build_arch
def build_arch(input, is_train, num_classes):
    data_size = int(input.get_shape()[1])
    # initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
    # bias_initializer = tf.constant_initializer(0.0)
    # weights_regularizer = tf.contrib.layers.l2_regularizer(5e-04)
    with slim.arg_scope([slim.conv2d], trainable=is_train):
        with tf.variable_scope('conv1') as scope:
            output = slim.conv2d(input, num_outputs=256, kernel_size=[9, 9], stride=1, padding='VALID', scope=scope)
            data_size = data_size - 8
            assert output.get_shape() == [cfg.batch_size, data_size, data_size, 256]
            tf.logging.info('conv1 output shape: {}'.format(output.get_shape()))

        with tf.variable_scope('primary_caps_layer') as scope:
            output = slim.conv2d(output, num_outputs=32 * 8, kernel_size=[9, 9], stride=2, padding='VALID', scope=scope)
            output = tf.reshape(output, [cfg.batch_size, -1, 8])
            output = squash(output)
            data_size = int(np.floor((data_size - 8) / 2))
            assert output.get_shape() == [cfg.batch_size, data_size * data_size * 32, 8]
            tf.logging.info('primary capsule output shape: {}'.format(output.get_shape()))

        with tf.variable_scope('digit_caps_layer') as scope:
            with tf.variable_scope('u') as scope:
                u_hats = vec_transform(output, num_classes, 16)
                assert u_hats.get_shape() == [cfg.batch_size, num_classes, data_size * data_size * 32, 16]
                tf.logging.info('digit_caps_layer u_hats shape: {}'.format(u_hats.get_shape()))

            with tf.variable_scope('routing') as scope:
                output = dynamic_routing(u_hats)
                assert output.get_shape() == [cfg.batch_size, num_classes, 16]
                tf.logging.info('the output capsule has shape: {}'.format(output.get_shape()))

    output_len = tf.norm(output, axis=-1)
    return output, output_len
Author: lzqkean | Project: deep_learning | Lines: 35 | Source: capsnet_dynamic_routing.py
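The data_size bookkeeping above tracks VALID-padding shrinkage: conv1 (9x9, stride 1) maps size -> size - 8, and the primary-capsule conv (9x9, stride 2) yields floor((size - 9) / 2) + 1, which coincides with the code's floor((size - 8) / 2) for the even sizes that occur here. For a 28x28 MNIST input this gives 28 -> 20 -> 6, i.e. 6 * 6 * 32 = 1152 primary capsules of dimension 8.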
Example 8: iter_func
def iter_func(self, state):
    sc = predictron_arg_scope()

    with tf.variable_scope('value'):
        value_net = slim.fully_connected(slim.flatten(state), 32, scope='fc0')
        value_net = layers.batch_norm(value_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        value_net = slim.fully_connected(value_net, self.maze_size, activation_fn=None, scope='fc1')

    with slim.arg_scope(sc):
        net = slim.conv2d(state, 32, [3, 3], scope='conv1')
        net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv1/preact')
        net_flatten = slim.flatten(net, scope='conv1/flatten')

        with tf.variable_scope('reward'):
            reward_net = slim.fully_connected(net_flatten, 32, scope='fc0')
            reward_net = layers.batch_norm(reward_net, activation_fn=tf.nn.relu, scope='fc0/preact')
            reward_net = slim.fully_connected(reward_net, self.maze_size, activation_fn=None, scope='fc1')

        with tf.variable_scope('gamma'):
            gamma_net = slim.fully_connected(net_flatten, 32, scope='fc0')
            gamma_net = layers.batch_norm(gamma_net, activation_fn=tf.nn.relu, scope='fc0/preact')
            gamma_net = slim.fully_connected(gamma_net, self.maze_size, activation_fn=tf.nn.sigmoid, scope='fc1')

        with tf.variable_scope('lambda'):
            lambda_net = slim.fully_connected(net_flatten, 32, scope='fc0')
            lambda_net = layers.batch_norm(lambda_net, activation_fn=tf.nn.relu, scope='fc0/preact')
            lambda_net = slim.fully_connected(lambda_net, self.maze_size, activation_fn=tf.nn.sigmoid, scope='fc1')

        net = slim.conv2d(net, 32, [3, 3], scope='conv2')
        net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv2/preact')
        net = slim.conv2d(net, 32, [3, 3], scope='conv3')
        net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv3/preact')

    return net, reward_net, gamma_net, lambda_net, value_net
Author: b-kartal | Project: predictron | Lines: 34 | Source: predictron.py
Example 9: create_test_network
def create_test_network():
    """Convolutional neural network for test.

    Returns:
      g: Tensorflow graph object (Graph proto).
    """
    g = ops.Graph()
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = array_ops.placeholder(
            dtypes.float32, (None, None, None, 1), name='input_image')
        # Left branch before first addition.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch before first addition.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
        l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
        l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
        # First addition.
        l4 = nn.relu(l1 + l3, name='L4_relu')
        # Left branch after first addition.
        l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
        # Right branch after first addition.
        l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
        # Final addition.
        gen_math_ops.add(l5, l6, name='L7_add')
    return g
Author: Ajaycs99 | Project: tensorflow | Lines: 27 | Source: graph_compute_order_test.py
Example 10: create_test_network_9
def create_test_network_9():
    """Aligned network for test, including an intermediate addition.

    The graph is the same as create_test_network_8(), except that VALID padding
    is changed to SAME.

    Returns:
      g: Tensorflow graph object (Graph proto).
    """
    g = ops.Graph()
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = array_ops.placeholder(
            dtypes.float32, (None, None, None, 1), name='input_image')
        # Left branch before first addition.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='SAME')
        # Right branch before first addition.
        l2 = slim.conv2d(x, 1, [3, 3], stride=2, scope='L2', padding='SAME')
        l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='SAME')
        # First addition.
        l4 = nn.relu(l1 + l3)
        # Left branch after first addition.
        l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
        # Right branch after first addition.
        l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
        # Final addition.
        nn.relu(l5 + l6, name='output')
    return g
Author: Ajaycs99 | Project: tensorflow | Lines: 29 | Source: receptive_field_test.py
Example 11: build_feature_pyramid
def build_feature_pyramid(self):
    '''
    reference: https://github.com/CharlesShang/FastMaskRCNN
    build P2, P3, P4, P5, P6
    :return: multi-scale feature map
    '''
    feature_pyramid = {}
    with tf.variable_scope('feature_pyramid'):
        with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(self.rpn_weight_decay)):
            feature_pyramid['P5'] = slim.conv2d(self.feature_maps_dict['C5'],
                                                num_outputs=256,
                                                kernel_size=[1, 1],
                                                stride=1,
                                                scope='build_P5')
            # P6 is a downsampled version of P5.
            feature_pyramid['P6'] = slim.max_pool2d(feature_pyramid['P5'],
                                                    kernel_size=[2, 2], stride=2, scope='build_P6')
            for layer in range(4, 1, -1):
                p, c = feature_pyramid['P' + str(layer + 1)], self.feature_maps_dict['C' + str(layer)]
                up_sample_shape = tf.shape(c)
                up_sample = tf.image.resize_nearest_neighbor(p, [up_sample_shape[1], up_sample_shape[2]],
                                                             name='build_P%d/up_sample_nearest_neighbor' % layer)
                c = slim.conv2d(c, num_outputs=256, kernel_size=[1, 1], stride=1,
                                scope='build_P%d/reduce_dimension' % layer)
                p = up_sample + c
                p = slim.conv2d(p, 256, kernel_size=[3, 3], stride=1,
                                padding='SAME', scope='build_P%d/avoid_aliasing' % layer)
                feature_pyramid['P' + str(layer)] = p
    return feature_pyramid
Author: mbossX | Project: RRPN_FPN_Tensorflow | Lines: 35 | Source: build_rpn.py
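The loop fills in P4, P3, and P2 top-down: each coarser pyramid level is upsampled (nearest neighbor) to the spatial size of the corresponding backbone map C, the C map is projected to 256 channels by a 1x1 convolution, the two are summed, and a 3x3 convolution smooths the sum; the 'avoid_aliasing' scope name reflects the usual FPN rationale of reducing the aliasing introduced by upsampling.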
Example 12: encoder
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = images

                net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')

                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')

                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')

                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')

                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
Author: NickyGeorge | Project: facenet | Lines: 29 | Source: dfc_vae_resnet.py
Example 13: conv_net_kelz
def conv_net_kelz(inputs):
    """Builds the ConvNet from Kelz 2016."""
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            activation_fn=tf.nn.relu,
            weights_initializer=tf.contrib.layers.variance_scaling_initializer(
                factor=2.0, mode='FAN_AVG', uniform=True)):
        net = slim.conv2d(
            inputs, 32, [3, 3], scope='conv1', normalizer_fn=slim.batch_norm)
        net = slim.conv2d(
            net, 32, [3, 3], scope='conv2', normalizer_fn=slim.batch_norm)
        net = slim.max_pool2d(net, [1, 2], stride=[1, 2], scope='pool2')
        net = slim.dropout(net, 0.25, scope='dropout2')

        net = slim.conv2d(
            net, 64, [3, 3], scope='conv3', normalizer_fn=slim.batch_norm)
        net = slim.max_pool2d(net, [1, 2], stride=[1, 2], scope='pool3')
        net = slim.dropout(net, 0.25, scope='dropout3')

        # Flatten while preserving batch and time dimensions.
        dims = tf.shape(net)
        net = tf.reshape(net, (dims[0], dims[1],
                               net.shape[2].value * net.shape[3].value), 'flatten4')

        net = slim.fully_connected(net, 512, scope='fc5')
        net = slim.dropout(net, 0.5, scope='dropout5')
        return net
Author: Alice-ren | Project: magenta | Lines: 29 | Source: model.py
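Two details are worth noting in this example: the [1, 2] max-pools stride only along the second spatial axis, so the time dimension (axis 1) of the input is never reduced, and the final tf.reshape accordingly flattens only the last two axes, keeping the batch and time dimensions intact for whatever sequence layers consume the features downstream.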
Example 14: network_det
def network_det(self, inputs, reuse=False):
    if reuse:
        tf.get_variable_scope().reuse_variables()
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.truncated_normal_initializer(0.0, 0.01)):
        conv1 = slim.conv2d(inputs, 96, [11, 11], 4, padding='VALID', scope='conv1')
        max1 = slim.max_pool2d(conv1, [3, 3], 2, padding='VALID', scope='max1')

        conv2 = slim.conv2d(max1, 256, [5, 5], 1, scope='conv2')
        max2 = slim.max_pool2d(conv2, [3, 3], 2, padding='VALID', scope='max2')

        conv3 = slim.conv2d(max2, 384, [3, 3], 1, scope='conv3')
        conv4 = slim.conv2d(conv3, 384, [3, 3], 1, scope='conv4')
        conv5 = slim.conv2d(conv4, 256, [3, 3], 1, scope='conv5')
        pool5 = slim.max_pool2d(conv5, [3, 3], 2, padding='VALID', scope='pool5')

        shape = int(np.prod(pool5.get_shape()[1:]))
        fc6 = slim.fully_connected(tf.reshape(pool5, [-1, shape]), 4096, scope='fc6')
        fc_detection = slim.fully_connected(fc6, 512, scope='fc_det1')
        out_detection = slim.fully_connected(fc_detection, 2, scope='fc_det2', activation_fn=None)
    return out_detection
Author: dmehr | Project: HyperFace-TensorFlow-implementation | Lines: 27 | Source: model_prediction.py
Example 15: localization_squeezenet
def localization_squeezenet(self, inputs):
    with tf.variable_scope('localization_network'):
        with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,
                            padding='SAME',
                            weights_initializer=tf.constant_initializer(0.0)):
            conv1 = slim.conv2d(inputs, 64, [3, 3], 2, padding='VALID', scope='conv1')
            pool1 = slim.max_pool2d(conv1, [2, 2], 2, scope='pool1')
            fire2 = self.fire_module(pool1, 16, 64, scope='fire2')
            fire3 = self.fire_module(fire2, 16, 64, scope='fire3', res_connection=True)
            fire4 = self.fire_module(fire3, 32, 128, scope='fire4')
            pool4 = slim.max_pool2d(fire4, [2, 2], 2, scope='pool4')
            fire5 = self.fire_module(pool4, 32, 128, scope='fire5', res_connection=True)
            fire6 = self.fire_module(fire5, 48, 192, scope='fire6')
            fire7 = self.fire_module(fire6, 48, 192, scope='fire7', res_connection=True)
            fire8 = self.fire_module(fire7, 64, 256, scope='fire8')
            pool8 = slim.max_pool2d(fire8, [2, 2], 2, scope='pool8')
            fire9 = self.fire_module(pool8, 64, 256, scope='fire9', res_connection=True)
            conv10 = slim.conv2d(fire9, 128, [1, 1], 1, scope='conv10')

            shape = int(np.prod(conv10.get_shape()[1:]))
            identity = np.array([[1., 0., 0.],
                                 [0., 1., 0.]])
            identity = identity.flatten()
            fc11 = slim.fully_connected(tf.reshape(conv10, [-1, shape]), 6,
                                        biases_initializer=tf.constant_initializer(identity), scope='fc11')
    return fc11
Author: dmehr | Project: HyperFace-TensorFlow-implementation | Lines: 26 | Source: model.py
Example 16: create_test_network
def create_test_network():
    """Convolutional neural network for test.

    Returns:
      name_to_node: Dict keyed by node name, each entry containing the node's
        NodeDef.
    """
    g = ops.Graph()
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = array_ops.placeholder(
            dtypes.float32, (None, None, None, 1), name='input_image')
        # Left branch before first addition.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch before first addition.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
        l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
        l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
        # First addition.
        l4 = nn.relu(l1 + l3, name='L4_relu')
        # Left branch after first addition.
        l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
        # Right branch after first addition.
        l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
        # Final addition.
        gen_math_ops.add(l5, l6, name='L7_add')
    name_to_node = graph_compute_order.parse_graph_nodes(g.as_graph_def())
    return name_to_node
Author: Ajaycs99 | Project: tensorflow | Lines: 29 | Source: parse_layer_parameters_test.py
Example 17: create_test_network_8
def create_test_network_8():
    """Aligned network for test, including an intermediate addition.

    The graph is similar to create_test_network_1(), except that it includes a
    few more layers on top. The added layers compose two different branches
    whose receptive fields are different. This makes this test case more
    challenging; in particular, this test fails if a naive DFS-like algorithm
    is used for RF computation.

    Returns:
      g: Tensorflow graph object (Graph proto).
    """
    g = ops.Graph()
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = array_ops.placeholder(
            dtypes.float32, (None, None, None, 1), name='input_image')
        # Left branch before first addition.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch before first addition.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
        l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
        l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
        # First addition.
        l4 = nn.relu(l1 + l3)
        # Left branch after first addition.
        l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='VALID')
        # Right branch after first addition.
        l6_pad = array_ops.pad(l4, [[0, 0], [1, 0], [1, 0], [0, 0]])
        l6 = slim.conv2d(l6_pad, 1, [3, 3], stride=2, scope='L6', padding='VALID')
        # Final addition.
        nn.relu(l5 + l6, name='output')
    return g
Author: Ajaycs99 | Project: tensorflow | Lines: 34 | Source: receptive_field_test.py
Example 18: _build_graph
def _build_graph(self):
    normalized_input = tf.div(self._input, 255.0)

    # d = tf.divide(1.0, tf.sqrt(8. * 8. * 4.))
    conv1 = slim.conv2d(normalized_input, 16, [8, 8], activation_fn=tf.nn.relu,
                        padding='VALID', stride=4, biases_initializer=None)
    # weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d)

    # d = tf.divide(1.0, tf.sqrt(4. * 4. * 16.))
    conv2 = slim.conv2d(conv1, 32, [4, 4], activation_fn=tf.nn.relu,
                        padding='VALID', stride=2, biases_initializer=None)
    # weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d)

    flattened = slim.flatten(conv2)

    # d = tf.divide(1.0, tf.sqrt(2592.))
    fc1 = slim.fully_connected(flattened, 256, activation_fn=tf.nn.relu, biases_initializer=None)
    # weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d)

    # d = tf.divide(1.0, tf.sqrt(256.))
    # Estimate of the value function.
    self.value_func_prediction = slim.fully_connected(fc1, 1, activation_fn=None, biases_initializer=None)
    # weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d)

    # Softmax output with one entry per action, representing the probability of taking an action.
    self.policy_predictions = slim.fully_connected(fc1, self.output_size, activation_fn=tf.nn.softmax,
                                                   biases_initializer=None)
Author: thalles753 | Project: machine-learning | Lines: 28 | Source: A3C_Network.py
Example 19: _build_layers_v2
def _build_layers_v2(self, input_dict, num_outputs, options):
    inputs = input_dict["obs"]
    filters = options.get("conv_filters")
    if not filters:
        filters = _get_filter_config(inputs.shape.as_list()[1:])
    activation = get_activation_fn(options.get("conv_activation"))

    with tf.name_scope("vision_net"):
        for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):
            inputs = slim.conv2d(
                inputs,
                out_size,
                kernel,
                stride,
                activation_fn=activation,
                scope="conv{}".format(i))
        out_size, kernel, stride = filters[-1]
        fc1 = slim.conv2d(
            inputs,
            out_size,
            kernel,
            stride,
            activation_fn=activation,
            padding="VALID",
            scope="fc1")
        fc2 = slim.conv2d(
            fc1,
            num_outputs, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope="fc2")
        return flatten(fc2), flatten(fc1)
Author: jamescasbon | Project: ray | Lines: 33 | Source: visionnet.py
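The "fc1" and "fc2" layers above are convolutions standing in for fully connected layers: once a VALID convolution's kernel covers the entire remaining feature map, its output is 1x1 spatially, and a following [1, 1] conv then acts exactly like a dense layer per sample. A minimal sketch of the pattern, with shapes invented for illustration:

import tensorflow as tf
import tensorflow.contrib.slim as slim

x = tf.placeholder(tf.float32, [None, 7, 7, 64])
fc = slim.conv2d(x, 512, [7, 7], padding='VALID', scope='as_fc')        # -> (?, 1, 1, 512)
out = slim.conv2d(fc, 10, [1, 1], activation_fn=None, scope='logits')   # -> (?, 1, 1, 10)
logits = slim.flatten(out)                                              # -> (?, 10)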
Example 20: build_graph
def build_graph(top_k):
    keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
    images = tf.placeholder(dtype=tf.float32, shape=[None, 64, 64, 1], name='image_batch')
    labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')
    is_training = tf.placeholder(dtype=tf.bool, shape=[], name='train_flag')

    with tf.device('/gpu:0'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params={'is_training': is_training}):
            conv3_1 = slim.conv2d(images, 64, [3, 3], 1, padding='SAME', scope='conv3_1')
            max_pool_1 = slim.max_pool2d(conv3_1, [2, 2], [2, 2], padding='SAME', scope='pool1')
            conv3_2 = slim.conv2d(max_pool_1, 128, [3, 3], padding='SAME', scope='conv3_2')
            max_pool_2 = slim.max_pool2d(conv3_2, [2, 2], [2, 2], padding='SAME', scope='pool2')
            conv3_3 = slim.conv2d(max_pool_2, 256, [3, 3], padding='SAME', scope='conv3_3')
            max_pool_3 = slim.max_pool2d(conv3_3, [2, 2], [2, 2], padding='SAME', scope='pool3')
            conv3_4 = slim.conv2d(max_pool_3, 512, [3, 3], padding='SAME', scope='conv3_4')
            conv3_5 = slim.conv2d(conv3_4, 512, [3, 3], padding='SAME', scope='conv3_5')
            max_pool_4 = slim.max_pool2d(conv3_5, [2, 2], [2, 2], padding='SAME', scope='pool4')

            flatten = slim.flatten(max_pool_4)
            fc1 = slim.fully_connected(slim.dropout(flatten, keep_prob), 1024,
                                       activation_fn=tf.nn.relu, scope='fc1')
            logits = slim.fully_connected(slim.dropout(fc1, keep_prob), FLAGS.charset_size,
                                          activation_fn=None, scope='fc2')

        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if update_ops:
            updates = tf.group(*update_ops)
            loss = control_flow_ops.with_dependencies([updates], loss)

        global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
        train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step)
        probabilities = tf.nn.softmax(logits)

        tf.summary.scalar('loss', loss)
        tf.summary.scalar('accuracy', accuracy)
        merged_summary_op = tf.summary.merge_all()
        predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
        accuracy_in_top_k = tf.reduce_mean(tf.cast(tf.nn.in_top_k(probabilities, labels, top_k), tf.float32))

    return {'images': images,
            'labels': labels,
            'keep_prob': keep_prob,
            'top_k': top_k,
            'global_step': global_step,
            'train_op': train_op,
            'loss': loss,
            'is_training': is_training,
            'accuracy': accuracy,
            'accuracy_top_k': accuracy_in_top_k,
            'merged_summary_op': merged_summary_op,
            'predicted_distribution': probabilities,
            'predicted_index_top_k': predicted_index_top_k,
            'predicted_val_top_k': predicted_val_top_k}
Author: oraSC | Project: Chinese-Character-Recognition | Lines: 57 | Source: chinese_character_recognition_bn.py
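A hedged sketch of driving the returned dictionary in a session; the batch size, the 0..9 label range, and the dropout keep probability are invented for illustration, and the labels must stay below FLAGS.charset_size in a real run. allow_soft_placement lets the '/gpu:0' pin fall back to CPU when no GPU is present.

import numpy as np
import tensorflow as tf

g = build_graph(top_k=3)
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(16, 64, 64, 1).astype(np.float32)  # fake image batch
    lbls = np.random.randint(0, 10, size=16)                  # fake labels; must be < FLAGS.charset_size
    _, loss = sess.run([g['train_op'], g['loss']],
                       feed_dict={g['images']: batch, g['labels']: lbls,
                                  g['keep_prob']: 0.8, g['is_training']: True})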
Note: The tensorflow.contrib.slim.conv2d examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, so consult each project's license before redistributing or reusing the code. Do not reproduce this compilation without permission.