This article collects typical usage examples of the Python function tensorflow.contrib.slim.arg_scope. If you have been wondering what arg_scope does, how it is used, or what real usage looks like, the curated examples below should help.
Twenty code examples of arg_scope are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python samples.
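Before the examples, here is a minimal, self-contained sketch of the core pattern (written for this article, not taken from any of the projects below): slim.arg_scope installs default keyword arguments for the listed ops, and an explicit argument at a call site overrides those defaults.

import tensorflow as tf
import tensorflow.contrib.slim as slim

def tiny_net(images):
    # Every slim.conv2d call in this block inherits these defaults.
    with slim.arg_scope([slim.conv2d],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(1e-4),
                        padding='SAME'):
        net = slim.conv2d(images, 32, [3, 3], scope='conv1')                # uses the defaults
        net = slim.conv2d(net, 64, [3, 3], padding='VALID', scope='conv2')  # per-call override
    return net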
Example 1: get_network_byname
def get_network_byname(net_name,
                       inputs,
                       num_classes=None,
                       is_training=True,
                       global_pool=True,
                       output_stride=None,
                       spatial_squeeze=True):
    if net_name not in ['resnet_v1_50', 'mobilenet_224', 'inception_resnet', 'vgg16', 'resnet_v1_101']:
        raise ValueError('''not include network: {}, net_name must in [resnet_v1_50, mobilenet_224,
                         inception_resnet, vgg16, resnet_v1_101]'''.format(net_name))
    if net_name == 'resnet_v1_50':
        with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=cfgs.WEIGHT_DECAY[net_name])):
            logits, end_points = resnet_v1.resnet_v1_50(inputs=inputs,
                                                        num_classes=num_classes,
                                                        is_training=is_training,
                                                        global_pool=global_pool,
                                                        output_stride=output_stride,
                                                        spatial_squeeze=spatial_squeeze)
        return logits, end_points
    if net_name == 'resnet_v1_101':
        with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=cfgs.WEIGHT_DECAY[net_name])):
            logits, end_points = resnet_v1.resnet_v1_101(inputs=inputs,
                                                         num_classes=num_classes,
                                                         is_training=is_training,
                                                         global_pool=global_pool,
                                                         output_stride=output_stride,
                                                         spatial_squeeze=spatial_squeeze)
        return logits, end_points
Author: mbossX, Project: RRPN_FPN_Tensorflow, Lines: 33, Source: network_factory.py
Example 2: encoder
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = images
                net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
Author: NickyGeorge, Project: facenet, Lines: 29, Source: dfc_vae_resnet.py
Example 3: inference
def inference(image_batch, keep_probability,
              phase_train=True, bottleneck_layer_size=512,
              weight_decay=0.0):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'scale': True,
        'is_training': phase_train,
        'updates_collections': None,
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with tf.variable_scope('Resface'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            weights_regularizer=slim.l2_regularizer(weight_decay),
                            activation_fn=prelu,
                            normalizer_fn=slim.batch_norm,
                            # normalizer_fn=None,
                            normalizer_params=batch_norm_params):
            with slim.arg_scope([slim.conv2d], kernel_size=3):
                return resface20(images=image_batch,
                                 keep_probability=keep_probability,
                                 phase_train=phase_train,
                                 bottleneck_layer_size=bottleneck_layer_size,
                                 reuse=None)
Author: Joker316701882, Project: Additive-Margin-Softmax, Lines: 25, Source: resface.py
Example 4: inference
def inference(image_batch, keep_probability,
              phase_train=True, bottleneck_layer_size=512,
              weight_decay=0.0):
    with tf.variable_scope('LResnetE_IR'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            weights_regularizer=slim.l2_regularizer(weight_decay),
                            biases_initializer=None,  # default: no biases
                            activation_fn=None,
                            normalizer_fn=None):
            with slim.arg_scope([slim.conv2d], kernel_size=3):
                with slim.arg_scope([slim.batch_norm],
                                    decay=0.995,
                                    epsilon=1e-5,
                                    scale=True,
                                    is_training=phase_train,
                                    activation_fn=prelu,
                                    updates_collections=None,
                                    variables_collections=[tf.GraphKeys.TRAINABLE_VARIABLES]):
                    return LResnet50E_IR(images=image_batch,
                                         keep_probability=keep_probability,
                                         phase_train=phase_train,
                                         bottleneck_layer_size=bottleneck_layer_size,
                                         reuse=None)
Author: Joker316701882, Project: Additive-Margin-Softmax, Lines: 26, Source: insightface.py
Example 5: _image_to_head
def _image_to_head(self, is_training, reuse=None):
    # Base bottleneck
    assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
    net_conv = self._image
    if cfg.MOBILENET.FIXED_LAYERS > 0:
        with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
            net_conv = mobilenet_v1_base(net_conv,
                                         _CONV_DEFS[:cfg.MOBILENET.FIXED_LAYERS],
                                         starting_layer=0,
                                         depth_multiplier=self._depth_multiplier,
                                         reuse=reuse,
                                         scope=self._scope)
    if cfg.MOBILENET.FIXED_LAYERS < 12:
        with slim.arg_scope(mobilenet_v1_arg_scope(is_training=is_training)):
            net_conv = mobilenet_v1_base(net_conv,
                                         _CONV_DEFS[cfg.MOBILENET.FIXED_LAYERS:12],
                                         starting_layer=cfg.MOBILENET.FIXED_LAYERS,
                                         depth_multiplier=self._depth_multiplier,
                                         reuse=reuse,
                                         scope=self._scope)
    self._act_summaries.append(net_conv)
    self._layers['head'] = net_conv
    return net_conv
Author: StanislawAntol, Project: tf-faster-rcnn, Lines: 25, Source: mobilenet_v1.py
Example 6: _image_to_head
def _image_to_head(self, is_training, reuse=None):
    assert (0 <= cfg.RESNET.FIXED_BLOCKS <= 3)
    # Now the base is always fixed during training
    with slim.arg_scope(resnet_arg_scope(is_training=False)):
        net_conv = self._build_base()
    if cfg.RESNET.FIXED_BLOCKS > 0:
        with slim.arg_scope(resnet_arg_scope(is_training=False)):
            net_conv, _ = resnet_v1.resnet_v1(net_conv,
                                              self._blocks[0:cfg.RESNET.FIXED_BLOCKS],
                                              global_pool=False,
                                              include_root_block=False,
                                              reuse=reuse,
                                              scope=self._scope)
    if cfg.RESNET.FIXED_BLOCKS < 3:
        with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
            net_conv, _ = resnet_v1.resnet_v1(net_conv,
                                              self._blocks[cfg.RESNET.FIXED_BLOCKS:-1],
                                              global_pool=False,
                                              include_root_block=False,
                                              reuse=reuse,
                                              scope=self._scope)
    self._act_summaries.append(net_conv)
    self._layers['head'] = net_conv
    return net_conv
Author: StanislawAntol, Project: tf-faster-rcnn, Lines: 26, Source: resnet_v1.py
Example 7: mobilenet_v1_arg_scope
def mobilenet_v1_arg_scope(is_training=True,
                           stddev=0.09):
    batch_norm_params = {
        'is_training': False,
        'center': True,
        'scale': True,
        'decay': 0.9997,
        'epsilon': 0.001,
        'trainable': False,
    }
    # Set weight_decay for weights in Conv and DepthSepConv layers.
    weights_init = tf.truncated_normal_initializer(stddev=stddev)
    regularizer = tf.contrib.layers.l2_regularizer(cfg.MOBILENET.WEIGHT_DECAY)
    if cfg.MOBILENET.REGU_DEPTH:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        trainable=is_training,
                        weights_initializer=weights_init,
                        activation_fn=tf.nn.relu6,
                        normalizer_fn=slim.batch_norm,
                        padding='SAME'):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
                with slim.arg_scope([slim.separable_conv2d],
                                    weights_regularizer=depthwise_regularizer) as sc:
                    return sc
Author: StanislawAntol, Project: tf-faster-rcnn, Lines: 30, Source: mobilenet_v1.py
Example 8: resnet_arg_scope
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    batch_norm_params = {
        # NOTE: 'is_training' here does not work because inside resnet it gets reset:
        # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
        'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': cfg.RESNET.BN_TRAIN,
        'updates_collections': ops.GraphKeys.UPDATE_OPS
    }
    with arg_scope(
            [slim.conv2d],
            weights_regularizer=regularizers.l2_regularizer(weight_decay),
            weights_initializer=initializers.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=nn_ops.relu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc
Author: jacke121, Project: tf_rfcn, Lines: 26, Source: resnet_v1.py
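Examples 7 and 8 differ from most of the others: rather than building a network inside the scope, they return the scope object itself (sc / arg_sc). slim.arg_scope also accepts such a saved scope, so a caller can re-enter the captured defaults later. A minimal sketch of the consuming side, assuming the resnet_arg_scope above (build_trunk is a hypothetical model function, not from either project):

with slim.arg_scope(resnet_arg_scope(is_training=True)):
    # conv2d and batch_norm calls in here pick up the regularizer,
    # initializer, and batch-norm defaults captured above.
    net = build_trunk(images)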
Example 9: content_extractor
def content_extractor(self, images, reuse=False):
    # images: (batch, 32, 32, 3) or (batch, 32, 32, 1)
    if images.get_shape()[3] == 1:
        # For the mnist dataset, replicate the gray scale image 3 times.
        images = tf.image.grayscale_to_rgb(images)
    with tf.variable_scope('content_extractor', reuse=reuse):
        with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
                            stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
            with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
                                activation_fn=tf.nn.relu, is_training=(self.mode == 'train' or self.mode == 'pretrain')):
                net = slim.conv2d(images, 64, [3, 3], scope='conv1')               # (batch_size, 16, 16, 64)
                net = slim.batch_norm(net, scope='bn1')
                net = slim.conv2d(net, 128, [3, 3], scope='conv2')                 # (batch_size, 8, 8, 128)
                net = slim.batch_norm(net, scope='bn2')
                net = slim.conv2d(net, 256, [3, 3], scope='conv3')                 # (batch_size, 4, 4, 256)
                net = slim.batch_norm(net, scope='bn3')
                net = slim.conv2d(net, 128, [4, 4], padding='VALID', scope='conv4')  # (batch_size, 1, 1, 128)
                net = slim.batch_norm(net, activation_fn=tf.nn.tanh, scope='bn4')
                if self.mode == 'pretrain':
                    net = slim.conv2d(net, 10, [1, 1], padding='VALID', scope='out')
                    net = slim.flatten(net)
    return net
Author: ALISCIFP, Project: domain-transfer-network, Lines: 25, Source: model.py
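Note the per-call override in this example: the surrounding arg_scope sets padding='SAME', yet 'conv4' passes padding='VALID' explicitly, and the call-site argument wins. Explicit keyword arguments always take precedence over arg_scope defaults.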
Example 10: conv_tower_fn
def conv_tower_fn(self, images, is_training=True, reuse=None):
    """Computes convolutional features using the InceptionV3 model.

    Args:
      images: A tensor of shape [batch_size, height, width, channels].
      is_training: whether is training or not.
      reuse: whether or not the network and its variables should be reused. To
        be able to reuse 'scope' must be given.

    Returns:
      A tensor of shape [batch_size, OH, OW, N], where OWxOH is resolution of
      output feature map and N is number of output features (depends on the
      network architecture).
    """
    mparams = self._mparams['conv_tower_fn']
    logging.debug('Using final_endpoint=%s', mparams.final_endpoint)
    with tf.variable_scope('conv_tower_fn/INCE'):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=is_training):
                net, _ = inception.inception_v3_base(
                    images, final_endpoint=mparams.final_endpoint)
        return net
Author: 812864539, Project: models, Lines: 25, Source: model.py
Example 11: decoder
def decoder(self, latent_var, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('decoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.fully_connected(latent_var, 4096, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                net = tf.reshape(net, [-1, 4, 4, 256], name='Reshape')
                net = tf.image.resize_nearest_neighbor(net, size=(8, 8), name='Upsample_1')
                net = slim.conv2d(net, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1b')
                net = tf.image.resize_nearest_neighbor(net, size=(16, 16), name='Upsample_2')
                net = slim.conv2d(net, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2b')
                net = tf.image.resize_nearest_neighbor(net, size=(32, 32), name='Upsample_3')
                net = slim.conv2d(net, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3b')
                net = tf.image.resize_nearest_neighbor(net, size=(64, 64), name='Upsample_4')
                net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=None, scope='Conv2d_4c')
    return net
Author: NickyGeorge, Project: facenet, Lines: 32, Source: dfc_vae_resnet.py
Example 12: factory_fn
def factory_fn(image, reuse):
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=False):
        with slim.arg_scope([slim.conv2d, slim.fully_connected,
                             slim.batch_norm, slim.layer_norm],
                            reuse=reuse):
            features, logits = _create_network(
                image, reuse=reuse, weight_decay=weight_decay)
            return features, logits
Author: shmilymm, Project: deep_sort_yolov3, Lines: 9, Source: freeze_model.py
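When arg_scopes nest, their settings merge: an op listed in both scopes receives both sets of defaults, and the inner scope wins on any conflicting key. Here the outer scope pins is_training=False for batch_norm and dropout, while the inner scope adds reuse for the four listed layer types.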
Example 13: _build_network
def _build_network(self, sess, is_training=True):
    # select initializers
    if cfg.TRAIN.TRUNCATED:
        initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
        initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)
    else:
        initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
        initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)
    # Base bottleneck
    assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
    net_conv = self._image
    if cfg.MOBILENET.FIXED_LAYERS > 0:
        with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
            net_conv = mobilenet_v1_base(net_conv,
                                         _CONV_DEFS[:cfg.MOBILENET.FIXED_LAYERS],
                                         starting_layer=0,
                                         depth_multiplier=self._depth_multiplier,
                                         scope=self._scope)
    if cfg.MOBILENET.FIXED_LAYERS < 12:
        with slim.arg_scope(mobilenet_v1_arg_scope(is_training=is_training)):
            net_conv = mobilenet_v1_base(net_conv,
                                         _CONV_DEFS[cfg.MOBILENET.FIXED_LAYERS:12],
                                         starting_layer=cfg.MOBILENET.FIXED_LAYERS,
                                         depth_multiplier=self._depth_multiplier,
                                         scope=self._scope)
    self._act_summaries.append(net_conv)
    self._layers['head'] = net_conv
    with tf.variable_scope(self._scope, 'MobilenetV1'):
        # build the anchors for the image
        self._anchor_component()
        # region proposal network
        rois = self._region_proposal(net_conv, is_training, initializer)
        # region of interest pooling
        if cfg.POOLING_MODE == 'crop':
            pool5 = self._crop_pool_layer(net_conv, rois, "pool5")
        else:
            raise NotImplementedError
    with slim.arg_scope(mobilenet_v1_arg_scope(is_training=is_training)):
        fc7 = mobilenet_v1_base(pool5,
                                _CONV_DEFS[12:],
                                starting_layer=12,
                                depth_multiplier=self._depth_multiplier,
                                scope=self._scope)
    with tf.variable_scope(self._scope, 'MobilenetV1'):
        # average pooling done by reduce_mean
        fc7 = tf.reduce_mean(fc7, axis=[1, 2])
        # region classification
        cls_prob, bbox_pred = self._region_classification(fc7, is_training,
                                                          initializer, initializer_bbox)
    self._score_summaries.update(self._predictions)
    return rois, cls_prob, bbox_pred
Author: lz20061213, Project: quadrilateral, Lines: 57, Source: mobilenet_v1.py
Example 14: construct_embedding
def construct_embedding(self):
    """Builds a conv -> spatial softmax -> FC adaptation network."""
    is_training = self._is_training
    normalizer_params = {'is_training': is_training}
    with tf.variable_scope('tcn_net', reuse=self._reuse) as vs:
        self._adaptation_scope = vs.name
        with slim.arg_scope(
                [slim.layers.conv2d],
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params,
                weights_regularizer=slim.regularizers.l2_regularizer(
                    self._l2_reg_weight),
                biases_regularizer=slim.regularizers.l2_regularizer(
                    self._l2_reg_weight)):
            with slim.arg_scope(
                    [slim.layers.fully_connected],
                    activation_fn=tf.nn.relu,
                    normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params,
                    weights_regularizer=slim.regularizers.l2_regularizer(
                        self._l2_reg_weight),
                    biases_regularizer=slim.regularizers.l2_regularizer(
                        self._l2_reg_weight)):
                # Input to embedder is pre-trained inception output.
                net = self._pretrained_output
                # Optionally add more conv layers.
                for num_filters in self._additional_conv_sizes:
                    net = slim.layers.conv2d(
                        net, num_filters, kernel_size=[3, 3], stride=[1, 1])
                    net = slim.dropout(net, keep_prob=self._conv_hidden_keep_prob,
                                       is_training=is_training)
                # Take the spatial soft arg-max of the last convolutional layer.
                # This is a form of spatial attention over the activations.
                # See more here: http://arxiv.org/abs/1509.06113.
                net = tf.contrib.layers.spatial_softmax(net)
                self.spatial_features = net
                # Add fully connected layers.
                net = slim.layers.flatten(net)
                for fc_hidden_size in self._fc_hidden_sizes:
                    net = slim.layers.fully_connected(net, fc_hidden_size)
                    if self._fc_hidden_keep_prob < 1.0:
                        net = slim.dropout(net, keep_prob=self._fc_hidden_keep_prob,
                                           is_training=is_training)
                # Connect last FC layer to embedding.
                net = slim.layers.fully_connected(net, self._embedding_size,
                                                  activation_fn=None)
                # Optionally L2 normalize the embedding.
                if self._embedding_l2:
                    net = tf.nn.l2_normalize(net, dim=1)
                return net
Author: ALISCIFP, Project: models, Lines: 56, Source: model.py
Example 15: factory_fn
def factory_fn(image, reuse, l2_normalize):
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
        with slim.arg_scope([slim.conv2d, slim.fully_connected,
                             slim.batch_norm, slim.layer_norm],
                            reuse=reuse):
            features, logits = _create_network(
                image, num_classes, l2_normalize=l2_normalize,
                reuse=reuse, create_summaries=is_training,
                weight_decay=weight_decay)
            return features, logits
Author: BenJamesbabala, Project: deep_sort, Lines: 11, Source: generate_detections.py
Example 16: image_embedding
def image_embedding(images,
                    model_fn=resnet_v1_152,
                    trainable=True,
                    is_training=True,
                    weight_decay=0.0001,
                    batch_norm_decay=0.997,
                    batch_norm_epsilon=1e-5,
                    batch_norm_scale=True,
                    add_summaries=False,
                    reuse=False):
    """Extract image features from pretrained resnet model."""
    is_resnet_training = trainable and is_training
    batch_norm_params = {
        "is_training": is_resnet_training,
        "trainable": trainable,
        "decay": batch_norm_decay,
        "epsilon": batch_norm_epsilon,
        "scale": batch_norm_scale,
    }
    if trainable:
        weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    else:
        weights_regularizer = None
    with tf.variable_scope(model_fn.__name__, [images], reuse=reuse) as scope:
        with slim.arg_scope(
                [slim.conv2d],
                weights_regularizer=weights_regularizer,
                trainable=trainable):
            with slim.arg_scope(
                    [slim.conv2d],
                    weights_initializer=slim.variance_scaling_initializer(),
                    activation_fn=tf.nn.relu,
                    normalizer_fn=slim.batch_norm,
                    normalizer_params=batch_norm_params):
                with slim.arg_scope([slim.batch_norm],
                                    is_training=is_resnet_training,
                                    trainable=trainable):
                    with slim.arg_scope([slim.max_pool2d], padding="SAME"):
                        net, end_points = model_fn(
                            images, num_classes=None, global_pool=False,
                            is_training=is_resnet_training,
                            reuse=reuse, scope=scope)
    if add_summaries:
        for v in end_points.values():
            tf.contrib.layers.summaries.summarize_activation(v)
    return net
Author: qixiuai, Project: tensor2tensor, Lines: 52, Source: vqa_layers.py
Example 17: model
def model(images, weight_decay=1e-5, is_training=True):
    '''
    define the model; we use slim's implementation of resnet
    '''
    images = mean_image_subtraction(images)
    with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=weight_decay)):
        logits, end_points = resnet_v1.resnet_v1_50(images, is_training=is_training, scope='resnet_v1_50')
    with tf.variable_scope('feature_fusion', values=[end_points.values]):
        batch_norm_params = {
            'decay': 0.997,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training
        }
        with slim.arg_scope([slim.conv2d],
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params,
                            weights_regularizer=slim.l2_regularizer(weight_decay)):
            f = [end_points['pool5'], end_points['pool4'],
                 end_points['pool3'], end_points['pool2']]
            for i in range(4):
                print('Shape of f_{} {}'.format(i, f[i].shape))
            g = [None, None, None, None]
            h = [None, None, None, None]
            num_outputs = [None, 128, 64, 32]
            for i in range(4):
                if i == 0:
                    h[i] = f[i]
                else:
                    c1_1 = slim.conv2d(tf.concat([g[i-1], f[i]], axis=-1), num_outputs[i], 1)
                    h[i] = slim.conv2d(c1_1, num_outputs[i], 3)
                if i <= 2:
                    g[i] = unpool(h[i])
                else:
                    g[i] = slim.conv2d(h[i], num_outputs[i], 3)
                print('Shape of h_{} {}, g_{} {}'.format(i, h[i].shape, i, g[i].shape))
            # here we use a slightly different way for the regression part:
            # we first use a sigmoid to limit the regression range, and the
            # same is done for the angle map
            F_score = slim.conv2d(g[3], 1, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None)
            # 4 channels of axis-aligned bbox and 1 channel of rotation angle
            geo_map = slim.conv2d(g[3], 4, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None) * FLAGS.text_scale
            angle_map = (slim.conv2d(g[3], 1, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None) - 0.5) * np.pi/2  # angle is between [-45, 45]
            F_geometry = tf.concat([geo_map, angle_map], axis=-1)
    return F_score, F_geometry
Author: ausk, Project: EAST_ICPR, Lines: 50, Source: model.py
Example 18: generator
def generator(self, inputs, reuse=False):
    # inputs: (batch, 1, 1, 128)
    with tf.variable_scope('generator', reuse=reuse):
        with slim.arg_scope([slim.conv2d_transpose], padding='SAME', activation_fn=None,
                            stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
            with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
                                activation_fn=tf.nn.relu, is_training=(self.mode == 'train')):
                net = slim.conv2d_transpose(inputs, 512, [4, 4], padding='VALID', scope='conv_transpose1')  # (batch_size, 4, 4, 512)
                net = slim.batch_norm(net, scope='bn1')
                net = slim.conv2d_transpose(net, 256, [3, 3], scope='conv_transpose2')  # (batch_size, 8, 8, 256)
                net = slim.batch_norm(net, scope='bn2')
                net = slim.conv2d_transpose(net, 128, [3, 3], scope='conv_transpose3')  # (batch_size, 16, 16, 128)
                net = slim.batch_norm(net, scope='bn3')
                net = slim.conv2d_transpose(net, 1, [3, 3], activation_fn=tf.nn.tanh, scope='conv_transpose4')  # (batch_size, 32, 32, 1)
    return net
Author: ALISCIFP, Project: domain-transfer-network, Lines: 16, Source: model.py
Example 19: create_network
def create_network(self, name):
    with tf.variable_scope(name) as scope:
        inputs = tf.placeholder(fl32, [None, self.state_dim], 'inputs')
        with slim.arg_scope(
                [slim.fully_connected],
                activation_fn=relu,
                weights_initializer=uniform,
                weights_regularizer=None):
            net = slim.fully_connected(inputs, 1024)
            res = net = slim.fully_connected(net, 128)
            net = slim.fully_connected(net, 256)
            net = slim.fully_connected(net, 128, activation_fn=None)
            net = relu(net + res)
            res = net = slim.fully_connected(net, 128)
            net = slim.fully_connected(net, 256)
            net = slim.fully_connected(net, 128, activation_fn=None)
            net = relu(net + res)
            res = net = slim.fully_connected(net, 128)
            net = slim.fully_connected(net, 256)
            net = slim.fully_connected(net, 128, activation_fn=None)
            net = relu(net + res)
            outputs = slim.fully_connected(
                net, self.action_dim, activation_fn=tanh)
            outputs = tf.mul(outputs, self.bound)
    return (inputs, outputs, scope.name)
Author: jpp46, Project: CurrentProjects, Lines: 33, Source: resnet.py
Example 20: build_arch
def build_arch(input, is_train, num_classes):
    data_size = int(input.get_shape()[1])
    # initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
    # bias_initializer = tf.constant_initializer(0.0)
    # weights_regularizer = tf.contrib.layers.l2_regularizer(5e-04)
    with slim.arg_scope([slim.conv2d], trainable=is_train):  # activation_fn=None, biases_initializer=bias_initializer, weights_regularizer=weights_regularizer
        with tf.variable_scope('conv1') as scope:
            output = slim.conv2d(input, num_outputs=256, kernel_size=[9, 9], stride=1, padding='VALID', scope=scope)
            data_size = data_size - 8
            assert output.get_shape() == [cfg.batch_size, data_size, data_size, 256]
            tf.logging.info('conv1 output shape: {}'.format(output.get_shape()))
        with tf.variable_scope('primary_caps_layer') as scope:
            output = slim.conv2d(output, num_outputs=32*8, kernel_size=[9, 9], stride=2, padding='VALID', scope=scope)  # activation_fn=None
            output = tf.reshape(output, [cfg.batch_size, -1, 8])
            output = squash(output)
            data_size = int(np.floor((data_size - 8) / 2))
            assert output.get_shape() == [cfg.batch_size, data_size*data_size*32, 8]
            tf.logging.info('primary capsule output shape: {}'.format(output.get_shape()))
        with tf.variable_scope('digit_caps_layer') as scope:
            with tf.variable_scope('u') as scope:
                u_hats = vec_transform(output, num_classes, 16)
                assert u_hats.get_shape() == [cfg.batch_size, num_classes, data_size*data_size*32, 16]
                tf.logging.info('digit_caps_layer u_hats shape: {}'.format(u_hats.get_shape()))
            with tf.variable_scope('routing') as scope:
                output = dynamic_routing(u_hats)
                assert output.get_shape() == [cfg.batch_size, num_classes, 16]
                tf.logging.info('the output capsule has shape: {}'.format(output.get_shape()))
    output_len = tf.norm(output, axis=-1)
    return output, output_len
Author: lzqkean, Project: deep_learning, Lines: 35, Source: capsnet_dynamic_routing.py
Note: The tensorflow.contrib.slim.arg_scope examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.