This article collects typical usage examples of the Python function tensorflow.contrib.slim.fully_connected. If you have been wondering what fully_connected does, how to call it, or what real-world uses look like, the curated code examples below should help.
The article presents 20 code examples of fully_connected, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
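Before the examples, here is a minimal sketch of the layer itself. The tensor shapes, sizes, and scope names are illustrative, not taken from any example below; note that tf.contrib.slim exists only in TensorFlow 1.x.

import tensorflow as tf
slim = tf.contrib.slim

x = tf.placeholder(tf.float32, [None, 784])
# slim.fully_connected(inputs, num_outputs, ...) applies a ReLU activation by default
h = slim.fully_connected(x, 256, scope='hidden')
# pass activation_fn=None to get a plain linear layer, e.g. for logits
logits = slim.fully_connected(h, 10, activation_fn=None, scope='logits')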
Example 1: _build_graph
def _build_graph(self):
    normalized_input = tf.div(self._input, 255.0)
    #d = tf.divide(1.0, tf.sqrt(8. * 8. * 4.))
    conv1 = slim.conv2d(normalized_input, 16, [8, 8], activation_fn=tf.nn.relu,
                        padding='VALID', stride=4, biases_initializer=None)
                        # weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))
    #d = tf.divide(1.0, tf.sqrt(4. * 4. * 16.))
    conv2 = slim.conv2d(conv1, 32, [4, 4], activation_fn=tf.nn.relu,
                        padding='VALID', stride=2, biases_initializer=None)
                        # weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))
    flattened = slim.flatten(conv2)
    #d = tf.divide(1.0, tf.sqrt(2592.))
    fc1 = slim.fully_connected(flattened, 256, activation_fn=tf.nn.relu, biases_initializer=None)
    # weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))
    #d = tf.divide(1.0, tf.sqrt(256.))

    # estimate of the value function
    self.value_func_prediction = slim.fully_connected(fc1, 1, activation_fn=None, biases_initializer=None)
    # weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))

    # softmax output with one entry per action, representing the probability of taking that action
    self.policy_predictions = slim.fully_connected(fc1, self.output_size, activation_fn=tf.nn.softmax,
                                                   biases_initializer=None)
Author: thalles753, Project: machine-learning, Lines: 28, Source: A3C_Network.py
Example 2: _create_transformation
def _create_transformation(self, input, n_output, reuse, scope_prefix):
    """Create the deterministic transformation between stochastic layers.

    If self.hparams.nonlinear:
        2 x tanh layers
    Else:
        1 x linear layer
    """
    if self.hparams.nonlinear:
        h = slim.fully_connected(input,
                                 self.hparams.n_hidden,
                                 reuse=reuse,
                                 activation_fn=tf.nn.tanh,
                                 scope='%s_nonlinear_1' % scope_prefix)
        h = slim.fully_connected(h,
                                 self.hparams.n_hidden,
                                 reuse=reuse,
                                 activation_fn=tf.nn.tanh,
                                 scope='%s_nonlinear_2' % scope_prefix)
        h = slim.fully_connected(h,
                                 n_output,
                                 reuse=reuse,
                                 activation_fn=None,
                                 scope='%s' % scope_prefix)
    else:
        h = slim.fully_connected(input,
                                 n_output,
                                 reuse=reuse,
                                 activation_fn=None,
                                 scope='%s' % scope_prefix)
    return h
Author: ALISCIFP, Project: models, Lines: 31, Source: rebar.py
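Example 2 threads a reuse flag into every call. As background: slim.fully_connected creates its weight and bias variables under the given scope, and reuse=True makes a later call share those variables instead of creating new ones. A minimal sketch of that pattern, with illustrative shapes and scope names:

x1 = tf.placeholder(tf.float32, [None, 8])
x2 = tf.placeholder(tf.float32, [None, 8])
h1 = slim.fully_connected(x1, 16, scope='shared')              # creates shared/weights and shared/biases
h2 = slim.fully_connected(x2, 16, reuse=True, scope='shared')  # reuses those same variables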
Example 3: create_network
def create_network(self, name):
    with tf.variable_scope(name) as scope:
        inputs = tf.placeholder(fl32, [None, self.state_dim], 'inputs')
        actions = tf.placeholder(fl32, [None, self.action_dim], 'actions')
        with slim.arg_scope(
            [slim.fully_connected],
            activation_fn=relu,
            weights_initializer=uniform,
            weights_regularizer=None
        ):
            net = tf.concat(1, [inputs, actions])
            net = slim.fully_connected(net, 400)
            net = slim.fully_connected(net, 300)
            '''net = slim.fully_connected(inputs, 400)
            w1 = tf.get_variable(
                "w1", shape=[400, 300], initializer=uniform
            )
            w2 = tf.get_variable(
                "w2", shape=[self.action_dim, 300], initializer=uniform
            )
            b = tf.get_variable(
                "b", shape=[300], initializer=constant
            )
            net = relu(tf.matmul(net, w1) + tf.matmul(actions, w2) + b)'''
            out = slim.fully_connected(net, 1, activation_fn=None)
    return (inputs, actions, out, scope.name)
Author: jpp46, Project: CurrentProjects, Lines: 30, Source: networks.py
Example 4: __init__
def __init__(self):
    # policy network
    self.observations = tf.placeholder(tf.float32, [None, 4], name='input_x')
    self.input_y = tf.placeholder(tf.float32, [None, 1], name='input_y')
    self.reward = tf.placeholder(tf.float32, name='reward_signal')
    l1 = slim.fully_connected(self.observations,
                              hidden,
                              biases_initializer=None,
                              activation_fn=tf.nn.relu)
    self.score = slim.fully_connected(l1,
                                      1,
                                      biases_initializer=None)
    self.probability = tf.nn.sigmoid(self.score)

    loglike = tf.log(self.input_y * (self.input_y - self.probability)
                     + (1 - self.input_y) * (self.input_y + self.probability))
    loss = -tf.reduce_mean(loglike * self.reward)

    self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    self.w1grad = tf.placeholder(tf.float32, name='batch_grad1')
    self.w2grad = tf.placeholder(tf.float32, name='batch_grad2')
    batch_grad = [self.w1grad, self.w2grad]
    self.tvars = tf.trainable_variables()
    self.newgrads = tf.gradients(loss, self.tvars)
    self.update = self.optimizer.apply_gradients(zip(batch_grad, self.tvars))
Author: yaoyaowd, Project: tensorflow_demo, Lines: 25, Source: 3_model_rl.py
Example 5: discriminative_network
def discriminative_network(x):
    """Outputs probability in logits."""
    h0 = slim.fully_connected(x, H * 2, activation_fn=tf.tanh)
    h1 = slim.fully_connected(h0, H * 2, activation_fn=tf.tanh)
    h2 = slim.fully_connected(h1, H * 2, activation_fn=tf.tanh)
    h3 = slim.fully_connected(h2, 1, activation_fn=None)
    return h3
Author: ekostem, Project: edward, Lines: 7, Source: gan_wasserstein_synthetic.py
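Example 5 repeats activation_fn=tf.tanh on three consecutive calls. The same network could set the default once with slim.arg_scope, as several later examples do. A sketch of that variant, assuming the module-level size constant H from the original:

def discriminative_network(x):
    """Outputs probability in logits."""
    # tanh becomes the default activation for every fully_connected call in this scope
    with slim.arg_scope([slim.fully_connected], activation_fn=tf.tanh):
        h0 = slim.fully_connected(x, H * 2)
        h1 = slim.fully_connected(h0, H * 2)
        h2 = slim.fully_connected(h1, H * 2)
    return slim.fully_connected(h2, 1, activation_fn=None)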
Example 6: _build_layers
def _build_layers(self, inputs, num_outputs, options):
    """Process the flattened inputs.

    Note that dict inputs will be flattened into a vector. To define a
    model that processes the components separately, use _build_layers_v2().
    """
    hiddens = options.get("fcnet_hiddens")
    activation = get_activation_fn(options.get("fcnet_activation"))

    with tf.name_scope("fc_net"):
        i = 1
        last_layer = inputs
        for size in hiddens:
            label = "fc{}".format(i)
            last_layer = slim.fully_connected(
                last_layer,
                size,
                weights_initializer=normc_initializer(1.0),
                activation_fn=activation,
                scope=label)
            i += 1
        label = "fc_out"
        output = slim.fully_connected(
            last_layer,
            num_outputs,
            weights_initializer=normc_initializer(0.01),
            activation_fn=None,
            scope=label)
        return output, last_layer
Author: jamescasbon, Project: ray, Lines: 30, Source: fcnet.py
Example 7: __init__
def __init__(self, lr, s_size, a_size, h_size):
    # These lines establish the feed-forward part of the network. The agent takes a state and produces an action.
    self.state_in = tf.placeholder(shape=[None, s_size], dtype=tf.float32)
    hidden = slim.fully_connected(self.state_in, h_size, biases_initializer=None, activation_fn=tf.nn.relu)
    self.output = slim.fully_connected(hidden, a_size, activation_fn=tf.nn.softmax, biases_initializer=None)
    self.chosen_action = tf.argmax(self.output, 1)

    # The next six lines establish the training procedure. We feed the reward and chosen action into the network
    # to compute the loss, and use it to update the network.
    self.reward_holder = tf.placeholder(shape=[None], dtype=tf.float32)
    self.action_holder = tf.placeholder(shape=[None], dtype=tf.int32)
    self.indexes = tf.range(0, tf.shape(self.output)[0]) * tf.shape(self.output)[1] + self.action_holder
    self.responsible_outputs = tf.gather(tf.reshape(self.output, [-1]), self.indexes)
    self.loss = -tf.reduce_mean(tf.log(self.responsible_outputs) * self.reward_holder)

    tvars = tf.trainable_variables()
    self.gradient_holders = []
    for idx2, var in enumerate(tvars):
        placeholder = tf.placeholder(tf.float32, name=str(idx2) + '_holder')
        self.gradient_holders.append(placeholder)
    self.gradients = tf.gradients(self.loss, tvars)

    optimizer = tf.train.AdamOptimizer(learning_rate=lr)
    self.update_batch = optimizer.apply_gradients(zip(self.gradient_holders, tvars))
Author: dangraf, Project: PycharmProjects, Lines: 27, Source: cartpole.py
Example 8: __init__
def __init__(self, actions, td_discount_rate=0.99, learningRate=0.0001, epsilonGreedy=0.1):
    self.learningRate = learningRate
    self.td_discount_rate = td_discount_rate
    self.epsilonGreedy = epsilonGreedy

    self.input = tf.placeholder('float', shape=[None, 4])
    x1 = slim.fully_connected(self.input, 32, scope='fc/fc_1')
    x1 = tf.nn.relu(x1)
    self.Qout = slim.fully_connected(x1, actions)
    self.predict = tf.argmax(self.Qout, 1)
    self.logQVal = tf.summary.scalar('QVal', tf.reduce_mean(self.predict))

    # get the best-action Q values
    self.newQout = tf.placeholder(shape=[None, 2], dtype=tf.float32)
    self.epsilonInput = tf.placeholder(dtype=tf.float32, name="epsilonInput")
    self.newstateReward = tf.placeholder(shape=[None], dtype=tf.float32)
    # tf.reduce_max replaces np.amax from the original snippet, which cannot
    # operate on symbolic tensors
    self.tdTarget = self.newstateReward + td_discount_rate * tf.reduce_max(self.newQout, axis=1)
    self.td_error = tf.square(self.tdTarget - tf.reduce_max(self.Qout, axis=1))
    # turn the TD errors into a single scalar value
    self.loss = tf.reduce_mean(self.td_error)
    self.tdLogger = tf.summary.scalar('tdLoss', self.loss)
    self.tdTargetLogger = tf.summary.histogram('tdTarget', self.tdTarget)
    self.epsilonLogger = tf.summary.scalar('epsilon', self.epsilonInput)
    # minimize the loss (mean of TD errors)
    self.trainer = tf.train.AdamOptimizer(learning_rate=self.learningRate)
    self.updateModel = self.trainer.minimize(self.loss)
    self.memory = Memory(memory_capacity)
Author: flutist, Project: CartPole-v0, Lines: 32, Source: q-network.py
Example 9: fprop
def fprop(self, x, **kwargs):
    del kwargs
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
        net = slim.fully_connected(x, 60)
        logits = slim.fully_connected(net, 10, activation_fn=None)
        return {self.O_LOGITS: logits,
                self.O_PROBS: tf.nn.softmax(logits)}
Author: limin24kobe, Project: cleverhans, Lines: 7, Source: test_attacks.py
Example 10: __init__
def __init__(self,
             env,
             hidden_size=8,
             learning_rate=0.01,
             gamma=0.99):
    self.state_dim = env.observation_space.shape[0]
    self.action_dim = env.action_space.n
    self.gamma = gamma
    self.history = []

    # Define network
    self.state_in = tf.placeholder(shape=[None, self.state_dim], dtype=tf.float32)
    hidden = slim.fully_connected(self.state_in, hidden_size,
                                  biases_initializer=None,
                                  activation_fn=tf.nn.relu)
    self.output = slim.fully_connected(hidden, self.action_dim,
                                       biases_initializer=None,
                                       activation_fn=tf.nn.softmax)

    self.reward = tf.placeholder(shape=[None], dtype=tf.float32)
    self.actual_action = tf.placeholder(shape=[None], dtype=tf.int32)
    self.indexes = tf.range(0, tf.shape(self.output)[0]) * self.action_dim \
                   + self.actual_action
    self.actual_output = tf.gather(tf.reshape(self.output, [-1]), self.indexes)
    self.loss = -tf.reduce_mean(tf.log(self.actual_output) * self.reward)

    self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    self.train_op = slim.learning.create_train_op(self.loss, self.optimizer)
    self.session = tf.InteractiveSession()
    self.session.run(tf.initialize_all_variables())
Author: yaoyaowd, Project: tensorflow_demo, Lines: 30, Source: 2_dqn.py
Example 11: localization_VGG16
def localization_VGG16(self, inputs):
    with tf.variable_scope('localization_network'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.constant_initializer(0.0)):
            net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            shape = int(np.prod(net.get_shape()[1:]))
            net = slim.fully_connected(tf.reshape(net, [-1, shape]), 4096, scope='fc6')
            net = slim.fully_connected(net, 1024, scope='fc7')
            identity = np.array([[1., 0., 0.],
                                 [0., 1., 0.]])
            identity = identity.flatten()
            net = slim.fully_connected(net, 6, biases_initializer=tf.constant_initializer(identity), scope='fc8')
    return net
Author: dmehr, Project: HyperFace-TensorFlow-implementation, Lines: 27, Source: model.py
Example 12: network_det
def network_det(self, inputs, reuse=False):
    if reuse:
        tf.get_variable_scope().reuse_variables()
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.truncated_normal_initializer(0.0, 0.01)):
        conv1 = slim.conv2d(inputs, 96, [11, 11], 4, padding='VALID', scope='conv1')
        max1 = slim.max_pool2d(conv1, [3, 3], 2, padding='VALID', scope='max1')
        conv2 = slim.conv2d(max1, 256, [5, 5], 1, scope='conv2')
        max2 = slim.max_pool2d(conv2, [3, 3], 2, padding='VALID', scope='max2')
        conv3 = slim.conv2d(max2, 384, [3, 3], 1, scope='conv3')
        conv4 = slim.conv2d(conv3, 384, [3, 3], 1, scope='conv4')
        conv5 = slim.conv2d(conv4, 256, [3, 3], 1, scope='conv5')
        pool5 = slim.max_pool2d(conv5, [3, 3], 2, padding='VALID', scope='pool5')
        shape = int(np.prod(pool5.get_shape()[1:]))
        fc6 = slim.fully_connected(tf.reshape(pool5, [-1, shape]), 4096, scope='fc6')
        fc_detection = slim.fully_connected(fc6, 512, scope='fc_det1')
        out_detection = slim.fully_connected(fc_detection, 2, scope='fc_det2', activation_fn=None)
    return out_detection
Author: dmehr, Project: HyperFace-TensorFlow-implementation, Lines: 27, Source: model_prediction.py
Example 13: _init
def _init(self, inputs, num_outputs, options):
    hiddens = options.get("fcnet_hiddens", [256, 256])
    fcnet_activation = options.get("fcnet_activation", "tanh")
    if fcnet_activation == "tanh":
        activation = tf.nn.tanh
    elif fcnet_activation == "relu":
        activation = tf.nn.relu

    with tf.name_scope("fc_net"):
        i = 1
        last_layer = inputs
        for size in hiddens:
            label = "fc{}".format(i)
            last_layer = slim.fully_connected(
                last_layer, size,
                weights_initializer=normc_initializer(1.0),
                activation_fn=activation,
                scope=label)
            i += 1
        label = "fc_out"
        output = slim.fully_connected(
            last_layer, num_outputs,
            weights_initializer=normc_initializer(0.01),
            activation_fn=None, scope=label)
        return output, last_layer
Author: adgirish, Project: ray, Lines: 26, Source: fcnet.py
Example 14: encoder
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = images
                net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
Author: NickyGeorge, Project: facenet, Lines: 29, Source: dfc_vae_resnet.py
Example 15: build_decoder_rnn
def build_decoder_rnn(self, first_step):
    with tf.variable_scope("cnn"):
        image_emb = slim.fully_connected(self.fc7, self.input_encoding_size, reuse=True, activation_fn=None, scope='encode_image')
    with tf.variable_scope("rnnlm"):
        if first_step:
            rnn_input = image_emb  # At the first step, the input is the embedded image
        else:
            # At later time steps, the input is the embedding of the previous word.
            # The previous word is a placeholder.
            self.decoder_prev_word = tf.placeholder(tf.int32, [None])
            rnn_input = tf.nn.embedding_lookup(self.Wemb, self.decoder_prev_word)

        batch_size = tf.shape(rnn_input)[0]

        tf.get_variable_scope().reuse_variables()
        if not first_step:
            # If not the first step, the states are also placeholders.
            self.decoder_initial_state = initial_state = utils.get_placeholder_state(self.cell.state_size)
            self.decoder_flattened_state = utils.flatten_state(initial_state)
        else:
            # The states for the first step are zero.
            initial_state = self.cell.zero_state(batch_size, tf.float32)

        outputs, state = tf.contrib.legacy_seq2seq.rnn_decoder([rnn_input], initial_state, self.cell)
        logits = slim.fully_connected(outputs[0], self.vocab_size + 1, activation_fn=None, scope='logit')
        decoder_probs = tf.reshape(tf.nn.softmax(logits), [batch_size, self.vocab_size + 1])
        decoder_state = utils.flatten_state(state)

    # output the current word distribution and states
    return [decoder_probs, decoder_state]
Author: ruotianluo, Project: neuraltalk2-tensorflow, Lines: 31, Source: ShowTellModel.py
Example 16: build_arch_baseline
def build_arch_baseline(input, is_train: bool, num_classes: int):
    bias_initializer = tf.truncated_normal_initializer(
        mean=0.0, stddev=0.01)  # tf.constant_initializer(0.0)
    # The paper did not mention any regularization; a common l2 regularizer on the weights is added here
    weights_regularizer = tf.contrib.layers.l2_regularizer(5e-04)

    tf.logging.info('input shape: {}'.format(input.get_shape()))

    # weights_initializer=initializer,
    with slim.arg_scope([slim.conv2d, slim.fully_connected], trainable=is_train, biases_initializer=bias_initializer, weights_regularizer=weights_regularizer):
        with tf.variable_scope('relu_conv1') as scope:
            output = slim.conv2d(input, num_outputs=32, kernel_size=[
                                 5, 5], stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)

        output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer1')

        tf.logging.info('output shape: {}'.format(output.get_shape()))

        with tf.variable_scope('relu_conv2') as scope:
            output = slim.conv2d(output, num_outputs=64, kernel_size=[
                                 5, 5], stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)

        output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer2')

        tf.logging.info('output shape: {}'.format(output.get_shape()))

        output = slim.flatten(output)
        output = slim.fully_connected(output, 1024, scope='relu_fc3', activation_fn=tf.nn.relu)
        tf.logging.info('output shape: {}'.format(output.get_shape()))

        output = slim.dropout(output, 0.5, scope='dp')
        output = slim.fully_connected(output, num_classes, scope='final_layer', activation_fn=None)
        tf.logging.info('output shape: {}'.format(output.get_shape()))
        return output
Author: lzqkean, Project: deep_learning, Lines: 32, Source: capsnet_em.py
Example 17: cross_ent_loss
def cross_ent_loss(output, x, y):
    loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=output)
    loss = tf.reduce_mean(loss)

    num_class = int(output.get_shape()[-1])
    data_size = int(x.get_shape()[1])

    # reconstruction loss
    y = tf.one_hot(y, num_class, dtype=tf.float32)
    y = tf.expand_dims(y, axis=2)
    output = tf.expand_dims(output, axis=2)
    output = tf.reshape(tf.multiply(output, y), shape=[cfg.batch_size, -1])
    tf.logging.info("decoder input value dimension:{}".format(output.get_shape()))

    with tf.variable_scope('decoder'):
        output = slim.fully_connected(output, 512, trainable=True)
        output = slim.fully_connected(output, 1024, trainable=True)
        output = slim.fully_connected(output, data_size * data_size,
                                      trainable=True, activation_fn=tf.sigmoid)
        x = tf.reshape(x, shape=[cfg.batch_size, -1])
        reconstruction_loss = tf.reduce_mean(tf.square(output - x))

    # regularization loss
    regularization = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    loss_all = tf.add_n([loss] + [0.0005 * reconstruction_loss] + regularization)

    return loss_all, reconstruction_loss, output
Author: lzqkean, Project: deep_learning, Lines: 28, Source: capsnet_em.py
Example 18: build_generator
def build_generator(self):
    """
    Generator for generating captions.
    Supports taking the argmax sample or sampling from the distribution.
    No beam search here; beam search is in the decoder.
    """
    # Variables for the sampling setting
    self.sample_max = tf.Variable(True, trainable=False, name="sample_max")
    self.sample_temperature = tf.Variable(1.0, trainable=False, name="temperature")

    self.generator = []
    with tf.variable_scope("rnnlm"):
        flattened_ctx = tf.reshape(self.context, [self.batch_size, 196, 512])
        ctx_mean = tf.reduce_mean(flattened_ctx, 1)

        tf.get_variable_scope().reuse_variables()

        initial_state = utils.get_initial_state(ctx_mean, self.cell.state_size)

        # projected context
        # This is used in the attention module; do it outside the loop to reduce redundant computation
        # with tf.variable_scope("attention"):
        if self.att_hid_size == 0:
            pctx = slim.fully_connected(flattened_ctx, 1, activation_fn=None, scope='ctx_att')  # (batch) * 196 * 1
        else:
            pctx = slim.fully_connected(flattened_ctx, self.att_hid_size, activation_fn=None, scope='ctx_att')  # (batch) * 196 * att_hid_size

        rnn_input = tf.nn.embedding_lookup(self.Wemb, tf.zeros([self.batch_size], tf.int32))
        prev_h = utils.last_hidden_vec(initial_state)

        self.g_alphas = []
        outputs = []
        state = initial_state
        for ind in range(MAX_STEPS):
            with tf.variable_scope("attention"):
                alpha = self.get_alpha(prev_h, pctx)
                self.g_alphas.append(alpha)
                weighted_context = tf.reduce_sum(flattened_ctx * tf.expand_dims(alpha, 2), 1)

            output, state = self.cell(tf.concat(axis=1, values=[weighted_context, rnn_input]), state)
            outputs.append(output)
            prev_h = output

            # Get the input of the next timestep
            prev_logit = slim.fully_connected(prev_h, self.vocab_size + 1, activation_fn=None, scope='logit')
            prev_symbol = tf.stop_gradient(tf.cond(self.sample_max,
                lambda: tf.argmax(prev_logit, 1),  # pick the word with the largest probability as the input of the next time step
                lambda: tf.squeeze(
                    tf.multinomial(tf.nn.log_softmax(prev_logit) / self.sample_temperature, 1), 1)))  # sample from the distribution
            self.generator.append(prev_symbol)
            rnn_input = tf.nn.embedding_lookup(self.Wemb, prev_symbol)

        self.g_output = output = tf.reshape(tf.concat(axis=1, values=outputs), [-1, self.rnn_size])  # outputs[1:], because we don't calculate loss on time 0
        self.g_logits = logits = slim.fully_connected(output, self.vocab_size + 1, activation_fn=None, scope='logit')
        self.g_probs = probs = tf.reshape(tf.nn.softmax(logits), [self.batch_size, MAX_STEPS, self.vocab_size + 1])

    self.generator = tf.transpose(tf.reshape(tf.concat(axis=0, values=self.generator), [MAX_STEPS, -1]))
Author: ruotianluo, Project: neuraltalk2-tensorflow, Lines: 59, Source: ShowAttendTellModel_old.py
Example 19: neural_network
def neural_network(self, X):
    """pi, mu, sigma = NN(x; theta)"""
    hidden1 = slim.fully_connected(X, 25)
    hidden2 = slim.fully_connected(hidden1, 25)
    self.pi = slim.fully_connected(hidden2, self.K, activation_fn=tf.nn.softmax)
    self.mus = slim.fully_connected(hidden2, self.K, activation_fn=None)
    self.sigmas = slim.fully_connected(hidden2, self.K,
                                       activation_fn=tf.nn.softplus)
Author: blei-lab, Project: edward, Lines: 8, Source: tf_mixture_density_network_slim.py
Example 20: build_graph
def build_graph(top_k):
    keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
    images = tf.placeholder(dtype=tf.float32, shape=[None, 64, 64, 1], name='image_batch')
    labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')
    is_training = tf.placeholder(dtype=tf.bool, shape=[], name='train_flag')
    with tf.device('/gpu:0'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params={'is_training': is_training}):
            conv3_1 = slim.conv2d(images, 64, [3, 3], 1, padding='SAME', scope='conv3_1')
            max_pool_1 = slim.max_pool2d(conv3_1, [2, 2], [2, 2], padding='SAME', scope='pool1')
            conv3_2 = slim.conv2d(max_pool_1, 128, [3, 3], padding='SAME', scope='conv3_2')
            max_pool_2 = slim.max_pool2d(conv3_2, [2, 2], [2, 2], padding='SAME', scope='pool2')
            conv3_3 = slim.conv2d(max_pool_2, 256, [3, 3], padding='SAME', scope='conv3_3')
            max_pool_3 = slim.max_pool2d(conv3_3, [2, 2], [2, 2], padding='SAME', scope='pool3')
            conv3_4 = slim.conv2d(max_pool_3, 512, [3, 3], padding='SAME', scope='conv3_4')
            conv3_5 = slim.conv2d(conv3_4, 512, [3, 3], padding='SAME', scope='conv3_5')
            max_pool_4 = slim.max_pool2d(conv3_5, [2, 2], [2, 2], padding='SAME', scope='pool4')

            flatten = slim.flatten(max_pool_4)
            fc1 = slim.fully_connected(slim.dropout(flatten, keep_prob), 1024,
                                       activation_fn=tf.nn.relu, scope='fc1')
            logits = slim.fully_connected(slim.dropout(fc1, keep_prob), FLAGS.charset_size, activation_fn=None,
                                          scope='fc2')
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if update_ops:
            updates = tf.group(*update_ops)
            loss = control_flow_ops.with_dependencies([updates], loss)

        global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
        train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step)
        probabilities = tf.nn.softmax(logits)

        tf.summary.scalar('loss', loss)
        tf.summary.scalar('accuracy', accuracy)
        merged_summary_op = tf.summary.merge_all()
        predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
        accuracy_in_top_k = tf.reduce_mean(tf.cast(tf.nn.in_top_k(probabilities, labels, top_k), tf.float32))

    return {'images': images,
            'labels': labels,
            'keep_prob': keep_prob,
            'top_k': top_k,
            'global_step': global_step,
            'train_op': train_op,
            'loss': loss,
            'is_training': is_training,
            'accuracy': accuracy,
            'accuracy_top_k': accuracy_in_top_k,
            'merged_summary_op': merged_summary_op,
            'predicted_distribution': probabilities,
            'predicted_index_top_k': predicted_index_top_k,
            'predicted_val_top_k': predicted_val_top_k}
Author: oraSC, Project: Chinese-Character-Recognition, Lines: 57, Source: chinese_character_recognition_bn.py
Note: The tensorflow.contrib.slim.fully_connected examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.