This article collects typical usage examples of the tensorflow.multinomial function in Python. If you have been wondering what exactly multinomial does, how to use it, or where to find usage examples, the curated code samples below may help.
The following presents 20 code examples of the multinomial function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
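Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of what tf.multinomial does: it draws integer class indices from each row of a 2-D tensor of unnormalized log-probabilities (logits). Note that in TensorFlow 1.13 and later the same op is exposed as tf.random.categorical; the call shown here is the older name.

import tensorflow as tf

# Each row of `logits` is an independent categorical distribution over 3 classes.
logits = tf.constant([[1.0, 1.0, 1.0],    # roughly uniform
                      [5.0, 0.0, -5.0]])  # strongly favors class 0
samples = tf.multinomial(logits, num_samples=4)  # int64 tensor of shape [2, 4]

with tf.Session() as sess:
    print(sess.run(samples))  # e.g. [[2 0 1 1]
                              #       [0 0 0 0]]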
Example 1: body
def body(i, prev_base_state, prev_high_states, prev_y, prev_emb,
         y_array):
    state1 = decoder.grustep1.forward(prev_base_state, prev_emb)
    att_ctx = decoder.attstep.forward(state1)
    base_state = decoder.grustep2.forward(state1, att_ctx)
    if decoder.high_gru_stack == None:
        output = base_state
        high_states = []
    else:
        if decoder.high_gru_stack.context_state_size == 0:
            output, high_states = decoder.high_gru_stack.forward_single(
                prev_high_states, base_state)
        else:
            output, high_states = decoder.high_gru_stack.forward_single(
                prev_high_states, base_state, context=att_ctx)
    logits = decoder.predictor.get_logits(prev_emb, output, att_ctx,
                                          multi_step=False)
    new_y = tf.multinomial(logits, num_samples=1)
    new_y = tf.cast(new_y, dtype=tf.int32)
    new_y = tf.squeeze(new_y, axis=1)
    new_y = tf.where(tf.equal(prev_y, tf.constant(0, dtype=tf.int32)),
                     tf.zeros_like(new_y), new_y)
    y_array = y_array.write(index=i, value=new_y)
    new_emb = decoder.y_emb_layer.forward(new_y, factor=0)
    return i+1, base_state, high_states, new_y, new_emb, y_array
Developer: rsennrich, Project: nematus, Lines: 25, Source: rnn_inference.py
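In this sampling loop, token ID 0 acts as the end-of-sequence/padding symbol: once a sequence has produced 0, the tf.where mask forces every later sample back to 0 so finished sequences stay padded. A minimal sketch of that freezing pattern, with hypothetical values:

import tensorflow as tf

prev_y = tf.constant([5, 0, 9], dtype=tf.int32)  # second sequence already emitted EOS (0)
new_y = tf.constant([3, 7, 2], dtype=tf.int32)   # freshly sampled tokens
new_y = tf.where(tf.equal(prev_y, 0), tf.zeros_like(new_y), new_y)

with tf.Session() as sess:
    print(sess.run(new_y))  # [3 0 2] -- the finished sequence keeps emitting padding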
Example 2: decoder_fn
def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
    with tf.name_scope(name, "simple_decoder_fn_inference",
                       [time, cell_state, cell_input, cell_output,
                        context_state]):
        if cell_input is not None:
            raise ValueError("Expected cell_input to be None, but saw: %s" %
                             cell_input)
        if cell_output is None:
            # invariant that this is time == 0
            next_input_id = tf.ones([batch_size], dtype=dtype) * (
                start_of_sequence_id)
            done = tf.zeros([batch_size], dtype=tf.bool)
            cell_state = encoder_state
            cell_output = tf.zeros([cell_size],
                                   dtype=tf.float32)
        else:
            softmax_output = output_fn(cell_output)
            if sample:
                next_input_id = tf.squeeze(tf.multinomial(softmax_output, 1), 1)
            else:
                next_input_id = tf.argmax(softmax_output, 1)
            next_input_id = tf.cast(next_input_id, dtype=dtype)
            done = tf.equal(next_input_id, end_of_sequence_id)
        next_input = tf.gather(embeddings, next_input_id)
        # if time > maxlen, return all true vector
        done = tf.cond(
            tf.greater(time, maximum_length),
            lambda: tf.ones([batch_size], dtype=tf.bool),
            lambda: done)
        return (done, cell_state, next_input, next_input_id, context_state)
Developer: futurulus, Project: rl-cards, Lines: 30, Source: tfutils.py
Example 3: UpdateProbs
def UpdateProbs(self, inp):
    """Update probabilities of each particle based on 2D matrix inp which is a 2D perspective projection of the scene"""
    projection, onscreen = self.project()
    filtered_projection = tf.to_int64(tf.select(onscreen, projection, tf.zeros_like(projection)))
    per_state_probabilities = tf.gather_nd(inp, filtered_projection)
    filtered_probabilities = tf.select(onscreen, per_state_probabilities, tf.zeros_like(per_state_probabilities))
    new_state_indicies = tf.squeeze(tf.multinomial(tf.expand_dims(tf.log(filtered_probabilities), 0), self.particles/10*9))
    new_state = tf.gather(self.state, new_state_indicies)
    # Add momentum
    new_state = tf.concat(1, [new_state[:, 0:3] + new_state[:, 3:6], new_state[:, 3:10]])
    # Add in particles for the "just come onscreen" case.
    new_state = tf.concat(0, [new_state, tf.random_normal([self.particles/10, 10]) * self.initial_std + self.initial_bias])
    new_state = new_state + tf.random_normal([self.particles, 10]) * self.update_std
    # Todo: permute state by adding noise.
    return self.state.assign(new_state)
Developer: Hello1024, Project: quadcopter, Lines: 25, Source: particle.py
Example 4: loop_function
def loop_function(prev, _):
    prev = tf.nn.xw_plus_b(
        prev, output_projection[0], output_projection[1])
    prev_symbol = tf.cast(tf.reshape(tf.multinomial(prev, 1), [FLAGS.batch_size*FLAGS.max_dec_sen_num]), tf.int32)
    emb_prev = tf.nn.embedding_lookup(embedding, prev_symbol)
    return emb_prev
Developer: dbolshak, Project: DPGAN, Lines: 7, Source: model.py
Example 5: multinomial_squeeze
def multinomial_squeeze(logits, temperature=1.0):
    logits_shape = common_layers.shape_list(logits)
    reshaped_logits = (
        tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
    choices = tf.multinomial(reshaped_logits, 1)
    choices = tf.reshape(choices, logits_shape[:-1])
    return choices
Developer: AranKomat, Project: tensor2tensor, Lines: 7, Source: t2t_model.py
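A brief sketch of the temperature trick in isolation (hypothetical values, not tensor2tensor code): tf.multinomial only accepts a 2-D [batch, num_classes] input, which is why the logits are flattened and the sampled shape restored afterwards; dividing the logits by a temperature below 1.0 sharpens the distribution toward the argmax, while a larger temperature flattens it.

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
cold = tf.multinomial(logits / 0.5, num_samples=10)  # sharper: almost always class 0
warm = tf.multinomial(logits / 5.0, num_samples=10)  # flatter: classes mix more evenly

with tf.Session() as sess:
    print(sess.run([cold, warm]))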
Example 6: testSmallEntropy
def testSmallEntropy(self):
    with self.test_session(use_gpu=self.use_gpu):
        # A logit value of -10 corresponds to a probability of ~5e-5.
        logits = tf.constant([[-10., 10., -10.], [-10., -10., 10.]])
        num_samples = 1000
        samples = tf.multinomial(logits, num_samples).eval()
        self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)
Developer: Ambier, Project: tensorflow, Lines: 7, Source: multinomial_op_test.py
Example 7: create_dc_actor_critic
def create_dc_actor_critic(self, h_size, num_layers):
    num_streams = 1
    hidden_streams = self.create_new_obs(num_streams, h_size, num_layers)
    hidden = hidden_streams[0]
    if self.use_recurrent:
        tf.Variable(self.m_size, name="memory_size", trainable=False, dtype=tf.int32)
        self.prev_action = tf.placeholder(shape=[None], dtype=tf.int32, name='prev_action')
        self.prev_action_oh = c_layers.one_hot_encoding(self.prev_action, self.a_size)
        hidden = tf.concat([hidden, self.prev_action_oh], axis=1)
        self.memory_in = tf.placeholder(shape=[None, self.m_size], dtype=tf.float32, name='recurrent_in')
        hidden, self.memory_out = self.create_recurrent_encoder(hidden, self.memory_in)
        self.memory_out = tf.identity(self.memory_out, name='recurrent_out')
    self.policy = tf.layers.dense(hidden, self.a_size, activation=None, use_bias=False,
                                  kernel_initializer=c_layers.variance_scaling_initializer(factor=0.01))
    self.all_probs = tf.nn.softmax(self.policy, name="action_probs")
    self.output = tf.multinomial(self.policy, 1)
    self.output = tf.identity(self.output, name="action")
    self.value = tf.layers.dense(hidden, 1, activation=None)
    self.value = tf.identity(self.value, name="value_estimate")
    self.entropy = -tf.reduce_sum(self.all_probs * tf.log(self.all_probs + 1e-10), axis=1)
    self.action_holder = tf.placeholder(shape=[None], dtype=tf.int32)
    self.selected_actions = c_layers.one_hot_encoding(self.action_holder, self.a_size)
    self.all_old_probs = tf.placeholder(shape=[None, self.a_size], dtype=tf.float32, name='old_probabilities')
    # We reshape these tensors to [batch x 1] in order to be of the same rank as continuous control probabilities.
    self.probs = tf.expand_dims(tf.reduce_sum(self.all_probs * self.selected_actions, axis=1), 1)
    self.old_probs = tf.expand_dims(tf.reduce_sum(self.all_old_probs * self.selected_actions, axis=1), 1)
Developer: dhsmf1416, Project: lastalpha, Lines: 33, Source: models.py
Example 8: _sample_single
def _sample_single(args):
    logits, n_draw = args[0], args[1]  # [K], []
    x = tf.multinomial(logits[tf.newaxis, ...], n_draw,
                       seed)  # [1, n*n_draw]
    x = tf.reshape(x, shape=[n, -1])  # [n, n_draw]
    x = tf.reduce_sum(tf.one_hot(x, depth=k), axis=-2)  # [n, k]
    return x
Developer: asudomoeva, Project: probability, Lines: 7, Source: multinomial.py
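The TensorFlow Probability helper above converts per-draw category indices into count vectors by one-hot encoding each draw and summing. A standalone sketch of the same idea for a single distribution, with hypothetical category probabilities:

import tensorflow as tf

probs = [[0.2, 0.3, 0.5]]                       # K = 3 categories
draws = tf.multinomial(tf.log(probs), 100)      # shape [1, 100], values in {0, 1, 2}
counts = tf.reduce_sum(tf.one_hot(draws, depth=3), axis=1)  # shape [1, 3], sums to 100

with tf.Session() as sess:
    print(sess.run(counts))  # e.g. [[19. 32. 49.]]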
Example 9: testNegativeMinLogits
def testNegativeMinLogits(self):
    tf.set_random_seed(78844)
    with self.test_session(use_gpu=self.use_gpu):
        logits = tf.constant([[np.finfo(np.float32).min] * 1023 + [0]])
        num_samples = 1000
        samples = tf.multinomial(logits, num_samples).eval()
        self.assertAllEqual([[1023] * num_samples], samples)
Developer: 2020zyc, Project: tensorflow, Lines: 7, Source: multinomial_op_test.py
Example 10: __init__
def __init__(self, q_values, observations, num_actions, stochastic, eps,
             softmax, softmax_temp):
    if softmax:
        action_dist = Categorical(q_values / softmax_temp)
        self.action = action_dist.sample()
        self.action_prob = action_dist.sampled_action_prob()
        return
    deterministic_actions = tf.argmax(q_values, axis=1)
    batch_size = tf.shape(observations)[0]
    # Special case masked out actions (q_value ~= -inf) so that we don't
    # even consider them for exploration.
    random_valid_action_logits = tf.where(
        tf.equal(q_values, tf.float32.min),
        tf.ones_like(q_values) * tf.float32.min, tf.ones_like(q_values))
    random_actions = tf.squeeze(
        tf.multinomial(random_valid_action_logits, 1), axis=1)
    chose_random = tf.random_uniform(
        tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
    stochastic_actions = tf.where(chose_random, random_actions,
                                  deterministic_actions)
    self.action = tf.cond(stochastic, lambda: stochastic_actions,
                          lambda: deterministic_actions)
    self.action_prob = None
Developer: robertnishihara, Project: ray, Lines: 26, Source: dqn_policy_graph.py
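A sketch of the masking step in isolation (hypothetical Q-values, not Ray's API): masked-out actions carry the most negative float as their Q-value, and the tf.where below maps them to an effectively minus-infinite log-probability while every valid action gets an equal logit, so tf.multinomial samples uniformly over valid actions only.

import tensorflow as tf

q_values = tf.constant([[1.3, tf.float32.min, 0.7, tf.float32.min]])
valid_logits = tf.where(tf.equal(q_values, tf.float32.min),
                        tf.ones_like(q_values) * tf.float32.min,  # keep masked actions impossible
                        tf.ones_like(q_values))                   # uniform logit for valid actions
random_action = tf.squeeze(tf.multinomial(valid_logits, 1), axis=1)

with tf.Session() as sess:
    print(sess.run(random_action))  # always 0 or 2, never a masked action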
Example 11: testEmpty
def testEmpty(self):
    classes = 5
    with self.test_session(use_gpu=self.use_gpu):
        for batch in 0, 3:
            for samples in 0, 7:
                x = tf.multinomial(tf.zeros([batch, classes]), samples).eval()
                self.assertEqual(x.shape, (batch, samples))
Developer: 2020zyc, Project: tensorflow, Lines: 7, Source: multinomial_op_test.py
Example 12: sample
def sample(self, projected_output):
    """Return integer ID tensor representing the sampled word.

    Args:
        projected_output: Tensor [1, 1, state_size], representing a single
            decoder timestep output.
    """
    # TODO: We really need a tf.control_dependencies check here (for rank).
    with tf.name_scope('decoder_sampler', values=[projected_output]):
        # Protect against extra size-1 dimensions; grab the 1D tensor
        # of size state_size.
        logits = tf.squeeze(projected_output)
        if self.temperature < 0.02:
            return tf.argmax(logits, axis=0)
        # Convert logits to probability distribution.
        probabilities = tf.div(logits, self.temperature)
        projected_output = tf.div(
            tf.exp(probabilities),
            tf.reduce_sum(tf.exp(probabilities), axis=-1))
        # Sample 1 time from the probability distribution.
        sample_ID = tf.squeeze(
            tf.multinomial(tf.expand_dims(probabilities, 0), 1))
        return sample_ID
Developer: laurii, Project: DeepChatModels, Lines: 26, Source: decoders.py
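One detail worth noting about this example: tf.multinomial interprets its input as unnormalized log-probabilities, so passing the temperature-scaled logits directly (as the sampler does) already draws from the corresponding softmax distribution; the explicitly normalized projected_output is not needed for the sampling itself. A small sketch of that equivalence with hypothetical logits:

import tensorflow as tf

logits = tf.constant([2.0, 1.0, 0.1])
scaled = logits / 0.7  # temperature scaling

# Both calls sample from the same categorical distribution, because
# log_softmax(scaled) differs from scaled only by a per-row constant.
sample_a = tf.multinomial(tf.expand_dims(scaled, 0), 1)
sample_b = tf.multinomial(tf.expand_dims(tf.nn.log_softmax(scaled), 0), 1)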
Example 13: __init__
def __init__(self, brain, h_size=128, lr=1e-4, n_layers=2, m_size=128,
             normalize=False, use_recurrent=False):
    LearningModel.__init__(self, m_size, normalize, use_recurrent, brain)
    num_streams = 1
    hidden_streams = self.create_new_obs(num_streams, h_size, n_layers)
    hidden = hidden_streams[0]
    self.dropout_rate = tf.placeholder(dtype=tf.float32, shape=[], name="dropout_rate")
    hidden_reg = tf.layers.dropout(hidden, self.dropout_rate)
    if self.use_recurrent:
        self.memory_in = tf.placeholder(shape=[None, self.m_size], dtype=tf.float32, name='recurrent_in')
        hidden_reg, self.memory_out = self.create_recurrent_encoder(hidden_reg, self.memory_in)
        self.memory_out = tf.identity(self.memory_out, name='recurrent_out')
    self.policy = tf.layers.dense(hidden_reg, self.a_size, activation=None, use_bias=False,
                                  kernel_initializer=c_layers.variance_scaling_initializer(factor=0.01))
    if brain.vector_action_space_type == "discrete":
        self.action_probs = tf.nn.softmax(self.policy)
        self.sample_action_float = tf.multinomial(self.policy, 1)
        self.sample_action_float = tf.identity(self.sample_action_float, name="action")
        self.sample_action = tf.cast(self.sample_action_float, tf.int32)
        self.true_action = tf.placeholder(shape=[None], dtype=tf.int32, name="teacher_action")
        self.action_oh = tf.one_hot(self.true_action, self.a_size)
        self.loss = tf.reduce_sum(-tf.log(self.action_probs + 1e-10) * self.action_oh)
        self.action_percent = tf.reduce_mean(tf.cast(
            tf.equal(tf.cast(tf.argmax(self.action_probs, axis=1), tf.int32), self.sample_action), tf.float32))
    else:
        self.sample_action = tf.identity(self.policy, name="action")
        self.true_action = tf.placeholder(shape=[None, self.a_size], dtype=tf.float32, name="teacher_action")
        self.loss = tf.reduce_sum(tf.squared_difference(self.true_action, self.sample_action))
    optimizer = tf.train.AdamOptimizer(learning_rate=lr)
    self.update = optimizer.minimize(self.loss)
Developer: dhsmf1416, Project: lastalpha, Lines: 33, Source: models.py
Example 14: generate_string
def generate_string(self, initial_logits, initial_state, sequence_length):
    """Builds sub-graph to generate a string, sampled from the model.

    Args:
        initial_logits: Starting logits to sample from.
        initial_state: Starting state for the RNN core.
        sequence_length: Number of characters to sample.

    Returns:
        A Tensor of characters, with dimensions `[sequence_length, batch_size,
        output_size]`.
    """
    current_logits = initial_logits
    current_state = initial_state
    generated_letters = []
    for _ in range(sequence_length):
        # Sample a character index from distribution.
        char_index = tf.squeeze(tf.multinomial(current_logits, 1))
        char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0)
        generated_letters.append(char_one_hot)
        # Feed character back into the deep_lstm.
        gen_out_seq, current_state = self._core(
            tf.nn.relu(self._embed_module(char_one_hot)),
            current_state)
        current_logits = self._output_module(gen_out_seq)
    generated_string = tf.stack(generated_letters)
    return generated_string
Developer: bch-runner-1, Project: sonnet, Lines: 32, Source: rnn_shakespeare.py
Example 15: call
def call(self, inputs):
    """Calculates logits and action.

    Args:
        inputs: Observations from a step in the cart-pole environment, of shape
            `(batch_size, input_size)`

    Returns:
        logits: the logits output by the output layer. This can be viewed as the
            likelihood values of choosing the left (0) action. Shape:
            `(batch_size, 1)`.
        actions: randomly selected actions ({0, 1}) based on the logits. Shape:
            `(batch_size, 1)`.
    """
    hidden = self._hidden_layer(inputs)
    logits = self._output_layer(hidden)
    left_prob = tf.nn.sigmoid(logits)
    action_probs = tf.concat([left_prob, 1.0 - left_prob], 1)
    self._grad_fn = eager.implicit_gradients(
        self._get_cross_entropy_and_save_actions)
    actions = tf.multinomial(tf.log(action_probs), 1)
    return logits, actions
Developer: Ajaycs99, Project: tensorflow, Lines: 25, Source: cartpole_benchmark.py
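Because tf.multinomial expects log-probabilities, the model converts the sigmoid output into a two-column probability tensor and takes tf.log before sampling. A minimal sketch of that binary-action sampling step, using a hypothetical probability:

import tensorflow as tf

left_prob = tf.constant([[0.8]])                           # P(action 0) for one observation
action_probs = tf.concat([left_prob, 1.0 - left_prob], 1)  # shape [1, 2]
action = tf.multinomial(tf.log(action_probs), 1)           # samples 0 about 80% of the time

with tf.Session() as sess:
    print(sess.run(action))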
Example 16: _g_recurrence_2
def _g_recurrence_2(i, x_t, gen_x, h_tm1, h_tm1_manager, last_goal, real_goal):
    # with tf.device('/cpu:0'):
    cur_sen = tf.cond(i > 0, lambda:
                      tf.split(tf.concat([tf.transpose(gen_x.stack(), perm=[1, 0]), self.padding_array], 1),
                               [self.sequence_length, i - 1], 1)[0], lambda: self.padding_array)
    with tf.variable_scope(self.scope):
        feature = self.FeatureExtractor_unit(cur_sen, self.drop_out)
    h_t_Worker = self.g_worker_recurrent_unit(x_t, h_tm1)  # hidden_memory_tuple
    o_t_Worker = self.g_worker_output_unit(h_t_Worker)  # batch x vocab , logits not prob
    o_t_Worker = tf.reshape(o_t_Worker, [self.batch_size, self.num_vocabulary, self.goal_size])
    h_t_manager = self.g_manager_recurrent_unit(feature, h_tm1_manager)
    sub_goal = self.g_manager_output_unit(h_t_manager)
    sub_goal = tf.nn.l2_normalize(sub_goal, 1)
    real_sub_goal = tf.add(last_goal, sub_goal)
    w_g = tf.matmul(real_goal, self.g_change)  # batch x goal_size
    w_g = tf.nn.l2_normalize(w_g, 1)
    w_g = tf.expand_dims(w_g, 2)  # batch x goal_size x 1
    x_logits = tf.matmul(o_t_Worker, w_g)
    x_logits = tf.squeeze(x_logits)
    log_prob = tf.log(tf.nn.softmax(x_logits))
    next_token = tf.cast(tf.reshape(tf.multinomial(log_prob, 1), [self.batch_size]), tf.int32)
    x_tp1 = tf.nn.embedding_lookup(self.g_embeddings, next_token)  # batch x emb_dim
    with tf.control_dependencies([cur_sen]):
        gen_x = gen_x.write(i - 1, next_token)  # indices, batch_size
    return i + 1, x_tp1, gen_x, h_t_Worker, h_t_manager, \
           tf.cond(((i) % self.step_size) > 0, lambda: real_sub_goal,
                   lambda: tf.constant(0.0, shape=[self.batch_size, self.goal_out_size])), \
           tf.cond(((i) % self.step_size) > 0, lambda: real_goal, lambda: real_sub_goal)
Developer: IshJ, Project: Texygen, Lines: 33, Source: LeakganGenerator.py
Example 17: build_generator
def build_generator(self):
    """
    Generator for generating captions
    Support sample max or sample from distribution
    No Beam search here; beam search is in decoder
    """
    # Variables for the sample setting
    self.sample_max = tf.Variable(True, trainable = False, name = "sample_max")
    self.sample_temperature = tf.Variable(1.0, trainable = False, name = "temperature")
    self.generator = []
    with tf.variable_scope("rnnlm"):
        flattened_ctx = tf.reshape(self.context, [self.batch_size, 196, 512])
        ctx_mean = tf.reduce_mean(flattened_ctx, 1)
        tf.get_variable_scope().reuse_variables()
        initial_state = utils.get_initial_state(ctx_mean, self.cell.state_size)
        # projected context
        # This is used in attention module; do this outside the loop to reduce redundant computations
        # with tf.variable_scope("attention"):
        if self.att_hid_size == 0:
            pctx = slim.fully_connected(flattened_ctx, 1, activation_fn = None, scope = 'ctx_att')  # (batch) * 196 * 1
        else:
            pctx = slim.fully_connected(flattened_ctx, self.att_hid_size, activation_fn = None, scope = 'ctx_att')  # (batch) * 196 * att_hid_size
        rnn_input = tf.nn.embedding_lookup(self.Wemb, tf.zeros([self.batch_size], tf.int32))
        prev_h = utils.last_hidden_vec(initial_state)
        self.g_alphas = []
        outputs = []
        state = initial_state
        for ind in range(MAX_STEPS):
            with tf.variable_scope("attention"):
                alpha = self.get_alpha(prev_h, pctx)
                self.g_alphas.append(alpha)
                weighted_context = tf.reduce_sum(flattened_ctx * tf.expand_dims(alpha, 2), 1)
            output, state = self.cell(tf.concat(axis=1, values=[weighted_context, rnn_input]), state)
            outputs.append(output)
            prev_h = output
            # Get the input of next timestep
            prev_logit = slim.fully_connected(prev_h, self.vocab_size + 1, activation_fn = None, scope = 'logit')
            prev_symbol = tf.stop_gradient(tf.cond(self.sample_max,
                lambda: tf.argmax(prev_logit, 1),  # pick the word with largest probability as the input of next time step
                lambda: tf.squeeze(
                    tf.multinomial(tf.nn.log_softmax(prev_logit) / self.sample_temperature, 1), 1)))  # Sample from the distribution
            self.generator.append(prev_symbol)
            rnn_input = tf.nn.embedding_lookup(self.Wemb, prev_symbol)
        self.g_output = output = tf.reshape(tf.concat(axis=1, values=outputs), [-1, self.rnn_size])  # outputs[1:], because we don't calculate loss on time 0.
        self.g_logits = logits = slim.fully_connected(output, self.vocab_size + 1, activation_fn = None, scope = 'logit')
        self.g_probs = probs = tf.reshape(tf.nn.softmax(logits), [self.batch_size, MAX_STEPS, self.vocab_size + 1])
    self.generator = tf.transpose(tf.reshape(tf.concat(axis=0, values=self.generator), [MAX_STEPS, -1]))
Developer: ruotianluo, Project: neuraltalk2-tensorflow, Lines: 59, Source: ShowAttendTellModel_old.py
Example 18: _g_recurrence_2
def _g_recurrence_2(i, x_t, h_tm1, given_num, gen_x):
    h_t = self.g_recurrent_unit(x_t, h_tm1)  # hidden_memory_tuple
    o_t = self.g_output_unit(h_t)  # batch x vocab , logits not prob
    log_prob = tf.log(tf.nn.softmax(o_t))
    next_token = tf.cast(tf.reshape(tf.multinomial(log_prob, 1), [self.batch_size]), tf.int32)
    x_tp1 = tf.nn.embedding_lookup(self.g_embeddings, next_token)  # batch x emb_dim
    gen_x = gen_x.write(i, next_token)  # indices, batch_size
    return i + 1, x_tp1, h_t, given_num, gen_x
Developer: IshJ, Project: Texygen, Lines: 8, Source: RankganReward.py
Example 19: st_sampler
def st_sampler(logits):
    """straight-through stochastic sampler"""
    flat_samples = tf.reshape(tf.multinomial(tf.reshape(logits, [-1, len(charmap)]), 1), [-1])
    onehot = tf.reshape(tf.one_hot(flat_samples, len(charmap)), tf.shape(logits))
    residual = onehot - logits
    onehot = logits + tf.stop_gradient(residual)
    return onehot
Developer: igul222, Project: nn, Lines: 8, Source: rnn_gan_lm.py
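For context, this is the straight-through estimator: the forward pass emits a hard one-hot sample, but because the sampled residual is wrapped in tf.stop_gradient, the backward pass treats the sampler as the identity on its input. A self-contained sketch of the same trick, with a hypothetical vocabulary size of 10:

import tensorflow as tf

logits = tf.random_normal([4, 10])                       # batch of 4, 10 symbols
samples = tf.multinomial(logits, 1)                      # [4, 1] sampled indices
onehot = tf.one_hot(tf.squeeze(samples, 1), 10)          # hard one-hot, gradient-free
st_onehot = logits + tf.stop_gradient(onehot - logits)   # forward: one-hot, backward: identity w.r.t. logits

grads = tf.gradients(tf.reduce_sum(st_onehot), logits)   # gradients are all ones; the sampling op is bypassed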
Example 20: multinomial_sample
def multinomial_sample(x, vocab_size, temperature):
    """Multinomial sampling from an n-dimensional tensor."""
    if temperature > 0:
        samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1)
    else:
        samples = tf.argmax(x, axis=-1)
    reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
    return tf.to_int32(reshaped_samples)
Developer: kltony, Project: tensor2tensor, Lines: 8, Source: transformer_vae.py
Note: The tensorflow.multinomial examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not reproduce without permission.