This article collects typical usage examples of the tensorflow.unpack function in Python. If you have been wondering what exactly Python's unpack function does, how to use it, or what real-world usage looks like, the hand-picked code examples below should help.
The following presents 20 code examples of the unpack function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
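Before the examples, a minimal illustrative sketch of what tf.unpack does (this snippet is not drawn from any of the projects cited below): it splits a tensor along an axis into a Python list of tensors. Note that all of the examples on this page use the pre-1.0 TensorFlow API; in TensorFlow 1.0 the op was renamed tf.unstack, with the same semantics.
import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
# Split along axis 0: a list of 2 tensors, each of shape (3,).
rows = tf.unpack(x)
# Split along axis 1: a list of 3 tensors, each of shape (2,).
cols = tf.unpack(x, axis=1)
with tf.Session() as sess:
    print(sess.run(rows))  # [array([1, 2, 3]), array([4, 5, 6])]
    print(len(cols))       # 3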
Example 1: _calc_rewards
def _calc_rewards(self, action_list, name="rewards"):
    action_list = tf.transpose(self.harden_actions(action_list))
    action_list = tf.unpack(action_list, FLAGS.batch_size)
    # batch_size * seq_length
    token_matrix = tf.transpose(tf.pack(self.input_tokens))
    token_matrix = tf.unpack(token_matrix, FLAGS.batch_size)
    # "Dereference" the predicted sorts, which are index sequences.
    predicted = [tf.gather(token_matrix[i], action_list[i])
                 for i in range(FLAGS.batch_size)]
    # predicted[0] = tf.Print(predicted[0], [predicted[0]], "predicted_" + name, summarize=100)
    predicted = tf.concat(0, [tf.expand_dims(predicted_i, 0)
                              for predicted_i in predicted])
    # predicted = tf.Print(predicted, [predicted], "predicted_" + name, summarize=100)
    # Compute per-timestep rewards by evaluating constraint violations.
    rewards = (tf.slice(predicted, [0, 1], [-1, -1])
               > tf.slice(predicted, [0, 0], [-1, self.seq_length - 1]))
    rewards = tf.cast(rewards, tf.float32)
    # Add reward for t = 0, fixed as 0
    rewards = tf.concat(1, [tf.zeros((FLAGS.batch_size, 1)),
                            rewards])
    rewards = tf.transpose(rewards)
    rewards_unpacked = tf.unpack(rewards, self.seq_length,
                                 name=name)
    return rewards, rewards_unpacked
Author: hans | Project: rlcomp | Lines: 29 | Source: sorting_seq2seq.py
Example 2: log_prob
def log_prob(self, xs, zs):
    """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
    N = get_dims(xs)[0]
    # Loop over each mini-batch zs[b,:]
    log_prob = []
    for z in tf.unpack(zs):
        pi, mus, sigmas = self.unpack_params(z)
        log_prior = dirichlet.logpdf(pi, self.alpha)
        for k in xrange(self.K):
            log_prior += norm.logpdf(mus[k*self.D], 0, np.sqrt(self.c))
            log_prior += norm.logpdf(mus[k*self.D+1], 0, np.sqrt(self.c))
            log_prior += invgamma.logpdf(sigmas[k*self.D], self.a, self.b)
            log_prior += invgamma.logpdf(sigmas[k*self.D+1], self.a, self.b)
        log_lik = tf.constant(0.0, dtype=tf.float32)
        for x in tf.unpack(xs):
            for k in xrange(self.K):
                log_lik += tf.log(pi[k])
                log_lik += multivariate_normal.logpdf(
                    x,
                    mus[(k*self.D):((k+1)*self.D)],
                    sigmas[(k*self.D):((k+1)*self.D)])
        log_prob += [log_prior + log_lik]
    return tf.pack(log_prob)
Author: Beronx86 | Project: edward | Lines: 25 | Source: mixture_gaussian.py
Example 3: log_prob
def log_prob(self, xs, zs):
    """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
    N = get_dims(xs)[0]
    # Loop over each mini-batch zs[b,:]
    log_prob = []
    for z in tf.unpack(zs):
        # Do the unconstrained to constrained transformation for MAP here.
        pi, mus, sigmas = self.unpack_params(z)
        pi = tf.sigmoid(pi)
        pi = tf.concat(0, [pi[0:(self.K-1)],
                           tf.expand_dims(1.0 - tf.reduce_sum(pi[0:(self.K-1)]), 0)])
        sigmas = tf.nn.softplus(sigmas)
        log_prior = dirichlet.logpdf(pi, self.alpha)
        for k in xrange(self.K):
            log_prior += norm.logpdf(mus[k*self.D], 0, np.sqrt(self.c))
            log_prior += norm.logpdf(mus[k*self.D+1], 0, np.sqrt(self.c))
            log_prior += invgamma.logpdf(sigmas[k*self.D], self.a, self.b)
            log_prior += invgamma.logpdf(sigmas[k*self.D+1], self.a, self.b)
        log_lik = tf.constant(0.0, dtype=tf.float32)
        for x in tf.unpack(xs):
            for k in xrange(self.K):
                log_lik += tf.log(pi[k])
                log_lik += multivariate_normal.logpdf(
                    x,
                    mus[(k*self.D):((k+1)*self.D)],
                    sigmas[(k*self.D):((k+1)*self.D)])
        log_prob += [log_prior + log_lik]
    return tf.pack(log_prob)
Author: Beronx86 | Project: edward | Lines: 30 | Source: mixture_gaussian_map.py
Example 4: language_model
def language_model(X, y):
    inputs = learn.ops.one_hot_matrix(X, 256)
    inputs = tf.unpack(inputs, axis=1)
    target = tf.unpack(y, axis=1)
    encoder_cell = tf.nn.rnn_cell.OutputProjectionWrapper(
        tf.nn.rnn_cell.GRUCell(hidden_size), 256)
    output, _ = tf.nn.rnn(encoder_cell, inputs, dtype=tf.float32)
    return learn.ops.sequence_classifier(output, target)
Author: 363158858 | Project: tensorflow | Lines: 7 | Source: language_model.py
Example 5: inference
def inference(input_var, state_size, vocab_size, num_steps, batch_size, noise_var,
              decoder_inputs, scope):
    cell = VarRNN(state_size, noise_var)
    inputs = tf.unpack(input_var, axis=1)
    init_state = cell.zero_state(batch_size, tf.float32)
    softmax_w = tf.get_variable('softmax_w', [state_size, vocab_size])
    softmax_b = tf.get_variable('softmax_b', [vocab_size])
    outputs, state = tf.nn.seq2seq.embedding_rnn_decoder(
        inputs, init_state, cell, vocab_size, 32,
        output_projection=(softmax_w, softmax_b), scope=scope)
    logits = tf.reshape(tf.concat(1, outputs), [-1, state_size])
    logits = tf.matmul(logits, softmax_w) + softmax_b
    sample_init = cell.zero_state(1, tf.float32)
    print('got model')
    scope.reuse_variables()
    samples, _ = tf.nn.seq2seq.embedding_rnn_decoder(
        decoder_inputs, sample_init, cell, vocab_size, 32,
        output_projection=(softmax_w, softmax_b), feed_previous=True,
        scope=scope)
    samples = tf.reshape(tf.concat(1, samples), [-1, state_size])
    samples = tf.matmul(samples, softmax_w) + softmax_b
    samples = tf.argmax(samples, 1)
    samples = tf.unpack(tf.squeeze(samples))
    print('got sampling model')
    return logits, state, init_state, samples
Author: PFCM | Project: rnns | Lines: 30 | Source: var_rnn.py
Example 6: dynamic_vae_single
def dynamic_vae_single(T=50, d_z=1, d_hidden=2, d_x=10):
    # MODEL
    transition_mat = np.eye(d_z, dtype=np.float32)  # GaussianMatrix(mean=0, std=1.0, output_shape=(D, D), name="transition")
    transition_bias = np.zeros((d_z,), dtype=np.float32)
    transition_cov = np.eye(d_z, dtype=np.float32)
    step_noise = MVGaussianMeanCov(transition_bias, transition_cov)
    w1, w2, b1, b2 = decoder_params(d_z, d_hidden, d_x)
    z = LinearGaussian(T, transition_bias, transition_cov,
                       transition_mat, transition_bias, transition_cov,
                       name="z")
    x = VAEDecoderBernoulli(z, w1, w2, b1, b2, name="x")
    # SYNTHETIC OBSERVATION
    x_sampled = x.sample(0)
    q_x = x.observe(x_sampled)
    # INFERENCE MODEL
    upwards_messages = VAEEncoder(q_x.sample, d_hidden, d_z)
    upwards_means = tf.unpack(upwards_messages.mean)
    upwards_vars = tf.unpack(upwards_messages.variance)
    unary_factors = [MVGaussianMeanCov(mean, tf.diag(vs))
                     for (mean, vs) in zip(upwards_means, upwards_vars)]
    tmat = tf.constant(transition_mat)
    q_z = LinearGaussianChainCRF((T, d_z), tmat, step_noise, unary_factors)
    z.attach_q(q_z)
    return x, z, x_sampled
Author: BenJamesbabala | Project: bayesflow | Lines: 29 | Source: dynamic_vae.py
Example 7: testCannotInferNumFromUnknownShape
def testCannotInferNumFromUnknownShape(self):
    x = tf.placeholder(np.float32)
    with self.assertRaisesRegexp(
            ValueError, r'Cannot infer num from shape <unknown>'):
        tf.unpack(x)
    with self.assertRaisesRegexp(
            ValueError, r'Cannot infer num from shape <unknown>'):
        tf.unstack(x)
Author: ComeOnGetMe | Project: tensorflow | Lines: 8 | Source: unpack_op_test.py
Example 8: testCannotInferNumFromNoneShape
def testCannotInferNumFromNoneShape(self):
    x = tf.placeholder(np.float32, shape=(None,))
    with self.assertRaisesRegexp(ValueError,
                                 r'Cannot infer num from shape \(\?,\)'):
        tf.unpack(x)
    with self.assertRaisesRegexp(ValueError,
                                 r'Cannot infer num from shape \(\?,\)'):
        tf.unstack(x)
Author: ComeOnGetMe | Project: tensorflow | Lines: 8 | Source: unpack_op_test.py
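The two tests above show that tf.unpack raises a ValueError when the length of axis 0 cannot be inferred from the static shape. As an illustrative sketch of the workaround (assumed behavior of the 0.x API, not part of the test file), passing num explicitly sidesteps the inference:
import numpy as np
import tensorflow as tf

x = tf.placeholder(np.float32, shape=(None, 3))
# tf.unpack(x) would raise ValueError here: the leading dimension is unknown,
# so the number of output tensors cannot be inferred at graph-construction time.
pieces = tf.unpack(x, num=4)  # a list of 4 tensors, each of shape (3,)
# At run time, the value fed for x must then actually have 4 rows.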
Example 9: __init__
def __init__(self, input_size, output_size):
    self.graph = tf.Graph()
    self.hyper_cnt = input_size
    self.save_path = "fit_trend.ckpt"
    self.collect_counter = 0
    self.fit_loss_collect = list()
    self.stable_loss_predict_collect = list()
    self.hp_collect = [list() for _ in range(self.hyper_cnt)]
    self.gradient_collect = [list() for _ in range(self.hyper_cnt)]
    self.stable_loss_label_collect = list()
    self.hp_norms = list()
    self.has_init = False
    with self.graph.as_default():
        # Receive the input hyperparameters.
        self.ph_hypers = tf.placeholder(tf.float32, shape=[self.hyper_cnt], name='ph_hypers')
        self.tf_hypers, self.reset_vars = assign_diffable_vars2tensor(self.ph_hypers, self.hyper_cnt)
        rnn_step = 5
        trend_input = tf.concat(0, [self.tf_hypers for _ in range(rnn_step)])
        # Run the input through an RNN.
        trend_outputs = rnn(trend_input, n_hidden=128)
        print('rnn output')
        print(tf.concat(0, trend_outputs))
        # Feed the RNN output into a DNN.
        trend_output = dnn(tf.concat(0, trend_outputs), [1, output_size])
        print('dnn output')
        print(trend_output)
        self.predict = trend_output
        # The observed trend.
        self.train_label = tf.placeholder(tf.float32, shape=[output_size], name='train_label')
        # Prediction accuracy: the Euclidean distance between the prediction and the trend.
        predict_accuracy = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(trend_output, self.train_label)))) / output_size
        # predict_accuracy /= tf.reduce_mean(tf.concat(0, self.train_label))
        # Loss at convergence, i.e. the last loss value.
        stable_loss = tf.unpack(tf.unpack(trend_output)[0])[-1]
        print(stable_loss)
        self.is_fit = tf.placeholder(tf.bool, name='is_fit')
        self.loss = tf.cond(self.is_fit, lambda: predict_accuracy, lambda: stable_loss)
        # Optimizers.
        self.var_s = tf.trainable_variables()
        self.v_hp_s = self.var_s[0: self.hyper_cnt]
        self.v_fit_s = [v for v in self.var_s if v not in self.v_hp_s]
        self.grads = var_gradient(self.v_hp_s, self.loss, start_rate=0.1, lrd=False)

        def optimize_fit():
            optimizer_fit = var_optimizer(self.v_fit_s, self.loss)
            return optimizer_fit

        def optimize_hp():
            optimizer_hp = var_optimizer(self.v_hp_s, self.loss, start_rate=0.1, lrd=False)
            return optimizer_hp

        self.optimizer = tf.cond(self.is_fit, optimize_fit, optimize_hp)
        self.saver = tf.train.Saver()
Author: ahangchen | Project: NN | Lines: 57 | Source: hp2trend.py
Example 10: log_prob
def log_prob(self, xs, zs):
    log_prior = tf.pack([norm.logpdf(z, mu, Sigma)
                         for z in tf.unpack(zs)])
    # log_lik = tf.pack([
    #     tf.reduce_sum(norm.logpdf(x, zs[:, 0], Sigma))
    #     for x in tf.unpack(xs)])
    log_lik = tf.pack([
        tf.reduce_sum(norm.logpdf(xs, z, 0*xs + Sigma))
        for z in tf.unpack(zs)])
    return log_lik + log_prior
Author: bakersfieldag | Project: edward | Lines: 10 | Source: gaussian_map.py
Example 11: get_placeholders
def get_placeholders(batch_size, sequence_length, num_features):
    """Make input and target placeholders"""
    inputs = tf.placeholder(tf.float32, name='all_inputs',
                            shape=[sequence_length,
                                   batch_size,
                                   num_features])
    targets = tf.placeholder(tf.float32, name='all_targets',
                             shape=[sequence_length,
                                    batch_size,
                                    num_features])
    return tf.unpack(inputs), tf.unpack(targets)
Author: PFCM | Project: rnns | Lines: 12 | Source: jsb_test.py
Example 12: testSimple
def testSimple(self):
    np.random.seed(7)
    with self.test_session(use_gpu=True):
        for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
            data = np.random.randn(*shape)
            # Convert data to a single tensorflow tensor
            x = tf.constant(data)
            # Unpack into a list of tensors
            cs_unpacked = tf.unpack(x, num=shape[0])
            cs_unstacked = tf.unstack(x, num=shape[0])
            for cs in (cs_unpacked, cs_unstacked):
                self.assertEqual(type(cs), list)
                self.assertEqual(len(cs), shape[0])
                cs = [c.eval() for c in cs]
                self.assertAllEqual(cs, data)
Author: ComeOnGetMe | Project: tensorflow | Lines: 15 | Source: unpack_op_test.py
Example 13: sequence_loss
def sequence_loss(self, y_pred, y_true):
    '''
    Loss function for the seq2seq RNN. Reshape predicted and true (label) tensors,
    generate dummy weights, then use seq2seq.sequence_loss to actually compute the loss.
    '''
    # print("my_sequence_loss y_pred=%s, y_true=%s" % (y_pred, y_true))
    logits = tf.unpack(y_pred, axis=1)   # list of [-1, num_decoder_symbols] elements
    targets = tf.unpack(y_true, axis=1)  # y_true has shape [-1, self.out_seq_len]; unpack to list of self.out_seq_len [-1] elements
    # print("my_sequence_loss logits=%s" % (logits,))
    # print("my_sequence_loss targets=%s" % (targets,))
    weights = [tf.ones_like(yp, dtype=tf.float32) for yp in targets]
    # print("my_sequence_loss weights=%s" % (weights,))
    sl = seq2seq.sequence_loss(logits, targets, weights)
    # print("my_sequence_loss return = %s" % sl)
    return sl
Author: Hackerer | Project: ChatBotCourse | Lines: 15 | Source: seq2seq_example.py
Example 14: cumprod
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops


def cumprod(xs):
    """Cumulative product of a tensor along its outer dimension.

    https://github.com/tensorflow/tensorflow/issues/813

    Parameters
    ----------
    xs : tf.Tensor
        A 1-D or higher tensor.

    Returns
    -------
    tf.Tensor
        A tensor with `cumprod` applied along its outer dimension.

    Raises
    ------
    InvalidArgumentError
        If the input has Inf or NaN values.
    """
    dependencies = [tf.verify_tensor_all_finite(xs, msg='')]
    xs = control_flow_ops.with_dependencies(dependencies, xs)
    xs = tf.cast(xs, dtype=tf.float32)
    values = tf.unpack(xs)
    out = []
    prev = tf.ones_like(values[0])
    for val in values:
        s = prev * val
        out.append(s)
        prev = s
    result = tf.pack(out)
    return result
Author: TalkingData | Project: edward | Lines: 34 | Source: util.py
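A quick usage sketch for the cumprod helper above (the session setup is illustrative and not part of the original example):
xs = tf.constant([1.0, 2.0, 3.0, 4.0])
ys = cumprod(xs)  # running products: [1, 1*2, 1*2*3, 1*2*3*4]
with tf.Session() as sess:
    print(sess.run(ys))  # [ 1.  2.  6. 24.]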
Example 15: get_batch_tensor
def get_batch_tensor(batch_size, sequence_length, num_epochs,
                     filename='names.txt',
                     preprocessor=_clean):
    """Gets the data in good tensorflow ways. Adds a queue runner so be sure to
    start it."""
    with tf.name_scope('input'):
        # the data is tiny so just load it, clean it and throw it into a
        # constant
        with open(filename) as f:
            all_data = f.read()
        # process it
        all_data = preprocessor(all_data)
        # just chop off the end to make sure sequence_length * batch_size
        # divides the total number of records
        print(all_data)
        num_batches = all_data.shape[0] // (sequence_length * batch_size)
        all_data = all_data[:num_batches * sequence_length * batch_size]
        all_data = np.reshape(all_data, (-1, sequence_length))
        # and make the queue
        data = tf.train.slice_input_producer(
            [tf.constant(all_data)],
            num_epochs=num_epochs,
            shuffle=True,
            capacity=batch_size * sequence_length)
        # very much unconvinced this is all the right way round
        batch = tf.train.batch([data], batch_size=batch_size,
                               enqueue_many=True, num_threads=2)
        batch = tf.transpose(batch)
        return tf.unpack(batch)
Author: PFCM | Project: sequential_gan | Lines: 30 | Source: ops.py
Example 16: _tile_along_beam
def _tile_along_beam(cls, beam_size, state):
    if nest.is_sequence(state):
        return nest_map(
            lambda val: cls._tile_along_beam(beam_size, val),
            state
        )
    if not isinstance(state, tf.Tensor):
        raise ValueError("State should be a sequence or tensor")
    tensor = state
    tensor_shape = tensor.get_shape().with_rank_at_least(1)
    try:
        new_first_dim = tensor_shape[0] * beam_size
    except:
        new_first_dim = None
    dynamic_tensor_shape = tf.unpack(tf.shape(tensor))
    res = tf.expand_dims(tensor, 1)
    res = tf.tile(res, [1, beam_size] + [1] * (tensor_shape.ndims - 1))
    res = tf.reshape(res, [-1] + list(dynamic_tensor_shape[1:]))
    res.set_shape([new_first_dim] + list(tensor_shape[1:]))
    return res
Author: Calvin-L | Project: commandline-helper | Lines: 25 | Source: beam_search.py
Example 17: testInferNum
def testInferNum(self):
    with self.test_session():
        for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
            x = tf.placeholder(np.float32, shape=shape)
            cs = tf.unpack(x)
            self.assertEqual(type(cs), list)
            self.assertEqual(len(cs), shape[0])
Author: adam-erickson | Project: tensorflow | Lines: 7 | Source: unpack_op_test.py
Example 18: rnn_model
def rnn_model(features, target):
    """RNN model to predict from sequence of words to a class."""
    # Convert indexes of words into embeddings.
    # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
    # maps word indexes of the sequence into [batch_size, sequence_length,
    # EMBEDDING_SIZE].
    word_vectors = tf.contrib.layers.embed_sequence(
        features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
    # Split into list of embedding per word, while removing doc length dim.
    # word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
    word_list = tf.unpack(word_vectors, axis=1)
    # Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
    cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)
    # Create an unrolled Recurrent Neural Networks to length of
    # MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
    _, encoding = tf.nn.rnn(cell, word_list, dtype=tf.float32)
    # Given encoding of RNN, take encoding of last step (e.g hidden size of the
    # neural network of last step) and pass it as features for logistic
    # regression over output classes.
    target = tf.one_hot(target, 15, 1, 0)
    logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
    # Create a training op.
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(),
        optimizer='Adam', learning_rate=0.01)
    return ({'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
            loss, train_op)
Author: lujian8181 | Project: tensorflow | Lines: 35 | Source: text_classification.py
Example 19: print_progress
def print_progress(self, t, losses, sess):
    if t % self.n_print == 0:
        print("iter %d loss %.2f " % (t, np.mean(losses)))
        self.variational.print_params(sess)
        # Sample functions from variational model
        mean, std = sess.run([self.variational.m, self.variational.s])
        rs = np.random.RandomState(0)
        zs = rs.randn(10, self.variational.num_vars) * std + mean
        zs = tf.constant(zs, dtype=tf.float32)
        inputs = np.linspace(-3, 3, num=400, dtype=np.float32)
        x = tf.expand_dims(tf.constant(inputs), 1)
        mus = tf.pack([self.model.mapping(x, z) for z in tf.unpack(zs)])
        outputs = sess.run(mus)
        # Get data
        y, x = sess.run([self.data.data[:, 0], self.data.data[:, 1]])
        # Plot data and functions
        plt.cla()
        ax.plot(x, y, 'bx')
        ax.plot(inputs, outputs.T)
        ax.set_xlim([-3, 3])
        ax.set_ylim([-0.5, 1.5])
        plt.draw()
Author: ShuaiW | Project: edward | Lines: 25 | Source: hierarchical_logistic_regression.py
Example 20: log_prob
def log_prob(self, xs, zs):
    K = self.kernel(xs)
    log_prior = multivariate_normal.logpdf(zs[:, :], cov=K)
    log_lik = tf.pack([tf.reduce_sum(
        bernoulli.logpmf(xs[:, 0], self.inverse_link(tf.mul(xs[:, 0], z))))
        for z in tf.unpack(zs)])
    return log_prior + log_lik
Author: andybaoxv | Project: edward | Lines: 7 | Source: gp_classification.py
Note: The tensorflow.unpack function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.