This article collects typical usage examples of the tensorflow.reshape function in Python. If you have been wondering what exactly the reshape function does, how to call it, or what it looks like in real code, then you are in luck: the curated examples below should help.
The following presents 20 code examples of the reshape function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
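Before the examples, here is a minimal standalone sketch of the basic call, assuming the TensorFlow 1.x graph API that all of the examples below use: tf.reshape(tensor, shape) returns a tensor containing the same elements in the requested shape, and a single -1 entry in the shape asks TensorFlow to infer that dimension from the total element count.

import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])       # shape (2, 3)
flat = tf.reshape(x, [-1])         # shape (6,): -1 infers the size
grid = tf.reshape(x, [3, -1])      # shape (3, 2): same 6 elements, new layout

with tf.Session() as sess:
    print(sess.run(flat))          # [1 2 3 4 5 6]
    print(sess.run(grid))          # [[1 2] [3 4] [5 6]]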
Example 1: project_bilstm_layer
def project_bilstm_layer(self, lstm_outputs, name=None):
    """
    hidden layer between lstm layer and logits
    :param lstm_outputs: [batch_size, num_steps, emb_size]
    :return: [batch_size, num_steps, num_tags]
    """
    with tf.variable_scope("project" if not name else name):
        with tf.variable_scope("hidden"):
            W = tf.get_variable("W", shape=[self.hidden_unit * 2, self.hidden_unit],
                                dtype=tf.float32, initializer=self.initializers.xavier_initializer())
            b = tf.get_variable("b", shape=[self.hidden_unit], dtype=tf.float32,
                                initializer=tf.zeros_initializer())
            output = tf.reshape(lstm_outputs, shape=[-1, self.hidden_unit * 2])
            hidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))

        # project to score of tags
        with tf.variable_scope("logits"):
            W = tf.get_variable("W", shape=[self.hidden_unit, self.num_labels],
                                dtype=tf.float32, initializer=self.initializers.xavier_initializer())
            b = tf.get_variable("b", shape=[self.num_labels], dtype=tf.float32,
                                initializer=tf.zeros_initializer())
            pred = tf.nn.xw_plus_b(hidden, W, b)

        return tf.reshape(pred, [-1, self.seq_length, self.num_labels])
Developer: chongp, Project: Name-Entity-Recognition, Lines: 26, Source: lstm_crf_layer.py
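The reshape → dense → reshape pattern above (merge the batch and time axes, apply one affine layer to every time step at once, then restore the sequence axis) recurs in several examples below, including Example 14. A minimal standalone sketch with hypothetical dimensions (50 steps, 128 features, 10 tags):

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 50, 128])   # [batch, steps, features]
W = tf.get_variable("W_demo", shape=[128, 10])
b = tf.get_variable("b_demo", shape=[10])

flat = tf.reshape(inputs, [-1, 128])                   # [batch * steps, features]
scores = tf.nn.xw_plus_b(flat, W, b)                   # one matmul covers all steps
logits = tf.reshape(scores, [-1, 50, 10])              # [batch, steps, tags]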
Example 2: iris_input_fn
def iris_input_fn(num_epochs=None):
    iris = tf.contrib.learn.datasets.load_iris()
    features = tf.reshape(tf.constant(iris.data), [-1, 4])
    if num_epochs:
        features = tf.train.limit_epochs(features, num_epochs=num_epochs)
    target = tf.reshape(tf.constant(iris.target), [-1])
    return features, target
Developer: apollos, Project: tensorflow, Lines: 7, Source: classifier_test.py
Example 3: make_net
def make_net(self, input_images, input_measurements, input_actions, input_objectives, reuse=False):
    if reuse:
        tf.get_variable_scope().reuse_variables()

    self.fc_val_params = np.copy(self.fc_joint_params)
    self.fc_val_params['out_dims'][-1] = self.target_dim
    self.fc_adv_params = np.copy(self.fc_joint_params)
    self.fc_adv_params['out_dims'][-1] = len(self.net_discrete_actions) * self.target_dim

    p_img_conv = my_ops.conv_encoder(input_images, self.conv_params, 'p_img_conv', msra_coeff=0.9)
    p_img_fc = my_ops.fc_net(my_ops.flatten(p_img_conv), self.fc_img_params, 'p_img_fc', msra_coeff=0.9)
    p_meas_fc = my_ops.fc_net(input_measurements, self.fc_meas_params, 'p_meas_fc', msra_coeff=0.9)
    if isinstance(self.fc_obj_params, np.ndarray):
        p_obj_fc = my_ops.fc_net(input_objectives, self.fc_obj_params, 'p_obj_fc', msra_coeff=0.9)
        p_concat_fc = tf.concat([p_img_fc, p_meas_fc, p_obj_fc], 1)
    else:
        p_concat_fc = tf.concat([p_img_fc, p_meas_fc], 1)
        if self.random_objective_coeffs:
            raise Exception('Need fc_obj_params with randomized objectives')

    p_val_fc = my_ops.fc_net(p_concat_fc, self.fc_val_params, 'p_val_fc', last_linear=True, msra_coeff=0.9)
    p_adv_fc = my_ops.fc_net(p_concat_fc, self.fc_adv_params, 'p_adv_fc', last_linear=True, msra_coeff=0.9)

    adv_reshape = tf.reshape(p_adv_fc, [-1, len(self.net_discrete_actions), self.target_dim])
    pred_all_nomean = adv_reshape - tf.reduce_mean(adv_reshape, reduction_indices=1, keep_dims=True)
    pred_all = pred_all_nomean + tf.reshape(p_val_fc, [-1, 1, self.target_dim])
    pred_relevant = tf.boolean_mask(pred_all, tf.cast(input_actions, tf.bool))

    return pred_all, pred_relevant
Developer: johny-c, Project: DirectFuturePrediction, Lines: 29, Source: future_predictor_agent_advantage.py
Example 4: one_minus_pseudo_unitcell_transfer_op
def one_minus_pseudo_unitcell_transfer_op(direction, mps, left_dominant,
                                          right_dominant, vector):
    """
    Calculates the action of (1 - Transfer-Operator) + |r)(l|.

    Parameters:
    ---------------------------
    direction: int or str
        if (1, 'l', 'left'): do left multiplication
        if (-1, 'r', 'right'): do right multiplication
    mps: InfiniteMPSCentralGauge object
        an infinite mps
    left_dominant: tf.tensor of shape (mps.D[0], mps.D[0])
        left dominant eigenvector of the unit-cell transfer operator of mps
    right_dominant: tf.tensor of shape (mps.D[-1], mps.D[-1])
        right dominant eigenvector of the unit-cell transfer operator of mps
    vector: tf.tensor of shape (mps.D[0] * mps.D[0]) or (mps.D[-1] * mps.D[-1])
        the input vector

    Returns
    ---------------------------
    np.ndarray of shape (mps.D[0] * mps.D[0]) or (mps.D[-1] * mps.D[-1])
    """
    if direction in (1, 'l', 'left'):
        x = tf.reshape(tf.convert_to_tensor(vector), (mps.D[0], mps.D[0]))
        temp = x - mps.unitcell_transfer_op('left', x) + ncon(
            [x, right_dominant], [[1, 2], [1, 2]]) * left_dominant
        return tf.reshape(temp, [mps.D[-1] * mps.D[-1]]).numpy()
    if direction in (-1, 'r', 'right'):
        x = tf.reshape(tf.convert_to_tensor(vector), [mps.D[-1], mps.D[-1]])
        temp = x - mps.unitcell_transfer_op('right', x) + ncon(
            [left_dominant, x], [[1, 2], [1, 2]]) * right_dominant
        return tf.reshape(temp, [mps.D[0] * mps.D[0]]).numpy()
Developer: zoltanegyed, Project: TensorNetwork, Lines: 34, Source: misc_mps.py
Example 5: create_output
def create_output(decoder_output, rows, cols, targets, hparams):
    """Creates output from decoder output and vars.

    Args:
      decoder_output: Tensor of shape [batch, ...], where ... can be any rank such
        that the number of elements is batch * rows * cols * hparams.hidden_size.
      rows: Integer representing number of rows in a 2-D data point.
      cols: Integer representing number of columns in a 2-D data point.
      targets: Tensor of shape [batch, hparams.img_len, hparams.img_len,
        hparams.num_channels].
      hparams: tf.contrib.training.HParams set.

    Returns:
      Tensor of shape [batch, hparams.img_len, hparams.img_len,
      hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise
      [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256].
      In the special case of predict mode, it is a Tensor of rank 5.
    """
    decoded_image = postprocess_image(decoder_output, rows, cols, hparams)
    depth = common_layers.shape_list(decoded_image)[-1]
    batch, height, width, channels = common_layers.shape_list(targets)
    likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
    if hparams.mode == tf.estimator.ModeKeys.PREDICT:
        y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth])
        output = y[:, :height, :, :, :]
    elif likelihood == DistributionType.CAT:
        # Unpack the cols dimension of the Categorical.
        output = tf.reshape(decoded_image,
                            [batch, height, width, channels, depth])
    else:
        output = decoded_image
    return output
Developer: kltony, Project: tensor2tensor, Lines: 32, Source: common_image_attention.py
Example 6: SoftThreshold
def SoftThreshold(t, threshold_ratio, name=None):
    """Soft-threshold a tensor by the mean value.

    Soft-threshold each dimension-0 vector (for a matrix it is each column) by
    the mean of its absolute values multiplied by the threshold_ratio factor.
    Here we soft-threshold each column as it corresponds to each unit in a layer.

    Args:
      t: the input tensor.
      threshold_ratio: the threshold ratio.
      name: the optional name for the returned tensor.

    Returns:
      the thresholded tensor, where each entry is soft-thresholded by
      threshold_ratio times the mean of the absolute value of each column.
    """
    assert threshold_ratio >= 0
    with tf.op_scope([t, threshold_ratio], name, "soft_thresholding") as name:
        saved_shape = tf.shape(t)
        t2 = tf.reshape(t, tf.concat(0, [tf.slice(saved_shape, [0], [1]), [-1]]))
        t_abs = tf.abs(t2)
        t_x = tf.sign(t2) * tf.nn.relu(t_abs -
                                       (tf.reduce_mean(t_abs, [0],
                                                       keep_dims=True) *
                                        threshold_ratio))
        return tf.reshape(t_x, saved_shape, name=name)
Developer: Peratham, Project: models, Lines: 26, Source: utils.py
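In equation form, the op above computes soft_threshold(t)[i, j] = sign(t[i, j]) * max(|t[i, j]| - tau_j, 0), where tau_j = threshold_ratio * mean_i |t[i, j]| is the per-column threshold; the two tf.reshape calls merely flatten any trailing dimensions before thresholding and restore the original shape afterwards.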
Example 7: forward_propagation
def forward_propagation(images):
    with tf.variable_scope('conv1') as scope:
        W_conv1 = weight_variable([5, 5, 3, 32])
        b_conv1 = bias_variable([32])
        image_matrix = tf.reshape(images, [-1, 1750, 1750, 3])
        h_conv1 = tf.nn.sigmoid(conv2d(image_matrix, W_conv1) + b_conv1)
        _activation_summary(h_conv1)
        h_pool1 = max_pool_5x5(h_conv1)

    with tf.variable_scope('conv2') as scope:
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.sigmoid(conv2d(h_pool1, W_conv2) + b_conv2)
        _activation_summary(h_conv2)
        h_pool2 = max_pool_5x5(h_conv2)

    with tf.variable_scope('conv3') as scope:
        W_conv3 = weight_variable([5, 5, 64, 128])
        b_conv3 = bias_variable([128])
        h_conv3 = tf.nn.sigmoid(conv2d(h_pool2, W_conv3) + b_conv3)
        _activation_summary(h_conv3)
        h_pool3 = max_pool_5x5(h_conv3)

    with tf.variable_scope('local3') as scope:
        W_fc1 = weight_variable([14 * 14 * 128, 256])
        b_fc1 = bias_variable([256])
        h_pool3_flat = tf.reshape(h_pool3, [-1, 14 * 14 * 128])
        h_fc1 = tf.nn.sigmoid(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
        _activation_summary(h_fc1)

    keep_prob = tf.Variable(1.0)
    W_fc2 = weight_variable([256, 4])
    b_fc2 = bias_variable([4])
    y_conv = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
    _activation_summary(y_conv)
    return y_conv
Developer: StructML, Project: Neural-Network-Prostate, Lines: 35, Source: Process.py
Example 8: buildSpImageConverter
def buildSpImageConverter(channelOrder, img_dtype):
    """
    Convert an imageIO byte-encoded image into an image tensor suitable as input to ConvNets.
    The name of the input must be a subset of those specified in `image.imageIO.imageSchema`.
    :param img_dtype: the type of data the underlying image bytes represent
    """
    with IsolatedSession() as issn:
        # Flat image data -> image dimensions
        # This has to conform to `imageIO.imageSchema`
        height = tf.placeholder(tf.int32, [], name="height")
        width = tf.placeholder(tf.int32, [], name="width")
        num_channels = tf.placeholder(tf.int32, [], name="nChannels")
        image_buffer = tf.placeholder(tf.string, [], name="data")

        # The image is packed into bytes with height as leading dimension
        # This is the default behavior of Python Image Library
        shape = tf.reshape(tf.stack([height, width, num_channels], axis=0),
                           shape=(3,), name='shape')
        if img_dtype == 'uint8':
            image_uint8 = tf.decode_raw(image_buffer, tf.uint8, name="decode_raw")
            image_float = tf.to_float(image_uint8)
        elif img_dtype == 'float32':
            image_float = tf.decode_raw(image_buffer, tf.float32, name="decode_raw")
        else:
            raise ValueError('''unsupported image data type "%s", currently only know how to
            handle uint8 and float32''' % img_dtype)
        image_reshaped = tf.reshape(image_float, shape, name="reshaped")
        image_reshaped = imageIO.fixColorChannelOrdering(channelOrder, image_reshaped)
        image_input = tf.expand_dims(image_reshaped, 0, name="image_input")
        gfn = issn.asGraphFunction([height, width, image_buffer, num_channels], [image_input])

    return gfn
Developer: pawanrana, Project: spark-deep-learning, Lines: 33, Source: pieces.py
Example 9: read_data
def read_data(self, filename_queue, has_3d=False):
    with tf.name_scope(None, 'read_data', [filename_queue]):
        reader = tf.TFRecordReader()
        _, example_serialized = reader.read(filename_queue)
        if has_3d:
            image, image_size, label, center, fname, pose, shape, gt3d, has_smpl3d = data_utils.parse_example_proto(
                example_serialized, has_3d=has_3d)
            # Need to send pose bc image can get flipped.
            image, label, pose, gt3d = self.image_preprocessing(
                image, image_size, label, center, pose=pose, gt3d=gt3d)
            # Convert pose to rotation.
            # Do not ignore the global!!
            rotations = batch_rodrigues(tf.reshape(pose, [-1, 3]))
            gt3d_flat = tf.reshape(gt3d, [-1])
            # Label 3d is:
            #   [rotations, shape-beta, 3Djoints]
            #   [216=24*3*3, 10, 42=14*3]
            label3d = tf.concat(
                [tf.reshape(rotations, [-1]), shape, gt3d_flat], 0)
        else:
            image, image_size, label, center, fname = data_utils.parse_example_proto(
                example_serialized)
            image, label = self.image_preprocessing(
                image, image_size, label, center)
        # label should be K x 3
        label = tf.transpose(label)
        if has_3d:
            return image, label, label3d, has_smpl3d
        else:
            return image, label
Developer: andrewjong, Project: hmr, Lines: 33, Source: data_loader.py
Example 10: conv_net
def conv_net(_X, _weights, _biases, _dropout):
    # Reshape input picture
    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])

    # Convolution Layer
    conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'])
    # Max Pooling (down-sampling)
    conv1 = max_pool(conv1, k=2)
    # Apply Dropout
    conv1 = tf.nn.dropout(conv1, _dropout)

    # Convolution Layer
    conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'])
    # Max Pooling (down-sampling)
    conv2 = max_pool(conv2, k=2)
    # Apply Dropout
    conv2 = tf.nn.dropout(conv2, _dropout)

    # Fully connected layer
    dense1 = tf.reshape(conv2, [-1, _weights['wd1'].get_shape().as_list()[0]])  # Reshape conv2 output to fit dense layer input
    dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1']))  # Relu activation
    dense1 = tf.nn.dropout(dense1, _dropout)  # Apply Dropout

    # Output, class prediction
    out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])
    return out
Developer: Dayz001, Project: MachineLearning, Lines: 26, Source: main_load_Conv.py
Example 11: knn_point
def knn_point(k, xyz1, xyz2):
    '''
    Input:
        k: int32, number of k in k-nn search
        xyz1: (batch_size, ndataset, c) float32 array, input points
        xyz2: (batch_size, npoint, c) float32 array, query points
    Output:
        val: (batch_size, npoint, k) float32 array, L2 distances
        idx: (batch_size, npoint, k) int32 array, indices to input points
    '''
    b = xyz1.get_shape()[0].value
    n = xyz1.get_shape()[1].value
    c = xyz1.get_shape()[2].value
    m = xyz2.get_shape()[1].value
    print(b, n, c, m)
    print(xyz1, (b, 1, n, c))
    xyz1 = tf.tile(tf.reshape(xyz1, (b, 1, n, c)), [1, m, 1, 1])
    xyz2 = tf.tile(tf.reshape(xyz2, (b, m, 1, c)), [1, 1, n, 1])
    dist = tf.reduce_sum((xyz1 - xyz2) ** 2, -1)
    print(dist, k)
    outi, out = select_top_k(k, dist)
    idx = tf.slice(outi, [0, 0, 0], [-1, -1, k])
    val = tf.slice(out, [0, 0, 0], [-1, -1, k])
    print(idx, val)
    # val, idx = tf.nn.top_k(-dist, k=k)  # ONLY SUPPORT CPU
    return val, idx
Developer: joosm, Project: pointnet2, Lines: 26, Source: tf_grouping.py
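As an aside, the two tf.reshape calls above insert singleton axes so that tf.tile can materialize every (query point, input point) pair before the subtraction. Plain broadcasting yields the same distance matrix without the tiled copies; a sketch of the equivalent computation (an alternative, not the repository's code, which tiles explicitly ahead of its custom select_top_k op):

import tensorflow as tf

def pairwise_sq_dist(xyz1, xyz2):
    # xyz1: (b, n, c) input points, xyz2: (b, m, c) query points -> (b, m, n)
    diff = tf.expand_dims(xyz2, 2) - tf.expand_dims(xyz1, 1)  # broadcasts to (b, m, n, c)
    return tf.reduce_sum(diff ** 2, axis=-1)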
Example 12: din_fcn_shine
def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)
    if time_major:
        # (T, B, D) => (B, T, D)
        facts = tf.transpose(facts, [1, 0, 2])
    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    # D value - hidden size of the RNN layer
    facts_size = facts.get_shape().as_list()[-1]
    query_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(
        query, facts_size, activation=None, name='f1_trans_shine' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat(
        [queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(
        din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
    d_layer_2_all = tf.layers.dense(
        d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
    d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
    output = d_layer_2_all
    return output
Developer: q64545, Project: x-deeplearning, Lines: 28, Source: utils.py
Example 13: accumulate_privacy_spending
def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                num_examples):
    """Accumulate the privacy spending.

    Currently only supports approximate privacy. Here we assume we use Gaussian
    noise on a randomly sampled batch so we get better composition: 1. the per-batch
    privacy is computed using the privacy-amplification-via-sampling bound;
    2. the composition is done using the composition with Gaussian noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.

    Returns:
      a TensorFlow operation for updating the privacy spending.
    """
    eps, delta = eps_delta
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
        amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                          self._total_examples)
        # Use privacy amplification via sampling bound.
        # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
        # TODO(liqzhang) Add a link to a document with formal statement
        # and proof.
        amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
            tf.exp(eps) - 1.0)), [1])
        amortize_delta = tf.reshape(amortize_ratio * delta, [1])
        return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                        tf.square(amortize_eps)),
                          tf.assign_add(self._delta_sum, amortize_delta)])
Developer: ZhangShiyue, Project: models, Lines: 34, Source: accountant.py
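For reference, the quantity computed above is the privacy-amplification-by-sampling bound: with sampling ratio q = num_examples / total_examples, the per-batch spending is eps' = log(1 + q * (exp(eps) - 1)) and delta' = q * delta; the two tf.reshape calls simply package these scalars as rank-1 tensors so they can be accumulated with tf.assign_add.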
Example 14: add_logits_op
def add_logits_op(self):
    """
    Adds logits to self
    """
    with tf.variable_scope("bi-lstm"):
        lstm_fwrd_cell = tf.contrib.rnn.LSTMCell(self.hidden_size)
        lstm_back_cell = tf.contrib.rnn.LSTMCell(self.hidden_size)
        (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(lstm_fwrd_cell,
                                                                    lstm_back_cell,
                                                                    self.word_embeddings,
                                                                    sequence_length=self.sequence_lengths,
                                                                    dtype=tf.float32)
        output = tf.concat([output_fw, output_bw], axis=-1)
        output = tf.nn.dropout(output, self.dropout)

    with tf.variable_scope("proj"):
        W = tf.get_variable("W", shape=[2 * self.hidden_size, self.ntags],
                            dtype=tf.float32)
        b = tf.get_variable("b", shape=[self.ntags], dtype=tf.float32,
                            initializer=tf.zeros_initializer())
        ntime_steps = tf.shape(output)[1]
        output = tf.reshape(output, [-1, 2 * self.hidden_size])
        pred = tf.matmul(output, W) + b
        self.logits = tf.reshape(pred, [-1, ntime_steps, self.ntags])
Developer: yyf013932, Project: tensormsa, Lines: 26, Source: neuralnet_node_bilstmcrf.py
Example 15: tf_random_modifiers
def tf_random_modifiers(flat_img, window_dims, name=None):
    float_img = tf.cast(flat_img, tf.float32)
    w, h = window_dims
    mod_image = tf.reshape(float_img, (h, w, 3))

    # # Define the modifier ops:
    # brightness_mod = lambda x: tf.image.random_brightness(x, max_delta=0.3)
    # contrast_mod = lambda x: tf.image.random_contrast(x, lower=0.2, upper=1.8)
    # saturation_mod = lambda x: tf.image.random_saturation(x, lower=0.2, upper=1.8)
    # hue_mod = lambda x: tf.image.random_hue(x, max_delta=0.025)
    # modifier_ops = [brightness_mod, contrast_mod, saturation_mod, hue_mod]
    # # Choose a random order for the modifiers:
    # perm = np.arange(len(modifier_ops))
    # np.random.shuffle(perm)
    # # Apply the modifiers in a random order:
    # for i in perm:
    #     mod_op = modifier_ops[i]
    #     mod_image = mod_op(mod_image)

    mod_image = tf.image.random_brightness(mod_image, max_delta=0.3)
    mod_image = tf.image.random_contrast(mod_image, lower=0.2, upper=1.8)
    mod_image = tf.image.random_saturation(mod_image, lower=0.2, upper=1.8)
    mod_image = tf.image.random_hue(mod_image, max_delta=0.025)

    # Subtract off the mean and divide by the variance of the pixels.
    final_image = tf.image.per_image_whitening(mod_image)
    final_flat_image = tf.reshape(final_image, (w * h * 3,), name=name)
    print('final_flat_image.get_shape()', final_flat_image.get_shape())
    return final_flat_image
Developer: LHY20, Project: car-detection, Lines: 32, Source: input_data.py
Example 16: get_reconstructed_image
def get_reconstructed_image(self, real, imag, name=None):
    """
    Reconstruct the image from its k-space representation: inverse 2-D FFT followed by a quadrant shift.
    :param real: real part of the k-space data
    :param imag: imaginary part of the k-space data
    :param name: optional name prefix for the created ops
    :return: the shifted reconstructed image, real and imaginary parts stacked on axis 1
    """
    complex_k_space_label = tf.complex(real=tf.squeeze(real), imag=tf.squeeze(imag), name=name + "_complex_k_space")
    rec_image_complex = tf.expand_dims(tf.ifft2d(complex_k_space_label), axis=1)
    rec_image_real = tf.reshape(tf.real(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
    rec_image_imag = tf.reshape(tf.imag(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])

    # Shifting (real part)
    top, bottom = tf.split(rec_image_real, num_or_size_splits=2, axis=2)
    top_left, top_right = tf.split(top, num_or_size_splits=2, axis=3)
    bottom_left, bottom_right = tf.split(bottom, num_or_size_splits=2, axis=3)
    top_shift = tf.concat(axis=3, values=[bottom_right, bottom_left])
    bottom_shift = tf.concat(axis=3, values=[top_right, top_left])
    shifted_image = tf.concat(axis=2, values=[top_shift, bottom_shift])

    # Shifting (imaginary part)
    top_imag, bottom_imag = tf.split(rec_image_imag, num_or_size_splits=2, axis=2)
    top_left_imag, top_right_imag = tf.split(top_imag, num_or_size_splits=2, axis=3)
    bottom_left_imag, bottom_right_imag = tf.split(bottom_imag, num_or_size_splits=2, axis=3)
    top_shift_imag = tf.concat(axis=3, values=[bottom_right_imag, bottom_left_imag])
    bottom_shift_imag = tf.concat(axis=3, values=[top_right_imag, top_left_imag])
    shifted_image_imag = tf.concat(axis=2, values=[top_shift_imag, bottom_shift_imag])

    shifted_image_two_channels = tf.stack([shifted_image[:, 0, :, :], shifted_image_imag[:, 0, :, :]], axis=1)
    return shifted_image_two_channels
Developer: shohad25, Project: thesis, Lines: 34, Source: k_space_wgan_gl_g2_unet_Gloss.py
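A side note on the split/concat blocks: for even image dimensions, swapping the four quadrants like this is exactly an FFT shift (moving the zero-frequency component to the center of the image), i.e. a TensorFlow-ops equivalent of applying numpy's np.fft.fftshift over the last two axes of tf.ifft2d's unshifted output.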
Example 17: BatchClipByL2norm
def BatchClipByL2norm(t, upper_bound, name=None):
    """Clip an array of tensors by L2 norm.

    Shrink each dimension-0 slice of the tensor (for a matrix it is each row) such
    that the L2 norm is at most upper_bound. Here we clip each row as it
    corresponds to each example in the batch.

    Args:
      t: the input tensor.
      upper_bound: the upper bound of the L2 norm.
      name: optional name.

    Returns:
      the clipped tensor.
    """
    assert upper_bound > 0
    with tf.op_scope([t, upper_bound], name, "batch_clip_by_l2norm") as name:
        saved_shape = tf.shape(t)
        batch_size = tf.slice(saved_shape, [0], [1])
        t2 = tf.reshape(t, tf.concat(0, [batch_size, [-1]]))
        upper_bound_inv = tf.fill(tf.slice(saved_shape, [0], [1]),
                                  tf.constant(1.0 / upper_bound))
        # Add a small number to avoid divide by 0
        l2norm_inv = tf.rsqrt(tf.reduce_sum(t2 * t2, [1]) + 0.000001)
        scale = tf.minimum(l2norm_inv, upper_bound_inv) * upper_bound
        clipped_t = tf.matmul(tf.diag(scale), t2)
        clipped_t = tf.reshape(clipped_t, saved_shape, name=name)
        return clipped_t
Developer: Peratham, Project: models, Lines: 28, Source: utils.py
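One observation on the final two ops: tf.matmul(tf.diag(scale), t2) builds a dense batch_size x batch_size matrix just to rescale each row by a scalar. A broadcast multiply produces the same clipped rows in linear memory; a sketch of the equivalent step (an alternative, not the repository's code):

import tensorflow as tf

def scale_rows(t2, scale):
    # Same result as tf.matmul(tf.diag(scale), t2): row i is multiplied by scale[i].
    return t2 * tf.expand_dims(scale, 1)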
Example 18: convAutoencoder
def convAutoencoder(x, weights, bias, weights_key, bias_key):
    x = tf.reshape(x, shape=[-1, 32, 32, 3])
    print(weights_key, bias_key)

    # encoder procedure
    encoder_conv_1 = conv2d(x, weights[weights_key[0]], bias[bias_key[0]])
    encoder_pool_1 = maxpool2d(encoder_conv_1)
    encoder_conv_2 = conv2d(encoder_pool_1, weights[weights_key[1]], bias[bias_key[1]])
    encoder_pool_2 = maxpool2d(encoder_conv_2)
    encoder_conv_3 = conv2d(encoder_pool_2, weights[weights_key[2]], bias[bias_key[2]])
    encoder_pool_3 = maxpool2d(encoder_conv_3)
    print(encoder_pool_3.get_shape())
    encoder_pool_3_reshape = tf.reshape(encoder_pool_3, shape=[-1, 1024])
    encoder_dense_1 = dense_layer(encoder_pool_3_reshape, weights[weights_key[3]], bias[bias_key[3]])

    # decoder procedure
    decoder_dense_1 = dense_layer(encoder_dense_1, weights[weights_key[4]], bias[bias_key[4]])
    decoder_dense_1_reshape = tf.reshape(decoder_dense_1, shape=[-1, 4, 4, 64])
    decoder_upscale_3 = upscale2d(decoder_dense_1_reshape, [1, 2], scale=2)
    decoder_conv_3 = conv2d(decoder_upscale_3, weights[weights_key[5]], bias[bias_key[5]])
    decoder_upscale_2 = upscale2d(decoder_conv_3, [1, 2], scale=2)
    decoder_conv_2 = conv2d(decoder_upscale_2, weights[weights_key[6]], bias[bias_key[6]])
    decoder_upscale_1 = upscale2d(decoder_conv_2, [1, 2], scale=2)
    decoder_conv_1 = conv2d(decoder_upscale_1, weights[weights_key[7]], bias[bias_key[7]])
    print(decoder_conv_1.get_shape())

    # output
    output = tf.reshape(decoder_conv_1, shape=[-1, 3072])
    print(output.get_shape())
    return output
Developer: jiajunshen, Project: MultipleDetection, Lines: 32, Source: convolutional_autoencoder_cifar10.py
Example 19: loss
def loss(logits, labels, batch_size=None):
    """Adds all losses for the model.

    Note the final loss is not returned. Instead, the losses are collected
    by slim.losses, accumulated in tower_loss(), and summed to calculate
    the total loss.

    Args:
      logits: List of logits from inference(). Each entry is a 2-D float Tensor.
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
        of shape [batch_size]
      batch_size: integer
    """
    if not batch_size:
        batch_size = FLAGS.batch_size

    # Reshape the labels into a dense Tensor of
    # shape [FLAGS.batch_size, num_classes].
    sparse_labels = tf.reshape(labels, [batch_size, 1])
    indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
    concated = tf.concat(1, [indices, sparse_labels])
    num_classes = logits[0].get_shape()[-1].value
    dense_labels = tf.sparse_to_dense(concated,
                                      [batch_size, num_classes],
                                      1.0, 0.0)

    # Cross entropy loss for the main softmax prediction.
    slim.losses.cross_entropy_loss(logits[0],
                                   dense_labels,
                                   label_smoothing=0.1,
                                   weight=1.0)
Developer: mclumd, Project: baxNet, Lines: 31, Source: inception_model.py
Example 20: testPaddingCrossEntropyFactored
def testPaddingCrossEntropyFactored(self):
    vocab_size = 19
    rows = 5
    cols = 4
    depth = 11
    label_smoothing = 0.1
    features = np.random.rand(rows, cols, depth)
    weights = np.random.rand(vocab_size, depth)
    labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))
    with self.test_session() as session:
        features = tf.to_float(features)
        weights = tf.to_float(weights)
        labels = tf.to_int32(labels)
        logits = tf.matmul(
            tf.reshape(features, [rows * cols, depth]), weights, transpose_b=True)
        logits = tf.reshape(logits, [rows, cols, vocab_size])
        loss_num, loss_den = common_layers.padded_cross_entropy(
            logits, labels, label_smoothing=label_smoothing, reduce_sum=False)
        factored_logits = common_layers.FactoredTensor(features, weights)
        loss_num_f, loss_den_f = common_layers.padded_cross_entropy_factored(
            factored_logits,
            labels=labels,
            label_smoothing=label_smoothing,
            reduce_sum=False)
        num, den, num_f, den_f = session.run(
            [loss_num, loss_den, loss_num_f, loss_den_f])
        self.assertEqual(num.shape, (rows, cols))
        self.assertEqual(den.shape, (rows, cols))
        self.assertEqual(num_f.shape, (rows, cols))
        self.assertEqual(den_f.shape, (rows, cols))
        self.assertAllClose(num, num_f)
        self.assertAllClose(den, den_f)
Developer: qixiuai, Project: tensor2tensor, Lines: 32, Source: common_layers_test.py
Note: the tensorflow.reshape function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various programmers; the copyright of the source code belongs to the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.