This article collects and summarizes typical usage examples of the Python function tensorflow.zeros_initializer. If you have been wondering what exactly zeros_initializer does, how to use it, and what real usage looks like, the curated examples here should help.
Below are 20 code examples of the zeros_initializer function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
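Before the examples, a minimal sketch of what tf.zeros_initializer provides (assuming TensorFlow 1.x, which all of the snippets below target): it returns an initializer object, not a tensor, and only produces values when the graph's initializers run.

import tensorflow as tf  # TensorFlow 1.x assumed, matching the examples below

# zeros_initializer is handed to tf.get_variable; the variable is filled with
# zeros when tf.global_variables_initializer() runs.
b = tf.get_variable("demo_b", shape=[4], initializer=tf.zeros_initializer())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(b))  # -> [0. 0. 0. 0.]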
Example 1: project_bilstm_layer

def project_bilstm_layer(self, lstm_outputs, name=None):
    """
    Hidden layer between the LSTM layer and the logits.
    :param lstm_outputs: [batch_size, num_steps, emb_size]
    :return: [batch_size, num_steps, num_tags]
    """
    with tf.variable_scope("project" if not name else name):
        with tf.variable_scope("hidden"):
            W = tf.get_variable("W", shape=[self.hidden_unit * 2, self.hidden_unit],
                                dtype=tf.float32, initializer=self.initializers.xavier_initializer())
            b = tf.get_variable("b", shape=[self.hidden_unit], dtype=tf.float32,
                                initializer=tf.zeros_initializer())
            output = tf.reshape(lstm_outputs, shape=[-1, self.hidden_unit * 2])
            hidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))
        # project to per-tag scores
        with tf.variable_scope("logits"):
            W = tf.get_variable("W", shape=[self.hidden_unit, self.num_labels],
                                dtype=tf.float32, initializer=self.initializers.xavier_initializer())
            b = tf.get_variable("b", shape=[self.num_labels], dtype=tf.float32,
                                initializer=tf.zeros_initializer())
            pred = tf.nn.xw_plus_b(hidden, W, b)
        return tf.reshape(pred, [-1, self.seq_length, self.num_labels])

Author: chongp, Project: Name-Entity-Recognition, Lines: 26, Source: lstm_crf_layer.py
Example 2: initialize_parameters

def initialize_parameters():
    """
    Initializes parameters to build a neural network with tensorflow. The shapes are:
    W1 : [n_hidden_1, n_input]
    b1 : [n_hidden_1, 1]
    W2 : [n_hidden_2, n_hidden_1]
    b2 : [n_hidden_2, 1]
    W3 : [n_classes, n_hidden_2]
    b3 : [n_classes, 1]
    """
    tf.set_random_seed(42)
    # First hidden layer
    W1 = tf.get_variable("W1", [n_hidden_1, n_input], initializer=tf.contrib.layers.xavier_initializer(seed=42))
    b1 = tf.get_variable("b1", [n_hidden_1, 1], initializer=tf.zeros_initializer())
    # Second hidden layer
    W2 = tf.get_variable("W2", [n_hidden_2, n_hidden_1], initializer=tf.contrib.layers.xavier_initializer(seed=42))
    b2 = tf.get_variable("b2", [n_hidden_2, 1], initializer=tf.zeros_initializer())
    # Output layer
    W3 = tf.get_variable("W3", [n_classes, n_hidden_2], initializer=tf.contrib.layers.xavier_initializer(seed=42))
    b3 = tf.get_variable("b3", [n_classes, 1], initializer=tf.zeros_initializer())
    # Store the initialized variables as a dictionary of parameters
    parameters = {
        "W1": W1,
        "b1": b1,
        "W2": W2,
        "b2": b2,
        "W3": W3,
        "b3": b3
    }
    return parameters

Author: vaibhawvipul, Project: Machine-Learning, Lines: 34, Source: fashion_mnist_tf.py
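A quick, hypothetical smoke test for the function above; the layer-size globals here are assumed values, since the original defines its own elsewhere in the module:

# Hypothetical smoke test; n_input etc. are assumptions, not the repo's values.
n_input, n_hidden_1, n_hidden_2, n_classes = 784, 128, 64, 10

tf.reset_default_graph()
parameters = initialize_parameters()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Every bias created with zeros_initializer starts at exactly zero.
    print(sess.run(parameters["b1"]).sum())  # -> 0.0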
Example 3: auxnet

def auxnet(embedding, size, dropout_rate=.5, std=.2, is_training=True, scope='auxnet'):
    """
    Defines the fully connected layers for the auxnet:
    -- so far, one layer to batch norm to relu to dropout
    Args:
        embedding: the histogram embedding matrix
        size: int size of each hidden layer
        dropout_rate: rate to dropout (usually .5)
        std: standard deviation used for the initializer
        is_training: bool -- used to turn off dropout for inference
        scope: name the op/tensor
    Returns:
        fc: the fully connected network as a tensor of size (p x size)
    """
    # make lower/upper bounds for the uniform initializer
    a, b = -np.sqrt(3) * std, np.sqrt(3) * std
    with tf.variable_scope(scope, 'Aux'):
        # note: with dropout and batch norm there is no need for a regularizer
        with slim.arg_scope([slim.fully_connected],
                            weights_initializer=tf.random_uniform_initializer(minval=a, maxval=b),
                            # weights_initializer=tf.truncated_normal_initializer(std),
                            weights_regularizer=slim.l2_regularizer(.005),
                            activation_fn=tf.nn.relu):
            """
            net = slim.fully_connected(embedding, size, scope='hidden')
            net = slim.dropout(net, dropout_rate,
                               is_training=is_training, scope='dropout')
            net = slim.fully_connected(net, size, scope='output',
                                       activation_fn=None)
            """
            fc = slim.fully_connected(embedding, size,
                                      biases_initializer=tf.zeros_initializer(),
                                      activation_fn=None,  # tf.nn.relu,
                                      scope='hidden')
            # tf.summary.histogram('beforebn/%s' % scope, fc, collections=['train'])
            fc = slim.batch_norm(fc, center=True,
                                 scale=True,
                                 zero_debias_moving_mean=True,
                                 is_training=is_training,
                                 scope='bn')
            # mod option: add another layer here
            fc = tf.nn.relu(fc, 'relu')
            # now apply the dropout:
            fc = slim.dropout(fc, dropout_rate,
                              is_training=is_training,
                              scope='dropout')
            # add another layer:
            fc = slim.fully_connected(fc, size, biases_initializer=tf.zeros_initializer(),
                                      activation_fn=tf.nn.tanh, scope="hidden2")
            # tf.summary.histogram('activations/auxnet/%s' % scope, fc, collections=['train'])
            return fc

Author: ljstrnadiii, Project: DietNet, Lines: 60, Source: network.py
Example 4: batch_norm

def batch_norm(x, decay=0.999, epsilon=1e-03, is_training=True,
               scope="scope"):
    x_shape = x.get_shape()
    num_inputs = x_shape[-1]
    reduce_dims = list(range(len(x_shape) - 1))
    with tf.variable_scope(scope):
        beta = create_var("beta", [num_inputs,],
                          initializer=tf.zeros_initializer())
        gamma = create_var("gamma", [num_inputs,],
                           initializer=tf.ones_initializer())
        # moving statistics, used for inference
        moving_mean = create_var("moving_mean", [num_inputs,],
                                 initializer=tf.zeros_initializer(),
                                 trainable=False)
        moving_variance = create_var("moving_variance", [num_inputs],
                                     initializer=tf.ones_initializer(),
                                     trainable=False)
    if is_training:
        mean, variance = tf.nn.moments(x, axes=reduce_dims)
        update_move_mean = moving_averages.assign_moving_average(moving_mean,
                                                                 mean, decay=decay)
        update_move_variance = moving_averages.assign_moving_average(moving_variance,
                                                                     variance, decay=decay)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_mean)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_variance)
    else:
        mean, variance = moving_mean, moving_variance
    return tf.nn.batch_normalization(x, mean, variance, beta, gamma, epsilon)

Author: kaka7, Project: DeepLearning_tutorials, Lines: 28, Source: ResNet50.py
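The snippet relies on a create_var helper and the internal moving_averages module, neither of which is shown above. A minimal sketch of how it might be wired up, assuming create_var is a thin wrapper around tf.get_variable (an assumption, since the repo's helper is not reproduced here):

# Assumed imports/helpers for the snippet above (not shown in the original):
from tensorflow.python.training import moving_averages

def create_var(name, shape, initializer, trainable=True):
    # Hypothetical stand-in for the repo's helper.
    return tf.get_variable(name, shape, initializer=initializer,
                           trainable=trainable)

# Hypothetical usage on an NHWC feature map; normalization happens over the
# batch and spatial axes, with one beta/gamma pair per channel.
x = tf.placeholder(tf.float32, [None, 32, 32, 64])
y = batch_norm(x, is_training=True, scope="bn_demo")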
Example 5: mlp_param_init

def mlp_param_init(dim, scheme='zero'):
    """
    @note: Initializes parameters to build a multi-layer perceptron with tensorflow.
           The shapes are:
           W1: [n1, n_x]
           B1: [n1, 1]
           W2: [n2, n1]
           B2: [n2, 1]
           ...
           Wl: [n_y, n(l-1)]
           Bl: [n_y, 1]
    @param dim: the number of units in each layer -- dim = [n_x, n1, n2, ..., n(l-1), n_y]
    @param scheme: the initialization scheme for the weights, one of {'zero', 'xavier'}
    @return: parameters -- a dictionary of tensors containing W1, B1, W2, B2, ..., Wl, Bl
    """
    parameters = {}
    l = len(dim)  # the number of layers
    # parameter initialization (using xavier_initializer for the weights)
    # (from 0 - input to l-1 - output)
    for i in range(1, l):
        if scheme == 'xavier':
            parameters['W' + str(i)] = tf.get_variable('W' + str(i), [dim[i], dim[i - 1]],
                                                       initializer=tf.contrib.layers.xavier_initializer())
        else:
            parameters['W' + str(i)] = tf.get_variable('W' + str(i), [dim[i], dim[i - 1]],
                                                       initializer=tf.zeros_initializer())
        parameters['B' + str(i)] = tf.get_variable('B' + str(i), [dim[i], 1],
                                                   initializer=tf.zeros_initializer())
    return parameters

Author: LiuYouliang, Project: Practice-of-Machine-Learning, Lines: 33, Source: mlp_demo.py
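A hypothetical call, plus a caution grounded in how the 'zero' scheme behaves:

# Hypothetical call: a 784-64-10 perceptron with Xavier-initialized weights.
params = mlp_param_init([784, 64, 10], scheme='xavier')
# params['W1'] -> shape [64, 784]; params['B1'] -> shape [64, 1], all zeros.

# Caution: scheme='zero' zero-initializes the weights too. With hidden layers
# that makes every unit in a layer compute the same function (symmetry is
# never broken), so 'xavier' is usually the scheme you want for the weights,
# while zeros_initializer remains the standard choice for the biases.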
Example 6: initialize_parameters

def initialize_parameters():
    """
    Initializes parameters to build a neural network with tensorflow. The shapes are:
    W1 : [25, 12288]
    b1 : [25, 1]
    W2 : [12, 25]
    b2 : [12, 1]
    W3 : [6, 12]
    b3 : [6, 1]
    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """
    tf.set_random_seed(1)  # so that your "random" numbers match ours
    ### START CODE HERE ### (approx. 6 lines of code)
    W1 = tf.get_variable("W1", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable("b1", [25, 1], initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable("b2", [12, 1], initializer=tf.zeros_initializer())
    W3 = tf.get_variable("W3", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable("b3", [6, 1], initializer=tf.zeros_initializer())
    ### END CODE HERE ###
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}
    return parameters

Author: shriavi, Project: datasciencecoursera, Lines: 33, Source: Tensorflow+Tutorial.py
Example 7: query_encoder

def query_encoder(self, v_q, is_training=True, scope="query_encoder"):
    """Encode a query image feature.
    Args:
        v_q: query image feature (batch_size, img_dim)
        is_training: True - training model / False - inference model
    Returns:
        phi_q: query vector
        v_qr: reconstructed v_q
    """
    with tf.variable_scope(scope):
        h1 = tf.contrib.layers.fully_connected(inputs=v_q,
                                               num_outputs=256,
                                               activation_fn=tf.nn.tanh,
                                               weights_regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay),
                                               biases_initializer=tf.zeros_initializer())
        phi_q = tf.contrib.layers.fully_connected(inputs=h1,
                                                  num_outputs=128,
                                                  activation_fn=tf.nn.tanh,
                                                  weights_regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay),
                                                  biases_initializer=tf.zeros_initializer())
        h2 = tf.contrib.layers.fully_connected(inputs=phi_q,
                                               num_outputs=256,
                                               activation_fn=tf.nn.tanh,
                                               weights_regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay),
                                               biases_initializer=tf.zeros_initializer())
        v_qr = tf.contrib.layers.fully_connected(inputs=h2,
                                                 num_outputs=self.img_dim,
                                                 activation_fn=tf.nn.tanh,
                                                 weights_regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay),
                                                 biases_initializer=tf.zeros_initializer())
    return phi_q, v_qr

Author: juneyang666, Project: clean-net, Lines: 32, Source: model.py
Example 8: conv2d_zeros

def conv2d_zeros(x,
                 width,
                 filter_size=[3, 3],
                 stride=[1, 1],
                 pad="SAME",
                 logscale_factor=3,
                 skip=1,
                 edge_bias=True,
                 name=None):
    with tf.variable_scope(name, "conv2d"):
        if edge_bias and pad == "SAME":
            x = add_edge_padding(x, filter_size)
            pad = 'VALID'
        n_in = int(x.get_shape()[3])
        stride_shape = [1] + stride + [1]
        filter_shape = filter_size + [n_in, width]
        w = tf.get_variable("W", filter_shape, tf.float32,
                            initializer=tf.zeros_initializer())
        if skip == 1:
            x = tf.nn.conv2d(x, w, stride_shape, pad, data_format='NHWC')
        else:
            assert stride[0] == 1 and stride[1] == 1
            x = tf.nn.atrous_conv2d(x, w, skip, pad)
        x += tf.get_variable("b", [1, 1, 1, width],
                             initializer=tf.ones_initializer())
        x *= tf.exp(tf.get_variable("logs",
                                    [1, width], initializer=tf.zeros_initializer()) * logscale_factor)
        return x

Author: gdahia, Project: DLF, Lines: 29, Source: ops.py
Example 9: basic_fc_discriminator

def basic_fc_discriminator(x):
    """Compute discriminator scores for a batch of input images.
    Inputs:
    - x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]
    Returns:
    TensorFlow Tensor with shape [batch_size, 1], containing the score
    for an image being real for each input image.
    """
    with tf.variable_scope("bfcdiscriminator"):
        W1 = tf.get_variable("W1", (784, 256))
        b1 = tf.get_variable("b1", (256,), initializer=tf.zeros_initializer())
        W2 = tf.get_variable("W2", (256, 256))
        b2 = tf.get_variable("b2", (256,), initializer=tf.zeros_initializer())
        W3 = tf.get_variable("W3", (256, 1))
        b3 = tf.get_variable("b3", (1,), initializer=tf.zeros_initializer())
        H1 = tf.matmul(x, W1) + b1
        H1L = leaky_relu(H1)
        H2 = tf.matmul(H1L, W2) + b2
        H2L = leaky_relu(H2)
        logits = tf.matmul(H2L, W3) + b3
    return logits

Author: haolang9527, Project: MyDeepLearning, Lines: 27, Source: TFGAN.py
Example 10: project_layer

def project_layer(self, lstm_outputs, name=None):
    """
    Hidden layer between the LSTM layer and the output logits.
    """
    with tf.variable_scope("project" if not name else name):
        with tf.variable_scope("hidden"):
            w_tanh = tf.get_variable("w_tanh", shape=[self.lstm_dim * 2, self.lstm_dim],
                                     dtype=tf.float32, initializer=self.initializer,
                                     regularizer=tf.contrib.layers.l2_regularizer(0.001))
            b_tanh = tf.get_variable("b_tanh", shape=[self.lstm_dim], dtype=tf.float32,
                                     initializer=tf.zeros_initializer())
            output = tf.reshape(lstm_outputs, shape=[-1, self.lstm_dim * 2])
            hidden = tf.tanh(tf.nn.xw_plus_b(output, w_tanh, b_tanh))
            drop_hidden = tf.nn.dropout(hidden, self.dropout)
        # project to per-tag scores
        with tf.variable_scope("output"):
            w_out = tf.get_variable("w_out", shape=[self.lstm_dim, self.num_tags],
                                    dtype=tf.float32, initializer=self.initializer,
                                    regularizer=tf.contrib.layers.l2_regularizer(0.001))
            b_out = tf.get_variable("b_out", shape=[self.num_tags], dtype=tf.float32,
                                    initializer=tf.zeros_initializer())
            pred = tf.nn.xw_plus_b(drop_hidden, w_out, b_out, name="pred")
            self.logits = tf.reshape(pred, [-1, self.num_steps, self.num_tags], name="logits")

Author: forin-xyz, Project: FoolNLTK, Lines: 26, Source: bi_lstm.py
Example 11: create_slots

def create_slots(self, var):
    """Create the factorized Adam accumulators for diet variables."""
    params = self.params
    shape = var.get_shape().as_list()
    if not hasattr(params, "slots"):
        params.slots = defaultdict(dict)
    name = var.op.name
    slots = params.slots[name]
    if params.factored_second_moment_accumulator and len(shape) == 2:
        slots["adam_vr"] = tf.get_variable(
            name + "_adam_vr", [shape[0], 1],
            trainable=False,
            initializer=tf.zeros_initializer())
        slots["adam_vc"] = tf.get_variable(
            name + "_adam_vc", [1, shape[1]],
            trainable=False,
            initializer=tf.zeros_initializer())
    else:
        slots["adam_v"] = tf.get_variable(
            name + "_adam_v",
            shape,
            trainable=False,
            initializer=tf.zeros_initializer())
    if params.beta1 != 0.0:
        slots["adam_m"] = tf.get_variable(
            name + "_adam_m",
            shape,
            trainable=False,
            initializer=tf.zeros_initializer())

Author: kltony, Project: tensor2tensor, Lines: 32, Source: diet.py
Example 12: initializeParameters

def initializeParameters(self, m, n):
    """
    Arguments:
    m -- number of users
    n -- number of items
    Returns:
    parameters -- parameters['b'], global bias, scalar
                  parameters['u'], user biases, shape (m, 1)
                  parameters['d'], item biases, shape (1, n)
                  parameters['P'], user feature matrix, shape (m, K)
                  parameters['Q'], item feature matrix, shape (n, K)
    """
    k = self.K
    parameters = {}
    parameters['b'] = tf.get_variable(name='b', dtype=tf.float64, shape=[],
                                      initializer=tf.zeros_initializer())
    parameters['u'] = tf.get_variable(name='u', dtype=tf.float64, shape=[m, 1],
                                      initializer=tf.zeros_initializer())
    parameters['d'] = tf.get_variable(name='d', dtype=tf.float64, shape=[1, n],
                                      initializer=tf.zeros_initializer())
    parameters['P'] = tf.get_variable(name='P', dtype=tf.float64, shape=[m, k],
                                      initializer=tf.random_normal_initializer())
    parameters['Q'] = tf.get_variable(name='Q', dtype=tf.float64, shape=[n, k],
                                      initializer=tf.random_normal_initializer())
    return parameters

Author: cheng-w-liu, Project: ML_algos, Lines: 32, Source: matrix_factorization_in_TensorFlow.py
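The biases here start at zero while the latent factors start random, the usual setup for biased matrix factorization. The original's prediction step is not shown on this page; a hedged sketch of how these parameters typically combine:

# Hypothetical prediction for the parameters above: global bias, plus user and
# item biases (broadcast across the rating matrix), plus the factor product.
def predict_ratings(parameters):
    return (parameters['b']
            + parameters['u']  # (m, 1), broadcasts over items
            + parameters['d']  # (1, n), broadcasts over users
            + tf.matmul(parameters['P'], parameters['Q'], transpose_b=True))  # (m, n)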
Example 13: initialize_parameters

def initialize_parameters():
    '''
    Initialize the network parameters, with the following shapes:
    W1: [25, 12288]
    b1: [25, 1]
    W2: [12, 25]
    b2: [12, 1]
    W3: [6, 12]
    b3: [6, 1]
    :return:
    parameters - a dictionary holding the W and b tensors
    '''
    tf.set_random_seed(1)  # fix the random seed
    W1 = tf.get_variable('W1', [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable('b1', [25, 1], initializer=tf.zeros_initializer())
    W2 = tf.get_variable('W2', [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable('b2', [12, 1], initializer=tf.zeros_initializer())
    W3 = tf.get_variable('W3', [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable('b3', [6, 1], initializer=tf.zeros_initializer())
    parameters = {
        'W1': W1,
        'b1': b1,
        'W2': W2,
        'b2': b2,
        'W3': W3,
        'b3': b3
    }
    return parameters

Author: 491811030, Project: hellow-world, Lines: 31, Source: work_2_firsttfnetwork.py
Example 14: Discriminator_with_Vanilla

def Discriminator_with_Vanilla(input_Pattern, hidden_Unit_Size=128, label_Unit_Size=10, is_Training=True, reuse=False):
    with tf.variable_scope('discriminator', reuse=reuse):
        hidden_Activation = tf.layers.dense(
            inputs=input_Pattern,
            units=hidden_Unit_Size,
            activation=tf.nn.relu,
            use_bias=True,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer=tf.zeros_initializer(),
            name="hidden"
        )
        discrimination_Logits = tf.layers.dense(
            inputs=hidden_Activation,
            units=1,
            activation=None,
            use_bias=True,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer=tf.zeros_initializer(),
            name="discrimination"
        )
        discrimination_Activation = tf.nn.sigmoid(discrimination_Logits)
        label_Logits = tf.layers.dense(
            inputs=hidden_Activation,
            units=label_Unit_Size,
            activation=None,
            use_bias=True,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer=tf.zeros_initializer(),
            name="label"
        )
        label_Activation = tf.nn.softmax(label_Logits)
        return discrimination_Logits, label_Logits, discrimination_Activation, label_Activation

Author: CODEJIN, Project: GAN, Lines: 34, Source: Customized_Layers.py
Example 15: bacthnorm

def bacthnorm(inputs, scope, epsilon=1e-05, momentum=0.99, is_training=True):
    inputs_shape = inputs.get_shape().as_list()  # full input shape
    params_shape = inputs_shape[-1:]  # channel dimension, i.e. the parameter shape
    axis = list(range(len(inputs_shape) - 1))
    with tf.variable_scope(scope):
        beta = create_variable("beta", params_shape,
                               initializer=tf.zeros_initializer())
        gamma = create_variable("gamma", params_shape,
                                initializer=tf.ones_initializer())
        # moving mean, not trainable, used for inference
        moving_mean = create_variable("moving_mean", params_shape,
                                      initializer=tf.zeros_initializer(), trainable=False)
        # moving variance, not trainable
        moving_variance = create_variable("moving_variance", params_shape,
                                          initializer=tf.ones_initializer(), trainable=False)
    if is_training:
        mean, variance = tf.nn.moments(inputs, axes=axis)  # batch mean and variance
        # exponential moving average of the mean/variance, keeping part of the
        # previous value: x_t = a * x_{t-1} + (1 - a) * x_now
        update_move_mean = moving_averages.assign_moving_average(moving_mean,
                                                                 mean, decay=momentum)
        update_move_variance = moving_averages.assign_moving_average(moving_variance,
                                                                     variance, decay=momentum)
        tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_mean)
        tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_variance)
    else:
        mean, variance = moving_mean, moving_variance
    return tf.nn.batch_normalization(inputs, mean, variance, beta, gamma, epsilon)

Author: dyz-zju, Project: MVision, Lines: 28, Source: MobileNet_tf.py
Example 16: main

def main(_):
    ed.set_seed(42)

    # DATA
    x_data = build_toy_dataset(FLAGS.N)

    # MODEL
    pi = Dirichlet(concentration=tf.ones(FLAGS.K))
    mu = Normal(0.0, 1.0, sample_shape=[FLAGS.K, FLAGS.D])
    sigma = InverseGamma(concentration=1.0, rate=1.0,
                         sample_shape=[FLAGS.K, FLAGS.D])
    c = Categorical(logits=tf.log(pi) - tf.log(1.0 - pi), sample_shape=FLAGS.N)
    x = Normal(loc=tf.gather(mu, c), scale=tf.gather(sigma, c))

    # INFERENCE
    qpi = Empirical(params=tf.get_variable(
        "qpi/params",
        [FLAGS.T, FLAGS.K],
        initializer=tf.constant_initializer(1.0 / FLAGS.K)))
    qmu = Empirical(params=tf.get_variable("qmu/params",
                                           [FLAGS.T, FLAGS.K, FLAGS.D],
                                           initializer=tf.zeros_initializer()))
    qsigma = Empirical(params=tf.get_variable("qsigma/params",
                                              [FLAGS.T, FLAGS.K, FLAGS.D],
                                              initializer=tf.ones_initializer()))
    qc = Empirical(params=tf.get_variable("qc/params",
                                          [FLAGS.T, FLAGS.N],
                                          initializer=tf.zeros_initializer(),
                                          dtype=tf.int32))

    gpi = Dirichlet(concentration=tf.constant([1.4, 1.6]))
    gmu = Normal(loc=tf.constant([[1.0, 1.0], [-1.0, -1.0]]),
                 scale=tf.constant([[0.5, 0.5], [0.5, 0.5]]))
    gsigma = InverseGamma(concentration=tf.constant([[1.1, 1.1], [1.1, 1.1]]),
                          rate=tf.constant([[1.0, 1.0], [1.0, 1.0]]))
    gc = Categorical(logits=tf.zeros([FLAGS.N, FLAGS.K]))

    inference = ed.MetropolisHastings(
        latent_vars={pi: qpi, mu: qmu, sigma: qsigma, c: qc},
        proposal_vars={pi: gpi, mu: gmu, sigma: gsigma, c: gc},
        data={x: x_data})

    inference.initialize()

    sess = ed.get_session()
    tf.global_variables_initializer().run()

    for _ in range(inference.n_iter):
        info_dict = inference.update()
        inference.print_progress(info_dict)

        t = info_dict['t']
        if t == 1 or t % inference.n_print == 0:
            qpi_mean, qmu_mean = sess.run([qpi.mean(), qmu.mean()])
            print("")
            print("Inferred membership probabilities:")
            print(qpi_mean)
            print("Inferred cluster means:")
            print(qmu_mean)

Author: JoyceYa, Project: edward, Lines: 59, Source: mixture_gaussian_mh.py
Example 17: fc

def fc(inputs, w_shape, b_shape):
    weight = tf.get_variable("weights",
                             w_shape,
                             initializer=tf.zeros_initializer(tf.float32))
    bias = tf.get_variable("bias",
                           b_shape,
                           initializer=tf.zeros_initializer(tf.float32))
    return tf.matmul(inputs, weight) + bias

Author: q64545, Project: x-deeplearning, Lines: 8, Source: mnist.py
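One caution, not from the original: zero-initializing the weights (not just the bias) is only safe for a single linear layer such as softmax regression on MNIST; in a stack of layers it keeps all hidden units identical. If this fc were stacked, a hedged variant might look like the following sketch (fc_glorot is a hypothetical name):

# Hypothetical variant with randomly initialized weights; glorot_uniform is
# the usual TF 1.x default for dense layers.
def fc_glorot(inputs, w_shape, b_shape, scope):
    with tf.variable_scope(scope):
        weight = tf.get_variable("weights", w_shape,
                                 initializer=tf.glorot_uniform_initializer())
        bias = tf.get_variable("bias", b_shape,
                               initializer=tf.zeros_initializer())
        return tf.matmul(inputs, weight) + bias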
Example 18: evaluate_precision_recall

def evaluate_precision_recall(
    input_, labels, threshold=0.5, per_example_weights=None, name=PROVIDED, phase=Phase.train
):
    """Computes the precision and recall of the prediction vs the labels.
    Args:
        input_: A rank 2 Tensor or a Pretty Tensor holding the result of the model.
        labels: The target labels to learn as a float tensor.
        threshold: The threshold to use to decide if the prediction is true.
        per_example_weights: A Tensor with a weight per example.
        name: An optional name.
        phase: The phase of this model; non-training phases compute a total across
            all examples.
    Returns:
        Precision and Recall.
    """
    _ = name  # Eliminate warning; name is used for name-scoping by PT.
    selected, sum_retrieved, sum_relevant = _compute_precision_recall(input_, labels, threshold, per_example_weights)

    if phase != Phase.train:
        dtype = tf.float32
        # Create the variables in all cases so that the load logic is easier.
        relevant_count = tf.get_variable(
            "relevant_count",
            [],
            dtype,
            tf.zeros_initializer(),
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        retrieved_count = tf.get_variable(
            "retrieved_count",
            [],
            dtype,
            tf.zeros_initializer(),
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        selected_count = tf.get_variable(
            "selected_count",
            [],
            dtype,
            tf.zeros_initializer(),
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        with input_.g.device(selected_count.device):
            selected = tf.assign_add(selected_count, selected)
        with input_.g.device(retrieved_count.device):
            sum_retrieved = tf.assign_add(retrieved_count, sum_retrieved)
        with input_.g.device(relevant_count.device):
            sum_relevant = tf.assign_add(relevant_count, sum_relevant)

    return (
        tf.where(tf.equal(sum_retrieved, 0), tf.zeros_like(selected), selected / sum_retrieved),
        tf.where(tf.equal(sum_relevant, 0), tf.zeros_like(selected), selected / sum_relevant),
    )

Author: google, Project: prettytensor, Lines: 58, Source: pretty_tensor_loss_methods.py
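To make the return value concrete: selected counts true positives, retrieved counts predicted positives, and relevant counts actual positives, so the guarded divisions amount to the following plain-Python restatement (not part of the library's API):

def streaming_precision_recall(selected, retrieved, relevant):
    # Mirrors the tf.where guards above: the metric is defined as 0 when the
    # denominator is 0, instead of dividing by zero.
    precision = selected / retrieved if retrieved else 0.0
    recall = selected / relevant if relevant else 0.0
    return precision, recall

# e.g. streaming_precision_recall(30, 40, 50) -> (0.75, 0.6)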
Example 19: mnist_model

def mnist_model(train_data_flat, train_labels, x0):
    """Creates a simple linear model that evaluates cross-entropy loss and
    gradient on the MNIST dataset. Mirrors the 'linear' model from train-on-mnist.lua.
    The result is a Python callable that accepts an ITensor parameter vector and
    returns ITensor loss and gradient.
    """
    # batchSize = 60000
    batchSize = 1
    x_size = 10
    x_offset = 512

    # reshape the flat parameter vector into W and b parameter matrices
    x_placeholder, param = tf.get_session_tensor(x0.tf_handle, x0.dtype)
    W_flat = tf.slice(param, [0], [x_size * 10])
    W = tf.reshape(W_flat, [x_size, 10])
    b_flat = tf.slice(param, [x_size * 10], [10])
    b = tf.reshape(b_flat, [1, 10])

    # create the model (note: zeros_initializer taking a shape is the pre-1.0
    # TensorFlow API; in TF 1.x you would write tf.zeros(...) instead)
    data = tf.Variable(tf.zeros_initializer((batchSize, x_size), dtype=dtype))
    targets = tf.Variable(tf.zeros_initializer((batchSize, x_size), dtype=dtype))
    logits = tf.matmul(data, W) + b
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, targets)

    # create loss and gradient ops
    cross_entropy_loss = tf.reduce_mean(cross_entropy)
    Wnorm = tf.reduce_sum(tf.square(W))
    bnorm = tf.reduce_sum(tf.square(b))
    loss = cross_entropy_loss + (bnorm + Wnorm) / 2
    [grad] = tf.gradients(loss, [param])

    # get handle ops that will be used to initialize ITensors
    loss_handle_tensor = tf.get_session_handle(loss)
    grad_handle_tensor = tf.get_session_handle(grad)

    # initialize data and targets
    data_placeholder = tf.placeholder(dtype=dtype)
    data_init = data.assign(data_placeholder)
    labels_placeholder = tf.placeholder(shape=(batchSize), dtype=tf.int32)
    labels_onehot = tf.one_hot(labels_placeholder - 1, 10, dtype=dtype)
    targets_init = targets.assign(labels_onehot)
    sess.run(data_init, feed_dict={data_placeholder: train_data_flat[:batchSize, x_offset:x_offset + x_size]})
    sess.run(targets_init, feed_dict={labels_placeholder: train_labels[:batchSize]})

    # Create our callable that works on persistent Tensors
    def eval_model(x):
        loss_handle, grad_handle = sess.run([loss_handle_tensor,
                                             grad_handle_tensor],
                                            feed_dict={x_placeholder: x.tf_handle})
        return [env.handle_to_itensor(loss_handle),
                env.handle_to_itensor(grad_handle)]

    return eval_model

Author: yaroslavvb, Project: lbfgs, Lines: 56, Source: lbfgs_small.py
Example 20: _batch_norm_without_layers

def _batch_norm_without_layers(self, input_layer, decay, use_scale,
                               epsilon):
    """Batch normalization on `input_layer` without tf.layers."""
    shape = input_layer.shape
    num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
    beta = self.get_variable(
        'beta', [num_channels],
        tf.float32,
        tf.float32,
        initializer=tf.zeros_initializer())
    if use_scale:
        gamma = self.get_variable(
            'gamma', [num_channels],
            tf.float32,
            tf.float32,
            initializer=tf.ones_initializer())
    else:
        gamma = tf.constant(1.0, tf.float32, [num_channels])
    moving_mean = tf.get_variable(
        'moving_mean', [num_channels],
        tf.float32,
        initializer=tf.zeros_initializer(),
        trainable=False)
    moving_variance = tf.get_variable(
        'moving_variance', [num_channels],
        tf.float32,
        initializer=tf.ones_initializer(),
        trainable=False)
    if self.phase_train:
        bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
            input_layer,
            gamma,
            beta,
            epsilon=epsilon,
            data_format=self.data_format,
            is_training=True)
        mean_update = moving_averages.assign_moving_average(
            moving_mean, batch_mean, decay=decay, zero_debias=False)
        variance_update = moving_averages.assign_moving_average(
            moving_variance,
            batch_variance,
            decay=decay,
            zero_debias=False)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
    else:
        bn, _, _ = tf.nn.fused_batch_norm(
            input_layer,
            gamma,
            beta,
            mean=moving_mean,
            variance=moving_variance,
            epsilon=epsilon,
            data_format=self.data_format,
            is_training=False)
    return bn

Author: jamescasbon, Project: ray, Lines: 56, Source: convnet_builder.py
Note: the tensorflow.zeros_initializer examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Consult each project's license before distributing or using the code; do not reproduce without permission.