This page collects typical usage examples of the Python function tensorflow.ones_initializer. If you are wondering what ones_initializer does, how to call it, or what it looks like in real code, the curated examples below may help.
In total, 20 code examples of ones_initializer are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python examples.
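Before the collected examples, here is a minimal, self-contained sketch of the basic pattern, assuming the TF 1.x API used throughout this page: tf.ones_initializer() is passed as the initializer of tf.get_variable (or a layer's *_initializer argument), and the variable is filled with ones when the initializer runs. The variable name scale is illustrative.

import tensorflow as tf

# A [3, 4] float32 variable whose initial value is all ones.
scale = tf.get_variable("scale", shape=[3, 4], dtype=tf.float32,
                        initializer=tf.ones_initializer())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(scale))  # prints a 3x4 matrix of ones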
Example 1: bacthnorm
def bacthnorm(inputs, scope, epsilon=1e-05, momentum=0.99, is_training=True):
    inputs_shape = inputs.get_shape().as_list()  # static shape of the input
    params_shape = inputs_shape[-1:]  # per-channel parameter shape (last dimension)
    axis = list(range(len(inputs_shape) - 1))
    with tf.variable_scope(scope):
        beta = create_variable("beta", params_shape,
                               initializer=tf.zeros_initializer())
        gamma = create_variable("gamma", params_shape,
                                initializer=tf.ones_initializer())
        # moving mean: not trainable, kept for inference
        moving_mean = create_variable("moving_mean", params_shape,
                                      initializer=tf.zeros_initializer(), trainable=False)
        # moving variance: not trainable, kept for inference
        moving_variance = create_variable("moving_variance", params_shape,
                                          initializer=tf.ones_initializer(), trainable=False)
        if is_training:
            mean, variance = tf.nn.moments(inputs, axes=axis)  # batch mean and variance
            # exponential moving average: moving = momentum * moving + (1 - momentum) * current
            update_move_mean = moving_averages.assign_moving_average(moving_mean,
                                                                     mean, decay=momentum)
            update_move_variance = moving_averages.assign_moving_average(moving_variance,
                                                                         variance, decay=momentum)
            tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_mean)
            tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_variance)
        else:
            mean, variance = moving_mean, moving_variance
    return tf.nn.batch_normalization(inputs, mean, variance, beta, gamma, epsilon)
Developer: dyz-zju | Project: MVision | Lines: 28 | Source: MobileNet_tf.py
Example 2: batch_norm
def batch_norm(x, decay=0.999, epsilon=1e-03, is_training=True,
               scope="scope"):
    x_shape = x.get_shape()
    num_inputs = x_shape[-1]
    reduce_dims = list(range(len(x_shape) - 1))
    with tf.variable_scope(scope):
        beta = create_var("beta", [num_inputs,],
                          initializer=tf.zeros_initializer())
        gamma = create_var("gamma", [num_inputs,],
                           initializer=tf.ones_initializer())
        # for inference
        moving_mean = create_var("moving_mean", [num_inputs,],
                                 initializer=tf.zeros_initializer(),
                                 trainable=False)
        moving_variance = create_var("moving_variance", [num_inputs],
                                     initializer=tf.ones_initializer(),
                                     trainable=False)
        if is_training:
            mean, variance = tf.nn.moments(x, axes=reduce_dims)
            update_move_mean = moving_averages.assign_moving_average(moving_mean,
                                                                     mean, decay=decay)
            update_move_variance = moving_averages.assign_moving_average(moving_variance,
                                                                         variance, decay=decay)
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_mean)
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_variance)
        else:
            mean, variance = moving_mean, moving_variance
    return tf.nn.batch_normalization(x, mean, variance, beta, gamma, epsilon)
Developer: kaka7 | Project: DeepLearning_tutorials | Lines: 28 | Source: ResNet50.py
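The two batch-norm helpers above only add the moving-average update ops to a collection; the caller still has to run them. A minimal sketch of the usual TF 1.x pattern follows; the loss tensor and the choice of optimizer are illustrative, and for Example 1 the custom UPDATE_OPS_COLLECTION key would be used in place of tf.GraphKeys.UPDATE_OPS.

# Make the training step depend on the collected moving-average updates.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)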
Example 3: _batch_norm_without_layers
def _batch_norm_without_layers(self, input_layer, decay, use_scale,
                               epsilon):
    """Batch normalization on `input_layer` without tf.layers."""
    shape = input_layer.shape
    num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
    beta = self.get_variable(
        'beta', [num_channels],
        tf.float32,
        tf.float32,
        initializer=tf.zeros_initializer())
    if use_scale:
        gamma = self.get_variable(
            'gamma', [num_channels],
            tf.float32,
            tf.float32,
            initializer=tf.ones_initializer())
    else:
        gamma = tf.constant(1.0, tf.float32, [num_channels])
    moving_mean = tf.get_variable(
        'moving_mean', [num_channels],
        tf.float32,
        initializer=tf.zeros_initializer(),
        trainable=False)
    moving_variance = tf.get_variable(
        'moving_variance', [num_channels],
        tf.float32,
        initializer=tf.ones_initializer(),
        trainable=False)
    if self.phase_train:
        bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
            input_layer,
            gamma,
            beta,
            epsilon=epsilon,
            data_format=self.data_format,
            is_training=True)
        mean_update = moving_averages.assign_moving_average(
            moving_mean, batch_mean, decay=decay, zero_debias=False)
        variance_update = moving_averages.assign_moving_average(
            moving_variance,
            batch_variance,
            decay=decay,
            zero_debias=False)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
    else:
        bn, _, _ = tf.nn.fused_batch_norm(
            input_layer,
            gamma,
            beta,
            mean=moving_mean,
            variance=moving_variance,
            epsilon=epsilon,
            data_format=self.data_format,
            is_training=False)
    return bn
Developer: jamescasbon | Project: ray | Lines: 56 | Source: convnet_builder.py
Example 4: __init__
def __init__(self, size, eps=1e-2, default_clip_range=np.inf, sess=None):
    """A normalizer that ensures that observations are approximately distributed according to
    a standard Normal distribution (i.e. have mean zero and variance one).

    Args:
        size (int): the size of the observation to be normalized
        eps (float): a small constant that avoids underflows
        default_clip_range (float): normalized observations are clipped to be in
            [-default_clip_range, default_clip_range]
        sess (object): the TensorFlow session to be used
    """
    self.size = size
    self.eps = eps
    self.default_clip_range = default_clip_range
    self.sess = sess if sess is not None else tf.get_default_session()

    self.local_sum = np.zeros(self.size, np.float32)
    self.local_sumsq = np.zeros(self.size, np.float32)
    self.local_count = np.zeros(1, np.float32)

    self.sum_tf = tf.get_variable(
        initializer=tf.zeros_initializer(), shape=self.local_sum.shape, name='sum',
        trainable=False, dtype=tf.float32)
    self.sumsq_tf = tf.get_variable(
        initializer=tf.zeros_initializer(), shape=self.local_sumsq.shape, name='sumsq',
        trainable=False, dtype=tf.float32)
    self.count_tf = tf.get_variable(
        initializer=tf.ones_initializer(), shape=self.local_count.shape, name='count',
        trainable=False, dtype=tf.float32)
    self.mean = tf.get_variable(
        initializer=tf.zeros_initializer(), shape=(self.size,), name='mean',
        trainable=False, dtype=tf.float32)
    self.std = tf.get_variable(
        initializer=tf.ones_initializer(), shape=(self.size,), name='std',
        trainable=False, dtype=tf.float32)
    self.count_pl = tf.placeholder(name='count_pl', shape=(1,), dtype=tf.float32)
    self.sum_pl = tf.placeholder(name='sum_pl', shape=(self.size,), dtype=tf.float32)
    self.sumsq_pl = tf.placeholder(name='sumsq_pl', shape=(self.size,), dtype=tf.float32)

    self.update_op = tf.group(
        self.count_tf.assign_add(self.count_pl),
        self.sum_tf.assign_add(self.sum_pl),
        self.sumsq_tf.assign_add(self.sumsq_pl)
    )
    self.recompute_op = tf.group(
        tf.assign(self.mean, self.sum_tf / self.count_tf),
        tf.assign(self.std, tf.sqrt(tf.maximum(
            tf.square(self.eps),
            self.sumsq_tf / self.count_tf - tf.square(self.sum_tf / self.count_tf)
        ))),
    )
    self.lock = threading.Lock()
Developer: Divyankpandey | Project: baselines | Lines: 52 | Source: normalizer.py
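A rough usage sketch of this class, assuming the enclosing class is the baselines Normalizer shown above and that a session sess is already open; obs_dim and batch are illustrative names. The caller accumulates per-dimension sums via update_op and then refreshes mean/std with recompute_op.

# Hypothetical usage; shapes and names are illustrative.
norm = Normalizer(size=obs_dim, sess=sess)
sess.run(tf.global_variables_initializer())

batch = np.random.randn(64, obs_dim).astype(np.float32)
sess.run(norm.update_op, feed_dict={
    norm.count_pl: np.array([batch.shape[0]], dtype=np.float32),
    norm.sum_pl: batch.sum(axis=0),
    norm.sumsq_pl: np.square(batch).sum(axis=0),
})
sess.run(norm.recompute_op)  # mean = sum / count, std = sqrt(E[x^2] - E[x]^2)
mean, std = sess.run([norm.mean, norm.std])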
Example 5: initialize_model
def initialize_model(sess, train_data_flat, train_labels):
    """Reproduce model from train-on-mnist/mnist_lbfgs"""
    dtype = tf.float64
    batchSize = 100
    learningRate = 0.1

    W = tf.Variable(tf.ones_initializer((1024, 10), dtype=dtype))
    b = tf.Variable(tf.ones_initializer((1, 10), dtype=dtype))
    x = tf.Variable(tf.zeros_initializer((batchSize, 1024), dtype=dtype))
    targets = tf.Variable(tf.zeros_initializer((batchSize, 10), dtype=dtype))
    logits = tf.matmul(x, W) + b

    # cross entropy expects batch dimension to be first, transpose inputs
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, targets)
    cross_entropy_loss = tf.reduce_mean(cross_entropy)
    Wnorm = tf.reduce_sum(tf.square(W))
    bnorm = tf.reduce_sum(tf.square(b))
    loss = cross_entropy_loss + (bnorm + Wnorm)/2
    loss_handle_op = tf.get_session_handle(loss)

    # grads = tf.gradients(loss, [W, b])
    opt = tf.train.GradientDescentOptimizer(learning_rate=learningRate)
    grads_and_vars = opt.compute_gradients(loss, [W, b])
    train_step = opt.apply_gradients(grads_and_vars)

    W_grad = grads_and_vars[0][0]
    b_grad = grads_and_vars[1][0]
    flat_grad = concat_flatten([tf.transpose(W_grad), b_grad])
    flat_grad_handle_op = tf.get_session_handle(flat_grad)
    flat_params = concat_flatten([tf.transpose(W), b])

    # initialize x and targets
    x_placeholder = tf.placeholder(dtype=dtype)
    x_init = x.assign(x_placeholder)

    # initialize labels
    labels_placeholder = tf.placeholder(shape=(batchSize), dtype=tf.int32)
    # Lua labels are off-by-one hence -1
    labels_onehot = tf.one_hot(labels_placeholder - 1, 10, dtype=dtype)
    targets_init = targets.assign(labels_onehot)

    sess.run(x_init, feed_dict={x_placeholder: train_data_flat[:batchSize]})
    sess.run(targets_init, feed_dict={labels_placeholder:
                                      train_labels[:batchSize]})
    sess.run([W.initializer, b.initializer])
    [(Wgrad, W), (bgrad, b)] = grads_and_vars
    return [loss, loss_handle_op, flat_params, flat_grad, flat_grad_handle_op,
            W, b, train_step]
Developer: yaroslavvb | Project: lbfgs | Lines: 49 | Source: lbfgs_reproduce.py
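Note that Example 5 is written against a very old (pre-1.0) TensorFlow API in which tf.ones_initializer(shape, dtype) returned a tensor directly. In TF 1.x, ones_initializer is an initializer class and the shape comes from tf.get_variable, so a rough equivalent of the first two variables would look like the sketch below (a sketch only, assuming the TF 1.x API).

# TF 1.x style equivalents of the legacy calls above.
W = tf.get_variable("W", shape=[1024, 10], dtype=tf.float64,
                    initializer=tf.ones_initializer())
b = tf.get_variable("b", shape=[1, 10], dtype=tf.float64,
                    initializer=tf.ones_initializer())
# or, without get_variable:
W2 = tf.Variable(tf.ones([1024, 10], dtype=tf.float64), name="W2")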
Example 6: _network_template
def _network_template(self, state):
    # This dummy network allows us to deterministically anticipate that
    # action 0 will be selected by an argmax.
    inputs = tf.constant(
        np.zeros((state.shape[0], stack_size)), dtype=tf.float32)

    # In Rainbow we are dealing with a distribution over Q-values,
    # which are represented as num_atoms bins, ranging from -vmax to vmax.
    # The output layer will have num_actions * num_atoms elements,
    # so each group of num_atoms weights represent the logits for a
    # particular action. By setting 1s everywhere, except for the first
    # num_atoms (representing the logits for the first action), which are
    # set to np.arange(num_atoms), we are ensuring that the first action
    # places higher weight on higher Q-values; this results in the first
    # action being chosen.
    first_row = np.tile(np.ones(self._num_atoms), self.num_actions - 1)
    first_row = np.concatenate((np.arange(self._num_atoms), first_row))
    bottom_rows = np.tile(
        np.ones(self.num_actions * self._num_atoms), (stack_size - 1, 1))
    weights_initializer = np.concatenate(([first_row], bottom_rows))

    net = slim.fully_connected(
        inputs,
        self.num_actions * self._num_atoms,
        weights_initializer=tf.constant_initializer(weights_initializer),
        biases_initializer=tf.ones_initializer(),
        activation_fn=None)
    logits = tf.reshape(net, [-1, self.num_actions, self._num_atoms])
    probabilities = tf.contrib.layers.softmax(logits)
    qs = tf.reduce_sum(self._support * probabilities, axis=2)
    return self._get_network_type()(qs, logits, probabilities)
Developer: veronicachelu | Project: dopamine | Lines: 29 | Source: rainbow_agent_test.py
Example 7: test_basic_rnn_cell
def test_basic_rnn_cell(self):
    """see test_basic_rnn_cell.png for the graph"""
    batch_size = 1
    input_shape = [batch_size, 2]
    state_shape = [batch_size, 3]
    num_units = 4  # should be equal to state_shape[1] to be recurrent

    input_value = np.random.rand(*input_shape)
    state_value = np.random.rand(*state_shape)
    np_result = TestRNNCells._basic_linear(input_value, state_value, num_units)

    with tf.Session() as sess:
        with tf.variable_scope('test_basic_rnn_cell', initializer=tf.ones_initializer()):
            inputs = tf.placeholder(tf.float32, input_shape, 'inputs')
            prev_state = tf.placeholder(tf.float32, state_shape, 'prev_state')
            cell = tf.contrib.rnn.BasicRNNCell(num_units)
            output_op, new_state_op = cell(inputs, prev_state)
            self.assertIsInstance(output_op, tf.Tensor)

            tf.summary.FileWriter('/tmp/test_basic_rnn_cell', sess.graph)
            sess.run(tf.global_variables_initializer())
            output, new_state = sess.run([output_op, new_state_op],
                                         feed_dict={
                                             inputs: input_value,
                                             prev_state: state_value
                                         })
            self.assertIsInstance(output, np.ndarray)
            self.assertEqual(output.shape, (batch_size, num_units))
            self.assertTrue(np.array_equal(output, new_state))
            np.testing.assert_array_almost_equal(np_result, output)
Developer: ninotoshi | Project: playground | Lines: 34 | Source: test_rnn.py
Example 8: get_logits
def get_logits(self, image):
    gauss_init = tf.random_normal_initializer(stddev=0.01)
    with argscope(Conv2D,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.)), \
            argscope([Conv2D, FullyConnected], activation=tf.nn.relu), \
            argscope([Conv2D, MaxPooling], data_format='channels_last'):
        # necessary padding to get 55x55 after conv1
        image = tf.pad(image, [[0, 0], [2, 2], [2, 2], [0, 0]])
        l = Conv2D('conv1', image, filters=96, kernel_size=11, strides=4, padding='VALID')
        # size: 55
        visualize_conv1_weights(l.variables.W)
        l = tf.nn.lrn(l, 2, bias=1.0, alpha=2e-5, beta=0.75, name='norm1')
        l = MaxPooling('pool1', l, 3, strides=2, padding='VALID')
        # 27
        l = Conv2D('conv2', l, filters=256, kernel_size=5, split=2)
        l = tf.nn.lrn(l, 2, bias=1.0, alpha=2e-5, beta=0.75, name='norm2')
        l = MaxPooling('pool2', l, 3, strides=2, padding='VALID')
        # 13
        l = Conv2D('conv3', l, filters=384, kernel_size=3)
        l = Conv2D('conv4', l, filters=384, kernel_size=3, split=2)
        l = Conv2D('conv5', l, filters=256, kernel_size=3, split=2)
        l = MaxPooling('pool3', l, 3, strides=2, padding='VALID')

        l = FullyConnected('fc6', l, 4096,
                           kernel_initializer=gauss_init,
                           bias_initializer=tf.ones_initializer())
        l = Dropout(l, rate=0.5)
        l = FullyConnected('fc7', l, 4096, kernel_initializer=gauss_init)
        l = Dropout(l, rate=0.5)
        logits = FullyConnected('fc8', l, 1000, kernel_initializer=gauss_init)
    return logits
Developer: quanlzheng | Project: tensorpack | Lines: 31 | Source: alexnet.py
Example 9: layer_norm
def layer_norm(x: tf.Tensor, epsilon: float = 1e-6) -> tf.Tensor:
    """Layer normalize the tensor x, averaging over the last dimension.

    Implementation based on tensor2tensor.

    Arguments:
        x: The ``Tensor`` to normalize.
        epsilon: The smoothing parameter of the normalization.

    Returns:
        The normalized tensor.
    """
    with tf.variable_scope("LayerNorm"):
        gamma = get_variable(
            name="gamma",
            shape=[x.get_shape()[-1]],
            dtype=tf.float32,
            initializer=tf.ones_initializer())
        beta = get_variable(
            name="beta",
            shape=[x.get_shape()[-1]],
            dtype=tf.float32,
            initializer=tf.zeros_initializer())

        mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
        variance = tf.reduce_mean(
            tf.square(x - mean),
            axis=[-1],
            keepdims=True)
        norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
        return norm_x * gamma + beta
Developer: ufal | Project: neuralmonkey | Lines: 31 | Source: tf_utils.py
Example 10: batch_norm
def batch_norm(inputs, name_scope, is_training, epsilon=1e-3, decay=0.99):
    with tf.variable_scope(name_scope):
        size = inputs.get_shape().as_list()[1]

        gamma = tf.get_variable(
            'gamma', [size], initializer=tf.constant_initializer(0.1))
        # beta = tf.get_variable('beta', [size], initializer=tf.constant_initializer(0))
        beta = tf.get_variable('beta', [size])

        pop_mean = tf.get_variable('pop_mean', [size],
                                   initializer=tf.zeros_initializer(), trainable=False)
        pop_var = tf.get_variable('pop_var', [size],
                                  initializer=tf.ones_initializer(), trainable=False)
        batch_mean, batch_var = tf.nn.moments(inputs, [0])

        train_mean_op = tf.assign(
            pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_var_op = tf.assign(
            pop_var, pop_var * decay + batch_var * (1 - decay))

        def batch_statistics():
            with tf.control_dependencies([train_mean_op, train_var_op]):
                return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, gamma, epsilon)

        def pop_statistics():
            return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, gamma, epsilon)

        # control flow
        return tf.cond(is_training, batch_statistics, pop_statistics)
Developer: seasky100 | Project: tensorflow_end2end_speech_recognition | Lines: 29 | Source: batch_normalization.py
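Because is_training is consumed by tf.cond in this example, it must be a boolean tensor rather than a Python bool. A minimal, hypothetical usage sketch follows; features, train_op, outputs, and sess are illustrative names.

is_training = tf.placeholder(tf.bool, shape=[], name='is_training')
normalized = batch_norm(features, 'bn1', is_training)
# ... build the rest of the model on `normalized` ...
sess.run(train_op, feed_dict={is_training: True})   # batch statistics + moving-average updates
sess.run(outputs, feed_dict={is_training: False})   # population statistics for inference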
Example 11: conv2d_zeros
def conv2d_zeros(x,
                 width,
                 filter_size=[3, 3],
                 stride=[1, 1],
                 pad="SAME",
                 logscale_factor=3,
                 skip=1,
                 edge_bias=True,
                 name=None):
    with tf.variable_scope(name, "conv2d"):
        if edge_bias and pad == "SAME":
            x = add_edge_padding(x, filter_size)
            pad = 'VALID'

        n_in = int(x.get_shape()[3])
        stride_shape = [1] + stride + [1]
        filter_shape = filter_size + [n_in, width]
        w = tf.get_variable("W", filter_shape, tf.float32,
                            initializer=tf.zeros_initializer())
        if skip == 1:
            x = tf.nn.conv2d(x, w, stride_shape, pad, data_format='NHWC')
        else:
            assert stride[0] == 1 and stride[1] == 1
            x = tf.nn.atrous_conv2d(x, w, skip, pad)
        x += tf.get_variable("b", [1, 1, 1, width],
                             initializer=tf.ones_initializer())
        x *= tf.exp(tf.get_variable("logs",
                                    [1, width], initializer=tf.zeros_initializer()) * logscale_factor)
    return x
Developer: gdahia | Project: DLF | Lines: 29 | Source: ops.py
Example 12: batch_norm
def batch_norm(x, name_scope, training, epsilon=1e-3, decay=0.999):
    """Assume 2d [batch, values] tensor"""
    with tf.variable_scope(name_scope):
        size = x.get_shape().as_list()[1]

        scale = tf.get_variable('scale', [size],
                                initializer=tf.constant_initializer(0.1))
        offset = tf.get_variable('offset', [size])

        pop_mean = tf.get_variable('pop_mean', [size],
                                   initializer=tf.zeros_initializer(),
                                   trainable=False)
        pop_var = tf.get_variable('pop_var', [size],
                                  initializer=tf.ones_initializer(),
                                  trainable=False)
        batch_mean, batch_var = tf.nn.moments(x, [0])

        train_mean_op = tf.assign(
            pop_mean,
            pop_mean * decay + batch_mean * (1 - decay))
        train_var_op = tf.assign(
            pop_var,
            pop_var * decay + batch_var * (1 - decay))

        def batch_statistics():
            with tf.control_dependencies([train_mean_op, train_var_op]):
                return tf.nn.batch_normalization(x, batch_mean, batch_var, offset, scale, epsilon)

        def population_statistics():
            return tf.nn.batch_normalization(x, pop_mean, pop_var, offset, scale, epsilon)

        return tf.cond(training, batch_statistics, population_statistics)
Developer: siddrtm | Project: hierarchical-attention-networks | Lines: 33 | Source: bn_lstm.py
Example 13: call
def call(self, x, h):
    channels = x.shape[self._feature_axis].value

    with tf.variable_scope('gates'):
        inputs = tf.concat([x, h], axis=self._feature_axis)
        n = channels + self._filters
        m = 2 * self._filters if self._filters > 1 else 2
        W = tf.get_variable('kernel', self._kernel + [n, m])
        y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
        if self._normalize:
            r, u = tf.split(y, 2, axis=self._feature_axis)
            r = tf.contrib.layers.layer_norm(r)
            u = tf.contrib.layers.layer_norm(u)
        else:
            y += tf.get_variable('bias', [m], initializer=tf.ones_initializer())
            r, u = tf.split(y, 2, axis=self._feature_axis)
        r, u = tf.sigmoid(r), tf.sigmoid(u)

        # TODO
        #tf.summary.histogram('reset_gate', r)
        #tf.summary.histogram('update_gate', u)

    with tf.variable_scope('candidate'):
        inputs = tf.concat([x, r * h], axis=self._feature_axis)
        n = channels + self._filters
        m = self._filters
        W = tf.get_variable('kernel', self._kernel + [n, m])
        y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
        if self._normalize:
            y = tf.contrib.layers.layer_norm(y)
        else:
            y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
        h = u * h + (1 - u) * self._activation(y)

    return h, h
Developer: ascenoputing | Project: SemanticSegmentation_DL | Lines: 35 | Source: ConvLSTM_Cell.py
Example 14: __init__
def __init__(self, name, inputs, training, data_format, start=None, end=None, weights=None,
             weight_scope=None, fake=False):
    super(BatchNorm, self).__init__(name=name, start=start, end=end)
    self.fake = fake
    if not self.fake:
        if weights is not None:
            params_name = weight_scope + '/' + str(name) + '/batch_normalization/'
            np_dict = load_pkl_obj(weights)
            beta_np = np_dict[params_name + 'beta:0']
            gamma_np = np_dict[params_name + 'gamma:0']
            moving_mean_np = np_dict[params_name + 'moving_mean:0']
            moving_variance_np = np_dict[params_name + 'moving_variance:0']
            in_shp = inputs.shape.as_list()[1]
            if not beta_np.shape[0] == in_shp:
                beta_np = np.resize(beta_np, (in_shp,))
                gamma_np = np.resize(gamma_np, (in_shp,))
                moving_mean_np = np.resize(moving_mean_np, (in_shp))
                moving_variance_np = np.resize(moving_variance_np, (in_shp))
            beta_initializer = tf.constant_initializer(beta_np)
            gamma_initializer = tf.constant_initializer(gamma_np)
            moving_mean_initializer = tf.constant_initializer(moving_mean_np)
            moving_variance_initializer = tf.constant_initializer(moving_variance_np)
        else:
            beta_initializer = tf.zeros_initializer()
            gamma_initializer = tf.ones_initializer()
            moving_mean_initializer = tf.zeros_initializer()
            moving_variance_initializer = tf.ones_initializer()
        with tf.variable_scope(self._name):
            self.output = tf.layers.batch_normalization(
                inputs=inputs,
                axis=1 if data_format == 'channels_first' else 3,
                momentum=_BATCH_NORM_DECAY,
                epsilon=_BATCH_NORM_EPSILON,
                center=True,
                scale=True,
                training=training,
                beta_initializer=beta_initializer,
                gamma_initializer=gamma_initializer,
                moving_mean_initializer=moving_mean_initializer,
                moving_variance_initializer=moving_variance_initializer,
                fused=True)
        self._tf_name = self.output.name.split('/')[0] + '/' + self.output.name.split('/')[1]
    else:
        assert isinstance(inputs, Fake)
        self.output = Fake(inputs.shape)
        self.param = Fake(inputs.shape[1] * 4)
    self.description.append('BatchNorm')
    self.description.append(self.get_memory_footprint())
Developer: FNDaily | Project: amazon-sagemaker-examples | Lines: 47 | Source: batch_norm.py
Example 15: main
def main(_):
    ed.set_seed(42)

    # DATA
    x_data = build_toy_dataset(FLAGS.N)

    # MODEL
    pi = Dirichlet(concentration=tf.ones(FLAGS.K))
    mu = Normal(0.0, 1.0, sample_shape=[FLAGS.K, FLAGS.D])
    sigma = InverseGamma(concentration=1.0, rate=1.0,
                         sample_shape=[FLAGS.K, FLAGS.D])
    c = Categorical(logits=tf.log(pi) - tf.log(1.0 - pi), sample_shape=FLAGS.N)
    x = Normal(loc=tf.gather(mu, c), scale=tf.gather(sigma, c))

    # INFERENCE
    qpi = Empirical(params=tf.get_variable(
        "qpi/params",
        [FLAGS.T, FLAGS.K],
        initializer=tf.constant_initializer(1.0 / FLAGS.K)))
    qmu = Empirical(params=tf.get_variable("qmu/params",
                                           [FLAGS.T, FLAGS.K, FLAGS.D],
                                           initializer=tf.zeros_initializer()))
    qsigma = Empirical(params=tf.get_variable("qsigma/params",
                                              [FLAGS.T, FLAGS.K, FLAGS.D],
                                              initializer=tf.ones_initializer()))
    qc = Empirical(params=tf.get_variable("qc/params",
                                          [FLAGS.T, FLAGS.N],
                                          initializer=tf.zeros_initializer(),
                                          dtype=tf.int32))

    gpi = Dirichlet(concentration=tf.constant([1.4, 1.6]))
    gmu = Normal(loc=tf.constant([[1.0, 1.0], [-1.0, -1.0]]),
                 scale=tf.constant([[0.5, 0.5], [0.5, 0.5]]))
    gsigma = InverseGamma(concentration=tf.constant([[1.1, 1.1], [1.1, 1.1]]),
                          rate=tf.constant([[1.0, 1.0], [1.0, 1.0]]))
    gc = Categorical(logits=tf.zeros([FLAGS.N, FLAGS.K]))

    inference = ed.MetropolisHastings(
        latent_vars={pi: qpi, mu: qmu, sigma: qsigma, c: qc},
        proposal_vars={pi: gpi, mu: gmu, sigma: gsigma, c: gc},
        data={x: x_data})

    inference.initialize()

    sess = ed.get_session()
    tf.global_variables_initializer().run()

    for _ in range(inference.n_iter):
        info_dict = inference.update()
        inference.print_progress(info_dict)

        t = info_dict['t']
        if t == 1 or t % inference.n_print == 0:
            qpi_mean, qmu_mean = sess.run([qpi.mean(), qmu.mean()])
            print("")
            print("Inferred membership probabilities:")
            print(qpi_mean)
            print("Inferred cluster means:")
            print(qmu_mean)
Developer: JoyceYa | Project: edward | Lines: 59 | Source: mixture_gaussian_mh.py
Example 16: make_params
def make_params():
    params_size = 250 * 1000 * FLAGS.data_mb  # 1MB is 250k integers
    dtype = tf.int32
    ps_device = get_ps_device(0)
    with tf.device(ps_device):
        params = tf.get_variable("params", [params_size], dtype,
                                 initializer=tf.ones_initializer())
    return params
Developer: yaroslavvb | Project: stuff | Lines: 8 | Source: async_adder.py
Example 17: build
def build(self, _):
    self.scale = tf.get_variable("layer_norm_scale", [self.hidden_size],
                                 initializer=tf.ones_initializer(dtype=tf.float32),
                                 dtype=tf.float32)
    self.bias = tf.get_variable("layer_norm_bias", [self.hidden_size],
                                initializer=tf.zeros_initializer(dtype=tf.float32),
                                dtype=tf.float32)
    self.built = True
Developer: fotwo | Project: OpenSeq2Seq | Lines: 8 | Source: common.py
Example 18: create_graph
def create_graph(device0, device1):
    """Create graph that keeps var1 on device0, var2 on device1 and adds them"""
    tf.reset_default_graph()
    dtype = tf.int32
    params_size = 250 * 1000 * FLAGS.data_mb  # 1MB is 250k integers

    with tf.device(device0):
        var1 = tf.get_variable("var1", [params_size], dtype,
                               initializer=tf.ones_initializer())
    with tf.device(device1):
        var2 = tf.get_variable("var2", [params_size], dtype,
                               initializer=tf.ones_initializer())
        add_op = var1.assign_add(var2)

    init_op = tf.global_variables_initializer()
    return init_op, add_op
Developer: yaroslavvb | Project: stuff | Lines: 17 | Source: benchmark_grpc_recv.py
Example 19: bn
def bn(x, c):
    x_shape = x.get_shape()
    params_shape = x_shape[-1:]

    if c['use_bias']:
        bias = _get_variable('bias', params_shape,
                             initializer=tf.zeros_initializer)
        return x + bias

    axis = list(range(len(x_shape) - 1))

    beta = _get_variable('beta',
                         params_shape,
                         initializer=tf.zeros_initializer)
    gamma = _get_variable('gamma',
                          params_shape,
                          initializer=tf.ones_initializer())

    moving_mean = _get_variable('moving_mean',
                                params_shape,
                                initializer=tf.zeros_initializer,
                                trainable=False)
    moving_variance = _get_variable('moving_variance',
                                    params_shape,
                                    initializer=tf.ones_initializer(),
                                    trainable=False)

    # These ops will only be performed when training.
    mean, variance = tf.nn.moments(x, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean,
                                                               mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(
        moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

    mean, variance = control_flow_ops.cond(
        c['is_training'], lambda: (mean, variance),
        lambda: (moving_mean, moving_variance))

    x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
    #x.set_shape(inputs.get_shape()) ??

    return x
Developer: UCBerkeleySETI | Project: breakthrough | Lines: 45 | Source: WRN_ops.py
Example 20: layer_norm
def layer_norm(x, nmaps, prefix, epsilon=1e-5):
    """Layer normalize the 4D tensor x, averaging over the last dimension."""
    with tf.variable_scope(prefix):
        scale = tf.get_variable("layer_norm_scale", [nmaps],
                                initializer=tf.ones_initializer())
        bias = tf.get_variable("layer_norm_bias", [nmaps],
                               initializer=tf.zeros_initializer())
        mean, variance = tf.nn.moments(x, [3], keep_dims=True)
        norm_x = (x - mean) / tf.sqrt(variance + epsilon)
        return norm_x * scale + bias
Developer: Jmq14 | Project: models | Lines: 10 | Source: neural_gpu.py
Note: the tensorflow.ones_initializer examples on this page were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many different authors; copyright of the source code remains with the original authors, and distribution or use should follow each project's License. Do not reproduce without permission.