This article compiles typical usage examples of the Python function tensorflow.random_normal_initializer. If you are wondering what random_normal_initializer does, how to use it, or want to see concrete examples, the curated code samples below should help.
The following presents 20 code examples of the random_normal_initializer function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
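Before diving into the examples, here is a minimal sketch of the typical pattern (assuming a TensorFlow 1.x environment; the variable name and shape are illustrative):

import tensorflow as tf

# Build an initializer that samples from N(mean=0.0, stddev=0.1)
init = tf.random_normal_initializer(mean=0.0, stddev=0.1)

# Typical usage: pass it to tf.get_variable, or to the kernel_initializer /
# bias_initializer arguments of tf.layers.dense and similar layers
w = tf.get_variable("w", shape=[784, 128], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w).std())  # should be close to 0.1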
Example 1: bpr_mf
def bpr_mf(user_count, item_count, hidden_dim):
    u = tf.placeholder(tf.int32, [None])
    i = tf.placeholder(tf.int32, [None])
    j = tf.placeholder(tf.int32, [None])

    user_emb_w = tf.get_variable("user_emb_w", [user_count + 1, hidden_dim],
                                 initializer=tf.random_normal_initializer(0, 0.1))
    item_emb_w = tf.get_variable("item_emb_w", [item_count + 1, hidden_dim],
                                 initializer=tf.random_normal_initializer(0, 0.1))

    u_emb = tf.nn.embedding_lookup(user_emb_w, u)
    i_emb = tf.nn.embedding_lookup(item_emb_w, i)
    j_emb = tf.nn.embedding_lookup(item_emb_w, j)

    x = tf.reduce_sum(tf.multiply(u_emb, (i_emb - j_emb)), 1, keep_dims=True)
    mf_auc = tf.reduce_mean(tf.to_float(x > 0))

    l2_norm = tf.add_n([
        tf.reduce_sum(tf.multiply(u_emb, u_emb)),
        tf.reduce_sum(tf.multiply(i_emb, i_emb)),
        tf.reduce_sum(tf.multiply(j_emb, j_emb))
    ])

    regulation_rate = 0.0001
    bprloss = regulation_rate * l2_norm - tf.reduce_mean(tf.log(tf.sigmoid(x)))
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(bprloss)
    return u, i, j, mf_auc, bprloss, train_op
Author: huyuxiang, Project: tensorflow_practice, Lines: 30, Source: BPR.py
Example 2: __init__
def __init__(self, encoder_rnn_output, label_onehot, is_training=True):
    self.encoder_rnn_output = encoder_rnn_output
    self.label_onehot = label_onehot
    self.is_training = is_training

    with tf.variable_scope("encoder_linear1"):
        context_to_hidden_W = tf.get_variable(name="context_to_hidden_W",
                                              shape=[FLAGS.RNN_SIZE + FLAGS.LABEL_CLASS, 100],
                                              dtype=tf.float32,
                                              initializer=tf.random_normal_initializer(stddev=0.1))
        context_to_hidden_b = tf.get_variable(name="context_to_hidden_b",
                                              shape=[100],
                                              dtype=tf.float32)

    with tf.variable_scope("encoder_linear2"):
        context_to_mu_W = tf.get_variable(name="context_to_mu_W",
                                          shape=[100, FLAGS.LATENT_VARIABLE_SIZE],
                                          dtype=tf.float32,
                                          initializer=tf.random_normal_initializer(stddev=0.1))
        context_to_mu_b = tf.get_variable(name="context_to_mu_b",
                                          shape=[FLAGS.LATENT_VARIABLE_SIZE],
                                          dtype=tf.float32)
        context_to_logvar_W = tf.get_variable(name="context_to_logvar_W",
                                              shape=[100, FLAGS.LATENT_VARIABLE_SIZE],
                                              dtype=tf.float32,
                                              initializer=tf.random_normal_initializer(stddev=0.1))
        context_to_logvar_b = tf.get_variable(name="context_to_logvar_b",
                                              shape=[FLAGS.LATENT_VARIABLE_SIZE],
                                              dtype=tf.float32)

    with tf.name_scope("rnn_output_and_label"):
        rnn_output_and_label = tf.concat((encoder_rnn_output, self.label_onehot),
                                         axis=1,
                                         name="concat_encoder_rnn_output_and_label")

    with tf.name_scope("sampler_hiddenstate"):
        h = tf.nn.relu(tf.matmul(rnn_output_and_label, context_to_hidden_W) + context_to_hidden_b)

    with tf.name_scope("mu"):
        self.mu = tf.matmul(h, context_to_mu_W) + context_to_mu_b

    with tf.name_scope("log_var"):
        self.logvar = tf.matmul(h, context_to_logvar_W) + context_to_logvar_b

    with tf.name_scope("z"):
        z = tf.truncated_normal((FLAGS.BATCH_SIZE, FLAGS.LATENT_VARIABLE_SIZE), stddev=1.0)

    with tf.name_scope("latent_variables"):
        self.latent_variables = self.mu + tf.exp(0.5 * self.logvar) * z
Author: leezqcst, Project: dcnn_textvae, Lines: 60, Source: encoder.py
Example 3: model
def model(hparams, X, past=None, scope='model', reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        results = {}
        batch, sequence = shape_list(X)

        wpe = tf.get_variable('wpe', [hparams.n_ctx, hparams.n_embd],
                              initializer=tf.random_normal_initializer(stddev=0.01))
        wte = tf.get_variable('wte', [hparams.n_vocab, hparams.n_embd],
                              initializer=tf.random_normal_initializer(stddev=0.02))
        past_length = 0 if past is None else tf.shape(past)[-2]
        h = tf.gather(wte, X) + tf.gather(wpe, positions_for(X, past_length))

        # Transformer
        presents = []
        pasts = tf.unstack(past, axis=1) if past is not None else [None] * hparams.n_layer
        assert len(pasts) == hparams.n_layer
        for layer, past in enumerate(pasts):
            h, present = block(h, 'h%d' % layer, past=past, hparams=hparams)
            presents.append(present)
        results['present'] = tf.stack(presents, axis=1)
        h = norm(h, 'ln_f')

        # Language model loss. Do tokens <n predict token n?
        h_flat = tf.reshape(h, [batch * sequence, hparams.n_embd])
        logits = tf.matmul(h_flat, wte, transpose_b=True)
        logits = tf.reshape(logits, [batch, sequence, hparams.n_vocab])
        results['logits'] = logits
        return results
Author: neuroradiology, Project: gpt-2, Lines: 28, Source: model.py
Example 4: _build_net
def _build_net(self):
    with tf.name_scope('inputs'):
        self.tf_obs = tf.placeholder(tf.float32, [None, self.n_features], name="observations")
        self.tf_acts = tf.placeholder(tf.int32, [None, ], name="actions_num")
        self.tf_vt = tf.placeholder(tf.float32, [None, ], name="actions_value")

    # fc1
    layer = tf.layers.dense(
        inputs=self.tf_obs,
        units=10,
        activation=tf.nn.tanh,  # tanh activation
        kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
        bias_initializer=tf.constant_initializer(0.1),
        name='fc1'
    )
    # fc2
    all_act = tf.layers.dense(
        inputs=layer,
        units=self.n_actions,
        activation=None,
        kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
        bias_initializer=tf.constant_initializer(0.1),
        name='fc2'
    )

    self.all_act_prob = tf.nn.softmax(all_act, name='act_prob')  # use softmax to convert to probabilities

    with tf.name_scope('loss'):
        # to maximize total reward (log_p * R) is to minimize -(log_p * R), and TF only has minimize(loss)
        neg_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=all_act, labels=self.tf_acts)  # negative log-prob of the chosen action
        # or in this way:
        # neg_log_prob = tf.reduce_sum(-tf.log(self.all_act_prob) * tf.one_hot(self.tf_acts, self.n_actions), axis=1)
        loss = tf.reduce_mean(neg_log_prob * self.tf_vt)  # reward-guided loss

    with tf.name_scope('train'):
        self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
Author: Emrys-Hong, Project: Reinforcement-learning-with-tensorflow, Lines: 35, Source: RL_brain.py
Example 5: __init__
def __init__(self, sess, n_features, n_actions, lr=0.001):
    self.sess = sess
    self.s = tf.placeholder(tf.float32, [1, n_features], name='state')
    self.a = tf.placeholder(tf.int32, None, name='act')
    self.td_error = tf.placeholder(tf.float32, None, "td_error")

    with tf.variable_scope('Actor'):
        l1 = tf.layers.dense(
            inputs=self.s,
            units=20,
            activation=tf.nn.relu,
            kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.1),
            bias_initializer=tf.constant_initializer(0.1),
            name='l1'
        )
        self.acts_prob = tf.layers.dense(
            inputs=l1,
            units=n_actions,
            activation=tf.nn.softmax,
            kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.1),
            bias_initializer=tf.constant_initializer(0.1),
            name='acts_prob'
        )

    with tf.variable_scope('exp_v'):
        log_prob = tf.log(self.acts_prob[0, self.a])
        self.exp_v = tf.reduce_mean(log_prob * self.td_error)

    with tf.variable_scope('train'):
        self.train_op = tf.train.AdamOptimizer(lr).minimize(-self.exp_v)
Author: huyuxiang, Project: tensorflow_practice, Lines: 34, Source: Actor.py
Example 6: SRGAN_g
def SRGAN_g(t_image, is_train=False, reuse=False):
    """Generator in "Photo-Realistic Single Image Super-Resolution Using a
    Generative Adversarial Network" (SRGAN). Layer names encode the number of
    feature maps (n) and the stride (s), e.g. n64s1.
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = None  # tf.constant_initializer(value=0.0)
    g_init = tf.random_normal_initializer(1., 0.02)
    with tf.variable_scope("SRGAN_g", reuse=reuse) as vs:
        # tl.layers.set_name_reuse(reuse)  # remove for TL 1.8.0+
        n = InputLayer(t_image, name='in')
        n = Conv2d(n, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, name='n64s1/c')
        temp = n

        # B residual blocks
        for i in range(16):
            nn = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c1/%s' % i)
            nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='n64s1/b1/%s' % i)
            nn = Conv2d(nn, 64, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c2/%s' % i)
            nn = BatchNormLayer(nn, is_train=is_train, gamma_init=g_init, name='n64s1/b2/%s' % i)
            nn = ElementwiseLayer([n, nn], tf.add, name='b_residual_add/%s' % i)
            n = nn

        n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c/m')
        n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='n64s1/b/m')
        n = ElementwiseLayer([n, temp], tf.add, name='add3')
        # B residual blocks end

        n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, name='n256s1/1')
        n = SubpixelConv2d(n, scale=2, n_out_channel=None, act=tf.nn.relu, name='pixelshufflerx2/1')
        n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, name='n256s1/2')
        n = SubpixelConv2d(n, scale=2, n_out_channel=None, act=tf.nn.relu, name='pixelshufflerx2/2')
        n = Conv2d(n, 3, (1, 1), (1, 1), act=tf.nn.tanh, padding='SAME', W_init=w_init, name='out')
        return n
Author: TowardSun, Project: srgan, Lines: 35, Source: model.py
Example 7: decoder
def decoder(input, output_dim, training, stddev=0.02, bias_value=0, reuse=False):
    w1 = tf.get_variable("w1", [input.get_shape()[1], 1000],
                         initializer=tf.random_normal_initializer(stddev=stddev))
    b1 = tf.get_variable("b1", [1000], initializer=tf.constant_initializer(bias_value))
    w2 = tf.get_variable("w2", [1000, 1000],
                         initializer=tf.random_normal_initializer(stddev=stddev))
    b2 = tf.get_variable("b2", [1000], initializer=tf.constant_initializer(bias_value))
    w3 = tf.get_variable("w3", [1000, output_dim],
                         initializer=tf.random_normal_initializer(stddev=stddev))
    b3 = tf.get_variable("b3", [output_dim], initializer=tf.constant_initializer(bias_value))

    fc1 = tf.nn.relu(tf.matmul(input, w1) + b1, name='relu1')
    fc2 = tf.nn.relu(tf.matmul(fc1, w2) + b2, name='relu2')
    fc3 = tf.nn.sigmoid(tf.matmul(fc2, w3) + b3)

    if not reuse:
        tf.histogram_summary('DE/L1/activation', fc1)
        tf.histogram_summary('DE/L1/weight', w1)
        tf.histogram_summary('DE/L1/bias', b1)
        tf.scalar_summary('DE/L1/sparsity', tf.nn.zero_fraction(fc1))
        tf.histogram_summary('DE/L2/activation', fc2)
        tf.histogram_summary('DE/L2/weight', w2)
        tf.histogram_summary('DE/L2/bias', b2)
        tf.scalar_summary('DE/L2/sparsity', tf.nn.zero_fraction(fc2))
        tf.histogram_summary('DE/L3/activation', fc3)
        tf.histogram_summary('DE/L3/weight', w3)
        tf.histogram_summary('DE/L3/bias', b3)
        tf.scalar_summary('DE/L3/sparsity', tf.nn.zero_fraction(fc3))

    return fc3, [w1, b1, w2, b2, w3, b3]
Author: supersaiakujin, Project: adversarial_autoencoders, Lines: 32, Source: adversarial_autoencoders.py
Example 8: discriminator
def discriminator(X, reuse=False):
    with tf.variable_scope('discriminator'):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        J = 784
        K = 128
        L = 1

        W1 = tf.get_variable('D_W1', [J, K],
                             initializer=tf.random_normal_initializer(stddev=xavier_init([J, K])))
        B1 = tf.get_variable('D_B1', [K], initializer=tf.constant_initializer())
        W2 = tf.get_variable('D_W2', [K, L],
                             initializer=tf.random_normal_initializer(stddev=xavier_init([K, L])))
        B2 = tf.get_variable('D_B2', [L], initializer=tf.constant_initializer())

        # summaries
        tf.summary.histogram('weight1', W1)
        tf.summary.histogram('weight2', W2)
        tf.summary.histogram('biases1', B1)
        tf.summary.histogram('biases2', B2)

        fc1 = tf.nn.relu(tf.matmul(X, W1) + B1)
        logits = tf.matmul(fc1, W2) + B2
        prob = tf.nn.sigmoid(logits)
        return prob, logits
Author: hephaex, Project: tensorflow_note, Lines: 26, Source: model_fc.py
Example 9: __init__
def __init__(self, sess, n_features, lr=0.01):
    self.sess = sess

    with tf.name_scope('inputs'):
        self.s = tf.placeholder(tf.float32, [1, n_features], "state")
        self.v_ = tf.placeholder(tf.float32, [1, 1], name="v_next")
        self.r = tf.placeholder(tf.float32, name='r')

    with tf.variable_scope('Critic'):
        l1 = tf.layers.dense(
            inputs=self.s,
            units=30,  # number of hidden units
            activation=tf.nn.relu,
            kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
            bias_initializer=tf.constant_initializer(0.1),  # biases
            name='l1'
        )
        self.v = tf.layers.dense(
            inputs=l1,
            units=1,  # output units
            activation=None,
            kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
            bias_initializer=tf.constant_initializer(0.1),  # biases
            name='V'
        )

    with tf.variable_scope('squared_TD_error'):
        self.td_error = tf.reduce_mean(self.r + GAMMA * self.v_ - self.v)
        self.loss = tf.square(self.td_error)  # TD_error = (r + gamma * V_next) - V_eval

    with tf.variable_scope('train'):
        self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)
Author: Emrys-Hong, Project: Reinforcement-learning-with-tensorflow, Lines: 31, Source: AC_continue_Pendulum.py
Example 10: initializeParameters
def initializeParameters(self, m, n):
    """
    Arguments:
        m -- number of users
        n -- number of items
    Returns:
        parameters -- parameters['b'], global bias, scalar
                      parameters['u'], user biases, shape (m, 1)
                      parameters['d'], item biases, shape (1, n)
                      parameters['P'], user feature matrix, shape (m, K)
                      parameters['Q'], item feature matrix, shape (n, K)
    """
    k = self.K
    parameters = {}
    parameters['b'] = tf.get_variable(name='b', dtype=tf.float64, shape=[],
                                      initializer=tf.zeros_initializer())
    parameters['u'] = tf.get_variable(name='u', dtype=tf.float64, shape=[m, 1],
                                      initializer=tf.zeros_initializer())
    parameters['d'] = tf.get_variable(name='d', dtype=tf.float64, shape=[1, n],
                                      initializer=tf.zeros_initializer())
    parameters['P'] = tf.get_variable(name='P', dtype=tf.float64, shape=[m, k],
                                      initializer=tf.random_normal_initializer())
    parameters['Q'] = tf.get_variable(name='Q', dtype=tf.float64, shape=[n, k],
                                      initializer=tf.random_normal_initializer())
    return parameters
Author: cheng-w-liu, Project: ML_algos, Lines: 32, Source: matrix_factorization_in_TensorFlow.py
Example 11: __init__
def __init__(self, n_inputs, n_rules, learning_rate=1e-2):
    self.n = n_inputs
    self.m = n_rules
    self.inputs = tf.placeholder(tf.float32, shape=(None, n_inputs))  # Input
    self.targets = tf.placeholder(tf.float32, shape=None)  # Desired output

    mu = tf.get_variable("mu", [n_rules * n_inputs],
                         initializer=tf.random_normal_initializer(0, 1))  # Means of Gaussian MFs
    sigma = tf.get_variable("sigma", [n_rules * n_inputs],
                            initializer=tf.random_normal_initializer(0, 1))  # Standard deviations of Gaussian MFs
    y = tf.get_variable("y", [1, n_rules], initializer=tf.random_normal_initializer(0, 1))  # Consequent centers
    self.params = tf.trainable_variables()

    # Rule activations
    self.rul = tf.reduce_prod(
        tf.reshape(tf.exp(-0.5 * tf.square(tf.subtract(tf.tile(self.inputs, (1, n_rules)), mu)) / tf.square(sigma)),
                   (-1, n_rules, n_inputs)), axis=2)

    # Fuzzy base expansion function:
    num = tf.reduce_sum(tf.multiply(self.rul, y), axis=1)
    den = tf.clip_by_value(tf.reduce_sum(self.rul, axis=1), 1e-12, 1e12)
    self.out = tf.divide(num, den)

    self.loss = tf.losses.huber_loss(self.targets, self.out)  # Loss function computation
    # Other loss functions for regression, uncomment to try them:
    # loss = tf.sqrt(tf.losses.mean_squared_error(target, out))
    # loss = tf.losses.absolute_difference(target, out)
    self.optimize = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)  # Optimization step
    # Other optimizers, uncomment to try them:
    # self.optimize = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(self.loss)
    # self.optimize = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(self.loss)
    self.init_variables = tf.global_variables_initializer()  # Variable initializer
Author: tiagoCuervo, Project: TensorANFIS, Lines: 30, Source: anfis.py
Example 12: create_positional_emb_2d
def create_positional_emb_2d(self, targets):
    """Learned 2d positional embedding for images."""
    mesh = targets.mesh

    positional_emb_rows_var = mtf.get_variable(
        mesh, "positional_emb_rows",
        mtf.Shape([self.pos_dim, self.model_dim]),
        initializer=tf.random_normal_initializer(),
        activation_dtype=self.activation_type)
    positional_emb_cols_var = mtf.get_variable(
        mesh, "positional_emb_cols",
        mtf.Shape([self.pos_dim, self.model_dim]),
        initializer=tf.random_normal_initializer(),
        activation_dtype=self.activation_type)

    targets_position_x = mtf.range(mesh, self.rows_dim, dtype=tf.int32)
    targets_position_y = mtf.range(mesh, self.cols_dim, dtype=tf.int32)
    position_x = mtf.broadcast(
        mtf.gather(positional_emb_rows_var, targets_position_x, self.pos_dim),
        mtf.Shape([self.rows_dim, self.cols_dim, self.model_dim]))
    position_y = mtf.broadcast(
        mtf.gather(positional_emb_cols_var, targets_position_y, self.pos_dim),
        mtf.Shape([self.rows_dim, self.cols_dim, self.model_dim]))
    return position_x + position_y
Author: qixiuai, Project: tensor2tensor, Lines: 27, Source: mtf_image_transformer.py
Example 13: discriminator
def discriminator(X, reuse=False):
    with tf.variable_scope('discriminator'):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        K = 64
        M = 128
        N = 256

        W1 = tf.get_variable('D_W1', [4, 4, 1, K], initializer=tf.random_normal_initializer(stddev=0.1))
        B1 = tf.get_variable('D_B1', [K], initializer=tf.constant_initializer())
        W2 = tf.get_variable('D_W2', [4, 4, K, M], initializer=tf.random_normal_initializer(stddev=0.1))
        B2 = tf.get_variable('D_B2', [M], initializer=tf.constant_initializer())
        W3 = tf.get_variable('D_W3', [7 * 7 * M, N], initializer=tf.random_normal_initializer(stddev=0.1))
        B3 = tf.get_variable('D_B3', [N], initializer=tf.constant_initializer())
        W4 = tf.get_variable('D_W4', [N, 1], initializer=tf.random_normal_initializer(stddev=0.1))
        B4 = tf.get_variable('D_B4', [1], initializer=tf.constant_initializer())

        X = tf.reshape(X, [-1, 28, 28, 1], 'reshape')

        conv1 = conv(X, W1, B1, stride=2, name='conv1')
        bn1 = tf.contrib.layers.batch_norm(conv1)
        conv2 = conv(tf.nn.dropout(lrelu(bn1), 0.4), W2, B2, stride=2, name='conv2')
        # conv2 = conv(lrelu(conv1), W2, B2, stride=2, name='conv2')
        bn2 = tf.contrib.layers.batch_norm(conv2)
        flat = tf.reshape(tf.nn.dropout(lrelu(bn2), 0.4), [-1, 7 * 7 * M], name='flat')
        # flat = tf.reshape(lrelu(conv2), [-1, 7 * 7 * M], name='flat')
        dense = lrelu(tf.matmul(flat, W3) + B3)
        logits = tf.matmul(dense, W4) + B4
        prob = tf.nn.sigmoid(logits)
        return prob, logits
Author: hephaex, Project: tensorflow_note, Lines: 33, Source: model_conv.py
Example 14: _build_cnn
def _build_cnn(self, feat_x):
    with tf.variable_scope("cnn_global", reuse=True):
        W1 = tf.get_variable(dtype=tf.float32,
                             shape=[self.filter_stride, self.dim_feat_x, 1, self.num_feat_map],
                             name="weight_w1",
                             initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
        b1 = tf.get_variable(dtype=tf.float32,
                             shape=[self.num_feat_map], name="bias_b1",
                             initializer=tf.constant_initializer(1.0))

    x_inputs = tf.reshape(feat_x, [-1, self.window_size, self.dim_feat_x, 1])
    # h_conv_1 size: [-1, dwf, ws, nfm]
    h_conv_1 = tf.nn.relu(self._conv_2d(x_inputs, W1) + b1)
    # h_max_pool size: [-1, 1, 1, nfm]
    h_max_pool = self._max_pool(h_conv_1)
    # concentrate into one vector; sent_vec size: [-1, nfm]
    sent_vec = tf.reshape(h_max_pool, [-1, self.num_feat_map])

    with tf.variable_scope("cnn_global", reuse=True):
        W2 = tf.get_variable(dtype=tf.float32,
                             shape=[self.num_feat_map, self.output_size], name="weight_w2",
                             initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
        b2 = tf.get_variable(dtype=tf.float32,
                             shape=[self.output_size], name="bias_b2",
                             initializer=tf.constant_initializer(1.0))
    logits = tf.matmul(sent_vec, W2) + b2
    return logits
Author: staylonging, Project: tf, Lines: 35, Source: cnnglobal_back.py
Example 15: weight
def weight(name, shape, init='he', range=None):
    """ Initializes weight.
    :param name: Variable name
    :param shape: Tensor shape
    :param init: Init mode. xavier / normal / uniform / he (default is 'he')
    :param range: Range for the uniform initializer (required when init='uniform')
    :return: Variable
    """
    initializer = tf.constant_initializer()
    if init == 'xavier':
        fan_in, fan_out = _get_dims(shape)
        range = math.sqrt(6.0 / (fan_in + fan_out))
        initializer = tf.random_uniform_initializer(-range, range)
    elif init == 'he':
        fan_in, _ = _get_dims(shape)
        std = math.sqrt(2.0 / fan_in)
        initializer = tf.random_normal_initializer(stddev=std)
    elif init == 'normal':
        initializer = tf.random_normal_initializer(stddev=0.1)
    elif init == 'uniform':
        if range is None:
            raise ValueError("range must not be None if uniform init is used.")
        initializer = tf.random_uniform_initializer(-range, range)

    var = tf.get_variable(name, shape, initializer=initializer)
    tf.add_to_collection('l2', tf.nn.l2_loss(var))  # Add L2 loss
    return var
Author: BabelTower, Project: dmn-tensorflow, Lines: 30, Source: nn.py
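For reference, a hypothetical call to the weight helper above (the variable name and shape are illustrative, assuming _get_dims derives fan-in/fan-out from the shape):

W = weight('fc1_w', [784, 256], init='he')  # He init: stddev = sqrt(2.0 / 784)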
Example 16: cnn_inference
def cnn_inference(inputs, input_units, output_units, is_train=True, FLAGS=None):
    """
    Define the CNN model.
    """
    # [BATCH_SIZE, 9] -> [BATCH_SIZE, 3, 3, 1]
    inputs = tf.reshape(inputs, [-1, 3, 3, 1])

    # [BATCH_SIZE, 3, 3, 1] -> [BATCH_SIZE, 3, 3, 8]
    with tf.variable_scope("conv_0"):
        weights = tf.get_variable(
            "weights", [3, 3, 1, 8], initializer=tf.random_normal_initializer())
        bias = tf.get_variable(
            "bias", [8], initializer=tf.random_normal_initializer())
        layer = tf.nn.conv2d(inputs, weights, strides=[1, 1, 1, 1], padding="SAME")
        layer = tf.nn.bias_add(layer, bias)
        layer = tf.nn.relu(layer)

    # [BATCH_SIZE, 3, 3, 8] -> [BATCH_SIZE, 3 * 3 * 8]
    layer = tf.reshape(layer, [-1, 3 * 3 * 8])

    # [BATCH_SIZE, 3 * 3 * 8] -> [BATCH_SIZE, LABEL_SIZE]
    with tf.variable_scope("output_layer"):
        weights = tf.get_variable(
            "weights", [3 * 3 * 8, FLAGS.label_size],
            initializer=tf.random_normal_initializer())
        bias = tf.get_variable(
            "bias", [FLAGS.label_size], initializer=tf.random_normal_initializer())
        layer = tf.add(tf.matmul(layer, weights), bias)

    return layer
Author: tobegit3hub, Project: deep_recommend_system, Lines: 33, Source: model.py
Example 17: _build_net
def _build_net(self):
    with tf.name_scope('inputs'):
        self.tf_obs = tf.placeholder(tf.float32, [None, self.n_features], name="observations")
        self.tf_acts = tf.placeholder(tf.int32, [None, ], name="actions_num")
        self.tf_vt = tf.placeholder(tf.float32, [None, ], name="actions_value")

    layer = tf.layers.dense(
        inputs=self.tf_obs,
        units=10,
        activation=tf.nn.tanh,
        kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
        bias_initializer=tf.constant_initializer(0.1),
        name='fc1'
    )
    all_act = tf.layers.dense(
        inputs=layer,
        units=self.n_actions,
        activation=None,
        kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
        bias_initializer=tf.constant_initializer(0.1),
        name='fc2'
    )
    self.all_act_prob = tf.nn.softmax(all_act, name='act_prob')

    with tf.name_scope('loss'):
        neg_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=all_act, labels=self.tf_acts)
        # the objective is expressed as maximizing log_p * R, i.e. minimizing the negative
        loss = tf.reduce_mean(neg_log_prob * self.tf_vt)

    with tf.name_scope('train'):
        self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
Author: niceIrene, Project: MetisRL, Lines: 35, Source: rl_brain.py
Example 18: __init__
def __init__(self, sess, n_features, n_actions, lr=0.001):
    self.sess = sess
    self.s = tf.placeholder(tf.float32, [1, n_features], "state")
    self.a = tf.placeholder(tf.int32, None, "act")
    self.td_error = tf.placeholder(tf.float32, None, "td_error")  # TD_error

    with tf.variable_scope('Actor'):
        l1 = tf.layers.dense(
            inputs=self.s,
            units=20,  # number of hidden units
            activation=tf.nn.relu,
            kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
            bias_initializer=tf.constant_initializer(0.1),  # biases
            name='l1'
        )
        self.acts_prob = tf.layers.dense(
            inputs=l1,
            units=n_actions,  # output units
            activation=tf.nn.softmax,  # get action probabilities
            kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
            bias_initializer=tf.constant_initializer(0.1),  # biases
            name='acts_prob'
        )

    with tf.variable_scope('exp_v'):
        log_prob = tf.log(self.acts_prob[0, self.a])
        self.exp_v = tf.reduce_mean(log_prob * self.td_error)  # advantage (TD_error) guided loss

    with tf.variable_scope('train'):
        self.train_op = tf.train.AdamOptimizer(lr).minimize(-self.exp_v)  # minimize(-exp_v) = maximize(exp_v)
Author: Emrys-Hong, Project: Reinforcement-learning-with-tensorflow, Lines: 32, Source: AC_CartPole.py
Example 19: model
def model(self, images, input_size, output_size, isEval=None):
    with tf.variable_scope('nn_hidden1', reuse=isEval):
        # Declaring variables
        weights_h1 = tf.get_variable("weights_h1", [input_size, self.num_hidden1],
                                     initializer=tf.random_normal_initializer(0.0, 1.0 / math.sqrt(float(input_size)),
                                                                              seed=self.SEED))
        weights_out = tf.get_variable("weights_out", [self.num_hidden1, output_size],
                                      initializer=tf.random_normal_initializer(0.0, 1.0 / math.sqrt(float(self.num_hidden1)),
                                                                               seed=self.SEED))
        biases_b1 = tf.get_variable("biases_b1", [self.num_hidden1],
                                    initializer=tf.constant_initializer(0.0))
        biases_out = tf.get_variable("biases_out", [output_size],
                                     initializer=tf.constant_initializer(0.0))

        # Constructing the network, with dropout
        layer_1 = tf.nn.relu(tf.add(tf.matmul(images, weights_h1), biases_b1))
        layer_1drop = tf.nn.dropout(layer_1, self.keep_prob)

        # Evaluating logits
        logits_drop = tf.matmul(layer_1drop, weights_out) + biases_out
        logits = tf.matmul(layer_1, weights_out) + biases_out
        reg_linear = tf.nn.l2_loss(weights_out) + tf.nn.l2_loss(weights_h1)

        if isEval:
            return logits
        else:
            regularizers = reg_linear
            return (logits_drop, regularizers)
Author: andreslechuga, Project: DeepLearning, Lines: 31, Source: Assignment3.py
Example 20: build_graph
def build_graph(self, test_decoder_logits):
    print('starting building graph [sentiment-discriminator]')
    with tf.variable_scope("sentiment") as scope:
        self.inputs = tf.slice(test_decoder_logits, [0, 0, 0],
                               [self.batch_size, self.max_length, self.vocab_size])
        # variables
        weights = {
            'w2v': tf.get_variable(initializer=tf.random_uniform_initializer(-0.1, 0.1, dtype=tf.float32),
                                   shape=[self.vocab_size, self.embedding_dim], name='w2v'),
            'out_1': tf.get_variable(initializer=tf.random_normal_initializer(),
                                     shape=[self.unit_size * 2, 1], name='w_out_1'),
        }
        biases = {
            'out_1': tf.get_variable(initializer=tf.random_normal_initializer(), shape=[1], name='b_out_1'),
        }

        # structure
        def BiRNN(x):
            x = tf.unstack(x, self.max_length, 1)
            lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(self.unit_size, forget_bias=1.0)
            lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(self.unit_size, forget_bias=1.0)
            outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, dtype=tf.float32)
            return outputs[-1]

        self.inputs_softmax = tf.nn.softmax(tf.scalar_mul(tf.constant(5.0, shape=[]), self.inputs))
        y_list = []
        for i in range(self.inputs.get_shape().as_list()[0]):
            y = tf.matmul(self.inputs_softmax[i], weights['w2v'])
            y = tf.reshape(y, [1, self.max_length, self.embedding_dim])
            y_list.append(y)
        embbed_layer = tf.concat(y_list, 0)
        layer_1 = BiRNN(embbed_layer)
        pred = tf.matmul(layer_1, weights['out_1']) + biases['out_1']
        # get score
        self.score = tf.sigmoid(pred)
Author: HTY886, Project: PPGN, Lines: 31, Source: discrim.py
Note: The tensorflow.random_normal_initializer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Please consult each project's license before using or redistributing the code; do not republish without permission.