
Python tensorflow.uniform_unit_scaling_initializer Function Code Examples


This article collects typical usage examples of the Python function tensorflow.uniform_unit_scaling_initializer. If you have been wondering what uniform_unit_scaling_initializer does, how to call it, or what working uses look like, the curated examples below should help.



The sections below present 20 code examples of the uniform_unit_scaling_initializer function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
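
Before turning to the examples, here is a minimal usage sketch (TF 1.x API; the variable names are illustrative, not taken from the examples below). The initializer draws values uniformly from [-sqrt(3/dim), sqrt(3/dim)] * factor, where dim is the variable's input dimension:

import tensorflow as tf

# Create a 128x64 weight matrix with unit-scaling initialization.
with tf.variable_scope("demo"):
    w = tf.get_variable(
        "w", shape=[128, 64],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w).shape)  # (128, 64)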

Example 1: _init_parameters

 def _init_parameters(self):
     # `weight_init_factor`, `L`, and `filters_num` come from the enclosing
     # scope of the original project and are not defined in this excerpt.
     if self.W is None:
         self.W = vs.get_variable("W", [self._filters_num + self._num_units, self._num_units],
                                  initializer=tf.uniform_unit_scaling_initializer(factor=weight_init_factor))
     if self.F is None:
         self.F = vs.get_variable("F", [L, filters_num],
                                  initializer=tf.uniform_unit_scaling_initializer(factor=weight_init_factor))
     if self.R is None:
         self.R = vs.get_variable("R", [L, 1],
                                  initializer=tf.uniform_unit_scaling_initializer(factor=weight_init_factor*0.5))
Author: alexeyche, Project: alexeyche-junk, Lines: 7, Source file: tf.py


Example 2: __call__

    def __call__(self, inputs, states, scope=None):
        with tf.variable_scope(
                scope or type(self).__name__,
                initializer=tf.random_normal_initializer(stddev=0.01)):
            # get the tensor
            if self._separate_pad:
                t_shape = [self._num_outputs,
                           self._num_outputs,
                           self._num_inputs]
                vec_a = inputs
                vec_b = states
            else:
                t_shape = [self._num_outputs+1,
                           self._num_outputs,
                           self._num_inputs+1]
                vec_a = tf.concat(
                    axis=1, values=[inputs, tf.ones([inputs.get_shape()[0].value, 1])])
                # The original snippet padded `inputs` again here; `states` is
                # almost certainly what was intended, mirroring the branch above.
                vec_b = tf.concat(
                    axis=1, values=[states, tf.ones([states.get_shape()[0].value, 1])])
            tensor = get_tt_3_tensor(t_shape, self._ranks, name='W')
            result = bilinear_product_tt_3(vec_a, tensor, vec_b)
            if self._separate_pad:
                # TODO possible weightnorm
                D = tf.get_variable('D', [self._num_inputs, self._num_outputs],
                                    initializer=tf.uniform_unit_scaling_initializer(1.2))
                E = tf.get_variable('E', [self._num_outputs, self._num_outputs],
                                    initializer=tf.uniform_unit_scaling_initializer(1.2))
                b = tf.get_variable('b', [self._num_outputs],
                                    initializer=tf.constant_initializer(0.0))
                z = tf.nn.bias_add(tf.matmul(inputs, D) + tf.matmul(states, E), b)
                result = result + z

            result = self._nonlin(result)
            return result, result
Author: PFCM, Project: rnns, Lines: 34, Source file: simple_tensor_rnn.py


Example 3: __init__

    def __init__(
        self,
        num_units,
        activation = simple_act,
        input_weights_init = tf.uniform_unit_scaling_initializer(factor=1.0),
        recc_weights_init = tf.uniform_unit_scaling_initializer(factor=0.1),
        sigma = 1.0,
        update_gate = True,
        dt = 1.0
    ):
        self._num_units = num_units
        self._activation = activation
        self._dt = dt
        self._sigma = sigma if sigma else 1.0
        self._update_gate = update_gate

        self.W = None
        self.U = None
        self.bias = None
        self.W_u = None
        self.U_u = None
        self.bias_u = None
        self.W_s = None
        self.U_s = None
        self.bias_s = None
        self.sigma = None

        self.input_weights_init = input_weights_init
        self.recc_weights_init = recc_weights_init
        
        self._sensitivity = False
        
        self.states_info = []
        self.update_info = []
Author: alexeyche, Project: alexeyche-junk, Lines: 34, Source file: model.py


Example 4: testInitializerIdentical

 def testInitializerIdentical(self):
   for use_gpu in [False, True]:
     init1 = tf.uniform_unit_scaling_initializer(seed=1)
     init2 = tf.uniform_unit_scaling_initializer(seed=1)
     self.assertTrue(identicaltest(self, init1, init2, use_gpu))
     init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
     init4 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
     self.assertTrue(identicaltest(self, init3, init4, use_gpu))
Author: DapengLan, Project: tensorflow, Lines: 8, Source file: init_ops_test.py


Example 5: testInitializerDifferent

 def testInitializerDifferent(self):
   for use_gpu in [False, True]:
     init1 = tf.uniform_unit_scaling_initializer(seed=1)
     init2 = tf.uniform_unit_scaling_initializer(seed=2)
     init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
     self.assertFalse(identicaltest(self, init1, init2, use_gpu))
     self.assertFalse(identicaltest(self, init1, init3, use_gpu))
     self.assertFalse(identicaltest(self, init2, init3, use_gpu))
Author: DapengLan, Project: tensorflow, Lines: 8, Source file: init_ops_test.py


Example 6: testInitializerIdentical

 def testInitializerIdentical(self):
   for dtype in [tf.float32, tf.float64]:
     init1 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
     init2 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
     self.assertTrue(identicaltest(self, init1, init2))
     init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
     init4 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
     self.assertTrue(identicaltest(self, init3, init4))
Author: Nishant23, Project: tensorflow, Lines: 8, Source file: init_ops_test.py


Example 7: testInitializerDifferent

 def testInitializerDifferent(self):
   for dtype in [tf.float32, tf.float64]:
     init1 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
     init2 = tf.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
     init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
     self.assertFalse(identicaltest(self, init1, init2))
     self.assertFalse(identicaltest(self, init1, init3))
     self.assertFalse(identicaltest(self, init2, init3))
Author: Nishant23, Project: tensorflow, Lines: 8, Source file: init_ops_test.py


Example 8: sharded_variable

def sharded_variable(name, shape, num_shards, dtype=tf.float32, transposed=False):
    '''Sharding operation: split a variable into num_shards row-wise pieces.'''
    # Each shard holds ceil(shape[0] / num_shards) rows, so the combined size
    # may be slightly larger than requested.
    shard_size = int((shape[0] + num_shards - 1) / num_shards)
    # Note: in this excerpt the `transposed` branch was identical to the
    # default one; the argument that distinguished them appears to have been
    # lost in extraction, so the branch is collapsed here.
    initializer = tf.uniform_unit_scaling_initializer(dtype=dtype)
    return [tf.get_variable(name + '_%d' % i, [shard_size, shape[1]],
                            initializer=initializer, dtype=dtype)
            for i in range(num_shards)]
Author: IgorWang, Project: RNNLM, Lines: 12, Source file: model_utils.py
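
For context, a hedged usage sketch of the helper above (the shapes are illustrative): a 10000-row embedding split across 4 shards yields 4 variables of 2500 rows each.

# Hypothetical call to sharded_variable as defined above.
shards = sharded_variable("embedding", [10000, 512], num_shards=4)
print(len(shards))                       # 4
print(shards[0].get_shape().as_list())   # [2500, 512]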


Example 9: make_variable

def make_variable(name, shape, initializer, weight_decay=None, lr_mult=1, decay_mult=1):
    # Note: in this snippet the passed-in `initializer` is only honored when
    # lr_mult == 0; the other branches use uniform_unit_scaling_initializer.
    if lr_mult == 0:
        var = tf.get_variable(name, shape, initializer=initializer, trainable=False)
    elif weight_decay is None:
        var = tf.get_variable(name, shape,
                              initializer=tf.uniform_unit_scaling_initializer())
    else:
        var = tf.get_variable(name, shape,
                              initializer=tf.uniform_unit_scaling_initializer(),
                              regularizer=tf.contrib.layers.l2_regularizer(weight_decay * decay_mult))

    if lr_mult > 0:
        tf.add_to_collection(str(lr_mult), var)

    return var
Author: 24hours, Project: tf_fcn, Lines: 15, Source file: FCN.py


Example 10: __call__

    def __call__(self, inputs, states, scope=None):
        with tf.variable_scope(scope or type(self).__name__) as outer_scope:
            # do it
            # sub scope for the tensor init
            # should inherit reuse from outer scope
            with tf.variable_scope('tensor',
                                   initializer=init.orthonormal_init(0.5)):
                tensor = get_cp_tensor([self.input_size,
                                        self.output_size,
                                        self.state_size],
                                       self.rank,
                                       'W',
                                       weightnorm=False,
                                       trainable=True)
            combination = bilinear_product_cp(inputs, tensor, states)
            # and project the input
            input_weights = tf.get_variable('U', shape=[self.input_size,
                                                        self._input_projection],
                                            initializer=tf.uniform_unit_scaling_initializer(1.4))
            input_proj = tf.matmul(inputs, input_weights)
            # apply a bias pre-nonlinearity
            bias = tf.get_variable('b', shape=[self.output_size],
                                   initializer=tf.constant_initializer(0.0))
            if self.layernorm == 'pre':
                activations = layer_normalise(combination + input_proj + bias)
            else:
                activations = combination + input_proj + bias

            result = self._nonlinearity(activations)

            if self.layernorm == 'post':
                result = layer_normalise(result)

            result = result + states
        return result, result
Author: PFCM, Project: rnns, Lines: 35, Source file: additive_tensor_rnn.py


Example 11: setup_loss_critic

def setup_loss_critic(critic):
    # we are starting with critic.outputs symbol (after logistic layer)
    with tf.variable_scope("rl", initializer=tf.uniform_unit_scaling_initializer(1.0)):
        # loss setup
        # None to timestep
        critic.target_qt = tf.placeholder(tf.float32, shape=[None, None, critic.vocab_size],
                                            name="q_action_score")
        # p_actions is the target_token, and it's already [T, batch_size]
        # q_t needs to be expanded...

        # critic.outputs [T, batch_size, vocab_size]
        # let's populate (expand) target tokens to fill up qt (just like what we did with one-hot labels)

        critic.q_loss = tf.reduce_mean(tf.square(critic.outputs - critic.target_qt))  # Note: not adding lambda*C yet (variance)

        opt = nlc_model.get_optimizer(FLAGS.optimizer)(critic.learning_rate)

        # update
        params = tf.trainable_variables()
        gradients = tf.gradients(critic.q_loss, params)
        clipped_gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.max_gradient_norm)
        #      self.gradient_norm = tf.global_norm(clipped_gradients)
        critic.gradient_norm = tf.global_norm(gradients)
        critic.param_norm = tf.global_norm(params)
        critic.updates = opt.apply_gradients(
            zip(clipped_gradients, params), global_step=critic.global_step)
Author: windweller, Project: nlc, Lines: 26, Source file: rl_train.py


Example 12: _init_parameters

    def _init_parameters(self):
        # `c` is the original project's config module (not part of this excerpt).
        return tf.get_variable(
            "F", [self._filter_size, self._input_size, self._layer_size],
            initializer=tf.uniform_unit_scaling_initializer(factor=c.weight_init_factor))

    def __call__(self, input, state, scope=None):
        if self._params is None:
            self._params = self._init_parameters()

        x = input
        u, a = state
        F = self._params

        # Feed-forward drive and feedback term.
        b = tf.nn.conv1d(x, F, 1, "SAME")  # padding was omitted in the excerpt
        # The excerpt misplaced a closing parenthesis here; the intended
        # product is transpose(F) @ F.
        Fc = tf.matmul(tf.transpose(F, (0, 2, 1)), F)
        fb = tf.nn.conv1d(a, Fc, 1, "SAME")

        print("b", b.get_shape())
        print("Fc", Fc.get_shape())
        print("fb", fb.get_shape())

        du = -u + b - fb
        new_u = u + c.epsilon * du / c.tau
        new_a = tf.nn.relu(new_u - c.lam)

        return (new_u, new_a), (new_u, new_a)
Author: alexeyche, Project: alexeyche-junk, Lines: 32, Source file: lca_tf.py


Example 13: FullyConnected

def FullyConnected(x, out_dim,
                   W_init=None, b_init=None,
                   nl=tf.nn.relu, use_bias=True):
    """
    Fully-Connected layer.

    :param x: a tensor to be flattened except the first dimension.
    :param out_dim: output dimension
    :param W_init: initializer for W; defaults to `uniform_unit_scaling_initializer(factor=1.43)` in the code below.
    :param b_init: initializer for b; defaults to the zero initializer.
    :param nl: nonlinearity; defaults to `relu`.
    :param use_bias: whether to use a bias; a boolean, defaults to True.
    :returns: a 2D tensor
    """
    x = batch_flatten(x)
    in_dim = x.get_shape().as_list()[1]

    if W_init is None:
        #W_init = tf.truncated_normal_initializer(stddev=1 / math.sqrt(float(in_dim)))
        W_init = tf.uniform_unit_scaling_initializer(factor=1.43)
    if b_init is None:
        b_init = tf.constant_initializer()

    W = tf.get_variable('W', [in_dim, out_dim], initializer=W_init)
    if use_bias:
        b = tf.get_variable('b', [out_dim], initializer=b_init)
    prod = tf.nn.xw_plus_b(x, W, b) if use_bias else tf.matmul(x, W)
    return nl(prod, name='output')
Author: Paseam, Project: tensorpack, Lines: 28, Source file: fc.py
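
A hedged usage sketch of the layer above, assuming tensorpack's `batch_flatten` helper is in scope (the shapes are illustrative):

# Hypothetical call: flatten a [N, 7, 7, 64] feature map and project it to
# 10 logits with no nonlinearity.
x = tf.placeholder(tf.float32, [None, 7, 7, 64])
logits = FullyConnected(x, 10, nl=tf.identity)  # shape [N, 10]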


Example 14: testTransformerAutoencoder

  def testTransformerAutoencoder(self):
    hparams = imagetransformer_latent_tiny()
    hparams.mode = tf.estimator.ModeKeys.TRAIN
    block_dim = int(hparams.hidden_size // hparams.num_blocks)
    block_v_size = 2**(hparams.bottleneck_bits /
                       (hparams.num_residuals * hparams.num_blocks))
    block_v_size = int(block_v_size)
    means = tf.get_variable(
        name="means",
        shape=[hparams.num_residuals,
               hparams.num_blocks,
               block_v_size,
               block_dim],
        initializer=tf.uniform_unit_scaling_initializer())
    hparams.bottleneck = functools.partial(
        discretization.discrete_bottleneck,
        hidden_size=hparams.hidden_size,
        z_size=hparams.bottleneck_bits,
        filter_size=hparams.filter_size,
        startup_steps=hparams.startup_steps,
        bottleneck_kind=hparams.bottleneck_kind,
        num_blocks=hparams.num_blocks,
        num_residuals=hparams.num_residuals,
        reshape_method=hparams.reshape_method,
        beta=hparams.vq_beta,
        decay=hparams.vq_decay,
        soft_em=hparams.soft_em,
        num_samples=hparams.num_samples,
        epsilon=hparams.vq_epsilon,
        ema=hparams.ema,
        means=means)

    inputs = None
    batch_size = hparams.batch_size
    targets = tf.random_uniform([batch_size,
                                 hparams.img_len,
                                 hparams.img_len,
                                 hparams.hidden_size],
                                minval=-1., maxval=1.)
    target_space_id = None

    tf.train.create_global_step()
    decoder_output, losses, cache = latent_layers.transformer_autoencoder(
        inputs, targets, target_space_id, hparams)

    self.assertEqual(set(six.iterkeys(losses)),
                     {"extra", "extra_loss", "latent_pred"})

    self.evaluate(tf.global_variables_initializer())
    decoder_output_, extra_loss_, latent_pred_ = self.evaluate(
        [decoder_output, losses["extra_loss"], losses["latent_pred"]])
    self.assertEqual(decoder_output_.shape, (batch_size,
                                             hparams.img_len,
                                             hparams.img_len,
                                             hparams.hidden_size))
    self.assertEqual(extra_loss_.shape, (batch_size,))
    self.assertEqual(latent_pred_.shape, (batch_size,))
    self.assertAllGreaterEqual(extra_loss_, 0.)
    self.assertAllGreaterEqual(latent_pred_, 0.)
    self.assertEqual(cache, None)
Author: qixiuai, Project: tensor2tensor, Lines: 60, Source file: latent_layers_test.py


Example 15: _fully_connected

 def _fully_connected(self, x, out_dim):
   x = tf.reshape(x, [self._params.batch_size, -1])
   w = tf.get_variable(
       'DW', [x.get_shape()[1], out_dim],
       initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
   b = tf.get_variable(
       'biases', [out_dim], initializer=tf.constant_initializer())
   return tf.nn.xw_plus_b(x, w, b)
Author: 812864539, Project: models, Lines: 8, Source file: embedders.py


Example 16: __init__

 def __init__(self,FLAGS):
     # Q: we can use an LSTM in the decoder too, but it may be a better idea not to increase the number of parameters too much
     self.state_size = FLAGS.state_size
     self.maxSentenceLength = FLAGS.maxSentenceLength
     with vs.variable_scope("decoder", initializer = tf.contrib.layers.xavier_initializer()):
         self.W = tf.get_variable("W", dtype = tf.float64, shape = (self.state_size,1))
         self.b = tf.get_variable("b", dtype = tf.float64, shape = (1,),
                                  initializer=tf.uniform_unit_scaling_initializer(1.0))
Author: andrewquirk, Project: extractive-news-summarization, Lines: 8, Source file: tldr.py


Example 17: _fully_connected

 def _fully_connected(self, x, out_dim, name=''):
     with tf.variable_scope(name):
         x = tf.reshape(x, [self._batch_size, -1])
         w = tf.get_variable(
             name+'DW', [x.get_shape()[1], out_dim],
             initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
         b = tf.get_variable(name+'biases', [out_dim],
                         initializer=tf.constant_initializer())
         return tf.nn.xw_plus_b(x, w, b)
Author: KelvinKarRoy, Project: KKAlphaGoZero, Lines: 9, Source file: alphago_zero_resnet_model.py


Example 18: _fully_connected

 def _fully_connected(self, x, out_dim):
   # Flatten the input into a 2-D tensor of shape [N, -1].
   x = tf.reshape(x, [self.hps.batch_size, -1])
   # Weights w, drawn uniformly from [-sqrt(3/dim), sqrt(3/dim)] * factor.
   w = tf.get_variable('DW', [x.get_shape()[1], out_dim],
                       initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
   # Bias b, zero-initialized.
   b = tf.get_variable('biases', [out_dim], initializer=tf.constant_initializer())
   # Compute x*w + b.
   return tf.nn.xw_plus_b(x, w, b)
Author: Npccc, Project: Study, Lines: 10, Source file: resnet_model.py
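
The range stated in the comments above can be checked empirically. A minimal sketch, assuming a TF 1.x runtime (the variable name `check_w` is illustrative):

import math
import numpy as np
import tensorflow as tf

dim, out_dim, factor = 256, 10, 1.0
init = tf.uniform_unit_scaling_initializer(factor=factor)
w = tf.get_variable('check_w', [dim, out_dim], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    values = sess.run(w)
    bound = math.sqrt(3.0 / dim) * factor
    print(np.min(values) >= -bound, np.max(values) <= bound)  # True True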


Example 19: __init__

  def __init__(self, embedding_dim, num_embeddings, commitment_cost,
               name='vq_layer'):
    super(VectorQuantizer, self).__init__(name=name)
    self._embedding_dim = embedding_dim
    self._num_embeddings = num_embeddings
    self._commitment_cost = commitment_cost

    with self._enter_variable_scope():
      initializer = tf.uniform_unit_scaling_initializer()
      self._w = tf.get_variable('embedding', [embedding_dim, num_embeddings],
                                initializer=initializer, trainable=True)
Author: ccchang0111, Project: sonnet, Lines: 11, Source file: vqvae.py


Example 20: conv1d_log

def conv1d_log(x,
               num_filters,
               filter_length,
               name,
               dilation=1,
               causal=True,
               kernel_initializer=tf.uniform_unit_scaling_initializer(1.0),
               biases_initializer=tf.constant_initializer(0.0)):
  """Fast 1D convolution that supports causal padding and dilation.
  Args:
    x: The [mb, time, channels] float tensor that we convolve.
    num_filters: The number of filter maps in the convolution.
    filter_length: The integer length of the filter.
    name: The name of the scope for the variables.
    dilation: The amount of dilation.
    causal: Whether or not this is a causal convolution.
    kernel_initializer: The kernel initialization function.
    biases_initializer: The biases initialization function.
  Returns:
    y: The output of the 1D convolution.
  """
  batch_size, length, num_input_channels = x.get_shape().as_list()
  assert length % dilation == 0

  kernel_shape = [1, filter_length, num_input_channels, num_filters]
  strides = [1, 1, 1, 1]
  biases_shape = [num_filters]
  padding = 'VALID' if causal else 'SAME'

  with tf.variable_scope(name):
    weights = tf.get_variable(
        'W', shape=kernel_shape, initializer=kernel_initializer)
    biases = tf.get_variable(
        'biases', shape=biases_shape, initializer=biases_initializer)

  x_ttb = time_to_batch(x, dilation)
  if filter_length > 1 and causal:
    x_ttb = tf.pad(x_ttb, [[0, 0], [filter_length - 1, 0], [0, 0]])

  W_mean = tf.reduce_mean(weights)
  biases_mean = tf.reduce_mean(biases)

  x_ttb_shape = x_ttb.get_shape().as_list()
  x_4d = tf.reshape(x_ttb, [x_ttb_shape[0], 1,
                            x_ttb_shape[1], num_input_channels])

  y = tf.nn.conv2d(x_4d, weights, strides, padding=padding)
  y = tf.nn.bias_add(y, biases)

  y_shape = y.get_shape().as_list()
  y = tf.reshape(y, [y_shape[0], y_shape[2], num_filters])
  y = batch_to_time(y, dilation)
  y.set_shape([batch_size, length, num_filters])
  return y, W_mean, biases_mean
Author: QianQQ, Project: Voice-Conversion, Lines: 54, Source file: masked.py
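
A hedged usage sketch of conv1d_log, assuming the time_to_batch/batch_to_time helpers from the original project are in scope (the shapes are illustrative):

# Hypothetical call: a causal, dilation-2 convolution over a [2, 64, 32] input.
x = tf.random_uniform([2, 64, 32])  # [batch, time, channels]; 64 % 2 == 0
y, w_mean, b_mean = conv1d_log(x, num_filters=16, filter_length=3,
                               name="conv1", dilation=2, causal=True)
# y has shape [2, 64, 16]; w_mean/b_mean are scalar means of kernel and bias.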



Note: The tensorflow.uniform_unit_scaling_initializer examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not repost without permission.

