Python tensorflow.placeholder_with_default Function Code Examples


This article collects typical usage examples of the Python function tensorflow.placeholder_with_default. If you are wondering what placeholder_with_default does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following shows 20 code examples of the placeholder_with_default function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code samples.
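Before turning to the examples, here is a minimal standalone sketch of what tf.placeholder_with_default does, assuming TensorFlow 1.x graph mode (the tensor name and the values below are illustrative only): the op behaves like an ordinary placeholder, except that the supplied default tensor is used whenever no value is fed.

import numpy as np
import tensorflow as tf

# Behaves like tf.placeholder, but falls back to the default when nothing is fed.
x = tf.placeholder_with_default(np.float32([1., 2., 3.]), shape=[3], name='x')
y = x * 2.

with tf.Session() as sess:
    print(sess.run(y))                                   # default used: [2. 4. 6.]
    print(sess.run(y, feed_dict={x: [10., 20., 30.]}))   # fed value wins: [20. 40. 60.]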

Example 1: test_non_positive_shape

  def test_non_positive_shape(self):
    dims = 2
    old_batch_shape = [4]
    if self.is_static_shape:
      # Unknown first dimension does not trigger size check. Note that
      # any dimension < 0 is treated statically as unknown.
      new_batch_shape = [-1, 0]
    else:
      new_batch_shape = [-2, -2]  # -2 * -2 = 4, same size as the old shape.

    new_batch_shape_ph = (
        tf.constant(np.int32(new_batch_shape))
        if self.is_static_shape else tf.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))

    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = tf.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)

    if self.is_static_shape:
      with self.assertRaisesRegexp(ValueError, r".*must be >=-1.*"):
        tfd.BatchReshape(
            distribution=mvn,
            batch_shape=new_batch_shape_ph,
            validate_args=True)

    else:
      with self.assertRaisesOpError(r".*must be >=-1.*"):
        self.evaluate(
            tfd.BatchReshape(
                distribution=mvn,
                batch_shape=new_batch_shape_ph,
                validate_args=True).sample())
Developer: asudomoeva, Project: probability, Lines: 34, Source: batch_reshape_test.py


Example 2: test_non_vector_shape

  def test_non_vector_shape(self):
    dims = 2
    new_batch_shape = 2
    old_batch_shape = [2]

    new_batch_shape_ph = (
        tf.constant(np.int32(new_batch_shape))
        if self.is_static_shape else tf.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))

    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = tf.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)

    if self.is_static_shape:
      with self.assertRaisesRegexp(ValueError, r".*must be a vector.*"):
        tfd.BatchReshape(
            distribution=mvn,
            batch_shape=new_batch_shape_ph,
            validate_args=True)

    else:
      with self.assertRaisesOpError(r".*must be a vector.*"):
        self.evaluate(
            tfd.BatchReshape(
                distribution=mvn,
                batch_shape=new_batch_shape_ph,
                validate_args=True).sample())
Developer: asudomoeva, Project: probability, Lines: 29, Source: batch_reshape_test.py


Example 3: test_bad_reshape_size

  def test_bad_reshape_size(self):
    dims = 2
    new_batch_shape = [2, 3]
    old_batch_shape = [2]   # 2 != 2*3

    new_batch_shape_ph = (
        tf.constant(np.int32(new_batch_shape))
        if self.is_static_shape else tf.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))

    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = tf.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)

    if self.is_static_shape:
      with self.assertRaisesRegexp(
          ValueError, (r"`batch_shape` size \(6\) must match "
                       r"`distribution\.batch_shape` size \(2\)")):
        tfd.BatchReshape(
            distribution=mvn,
            batch_shape=new_batch_shape_ph,
            validate_args=True)

    else:
      with self.assertRaisesOpError(r"Shape sizes do not match."):
        self.evaluate(
            tfd.BatchReshape(
                distribution=mvn,
                batch_shape=new_batch_shape_ph,
                validate_args=True).sample())
Developer: asudomoeva, Project: probability, Lines: 31, Source: batch_reshape_test.py


Example 4: _test_partial_shape_correctness

    def _test_partial_shape_correctness(self,
                                        input,
                                        rank,
                                        batch_size,
                                        grid,
                                        interpolation,
                                        boundary,
                                        expected_value=None):

        resampler = ResamplerLayer(interpolation=interpolation,
                                   boundary=boundary)
        input_default = tf.random_uniform(input.shape)
        if batch_size > 0 and rank > 0:
            input_placeholder = tf.placeholder_with_default(
                input_default, shape=[batch_size] + [None] * (rank + 1))
        elif batch_size <= 0 and rank > 0:
            input_placeholder = tf.placeholder_with_default(
                input_default, shape=[None] * (rank + 2))
        elif batch_size <= 0 and rank <= 0:
            input_placeholder = tf.placeholder_with_default(
                input_default, shape=None)

        out = resampler(input_placeholder, grid)
        with self.test_session() as sess:
            out_value = sess.run(
                out, feed_dict={input_placeholder: input})
            # print(expected_value)
            # print(out_value)
            if expected_value is not None:
                self.assertAllClose(expected_value, out_value)
Developer: fepegar, Project: NiftyNet, Lines: 30, Source: resampler_test.py


Example 5: _testScaledIdentityComplexAdjoint

 def _testScaledIdentityComplexAdjoint(self, is_dynamic):
   shift_ = np.array(-0.5, dtype=np.complex)
   scale_ = np.array(4 + 2j, dtype=np.complex)
   shift = tf.placeholder_with_default(
       shift_, shape=None if is_dynamic else [])
   scale = tf.placeholder_with_default(
       scale_, shape=None if is_dynamic else [])
   bijector = tfb.Affine(
       shift=shift,
       scale_identity_multiplier=scale,
       adjoint=True,
       validate_args=True)
   z = np.array([1., 2, 3], dtype=np.complex)
   y = bijector.forward(z)
   x = bijector.inverse(z)
   inv_fwd_z = bijector.inverse(tf.identity(y))
   ildj = bijector.inverse_log_det_jacobian(z, event_ndims=1)
   fldj = bijector.forward_log_det_jacobian(z, event_ndims=1)
   [x_, y_, inv_fwd_z_, ildj_, fldj_] = self.evaluate([
       x, y, inv_fwd_z, ildj, fldj])
   self.assertAllClose(np.conj(scale_) * z + shift_, y_)
   self.assertAllClose((z - shift_) / np.conj(scale_), x_)
   self.assertAllClose(z, inv_fwd_z_)
   self.assertAllClose(z.shape[-1] * np.log(np.abs(scale_)), fldj_)
   self.assertAllClose(-z.shape[-1] * np.log(np.abs(scale_)), ildj_)
Developer: lewisKit, Project: probability, Lines: 25, Source: affine_test.py


Example 6: test_broadcasting_explicitly_unsupported

  def test_broadcasting_explicitly_unsupported(self):
    old_batch_shape = [4]
    new_batch_shape = [1, 4, 1]
    rate_ = self.dtype([1, 10, 2, 20])

    rate = tf.placeholder_with_default(
        rate_, shape=old_batch_shape if self.is_static_shape else None)
    poisson_4 = tfd.Poisson(rate)
    new_batch_shape_ph = (
        tf.constant(np.int32(new_batch_shape))
        if self.is_static_shape else tf.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    poisson_141_reshaped = tfd.BatchReshape(
        poisson_4, new_batch_shape_ph, validate_args=True)

    x_4 = self.dtype([2, 12, 3, 23])
    x_114 = self.dtype([2, 12, 3, 23]).reshape(1, 1, 4)

    if self.is_static_shape:
      with self.assertRaisesRegexp(NotImplementedError,
                                   "too few batch and event dims"):
        poisson_141_reshaped.log_prob(x_4)
      with self.assertRaisesRegexp(NotImplementedError,
                                   "unexpected batch and event shape"):
        poisson_141_reshaped.log_prob(x_114)
      return

    with self.assertRaisesOpError("too few batch and event dims"):
      self.evaluate(poisson_141_reshaped.log_prob(x_4))

    with self.assertRaisesOpError("unexpected batch and event shape"):
      self.evaluate(poisson_141_reshaped.log_prob(x_114))
Developer: asudomoeva, Project: probability, Lines: 32, Source: batch_reshape_test.py


Example 7: add_decoding_ops

  def add_decoding_ops(self, language_model: str = None, lm_weight: float = 0.8, word_count_weight: float = 0.0,
                       valid_word_count_weight: float = 2.3):
    """
    Add the ops for decoding.

    Args:
      language_model: the file path to the language model to use for beam search decoding or None
      word_count_weight: The weight added for each added word
      valid_word_count_weight: The weight added for each in vocabulary word
      lm_weight: The weight multiplied with the language model scoring
    """
    with tf.name_scope('decoding'):
      self.lm_weight = tf.placeholder_with_default(lm_weight, shape=(), name='language_model_weight')
      self.word_count_weight = tf.placeholder_with_default(word_count_weight, shape=(), name='word_count_weight')
      self.valid_word_count_weight = tf.placeholder_with_default(valid_word_count_weight, shape=(),
                                                                 name='valid_word_count_weight')

      if language_model:
        self.softmaxed = tf.log(tf.nn.softmax(self.logits, name='softmax') + 1e-8) / math.log(10)
        self.decoded, self.log_probabilities = tf.nn.ctc_beam_search_decoder(self.softmaxed,
                                                                             self.sequence_lengths // 2,
                                                                             kenlm_directory_path=language_model,
                                                                             kenlm_weight=self.lm_weight,
                                                                             word_count_weight=self.word_count_weight,
                                                                             valid_word_count_weight=self.valid_word_count_weight,
                                                                             beam_width=100,
                                                                             merge_repeated=False,
                                                                             top_paths=1)
      else:
        self.decoded, self.log_probabilities = tf.nn.ctc_greedy_decoder(self.logits,
                                                                        self.sequence_lengths // 2,
                                                                        merge_repeated=True)
Developer: mark-arm, Project: speechT, Lines: 32, Source: speech_model.py


Example 8: __init__

 def __init__(self, dataset):
     self._data_set = dataset
     self.class_count = dataset.class_count
     self.lat_placeholder = tf.placeholder_with_default(tf.zeros([1], dtype=tf.float32), [None], name='lat_placeholder')
     self.lng_placeholder = tf.placeholder_with_default(tf.zeros([1], dtype=tf.float32), [None], name='lng_placeholder')
     self.week_placeholder = tf.placeholder_with_default(tf.zeros([1], dtype=tf.float32), [None], name='week_placeholder')
     self.ground_truth = tf.placeholder(tf.float32, [None, self.class_count])
Developer: thran, Project: neuron_nets, Lines: 7, Source: meta_test.py


Example 9: setup_val

 def setup_val(self, tfname):
     self.restore = glob(os.path.join(self.checkpoint8, "FCN__*", "*.data*" ))[0].split(".data")[0]  
     
     filename_queue = tf.train.string_input_producer(
                                 [tfname], num_epochs=10)
     self.image_queue, self.annotation_queue = read_tfrecord_and_decode_into_image_annotation_pair_tensors(filename_queue)
     self.image = tf.placeholder_with_default(
         self.image_queue, shape=[None, None, 3])
     self.annotation = tf.placeholder_with_default(
         self.annotation_queue, shape=[None, None, 1])
     self.resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(self.image, self.annotation, (self.size, self.size))
     self.resized_annotation = tf.squeeze(resized_annotation)
     image_batch_tensor = tf.expand_dims(self.image, axis=0)
     annotation_batch_tensor = tf.expand_dims(self.annotation, axis=0)
     # Be careful: after adaptation, network returns final labels
     # and not logits
     FCN_8s_bis = adapt_network_for_any_size_input(FCN_8s, 32)
     self.pred, fcn_16s_variables_mapping = FCN_8s_bis(image_batch_tensor=image_batch_tensor,
                                                   number_of_classes=self.num_labels,
                                                   is_training=False)
     self.prob = [h for h in [s for s in [t for t in self.pred.op.inputs][0].op.inputs][0].op.inputs][0]
     initializer = tf.local_variables_initializer()
     self.saver = tf.train.Saver()
     with tf.Session() as sess:
         sess.run(initializer)
         self.saver.restore(sess, self.restore)
Developer: PeterJackNaylor, Project: PhD_Fabien, Lines: 28, Source: FCN_Object.py


Example 10: test_batch_vector_sampaxis03_eventaxis12_dynamic

  def test_batch_vector_sampaxis03_eventaxis12_dynamic(self):
    # x.shape = sample, event, event, sample, batch
    x = rng.randn(2, 3, 4, 5, 6)
    y = x + 0.1 * rng.randn(2, 3, 4, 5, 6)

    x_ph = tf.placeholder_with_default(input=x, shape=None)
    y_ph = tf.placeholder_with_default(input=y, shape=None)

    cov = tfp.stats.covariance(
        x_ph, y_ph, sample_axis=[0, 3], event_axis=[1, 2])
    cov = self.evaluate(cov)
    self.assertAllEqual((3, 4, 3, 4, 6), cov.shape)

    cov_kd = tfp.stats.covariance(
        x_ph, y_ph, sample_axis=[0, 3], event_axis=[1, 2], keepdims=True)
    cov_kd = self.evaluate(cov_kd)
    self.assertAllEqual((1, 3, 4, 3, 4, 1, 6), cov_kd.shape)
    self.assertAllEqual(cov, cov_kd[0, :, :, :, :, 0, :])

    for i in range(6):  # Iterate over batch index.
      # Get ith batch of samples, and permute/reshape to [n_samples, n_events]
      x_i = np.reshape(
          np.transpose(x[:, :, :, :, i], [0, 3, 1, 2]), [2 * 5, 3 * 4])
      y_i = np.reshape(
          np.transpose(y[:, :, :, :, i], [0, 3, 1, 2]), [2 * 5, 3 * 4])
      # Will compare with ith batch of covariance.
      cov_i = np.reshape(cov[..., i], [3 * 4, 3 * 4])
      for m in range(0, 3 * 4, 3):  # Iterate over some rows of matrix
        for n in range(0, 3 * 4, 3):  # Iterate over some columns of matrix
          self.assertAllClose(
              self._np_cov_1d(x_i[:, m], y_i[:, n]), cov_i[m, n])
Developer: asudomoeva, Project: probability, Lines: 31, Source: sample_stats_test.py


Example 11: test_expected_value

 def test_expected_value(self):
   shape_ = np.array([2, int(1e3)], np.int32)
   shape = (tf.constant(shape_) if self.use_static_shape
            else tf.placeholder_with_default(shape_, shape=None))
   # This shape will require broadcasting before sampling.
   scale_ = np.linspace(0.1, 0.5, 3 * 2).astype(self.dtype).reshape(3, 2)
   scale = (tf.constant(scale_) if self.use_static_shape
            else tf.placeholder_with_default(scale_, shape=None))
   x = tfp.math.random_rayleigh(shape,
                                scale=scale[..., tf.newaxis],
                                dtype=self.dtype,
                                seed=42)
   self.assertEqual(self.dtype, x.dtype.as_numpy_dtype)
   final_shape_ = [3, 2, int(1e3)]
   if self.use_static_shape:
     self.assertAllEqual(final_shape_, x.shape)
   sample_mean = tf.reduce_mean(x, axis=-1, keepdims=True)
   sample_var = tf.reduce_mean(tf.squared_difference(
       x, sample_mean), axis=-1)
   [x_, sample_mean_, sample_var_] = self.evaluate([
       x, sample_mean[..., 0], sample_var])
   self.assertAllEqual(final_shape_, x_.shape)
   self.assertAllEqual(np.ones_like(x_, dtype=np.bool), x_ > 0.)
   self.assertAllClose(np.sqrt(np.pi / 2.) * scale_, sample_mean_,
                       atol=0.05, rtol=0.)
   self.assertAllClose(0.5 * (4. - np.pi) * scale_**2., sample_var_,
                       atol=0.05, rtol=0.)
Developer: asudomoeva, Project: probability, Lines: 27, Source: random_ops_test.py


Example 12: test_non_scalar_transition_batch

  def test_non_scalar_transition_batch(self):
    initial_prob_ = tf.constant([0.6, 0.4], dtype=self.dtype)
    transition_matrix_ = tf.constant([0.6, 0.4], dtype=self.dtype)
    observation_locs_ = tf.constant(0.0, dtype=self.dtype)
    observation_scale_ = tf.constant(0.5, dtype=self.dtype)

    initial_prob = tf.placeholder_with_default(initial_prob_,
                                               shape=None)
    transition_matrix = tf.placeholder_with_default(transition_matrix_,
                                                    shape=None)
    observation_locs = tf.placeholder_with_default(observation_locs_,
                                                   shape=None)
    observation_scale = tf.placeholder_with_default(observation_scale_,
                                                    shape=None)

    with self.assertRaisesWithPredicateMatch(
        Exception,
        lambda e: "scalar batches" in str(e)):
      model = tfd.HiddenMarkovModel(tfd.Categorical(probs=initial_prob),
                                    tfd.Categorical(probs=transition_matrix),
                                    tfd.Normal(observation_locs,
                                               scale=observation_scale),
                                    num_steps=4,
                                    validate_args=True)
      self.evaluate(model.mean())
Developer: asudomoeva, Project: probability, Lines: 25, Source: hidden_markov_model_test.py


Example 13: test_consistency

  def test_consistency(self):
    initial_prob_ = tf.constant([0.6, 0.4], dtype=self.dtype)
    transition_matrix_ = tf.constant([[0.6, 0.4],
                                      [0.3, 0.7]], dtype=self.dtype)
    observation_locs_ = tf.constant([0.0, 1.0], dtype=self.dtype)
    observation_scale_ = tf.constant(0.5, dtype=self.dtype)

    initial_prob = tf.placeholder_with_default(initial_prob_,
                                               shape=None)
    transition_matrix = tf.placeholder_with_default(transition_matrix_,
                                                    shape=None)
    observation_locs = tf.placeholder_with_default(observation_locs_,
                                                   shape=None)
    observation_scale = tf.placeholder_with_default(observation_scale_,
                                                    shape=None)

    model = tfd.HiddenMarkovModel(tfd.Categorical(probs=initial_prob),
                                  tfd.Categorical(probs=transition_matrix),
                                  tfd.Normal(loc=observation_locs,
                                             scale=observation_scale),
                                  num_steps=3,
                                  validate_args=True)

    self.run_test_sample_consistent_log_prob(self.evaluate, model,
                                             num_samples=100000,
                                             center=0.5, radius=0.5,
                                             rtol=0.05)
Developer: asudomoeva, Project: probability, Lines: 27, Source: hidden_markov_model_test.py


Example 14: __init__

 def __init__(self, ntoken, ninp, nhid, nlayers, lr=0.001,
              dropout_ratio=0.5, clip_norm = 0.5, **kwargs):
     """
     :param ntoken: #features(input to encoder)
     :param ninp: input_size to LSTM(output of encoder)
     :param nhid: hidden layers in LSTM
     :param nlayers: number of layers
     :param dropout_ratio: dropout rate
     """
     tf.reset_default_graph()
     self.data = tf.placeholder(tf.float32, [None, None, ntoken], name="data_")
     self.target =  tf.placeholder(tf.float32, [None, None, ntoken], name="target_")
     self._ntoken = ntoken
     self._ninp = ninp
     self._nhid = nhid
     self._nlayers = nlayers
     # Setting to defaults known to work well
     self._lr = tf.placeholder_with_default(lr, shape=None,
                                            name="learn_rate_")
     self._dropout_ratio = tf.placeholder_with_default(dropout_ratio, shape=None,
                                                       name="dropout_ratio_")
     self._clip_norm = tf.placeholder_with_default(clip_norm, shape=None,
                                                   name="clip_norm_")
     self.tf_init = tf.global_variables_initializer
     self.prediction
     self.loss
     self.optimize
Developer: apavlo, Project: peloton, Lines: 27, Source: LSTM.py


Example 15: placeholders

    def placeholders(self):
        self._imfiles = tf.placeholder(dtype=tf.string,
                                      shape=[None, None],
                                      name="image_files")
        self._commands = tf.placeholder(dtype=tf.float32,
                                        shape=[None, None, ds.NUM_COMMANDS],
                                        name="commands")
        self._sqlen = tf.placeholder_with_default(1,
                                                  shape=[],
                                                  name="sequence_length")
        self._bsize = tf.placeholder_with_default(1,
                                                  shape=[],
                                                  name="batch_size")
        self._keep_prob = tf.placeholder_with_default(1.0,
                                                      shape=[],
                                                      name="keep_prob")

        tf.add_to_collection("placeholders", self._imfiles)
        tf.add_to_collection("placeholders", self._commands)
        tf.add_to_collection("placeholders", self._sqlen)
        tf.add_to_collection("placeholders", self._bsize)
        tf.add_to_collection("placeholders", self._keep_prob)

        return (self._imfiles, self._commands, self._sqlen,
                self._bsize, self._keep_prob)
Developer: gokhanettin, Project: driverless-car, Lines: 25, Source: model.py


Example 16: testScaledDotAttention

  def testScaledDotAttention(self):
    batch_size = 3
    num_heads = 8
    values_length = [5, 3, 7]
    queries_length = [8, 6, 10]
    depth = 20

    queries = tf.placeholder_with_default(
        np.random.randn(batch_size, num_heads, max(queries_length), depth).astype(np.float32),
        shape=(None, num_heads, None, depth))
    values = tf.placeholder_with_default(
        np.random.randn(batch_size, num_heads, max(values_length), depth).astype(np.float32),
        shape=(None, num_heads, None, depth))
    keys = values

    mask = transformer.build_sequence_mask(values_length, num_heads=num_heads)
    context, attn = transformer.dot_product_attention(
        queries,
        keys,
        values,
        tf.estimator.ModeKeys.PREDICT,
        mask=mask)

    with self.test_session() as sess:
      context, attn = sess.run([context, attn])
      self.assertTupleEqual(
          (batch_size, num_heads, max(queries_length), depth), context.shape)
      self.assertTupleEqual(
          (batch_size, num_heads, max(queries_length), max(values_length)), attn.shape)

      for i in range(batch_size):
        length = values_length[i]
        padding_length = max(values_length) - length
        if padding_length > 0:
          self.assertEqual(0.0, np.sum(attn[i, :, :, length:max(values_length)]))
Developer: yhgon, Project: OpenNMT-tf, Lines: 35, Source: transformer_test.py


Example 17: testShapeGettersWithDynamicShape

 def testShapeGettersWithDynamicShape(self):
   x = tf.placeholder_with_default(input=[2, 4], shape=None)
   y = tf.placeholder_with_default(input=[2, 5], shape=None)
   bijector = tfb.SoftmaxCentered(validate_args=True)
   self.assertAllEqual(
       [2, 5], self.evaluate(bijector.forward_event_shape_tensor(x)))
   self.assertAllEqual(
       [2, 4], self.evaluate(bijector.inverse_event_shape_tensor(y)))
Developer: asudomoeva, Project: probability, Lines: 8, Source: softmax_centered_test.py


Example 18: _construct_nn

    def _construct_nn(self, use_batch_norm, seperate_validation):
        tf.reset_default_graph()
        clear_start([self._ld])
        if self._random_state is not None:
            if self._verbose:
                print('seed is fixed to {}'.format(self._random_state))
            tf.set_random_seed(self._random_state)
            np.random.seed(self._random_state)
        layers = []

        self._input_ph = tf.placeholder(tf.float32, shape=[None, self.structure[0]], name='input')
        self._dropout_keep_rate = tf.placeholder_with_default(1., shape=None, name='keep_rate')
        self._train_mode = tf.placeholder_with_default(False, shape=None, name='train_mode')
        layers.append(self._input_ph)
        j = 1
        with tf.variable_scope('autoencoder'):
            for i, n_neurons in enumerate(self.structure[1:-1]):

                if j == 1:
                    x = tf.layers.dense(self._input_ph, n_neurons, name='hidden_%s' % j,
                                        kernel_initializer=tf.truncated_normal_initializer())
                else:
                    x = tf.layers.dense(x, n_neurons, name='hidden_%s' % j,
                                        kernel_initializer=tf.truncated_normal_initializer())
                if use_batch_norm:
                    x = tf.layers.batch_normalization(x, axis=1, training=self._train_mode, scale=False)
                    layers.append(x)
                x = self.activation_fn(x)
                layers.append(x)
                x = tf.layers.dropout(x, tf.subtract(1., self._dropout_keep_rate), name='dropout_%s' % j)
                layers.append(x)
                if j == self.encoding_layer_index:
                    x = tf.identity(x, name='encoding')
                    self._encoding = x
                j += 1
        self._output = tf.layers.dense(x, self.structure[-1], name='output',
                                       kernel_initializer=tf.truncated_normal_initializer())
        self._labels = tf.placeholder(tf.float32, shape=[None, self.structure[-1]], name='label')
        layers.append(self._output)
        if self._cpu_only:
            with tf.device('/cpu:{}'.format(self._cpu_number)):
                sess = tf.Session(config=self._config)
                if seperate_validation:
                    self._train_writer = tf.summary.FileWriter(self._ld + 'train/', sess.graph)
                    self._val_writer = tf.summary.FileWriter(self._ld + 'val/', sess.graph)
                else:
                    self._train_writer = tf.summary.FileWriter(self._ld, sess.graph)
        else:
            with tf.device('/gpu:{}'.format(self._gpu_number)):
                sess = tf.Session(config=self._config)
                if seperate_validation:
                    self._train_writer = tf.summary.FileWriter(self._ld + 'train/', sess.graph)
                    self._val_writer = tf.summary.FileWriter(self._ld + 'val/')
                else:
                    self._train_writer = tf.summary.FileWriter(self._ld, sess.graph)
        self._sess = sess
        self._network = layers
Developer: sipan17, Project: NeuralNetwork, Lines: 57, Source: autoencoder.py


Example 19: testMeanVariance

 def testMeanVariance(self):
   pln = tfd.PoissonLogNormalQuadratureCompound(
       loc=tf.placeholder_with_default(
           0., shape=[] if self.static_shape else None),
       scale=tf.placeholder_with_default(
           1., shape=[] if self.static_shape else None),
       quadrature_size=10,
       validate_args=True)
   self.run_test_sample_consistent_mean_variance(self.evaluate, pln, rtol=0.02)
Developer: asudomoeva, Project: probability, Lines: 9, Source: poisson_lognormal_test.py


Example 20: testHandlesNonStaticEventNdims

 def testHandlesNonStaticEventNdims(self):
   x_ = [[[1., 2.], [3., 4.]]]
   x = tf.placeholder_with_default(x_, shape=None)
   event_ndims = tf.placeholder_with_default(1, shape=None)
   bij = ExpOnlyJacobian(forward_min_event_ndims=1)
   bij.inverse_log_det_jacobian(x, event_ndims=event_ndims)
   ildj = self.evaluate(
       bij.inverse_log_det_jacobian(x, event_ndims=event_ndims))
   self.assertAllClose(-np.log(x_), ildj)
Developer: asudomoeva, Project: probability, Lines: 9, Source: bijector_test.py



Note: The tensorflow.placeholder_with_default examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not reproduce without permission.

