Python control_flow_ops.with_dependencies Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.control_flow_ops.with_dependencies. If you are wondering what with_dependencies does, how to call it, or what real-world uses look like, the curated code examples below should help.



The following presents 20 code examples of the with_dependencies function, sorted by popularity by default; upvotes from readers help surface the most useful examples.
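
Before the examples, here is a minimal usage sketch (my own illustration, not taken from any of the projects below), assuming a TensorFlow 1.x graph-mode session, which all of the examples rely on. with_dependencies(dependencies, output_tensor) returns a tensor with the same value as output_tensor, but evaluating it first runs every op listed in dependencies:

    import tensorflow as tf
    from tensorflow.python.ops import control_flow_ops

    v = tf.Variable(0.0)
    c = tf.constant(1.0)

    # Same value as `c`, but evaluating it forces `v.initializer` to run first.
    c_after_init = control_flow_ops.with_dependencies([v.initializer], c)

    with tf.Session() as sess:
        print(sess.run(c_after_init))  # 1.0; as a side effect, v is now initialized
        print(sess.run(v))             # 0.0, no "uninitialized value" error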

Example 1: testIndexedSlices

    def testIndexedSlices(self):
        for v1_first in [True, False]:
            with self.test_session():
                v1 = tf.Variable(np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
                v1_at_1 = tf.IndexedSlices(
                    control_flow_ops.with_dependencies([v1.initializer], v1.ref()), tf.constant([1])
                )

                v2 = tf.Variable(np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(np.float32))
                v2_at_1 = tf.IndexedSlices(
                    control_flow_ops.with_dependencies([v2.initializer], v2.ref()), tf.constant([1])
                )

                st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
                g1 = tf.gather(st1.values, st1.indices)
                g2 = tf.gather(st2.values, st2.indices)

                # v1 is not initialized.
                with self.assertRaisesOpError("Attempting to use uninitialized value"):
                    v1.eval()

                # v2 is not initialized.
                with self.assertRaisesOpError("Attempting to use uninitialized value"):
                    v2.eval()

                if v1_first:
                    # Getting g1 initializes v2.
                    self.assertAllClose([[10.0, 11.0]], g1.eval())
                    self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]], v2.eval())
                else:
                    # Getting g2 initializes v1.
                    self.assertAllClose([[10.1, 11.1]], g2.eval())
                    self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v1.eval())
Author: peace195 | Project: tensorflow | Lines: 33 | Source: control_flow_ops_py_test.py


Example 2: testWithTensorDependencies

  def testWithTensorDependencies(self):
    with self.test_session():
      v = tf.Variable(0.0)
      c1 = tf.constant(10)
      c2 = tf.constant(20)

      # c1_with_init_v depends on the init op for v
      c1_with_init_v = control_flow_ops.with_dependencies(
          name="c1_with_init_v",
          output_tensor=c1,
          dependencies=[v.initializer])
      # c2_with_c1 depends on the value of c1_with_init_v
      c2_with_c1_dep = control_flow_ops.with_dependencies(
          name="c2_with_c1_dep",
          output_tensor=c2,
          dependencies=[c1_with_init_v])

      # Fetching v directly will result in an uninitialized error
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        v.eval()

      # Get the value of 'c2_with_c1_dep', which should cause 'v'
      # to be initialized.
      self.assertAllEqual(20, c2_with_c1_dep.eval())

      # Ensure that 'v' is initialized
      self.assertAllClose(0.0, v.eval())
Author: hypatiad | Project: tensorflow | Lines: 27 | Source: control_flow_ops_py_test.py


Example 3: _check_shapes_dynamic

  def _check_shapes_dynamic(self, operator, v, diag):
    """Return (v, diag) with Assert dependencies, which check shape."""
    checks = []
    with ops.op_scope([operator, v, diag], 'check_shapes'):
      s_v = array_ops.shape(v)
      r_op = operator.rank()
      r_v = array_ops.rank(v)
      if diag is not None:
        s_d = array_ops.shape(diag)
        r_d = array_ops.rank(diag)

      # Check tensor rank.
      checks.append(check_ops.assert_rank(v, r_op))
      if diag is not None:
        checks.append(check_ops.assert_rank(diag, r_op - 1))

      # Check batch shape
      checks.append(check_ops.assert_equal(
          operator.batch_shape(), array_ops.slice(s_v, [0], [r_v - 2])))
      if diag is not None:
        checks.append(check_ops.assert_equal(
            operator.batch_shape(), array_ops.slice(s_d, [0], [r_d - 1])))

      # Check event shape
      checks.append(check_ops.assert_equal(
          operator.vector_space_dimension(), array_ops.gather(s_v, r_v - 2)))
      if diag is not None:
        checks.append(check_ops.assert_equal(
            array_ops.gather(s_v, r_v - 1), array_ops.gather(s_d, r_d - 1)))

      v = control_flow_ops.with_dependencies(checks, v)
      if diag is not None:
        diag = control_flow_ops.with_dependencies(checks, diag)
      return v, diag
Author: 10imaging | Project: tensorflow | Lines: 34 | Source: operator_pd_vdvt_update.py


Example 4: testTensors

  def testTensors(self):
    for v1_first in [True, False]:
      with self.test_session():
        v1 = tf.Variable([1.0])
        add1 = tf.add(
            control_flow_ops.with_dependencies([v1.initializer], v1.ref()),
            2.0)
        v2 = tf.Variable([10.0])
        add2 = tf.add(
            control_flow_ops.with_dependencies([v2.initializer], v2.ref()),
            20.0)
        t1, _, t2 = control_flow_ops.tuple([add1, None, add2])

        # v1 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          v1.eval()

        # v2 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          v2.eval()

        if v1_first:
          # Getting t1 initializes v2.
          self.assertAllClose([3.0], t1.eval())
          self.assertAllClose([10.0], v2.eval())
        else:
          # Getting t2 initializes v1.
          self.assertAllClose([30.0], t2.eval())
          self.assertAllClose([1.0], v1.eval())
Author: hypatiad | Project: tensorflow | Lines: 29 | Source: control_flow_ops_py_test.py


Example 5: kl_multivariate_normal

def kl_multivariate_normal(loc_one, scale_one, loc_two=0.0, scale_two=1.0):
    """Calculate the KL of multivariate normal distributions with
    diagonal covariances.

    Parameters
    ----------
    loc_one : tf.Tensor
        A 0-D tensor, 1-D tensor of length n, or 2-D tensor of shape M
        x n where each row represents the mean of a n-dimensional
        Gaussian.
    scale_one : tf.Tensor
        A tensor of same shape as ``loc_one``, representing the
        standard deviation.
    loc_two : tf.Tensor, optional
        A tensor of same shape as ``loc_one``, representing the
        mean of another Gaussian.
    scale_two : tf.Tensor, optional
        A tensor of same shape as ``loc_one``, representing the
        standard deviation of another Gaussian.

    Returns
    -------
    tf.Tensor
        For 0-D or 1-D tensor inputs, outputs the 0-D tensor
        ``KL( N(z; loc_one, scale_one) || N(z; loc_two, scale_two) )``
        For 2-D tensor inputs, outputs the 1-D tensor
        ``[KL( N(z; loc_one[m,:], scale_one[m,:]) || N(z; loc_two[m,:], scale_two[m,:]) )]_{m=1}^M``

    Raises
    ------
    InvalidArgumentError
        If the location variables have Inf or NaN values, or if the scale
        variables are not positive.
    """
    dependencies = [tf.verify_tensor_all_finite(loc_one, msg=''),
                    tf.verify_tensor_all_finite(loc_two, msg=''),
                    tf.assert_positive(scale_one),
                    tf.assert_positive(scale_two)]
    loc_one = control_flow_ops.with_dependencies(dependencies, loc_one)
    scale_one = control_flow_ops.with_dependencies(dependencies, scale_one)
    loc_one = tf.cast(loc_one, tf.float32)
    scale_one = tf.cast(scale_one, tf.float32)

    if loc_two == 0.0 and scale_two == 1.0:
        # With default arguments, we can avoid some intermediate computation.
        out = tf.square(scale_one) + tf.square(loc_one) - \
              1.0 - 2.0 * tf.log(scale_one)
    else:
        loc_two = control_flow_ops.with_dependencies(dependencies, loc_two)
        scale_two = control_flow_ops.with_dependencies(dependencies, scale_two)
        loc_two = tf.cast(loc_two, tf.float32)
        scale_two = tf.cast(scale_two, tf.float32)
        out = tf.square(scale_one/scale_two) + \
              tf.square((loc_two - loc_one)/scale_two) - \
              1.0 + 2.0 * tf.log(scale_two) - 2.0 * tf.log(scale_one)

    if len(out.get_shape()) <= 1: # scalar or vector
        return 0.5 * tf.reduce_sum(out)
    else: # matrix
        return 0.5 * tf.reduce_sum(out, 1)
Author: TalkingData | Project: edward | Lines: 60 | Source: util.py
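
For reference (added here; not part of the original Edward source), the expression assembled in `out` is the standard closed form of the KL divergence between Gaussians with diagonal covariance, summed over the n dimensions:

$$\mathrm{KL}\big(\mathcal{N}(\mathrm{loc}_1, \mathrm{scale}_1^2)\,\|\,\mathcal{N}(\mathrm{loc}_2, \mathrm{scale}_2^2)\big) = \frac{1}{2}\sum_{d=1}^{n}\left[\frac{\mathrm{scale}_{1,d}^2}{\mathrm{scale}_{2,d}^2} + \frac{(\mathrm{loc}_{2,d}-\mathrm{loc}_{1,d})^2}{\mathrm{scale}_{2,d}^2} - 1 + 2\log\mathrm{scale}_{2,d} - 2\log\mathrm{scale}_{1,d}\right]$$

With the defaults `loc_two=0.0, scale_two=1.0`, the `2 log(scale_two)` term vanishes, leaving `scale_one**2 + loc_one**2 - 1 - 2*log(scale_one)`, which is exactly the shortcut branch in the code.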


Example 6: _verify_input

def _verify_input(tensor_list, labels, probs_list):
  """Verify that batched inputs are well-formed."""
  checked_probs_list = []
  for probs in probs_list:
    # Since number of classes shouldn't change at runtime, probabilities shape
    # should be fully defined.
    probs.get_shape().assert_is_fully_defined()

    # Probabilities must be 1D.
    probs.get_shape().assert_has_rank(1)

    # Probabilities must be nonnegative and sum to one.
    tol = 1e-6
    prob_sum = math_ops.reduce_sum(probs)
    checked_probs = control_flow_ops.with_dependencies([
        check_ops.assert_non_negative(probs),
        check_ops.assert_less(prob_sum, 1.0 + tol),
        check_ops.assert_less(1.0 - tol, prob_sum)
    ], probs)
    checked_probs_list.append(checked_probs)

  # All probabilities should be the same length.
  prob_length = checked_probs_list[0].get_shape().num_elements()
  for checked_prob in checked_probs_list:
    if checked_prob.get_shape().num_elements() != prob_length:
      raise ValueError('Probability parameters must have the same length.')

  # Labels tensor should only have batch dimension.
  labels.get_shape().assert_has_rank(1)

  for tensor in tensor_list:
    # Data tensor should have a batch dimension.
    shape = tensor.get_shape().with_rank_at_least(1)

    # Data and label batch dimensions must be compatible.
    tensor_shape.dimension_at_index(shape, 0).assert_is_compatible_with(
        labels.get_shape()[0])

  # Data and labels must have the same, strictly positive batch size. Since we
  # can't assume we know the batch size at graph creation, add runtime checks.
  labels_batch_size = array_ops.shape(labels)[0]
  lbl_assert = check_ops.assert_positive(labels_batch_size)

  # Make each tensor depend on its own checks.
  labels = control_flow_ops.with_dependencies([lbl_assert], labels)
  tensor_list = [
      control_flow_ops.with_dependencies([
          lbl_assert,
          check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)
      ], x) for x in tensor_list
  ]

  # Label's classes must be integers 0 <= x < num_classes.
  labels = control_flow_ops.with_dependencies([
      check_ops.assert_integer(labels), check_ops.assert_non_negative(labels),
      check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))
  ], labels)

  return tensor_list, labels, checked_probs_list
Author: Ajaycs99 | Project: tensorflow | Lines: 59 | Source: sampling_ops.py


Example 7: _maybe_attach_assertion

 def _maybe_attach_assertion(x):
   if not validate_args:
     return x
   if assert_positive:
     return control_flow_ops.with_dependencies([
         tf.assert_positive(x, message="diagonal part must be positive"),
     ], x)
   return control_flow_ops.with_dependencies([
       tf.assert_none_equal(
           x, tf.zeros([], x.dtype), message="diagonal part must be non-zero")
   ], x)
Author: lewisKit | Project: probability | Lines: 11 | Source: distribution_util.py


Example 8: rbf

def rbf(X, X2=None, lengthscale=1.0, variance=1.0):
  """Radial basis function kernel, also known as the squared
  exponential or exponentiated quadratic. It is defined as

  $k(x, x') = \sigma^2 \exp\Big(
      -\\frac{1}{2} \sum_{d=1}^D \\frac{1}{\ell_d^2} (x_d - x'_d)^2 \Big)$

  for output variance $\sigma^2$ and lengthscale $\ell^2$.

  The kernel is evaluated over all pairs of rows, `k(X[i, ], X2[j, ])`.
  If `X2` is not specified, then it evaluates over all pairs
  of rows in `X`, `k(X[i, ], X[j, ])`. The output is a matrix
  where each entry (i, j) is the kernel over the ith and jth rows.

  Args:
    X: tf.Tensor.
      N x D matrix of N data points each with D features.
    X2: tf.Tensor.
      N x D matrix of N data points each with D features.
    lengthscale: tf.Tensor.
      Lengthscale parameter, a positive scalar or D-dimensional vector.
    variance: tf.Tensor.
      Output variance parameter, a positive scalar.

  #### Examples

  ```python
  X = tf.random_normal([100, 5])
  K = ed.rbf(X)
  assert K.shape == (100, 100)
  ```
  """
  lengthscale = tf.convert_to_tensor(lengthscale)
  variance = tf.convert_to_tensor(variance)
  dependencies = [tf.assert_positive(lengthscale),
                  tf.assert_positive(variance)]
  lengthscale = control_flow_ops.with_dependencies(dependencies, lengthscale)
  variance = control_flow_ops.with_dependencies(dependencies, variance)

  X = tf.convert_to_tensor(X)
  X = X / lengthscale
  Xs = tf.reduce_sum(tf.square(X), 1)
  if X2 is None:
    X2 = X
    X2s = Xs
  else:
    X2 = tf.convert_to_tensor(X2)
    X2 = X2 / lengthscale
    X2s = tf.reduce_sum(tf.square(X2), 1)

  square = tf.reshape(Xs, [-1, 1]) + tf.reshape(X2s, [1, -1]) - \
      2 * tf.matmul(X, X2, transpose_b=True)
  output = variance * tf.exp(-square / 2)
  return output
Author: JoyceYa | Project: edward | Lines: 54 | Source: tensorflow.py
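
A brief note on the computation (my gloss, not part of the original Edward source): after dividing by the lengthscale, the pairwise squared distances are obtained from the expansion

$$\|x - x'\|^2 = \|x\|^2 + \|x'\|^2 - 2\,x^\top x',$$

which is what the `Xs`/`X2s` row sums plus the `-2 * tf.matmul(X, X2, transpose_b=True)` term compute, so `output[i, j]` equals $\sigma^2 \exp(-\|X[i,:] - X2[j,:]\|^2 / 2)$ in the rescaled coordinates.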


Example 9: _check_domain_range_possibly_add_asserts

  def _check_domain_range_possibly_add_asserts(self):
    """Static check of init arg `num_rows`, possibly add asserts."""
    # Possibly add asserts.
    if self._assert_proper_shapes:
      self._num_rows = control_flow_ops.with_dependencies([
          check_ops.assert_rank(
              self._num_rows,
              0,
              message="Argument num_rows must be a 0-D Tensor."),
          check_ops.assert_non_negative(
              self._num_rows,
              message="Argument num_rows must be non-negative."),
      ], self._num_rows)
      self._num_columns = control_flow_ops.with_dependencies([
          check_ops.assert_rank(
              self._num_columns,
              0,
              message="Argument num_columns must be a 0-D Tensor."),
          check_ops.assert_non_negative(
              self._num_columns,
              message="Argument num_columns must be non-negative."),
      ], self._num_columns)

    # Static checks.
    if not self._num_rows.dtype.is_integer:
      raise TypeError("Argument num_rows must be integer type.  Found:"
                      " %s" % self._num_rows)

    if not self._num_columns.dtype.is_integer:
      raise TypeError("Argument num_columns must be integer type.  Found:"
                      " %s" % self._num_columns)

    num_rows_static = self._num_rows_static
    num_columns_static = self._num_columns_static

    if num_rows_static is not None:
      if num_rows_static.ndim != 0:
        raise ValueError("Argument num_rows must be a 0-D Tensor.  Found:"
                         " %s" % num_rows_static)

      if num_rows_static < 0:
        raise ValueError("Argument num_rows must be non-negative.  Found:"
                         " %s" % num_rows_static)
    if num_columns_static is not None:
      if num_columns_static.ndim != 0:
        raise ValueError("Argument num_columns must be a 0-D Tensor.  Found:"
                         " %s" % num_columns_static)

      if num_columns_static < 0:
        raise ValueError("Argument num_columns must be non-negative.  Found:"
                         " %s" % num_columns_static)
Author: AnishShah | Project: tensorflow | Lines: 51 | Source: linear_operator_zeros.py


Example 10: kl_multivariate_normal

def kl_multivariate_normal(loc_one, scale_one, loc_two=0.0, scale_two=1.0):
    """Calculate the KL of multivariate normal distributions with
    diagonal covariances.

    Parameters
    ----------
    loc_one : tf.Tensor
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the mean of a n-dimensional Gaussian
    scale_one : tf.Tensor
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the standard deviation of a n-dimensional Gaussian
    loc_two : tf.Tensor, optional
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the mean of a n-dimensional Gaussian
    scale_two : tf.Tensor, optional
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the standard deviation of a n-dimensional Gaussian

    Returns
    -------
    tf.Tensor
        for scalar or vector inputs, outputs the scalar
        ``KL( N(z; loc_one, scale_one) || N(z; loc_two, scale_two) )``
        for matrix inputs, outputs the vector
        ``[KL( N(z; loc_one[m,:], scale_one[m,:]) || N(z; loc_two[m,:], scale_two[m,:]) )]_{m=1}^M``

    Raises
    ------
    InvalidArgumentError
        If the location variables have Inf or NaN values, or if the scale
        variables are not positive.
    """
    dependencies = [tf.verify_tensor_all_finite(loc_one, msg=''),
                    tf.verify_tensor_all_finite(loc_two, msg=''),
                    tf.assert_positive(scale_one),
                    tf.assert_positive(scale_two)]
    loc_one = control_flow_ops.with_dependencies(dependencies, loc_one)
    loc_two = control_flow_ops.with_dependencies(dependencies, loc_two)
    scale_one = control_flow_ops.with_dependencies(dependencies, scale_one)
    scale_two = control_flow_ops.with_dependencies(dependencies, scale_two)

    if loc_two == 0.0 and scale_two == 1.0:
        return 0.5 * tf.reduce_sum(
            tf.square(scale_one) + tf.square(loc_one) - \
            1.0 - 2.0 * tf.log(scale_one))
    else:
        return 0.5 * tf.reduce_sum(
            tf.square(scale_one/scale_two) + \
            tf.square((loc_two - loc_one)/scale_two) - \
            1.0 + 2.0 * tf.log(scale_two) - 2.0 * tf.log(scale_one), 1)
Author: leezqcst | Project: edward | Lines: 51 | Source: util.py


Example 11: setUpClass

    def setUpClass(cls):
        cls._dump_root = tempfile.mkdtemp()

        cls._is_gpu_available = test.is_gpu_available()
        if cls._is_gpu_available:
            cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
        else:
            cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"

        with session.Session() as sess:
            x_init_val = np.array([5.0, 3.0])
            x_init = constant_op.constant(x_init_val, shape=[2])
            x = variables.Variable(x_init, name="control_deps/x")

            y = math_ops.add(x, x, name="control_deps/y")
            y = control_flow_ops.with_dependencies([x], y, name="control_deps/ctrl_dep_y")

            z = math_ops.mul(x, y, name="control_deps/z")

            z = control_flow_ops.with_dependencies([x, y], z, name="control_deps/ctrl_dep_z")

            x.initializer.run()

            run_options = config_pb2.RunOptions(output_partition_graphs=True)
            debug_utils.watch_graph(
                run_options, sess.graph, debug_ops=["DebugIdentity"], debug_urls="file://%s" % cls._dump_root
            )

            # Invoke Session.run().
            run_metadata = config_pb2.RunMetadata()
            sess.run(z, options=run_options, run_metadata=run_metadata)

        debug_dump = debug_data.DebugDumpDir(cls._dump_root, partition_graphs=run_metadata.partition_graphs)

        # Construct the analyzer.
        analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

        # Construct the handler registry.
        cls._registry = debugger_cli_common.CommandHandlerRegistry()

        # Register command handlers.
        cls._registry.register_command_handler(
            "node_info", analyzer.node_info, analyzer.get_help("node_info"), prefix_aliases=["ni"]
        )
        cls._registry.register_command_handler(
            "list_inputs", analyzer.list_inputs, analyzer.get_help("list_inputs"), prefix_aliases=["li"]
        )
        cls._registry.register_command_handler(
            "list_outputs", analyzer.list_outputs, analyzer.get_help("list_outputs"), prefix_aliases=["lo"]
        )
Author: brchiu | Project: tensorflow | Lines: 50 | Source: analyzer_cli_test.py


Example 12: _initialize_variables

  def _initialize_variables(self, data, initial_means=None):
    """Initializes variables.

    Args:
      data: a list of Tensors with data, each row is a new example.
      initial_means: a Tensor with a matrix of means.
    """
    first_shard = data[0]
    # Initialize means: num_classes X 1 X dimensions.
    if initial_means is not None:
      means = array_ops.expand_dims(initial_means, 1)
    else:
      # Sample data randomly
      means = array_ops.expand_dims(
          _init_clusters_random(data, self._num_classes, self._random_seed), 1)

    # Initialize covariances.
    if self._covariance_type == FULL_COVARIANCE:
      cov = _covariance(first_shard, False) + self._min_var
      # A matrix per class, num_classes X dimensions X dimensions
      covs = array_ops.tile(
          array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
    elif self._covariance_type == DIAG_COVARIANCE:
      cov = _covariance(first_shard, True) + self._min_var
      # A diagonal per row, num_classes X dimensions.
      covs = array_ops.tile(
          array_ops.expand_dims(array_ops.diag_part(cov), 0),
          [self._num_classes, 1])

    with ops.colocate_with(self._cluster_centers_initialized):
      initialized = control_flow_ops.with_dependencies(
          [means, covs],
          array_ops.identity(self._cluster_centers_initialized))
    self._init_ops = []
    with ops.colocate_with(self._means):
      init_means = state_ops.assign(self._means, means, validate_shape=False)
      init_means = control_flow_ops.with_dependencies(
          [init_means],
          state_ops.assign(self._cluster_centers_initialized, True))
      self._init_ops.append(control_flow_ops.cond(initialized,
                                                  control_flow_ops.no_op,
                                                  lambda: init_means).op)
    with ops.colocate_with(self._covs):
      init_covs = state_ops.assign(self._covs, covs, validate_shape=False)
      init_covs = control_flow_ops.with_dependencies(
          [init_covs],
          state_ops.assign(self._cluster_centers_initialized, True))
      self._init_ops.append(control_flow_ops.cond(initialized,
                                                  control_flow_ops.no_op,
                                                  lambda: init_covs).op)
Author: AndreasGocht | Project: tensorflow | Lines: 50 | Source: gmm_ops.py


Example 13: _verify_input

def _verify_input(data, labels, probs_list):
  """Verify that batched inputs are well-formed."""
  checked_probs_list = []
  for probs in probs_list:
    # Probabilities must be able to be converted to non-object numpy array.
    np_probs = np.asarray(probs)
    if np_probs.dtype == np.dtype('object'):
      raise ValueError('Probabilities must be able to be converted to a numpy '
                       'array.')
    checked_probs_list.append(np_probs)

    # Probabilities must sum to one.
    # TODO(joelshor): Investigate whether logits should be passed instead of
    # probs.
    if not np.isclose(np.sum(probs), 1.0):
      raise ValueError('Probabilities must sum to one.')

  # All probabilities should be the same length.
  if not np.array_equiv([probs.shape for probs in checked_probs_list],
                        checked_probs_list[0].shape):
    raise ValueError('Probability parameters must have the same length.')

  # Labels tensor should only have batch dimension.
  labels.get_shape().assert_has_rank(1)

  # Data tensor should have a batch dimension.
  data_shape = data.get_shape().with_rank_at_least(1)

  # Data and label batch dimensions must be compatible.
  data_shape[0].assert_is_compatible_with(labels.get_shape()[0])

  # Data and labels must have the same, strictly positive batch size. Since we
  # can't assume we know the batch size at graph creation, add runtime checks.
  data_batch_size = array_ops.shape(data)[0]
  labels_batch_size = array_ops.shape(labels)[0]

  data = control_flow_ops.with_dependencies(
      [check_ops.assert_positive(data_batch_size),
       check_ops.assert_equal(data_batch_size, labels_batch_size)],
      data)

  # Label's classes must be integers 0 <= x < num_classes.
  labels = control_flow_ops.with_dependencies(
      [check_ops.assert_integer(labels),
       check_ops.assert_non_negative(labels),
       check_ops.assert_less(labels, math_ops.cast(len(probs), labels.dtype))],
      labels)

  return data, labels, checked_probs_list
Author: Bala96 | Project: tensorflow | Lines: 49 | Source: sampling_ops.py


Example 14: _maybe_attach_assertion

 def _maybe_attach_assertion(x):
   if not validate_args:
     return x
   if assert_positive:
     return control_flow_ops.with_dependencies([
         check_ops.assert_positive(
             array_ops.matrix_diag_part(x),
             message="diagonal part must be positive"),
     ], x)
   return control_flow_ops.with_dependencies([
       check_ops.assert_none_equal(
           array_ops.matrix_diag_part(x),
           array_ops.zeros([], x.dtype),
           message="diagonal part must be non-zero"),
   ], x)
Author: AndrewTwinz | Project: tensorflow | Lines: 15 | Source: distribution_util.py


Example 15: _maybe_attach_assertion

 def _maybe_attach_assertion(x):
   if not validate_args:
     return x
   if assert_positive:
     return control_flow_ops.with_dependencies([
         check_ops.assert_positive(
             x, message="diagonal part must be positive"),
     ], x)
   # TODO(b/35157376): Use `assert_none_equal` once it exists.
   return control_flow_ops.with_dependencies([
       check_ops.assert_greater(
           math_ops.abs(x),
           array_ops.zeros([], x.dtype),
           message="diagonal part must be non-zero"),
   ], x)
Author: LUTAN | Project: tensorflow | Lines: 15 | Source: distribution_util.py


Example 16: maybe_check_quadrature_param

def maybe_check_quadrature_param(param, name, validate_args):
  """Helper which checks validity of `loc` and `scale` init args."""
  with ops.name_scope(name="check_" + name, values=[param]):
    assertions = []
    if param.shape.ndims is not None:
      if param.shape.ndims == 0:
        raise ValueError("Mixing params must be a (batch of) vector; "
                         "{}.rank={} is not at least one.".format(
                             name, param.shape.ndims))
    elif validate_args:
      assertions.append(check_ops.assert_rank_at_least(
          param, 1,
          message=("Mixing params must be a (batch of) vector; "
                   "{}.rank is not at least one.".format(
                       name))))

    # TODO(jvdillon): Remove once we support k-mixtures.
    if param.shape.with_rank_at_least(1)[-1] is not None:
      if param.shape[-1].value != 1:
        raise NotImplementedError("Currently only bimixtures are supported; "
                                  "{}.shape[-1]={} is not 1.".format(
                                      name, param.shape[-1].value))
    elif validate_args:
      assertions.append(check_ops.assert_equal(
          array_ops.shape(param)[-1], 1,
          message=("Currently only bimixtures are supported; "
                   "{}.shape[-1] is not 1.".format(name))))

    if assertions:
      return control_flow_ops.with_dependencies(assertions, param)
    return param
Author: bikong2 | Project: tensorflow | Lines: 31 | Source: vector_diffeomixture.py


Example 17: _maybe_assert_valid_x

 def _maybe_assert_valid_x(self, x):
   if not self.validate_args:
     return x
   is_valid = check_ops.assert_non_negative(
       x,
       message="Forward transformation input must be at least {}.".format(0))
   return control_flow_ops.with_dependencies([is_valid], x)
Author: AbhinavJain13 | Project: tensorflow | Lines: 7 | Source: weibull.py


Example 18: testShape

 def testShape(self):
   with ops.Graph().as_default():
     tensor = tf.constant([1.0, 2.0])
     self.assertEquals([2], tensor.get_shape())
     self.assertEquals([2],
                       control_flow_ops.with_dependencies(
                           [tf.constant(1.0)], tensor).get_shape())
Author: 2er0 | Project: tensorflow | Lines: 7 | Source: control_flow_ops_test.py


Example 19: testReuseVars

 def testReuseVars(self):
   height, width = 3, 3
   with self.test_session() as sess:
     image_shape = (10, height, width, 3)
     image_values = np.random.rand(*image_shape)
     expected_mean = np.mean(image_values, axis=(0, 1, 2))
     expected_var = np.var(image_values, axis=(0, 1, 2))
     images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
     output = ops.batch_norm(images, decay=0.1, is_training=False)
     update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
     with tf.control_dependencies(update_ops):
       barrier = tf.no_op(name='gradient_barrier')
       output = control_flow_ops.with_dependencies([barrier], output)
     # Initialize all variables
     sess.run(tf.global_variables_initializer())
     moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
     moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
     mean, variance = sess.run([moving_mean, moving_variance])
     # After initialization moving_mean == 0 and moving_variance == 1.
     self.assertAllClose(mean, [0] * 3)
     self.assertAllClose(variance, [1] * 3)
      # Simulate assignment from saver restore.
     init_assigns = [tf.assign(moving_mean, expected_mean),
                     tf.assign(moving_variance, expected_var)]
     sess.run(init_assigns)
     for _ in range(10):
       sess.run([output], {images: np.random.rand(*image_shape)})
     mean = moving_mean.eval()
     variance = moving_variance.eval()
     # Although we feed different images, the moving_mean and moving_variance
     # shouldn't change.
     self.assertAllClose(mean, expected_mean)
     self.assertAllClose(variance, expected_var)
Author: Aravindreddy986 | Project: TensorFlowOnSpark | Lines: 33 | Source: ops_test.py


Example 20: testComputeMovingVars

 def testComputeMovingVars(self):
   height, width = 3, 3
   with self.test_session() as sess:
     image_shape = (10, height, width, 3)
     image_values = np.random.rand(*image_shape)
     expected_mean = np.mean(image_values, axis=(0, 1, 2))
     expected_var = np.var(image_values, axis=(0, 1, 2))
     images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
     output = ops.batch_norm(images, decay=0.1)
     update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
     with tf.control_dependencies(update_ops):
       barrier = tf.no_op(name='gradient_barrier')
       output = control_flow_ops.with_dependencies([barrier], output)
     # Initialize all variables
     sess.run(tf.global_variables_initializer())
     moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
     moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
     mean, variance = sess.run([moving_mean, moving_variance])
     # After initialization moving_mean == 0 and moving_variance == 1.
     self.assertAllClose(mean, [0] * 3)
     self.assertAllClose(variance, [1] * 3)
     for _ in range(10):
       sess.run([output])
     mean = moving_mean.eval()
     variance = moving_variance.eval()
     # After 10 updates with decay 0.1 moving_mean == expected_mean and
     # moving_variance == expected_var.
     self.assertAllClose(mean, expected_mean)
     self.assertAllClose(variance, expected_var)
Author: Aravindreddy986 | Project: TensorFlowOnSpark | Lines: 29 | Source: ops_test.py



Note: the tensorflow.python.ops.control_flow_ops.with_dependencies examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult each project's license before redistributing or using the code; do not reproduce without permission.

