Python nn_ops.conv3d Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.nn_ops.conv3d. If you are wondering what exactly conv3d does, how to call it, or what real-world usage looks like, the curated code examples below may help.

A total of 12 code examples of the conv3d function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
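Before the examples, here is a minimal standalone sketch of a direct nn_ops.conv3d call. The shapes and values are illustrative assumptions and are not taken from any example below: the input uses the default NDHWC layout [batch, depth, height, width, in_channels], the filter is [filter_depth, filter_height, filter_width, in_channels, out_channels], and strides lists one stride per input dimension, with the batch and channel strides fixed at 1.

# A minimal illustrative sketch of calling tensorflow.python.ops.nn_ops.conv3d.
# Shapes and values are assumptions chosen for demonstration only.
import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import nn_ops

# Input: [batch, depth, height, width, in_channels] (NDHWC, the default layout).
x = constant_op.constant(
    np.random.random_sample([1, 4, 4, 4, 2]), dtype=dtypes.float32)
# Filter: [filter_depth, filter_height, filter_width, in_channels, out_channels].
w = constant_op.constant(
    np.random.random_sample([3, 3, 3, 2, 5]), dtype=dtypes.float32)

# strides has one entry per input dimension; the batch and channel strides
# must be 1. With "SAME" padding and unit strides the spatial shape is kept.
conv = nn_ops.conv3d(x, w, strides=[1, 1, 1, 1, 1], padding="SAME")
print(conv.shape)  # Expected: (1, 4, 4, 4, 5)

The test methods that follow exercise the same call with different strides, dilations, paddings, and data formats, and compare results or gradients against reference implementations.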

Example 1: _RunAndVerifyBackprop

  def _RunAndVerifyBackprop(self, input_sizes, filter_sizes, output_sizes,
                            strides, dilations, padding, data_format, use_gpu,
                            err, mode):
    total_input_size = 1
    total_filter_size = 1
    for s in input_sizes:
      total_input_size *= s
    for s in filter_sizes:
      total_filter_size *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = [f * 1.0 for f in range(1, total_input_size + 1)]
    x2 = [f * 1.0 for f in range(1, total_filter_size + 1)]
    default_dilations = (
        dilations[0] == 1 and dilations[1] == 1 and dilations[2] == 1)

    # If any dilation rate is larger than 1, only do test on the GPU
    # because we currently do not have a CPU implementation for arbitrary
    # dilation rates.
    if default_dilations or use_gpu:
      with self.cached_session(use_gpu=use_gpu) as sess:
        if data_format == "NCDHW":
          input_sizes = test_util.NHWCToNCHW(input_sizes)
        t1 = constant_op.constant(x1, shape=input_sizes)
        t2 = constant_op.constant(x2, shape=filter_sizes)
        full_strides = [1] + strides + [1]
        full_dilations = [1] + dilations + [1]
        if data_format == "NCDHW":
          full_strides = test_util.NHWCToNCHW(full_strides)
          full_dilations = test_util.NHWCToNCHW(full_dilations)
        actual = nn_ops.conv3d(
            t1,
            t2,
            strides=full_strides,
            dilations=full_dilations,
            padding=padding,
            data_format=data_format)
        expected = nn_ops.convolution(
            t1,
            t2,
            padding=padding,
            strides=strides,
            dilation_rate=dilations,
            data_format=data_format)
        if data_format == "NCDHW":
          actual = test_util.NCHWToNHWC(actual)
          expected = test_util.NCHWToNHWC(expected)
        actual_grad = gradients_impl.gradients(actual, t1
                                               if mode == "input" else t2)[0]
        expected_grad = gradients_impl.gradients(expected, t1
                                                 if mode == "input" else t2)[0]
        # "values" consists of two tensors for two backprops
        actual_value = self.evaluate(actual_grad)
        expected_value = self.evaluate(expected_grad)
        self.assertShapeEqual(actual_value, actual_grad)
        self.assertShapeEqual(expected_value, expected_grad)
      print("expected = ", expected_value)
      print("actual = ", actual_value)
      self.assertArrayNear(expected_value.flatten(), actual_value.flatten(),
                           err)
Developer: adit-chandra, Project: tensorflow, Lines: 60, Source: conv_ops_3d_test.py


Example 2: testGradient

 def testGradient(self):
   with self.test_session():
     for padding in ["SAME", "VALID"]:
       for stride in [1, 2]:
         np.random.seed(1)
         in_shape = [2, 4, 3, 3, 2]
         in_val = constant_op.constant(
             2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
         filter_shape = [3, 3, 3, 2, 3]
         strides = [1, stride, stride, stride, 1]
         # Make a convolution op with the current settings, just to easily get
         # the shape of the output.
         conv_out = nn_ops.conv3d(in_val,
                                  array_ops.zeros(filter_shape), strides,
                                  padding)
         out_backprop_shape = conv_out.get_shape().as_list()
         out_backprop_val = constant_op.constant(
             2 * np.random.random_sample(out_backprop_shape) - 1,
             dtype=dtypes.float32)
         output = nn_ops.conv3d_backprop_filter_v2(in_val, filter_shape,
                                                   out_backprop_val, strides,
                                                   padding)
         err = gradient_checker.compute_gradient_error(
             [in_val, out_backprop_val], [in_shape, out_backprop_shape],
             output, filter_shape)
         print("conv3d_backprop_filter gradient err = %g " % err)
         err_tolerance = 1e-3
         self.assertLess(err, err_tolerance)
Developer: 1000sprites, Project: tensorflow, Lines: 28, Source: conv3d_backprop_filter_v2_grad_test.py


Example 3: _SetupValuesForDevice

  def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
                            padding, data_format, dtype, use_gpu):
    total_size_tensor = 1
    total_size_filter = 1
    for s in tensor_in_sizes:
      total_size_tensor *= s
    for s in filter_in_sizes:
      total_size_filter *= s

    # Initializes the input tensor with array containing numbers from 0 to 1.
    # We keep the input tensor values fairly small to avoid overflowing float16
    # during the conv3d.
    x1 = [f * 1.0 / total_size_tensor for f in range(1, total_size_tensor + 1)]
    x2 = [f * 1.0 / total_size_filter for f in range(1, total_size_filter + 1)]
    with self.cached_session(use_gpu=use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
      t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)

      if isinstance(stride, collections.Iterable):
        strides = [1] + list(stride) + [1]
      else:
        strides = [1, stride, stride, stride, 1]

      if data_format == "NCDHW":
        t1 = test_util.NHWCToNCHW(t1)
        strides = test_util.NHWCToNCHW(strides)
      conv = nn_ops.conv3d(t1, t2, strides, padding=padding,
                           data_format=data_format)
      if data_format == "NCDHW":
        conv = test_util.NCHWToNHWC(conv)

      return conv
Developer: adit-chandra, Project: tensorflow, Lines: 32, Source: conv_ops_3d_test.py


Example 4: _SetupValuesForDevice

  def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
                            padding, data_format, use_gpu):
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s

    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with self.test_session(use_gpu=use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)

      if isinstance(stride, collections.Iterable):
        strides = [1] + list(stride) + [1]
      else:
        strides = [1, stride, stride, stride, 1]

      if data_format == "NCDHW":
        t1 = test_util.NHWCToNCHW(t1)
        strides = test_util.NHWCToNCHW(strides)
      conv = nn_ops.conv3d(t1, t2, strides, padding=padding,
                           data_format=data_format)
      if data_format == "NCDHW":
        conv = test_util.NCHWToNHWC(conv)

      return conv
Developer: aravindvcyber, Project: tensorflow, Lines: 31, Source: conv_ops_3d_test.py


Example 5: _VerifyValues

  def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                    expected):
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s

    if isinstance(stride, collections.Iterable):
      strides = [1] + list(stride) + [1]
    else:
      strides = [1, stride, stride, stride, 1]

    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with self.test_session(use_gpu=True) as sess:
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      conv = nn_ops.conv3d(t1, t2, strides, padding=padding)
      value = sess.run(conv)
    print("expected = ", expected)
    print("actual = ", value)
    self.assertArrayNear(expected, value.flatten(), 1e-5)
Developer: AliMiraftab, Project: tensorflow, Lines: 26, Source: conv_ops_3d_test.py


Example 6: _Conv3DBackpropFilterGrad

def _Conv3DBackpropFilterGrad(op, grad):
  return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
                                          grad,
                                          op.inputs[2],
                                          strides=op.get_attr("strides"),
                                          padding=op.get_attr("padding")),
          None,
          nn_ops.conv3d(op.inputs[0],
                        grad,
                        strides=op.get_attr("strides"),
                        padding=op.get_attr("padding"))]
Developer: Jackhuang945, Project: tensorflow, Lines: 11, Source: nn_grad.py


Example 7: _Conv3DBackpropInputGrad

def _Conv3DBackpropInputGrad(op, grad):
  return [None,
          nn_ops.conv3d_backprop_filter_v2(grad,
                                           array_ops.shape(op.inputs[1]),
                                           op.inputs[2],
                                           strides=op.get_attr("strides"),
                                           padding=op.get_attr("padding")),
          nn_ops.conv3d(grad,
                        op.inputs[1],
                        strides=op.get_attr("strides"),
                        padding=op.get_attr("padding"))]
Developer: Jackhuang945, Project: tensorflow, Lines: 11, Source: nn_grad.py


Example 8: _ComputeReferenceDilatedConv

  def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,
                                   stride, dilation, padding, data_format,
                                   use_gpu):
    total_size_tensor = 1
    total_size_filter = 1
    for s in tensor_in_sizes:
      total_size_tensor *= s
    for s in filter_in_sizes:
      total_size_filter *= s

    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = [f * 1.0 for f in range(1, total_size_tensor + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_filter + 1)]
    with self.cached_session(use_gpu=use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      if isinstance(stride, collections.Iterable):
        strides = list(stride)
      else:
        strides = [stride, stride, stride]
      if data_format == "NCDHW":
        t1 = test_util.NHWCToNCHW(t1)
        full_strides = [1, 1] + strides
        full_dilation = [1, 1] + dilation
      else:
        full_strides = [1] + strides + [1]
        full_dilation = [1] + dilation + [1]
      expected = nn_ops.convolution(
          t1,
          t2,
          padding=padding,
          strides=strides,
          dilation_rate=dilation,
          data_format=data_format)
      computed = nn_ops.conv3d(
          t1,
          t2,
          strides=full_strides,
          dilations=full_dilation,
          padding=padding,
          data_format=data_format)
      if data_format == "NCDHW":
        expected = test_util.NCHWToNHWC(expected)
        computed = test_util.NCHWToNHWC(computed)
    return expected, computed
Developer: adit-chandra, Project: tensorflow, Lines: 46, Source: conv_ops_3d_test.py


Example 9: _Conv3DBackpropFilterGrad

def _Conv3DBackpropFilterGrad(op, grad):
  data_format = op.get_attr("data_format").decode()
  return [
      nn_ops.conv3d_backprop_input_v2(
          array_ops.shape(op.inputs[0]),
          grad,
          op.inputs[2],
          dilations=op.get_attr("dilations"),
          strides=op.get_attr("strides"),
          padding=op.get_attr("padding"),
          data_format=data_format), None,
      nn_ops.conv3d(
          op.inputs[0],
          grad,
          dilations=op.get_attr("dilations"),
          strides=op.get_attr("strides"),
          padding=op.get_attr("padding"),
          data_format=data_format)
  ]
Developer: adit-chandra, Project: tensorflow, Lines: 19, Source: nn_grad.py


Example 10: _Conv3DBackpropInputGrad

def _Conv3DBackpropInputGrad(op, grad):
  data_format = op.get_attr("data_format").decode()
  return [
      None,
      nn_ops.conv3d_backprop_filter_v2(
          grad,
          array_ops.shape(op.inputs[1]),
          op.inputs[2],
          dilations=op.get_attr("dilations"),
          strides=op.get_attr("strides"),
          padding=op.get_attr("padding"),
          data_format=data_format),
      nn_ops.conv3d(
          grad,
          op.inputs[1],
          dilations=op.get_attr("dilations"),
          strides=op.get_attr("strides"),
          padding=op.get_attr("padding"),
          data_format=data_format)
  ]
Developer: adit-chandra, Project: tensorflow, Lines: 20, Source: nn_grad.py


Example 11: _ConstructAndTestGradientForConfig

  def _ConstructAndTestGradientForConfig(
      self, batch, input_shape, filter_shape, in_depth, out_depth, stride,
      padding, test_input, data_format, use_gpu):

    input_planes, input_rows, input_cols = input_shape
    filter_planes, filter_rows, filter_cols = filter_shape

    input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
    filter_shape = [
        filter_planes, filter_rows, filter_cols, in_depth, out_depth
    ]

    if isinstance(stride, collections.Iterable):
      strides = [1] + list(stride) + [1]
    else:
      strides = [1, stride, stride, stride, 1]

    if padding == "VALID":
      output_planes = int(
          math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
      output_rows = int(
          math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
      output_cols = int(
          math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
    else:
      output_planes = int(math.ceil(float(input_planes) / strides[1]))
      output_rows = int(math.ceil(float(input_rows) / strides[2]))
      output_cols = int(math.ceil(float(input_cols) / strides[3]))
    output_shape = [batch, output_planes, output_rows, output_cols, out_depth]
    input_size = 1
    for x in input_shape:
      input_size *= x
    filter_size = 1
    for x in filter_shape:
      filter_size *= x
    input_data = [x * 1.0 / input_size for x in range(0, input_size)]
    filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]

    for data_type in self._DtypesToTest(use_gpu=use_gpu):
      # TODO(mjanusz): Modify gradient_checker to also provide max relative
      # error and synchronize the tolerance levels between the tests for forward
      # and backward computations.
      if data_type == dtypes.float64:
        tolerance = 1e-8
      elif data_type == dtypes.float32:
        tolerance = 5e-3
      elif data_type == dtypes.float16:
        tolerance = 1e-3

      with self.cached_session(use_gpu=use_gpu):
        orig_input_tensor = constant_op.constant(
            input_data, shape=input_shape, dtype=data_type, name="input")
        filter_tensor = constant_op.constant(
            filter_data, shape=filter_shape, dtype=data_type, name="filter")

        if data_format == "NCDHW":
          input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
          new_strides = test_util.NHWCToNCHW(strides)
        else:
          input_tensor = orig_input_tensor
          new_strides = strides

        conv = nn_ops.conv3d(
            input_tensor,
            filter_tensor,
            new_strides,
            padding,
            data_format=data_format,
            name="conv")

        if data_format == "NCDHW":
          conv = test_util.NCHWToNHWC(conv)

        self.assertEqual(conv.shape, tensor_shape.TensorShape(output_shape))

        if test_input:
          jacob_t, jacob_n = gradient_checker.compute_gradient(
              orig_input_tensor, input_shape, conv, output_shape)
        else:
          jacob_t, jacob_n = gradient_checker.compute_gradient(
              filter_tensor, filter_shape, conv, output_shape)

        if data_type != dtypes.float16:
          reference_jacob_t = jacob_t
          err = np.fabs(jacob_t - jacob_n).max()
        else:
          # Compare fp16 theoretical gradients to fp32 theoretical gradients,
          # since fp16 numerical gradients are too imprecise.
          err = np.fabs(jacob_t - reference_jacob_t).max()

      print("conv3d gradient error = ", err)
      self.assertLess(err, tolerance)
Developer: adit-chandra, Project: tensorflow, Lines: 92, Source: conv_ops_3d_test.py


Example 12: _ConstructAndTestGradientForConfig

  def _ConstructAndTestGradientForConfig(
      self, batch, input_shape, filter_shape, in_depth, out_depth, stride,
      padding, test_input, data_format, use_gpu):

    input_planes, input_rows, input_cols = input_shape
    filter_planes, filter_rows, filter_cols = filter_shape

    input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
    filter_shape = [
        filter_planes, filter_rows, filter_cols, in_depth, out_depth
    ]

    if isinstance(stride, collections.Iterable):
      strides = [1] + list(stride) + [1]
    else:
      strides = [1, stride, stride, stride, 1]

    if padding == "VALID":
      output_planes = int(
          math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
      output_rows = int(
          math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
      output_cols = int(
          math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
    else:
      output_planes = int(math.ceil(float(input_planes) / strides[1]))
      output_rows = int(math.ceil(float(input_rows) / strides[2]))
      output_cols = int(math.ceil(float(input_cols) / strides[3]))
    output_shape = [batch, output_planes, output_rows, output_cols, out_depth]
    input_size = 1
    for x in input_shape:
      input_size *= x
    filter_size = 1
    for x in filter_shape:
      filter_size *= x
    input_data = [x * 1.0 / input_size for x in range(0, input_size)]
    filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]

    if test.is_gpu_available() and use_gpu:
      data_type = dtypes.float32
      if test.is_gpu_available():
        tolerance = 4e-3
      else:
        # As of Aug 2016, higher tolerance is needed for some CPU architectures.
        # Runs on a single machine can also generate slightly different errors
        # because of multithreading.
        tolerance = 8e-3
    else:
      data_type = dtypes.float64
      tolerance = 1e-8
    with self.test_session(use_gpu=use_gpu):
      orig_input_tensor = constant_op.constant(
          input_data, shape=input_shape, dtype=data_type, name="input")
      filter_tensor = constant_op.constant(
          filter_data, shape=filter_shape, dtype=data_type, name="filter")

      if data_format == "NCDHW":
        input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
        strides = test_util.NHWCToNCHW(strides)
      else:
        input_tensor = orig_input_tensor

      conv = nn_ops.conv3d(
          input_tensor, filter_tensor, strides, padding,
          data_format=data_format, name="conv")

      if data_format == "NCDHW":
        conv = test_util.NCHWToNHWC(conv)

      if test_input:
        err = gradient_checker.compute_gradient_error(orig_input_tensor,
                                                      input_shape,
                                                      conv, output_shape)
      else:
        err = gradient_checker.compute_gradient_error(filter_tensor,
                                                      filter_shape, conv,
                                                      output_shape)
    print("conv3d gradient error = ", err)
    self.assertLess(err, tolerance)
Developer: aravindvcyber, Project: tensorflow, Lines: 79, Source: conv_ops_3d_test.py



Note: The tensorflow.python.ops.nn_ops.conv3d examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code hosting and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code; when distributing or reusing it, please consult the corresponding project's license. Do not repost without permission.

