
Python random_ops.truncated_normal Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.random_ops.truncated_normal. If you are wondering what truncated_normal does, how to call it, or what real-world usage looks like, the curated examples below should help.



Twenty code examples of the truncated_normal function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
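Before the numbered examples, here is a minimal usage sketch. It assumes a TF 1.x graph-mode environment like the one the examples below target; the session boilerplate is illustrative and not drawn from any of the listed projects.

from tensorflow.python.client import session
from tensorflow.python.ops import random_ops

# Draw a 2x3 float32 tensor from a normal(0, 1) distribution; any sample
# falling more than two standard deviations from the mean is re-drawn.
rnd = random_ops.truncated_normal([2, 3], mean=0.0, stddev=1.0, seed=0)
with session.Session() as sess:
  print(sess.run(rnd))  # every entry lies strictly inside (-2, 2)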

Example 1: testNoCSE

 def testNoCSE(self):
   with self.test_session(use_gpu=True):
     shape = [2, 3, 4]
     rnd1 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
     rnd2 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
     diff = rnd2 - rnd1
     self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
Contributor: 1000sprites, Project: tensorflow, Lines: 7, Source: random_ops_test.py


Example 2: testSmallNetwork

  def testSmallNetwork(self):
    image = array_ops.placeholder(dtypes.float32, shape=[1, 28, 28, 1])
    label = array_ops.placeholder(dtypes.float32, shape=[1, 10])
    w = variables.Variable(
        random_ops.truncated_normal([5, 5, 1, 32], stddev=0.1))
    b = variables.Variable(random_ops.truncated_normal([32], stddev=0.1))
    conv = nn_ops.conv2d(image, w, strides=[1, 1, 1, 1], padding="SAME")
    h_conv = nn_ops.relu(conv + b)
    h_conv_flat = array_ops.reshape(h_conv, [1, -1])

    w_fc = variables.Variable(
        random_ops.truncated_normal([25088, 10], stddev=0.1))
    b_fc = variables.Variable(random_ops.truncated_normal([10], stddev=0.1))
    y_conv = nn_ops.softmax(math_ops.matmul(h_conv_flat, w_fc) + b_fc)

    cross_entropy = math_ops.reduce_mean(-math_ops.reduce_sum(
        label * math_ops.log(y_conv), reduction_indices=[1]))
    _ = adam.AdamOptimizer(1e-4).minimize(cross_entropy)

    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    report = cost_analyzer.GenerateCostReport(mg)

    self.assertTrue(b"MatMul" in report)
    self.assertTrue(b"ApplyAdam" in report)
    self.assertTrue(b"Conv2D" in report)
    self.assertTrue(b"Conv2DBackpropInput" in report)
    self.assertTrue(b"Conv2DBackpropFilter" in report)
    self.assertTrue(b"Softmax" in report)

    # Also print the report to make it easier to debug
    print("{}".format(report))
Contributor: ajaybhat, Project: tensorflow, Lines: 31, Source: cost_analyzer_test.py


Example 3: build_graph

def build_graph(device, input_shape, filter_shape, strides, padding, num_iters):
  """builds a graph containing a sequence of conv2d operations.

  Args:
    device: String, the device to run on.
    input_shape: Shape of the input tensor.
    filter_shape: Shape of the filter tensor.
    strides: A list of ints. 1-D of length 4. The stride of sliding
             window for each dimension of input.
    padding: A string from: "SAME", "VALID". The type of padding
             algorithm to use.
    num_iters: number of iterations to run conv2d.

  Returns:
    An array of tensors to run()
  """
  with ops.device("/%s:0" % device):
    inp = variables.Variable(random_ops.truncated_normal(input_shape))
    filt = variables.Variable(random_ops.truncated_normal(filter_shape))

    outputs = []
    conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format="NHWC")
    outputs.append(conv2d_op)
    for _ in range(1, num_iters):
      with ops.control_dependencies([conv2d_op]):
        conv2d_op = nn_ops.conv2d(
            inp, filt, strides, padding, data_format="NHWC")
        outputs.append(conv2d_op)
    return control_flow_ops.group(*outputs)
Contributor: DjangoPeng, Project: tensorflow, Lines: 29, Source: conv2d_benchmark.py


Example 4: loop

def loop():
  random_seed.set_random_seed(0)
  x1 = random_ops.truncated_normal([1, 784], seed=0)
  x2 = random_ops.truncated_normal([1, 784], seed=0)
  x3 = random_ops.truncated_normal([1, 784], seed=0)
  x4 = random_ops.truncated_normal([1, 784], seed=0)
  elems = (x1, x2, x3, x4)
  outputs = functional_ops.map_fn(two_layer_model, elems, dtype=dtypes.float32)
  return outputs
Contributor: 1000sprites, Project: tensorflow, Lines: 9, Source: layout_optimizer_test.py


Example 5: _loop_with_vec_and_4d

def _loop_with_vec_and_4d():
  random_seed.set_random_seed(0)
  x1 = random_ops.truncated_normal([1, 784], seed=0)
  x2 = random_ops.truncated_normal([1, 784], seed=0)
  x3 = random_ops.truncated_normal([1, 784], seed=0)
  x4 = random_ops.truncated_normal([1, 784], seed=0)
  elems = (x1, x2, x3, x4)
  outputs = functional_ops.map_fn(
      _model_with_vec_and_4d, elems, dtype=dtypes.float32)
  return outputs
Contributor: ChengYuXiang, Project: tensorflow, Lines: 10, Source: layout_optimizer_test.py


Example 6: testTruncatedNormal

 def testTruncatedNormal(self):
   # Fully known shape.
   rnd1 = random_ops.truncated_normal([1, 2, 3])
   self.assertEqual([1, 2, 3], rnd1.get_shape())
   # Partially known shape.
   rnd2 = random_ops.truncated_normal(
       array_ops.placeholder(dtypes.int32, shape=(3,)))
   self.assertEqual([None, None, None], rnd2.get_shape().as_list())
   # Unknown shape.
   rnd3 = random_ops.truncated_normal(array_ops.placeholder(dtypes.int32))
   self.assertIs(None, rnd3.get_shape().ndims)
Contributor: aeverall, Project: tensorflow, Lines: 11, Source: random_ops_test.py


Example 7: build_fused_conv_bias_relu_graph

def build_fused_conv_bias_relu_graph(device, input_shape, filter_shape, strides,
                                     padding, num_iters, data_format):
  """builds a graph containing a sequence of conv2d operations.

  Args:
    device: String, the device to run on.
    input_shape: Shape of the input tensor.
    filter_shape: Shape of the filter tensor.
    strides: A list of ints. 1-D of length 4. The stride of sliding
             window for each dimension of input.
    padding: A string from: "SAME", "VALID". The type of padding
             algorithm to use.
    num_iters: number of iterations to run conv2d.
    data_format: Data format string of the input; 'NHWC' and 'NCHW' are
                 supported.

  Returns:
    An array of tensors to run()
  """
  if data_format == "NCHW":
    input_shape = [
        input_shape[0], input_shape[3], input_shape[1], input_shape[2]
    ]
  with ops.device("/%s:0" % device):
    inp = variables.Variable(random_ops.truncated_normal(input_shape))
    filt = variables.Variable(random_ops.truncated_normal(filter_shape))
    bias_shape = [filter_shape[-1]]
    bias = variables.Variable(random_ops.truncated_normal(bias_shape))

    outputs = []
    fused_out = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
        inp,
        filt,
        bias,
        strides,
        padding,
        data_format=data_format,
        activation_mode="Relu")
    outputs.append(fused_out)
    for _ in range(1, num_iters):
      with ops.control_dependencies([fused_out]):
        fused_out = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(  # pylint: disable=line-too-long
            inp,
            filt,
            bias,
            strides,
            padding,
            data_format=data_format,
            activation_mode="Relu")
        outputs.append(fused_out)
    return control_flow_ops.group(*outputs)
Contributor: Ajaycs99, Project: tensorflow, Lines: 52, Source: fused_conv2d_bias_activation_benchmark.py


Example 8: testSmallNetwork

  def testSmallNetwork(self):
    image = array_ops.placeholder(dtypes.float32, shape=[1, 28, 28, 1])
    label = array_ops.placeholder(dtypes.float32, shape=[1, 10])
    w = variables.Variable(
        random_ops.truncated_normal([5, 5, 1, 32], stddev=0.1))
    b = variables.Variable(random_ops.truncated_normal([32], stddev=0.1))
    conv = nn_ops.conv2d(image, w, strides=[1, 1, 1, 1], padding="SAME")
    h_conv = nn_ops.relu(conv + b)
    h_conv_flat = array_ops.reshape(h_conv, [1, -1])

    w_fc = variables.Variable(
        random_ops.truncated_normal([25088, 10], stddev=0.1))
    b_fc = variables.Variable(random_ops.truncated_normal([10], stddev=0.1))
    y_conv = nn_ops.softmax(math_ops.matmul(h_conv_flat, w_fc) + b_fc)

    cross_entropy = math_ops.reduce_mean(-math_ops.reduce_sum(
        label * math_ops.log(y_conv), reduction_indices=[1]))
    _ = adam.AdamOptimizer(1e-4).minimize(cross_entropy)

    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    report = cost_analyzer.GenerateCostReport(mg)

    # Print the report to make it easier to debug
    print("{}".format(report))

    self.assertTrue(b"MatMul" in report)
    self.assertTrue(b"ApplyAdam" in report)
    self.assertTrue(b"Conv2D" in report)
    self.assertTrue(b"Conv2DBackpropInput" in report)
    self.assertTrue(b"Conv2DBackpropFilter" in report)
    self.assertTrue(b"Softmax" in report)

    for op_type in [
        b"MatMul", b"Conv2D", b"Conv2DBackpropInput", b"Conv2DBackpropFilter"
    ]:
      matcher = re.compile(
          br"\s+" + op_type + br",\s*(\d+),\s*(\d+),\s*([\d\.eE+-]+)%,\s*" +
          br"([\d\.eE+-]+)%,\s*(-?\d+),\s*(\d+),", re.MULTILINE)
      m = matcher.search(report)

      op_count = int(m.group(1))
      # upper = int(m.group(5))
      lower = int(m.group(6))
      if op_type == b"MatMul":  # compare by value, not identity
        self.assertEqual(3, op_count)
      else:
        self.assertEqual(1, op_count)
      self.assertTrue(0 <= lower)
Contributor: 1000sprites, Project: tensorflow, Lines: 48, Source: cost_analyzer_test.py


Example 9: _initializer

 def _initializer(shape, dtype=dtype, partition_info=None):
   """Initializer function."""
   if not dtype.is_floating:
     raise TypeError('Cannot create initializer for non-floating point type.')
   # Estimating fan_in and fan_out is not possible to do perfectly, but we try.
   # This is the right thing for matrix multiply and convolutions.
   if shape:
     fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
     fan_out = float(shape[-1])
   else:
     fan_in = 1.0
     fan_out = 1.0
   for dim in shape[:-2]:
     fan_in *= float(dim)
     fan_out *= float(dim)
   if mode == 'FAN_IN':
     # Count only number of input connections.
     n = fan_in
   elif mode == 'FAN_OUT':
     # Count only number of output connections.
     n = fan_out
   elif mode == 'FAN_AVG':
     # Average number of input and output connections.
     n = (fan_in + fan_out) / 2.0
   if uniform:
     # To get stddev = math.sqrt(factor / n), adjust the limit for the uniform distribution.
     limit = math.sqrt(3.0 * factor / n)
     return random_ops.random_uniform(shape, -limit, limit,
                                      dtype, seed=seed)
   else:
     # To get stddev = math.sqrt(factor / n), adjust the stddev for truncation.
     trunc_stddev = math.sqrt(1.3 * factor / n)
     return random_ops.truncated_normal(shape, 0.0, trunc_stddev, dtype,
                                        seed=seed)
Contributor: sonnyhu, Project: tensorflow, Lines: 34, Source: initializers.py


Example 10: sequence_softmax

def sequence_softmax(inputs, noutput, scope=None, name=None, linear_name=None):
  """Run a softmax layer over all the time steps of an input sequence.

  Args:
    inputs: (length, batch_size, depth) tensor
    noutput: output depth
    scope: optional scope name
    name: optional name for output tensor
    linear_name: name for linear (pre-softmax) output

  Returns:
    A tensor of size (length, batch_size, noutput).

  """
  length, _, ninputs = _shape(inputs)
  inputs_u = array_ops.unstack(inputs)
  output_u = []
  with variable_scope.variable_scope(scope, "SequenceSoftmax", [inputs]):
    initial_w = random_ops.truncated_normal([ninputs, noutput], stddev=0.1)
    initial_b = constant_op.constant(0.1, shape=[noutput])
    w = variables.model_variable("weights", initializer=initial_w)
    b = variables.model_variable("biases", initializer=initial_b)
    for i in xrange(length):  # xrange comes from six.moves in the source module
      with variable_scope.variable_scope(scope, "SequenceSoftmaxStep",
                                         [inputs_u[i]]):
        # TODO(tmb) consider using slim.fully_connected(...,
        # activation_fn=tf.nn.softmax)
        linear = nn_ops.xw_plus_b(inputs_u[i], w, b, name=linear_name)
        output = nn_ops.softmax(linear)
        output_u += [output]
    outputs = array_ops.stack(output_u, name=name)
  return outputs
Contributor: AlbertXiebnu, Project: tensorflow, Lines: 32, Source: lstm1d.py


Example 11: testSelectOpConditionUnknownShape

  def testSelectOpConditionUnknownShape(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      add = math_ops.add(conv, conv)
      condition = array_ops.placeholder(dtype='bool')
      select = gen_math_ops._select(condition, conv, add)
      output = array_ops.identity(select)

      condition_val = np.zeros((1, 7, 7, 64))
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={condition: condition_val})

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={condition: condition_val})

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)

      expected_num_transposes = 3
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Contributor: japrogramer, Project: tensorflow, Lines: 30, Source: layout_optimizer_test.py


Example 12: testSplitWithNonConstAxis

  def testSplitWithNonConstAxis(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dim = array_ops.placeholder(dtype='int32')
      split = array_ops.split(conv, 2, axis=dim)
      output = math_ops.reduce_sum(split[0])

      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={dim: 3})

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)

      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-Reshape-0',
                    nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-split-Sum-0', nodes)
      self.assertIn('LayoutOptimizerDim-split', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Contributor: paulojblack, Project: tensorflow, Lines: 32, Source: layout_optimizer_test.py


Example 13: testPadWithNonConstPaddings

  def testPadWithNonConstPaddings(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      paddings = array_ops.placeholder(dtype='int32')
      pad = array_ops.pad(conv, paddings)
      output = array_ops.identity(pad)

      paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                paddings: paddings_val
            })

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)

      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Pad-0-0', nodes)
      self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_Pad_1', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Contributor: AnddyWang, Project: tensorflow, Lines: 35, Source: layout_optimizer_test.py


Example 14: testTernaryOp

  def testTernaryOp(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      add = math_ops.add(conv, conv)
      mean = math_ops.reduce_mean(conv)
      condition = math_ops.less(conv, mean)
      select = gen_math_ops._select(condition, conv, add)
      output = array_ops.identity(select)

      with session.Session() as sess:
        output_val_ref = sess.run(output)

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)

      expected_num_transposes = 3
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Select-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Contributor: AnddyWang, Project: tensorflow, Lines: 30, Source: layout_optimizer_test.py


Example 15: testTwoConvLayers

  def testTwoConvLayers(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      output = two_layer_model(x)

      with session.Session() as sess:
        output_val_ref = sess.run(output)

      with session.Session(config=get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)

      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-Reshape-0',
                    nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Relu_1-MaxPool_1',
                    nodes)

      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Contributor: 1000sprites, Project: tensorflow, Lines: 30, Source: layout_optimizer_test.py


Example 16: testGradient

  def testGradient(self):
    if not test.is_gpu_available(cuda_only=True):
      self.skipTest('GPU required')

    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 200, 200, 3], seed=0)
    y = conv_layers.conv2d(x, 32, [3, 3])
    z = conv_layers.conv2d(y, 32, [3, 3])
    optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
    loss = math_ops.reduce_mean(z)
    train_op = optimizer.minimize(loss)
    graph = ops.get_default_graph()
    graph.add_to_collection('train_op', train_op)
    meta_graph = saver_lib.export_meta_graph(graph_def=graph.as_graph_def())

    rewrite_options = rewriter_config_pb2.RewriterConfig(
        optimize_tensor_layout=True)
    optimized_graph = tf_optimizer.OptimizeGraph(rewrite_options, meta_graph)

    found = 0
    for node in optimized_graph.node:
      if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
        found += 1
        self.assertEqual(node.attr['data_format'].s, 'NCHW')
    self.assertEqual(found, 5)
Contributor: SylChan, Project: tensorflow, Lines: 25, Source: layout_optimizer_test.py


Example 17: testSplitWithNonConstAxis

  def testSplitWithNonConstAxis(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dim = array_ops.placeholder(dtype='int32')
      split = array_ops.split(conv, 2, axis=dim)
      scale = constant_op.constant(0.1, shape=[32])
      offset = constant_op.constant(0.3, shape=[32])
      bn0 = nn.fused_batch_norm(split[0], scale, offset)
      bn1 = nn.fused_batch_norm(split[1], scale, offset)
      add = bn0[0] + bn1[0]
      output = array_ops.identity(add)

      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={dim: 3})

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)

      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('add_2-0-0', nodes)
      self._assert_map_nhwc_to_nchw('split-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Contributor: ChengYuXiang, Project: tensorflow, Lines: 34, Source: layout_optimizer_test.py


Example 18: testGradient

  def testGradient(self):
    with ops.Graph().as_default() as g:
      inputs = array_ops.placeholder(
          dtypes.float32, shape=[None, 100], name="input")
      weights = array_ops.placeholder(
          dtypes.float32, shape=[100, 10], name="weights")
      biases = array_ops.placeholder(dtypes.float32, shape=[10], name="biases")
      activations = nn_ops.relu(
          math_ops.matmul(inputs, weights) + biases, name="activations")
      loss = math_ops.reduce_mean(activations, name="loss")
    gdef = g.as_graph_def()

    with ops.Graph().as_default() as g:
      input_placeholder = array_ops.placeholder(dtypes.float32, shape=[32, 100])
      weights_var = variables.Variable(
          random_ops.truncated_normal([100, 10]), name="weights")
      biases_var = variables.Variable(array_ops.zeros([10]), name="biases")
      activations, loss = importer.import_graph_def(
          gdef,
          input_map={
              "input:0": input_placeholder,
              "weights:0": weights_var,
              "biases:0": biases_var
          },
          return_elements=["activations:0", "loss:0"])
      self.assertEqual([32, 10], activations.get_shape())
      self.assertEqual([], loss.get_shape())
      weights_grad, biases_grad = gradients_impl.gradients(
          loss, [weights_var, biases_var])
      self.assertEqual([100, 10], weights_grad.get_shape())
      self.assertEqual([10], biases_grad.get_shape())
Contributor: pcm17, Project: tensorflow, Lines: 31, Source: importer_test.py


Example 19: testStridedSliceWithMask1011

  def testStridedSliceWithMask1011(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      # This will generate a StridedSlice op with begin mask and
      # end mask 11(1011).
      s = conv[:, :, 1:-1, :]
      output = array_ops.identity(s)

      with session.Session() as sess:
        output_val_ref = sess.run(output)

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)

      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
      self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
      self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
      self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Contributor: ChengYuXiang, Project: tensorflow, Lines: 34, Source: layout_optimizer_test.py


Example 20: __call__

 def __call__(self, shape, dtype=None, partition_info=None):
   if dtype is None:
     dtype = self.dtype
   scale = self.scale
   scale_shape = shape
   if partition_info is not None:
     scale_shape = partition_info.full_shape
   fan_in, fan_out = _compute_fans(scale_shape)
   if self.mode == "fan_in":
     scale /= max(1., fan_in)
   elif self.mode == "fan_out":
     scale /= max(1., fan_out)
   else:
     scale /= max(1., (fan_in + fan_out) / 2.)
   if self.distribution == "normal" or self.distribution == "truncated_normal":
     # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
     stddev = math.sqrt(scale) / .87962566103423978
     return random_ops.truncated_normal(
         shape, 0.0, stddev, dtype, seed=self.seed)
   elif self.distribution == "untruncated_normal":
     stddev = math.sqrt(scale)
     return random_ops.random_normal(
         shape, 0.0, stddev, dtype, seed=self.seed)
   else:
     limit = math.sqrt(3.0 * scale)
     return random_ops.random_uniform(
         shape, -limit, limit, dtype, seed=self.seed)
Contributor: LongJun123456, Project: tensorflow, Lines: 27, Source: init_ops.py
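The long decimal in Example 20 is the standard deviation of a unit normal truncated to [-2, 2]; dividing by it makes the post-truncation stddev come out to exactly sqrt(scale). The 1.3 factor in Example 9 is a coarser approximation of the same correction. Here is a quick numeric check, a sketch that assumes scipy is available:

import math
from scipy.stats import truncnorm

# Stddev of a unit normal truncated at two standard deviations.
trunc_std = truncnorm.std(a=-2, b=2, loc=0., scale=1.)
print(trunc_std)                   # ~0.87962566103423978, the constant in Example 20
print(math.sqrt(1.3) * trunc_std)  # ~1.003: Example 9's 1.3 factor nearly restores stddev 1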



Note: The tensorflow.python.ops.random_ops.truncated_normal function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors. Refer to each project's license before redistributing or using the code, and do not reproduce without permission.

