This article collects typical usage examples of the Python function tensorflow.python.platform.test.is_gpu_available. If you are wondering what is_gpu_available does, how to call it, or what real-world usage looks like, the curated examples below should help.
In total, 20 code examples of is_gpu_available are shown below, sorted by popularity by default.
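Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share. In the TensorFlow 1.x line these tests come from, test.is_gpu_available(cuda_only=False, min_cuda_compute_capability=None) reports whether a (CUDA) GPU is registered with the runtime, so test code branches on it to enable, skip, or scale down GPU-specific work. The class name, constants, and the use of skipTest below are illustrative assumptions rather than code from any of the quoted projects; only the is_gpu_available guard mirrors them.

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class GpuGateExampleTest(test.TestCase):
  # Hypothetical test class; only the is_gpu_available guard mirrors the
  # examples collected below.

  def testGpuOnlyPath(self):
    # Skip (rather than silently pass) when no CUDA GPU is present; the
    # examples below usually express the same gate as
    # `if test.is_gpu_available(cuda_only=True): ...`.
    if not test.is_gpu_available(cuda_only=True):
      self.skipTest('No CUDA GPU available.')
    with self.test_session(use_gpu=True):
      x = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
      y = math_ops.matmul(x, x)
      self.assertAllClose([[7.0, 10.0], [15.0, 22.0]], y.eval())


if __name__ == '__main__':
  test.main()

Skipping via skipTest is a stylistic choice; most of the examples below guard with a plain if or an early return, which simply turns the GPU-only path into a no-op on CPU-only machines.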
Example 1: testTraining
def testTraining(self):
  x_shape = [1, 1, 6, 1]
  for dtype in [np.float16, np.float32]:
    if test.is_gpu_available(cuda_only=True):
      self._test_training(
          x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC')
      self._test_training(
          x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW')
    self._test_training(
        x_shape, dtype, [1], np.float32, use_gpu=False, data_format='NHWC')

  x_shape = [1, 1, 6, 2]
  for dtype in [np.float16, np.float32]:
    if test.is_gpu_available(cuda_only=True):
      self._test_training(
          x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC')
    self._test_training(
        x_shape, dtype, [2], np.float32, use_gpu=False, data_format='NHWC')

  x_shape = [1, 2, 1, 6]
  if test.is_gpu_available(cuda_only=True):
    for dtype in [np.float16, np.float32]:
      self._test_training(
          x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW')

  x_shape = [27, 131, 127, 6]
  for dtype in [np.float16, np.float32]:
    if test.is_gpu_available(cuda_only=True):
      self._test_training(
          x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW')
      self._test_training(
          x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC')
    self._test_training(
        x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC')
Developer: SylChan, project: tensorflow, 34 lines, source: nn_fused_batchnorm_test.py
Example 2: testSelectEverthingDetail
def testSelectEverthingDetail(self):
  ops.reset_default_graph()
  dev = '/gpu:0' if test.is_gpu_available() else '/cpu:0'
  outfile = os.path.join(test.get_temp_dir(), 'dump')
  opts = (builder(builder.trainable_variables_parameter())
          .with_file_output(outfile)
          .with_accounted_types(['.*'])
          .select(['micros', 'bytes', 'params', 'float_ops', 'occurrence',
                   'device', 'op_types', 'input_shapes']).build())

  config = config_pb2.ConfigProto()
  with session.Session(config=config) as sess, ops.device(dev):
    x = lib.BuildSmallModel()

    sess.run(variables.global_variables_initializer())
    run_meta = config_pb2.RunMetadata()
    _ = sess.run(x,
                 options=config_pb2.RunOptions(
                     trace_level=config_pb2.RunOptions.FULL_TRACE),
                 run_metadata=run_meta)

    model_analyzer.profile(
        sess.graph, run_meta, options=opts)

    with gfile.Open(outfile, 'r') as f:
      # pylint: disable=line-too-long
      outputs = f.read().split('\n')

      self.assertEqual(outputs[0],
                       'node name | # parameters | # float_ops | requested bytes | total execution time | accelerator execution time | cpu execution time | assigned devices | op types | op count (run|defined) | input shapes')
      for o in outputs[1:]:
        if o.find('Conv2D ') > 0:
          metrics = o[o.find('(') + 1:o.find(')')].split(',')
          # Make sure time is profiled.
          gap = 1 if test.is_gpu_available() else 2
          for i in range(3, 6, gap):
            mat = re.search('(.*)[um]s/(.*)[um]s', metrics[i])
            self.assertGreater(float(mat.group(1)), 0.0)
            self.assertGreater(float(mat.group(2)), 0.0)
          # Make sure device is profiled.
          if test.is_gpu_available():
            self.assertTrue(metrics[6].find('gpu') > 0)
            self.assertFalse(metrics[6].find('cpu') > 0)
          else:
            self.assertFalse(metrics[6].find('gpu') > 0)
            self.assertTrue(metrics[6].find('cpu') > 0)
          # Make sure float_ops is profiled.
          mat = re.search('(.*)k/(.*)k flops', metrics[1].strip())
          self.assertGreater(float(mat.group(1)), 0.0)
          self.assertGreater(float(mat.group(2)), 0.0)
          # Make sure op_count is profiled.
          self.assertEqual(metrics[8].strip(), '1/1|1/1')
          # Make sure input_shapes is profiled.
          self.assertEqual(metrics[9].strip(), '0:2x6x6x3|1:3x3x3x6')

        if o.find('DW (3x3x3x6') > 0:
          metrics = o[o.find('(') + 1:o.find(')')].split(',')
          mat = re.search('(.*)/(.*) params', metrics[1].strip())
          self.assertGreater(float(mat.group(1)), 0.0)
          self.assertGreater(float(mat.group(2)), 0.0)
Developer: chdinh, project: tensorflow, 60 lines, source: model_analyzer_test.py
Example 3: testBatchNormGrad
def testBatchNormGrad(self):
  for is_training in [True, False]:
    x_shape = [1, 1, 6, 1]
    if test.is_gpu_available(cuda_only=True):
      self._test_gradient(
          x_shape, [1],
          use_gpu=True,
          data_format='NHWC',
          is_training=is_training)
      self._test_gradient(
          x_shape, [1],
          use_gpu=True,
          data_format='NCHW',
          is_training=is_training)
    self._test_gradient(
        x_shape, [1],
        use_gpu=False,
        data_format='NHWC',
        is_training=is_training)

    x_shape = [1, 1, 6, 2]
    if test.is_gpu_available(cuda_only=True):
      self._test_gradient(
          x_shape, [2],
          use_gpu=True,
          data_format='NHWC',
          is_training=is_training)
    self._test_gradient(
        x_shape, [2],
        use_gpu=False,
        data_format='NHWC',
        is_training=is_training)

    x_shape = [1, 2, 1, 6]
    if test.is_gpu_available(cuda_only=True):
      self._test_gradient(
          x_shape, [2],
          use_gpu=True,
          data_format='NCHW',
          is_training=is_training)

    x_shape = [7, 9, 13, 6]
    if test.is_gpu_available(cuda_only=True):
      self._test_gradient(
          x_shape, [9],
          use_gpu=True,
          data_format='NCHW',
          is_training=is_training)
      self._test_gradient(
          x_shape, [6],
          use_gpu=True,
          data_format='NHWC',
          is_training=is_training)
    self._test_gradient(
        x_shape, [6],
        use_gpu=False,
        data_format='NHWC',
        is_training=is_training)
Developer: 1000sprites, project: tensorflow, 58 lines, source: nn_fused_batchnorm_test.py
Example 4: test_convolution_2d
def test_convolution_2d(self):
  num_samples = 2
  filters = 2
  stack_size = 3
  kernel_size = (3, 2)
  num_row = 7
  num_col = 6

  for padding in ['valid', 'same']:
    for strides in [(1, 1), (2, 2)]:
      if padding == 'same' and strides != (1, 1):
        continue

      with self.test_session(use_gpu=True):
        # Only runs on GPU with CUDA, channels_first is not supported on CPU.
        # TODO(b/62340061): Support channels_first on CPU.
        if test.is_gpu_available(cuda_only=True):
          testing_utils.layer_test(
              keras.layers.Conv2D,
              kwargs={
                  'filters': filters,
                  'kernel_size': kernel_size,
                  'padding': padding,
                  'strides': strides,
                  'data_format': 'channels_first'
              },
              input_shape=(num_samples, stack_size, num_row, num_col))
Developer: AutumnQYN, project: tensorflow, 27 lines, source: convolutional_test.py
Example 5: testReverseWithConstDims
def testReverseWithConstDims(self):
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    dims = constant_op.constant([3, 1], name='DimsConst')
    reverse = array_ops.reverse(conv, dims)
    output = array_ops.identity(reverse)

    with session.Session() as sess:
      output_val_ref = sess.run(output)

    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata)

    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)

    # Four transposes were initially added in the Expand phase of
    # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
    self.assertIn('ReverseV2-1-LayoutOptimizer', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Developer: ChengYuXiang, project: tensorflow, 31 lines, source: layout_optimizer_test.py
Example 6: test_cudnn_rnn_basics
def test_cudnn_rnn_basics(self):
  if test.is_gpu_available(cuda_only=True):
    with self.test_session(use_gpu=True):
      input_size = 10
      timesteps = 6
      units = 2
      num_samples = 32
      for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
        for return_sequences in [True, False]:
          with keras.utils.CustomObjectScope(
              {'keras.layers.CuDNNGRU': keras.layers.CuDNNGRU,
               'keras.layers.CuDNNLSTM': keras.layers.CuDNNLSTM}):
            testing_utils.layer_test(
                layer_class,
                kwargs={'units': units,
                        'return_sequences': return_sequences},
                input_shape=(num_samples, timesteps, input_size))
        for go_backwards in [True, False]:
          with keras.utils.CustomObjectScope(
              {'keras.layers.CuDNNGRU': keras.layers.CuDNNGRU,
               'keras.layers.CuDNNLSTM': keras.layers.CuDNNLSTM}):
            testing_utils.layer_test(
                layer_class,
                kwargs={'units': units,
                        'go_backwards': go_backwards},
                input_shape=(num_samples, timesteps, input_size))
Developer: AnishShah, project: tensorflow, 26 lines, source: cudnn_recurrent_test.py
Example 7: testTwoConvLayers
def testTwoConvLayers(self):
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    output = two_layer_model(x)

    with session.Session() as sess:
      output_val_ref = sess.run(output)

    with session.Session(config=get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata)

    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if node.name.startswith('LayoutOptimizerTranspose'):
        num_transposes += 1
      nodes.append(node.name)

    # Four transposes were initially added in the Expand phase of
    # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-Reshape-0',
                  nodes)
    self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Relu_1-MaxPool_1',
                  nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Developer: 1000sprites, project: tensorflow, 30 lines, source: layout_optimizer_test.py
Example 8: testAllocationHistory
def testAllocationHistory(self):
  if not test.is_gpu_available(cuda_only=True):
    return

  gpu_dev = test.gpu_device_name()
  ops.reset_default_graph()
  with ops.device(gpu_dev):
    _, run_meta = _run_model()

  mm = _extract_node(run_meta, 'MatMul')['gpu:0'][0]
  mm_allocs = mm.memory[0].allocation_records
  # has allocation and deallocation.
  self.assertEqual(len(mm_allocs), 2)
  # first allocated.
  self.assertGreater(mm_allocs[1].alloc_micros, mm_allocs[0].alloc_micros)
  self.assertGreater(mm_allocs[0].alloc_bytes, 0)
  # Then deallocated.
  self.assertLess(mm_allocs[1].alloc_bytes, 0)
  # All memory deallocated.
  self.assertEqual(mm_allocs[0].alloc_bytes + mm_allocs[1].alloc_bytes, 0)

  rand = _extract_node(
      run_meta, 'random_normal/RandomStandardNormal')['gpu:0'][0]
  random_allocs = rand.memory[0].allocation_records
  # random normal must allocated first since matmul depends on it.
  self.assertLess(random_allocs[0].alloc_micros, mm.all_start_micros)
  # deallocates the memory after matmul started.
  self.assertGreater(random_allocs[1].alloc_micros, mm.all_start_micros)
Developer: AndrewTwinz, project: tensorflow, 28 lines, source: run_metadata_test.py
Example 9: benchmarkMatrixExponentialOp
def benchmarkMatrixExponentialOp(self):
  for shape in self.shapes:
    with ops.Graph().as_default(), \
        session.Session() as sess, \
        ops.device("/cpu:0"):
      matrix = self._GenerateMatrix(shape)
      expm = linalg_impl.matrix_exponential(matrix)
      variables.global_variables_initializer().run()
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(expm),
          min_iters=25,
          name="matrix_exponential_cpu_{shape}".format(
              shape=shape))

    if test.is_gpu_available(True):
      with ops.Graph().as_default(), \
          session.Session() as sess, \
          ops.device("/gpu:0"):
        matrix = self._GenerateMatrix(shape)
        expm = linalg_impl.matrix_exponential(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(expm),
            min_iters=25,
            name="matrix_exponential_gpu_{shape}".format(
                shape=shape))
Developer: AnishShah, project: tensorflow, 28 lines, source: matrix_exponential_op_test.py
Example 10: testConcatLargeNumberOfTensors
def testConcatLargeNumberOfTensors(self):
  with self.session(use_gpu=True):
    for concat_dim in range(2):
      params = {}
      p = []
      shape = np.array([7, 13])
      if test.is_gpu_available():
        num_tensors = 5000
      else:
        num_tensors = 500
      for i in np.arange(num_tensors):
        input_shape = shape
        placeholder = array_ops.placeholder(dtypes.float32, shape=input_shape)
        p.append(placeholder)
        params[placeholder] = np.random.rand(*input_shape).astype(np.float32)

      concat_inputs = p
      c = array_ops.concat(concat_inputs, concat_dim)
      result = c.eval(feed_dict=params)

      self.assertEqual(result.shape, c.get_shape())
      cur_offset = 0

      for i in np.arange(num_tensors):
        # The index into the result is the ':' along all dimensions
        # except the concat_dim. slice(0, size) is used for ':', and
        # a list of slices is used to index into result.
        index = [slice(0, params[p[i]].shape[j]) for j in np.arange(2)]
        index[concat_dim] = slice(cur_offset,
                                  cur_offset + params[p[i]].shape[concat_dim])
        cur_offset += params[p[i]].shape[concat_dim]
        self.assertAllEqual(result[index], params[p[i]])
Developer: JonathanRaiman, project: tensorflow, 33 lines, source: concat_op_test.py
Example 11: testLoopGPU
def testLoopGPU(self):
  if not test.is_gpu_available():
    return

  ops.reset_default_graph()
  with ops.device('/gpu:0'):
    tfprof_node, run_meta = _run_loop_model()
    # The while-loop caused a node to appear 4 times in scheduling.
    ret = _extract_node(run_meta,
                        'rnn/while/rnn/basic_rnn_cell/basic_rnn_cell/MatMul')
    self.assertEqual(len(ret['/job:localhost/replica:0/task:0/gpu:0']), 4)

    total_cpu_execs = 0
    for node in ret['/job:localhost/replica:0/task:0/gpu:0']:
      total_cpu_execs += node.op_end_rel_micros

    ret = _extract_node(
        run_meta,
        'rnn/while/rnn/basic_rnn_cell/basic_rnn_cell/MatMul:MatMul')
    self.assertGreaterEqual(len(ret['/gpu:0/stream:all']), 4)

    total_accelerator_execs = 0
    for node in ret['/gpu:0/stream:all']:
      total_accelerator_execs += node.op_end_rel_micros

    mm_node = lib.SearchTFProfNode(
        tfprof_node,
        'rnn/while/rnn/basic_rnn_cell/basic_rnn_cell/MatMul')

    self.assertEqual(mm_node.run_count, 4)
    self.assertEqual(mm_node.accelerator_exec_micros, total_accelerator_execs)
    self.assertEqual(mm_node.cpu_exec_micros, total_cpu_execs)
    self.assertEqual(mm_node.exec_micros,
                     total_cpu_execs + total_accelerator_execs)
Developer: Joetz, project: tensorflow, 34 lines, source: run_metadata_test.py
Example 12: benchmarkMatrixBandPartOp
def benchmarkMatrixBandPartOp(self):
  for shape_ in self.shapes:
    for limits in (-1, -1), (-1, 0), (0, -1), (2, 2):
      with ops.Graph().as_default(), \
          session.Session() as sess, \
          ops.device("/cpu:0"):
        matrix = variables.Variable(array_ops.ones(shape_))
        band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(band),
            min_iters=10,
            name="matrix_band_part_cpu_{shape}_{limits}".format(
                shape=shape_, limits=limits))

      if test_lib.is_gpu_available(True):
        with ops.Graph().as_default(), \
            session.Session() as sess, \
            ops.device("/gpu:0"):
          matrix = variables.Variable(array_ops.ones(shape_))
          band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(band),
              min_iters=10,
              name="matrix_band_part_gpu_{shape}_{limits}".format(
                  shape=shape_, limits=limits))
Developer: AndrewTwinz, project: tensorflow, 29 lines, source: matrix_band_part_op_test.py
Example 13: testGradientDilatedConv
def testGradientDilatedConv(self):
  if test.is_gpu_available(cuda_only=True):
    with self.test_session(use_gpu=True):
      for padding in ["SAME", "VALID"]:
        for stride in [1, 2]:
          np.random.seed(1)
          in_shape = [5, 8, 6, 4]
          in_val = constant_op.constant(
              2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
          filter_shape = [3, 3, 4, 6]
          # Make a convolution op with the current settings,
          # just to easily get the shape of the output.
          conv_out = nn_ops.conv2d(
              in_val,
              array_ops.zeros(filter_shape),
              dilations=[1, 2, 2, 1],
              strides=[1, stride, stride, 1],
              padding=padding)
          out_backprop_shape = conv_out.get_shape().as_list()
          out_backprop_val = constant_op.constant(
              2 * np.random.random_sample(out_backprop_shape) - 1,
              dtype=dtypes.float32)
          output = nn_ops.conv2d_backprop_filter(
              in_val,
              filter_shape,
              out_backprop_val,
              dilations=[1, 2, 2, 1],
              strides=[1, stride, stride, 1],
              padding=padding)
          err = gradient_checker.compute_gradient_error(
              [in_val, out_backprop_val], [in_shape, out_backprop_shape],
              output, filter_shape)
          print("conv2d_backprop_filter gradient err = %g " % err)
          err_tolerance = 2e-3
          self.assertLess(err, err_tolerance)
Developer: AbhinavJain13, project: tensorflow, 35 lines, source: conv2d_backprop_filter_grad_test.py
Example 14: testStridedSliceWithMask1011
def testStridedSliceWithMask1011(self):
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    # This will generate a StridedSlice op with begin mask and
    # end mask 11(1011).
    s = conv[:, :, 1:-1, :]
    output = array_ops.identity(s)

    with session.Session() as sess:
      output_val_ref = sess.run(output)

    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata)

    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)

    # Four transposes were initially added in the Expand phase of
    # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
    self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
    self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
    self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Developer: ChengYuXiang, project: tensorflow, 34 lines, source: layout_optimizer_test.py
Example 15: testSliceWithNonConstAxis
def testSliceWithNonConstAxis(self):
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    size = array_ops.placeholder(dtype='int32')
    s = array_ops.slice(conv, [0, 0, 0, 0], size)
    output = array_ops.identity(s)

    size_val = [1, 2, 3, 4]
    with session.Session() as sess:
      output_val_ref = sess.run(output, feed_dict={size: size_val})

    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(
          output, run_metadata=metadata, feed_dict={
              size: size_val
          })

    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)

    # Four transposes were initially added in the Expand phase of
    # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('Slice-0-0', nodes)
    self._assert_vec_nhwc_to_nchw('Slice-2', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Developer: ChengYuXiang, project: tensorflow, 35 lines, source: layout_optimizer_test.py
Example 16: testMaxPoolV2
def testMaxPoolV2(self):
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
    strides = array_ops.placeholder(dtype='int32', shape=[4])
    max_pool = gen_nn_ops._max_pool_v2(conv, ksize, strides, 'VALID')
    output = array_ops.identity(max_pool)

    strides_val = [1, 3, 2, 1]
    with session.Session() as sess:
      output_val_ref = sess.run(output, feed_dict={strides: strides_val})

    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(
          output, run_metadata=metadata, feed_dict={
              strides: strides_val
          })

    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)

    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('MaxPoolV2-0-0', nodes)
    self._assert_vec_nhwc_to_nchw('MaxPoolV2-2', nodes)
    self.assertIn('MaxPoolV2-1-LayoutOptimizer', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Developer: ChengYuXiang, project: tensorflow, 35 lines, source: layout_optimizer_test.py
Example 17: testSelectOpScalarCondition
def testSelectOpScalarCondition(self):
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    add = math_ops.add(conv, conv)
    condition = constant_op.constant(True)
    select = gen_math_ops._select(condition, conv, add)
    output = array_ops.identity(select)

    with session.Session() as sess:
      output_val_ref = sess.run(output)

    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata)

    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)

    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Developer: ChengYuXiang, project: tensorflow, 29 lines, source: layout_optimizer_test.py
Example 18: benchmarkCholeskyOp
def benchmarkCholeskyOp(self):
  for shape in self.shapes:
    with ops.Graph().as_default(), \
        session.Session() as sess, \
        ops.device("/cpu:0"):
      matrix = variables.Variable(self._GenerateMatrix(shape))
      l = linalg_ops.cholesky(matrix)
      variables.global_variables_initializer().run()
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(
              l,),
          min_iters=25,
          name="cholesky_cpu_{shape}".format(shape=shape))

    if test.is_gpu_available(True):
      with ops.Graph().as_default(), \
          session.Session() as sess, \
          ops.device("/device:GPU:0"):
        matrix = variables.Variable(self._GenerateMatrix(shape))
        l = linalg_ops.cholesky(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(
                l,),
            min_iters=25,
            name="cholesky_gpu_{shape}".format(shape=shape))
Developer: AbhinavJain13, project: tensorflow, 28 lines, source: cholesky_op_test.py
Example 19: setUp
def setUp(self):
  self._dump_root = tempfile.mkdtemp()

  if test.is_gpu_available():
    self._expected_partition_graph_count = 2
  else:
    self._expected_partition_graph_count = 1
Developer: JamesFysh, project: tensorflow, 7 lines, source: session_debug_test.py
Example 20: testSendingLargeGraphDefsWorks
def testSendingLargeGraphDefsWorks(self):
  with self.test_session(
      use_gpu=True,
      config=session_debug_testlib.no_rewrite_session_config()) as sess:
    u = variables.Variable(42.0, name="original_u")
    for _ in xrange(50 * 1000):
      u = array_ops.identity(u)
    sess.run(variables.global_variables_initializer())

    def watch_fn(fetches, feeds):
      del fetches, feeds
      return framework.WatchOptions(
          debug_ops=["DebugIdentity"],
          node_name_regex_whitelist=r"original_u")

    sess = grpc_wrapper.GrpcDebugWrapperSession(
        sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
    self.assertAllClose(42.0, sess.run(u))

    self.assertAllClose(
        [42.0],
        self.debug_server.debug_tensor_values["original_u:0:DebugIdentity"])
    self.assertEqual(2 if test.is_gpu_available() else 1,
                     len(self.debug_server.partition_graph_defs))
    max_graph_def_size = max([
        len(graph_def.SerializeToString())
        for graph_def in self.debug_server.partition_graph_defs])
    self.assertGreater(max_graph_def_size, 4 * 1024 * 1024)
Developer: AndrewTwinz, project: tensorflow, 27 lines, source: grpc_large_data_test.py
Note: The tensorflow.python.platform.test.is_gpu_available examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their original authors; copyright in the code remains with those authors, and distribution or use should follow the corresponding project's license. Do not republish without permission.