This article collects and summarizes typical usage examples of the tensorflow.python.ops.array_ops.constant function in Python. If you have been wondering what exactly constant does, how to call it, or what it looks like in real code, the curated samples below should help.
Twenty code examples of the constant function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
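Before the collected examples, here is a minimal usage sketch. array_ops.constant mirrors the public tf.constant API, taking a value plus optional dtype, shape and name arguments; note that the internal tensorflow.python.ops.array_ops import path used below (and throughout the examples) is an implementation detail of TensorFlow 1.x-era code, so treat it as an assumption rather than a stable public entry point.

import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops

# Build constant tensors from a Python scalar, a list, and a NumPy array.
scalar = array_ops.constant(3.0)                            # 0-D float32 tensor
vector = array_ops.constant([1, 2, 3], dtype=dtypes.int64)  # 1-D int64 tensor
matrix = array_ops.constant(np.ones((2, 2)), name='ones')   # 2-D tensor from a NumPy array

In graph mode each call adds a Const node to the default graph and returns the corresponding output tensor; the values are then evaluated inside a session, which is exactly what the test cases below do with sess.run.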
Example 1: testUpdateClipCoeff
def testUpdateClipCoeff(self):
  with ops.Graph().as_default(), self.test_session() as sess:
    grads_and_vars = [(array_ops.constant([[1., 2.], [3., 4.]]), None),
                      (array_ops.constant([[2., 3.], [4., 5.]]), None)]
    pgrads_and_vars = [(array_ops.constant([[3., 4.], [5., 6.]]), None),
                       (array_ops.constant([[7., 8.], [9., 10.]]), None)]
    lrate = 0.1

    # Note: without rescaling, the squared Fisher norm of the update
    # is 1.74

    # If the update already satisfies the norm constraint, there should
    # be no rescaling.
    opt = optimizer.KfacOptimizer(
        lrate, 0.2, 0.3, dummy_layer_collection(), norm_constraint=10.)
    coeff = opt._update_clip_coeff(grads_and_vars, pgrads_and_vars)
    self.assertAlmostEqual(1., sess.run(coeff), places=5)

    # If the update violates the constraint, it should be rescaled to
    # be on the constraint boundary.
    opt = optimizer.KfacOptimizer(
        lrate, 0.2, 0.3, dummy_layer_collection(), norm_constraint=0.5)
    coeff = opt._update_clip_coeff(grads_and_vars, pgrads_and_vars)
    sq_norm_pgrad = opt._squared_fisher_norm(grads_and_vars, pgrads_and_vars)
    sq_norm_update = lrate**2 * coeff**2 * sq_norm_pgrad
    self.assertAlmostEqual(0.5, sess.run(sq_norm_update), places=5)
Contributor: BhaskarNallani | Project: tensorflow | Lines: 26 | Source file: optimizer_test.py
Example 2: test_mixture_dev
def test_mixture_dev(self):
  mixture_weights = np.array([
      [1.0/3, 1.0/3, 1.0/3],
      [0.750, 0.250, 0.000]
  ])
  component_means = np.array([
      [1.0, 1.0, 1.0],
      [-5, 0, 1.25]
  ])
  component_devs = np.array([
      [1.0, 1.0, 1.0],
      [0.01, 2.0, 0.1]
  ])

  # The first case should trivially have a standard deviation of 1.0 because
  # all components are identical and have that standard deviation.
  # The second case was computed by hand.
  expected_devs = np.array([
      1.0,
      2.3848637277
  ])

  weights_tf = array_ops.constant(mixture_weights)
  means_tf = array_ops.constant(component_means)
  sigmas_tf = array_ops.constant(component_devs)
  mix_dev = distribution_util.mixture_stddev(weights_tf,
                                             means_tf,
                                             sigmas_tf)
  with self.test_session() as sess:
    actual_devs = sess.run(mix_dev)
    self.assertAllClose(actual_devs, expected_devs)
Contributor: Crazyonxh | Project: tensorflow | Lines: 33 | Source file: distribution_util_test.py
Example 3: testOptimizerInit
def testOptimizerInit(self):
  with ops.Graph().as_default():
    layer_collection = lc.LayerCollection()

    inputs = array_ops.ones((2, 1)) * 2
    weights_val = np.ones((1, 1), dtype=np.float32) * 3.
    weights = variable_scope.get_variable(
        'w', initializer=array_ops.constant(weights_val))
    bias = variable_scope.get_variable(
        'b', initializer=init_ops.zeros_initializer(), shape=(1, 1))
    output = math_ops.matmul(inputs, weights) + bias
    layer_collection.register_fully_connected((weights, bias), inputs, output)

    logits = math_ops.tanh(output)
    targets = array_ops.constant([[0.], [1.]])
    output = math_ops.reduce_mean(
        nn.softmax_cross_entropy_with_logits(logits=logits, labels=targets))
    layer_collection.register_categorical_predictive_distribution(logits)

    optimizer.KfacOptimizer(
        0.1,
        0.2,
        0.3,
        layer_collection,
        momentum=0.5,
        momentum_type='regular')
Contributor: BhaskarNallani | Project: tensorflow | Lines: 28 | Source file: optimizer_test.py
Example 4: testScopeStringFromParamsMultipleTypes
def testScopeStringFromParamsMultipleTypes(self):
  with tf_ops.Graph().as_default():
    x = array_ops.constant(1,)
    y = array_ops.constant(2,)
    scope_string = ff.scope_string_from_params([[1, 2, 3], 'foo', True, 4,
                                                (x, y)])
    self.assertEqual('1-2-3_foo_True_4_Const__Const_1', scope_string)
Contributor: AndrewTwinz | Project: tensorflow | Lines: 7 | Source file: fisher_factors_test.py
Example 5: testFullFBInitTensorTuple
def testFullFBInitTensorTuple(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(200)
    params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
    block = fb.FullFB(lc.LayerCollection(), params, 32)
    self.assertAllEqual(params, block.tensors_to_compute_grads())
Contributor: DjangoPeng | Project: tensorflow | Lines: 7 | Source file: fisher_blocks_test.py
Example 6: testAggregateGradients
def testAggregateGradients(self):

  def fn(x):
    ind1 = tensor.Tensor(np.array([0, 1]))
    ind2 = tensor.Tensor(np.array([2, 3]))
    ind3 = tensor.Tensor(np.array([1, 3]))
    # A mixture of IndexedSlices and dense tensor to aggregate.
    g1 = embedding_ops.embedding_lookup(x, ind1)
    g2 = embedding_ops.embedding_lookup(x, ind2)
    g3 = embedding_ops.embedding_lookup(x, ind3)
    g4 = math_ops.reduce_sum(x * tensor.Tensor(2.0))
    return g1 * g2 * g3 * g4

  var_np = np.random.rand(4, 2).astype(np.float32)
  var = tensor.Tensor(var_np)
  grad = backprop.gradients_function(fn, [0])(var)[0]

  with context.graph_mode(), self.test_session():
    tf_var = array_ops.constant(var_np, dtypes.float32)
    tf_ind1 = array_ops.constant([0, 1])
    tf_ind2 = array_ops.constant([2, 3])
    tf_ind3 = array_ops.constant([1, 3])
    tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
    tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
    tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
    tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
    tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
    tf_grad = gradients.gradients(tf_y, [tf_var])[0]

    tf_dense_grad = math_ops.unsorted_segment_sum(
        tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])

    self.assertAllClose(grad.numpy(), tf_dense_grad.eval())
Contributor: chdinh | Project: tensorflow | Lines: 33 | Source file: backprop_test.py
Example 7: testMultiplyInverseAgainstExplicit
def testMultiplyInverseAgainstExplicit(self):
  with ops.Graph().as_default(), self.test_session() as sess:
    random_seed.set_random_seed(200)
    params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
    block = fb.FullFB(lc.LayerCollection(), params)
    block.register_additional_minibatch(32)
    grads = (array_ops.constant([2., 3.]), array_ops.constant(4.))
    damping = 0.5
    block.instantiate_factors((grads,), damping)
    block._factor.instantiate_cov_variables()
    block.register_inverse()
    block._factor.instantiate_inv_variables()

    # Make sure our inverse is something other than the identity.
    sess.run(state_ops.assign(block._factor._cov, _make_psd(3)))
    sess.run(block._factor.make_inverse_update_ops())

    v_flat = np.array([4., 5., 6.], dtype=np.float32)
    vector = utils.column_to_tensors(params, array_ops.constant(v_flat))
    output = block.multiply_inverse(vector)
    output_flat = sess.run(utils.tensors_to_column(output)).ravel()

    full = sess.run(block.full_fisher_block())
    explicit = np.dot(np.linalg.inv(full + damping * np.eye(3)), v_flat)

    self.assertAllClose(output_flat, explicit)
Contributor: DILASSS | Project: tensorflow | Lines: 26 | Source file: fisher_blocks_test.py
Example 8: test_parameter_switching
def test_parameter_switching(self):
  parameter = array_ops.constant(5)
  overridden_parameter = array_ops.constant(3)
  with self.cached_session():
    getter = model_utils.parameter_switch({overridden_parameter: 4})
    self.assertEqual(5, getter(parameter))
    self.assertEqual(4, getter(overridden_parameter))
Contributor: Ajaycs99 | Project: tensorflow | Lines: 7 | Source file: model_utils_test.py
Example 9: testMakeSparseSplitAllEmptyDimensions
def testMakeSparseSplitAllEmptyDimensions(self):
  """Tests split handler op when all dimensions have only bias bucket id."""
  with self.test_session() as sess:
    # The data looks like the following after dividing by number of steps (2).
    # Gradients    | Partition | Dimension | bucket ID |
    # (0.9, 0.39)  | 0         | 0         | -1        |
    # (4.0, 0.13)  | 1         | 0         | -1        |
    partition_ids = array_ops.constant([0, 1], dtype=dtypes.int32)
    # We have only 1 dimension in our sparse feature column.
    bucket_ids = array_ops.constant([[-1, 0], [-1, 0]], dtype=dtypes.int64)
    gradients = array_ops.constant([1.8, 8.0])
    hessians = array_ops.constant([0.78, 0.26])
    bucket_boundaries = array_ops.constant([0.3, 0.52])
    partitions, gains, splits = (
        split_handler_ops.build_sparse_inequality_splits(
            num_minibatches=2,
            partition_ids=partition_ids,
            bucket_ids=bucket_ids,
            gradients=gradients,
            hessians=hessians,
            bucket_boundaries=bucket_boundaries,
            l1_regularization=0,
            l2_regularization=2,
            tree_complexity_regularization=0,
            min_node_weight=0,
            feature_column_group_id=0,
            bias_feature_id=-1,
            class_id=-1,
            multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
    partitions, gains, splits = (sess.run([partitions, gains, splits]))
  self.assertEqual(0, len(partitions))
  self.assertEqual(0, len(splits))
Contributor: AbhinavJain13 | Project: tensorflow | Lines: 32 | Source file: split_handler_ops_test.py
Example 10: testMakeDenseSplitEmptyInputs
def testMakeDenseSplitEmptyInputs(self):
  """Tests empty inputs op."""
  with self.test_session() as sess:
    partition_ids = array_ops.constant([], dtype=dtypes.int32)
    bucket_ids = array_ops.constant([[]], dtype=dtypes.int64)
    gradients = array_ops.constant([])
    hessians = array_ops.constant([])
    bucket_boundaries = [0.3, 0.52]
    partitions, gains, splits = (
        split_handler_ops.build_dense_inequality_splits(
            num_minibatches=0,
            partition_ids=partition_ids,
            bucket_ids=bucket_ids,
            gradients=gradients,
            hessians=hessians,
            bucket_boundaries=bucket_boundaries,
            l1_regularization=0.1,
            l2_regularization=1,
            tree_complexity_regularization=0,
            min_node_weight=0,
            class_id=-1,
            feature_column_group_id=0,
            multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
    partitions, gains, splits = sess.run([partitions, gains, splits])
  # .assertEmpty doesn't exist on ubuntu-contrib
  self.assertEqual(0, len(partitions))
  self.assertEqual(0, len(gains))
  self.assertEqual(0, len(splits))
Contributor: AbhinavJain13 | Project: tensorflow | Lines: 28 | Source file: split_handler_ops_test.py
Example 11: testColumnToTensors
def testColumnToTensors(self):
  with ops.Graph().as_default(), self.test_session() as sess:
    random_seed.set_random_seed(200)
    vector_template = array_ops.constant(np.array([[0., 1.], [2., 3.]]))
    colvec = array_ops.constant(np.arange(4.)[:, None])
    output = sess.run(utils.column_to_tensors(vector_template, colvec))
    self.assertAllClose(output, np.array([[0., 1.], [2., 3.]]))

    vector_template = self._fully_connected_layer_params()
    colvec = array_ops.constant(np.arange(6.)[:, None])
    output = sess.run(utils.column_to_tensors(vector_template, colvec))
    self.assertIsInstance(output, tuple)
    self.assertEqual(len(output), 2)
    a, b = output
    self.assertAllClose(a, np.array([[0., 1.], [2., 3.]]))
    self.assertAllClose(b, np.array([4., 5.]))

    vector_template = list(vector_template)
    vector_template.append(array_ops.constant([[6.], [7.], [8.], [9.]]))
    colvec = array_ops.constant(np.arange(10.)[:, None])
    output = sess.run(utils.column_to_tensors(vector_template, colvec))
    self.assertIsInstance(output, tuple)
    self.assertEqual(len(output), 3)
    a, b, c = output
    self.assertAllClose(a, np.array([[0., 1.], [2., 3.]]))
    self.assertAllClose(b, np.array([4., 5.]))
    self.assertAllClose(c, np.array([[6.], [7.], [8.], [9.]]))
Contributor: AbhinavJain13 | Project: tensorflow | Lines: 29 | Source file: utils_test.py
Example 12: testTrackPersistentBytes
def testTrackPersistentBytes(self):
  ops.reset_default_graph()
  a = array_ops.constant(np.ones((100, 100)))
  b = array_ops.constant(np.ones((100, 100)))
  c = a * b

  with session.Session() as sess:
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()
    sess.run(c, options=run_options, run_metadata=run_metadata)

    options = option_builder.ProfileOptionBuilder.time_and_memory()
    options['min_bytes'] = 0
    options['select'] = ('bytes', 'peak_bytes', 'output_bytes',
                         'residual_bytes')
    ret = model_analyzer.profile(
        sess.graph, run_meta=run_metadata, cmd='scope', options=options)

    run_metadata = config_pb2.RunMetadata()
    sess.run(c, options=run_options, run_metadata=run_metadata)
    ret2 = model_analyzer.profile(
        sess.graph, run_meta=run_metadata, cmd='scope', options=options)

    n = lib.SearchTFProfNode(ret, 'mul')
    n2 = lib.SearchTFProfNode(ret2, 'mul')
    self.assertGreater(n.peak_bytes, 0)
    self.assertGreater(n.output_bytes, 0)
    self.assertGreater(n.residual_bytes, 0)
    self.assertEqual(n.peak_bytes, n2.peak_bytes)
    self.assertEqual(n.output_bytes, n2.output_bytes)
    self.assertEqual(n.residual_bytes, n2.residual_bytes)
Contributor: andrewharp | Project: tensorflow | Lines: 32 | Source file: model_analyzer_test.py
Example 13: testConsistent
def testConsistent(self):
  nums, divs = self.intTestData()
  with self.test_session():
    tf_result = (
        math_ops.floor_div(nums, divs) * divs + math_ops.floormod(nums, divs)
    ).eval()
    tf_nums = array_ops.constant(nums)
    tf_divs = array_ops.constant(divs)
    tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
    np_result = (nums // divs) * divs + (nums % divs)
    # consistency with numpy
    self.assertAllEqual(tf_result, np_result)
    # consistency with two forms of divide
    self.assertAllEqual(tf_result, tf2_result)
    # consistency for truncation form
    tf3_result = (
        math_ops.truncatediv(nums, divs) * divs
        + math_ops.truncatemod(nums, divs)
    ).eval()
    expanded_nums = np.reshape(np.tile(nums, divs.shape[1]),
                               (nums.shape[0], divs.shape[1]))
    # Consistent with desire to get numerator
    self.assertAllEqual(tf3_result, expanded_nums)
    # Consistent with desire to get numerator
    self.assertAllEqual(tf_result, expanded_nums)
Contributor: Jackhuang945 | Project: tensorflow | Lines: 25 | Source file: math_ops_test.py
Example 14: testSwishLiteHint
def testSwishLiteHint(self):
  """Makes a custom op swish and makes sure it gets converted as a unit."""
  image = array_ops.constant([1., 2., 3., 4.])
  swish_scale = array_ops.constant(1.0)

  def _swish(input_tensor, scale):
    custom = op_hint.OpHint("cool_activation")
    input_tensor, scale = custom.add_inputs(input_tensor, scale)
    output = math_ops.sigmoid(input_tensor) * input_tensor * scale
    output, = custom.add_outputs(output)
    return output

  output = array_ops.identity(_swish(image, swish_scale), name="ModelOutput")

  with self.cached_session() as sess:
    # check if identities have been put into the graph (2 input, 1 output,
    # and 1 final output).
    self.assertEqual(self._countIdentities(sess.graph_def.node), 4)

    stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
        graph_def=sess.graph_def)

    self.assertEqual(
        self._getGraphOpTypes(
            stubbed_graphdef,
            output_nodes=[op_hint._tensor_name_base(output.name)]),
        set(["cool_activation", "Const", "Identity"]))
Contributor: JonathanRaiman | Project: tensorflow | Lines: 26 | Source file: convert_test.py
Example 15: testScaleAndBiasAndIdentity
def testScaleAndBiasAndIdentity(self):
  """This tests a scaled add which has 3 inputs and 2 outputs."""
  a = array_ops.constant(1.)
  x = array_ops.constant([2., 3.])
  b = array_ops.constant([4., 5.])

  def _scaled_and_bias_and_identity(a, x, b):
    custom = op_hint.OpHint("scale_and_bias_and_identity")
    a, x, b = custom.add_inputs(a, x, b)
    return custom.add_outputs(a * x + b, x)

  output = array_ops.identity(_scaled_and_bias_and_identity(a, x, b),
                              name="ModelOutput")

  with self.cached_session() as sess:
    # make sure one identity for each input (3) and output (2) => 3 + 2 = 5
    # +1 for the final output
    self.assertEqual(self._countIdentities(sess.graph_def.node), 6)

    stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
        graph_def=sess.graph_def)

    self.assertEqual(
        self._getGraphOpTypes(
            stubbed_graphdef,
            output_nodes=[op_hint._tensor_name_base(output.name)]),
        set(["scale_and_bias_and_identity", "Const", "Identity", "Pack"]))
Contributor: JonathanRaiman | Project: tensorflow | Lines: 26 | Source file: convert_test.py
Example 16: testMultiplyInverseTuple
def testMultiplyInverseTuple(self):
  with ops.Graph().as_default(), self.test_session() as sess:
    random_seed.set_random_seed(200)
    params = random_ops.random_normal((2, 2, 2, 2))
    inputs = random_ops.random_normal((2, 2, 2, 2))
    outputs = random_ops.random_normal((2, 2, 2, 2))
    block = fb.ConvKFCBasicFB(lc.LayerCollection(), params, (1, 1, 1, 1),
                              'SAME')
    block.register_additional_minibatch(inputs, outputs)
    grads = outputs**2
    block.instantiate_factors(([grads],), 0.5)

    # Make sure our inverse is something other than the identity.
    sess.run(tf_variables.global_variables_initializer())
    sess.run(block._input_factor.make_inverse_update_ops())
    sess.run(block._output_factor.make_inverse_update_ops())

    vector = (np.arange(1, 15).reshape(7, 2).astype(np.float32),
              np.arange(2, 4).reshape(2, 1).astype(np.float32))
    output = block.multiply_inverse((array_ops.constant(vector[0]),
                                     array_ops.constant(vector[1])))
    output = sess.run(output)

    self.assertAllClose([0.136455, 0.27291], output[0][0])
    self.assertAllClose([0.27291, 0.409365], output[1])
Contributor: ChengYuXiang | Project: tensorflow | Lines: 25 | Source file: fisher_blocks_test.py
Example 17: report_uninitialized_resources
def report_uninitialized_resources(resource_list=None,
                                   name="report_uninitialized_resources"):
  """Returns the names of all uninitialized resources in resource_list.

  If the returned tensor is empty then all resources have been initialized.

  Args:
    resource_list: resources to check. If None, will use shared_resources() +
      local_resources().
    name: name for the resource-checking op.

  Returns:
    Tensor containing names of the handles of all resources which have not
    yet been initialized.
  """
  if resource_list is None:
    resource_list = shared_resources() + local_resources()
  with ops.name_scope(name):
    # Run all operations on CPU
    with ops.device("/cpu:0"):
      if not resource_list:
        # Return an empty tensor so we only need to check for returned tensor
        # size being 0 as an indication of model ready.
        return array_ops.constant([], dtype=dtypes.string)
      # Get a 1-D boolean tensor listing whether each resource is initialized.
      variables_mask = math_ops.logical_not(
          array_ops.stack([r.is_initialized for r in resource_list]))
      # Get a 1-D string tensor containing all the resource names.
      variable_names_tensor = array_ops.constant(
          [s.handle.name for s in resource_list])
      # Return a 1-D tensor containing the names of all uninitialized resources.
      return array_ops.boolean_mask(variable_names_tensor, variables_mask)
Contributor: 1000sprites | Project: tensorflow | Lines: 33 | Source file: resources.py
Example 18: testRegisterSingleParamRegisteredInTuple
def testRegisterSingleParamRegisteredInTuple(self):
  x = variable_scope.get_variable('x', initializer=array_ops.constant(1,))
  y = variable_scope.get_variable('y', initializer=array_ops.constant(1,))
  lc = layer_collection.LayerCollection()
  lc.fisher_blocks = {(x, y): '1'}
  lc.register_block(x, 'foo')
  self.assertEqual(set(['1']), set(lc.get_blocks()))
Contributor: Crazyonxh | Project: tensorflow | Lines: 7 | Source file: layer_collection_test.py
Example 19: testAggregateGradients
def testAggregateGradients(self):

  def fn(x):
    ind1 = constant_op.constant(np.array([0, 1]))
    ind2 = constant_op.constant(np.array([2, 3]))
    ind3 = constant_op.constant(np.array([1, 3]))
    # A mixture of IndexedSlices and dense tensor to aggregate.
    g1 = embedding_ops.embedding_lookup(x, ind1)
    g2 = embedding_ops.embedding_lookup(x, ind2)
    g3 = embedding_ops.embedding_lookup(x, ind3)
    g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))
    return g1 * g2 * g3 * g4

  var_np = np.random.rand(4, 2).astype(np.float32)
  var = constant_op.constant(var_np)
  grad = backprop.gradients_function(fn, [0])(var)[0]
  grad = self.evaluate(ops.convert_to_tensor(grad))

  if not context.executing_eagerly():
    tf_var = array_ops.constant(var_np, dtypes.float32)
    tf_ind1 = array_ops.constant([0, 1])
    tf_ind2 = array_ops.constant([2, 3])
    tf_ind3 = array_ops.constant([1, 3])
    tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
    tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
    tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
    tf_g4 = math_ops.reduce_sum(tf_var * 2.0, axis=(0, 1))
    tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
    tf_grad = gradients.gradients(tf_y, [tf_var])[0]

    tf_dense_grad = math_ops.unsorted_segment_sum(
        tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])

    self.assertAllClose(grad, self.evaluate(tf_dense_grad))
Contributor: Wajih-O | Project: tensorflow | Lines: 34 | Source file: backprop_test.py
Example 20: test_kernel_classifier_distance_block_sizes
def test_kernel_classifier_distance_block_sizes(self):
  """Test that `kernel_classifier_distance` works with unusual max_block_size
  values.
  """
  np.random.seed(0)
  test_pool_real_a = np.float32(np.random.randn(512, 256))
  test_pool_gen_a = np.float32(np.random.randn(768, 256) * 1.1 + .05)
  max_block_size = array_ops.placeholder(dtypes.int32, shape=())

  kid_op = _run_with_mock(
      classifier_metrics.kernel_classifier_distance_and_std_from_activations,
      array_ops.constant(test_pool_real_a),
      array_ops.constant(test_pool_gen_a),
      max_block_size=max_block_size)

  for block_size in [50, 512, 1000]:
    with self.cached_session() as sess:
      actual_kid, actual_std = sess.run(kid_op, {max_block_size: block_size})

    expected_kid, expected_std = _expected_kid_and_std(
        test_pool_real_a, test_pool_gen_a, max_block_size=block_size)

    self.assertAllClose(expected_kid, actual_kid, 0.001)
    self.assertAllClose(expected_std, actual_std, 0.001)
Contributor: Albert-Z-Guo | Project: tensorflow | Lines: 26 | Source file: classifier_metrics_test.py
Note: The tensorflow.python.ops.array_ops.constant examples in this article were compiled from source code and documentation hosted on GitHub, MSDocs and similar platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult each project's license before redistributing or reusing the code, and do not republish without permission.