This article collects and summarizes typical usage examples of the Python function tensorflow.contrib.layers.python.layers.feature_column.real_valued_column. If you are wondering what real_valued_column does, how to call it, or what it looks like in real code, the curated examples below should help.
The following presents 20 code examples of the real_valued_column function, sorted by popularity by default.
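Before diving into the examples, here is a minimal usage sketch of real_valued_column (a sketch against the TensorFlow 1.x contrib API; the feature names 'price', 'measurements', and 'age' are illustrative and do not come from the examples below):

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import feature_column as fc

# Scalar float feature: dimension defaults to 1 and dtype to float32.
price = fc.real_valued_column('price')

# Fixed-length vector feature: four float values per example.
measurements = fc.real_valued_column('measurements', dimension=4)

# Integer feature with a default value used when the feature is missing.
age = fc.real_valued_column('age', default_value=0, dtype=tf.int32)

# The columns are then passed to a tf.contrib.learn estimator, for example:
# estimator = tf.contrib.learn.LinearRegressor(
#     feature_columns=[price, measurements, age])

Each column name must match a key in the features dictionary returned by the estimator's input_fn, as the test cases below demonstrate.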
Example 1: testMixedFeatures
def testMixedFeatures(self):
  """Tests SDCALogisticClassifier with a mix of features."""

  def input_fn():
    return {
        'example_id':
            constant_op.constant(['1', '2', '3']),
        'price':
            constant_op.constant([[0.6], [0.8], [0.3]]),
        'sq_footage':
            constant_op.constant([[900.0], [700.0], [600.0]]),
        'country':
            sparse_tensor.SparseTensor(
                values=['IT', 'US', 'GB'],
                indices=[[0, 0], [1, 3], [2, 1]],
                dense_shape=[3, 5]),
        'weights':
            constant_op.constant([[3.0], [1.0], [1.0]])
    }, constant_op.constant([[1], [0], [1]])

  price = feature_column_lib.real_valued_column('price')
  sq_footage_bucket = feature_column_lib.bucketized_column(
      feature_column_lib.real_valued_column('sq_footage'),
      boundaries=[650.0, 800.0])
  country = feature_column_lib.sparse_column_with_hash_bucket(
      'country', hash_bucket_size=5)
  sq_footage_country = feature_column_lib.crossed_column(
      [sq_footage_bucket, country], hash_bucket_size=10)
  classifier = sdca_estimator.SDCALogisticClassifier(
      example_id_column='example_id',
      feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
      weight_column_name='weights')
  classifier.fit(input_fn=input_fn, steps=50)
  metrics = classifier.evaluate(input_fn=input_fn, steps=1)
  self.assertGreater(metrics['accuracy'], 0.9)
Developer: LUTAN, Project: tensorflow, Lines: 35, Source: sdca_estimator_test.py
Example 2: testRealValuedColumnDensification
def testRealValuedColumnDensification(self):
  """Tests densification behavior of `RealValuedColumn`."""
  # No default value, dimension 1 float.
  real_valued_column = fc.real_valued_column(
      "sparse_real_valued1", dimension=None)
  sparse_tensor = sparse_tensor_lib.SparseTensor(
      values=[2.0, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
  densified_output = real_valued_column._to_dnn_input_layer(sparse_tensor)

  # With default value, dimension 2 int.
  real_valued_column_with_default = fc.real_valued_column(
      "sparse_real_valued2",
      dimension=None,
      default_value=-1,
      dtype=dtypes.int32)
  sparse_tensor2 = sparse_tensor_lib.SparseTensor(
      values=[2, 5, 9, 0],
      indices=[[0, 0], [1, 1], [2, 0], [2, 1]],
      dense_shape=[3, 2])
  densified_output2 = real_valued_column_with_default._to_dnn_input_layer(
      sparse_tensor2)

  with self.test_session() as sess:
    densified_output_eval, densified_output_eval2 = sess.run(
        [densified_output, densified_output2])
    self.assertAllEqual(densified_output_eval, [[2.0], [0.0], [5.0]])
    self.assertAllEqual(densified_output_eval2, [[2, -1], [-1, 5], [9, 0]])
Developer: Jackhuang945, Project: tensorflow, Lines: 27, Source: feature_column_test.py
Example 3: testRealValuedFeaturesWithBigL1Regularization
def testRealValuedFeaturesWithBigL1Regularization(self):
  """Tests SVM classifier with real valued features and large L1 regularization."""

  def input_fn():
    return {
        'example_id': constant_op.constant(['1', '2', '3']),
        'feature1': constant_op.constant([0.5, 1.0, 1.0]),
        'feature2': constant_op.constant([[1.0], [-1.0], [0.5]]),
    }, constant_op.constant([[1], [0], [1]])

  feature1 = feature_column.real_valued_column('feature1')
  feature2 = feature_column.real_valued_column('feature2')
  svm_classifier = svm.SVM(feature_columns=[feature1, feature2],
                           example_id_column='example_id',
                           l1_regularization=3.0,
                           l2_regularization=1.0)
  svm_classifier.fit(input_fn=input_fn, steps=30)
  metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
  loss = metrics['loss']
  accuracy = metrics['accuracy']
  # When the L1 regularization parameter is large, the loss due to
  # regularization outweighs the unregularized loss. In this case, the
  # classifier favors very small weights (here 0), resulting in both a large
  # unregularized loss and poor accuracy.
  self.assertAlmostEqual(loss, 1.0, places=3)
  self.assertAlmostEqual(accuracy, 1 / 3, places=3)
Developer: 1000sprites, Project: tensorflow, Lines: 27, Source: svm_test.py
Example 4: testRealValuedFeaturesWithL2Regularization
def testRealValuedFeaturesWithL2Regularization(self):
  """Tests SVM classifier with real valued features and L2 regularization."""

  def input_fn():
    return {
        'example_id': constant_op.constant(['1', '2', '3']),
        'feature1': constant_op.constant([0.5, 1.0, 1.0]),
        'feature2': constant_op.constant([1.0, -1.0, 0.5]),
    }, constant_op.constant([1, 0, 1])

  feature1 = feature_column.real_valued_column('feature1')
  feature2 = feature_column.real_valued_column('feature2')
  svm_classifier = svm.SVM(feature_columns=[feature1, feature2],
                           example_id_column='example_id',
                           l1_regularization=0.0,
                           l2_regularization=1.0)
  svm_classifier.fit(input_fn=input_fn, steps=30)
  metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
  loss = metrics['loss']
  accuracy = metrics['accuracy']
  # The points are in general separable. Also, without regularization the
  # margin inequalities would be satisfied too (for instance by w1=1.0,
  # w2=5.0). Due to regularization, smaller weights are chosen, which results
  # in a small but non-zero unregularized loss. Still, all the predictions
  # will be correct, resulting in perfect accuracy.
  self.assertLess(loss, 0.1)
  self.assertAlmostEqual(accuracy, 1.0, places=3)
Developer: 1000sprites, Project: tensorflow, Lines: 27, Source: svm_test.py
Example 5: testRealValuedFeaturesPerfectlySeparable
def testRealValuedFeaturesPerfectlySeparable(self):
  """Tests SVM classifier with real valued features."""

  def input_fn():
    return {
        'example_id': constant_op.constant(['1', '2', '3']),
        'feature1': constant_op.constant([[0.0], [1.0], [3.0]]),
        'feature2': constant_op.constant([[1.0], [-1.2], [1.0]]),
    }, constant_op.constant([[1], [0], [1]])

  feature1 = feature_column.real_valued_column('feature1')
  feature2 = feature_column.real_valued_column('feature2')
  svm_classifier = svm.SVM(feature_columns=[feature1, feature2],
                           example_id_column='example_id',
                           l1_regularization=0.0,
                           l2_regularization=0.0)
  svm_classifier.fit(input_fn=input_fn, steps=30)
  metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
  loss = metrics['loss']
  accuracy = metrics['accuracy']
  # The points are not only separable but there exist weights (for instance
  # w1=0.0, w2=1.0) that satisfy the margin inequalities (y_i * w^T * x_i >= 1).
  # The unregularized loss should therefore be 0.0.
  self.assertAlmostEqual(loss, 0.0, places=3)
  self.assertAlmostEqual(accuracy, 1.0, places=3)
Developer: 1000sprites, Project: tensorflow, Lines: 25, Source: svm_test.py
Example 6: testBucketizedFeatures
def testBucketizedFeatures(self):
  """Tests SDCALogisticClassifier with bucketized features."""

  def input_fn():
    return {
        'example_id': constant_op.constant(['1', '2', '3']),
        'price': constant_op.constant([600.0, 1000.0, 400.0]),
        'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
        'weights': constant_op.constant([[1.0], [1.0], [1.0]])
    }, constant_op.constant([[1], [0], [1]])

  with self._single_threaded_test_session():
    price_bucket = feature_column_lib.bucketized_column(
        feature_column_lib.real_valued_column('price'),
        boundaries=[500.0, 700.0])
    sq_footage_bucket = feature_column_lib.bucketized_column(
        feature_column_lib.real_valued_column('sq_footage'),
        boundaries=[650.0])
    classifier = sdca_estimator.SDCALogisticClassifier(
        example_id_column='example_id',
        feature_columns=[price_bucket, sq_footage_bucket],
        weight_column_name='weights',
        l2_regularization=1.0)
    classifier.fit(input_fn=input_fn, steps=50)
    metrics = classifier.evaluate(input_fn=input_fn, steps=1)
    self.assertGreater(metrics['accuracy'], 0.9)
Developer: AnishShah, Project: tensorflow, Lines: 26, Source: sdca_estimator_test.py
Example 7: testPrepareInputsForRnnBatchSize2
def testPrepareInputsForRnnBatchSize2(self):
  num_unroll = 3
  expected = [
      np.array([[11., 31., 5., 7.], [21., 41., 6., 8.]]),
      np.array([[12., 32., 5., 7.], [22., 42., 6., 8.]]),
      np.array([[13., 33., 5., 7.], [23., 43., 6., 8.]])
  ]
  sequence_features = {
      'seq_feature0':
          constant_op.constant([[11., 12., 13.], [21., 22., 23.]]),
      'seq_feature1':
          constant_op.constant([[31., 32., 33.], [41., 42., 43.]])
  }
  sequence_feature_columns = [
      feature_column.real_valued_column(
          'seq_feature0', dimension=1),
      feature_column.real_valued_column(
          'seq_feature1', dimension=1),
  ]
  context_features = {
      'ctx_feature0': constant_op.constant([[5.], [6.]]),
      'ctx_feature1': constant_op.constant([[7.], [8.]])
  }
  self._test_prepare_inputs_for_rnn(sequence_features, context_features,
                                    sequence_feature_columns, num_unroll,
                                    expected)
Developer: finardi, Project: tensorflow, Lines: 32, Source: state_saving_rnn_estimator_test.py
Example 8: testBiasAndOtherColumns
def testBiasAndOtherColumns(self):
  """SDCALinearRegressor has valid bias weight with other columns present."""

  def input_fn():
    """Testing the bias weight when there are other features present.

    1/2 of the instances in this input have feature 'a', the rest have
    feature 'b', and we expect the bias to be added to each instance as well.
    0.4 of all instances that have feature 'a' are positive, and 0.2 of all
    instances that have feature 'b' are positive. The labels in the dataset
    are ordered to appear shuffled since SDCA expects shuffled data, and
    converges faster with this pseudo-random ordering.
    If the bias was centered we would expect the weights to be:
      bias: 0.3
      a: 0.1
      b: -0.1
    Until b/29339026 is resolved, the bias gets regularized with the same
    global value for the other columns, and so the expected weights get
    shifted and are:
      bias: 0.2
      a: 0.2
      b: 0.0

    Returns:
      The test dataset.
    """
    num_examples = 200
    half = int(num_examples / 2)
    return {
        'example_id':
            constant_op.constant([str(x + 1) for x in range(num_examples)]),
        'a':
            constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
        'b':
            constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
    }, constant_op.constant(
        [[x]
         for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
         [0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])

  with self._single_threaded_test_session():
    regressor = sdca_estimator.SDCALinearRegressor(
        example_id_column='example_id',
        feature_columns=[
            feature_column_lib.real_valued_column('a'),
            feature_column_lib.real_valued_column('b')
        ])
    regressor.fit(input_fn=input_fn, steps=200)
    variable_names = regressor.get_variable_names()
    self.assertIn('linear/bias_weight', variable_names)
    self.assertIn('linear/a/weight', variable_names)
    self.assertIn('linear/b/weight', variable_names)
    # TODO(b/29339026): Change the expected results to expect a centered bias.
    self.assertNear(
        regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
    self.assertNear(
        regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
    self.assertNear(
        regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
Developer: AnishShah, Project: tensorflow, Lines: 60, Source: sdca_estimator_test.py
Example 9: testRealValuedFeaturesWithMildL1Regularization
def testRealValuedFeaturesWithMildL1Regularization(self):
  """Tests SVM classifier with real valued features and mild L1 regularization."""

  def input_fn():
    return {
        'example_id': constant_op.constant(['1', '2', '3']),
        'feature1': constant_op.constant([[0.5], [1.0], [1.0]]),
        'feature2': constant_op.constant([[1.0], [-1.0], [0.5]]),
    }, constant_op.constant([[1], [0], [1]])

  feature1 = feature_column.real_valued_column('feature1')
  feature2 = feature_column.real_valued_column('feature2')
  svm_classifier = svm.SVM(feature_columns=[feature1, feature2],
                           example_id_column='example_id',
                           l1_regularization=0.5,
                           l2_regularization=1.0)
  svm_classifier.fit(input_fn=input_fn, steps=30)
  metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
  loss = metrics['loss']
  accuracy = metrics['accuracy']
  # Adding a small amount of L1 regularization favors even smaller weights.
  # This results in a somewhat larger unregularized loss than with no L1
  # regularization. Still, since the L1 penalty is small, all the predictions
  # will be correct, resulting in perfect accuracy.
  self.assertGreater(loss, 0.1)
  self.assertAlmostEqual(accuracy, 1.0, places=3)
Developer: 1000sprites, Project: tensorflow, Lines: 27, Source: svm_test.py
Example 10: testMakePlaceHolderTensorsForBaseFeatures
def testMakePlaceHolderTensorsForBaseFeatures(self):
  sparse_col = fc.sparse_column_with_hash_bucket(
      "sparse_column", hash_bucket_size=100)
  real_valued_col = fc.real_valued_column("real_valued_column", 5)
  vlen_real_valued_col = fc.real_valued_column(
      "vlen_real_valued_column", dimension=None)

  bucketized_col = fc.bucketized_column(
      fc.real_valued_column("real_valued_column_for_bucketization"), [0, 4])
  feature_columns = set(
      [sparse_col, real_valued_col, vlen_real_valued_col, bucketized_col])
  placeholders = (
      fc.make_place_holder_tensors_for_base_features(feature_columns))

  self.assertEqual(4, len(placeholders))
  self.assertTrue(
      isinstance(placeholders["sparse_column"],
                 sparse_tensor_lib.SparseTensor))
  self.assertTrue(
      isinstance(placeholders["vlen_real_valued_column"],
                 sparse_tensor_lib.SparseTensor))
  placeholder = placeholders["real_valued_column"]
  self.assertGreaterEqual(
      placeholder.name.find(u"Placeholder_real_valued_column"), 0)
  self.assertEqual(dtypes.float32, placeholder.dtype)
  self.assertEqual([None, 5], placeholder.get_shape().as_list())
  placeholder = placeholders["real_valued_column_for_bucketization"]
  self.assertGreaterEqual(
      placeholder.name.find(
          u"Placeholder_real_valued_column_for_bucketization"), 0)
  self.assertEqual(dtypes.float32, placeholder.dtype)
  self.assertEqual([None, 1], placeholder.get_shape().as_list())
Developer: Jackhuang945, Project: tensorflow, Lines: 32, Source: feature_column_test.py
Example 11: testRealValuedColumnDtypes
def testRealValuedColumnDtypes(self):
  rvc = fc.real_valued_column("rvc")
  self.assertDictEqual({
      "rvc": parsing_ops.FixedLenFeature([1], dtype=dtypes.float32)
  }, rvc.config)

  rvc = fc.real_valued_column("rvc", dtype=dtypes.int32)
  self.assertDictEqual({
      "rvc": parsing_ops.FixedLenFeature([1], dtype=dtypes.int32)
  }, rvc.config)

  with self.assertRaisesRegexp(ValueError,
                               "dtype must be convertible to float"):
    fc.real_valued_column("rvc", dtype=dtypes.string)
Developer: ChengYuXiang, Project: tensorflow, Lines: 14, Source: feature_column_test.py
Example 12: testCreateSequenceFeatureSpec
def testCreateSequenceFeatureSpec(self):
  sparse_col = fc.sparse_column_with_hash_bucket(
      "sparse_column", hash_bucket_size=100)
  embedding_col = fc.embedding_column(
      fc.sparse_column_with_hash_bucket(
          "sparse_column_for_embedding", hash_bucket_size=10),
      dimension=4)
  sparse_id_col = fc.sparse_column_with_keys("id_column",
                                             ["marlo", "omar", "stringer"])
  weighted_id_col = fc.weighted_sparse_column(sparse_id_col,
                                              "id_weights_column")
  real_valued_col1 = fc.real_valued_column("real_valued_column", dimension=2)
  real_valued_col2 = fc.real_valued_column(
      "real_valued_default_column", dimension=5, default_value=3.0)
  real_valued_col3 = fc._real_valued_var_len_column(
      "real_valued_var_len_column", default_value=3.0, is_sparse=True)
  real_valued_col4 = fc._real_valued_var_len_column(
      "real_valued_var_len_dense_column", default_value=4.0, is_sparse=False)

  feature_columns = set([
      sparse_col, embedding_col, weighted_id_col, real_valued_col1,
      real_valued_col2, real_valued_col3, real_valued_col4
  ])

  feature_spec = fc._create_sequence_feature_spec_for_parsing(feature_columns)

  expected_feature_spec = {
      "sparse_column":
          parsing_ops.VarLenFeature(dtypes.string),
      "sparse_column_for_embedding":
          parsing_ops.VarLenFeature(dtypes.string),
      "id_column":
          parsing_ops.VarLenFeature(dtypes.string),
      "id_weights_column":
          parsing_ops.VarLenFeature(dtypes.float32),
      "real_valued_column":
          parsing_ops.FixedLenSequenceFeature(
              shape=[2], dtype=dtypes.float32, allow_missing=False),
      "real_valued_default_column":
          parsing_ops.FixedLenSequenceFeature(
              shape=[5], dtype=dtypes.float32, allow_missing=True),
      "real_valued_var_len_column":
          parsing_ops.VarLenFeature(dtype=dtypes.float32),
      "real_valued_var_len_dense_column":
          parsing_ops.FixedLenSequenceFeature(
              shape=[], dtype=dtypes.float32, allow_missing=True,
              default_value=4.0),
  }

  self.assertDictEqual(expected_feature_spec, feature_spec)
Developer: AlbertXiebnu, Project: tensorflow, Lines: 50, Source: feature_column_test.py
Example 13: testBiasAndOtherColumnsFabricatedCentered
def testBiasAndOtherColumnsFabricatedCentered(self):
  """SDCALinearRegressor has valid bias weight when instances are centered."""

  def input_fn():
    """Testing the bias weight when there are other features present.

    1/2 of the instances in this input have feature 'a', the rest have
    feature 'b', and we expect the bias to be added to each instance as well.
    0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
    all instances that have feature 'b' have a label of -1.
    We can expect the weights to be:
      bias: 0.0
      a: 0.1
      b: -0.1

    Returns:
      The test dataset.
    """
    num_examples = 200
    half = int(num_examples / 2)
    return {
        'example_id':
            constant_op.constant([str(x + 1) for x in range(num_examples)]),
        'a':
            constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
        'b':
            constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
    }, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
                            [[-1 if x % 10 == 0 else 0] for x in range(half)])

  with self._single_threaded_test_session():
    regressor = sdca_estimator.SDCALinearRegressor(
        example_id_column='example_id',
        feature_columns=[
            feature_column_lib.real_valued_column('a'),
            feature_column_lib.real_valued_column('b')
        ])
    regressor.fit(input_fn=input_fn, steps=100)
    variable_names = regressor.get_variable_names()
    self.assertIn('linear/bias_weight', variable_names)
    self.assertIn('linear/a/weight', variable_names)
    self.assertIn('linear/b/weight', variable_names)
    self.assertNear(
        regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
    self.assertNear(
        regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
    self.assertNear(
        regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
Developer: AnishShah, Project: tensorflow, Lines: 49, Source: sdca_estimator_test.py
Example 14: _getModelFnOpsForMode
def _getModelFnOpsForMode(self, mode):
  """Helper for testGetRnnModelFn{Train,Eval,Infer}()."""
  num_units = [4]
  seq_columns = [
      feature_column.real_valued_column(
          'inputs', dimension=1)
  ]
  features = {
      'inputs': constant_op.constant([1., 2., 3.]),
  }
  labels = constant_op.constant([1., 0., 1.])
  model_fn = ssre._get_rnn_model_fn(
      cell_type='basic_rnn',
      target_column=target_column_lib.multi_class_target(n_classes=2),
      optimizer='SGD',
      num_unroll=2,
      num_units=num_units,
      num_threads=1,
      queue_capacity=10,
      batch_size=1,
      # Only CLASSIFICATION yields eval metrics to test for.
      problem_type=constants.ProblemType.CLASSIFICATION,
      sequence_feature_columns=seq_columns,
      context_feature_columns=None,
      learning_rate=0.1)
  model_fn_ops = model_fn(features=features, labels=labels, mode=mode)
  return model_fn_ops
Developer: finardi, Project: tensorflow, Lines: 27, Source: state_saving_rnn_estimator_test.py
Example 15: testMultiDimensionalRealValuedFeaturesWithL2Regularization
def testMultiDimensionalRealValuedFeaturesWithL2Regularization(self):
  """Tests SVM with multi-dimensional real features and L2 regularization."""
  # This is identical to the one in testRealValuedFeaturesWithL2Regularization
  # where 2 tensors (dense features) of shape [3, 1] have been replaced by a
  # single tensor (dense feature) of shape [3, 2].

  def input_fn():
    return {
        'example_id':
            constant_op.constant(['1', '2', '3']),
        'multi_dim_feature':
            constant_op.constant([[0.5, 1.0], [1.0, -1.0], [1.0, 0.5]]),
    }, constant_op.constant([[1], [0], [1]])

  multi_dim_feature = feature_column.real_valued_column(
      'multi_dim_feature', dimension=2)
  svm_classifier = svm.SVM(feature_columns=[multi_dim_feature],
                           example_id_column='example_id',
                           l1_regularization=0.0,
                           l2_regularization=1.0)
  svm_classifier.fit(input_fn=input_fn, steps=30)
  metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
  loss = metrics['loss']
  accuracy = metrics['accuracy']
  self.assertLess(loss, 0.1)
  self.assertAlmostEqual(accuracy, 1.0, places=3)
Developer: 1000sprites, Project: tensorflow, Lines: 26, Source: svm_test.py
Example 16: test_checkpoint_and_export
def test_checkpoint_and_export(self):
  model_dir = tempfile.mkdtemp()
  config = run_config_lib.RunConfig(save_checkpoints_steps=3)
  est = dnn.DNNClassifier(
      n_classes=3,
      feature_columns=[
          feature_column.real_valued_column('feature', dimension=4)
      ],
      hidden_units=[3, 3],
      model_dir=model_dir,
      config=config)

  exp_strategy = saved_model_export_utils.make_export_strategy(
      est, 'export_input', exports_to_keep=None)

  ex = experiment.Experiment(
      est,
      train_input_fn=test_data.iris_input_multiclass_fn,
      eval_input_fn=test_data.iris_input_multiclass_fn,
      export_strategies=(exp_strategy,),
      train_steps=8,
      checkpoint_and_export=True,
      eval_delay_secs=0)

  with test.mock.patch.object(ex, '_maybe_export'):
    with test.mock.patch.object(ex, '_call_evaluate'):
      ex.train_and_evaluate()
      # Eval and export are called after steps 1, 4, 7, and 8 (after training
      # is completed).
      self.assertEqual(ex._maybe_export.call_count, 4)
      self.assertEqual(ex._call_evaluate.call_count, 4)
Developer: Kongsea, Project: tensorflow, Lines: 31, Source: experiment_test.py
Example 17: testExport
def testExport(self):
  """Tests export model for servo."""

  def input_fn():
    return {
        'age':
            constant_op.constant([1]),
        'language':
            sparse_tensor.SparseTensor(
                values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
    }, constant_op.constant([[1]])

  language = feature_column.sparse_column_with_hash_bucket('language', 100)
  feature_columns = [
      feature_column.real_valued_column('age'),
      feature_column.embedding_column(
          language, dimension=1)
  ]

  classifier = debug.DebugClassifier(config=run_config.RunConfig(
      tf_random_seed=1))
  classifier.fit(input_fn=input_fn, steps=5)

  def default_input_fn(unused_estimator, examples):
    return feature_column_ops.parse_feature_columns_from_examples(
        examples, feature_columns)

  export_dir = tempfile.mkdtemp()
  classifier.export(export_dir, input_fn=default_input_fn)
Developer: eduardofv, Project: tensorflow, Lines: 29, Source: debug_test.py
Example 18: testSparseFeatures
def testSparseFeatures(self):
  """Tests SVM classifier with (hashed) sparse features."""

  def input_fn():
    return {
        'example_id':
            constant_op.constant(['1', '2', '3']),
        'price':
            constant_op.constant([[0.8], [0.6], [0.3]]),
        'country':
            sparse_tensor.SparseTensor(
                values=['IT', 'US', 'GB'],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1]),
    }, constant_op.constant([[0], [1], [1]])

  price = feature_column.real_valued_column('price')
  country = feature_column.sparse_column_with_hash_bucket(
      'country', hash_bucket_size=5)
  svm_classifier = svm.SVM(feature_columns=[price, country],
                           example_id_column='example_id',
                           l1_regularization=0.0,
                           l2_regularization=1.0)
  svm_classifier.fit(input_fn=input_fn, steps=30)
  accuracy = svm_classifier.evaluate(input_fn=input_fn, steps=1)['accuracy']
  self.assertAlmostEqual(accuracy, 1.0, places=3)
Developer: 1000sprites, Project: tensorflow, Lines: 26, Source: svm_test.py
Example 19: _make_experiment_fn
def _make_experiment_fn(output_dir):
  """Creates experiment for DNNBoostedTreeCombinedRegressor."""

  (x_train, y_train), (x_test,
                       y_test) = tf.keras.datasets.boston_housing.load_data()
  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={"x": x_train},
      y=y_train,
      batch_size=FLAGS.batch_size,
      num_epochs=None,
      shuffle=True)
  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={"x": x_test}, y=y_test, num_epochs=1, shuffle=False)

  feature_columns = [
      feature_column.real_valued_column("x", dimension=_BOSTON_NUM_FEATURES)
  ]
  feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(
      feature_columns)
  serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
  export_strategies = [
      saved_model_export_utils.make_export_strategy(serving_input_fn)]
  return tf.contrib.learn.Experiment(
      estimator=_get_estimator(output_dir, feature_columns),
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      train_steps=None,
      eval_steps=FLAGS.num_eval_steps,
      eval_metrics=None,
      export_strategies=export_strategies)
Developer: Albert-Z-Guo, Project: tensorflow, Lines: 30, Source: boston_combined.py
Example 20: testFitAndEvaluateMultiClassFullDontThrowException
def testFitAndEvaluateMultiClassFullDontThrowException(self):
  learner_config = learner_pb2.LearnerConfig()
  learner_config.num_classes = 3
  learner_config.constraints.max_tree_depth = 1
  learner_config.multi_class_strategy = (
      learner_pb2.LearnerConfig.FULL_HESSIAN)

  model_dir = tempfile.mkdtemp()
  config = run_config.RunConfig()

  classifier = estimator.GradientBoostedDecisionTreeClassifier(
      learner_config=learner_config,
      n_classes=learner_config.num_classes,
      num_trees=1,
      examples_per_layer=7,
      model_dir=model_dir,
      config=config,
      center_bias=False,
      feature_columns=[contrib_feature_column.real_valued_column("x")])

  classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
  classifier.evaluate(input_fn=_eval_input_fn, steps=1)
  classifier.export(self._export_dir_base)
  result_iter = classifier.predict(input_fn=_eval_input_fn)
  for prediction_dict in result_iter:
    self.assertTrue("classes" in prediction_dict)
Developer: AnishShah, Project: tensorflow, Lines: 26, Source: estimator_test.py
Note: the tensorflow.contrib.layers.python.layers.feature_column.real_valued_column examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce this article without permission.