This article collects typical usage examples of the numeric_column function from tensorflow.python.feature_column.feature_column_lib in Python. If you have been wondering what exactly numeric_column does, how to call it, and what real uses look like, the curated code examples below should help.
The following shows 20 code examples of the numeric_column function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
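Before the examples, here is a minimal sketch of numeric_column on its own (TF 1.x-style, matching the tests below). The 'price'/'size' keys and the toy batch are illustrative assumptions, not taken from the examples that follow: numeric_column declares a dense real-valued feature by key, with optional shape, dtype, default_value, and normalizer_fn arguments, and the resulting column is later consumed by builders such as input_layer or linear_model.

# Minimal, hedged sketch; feature names and data are made up for illustration.
import tensorflow as tf
from tensorflow.python.feature_column import feature_column_lib as fc

price = fc.numeric_column('price')                        # scalar float feature
size = fc.numeric_column('size', shape=(2,),
                         normalizer_fn=lambda x: x / 100.)  # 2-d feature, rescaled
features = {
    'price': tf.constant([[1.], [5.]]),
    'size': tf.constant([[10., 20.], [30., 40.]]),
}
net = fc.input_layer(features, [price, size])             # dense tensor of shape (2, 3)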
Example 1: testTrainEvaluateInferDoesNotThrowErrorWithNoDnnInput
def testTrainEvaluateInferDoesNotThrowErrorWithNoDnnInput(self):
  head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
      loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
  learner_config = learner_pb2.LearnerConfig()
  learner_config.num_classes = 2
  learner_config.constraints.max_tree_depth = 3
  model_dir = tempfile.mkdtemp()
  config = run_config.RunConfig()
  est = estimator.CoreDNNBoostedTreeCombinedEstimator(
      head=head_fn,
      dnn_hidden_units=[1],
      dnn_feature_columns=[core_feature_column.numeric_column("x")],
      tree_learner_config=learner_config,
      num_trees=1,
      tree_examples_per_layer=3,
      model_dir=model_dir,
      config=config,
      dnn_steps_to_train=10,
      dnn_input_layer_to_tree=False,
      tree_feature_columns=[core_feature_column.numeric_column("x")])
  # Train for a few steps.
  est.train(input_fn=_train_input_fn, steps=1000)
  # 10 steps for dnn, 3 for 1 tree of depth 3 + 1 after the tree finished
  self._assert_checkpoint(est.model_dir, global_step=15)
  res = est.evaluate(input_fn=_eval_input_fn, steps=1)
  self.assertLess(0.5, res["auc"])
  est.predict(input_fn=_eval_input_fn)
Developer: baojianzhou, Project: tensorflow, Lines of code: 30, Source file: dnn_tree_combined_estimator_test.py
Example 2: testRankingDontThrowExceptionForForEstimator
def testRankingDontThrowExceptionForForEstimator(self):
  learner_config = learner_pb2.LearnerConfig()
  learner_config.num_classes = 2
  learner_config.constraints.max_tree_depth = 1
  model_dir = tempfile.mkdtemp()
  config = run_config.RunConfig()
  head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
      loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
  est = estimator.CoreGradientBoostedDecisionTreeRanker(
      head=head_fn,
      learner_config=learner_config,
      num_trees=1,
      examples_per_layer=3,
      model_dir=model_dir,
      config=config,
      feature_columns=[
          core_feature_column.numeric_column("f1"),
          core_feature_column.numeric_column("f2")
      ],
      ranking_model_pair_keys=("a", "b"))
  # Train for a few steps.
  est.train(input_fn=_ranking_train_input_fn, steps=1000)
  est.evaluate(input_fn=_ranking_train_input_fn, steps=1)
  est.predict(input_fn=_infer_ranking_train_input_fn)
Developer: AnishShah, Project: tensorflow, Lines of code: 27, Source file: estimator_test.py
Example 3: test_parse_features
def test_parse_features(self):
  """Tests the various behaviours of kmeans._parse_features_if_necessary."""
  # No-op if a tensor is passed in.
  features = constant_op.constant(self.points)
  parsed_features = kmeans_lib._parse_features_if_necessary(features, None)
  self.assertAllEqual(features, parsed_features)
  # All values from a feature dict are transformed into a tensor.
  feature_dict = {
      'x': [[point[0]] for point in self.points],
      'y': [[point[1]] for point in self.points]
  }
  parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
      feature_dict, None)
  self._parse_feature_dict_helper(features, parsed_feature_dict)
  # Only the feature_columns of a feature dict are transformed into a tensor.
  feature_dict_with_extras = {
      'foo': 'bar',
      'x': [[point[0]] for point in self.points],
      'baz': {'fizz': 'buzz'},
      'y': [[point[1]] for point in self.points]
  }
  feature_columns = [fc.numeric_column(key='x'), fc.numeric_column(key='y')]
  parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
      feature_dict_with_extras, feature_columns)
  self._parse_feature_dict_helper(features, parsed_feature_dict)
Developer: Ajaycs99, Project: tensorflow, Lines of code: 28, Source file: kmeans_test.py
Example 4: _get_estimator
def _get_estimator(self,
                   train_distribute,
                   eval_distribute,
                   remote_cluster=None):
  input_dimension = LABEL_DIMENSION
  linear_feature_columns = [
      feature_column.numeric_column("x", shape=(input_dimension,))
  ]
  dnn_feature_columns = [
      feature_column.numeric_column("x", shape=(input_dimension,))
  ]
  return dnn_linear_combined.DNNLinearCombinedRegressor(
      linear_feature_columns=linear_feature_columns,
      dnn_hidden_units=(2, 2),
      dnn_feature_columns=dnn_feature_columns,
      label_dimension=LABEL_DIMENSION,
      model_dir=self._model_dir,
      dnn_optimizer=adagrad.AdagradOptimizer(0.001),
      linear_optimizer=adagrad.AdagradOptimizer(0.001),
      config=run_config_lib.RunConfig(
          experimental_distribute=DistributeConfig(
              train_distribute=train_distribute,
              eval_distribute=eval_distribute,
              remote_cluster=remote_cluster)))
Developer: kylin9872, Project: tensorflow, Lines of code: 25, Source file: estimator_training_test.py
Example 5: test_linear_model_numpy_input_fn
def test_linear_model_numpy_input_fn(self):
  price = fc.numeric_column('price')
  price_buckets = fc.bucketized_column(price, boundaries=[0., 10., 100.,])
  body_style = fc.categorical_column_with_vocabulary_list(
      'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
  input_fn = numpy_io.numpy_input_fn(
      x={
          'price': np.array([-1., 2., 13., 104.]),
          'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
      },
      batch_size=2,
      shuffle=False)
  features = input_fn()
  net = fc.linear_model(features, [price_buckets, body_style])
  # self.assertEqual(1 + 3 + 5, net.shape[1])
  with self._initialized_session() as sess:
    coord = coordinator.Coordinator()
    threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
    bias = self._get_linear_model_bias()
    price_buckets_var = self._get_linear_model_column_var(price_buckets)
    body_style_var = self._get_linear_model_column_var(body_style)
    sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
    sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
    sess.run(bias.assign([5.]))
    self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], sess.run(net))
    coord.request_stop()
    coord.join(threads)
Developer: ZhangXinNan, Project: tensorflow, Lines of code: 32, Source file: numpy_io_test.py
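A note on the expected values in Example 5 (this arithmetic is my reading of the assertion, not text from the original page): the input_fn yields two rows per batch, so the assertion covers prices -1. and 2. With boundaries [0., 10., 100.], price -1. falls in bucket 0 (assigned weight 10.) and 'sedan' is the third vocabulary entry (weight -1000.), giving 10 - 1000 + 5 = -985; price 2. falls in bucket 1 (weight 100.) and 'hardtop' is the first entry (weight -10.), giving 100 - 10 + 5 = 95.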
Example 6: testWithFeatureColumns
def testWithFeatureColumns(self):
  head_fn = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
      n_classes=3, loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
  hparams = tensor_forest.ForestHParams(
      num_trees=3,
      max_nodes=1000,
      num_classes=3,
      num_features=4,
      split_after_samples=20,
      inference_tree_paths=True)
  est = random_forest.CoreTensorForestEstimator(
      hparams.fill(),
      head=head_fn,
      feature_columns=[core_feature_column.numeric_column('x')])
  iris = base.load_iris()
  data = {'x': iris.data.astype(np.float32)}
  labels = iris.target.astype(np.int32)
  input_fn = numpy_io.numpy_input_fn(
      x=data, y=labels, batch_size=150, num_epochs=None, shuffle=False)
  est.train(input_fn=input_fn, steps=100)
  res = est.evaluate(input_fn=input_fn, steps=1)
  self.assertEqual(1.0, res['accuracy'])
  self.assertAllClose(0.55144483, res['loss'])
Developer: Ajaycs99, Project: tensorflow, Lines of code: 29, Source file: random_forest_test.py
Example 7: testFitAndEvaluateMultiClassFullDontThrowException
def testFitAndEvaluateMultiClassFullDontThrowException(self):
  n_classes = 3
  learner_config = learner_pb2.LearnerConfig()
  learner_config.num_classes = n_classes
  learner_config.constraints.max_tree_depth = 1
  learner_config.multi_class_strategy = (
      learner_pb2.LearnerConfig.FULL_HESSIAN)
  head_fn = estimator.core_multiclass_head(n_classes=n_classes)
  model_dir = tempfile.mkdtemp()
  config = run_config.RunConfig()
  classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
      learner_config=learner_config,
      head=head_fn,
      num_trees=1,
      center_bias=False,
      examples_per_layer=7,
      model_dir=model_dir,
      config=config,
      feature_columns=[core_feature_column.numeric_column("x")])
  classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
  classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
  classifier.predict(input_fn=_eval_input_fn)
Developer: AnishShah, Project: tensorflow, Lines of code: 26, Source file: estimator_test.py
Example 8: testFitAndEvaluateDontThrowExceptionWithCore
def testFitAndEvaluateDontThrowExceptionWithCore(self):
  learner_config = learner_pb2.LearnerConfig()
  learner_config.num_classes = 2
  learner_config.constraints.max_tree_depth = 1
  model_dir = tempfile.mkdtemp()
  config = run_config.RunConfig()
  # Use core head
  head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
      loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
  classifier = estimator.DNNBoostedTreeCombinedEstimator(
      head=head_fn,
      dnn_hidden_units=[1],
      # Use core feature columns
      dnn_feature_columns=[core_feature_column.numeric_column("x")],
      tree_learner_config=learner_config,
      num_trees=1,
      tree_examples_per_layer=3,
      model_dir=model_dir,
      config=config,
      dnn_steps_to_train=10,
      dnn_input_layer_to_tree=True,
      tree_feature_columns=[],
      use_core_versions=True)
  classifier.fit(input_fn=_train_input_fn, steps=15)
  classifier.evaluate(input_fn=_eval_input_fn, steps=1)
Developer: AndrewTwinz, Project: tensorflow, Lines of code: 28, Source file: dnn_tree_combined_estimator_test.py
Example 9: test_ar_lstm_regressor
def test_ar_lstm_regressor(self):
  dtype = dtypes.float32
  model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  exogenous_feature_columns = (
      feature_column.numeric_column("exogenous"),
  )
  estimator = estimators.LSTMAutoRegressor(
      periodicities=10,
      input_window_size=10,
      output_window_size=6,
      model_dir=model_dir,
      num_features=1,
      extra_feature_columns=exogenous_feature_columns,
      num_units=10,
      config=_SeedRunConfig())
  times = numpy.arange(20, dtype=numpy.int64)
  values = numpy.arange(20, dtype=dtype.as_numpy_dtype)
  exogenous = numpy.arange(20, dtype=dtype.as_numpy_dtype)
  features = {
      feature_keys.TrainEvalFeatures.TIMES: times,
      feature_keys.TrainEvalFeatures.VALUES: values,
      "exogenous": exogenous
  }
  train_input_fn = input_pipeline.RandomWindowInputFn(
      input_pipeline.NumpyReader(features), shuffle_seed=2, num_threads=1,
      batch_size=16, window_size=16)
  eval_input_fn = input_pipeline.RandomWindowInputFn(
      input_pipeline.NumpyReader(features), shuffle_seed=3, num_threads=1,
      batch_size=16, window_size=16)
  estimator.train(input_fn=train_input_fn, steps=1)
  evaluation = estimator.evaluate(
      input_fn=eval_input_fn, steps=1)
  self.assertAllEqual(evaluation["loss"], evaluation["average_loss"])
  self.assertAllEqual([], evaluation["loss"].shape)
Developer: Ajaycs99, Project: tensorflow, Lines of code: 34, Source file: estimators_test.py
Example 10: _serving_input_receiver_fn
def _serving_input_receiver_fn():
  """A receiver function to be passed to export_savedmodel."""
  times_column = feature_column.numeric_column(
      key=feature_keys.TrainEvalFeatures.TIMES, dtype=dtypes.int64)
  values_column = feature_column.numeric_column(
      key=feature_keys.TrainEvalFeatures.VALUES, dtype=values_input_dtype,
      shape=(self._model.num_features,))
  parsed_features_no_sequence = (
      feature_column.make_parse_example_spec(
          list(self._model.exogenous_feature_columns)
          + [times_column, values_column]))
  parsed_features = {}
  for key, feature_spec in parsed_features_no_sequence.items():
    if isinstance(feature_spec, parsing_ops.FixedLenFeature):
      if key == feature_keys.TrainEvalFeatures.VALUES:
        parsed_features[key] = feature_spec._replace(
            shape=((values_proto_length,)
                   + feature_spec.shape))
      else:
        parsed_features[key] = feature_spec._replace(
            shape=((filtering_length + prediction_length,)
                   + feature_spec.shape))
    elif feature_spec.dtype == dtypes.string:
      parsed_features[key] = parsing_ops.FixedLenFeature(
          shape=(filtering_length + prediction_length,),
          dtype=dtypes.string)
    else:  # VarLenFeature
      raise ValueError("VarLenFeatures not supported, got %s for key %s"
                       % (feature_spec, key))
  tfexamples = array_ops.placeholder(
      shape=[default_batch_size], dtype=dtypes.string, name="input")
  features = parsing_ops.parse_example(
      serialized=tfexamples,
      features=parsed_features)
  features[feature_keys.TrainEvalFeatures.TIMES] = array_ops.squeeze(
      features[feature_keys.TrainEvalFeatures.TIMES], axis=-1)
  features[feature_keys.TrainEvalFeatures.VALUES] = math_ops.cast(
      features[feature_keys.TrainEvalFeatures.VALUES],
      dtype=self._model.dtype)[:, :filtering_length]
  features.update(
      self._model_start_state_placeholders(
          batch_size_tensor=array_ops.shape(
              features[feature_keys.TrainEvalFeatures.TIMES])[0],
          static_batch_size=default_batch_size))
  return export_lib.ServingInputReceiver(
      features, {"examples": tfexamples})
Developer: Ajaycs99, Project: tensorflow, Lines of code: 46, Source file: estimators.py
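Example 10 leans on make_parse_example_spec to turn its feature columns, including the two numeric_columns, into a parse_example feature spec. As a rough, hedged sketch of that relationship (the 'times'/'values' keys and the 3-feature shape below are illustrative assumptions, not the constants used in the example), each numeric column maps to a FixedLenFeature carrying the column's shape and dtype:

# Hedged sketch; key names and shapes are made up for illustration.
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.framework import dtypes

times = fc.numeric_column('times', dtype=dtypes.int64)
values = fc.numeric_column('values', shape=(3,), dtype=dtypes.float32)
spec = fc.make_parse_example_spec([times, values])
# Expected to resemble:
# {'times': FixedLenFeature(shape=(1,), dtype=tf.int64, default_value=None),
#  'values': FixedLenFeature(shape=(3,), dtype=tf.float32, default_value=None)}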
Example 11: test_subclassed_model_with_feature_columns
def test_subclassed_model_with_feature_columns(self):
  col_a = fc.numeric_column('a')
  col_b = fc.numeric_column('b')
  dnn_model = TestDNNModel([col_a, col_b], 20)
  dnn_model.compile(
      optimizer='rmsprop',
      loss='categorical_crossentropy',
      metrics=['accuracy'],
      run_eagerly=testing_utils.should_run_eagerly())
  x = {'a': np.random.random((10, 1)), 'b': np.random.random((10, 1))}
  y = np.random.randint(20, size=(10, 1))
  y = keras.utils.to_categorical(y, num_classes=20)
  dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
  dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
  dnn_model.evaluate(x=x, y=y, batch_size=5)
  dnn_model.predict(x=x, batch_size=5)
Developer: Wajih-O, Project: tensorflow, Lines of code: 19, Source file: feature_columns_integration_test.py
Example 12: test_subclassed_model_with_feature_columns_with_ds_input
def test_subclassed_model_with_feature_columns_with_ds_input(self):
  col_a = fc.numeric_column('a')
  col_b = fc.numeric_column('b')
  dnn_model = TestDNNModel([col_a, col_b], 20)
  dnn_model.compile(
      optimizer='rmsprop',
      loss='categorical_crossentropy',
      metrics=['accuracy'],
      run_eagerly=testing_utils.should_run_eagerly())
  y = np.random.randint(20, size=(100, 1))
  y = keras.utils.to_categorical(y, num_classes=20)
  x = {'a': np.random.random((100, 1)), 'b': np.random.random((100, 1))}
  ds1 = dataset_ops.Dataset.from_tensor_slices(x)
  ds2 = dataset_ops.Dataset.from_tensor_slices(y)
  ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
  dnn_model.fit(ds, steps_per_epoch=1)
  dnn_model.fit(ds, steps_per_epoch=1)
  dnn_model.evaluate(ds, steps=1)
  dnn_model.predict(ds, steps=1)
Developer: Wajih-O, Project: tensorflow, Lines of code: 22, Source file: feature_columns_integration_test.py
Example 13: DISABLED_test_function_model_feature_layer_input
def DISABLED_test_function_model_feature_layer_input(self):
  col_a = fc.numeric_column('a')
  col_b = fc.numeric_column('b')
  feature_layer = fc.DenseFeatures([col_a, col_b], name='fc')
  dense = keras.layers.Dense(4)
  # This seems problematic.... We probably need something for DenseFeatures
  # the way Input is for InputLayer.
  output = dense(feature_layer)
  model = keras.models.Model([feature_layer], [output])
  optimizer = 'rmsprop'
  loss = 'mse'
  loss_weights = [1., 0.5]
  model.compile(
      optimizer,
      loss,
      metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
      loss_weights=loss_weights)
  data = ({'a': np.arange(10), 'b': np.arange(10)}, np.arange(10, 20))
  print(model.fit(*data, epochs=1))
Developer: Wajih-O, Project: tensorflow, Lines of code: 24, Source file: feature_columns_integration_test.py
Example 14: testTrainEvaluateWithDnnForInputAndTreeForPredict
def testTrainEvaluateWithDnnForInputAndTreeForPredict(self):
  head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
      loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
  learner_config = learner_pb2.LearnerConfig()
  learner_config.num_classes = 2
  learner_config.constraints.max_tree_depth = 3
  model_dir = tempfile.mkdtemp()
  config = run_config.RunConfig()
  est = estimator.CoreDNNBoostedTreeCombinedEstimator(
      head=head_fn,
      dnn_hidden_units=[1],
      dnn_feature_columns=[core_feature_column.numeric_column("x")],
      tree_learner_config=learner_config,
      num_trees=1,
      tree_examples_per_layer=3,
      model_dir=model_dir,
      config=config,
      dnn_steps_to_train=10,
      dnn_input_layer_to_tree=True,
      predict_with_tree_only=True,
      dnn_to_tree_distillation_param=(0.5, None),
      tree_feature_columns=[])
  # Train for a few steps.
  est.train(input_fn=_train_input_fn, steps=1000)
  res = est.evaluate(input_fn=_eval_input_fn, steps=1)
  self.assertLess(0.5, res["auc"])
  est.predict(input_fn=_eval_input_fn)
  serving_input_fn = (
      export.build_parsing_serving_input_receiver_fn(
          feature_spec={"x": parsing_ops.FixedLenFeature(
              [1], dtype=dtypes.float32)}))
  base_exporter = exporter.FinalExporter(
      name="Servo",
      serving_input_receiver_fn=serving_input_fn,
      assets_extra=None)
  export_path = os.path.join(model_dir, "export")
  base_exporter.export(
      est,
      export_path=export_path,
      checkpoint_path=None,
      eval_result={},
      is_the_final_export=True)
Developer: Ajaycs99, Project: tensorflow, Lines of code: 45, Source file: dnn_tree_combined_estimator_test.py
Example 15: test_functional_input_layer_with_numpy_input_fn
def test_functional_input_layer_with_numpy_input_fn(self):
  embedding_values = (
      (1., 2., 3., 4., 5.),  # id 0
      (6., 7., 8., 9., 10.),  # id 1
      (11., 12., 13., 14., 15.)  # id 2
  )
  def _initializer(shape, dtype, partition_info):
    del shape, dtype, partition_info
    return embedding_values
  # price has 1 dimension in input_layer
  price = fc.numeric_column('price')
  body_style = fc.categorical_column_with_vocabulary_list(
      'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
  # one_hot_body_style has 3 dims in input_layer.
  one_hot_body_style = fc.indicator_column(body_style)
  # embedded_body_style has 5 dims in input_layer.
  embedded_body_style = fc.embedding_column(body_style, dimension=5,
                                            initializer=_initializer)
  input_fn = numpy_io.numpy_input_fn(
      x={
          'price': np.array([11., 12., 13., 14.]),
          'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
      },
      batch_size=2,
      shuffle=False)
  features = input_fn()
  net = fc.input_layer(features,
                       [price, one_hot_body_style, embedded_body_style])
  self.assertEqual(1 + 3 + 5, net.shape[1])
  with self._initialized_session() as sess:
    coord = coordinator.Coordinator()
    threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
    # Each row is formed by concatenating `embedded_body_style`,
    # `one_hot_body_style`, and `price` in order.
    self.assertAllEqual(
        [[11., 12., 13., 14., 15., 0., 0., 1., 11.],
         [1., 2., 3., 4., 5., 1., 0., 0., 12]],
        sess.run(net))
    coord.request_stop()
    coord.join(threads)
Developer: ZhangXinNan, Project: tensorflow, Lines of code: 44, Source file: numpy_io_test.py
Example 16: testFitAndEvaluateDontThrowExceptionWithCoreForClassifier
def testFitAndEvaluateDontThrowExceptionWithCoreForClassifier(self):
  learner_config = learner_pb2.LearnerConfig()
  learner_config.num_classes = 2
  learner_config.constraints.max_tree_depth = 1
  model_dir = tempfile.mkdtemp()
  config = run_config.RunConfig()
  classifier = estimator.GradientBoostedDecisionTreeClassifier(
      learner_config=learner_config,
      num_trees=1,
      examples_per_layer=3,
      model_dir=model_dir,
      config=config,
      feature_columns=[core_feature_column.numeric_column("x")],
      use_core_libs=True)
  classifier.fit(input_fn=_train_input_fn, steps=15)
  classifier.evaluate(input_fn=_eval_input_fn, steps=1)
  classifier.export(self._export_dir_base)
Developer: Jackiefan, Project: tensorflow, Lines of code: 19, Source file: estimator_test.py
Example 17: test_sequential_model
def test_sequential_model(self):
  columns = [fc.numeric_column('a')]
  model = keras.models.Sequential([
      fc.DenseFeatures(columns),
      keras.layers.Dense(64, activation='relu'),
      keras.layers.Dense(20, activation='softmax')
  ])
  model.compile(
      optimizer='rmsprop',
      loss='categorical_crossentropy',
      metrics=['accuracy'],
      run_eagerly=testing_utils.should_run_eagerly())
  x = {'a': np.random.random((10, 1))}
  y = np.random.randint(20, size=(10, 1))
  y = keras.utils.to_categorical(y, num_classes=20)
  model.fit(x, y, epochs=1, batch_size=5)
  model.fit(x, y, epochs=1, batch_size=5)
  model.evaluate(x, y, batch_size=5)
  model.predict(x, batch_size=5)
Developer: Wajih-O, Project: tensorflow, Lines of code: 20, Source file: feature_columns_integration_test.py
Example 18: test_sequential_model_with_ds_input
def test_sequential_model_with_ds_input(self):
  columns = [fc.numeric_column('a')]
  model = keras.models.Sequential([
      fc.DenseFeatures(columns),
      keras.layers.Dense(64, activation='relu'),
      keras.layers.Dense(20, activation='softmax')
  ])
  model.compile(
      optimizer='rmsprop',
      loss='categorical_crossentropy',
      metrics=['accuracy'],
      run_eagerly=testing_utils.should_run_eagerly())
  y = np.random.randint(20, size=(100, 1))
  y = keras.utils.to_categorical(y, num_classes=20)
  x = {'a': np.random.random((100, 1))}
  ds1 = dataset_ops.Dataset.from_tensor_slices(x)
  ds2 = dataset_ops.Dataset.from_tensor_slices(y)
  ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
  model.fit(ds, steps_per_epoch=1)
  model.fit(ds, steps_per_epoch=1)
  model.evaluate(ds, steps=1)
  model.predict(ds, steps=1)
Developer: Wajih-O, Project: tensorflow, Lines of code: 23, Source file: feature_columns_integration_test.py
Example 19: testTrainEvaluateInferDoesNotThrowError
def testTrainEvaluateInferDoesNotThrowError(self):
  head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
      loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
  learner_config = learner_pb2.LearnerConfig()
  learner_config.num_classes = 2
  learner_config.constraints.max_tree_depth = 1
  model_dir = tempfile.mkdtemp()
  config = run_config.RunConfig()
  est = estimator.CoreGradientBoostedDecisionTreeEstimator(
      head=head_fn,
      learner_config=learner_config,
      num_trees=1,
      examples_per_layer=3,
      model_dir=model_dir,
      config=config,
      feature_columns=[core_feature_column.numeric_column("x")])
  # Train for a few steps.
  est.train(input_fn=_train_input_fn, steps=1000)
  est.evaluate(input_fn=_eval_input_fn, steps=1)
  est.predict(input_fn=_eval_input_fn)
Developer: AnishShah, Project: tensorflow, Lines of code: 23, Source file: estimator_test.py
Example 20: testFitAndEvaluateDontThrowExceptionWithCoreForEstimator
def testFitAndEvaluateDontThrowExceptionWithCoreForEstimator(self):
  learner_config = learner_pb2.LearnerConfig()
  learner_config.num_classes = 2
  learner_config.constraints.max_tree_depth = 1
  model_dir = tempfile.mkdtemp()
  config = run_config.RunConfig()
  # Use core head
  head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
      loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
  model = estimator.GradientBoostedDecisionTreeEstimator(
      head=head_fn,
      learner_config=learner_config,
      num_trees=1,
      examples_per_layer=3,
      model_dir=model_dir,
      config=config,
      feature_columns=[core_feature_column.numeric_column("x")],
      use_core_libs=True)
  model.fit(input_fn=_train_input_fn, steps=15)
  model.evaluate(input_fn=_eval_input_fn, steps=1)
  model.export(self._export_dir_base)
Developer: Jackiefan, Project: tensorflow, Lines of code: 24, Source file: estimator_test.py
Note: The examples of tensorflow.python.feature_column.feature_column_lib.numeric_column in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's license before redistributing or using the code. Do not reproduce this article without permission.