本文整理汇总了Python中tensorflow.contrib.layers.real_valued_column函数的典型用法代码示例。如果您正苦于以下问题:Python real_valued_column函数的具体用法?Python real_valued_column怎么用?Python real_valued_column使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了real_valued_column函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: testLinearlySeparableBinaryDataNoKernels
def testLinearlySeparableBinaryDataNoKernels(self):
    """Tests classifier w/o kernels (log. regression) for lin-separable data."""
    columns = [
        layers.real_valued_column('feature1'),
        layers.real_valued_column('feature2'),
    ]
    classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=columns)
    classifier.fit(
        input_fn=_linearly_separable_binary_input_fn, steps=100)
    eval_metrics = classifier.evaluate(
        input_fn=_linearly_separable_binary_input_fn, steps=1)
    # Linearly separable data: expect near-zero loss and perfect accuracy.
    self.assertLess(eval_metrics['loss'], 0.1)
    self.assertEqual(eval_metrics['accuracy'], 1.0)
    # Class 1 should dominate for examples 0 and 2; class 0 for example 1.
    probabilities = list(
        classifier.predict_proba(
            input_fn=_linearly_separable_binary_input_fn))
    self.assertGreater(probabilities[0][1], 0.5)
    self.assertGreater(probabilities[1][0], 0.5)
    self.assertGreater(probabilities[2][1], 0.5)
开发者ID:1000sprites,项目名称:tensorflow,代码行数:26,代码来源:kernel_estimators_test.py
示例2: get_wide_deep
def get_wide_deep():
    """Returns (wide, deep) feature-column lists for the natality model."""
    # NOTE(review): 'Samaon' looks like a typo for 'Samoan' but is kept
    # byte-identical — it must match the vocabulary in the training data.
    races = ['White', 'Black', 'American Indian', 'Chinese',
             'Japanese', 'Hawaiian', 'Filipino', 'Unknown',
             'Asian Indian', 'Korean', 'Samaon', 'Vietnamese']
    is_male = tflayers.sparse_column_with_keys(
        'is_male', keys=['True', 'False'])
    mother_age = tflayers.real_valued_column('mother_age')
    mother_race = tflayers.sparse_column_with_keys('mother_race', keys=races)
    plurality = tflayers.real_valued_column('plurality')
    gestation_weeks = tflayers.real_valued_column('gestation_weeks')
    mother_married = tflayers.sparse_column_with_keys(
        'mother_married', keys=['True', 'False'])
    cigarette_use = tflayers.sparse_column_with_keys(
        'cigarette_use', keys=['True', 'False', 'None'])
    alcohol_use = tflayers.sparse_column_with_keys(
        'alcohol_use', keys=['True', 'False', 'None'])
    # Wide: sparse columns with a linear relationship to the output.
    # Deep: continuous columns plus an embedding of mother_race.
    wide = [is_male, mother_race, plurality, mother_married, cigarette_use,
            alcohol_use]
    deep = [
        mother_age,
        gestation_weeks,
        tflayers.embedding_column(mother_race, 3),
    ]
    return wide, deep
开发者ID:rpc01,项目名称:training-data-analyst,代码行数:25,代码来源:model.py
示例3: testMulticlassDataWithAndWithoutKernels
def testMulticlassDataWithAndWithoutKernels(self):
    """Tests classifier w/ and w/o kernels on multiclass data."""
    iris_column = layers.real_valued_column('feature', dimension=4)

    # Baseline: plain linear classifier, no kernel mapping.
    plain_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[iris_column], n_classes=3)
    plain_classifier.fit(
        input_fn=test_data.iris_input_multiclass_fn, steps=50)
    plain_metrics = plain_classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=1)
    plain_loss = plain_metrics['loss']
    plain_accuracy = plain_metrics['accuracy']

    # Kernelized: approximate an RBF kernel via random Fourier features so
    # the (still linear) model can pick up non-linearities in the data.
    mappers = {
        iris_column: [
            RandomFourierFeatureMapper(
                input_dim=4, output_dim=50, stddev=1.0, name='rffm')
        ]
    }
    rffm_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[], n_classes=3, kernel_mappers=mappers)
    rffm_classifier.fit(
        input_fn=test_data.iris_input_multiclass_fn, steps=50)
    rffm_metrics = rffm_classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=1)

    # The kernelized model should strictly improve on the linear baseline.
    self.assertLess(rffm_metrics['loss'], plain_loss)
    self.assertGreater(rffm_metrics['accuracy'], plain_accuracy)
开发者ID:1000sprites,项目名称:tensorflow,代码行数:31,代码来源:kernel_estimators_test.py
示例4: get_conv_classifier
def get_conv_classifier():
    """Builds an SKCompat-wrapped Estimator around `get_conv_model`."""
    n_classes = 5
    # Empty column name with dimension=3: matches all 3-dim real input.
    feature_columns = [layers.real_valued_column("", dimension=3)]
    # Optimizers tried during tuning: Adagrad(1.0), Adadelta(1.0) (~62.55%),
    # GradientDescent(0.05), RMSProp(0.1, momentum=0.1), Ftrl(0.1).
    # Adam with a large epsilon worked best.
    learning_rate = 0.01
    optimizer = AdamOptimizer(learning_rate, epsilon=0.1)
    params = {
        'head': head_lib._multi_class_head(  # pylint: disable=protected-access
            n_classes,
            enable_centered_bias=False),
        'feature_columns': feature_columns,
        'activation_fn': tf.nn.relu,
        'learning_rate': learning_rate,
        'optimizer': optimizer,
    }
    return SKCompat(
        Estimator(
            model_fn=get_conv_model,
            params=params,
            model_dir='saved_model'))
开发者ID:soswow,项目名称:Various-JS-and-Python,代码行数:35,代码来源:machine_learning.py
示例5: testInvalidNumberOfClasses
def testInvalidNumberOfClasses(self):
    """ValueError raised when n_classes is invalid (binary needs at least 2)."""
    feature = layers.real_valued_column('feature')
    # n_classes=1 is meaningless for classification and must be rejected at
    # construction time.
    with self.assertRaises(ValueError):
        _ = kernel_estimators.KernelLinearClassifier(
            feature_columns=[feature], n_classes=1)
开发者ID:1000sprites,项目名称:tensorflow,代码行数:7,代码来源:kernel_estimators_test.py
示例6: _add_bias_column
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable, targets, columns_to_variables):
# TODO(b/31008490): Move definition to a common constants place.
bias_column_name = "tf_virtual_bias_column"
if any(col.name is bias_column_name for col in feature_columns):
raise ValueError("%s is a reserved column name." % bias_column_name)
bias_column = layers.real_valued_column(bias_column_name)
columns_to_tensors[bias_column] = array_ops.ones_like(targets, dtype=dtypes.float32)
columns_to_variables[bias_column] = [bias_variable]
开发者ID:flyingbirdman,项目名称:tensorflow,代码行数:8,代码来源:linear.py
示例7: get_wide_deep
def get_wide_deep():
    """Returns (wide, deep) feature-column lists for the pricing model."""
    style_name = tflayers.sparse_column_with_hash_bucket(
        'Style_Name', hash_bucket_size=1000)
    quantity = tflayers.real_valued_column('Quantity')
    demand = tflayers.real_valued_column('Demand')
    org_ret_price = tflayers.real_valued_column('Original_Retail_Price')
    sell_price = tflayers.real_valued_column('Selling_Price')
    margin = tflayers.real_valued_column('Margin')
    off_orig_retail = tflayers.real_valued_column('off_Orig_Retail')
    total_ots = tflayers.real_valued_column('Total_OTS')
    # Wide: columns with a (roughly) linear relationship to the output.
    # Deep: remaining continuous columns plus an embedding of the hashed
    # style name for the DNN part.
    wide = [style_name, quantity, demand]
    deep = [
        org_ret_price,
        sell_price,
        margin,
        off_orig_retail,
        total_ots,
        tflayers.embedding_column(style_name, 3),
    ]
    return wide, deep
开发者ID:tpatil0412,项目名称:Dynamic-Pricing,代码行数:25,代码来源:model.py
示例8: get_features_ch8
def get_features_ch8():
    """Chapter 7's three inputs plus the time averages computed in Chapter 8."""
    names = 'dep_delay,taxiout,distance,avg_dep_delay,avg_arr_delay'.split(',')
    real = {name: tflayers.real_valued_column(name) for name in names}
    sparse = {}
    return real, sparse
开发者ID:yogiadi,项目名称:data-science-on-gcp,代码行数:9,代码来源:model.py
示例9: get_features_ch7
def get_features_ch7():
    """Using only the three inputs we originally used in Chapter 7."""
    names = 'dep_delay,taxiout,distance'.split(',')
    real = {name: tflayers.real_valued_column(name) for name in names}
    sparse = {}
    return real, sparse
开发者ID:yogiadi,项目名称:data-science-on-gcp,代码行数:9,代码来源:model.py
示例10: get_classifier
def get_classifier():
    """Builds a 5-class DNNClassifier over 3-dim real-valued input."""
    # Empty column name with dimension=3 — the input is (kernel_size *
    # kernel_size, 3) per the original note.
    columns = [layers.real_valued_column("", dimension=3)]
    # Earlier experiments (left disabled): Adadelta(0.1), Adam, dropout=0.5.
    return DNNClassifier(
        feature_columns=columns,
        hidden_units=[256, 128],
        n_classes=5,
        model_dir="saved_model")
开发者ID:soswow,项目名称:Various-JS-and-Python,代码行数:11,代码来源:machine_learning.py
示例11: testInvalidKernelMapper
def testInvalidKernelMapper(self):
    """ValueError raised when the kernel mappers provided have invalid type."""

    class FakeMapper(object):
        """Object that does not implement the kernel-mapper interface."""

        def __init__(self):
            pass

    feature = layers.real_valued_column('feature')
    bad_mappers = {feature: [FakeMapper()]}
    with self.assertRaises(ValueError):
        _ = kernel_estimators.KernelLinearClassifier(
            feature_columns=[feature], kernel_mappers=bad_mappers)
开发者ID:1000sprites,项目名称:tensorflow,代码行数:13,代码来源:kernel_estimators_test.py
示例12: get_features_raw
def get_features_raw():
    """Real and sparse feature columns over the raw flight fields."""
    real_names = ('dep_delay,taxiout,distance,avg_dep_delay,avg_arr_delay'
                  ',dep_lat,dep_lon,arr_lat,arr_lon').split(',')
    real = {name: tflayers.real_valued_column(name) for name in real_names}
    carriers = 'AS,VX,F9,UA,US,WN,HA,EV,MQ,DL,OO,B6,NK,AA'.split(',')
    sparse = {
        'carrier': tflayers.sparse_column_with_keys('carrier', keys=carriers),
        # FIXME: hashed buckets can collide; consider a real vocabulary.
        'origin': tflayers.sparse_column_with_hash_bucket(
            'origin', hash_bucket_size=1000),
        # FIXME: same collision caveat as 'origin'.
        'dest': tflayers.sparse_column_with_hash_bucket(
            'dest', hash_bucket_size=1000),
    }
    return real, sparse
开发者ID:yogiadi,项目名称:data-science-on-gcp,代码行数:14,代码来源:model.py
示例13: testExtractFeaturesWithTransformation
def testExtractFeaturesWithTransformation(self):
    """Tests feature extraction."""
    with self.test_session():
        features = {}
        # One dense float feature of shape [2, 1].
        features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
        # Sparse float feature: (indices, values, dense_shape) all zeros.
        features["sparse_float"] = sparse_tensor.SparseTensor(
            array_ops.zeros([2, 2], dtypes.int64),
            array_ops.zeros([2], dtypes.float32),
            array_ops.zeros([2], dtypes.int64))
        # Sparse categorical feature with string values (hashed below).
        features["sparse_categorical"] = sparse_tensor.SparseTensor(
            array_ops.zeros([2, 2], dtypes.int64),
            array_ops.zeros(
                [2], dtypes.string), array_ops.zeros([2], dtypes.int64))
        # One feature column per feature, matching dict keys by name.
        feature_columns = set()
        feature_columns.add(layers.real_valued_column("dense_float"))
        feature_columns.add(
            layers.feature_column._real_valued_var_len_column(
                "sparse_float", is_sparse=True))
        feature_columns.add(
            feature_column_lib.sparse_column_with_hash_bucket(
                "sparse_categorical", hash_bucket_size=1000000))
        # extract_features partitions the columns into dense-float,
        # sparse-float, and sparse-int (categorical) groups.
        (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
         sparse_float_shapes, sparse_int_indices, sparse_int_values,
         sparse_int_shapes) = (gbdt_batch.extract_features(
             features, feature_columns))
        self.assertEqual(len(fc_names), 3)
        self.assertAllEqual(fc_names,
                            ["dense_float", "sparse_float", "sparse_categorical"])
        # Exactly one column landed in each group.
        self.assertEqual(len(dense_floats), 1)
        self.assertEqual(len(sparse_float_indices), 1)
        self.assertEqual(len(sparse_float_values), 1)
        self.assertEqual(len(sparse_float_shapes), 1)
        self.assertEqual(len(sparse_int_indices), 1)
        self.assertEqual(len(sparse_int_values), 1)
        self.assertEqual(len(sparse_int_shapes), 1)
        # Extracted tensors round-trip the original feature tensors.
        self.assertAllEqual(dense_floats[0].eval(),
                            features["dense_float"].eval())
        self.assertAllEqual(sparse_float_indices[0].eval(),
                            features["sparse_float"].indices.eval())
        self.assertAllEqual(sparse_float_values[0].eval(),
                            features["sparse_float"].values.eval())
        self.assertAllEqual(sparse_float_shapes[0].eval(),
                            features["sparse_float"].dense_shape.eval())
        self.assertAllEqual(sparse_int_indices[0].eval(),
                            features["sparse_categorical"].indices.eval())
        # 397263 is presumably hash("") mod 1000000 for this hash bucket
        # implementation — TODO confirm against the hashing op.
        self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263])
        self.assertAllEqual(sparse_int_shapes[0].eval(),
                            features["sparse_categorical"].dense_shape.eval())
开发者ID:chdinh,项目名称:tensorflow,代码行数:48,代码来源:gbdt_batch_test.py
示例14: testClassifierWithAndWithoutKernelsNoRealValuedColumns
def testClassifierWithAndWithoutKernelsNoRealValuedColumns(self):
    """Tests that kernels have no effect on non-real-valued columns."""

    def input_fn():
        # Three examples: one real-valued feature ('price') and one sparse
        # categorical feature ('country'); labels are binary.
        return {
            'price':
                constant_op.constant([[0.4], [0.6], [0.3]]),
            'country':
                sparse_tensor.SparseTensor(
                    values=['IT', 'US', 'GB'],
                    indices=[[0, 0], [1, 3], [2, 1]],
                    dense_shape=[3, 5]),
        }, constant_op.constant([[1], [0], [1]])

    price = layers.real_valued_column('price')
    country = layers.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    # Baseline linear classifier over both columns.
    linear_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[price, country])
    linear_classifier.fit(input_fn=input_fn, steps=100)
    linear_metrics = linear_classifier.evaluate(input_fn=input_fn, steps=1)
    linear_loss = linear_metrics['loss']
    linear_accuracy = linear_metrics['accuracy']
    # Attach a kernel mapper to the *sparse* column only.
    kernel_mappers = {
        country: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')]
    }
    kernel_linear_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[price, country], kernel_mappers=kernel_mappers)
    kernel_linear_classifier.fit(input_fn=input_fn, steps=100)
    kernel_linear_metrics = kernel_linear_classifier.evaluate(
        input_fn=input_fn, steps=1)
    kernel_linear_loss = kernel_linear_metrics['loss']
    kernel_linear_accuracy = kernel_linear_metrics['accuracy']
    # The kernel mapping is applied to a non-real-valued feature column and so
    # it should have no effect on the model. The loss and accuracy of the
    # "kernelized" model should match the loss and accuracy of the initial model
    # (without kernels).
    self.assertAlmostEqual(linear_loss, kernel_linear_loss, delta=0.01)
    self.assertAlmostEqual(linear_accuracy, kernel_linear_accuracy, delta=0.01)
开发者ID:1000sprites,项目名称:tensorflow,代码行数:43,代码来源:kernel_estimators_test.py
示例15: testLinearlyInseparableBinaryDataWithAndWithoutKernels
def testLinearlyInseparableBinaryDataWithAndWithoutKernels(self):
    """Tests classifier w/ and w/o kernels on non-linearly-separable data."""
    multi_dim_feature = layers.real_valued_column(
        'multi_dim_feature', dimension=2)
    # Data points are non-linearly separable so there will be at least one
    # mis-classified sample (accuracy < 0.8). In fact, the loss is minimized for
    # w1=w2=0.0, in which case each example incurs a loss of ln(2). The overall
    # (average) loss should then be ln(2) and the logits should be approximately
    # 0.0 for each sample.
    logreg_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[multi_dim_feature])
    logreg_classifier.fit(
        input_fn=_linearly_inseparable_binary_input_fn, steps=50)
    logreg_metrics = logreg_classifier.evaluate(
        input_fn=_linearly_inseparable_binary_input_fn, steps=1)
    logreg_loss = logreg_metrics['loss']
    logreg_accuracy = logreg_metrics['accuracy']
    logreg_predictions = logreg_classifier.predict(
        input_fn=_linearly_inseparable_binary_input_fn, as_iterable=False)
    self.assertAlmostEqual(logreg_loss, np.log(2), places=3)
    self.assertLess(logreg_accuracy, 0.8)
    self.assertAllClose(logreg_predictions['logits'], [[0.0], [0.0], [0.0],
                                                       [0.0]])
    # Using kernel mappers allows to discover non-linearities in data. Mapping
    # the data to a higher dimensional feature space using approx RBF kernels,
    # substantially reduces the loss and leads to perfect classification
    # accuracy.
    # Positional args are presumably (input_dim, output_dim, stddev, seed,
    # name) — matches the keyword call in the multiclass test; confirm.
    kernel_mappers = {
        multi_dim_feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')]
    }
    kernelized_logreg_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[], kernel_mappers=kernel_mappers)
    kernelized_logreg_classifier.fit(
        input_fn=_linearly_inseparable_binary_input_fn, steps=50)
    kernelized_logreg_metrics = kernelized_logreg_classifier.evaluate(
        input_fn=_linearly_inseparable_binary_input_fn, steps=1)
    kernelized_logreg_loss = kernelized_logreg_metrics['loss']
    kernelized_logreg_accuracy = kernelized_logreg_metrics['accuracy']
    self.assertLess(kernelized_logreg_loss, 0.2)
    self.assertEqual(kernelized_logreg_accuracy, 1.0)
开发者ID:1000sprites,项目名称:tensorflow,代码行数:42,代码来源:kernel_estimators_test.py
示例16: testVariablesWithAndWithoutKernels
def testVariablesWithAndWithoutKernels(self):
    """Tests variables w/ and w/o kernel."""
    multi_dim_feature = layers.real_valued_column(
        'multi_dim_feature', dimension=2)
    # Plain linear classifier: one weight per input dimension plus a bias.
    linear_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[multi_dim_feature])
    linear_classifier.fit(
        input_fn=_linearly_inseparable_binary_input_fn, steps=50)
    linear_variables = linear_classifier.get_variable_names()
    self.assertIn('linear/multi_dim_feature/weight', linear_variables)
    self.assertIn('linear/bias_weight', linear_variables)
    linear_weights = linear_classifier.get_variable_value(
        'linear/multi_dim_feature/weight')
    linear_bias = linear_classifier.get_variable_value('linear/bias_weight')
    # Kernelized classifier: the mapped column's weight variable carries a
    # "_MAPPED" suffix in its name.
    kernel_mappers = {
        multi_dim_feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')]
    }
    kernel_linear_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[], kernel_mappers=kernel_mappers)
    kernel_linear_classifier.fit(
        input_fn=_linearly_inseparable_binary_input_fn, steps=50)
    kernel_linear_variables = kernel_linear_classifier.get_variable_names()
    self.assertIn('linear/multi_dim_feature_MAPPED/weight',
                  kernel_linear_variables)
    self.assertIn('linear/bias_weight', kernel_linear_variables)
    kernel_linear_weights = kernel_linear_classifier.get_variable_value(
        'linear/multi_dim_feature_MAPPED/weight')
    kernel_linear_bias = kernel_linear_classifier.get_variable_value(
        'linear/bias_weight')
    # The feature column used for linear classification (no kernels) has
    # dimension 2 so the model will learn a 2-dimension weights vector (and a
    # scalar for the bias). In the kernelized model, the features are mapped to
    # a 30-dimensional feature space and so the weights variable will also have
    # dimension 30.
    self.assertEqual(2, len(linear_weights))
    self.assertEqual(1, len(linear_bias))
    self.assertEqual(30, len(kernel_linear_weights))
    self.assertEqual(1, len(kernel_linear_bias))
开发者ID:1000sprites,项目名称:tensorflow,代码行数:41,代码来源:kernel_estimators_test.py
示例17: LabelEncoder
# Integer-encode each categorical variable with a fitted LabelEncoder and
# keep the encoders so the same mapping can be reapplied at prediction time.
# NOTE(review): X_train/X_test are presumably pandas DataFrames, and
# `categorical_vars` / `continues_vars` (sic — defined earlier in the file)
# list the column names — confirm against the surrounding script.
X_train = X_train.copy()
X_test = X_test.copy()
categorical_var_encoders = {}
for var in categorical_vars:
    le = LabelEncoder().fit(X_train[var])
    # Replace the raw column with its integer-id counterpart.
    X_train[var + '_ids'] = le.transform(X_train[var])
    X_test[var + '_ids'] = le.transform(X_test[var])
    X_train.pop(var)
    X_test.pop(var)
    categorical_var_encoders[var] = le
### Note: Feature Columns currently (2016/10/22) not working, update is coming.
# Setup feature columns.
CATEGORICAL_EMBED_SIZE = 10  # Note, you can customize this per variable.
# Real-valued columns for continuous variables, embedding columns (sized by
# each encoder's vocabulary) for the integer-encoded categoricals.
feature_columns = [
    layers.real_valued_column(var) for var in continues_vars
] + [
    layers.embedding_column(
        layers.sparse_column_with_integerized_feature(
            var + '_ids', len(categorical_var_encoders[var].classes_)),
        CATEGORICAL_EMBED_SIZE) for var in
    categorical_vars
]
# Linear classifier.
'''
random.seed(42)
tflr = learn.LinearClassifier(n_classes=2,
feature_columns=feature_columns,
optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05))
开发者ID:ilblackdragon,项目名称:tf_examples,代码行数:31,代码来源:titanic_all_features_with_fc.py
示例18: test_savedmodel_state_override
def test_savedmodel_state_override(self):
    """Filtering/prediction via a SavedModel matches split vs. combined runs."""
    random_model = RandomStateSpaceModel(
        state_dimension=5,
        state_noise_dimension=4,
        configuration=state_space_model.StateSpaceModelConfiguration(
            exogenous_feature_columns=[layers.real_valued_column("exogenous")],
            dtype=dtypes.float64, num_features=1))
    estimator = estimators.StateSpaceRegressor(
        model=random_model,
        optimizer=gradient_descent.GradientDescentOptimizer(0.1))
    combined_input_fn = input_pipeline.WholeDatasetInputFn(
        input_pipeline.NumpyReader({
            feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
            feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
            "exogenous": [-1., -2., -3., -4.]
        }))
    # One training step just to create variables, then export.
    estimator.train(combined_input_fn, steps=1)
    export_location = estimator.export_savedmodel(
        self.get_temp_dir(),
        estimator.build_raw_serving_input_receiver_fn())
    # Evaluate the model's start state in a fresh graph so it can be fed as
    # the override state to the exported SavedModel.
    with ops.Graph().as_default() as graph:
        random_model.initialize_graph()
        with self.session(graph=graph) as session:
            variables.global_variables_initializer().run()
            evaled_start_state = session.run(random_model.get_start_state())
    # Add a batch dimension to each state element.
    evaled_start_state = [
        state_element[None, ...] for state_element in evaled_start_state]
    with ops.Graph().as_default() as graph:
        with self.session(graph=graph) as session:
            signatures = loader.load(
                session, [tag_constants.SERVING], export_location)
            # Filter times [1, 2] then [3, 4] in two chained calls...
            first_split_filtering = saved_model_utils.filter_continuation(
                continue_from={
                    feature_keys.FilteringResults.STATE_TUPLE: evaled_start_state},
                signatures=signatures,
                session=session,
                features={
                    feature_keys.FilteringFeatures.TIMES: [1, 2],
                    feature_keys.FilteringFeatures.VALUES: [1., 2.],
                    "exogenous": [[-1.], [-2.]]})
            second_split_filtering = saved_model_utils.filter_continuation(
                continue_from=first_split_filtering,
                signatures=signatures,
                session=session,
                features={
                    feature_keys.FilteringFeatures.TIMES: [3, 4],
                    feature_keys.FilteringFeatures.VALUES: [3., 4.],
                    "exogenous": [[-3.], [-4.]]
                })
            # ...and all four times in a single call from the same start state.
            combined_filtering = saved_model_utils.filter_continuation(
                continue_from={
                    feature_keys.FilteringResults.STATE_TUPLE: evaled_start_state},
                signatures=signatures,
                session=session,
                features={
                    feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
                    feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
                    "exogenous": [[-1.], [-2.], [-3.], [-4.]]
                })
            # Predict one step ahead from each filtering result.
            split_predict = saved_model_utils.predict_continuation(
                continue_from=second_split_filtering,
                signatures=signatures,
                session=session,
                steps=1,
                exogenous_features={
                    "exogenous": [[[-5.]]]})
            combined_predict = saved_model_utils.predict_continuation(
                continue_from=combined_filtering,
                signatures=signatures,
                session=session,
                steps=1,
                exogenous_features={
                    "exogenous": [[[-5.]]]})
    # Split and combined runs must agree on filtered state (TIMES differ by
    # construction, so skip them) and on predictions.
    for state_key, combined_state_value in combined_filtering.items():
        if state_key == feature_keys.FilteringResults.TIMES:
            continue
        self.assertAllClose(
            combined_state_value, second_split_filtering[state_key])
    for prediction_key, combined_value in combined_predict.items():
        self.assertAllClose(combined_value, split_predict[prediction_key])
开发者ID:AnishShah,项目名称:tensorflow,代码行数:80,代码来源:state_space_model_test.py
示例19: _dnn_tree_combined_model_fn
#.........这里部分代码省略.........
"logits", values=(previous_layer,)) as logits_scope:
dnn_logits = layers.fully_connected(
previous_layer,
head.logits_dimension,
activation_fn=None,
variables_collections=[dnn_parent_scope],
scope=logits_scope)
_add_hidden_layer_summary(dnn_logits, logits_scope.name)
def _dnn_train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizers.optimize_loss(
loss=loss,
global_step=training_util.get_global_step(),
learning_rate=_DNN_LEARNING_RATE,
optimizer=_get_optimizer(dnn_optimizer),
name=dnn_parent_scope,
variables=ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES, scope=dnn_parent_scope),
# Empty summaries to prevent optimizers from logging training_loss.
summaries=[])
# Build Tree Logits.
global_step = training_util.get_global_step()
with ops.device(global_step.device):
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config="", # Initialize an empty ensemble.
name="ensemble_model")
tree_features = features.copy()
if dnn_input_layer_to_tree:
tree_features["dnn_input_layer"] = input_layer
tree_feature_columns.append(layers.real_valued_column("dnn_input_layer"))
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=config.is_chief,
num_ps_replicas=config.num_ps_replicas,
ensemble_handle=ensemble_handle,
center_bias=tree_center_bias,
examples_per_layer=tree_examples_per_layer,
learner_config=tree_learner_config,
feature_columns=tree_feature_columns,
logits_dimension=head.logits_dimension,
features=tree_features,
use_core_columns=use_core_versions)
with ops.name_scope("gbdt"):
predictions_dict = gbdt_model.predict(mode)
tree_logits = predictions_dict["predictions"]
def _tree_train_op_fn(loss):
"""Returns the op to optimize the loss."""
if dnn_to_tree_distillation_param:
loss_weight, loss_fn = dnn_to_tree_distillation_param
weight_tensor = head_lib._weight_tensor( # pylint: disable=protected-access
features, head.weight_column_name)
dnn_logits_fixed = array_ops.stop_gradient(dnn_logits)
if loss_fn is None:
# we create the loss_fn similar to the head loss_fn for
# multi_class_head used previously as the default one.
n_classes = 2 if head.logits_dimension == 1 else head.logits_dimension
loss_fn = distillation_loss.create_dnn_to_tree_cross_entropy_loss_fn(
n_classes)
dnn_to_tree_distillation_loss = loss_weight * loss_fn(
开发者ID:StephenOman,项目名称:tensorflow,代码行数:67,代码来源:dnn_tree_combined_estimator.py
示例20: dirname
from tensorflow.contrib.layers import bucketized_column, crossed_column, embedding_column, sparse_column_with_keys, sparse_column_with_hash_bucket, real_valued_column
from tempfile import mkdtemp
# Paths are resolved relative to this file's location.
PATH_TO_DIRECTORY_OF_THIS_FILE = dirname(realpath(__file__))
PATH_TO_DIRECTORY_OF_INPUT_DATA = PATH_TO_DIRECTORY_OF_THIS_FILE + "/data/input"
MODEL_DIR = PATH_TO_DIRECTORY_OF_THIS_FILE + "/classifier"
# Column groups for the model; the label column is appended last so COLUMNS
# matches the input CSV layout (sorted features + label).
CATEGORICAL_COLUMNS = ["admin_level", "country_code", "edit_distance", "has_mpoly", "has_pcode", "is_country", "is_highest_population", "is_lowest_admin_level", "matches_topic"]
CONTINUOUS_COLUMNS = ["cluster_frequency", "country_rank", "median_distance", "population", "popularity"]
LABEL_COLUMN = "correct"
COLUMNS = sorted(CATEGORICAL_COLUMNS + CONTINUOUS_COLUMNS) + [LABEL_COLUMN]
# NOTE(review): Python 2 print statement — this module will not run on
# Python 3 as written.
print "COLUMNS:", COLUMNS
# Sparse columns for categoricals (explicit keys where the vocabulary is
# known, hash buckets for country codes); real-valued + bucketized columns
# for the continuous features.
admin_level = sparse_column_with_keys(column_name="admin_level", keys=["None","0","1","2","3","4","5","6"]) # I've never seen admin 6, but you never know!
cluster_frequency = real_valued_column("cluster_frequency")
cluster_frequency_buckets = bucketized_column(cluster_frequency, boundaries=[0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1])
country_code = sparse_column_with_hash_bucket("country_code", hash_bucket_size=500)
country_rank = real_valued_column("country_rank")
edit_distance = sparse_column_with_keys(column_name="edit_distance", keys=["0", "1", "2"])
has_pcode = sparse_column_with_keys(column_name="has_pcode", keys=["True", "False"])
has_mpoly = sparse_column_with_keys(column_name="has_mpoly", keys=["True", "False"])
is_country = sparse_column_with_keys(column_name="is_country", keys=["True", "False"])
is_lowest_admin_level = sparse_column_with_keys(column_name="is_lowest_admin_level", keys=["True", "False"])
is_highest_population = sparse_column_with_keys(column_name="is_highest_population", keys=["True", "False"])
matches_topic = sparse_column_with_keys(column_name="matches_topic", keys=["True", "False"])
median_distance = real_valued_column("median_distance")
median_distance_buckets = bucketized_column(median_distance, boundaries=[10,50,100,200,300])
population = real_valued_column("population")
population_buckets = bucketized_column(population, boundaries=[0, 1, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000])
popularity = real_valued_column("popularity")
注:本文中的tensorflow.contrib.layers.real_valued_column函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论