This page collects typical usage examples of the Python function tensorflow.python.ops.resource_variable_ops.destroy_resource_op. If you have been wondering what destroy_resource_op does, how to call it, or where to find real-world examples of it, the curated code samples below should help.
The 11 code examples of destroy_resource_op shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
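Before turning to the collected examples, here is a minimal, self-contained sketch of the basic pattern. It is not taken from any of the projects below; the variable name my_var, the float dtype of the dangling handle, and the use of a tf.compat.v1.Session are illustrative assumptions for a TF 1.x-style graph/session workflow. It creates a resource variable, destroys the resource backing it, and shows that destroying a handle whose resource was never created is a silent no-op when ignore_lookup_error=True.

# Minimal sketch (illustrative, not from the examples below). Assumes a
# TF 1.x-style graph/session workflow via tf.compat.v1.
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables

graph = tf.Graph()
with graph.as_default():
  v = resource_variable_ops.ResourceVariable(1.0, name="my_var")
  read_op = v.value()
  # Destroys the resource backing `v`; reading `v` afterwards would fail.
  destroy_op = resource_variable_ops.destroy_resource_op(v.handle)
  # Handle to a resource that was never created: with ignore_lookup_error=True
  # the destroy op is a silent no-op instead of raising NotFoundError.
  dangling = resource_variable_ops.var_handle_op(dtype=dtypes.float32, shape=[])
  destroy_dangling_op = resource_variable_ops.destroy_resource_op(
      dangling, ignore_lookup_error=True)
  init_op = variables.global_variables_initializer()

with tf.compat.v1.Session(graph=graph) as sess:
  sess.run(init_op)
  print(sess.run(read_op))       # 1.0
  sess.run(destroy_op)           # frees the variable's resource
  sess.run(destroy_dangling_op)  # no error

Examples 2 and 3 below exercise the same pattern inside TensorFlow's own test suite, while Examples 8 and 11 call destroy_resource_op from a __del__ method to release a dataset resource when its owner is garbage-collected.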
Example 1: testDestruction
def testDestruction(self):
  with context.eager_mode():
    var = resource_variable_ops.ResourceVariable(initial_value=1.0,
                                                 name="var8")
    var.__del__()
    with self.assertRaisesRegexp(errors.NotFoundError,
                                 r"Resource .*\/var8\/.* does not exist."):
      resource_variable_ops.destroy_resource_op(var._handle,
                                                ignore_lookup_error=False)
Developer: alexsax, Project: tensorflow, Lines: 9, Source: resource_variable_ops_test.py
Example 2: testDestroyResource
def testDestroyResource(self):
  v = resource_variable_ops.ResourceVariable(3.0, name="var0")
  self.evaluate(variables.global_variables_initializer())
  self.assertEqual(3.0, self.evaluate(v.value()))
  self.evaluate(resource_variable_ops.destroy_resource_op(v.handle))
  with self.assertRaises(errors.FailedPreconditionError):
    self.evaluate(v.value())
  # Handle to a resource not actually created.
  handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
  # Should raise no exception
  self.evaluate(resource_variable_ops.destroy_resource_op(
      handle, ignore_lookup_error=True))
Developer: aeverall, Project: tensorflow, Lines: 12, Source: resource_variable_ops_test.py
Example 3: testDestroyResource
def testDestroyResource(self):
  with self.test_session() as sess:
    v = resource_variable_ops.ResourceVariable(3.0)
    variables.global_variables_initializer().run()
    self.assertEqual(3.0, v.value().eval())
    sess.run(resource_variable_ops.destroy_resource_op(v.handle))
    with self.assertRaises(errors.NotFoundError):
      v.value().eval()
    # Handle to a resource not actually created.
    handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
    # Should raise no exception
    sess.run(resource_variable_ops.destroy_resource_op(
        handle, ignore_lookup_error=True))
Developer: AutumnQYN, Project: tensorflow, Lines: 13, Source: resource_variable_ops_test.py
Example 4: _create_ops
def _create_ops(self, ds, ds_iterator, buffer_name, device0, device1):
  ds_iterator_handle = ds_iterator.string_handle()

  @function.Defun(dtypes.string)
  def _remote_fn(h):
    remote_iterator = iterator_ops.Iterator.from_string_handle(
        h, ds.output_types, ds.output_shapes)
    return remote_iterator.get_next()

  target = constant_op.constant(device0)
  with ops.device(device1):
    buffer_resource_handle = prefetching_ops.function_buffering_resource(
        f=_remote_fn,
        output_types=[dtypes.float32],
        target_device=target,
        string_arg=ds_iterator_handle,
        buffer_size=3,
        shared_name=buffer_name)

  with ops.device(device1):
    prefetch_op = prefetching_ops.function_buffering_resource_get_next(
        function_buffer_resource=buffer_resource_handle,
        output_types=[dtypes.float32])
    reset_op = prefetching_ops.function_buffering_resource_reset(
        function_buffer_resource=buffer_resource_handle)
    destroy_op = resource_variable_ops.destroy_resource_op(
        buffer_resource_handle, ignore_lookup_error=True)

  return (prefetch_op, reset_op, destroy_op)
Developer: baojianzhou, Project: tensorflow, Lines: 29, Source: prefetching_ops_test.py
Example 5: _prefetch_fn_helper
def _prefetch_fn_helper(self, buffer_name, device0, device1):
  worker_config = config_pb2.ConfigProto()
  worker_config.device_count["CPU"] = 2

  def gen():
    for i in itertools.count(start=1, step=1):
      yield [i + 0.0]
      if i == 6:
        self._event.set()

  with ops.device(device0):
    dataset_3 = dataset_ops.Dataset.from_generator(gen, (dtypes.float32))
    iterator_3 = dataset_3.make_one_shot_iterator()
    iterator_3_handle = iterator_3.string_handle()

  @function.Defun(dtypes.string)
  def _remote_fn(h):
    remote_iterator = iterator_ops.Iterator.from_string_handle(
        h, dataset_3.output_types, dataset_3.output_shapes)
    return remote_iterator.get_next()

  target = constant_op.constant(device0)
  with ops.device(device1):
    buffer_resource_handle = prefetching_ops.function_buffering_resource(
        f=_remote_fn,
        target_device=target,
        string_arg=iterator_3_handle,
        buffer_size=3,
        thread_pool_size=2,
        shared_name=buffer_name)

  with ops.device(device1):
    prefetch_op = prefetching_ops.function_buffering_resource_get_next(
        function_buffer_resource=buffer_resource_handle,
        output_types=[dtypes.float32])

  with self.test_session(config=worker_config) as sess:
    elem = sess.run(prefetch_op)
    self.assertEqual(elem, [1.0])
    elem = sess.run(prefetch_op)
    self.assertEqual(elem, [2.0])
    elem = sess.run(prefetch_op)
    self.assertEqual(elem, [3.0])
    elem = sess.run(prefetch_op)
    self.assertEqual(elem, [4.0])
    self._event.wait()
    elem = sess.run(prefetch_op)
    self.assertEqual(elem, [5.0])
    sess.run(
        resource_variable_ops.destroy_resource_op(
            buffer_resource_handle, ignore_lookup_error=True))
Developer: AbhinavJain13, Project: tensorflow, Lines: 51, Source: prefetching_ops_test.py
Example 6: _finalize_func
def _finalize_func(string_handle):
  """Destroys the iterator resource created.

  Args:
    string_handle: An iterator string handle created by _init_func

  Returns:
    Tensor constant 0
  """
  iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
      string_handle,
      **dataset_ops.flat_structure(self._input_dataset))
  with ops.control_dependencies([
      resource_variable_ops.destroy_resource_op(
          iterator_resource, ignore_lookup_error=True)]):
    return array_ops.constant(0, dtypes.int64)
Developer: Wajih-O, Project: tensorflow, Lines: 15, Source: prefetching_ops.py
Example 7: testStringsGPU
def testStringsGPU(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  device0 = "/job:localhost/replica:0/task:0/cpu:0"
  device1 = "/job:localhost/replica:0/task:0/gpu:0"

  ds = dataset_ops.Dataset.from_tensor_slices(["a", "b", "c"])
  ds_iterator = ds.make_one_shot_iterator()
  ds_iterator_handle = ds_iterator.string_handle()

  @function.Defun(dtypes.string)
  def _remote_fn(h):
    remote_iterator = iterator_ops.Iterator.from_string_handle(
        h, ds.output_types, ds.output_shapes)
    return remote_iterator.get_next()

  target = constant_op.constant(device0)
  with ops.device(device1):
    buffer_resource_handle = prefetching_ops.function_buffering_resource(
        f=_remote_fn,
        output_types=[dtypes.string],
        target_device=target,
        string_arg=ds_iterator_handle,
        buffer_size=3,
        shared_name="strings")

  with ops.device(device1):
    prefetch_op = prefetching_ops.function_buffering_resource_get_next(
        function_buffer_resource=buffer_resource_handle,
        output_types=[dtypes.string])
    destroy_op = resource_variable_ops.destroy_resource_op(
        buffer_resource_handle, ignore_lookup_error=True)

  with self.cached_session() as sess:
    self.assertEqual([b"a"], sess.run(prefetch_op))
    self.assertEqual([b"b"], sess.run(prefetch_op))
    self.assertEqual([b"c"], sess.run(prefetch_op))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(prefetch_op)
    sess.run(destroy_op)
Developer: baojianzhou, Project: tensorflow, Lines: 42, Source: prefetching_ops_test.py
Example 8: __del__
def __del__(self):
  if self._resource is not None:
    resource_variable_ops.destroy_resource_op(self._resource)
  self._resource = None
Developer: Crazyonxh, Project: tensorflow, Lines: 4, Source: datasets.py
Example 9: _model_fn
def _model_fn(features, labels, mode):
  """Function that returns predictions, training loss, and training op."""
  if (isinstance(features, ops.Tensor) or
      isinstance(features, sparse_tensor.SparseTensor)):
    features = {'features': features}

  weights = None
  if weights_name and weights_name in features:
    weights = features.pop(weights_name)

  keys = None
  if keys_name and keys_name in features:
    keys = features.pop(keys_name)

  # If we're doing eval, optionally ignore device_assigner.
  # Also ignore device assigner if we're exporting (mode == INFER)
  dev_assn = device_assigner
  if (mode == model_fn_lib.ModeKeys.INFER or
      (local_eval and mode == model_fn_lib.ModeKeys.EVAL)):
    dev_assn = None

  graph_builder = graph_builder_class(params,
                                      device_assigner=dev_assn)

  logits, tree_paths, regression_variance = graph_builder.inference_graph(
      features)

  summary.scalar('average_tree_size', graph_builder.average_size())
  # For binary classification problems, convert probabilities to logits.
  # Includes hack to get around the fact that a probability might be 0 or 1.
  if not params.regression and params.num_classes == 2:
    class_1_probs = array_ops.slice(logits, [0, 1], [-1, 1])
    logits = math_ops.log(
        math_ops.maximum(class_1_probs / math_ops.maximum(
            1.0 - class_1_probs, EPSILON), EPSILON))

  # labels might be None if we're doing prediction (which brings up the
  # question of why we force everything to adhere to a single model_fn).
  training_graph = None
  training_hooks = []
  if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
    with ops.control_dependencies([logits.op]):
      training_graph = control_flow_ops.group(
          graph_builder.training_graph(
              features, labels, input_weights=weights,
              num_trainers=num_trainers,
              trainer_id=trainer_id),
          state_ops.assign_add(contrib_framework.get_global_step(), 1))

  # Put weights back in
  if weights is not None:
    features[weights_name] = weights

  # TensorForest's training graph isn't calculated directly from the loss
  # like many other models.
  def _train_fn(unused_loss):
    return training_graph

  model_ops = model_head.create_model_fn_ops(
      features=features,
      labels=labels,
      mode=mode,
      train_op_fn=_train_fn,
      logits=logits,
      scope=head_scope)

  # Ops are run in lexicographical order of their keys. Run the resource
  # clean-up op last.
  all_handles = graph_builder.get_all_resource_handles()
  ops_at_end = {
      '9: clean up resources': control_flow_ops.group(
          *[resource_variable_ops.destroy_resource_op(handle)
            for handle in all_handles])}

  if report_feature_importances:
    ops_at_end['1: feature_importances'] = (
        graph_builder.feature_importances())

  training_hooks.append(TensorForestRunOpAtEndHook(ops_at_end))

  if early_stopping_rounds:
    training_hooks.append(
        TensorForestLossHook(
            early_stopping_rounds,
            early_stopping_loss_threshold=early_stopping_loss_threshold,
            loss_op=model_ops.loss))

  model_ops.training_hooks.extend(training_hooks)

  if keys is not None:
    model_ops.predictions[keys_name] = keys

  if params.inference_tree_paths:
    model_ops.predictions[TREE_PATHS_PREDICTION_KEY] = tree_paths

  if params.regression:
    model_ops.predictions[VARIANCE_PREDICTION_KEY] = regression_variance

  return model_ops
Developer: rmcguinness, Project: tensorflow, Lines: 98, Source: random_forest.py
Example 10: _model_fn
def _model_fn(features, labels, mode):
  """Function that returns predictions, training loss, and training op."""
  if (isinstance(features, ops.Tensor) or
      isinstance(features, sparse_tensor.SparseTensor)):
    features = {'features': features}
  if feature_columns:
    features = features.copy()

    if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
      features.update(layers.transform_features(features, feature_columns))
    else:
      for fc in feature_columns:
        tensor = fc_core._transform_features(features, [fc])[fc]  # pylint: disable=protected-access
        features[fc.name] = tensor

  weights = None
  if weights_name and weights_name in features:
    weights = features.pop(weights_name)

  keys = None
  if keys_name and keys_name in features:
    keys = features.pop(keys_name)

  # If we're doing eval, optionally ignore device_assigner.
  # Also ignore device assigner if we're exporting (mode == INFER)
  dev_assn = device_assigner
  if (mode == model_fn_lib.ModeKeys.INFER or
      (local_eval and mode == model_fn_lib.ModeKeys.EVAL)):
    dev_assn = None

  graph_builder = graph_builder_class(params,
                                      device_assigner=dev_assn)

  logits, tree_paths, regression_variance = graph_builder.inference_graph(
      features)

  summary.scalar('average_tree_size', graph_builder.average_size())
  # For binary classification problems, convert probabilities to logits.
  # Includes hack to get around the fact that a probability might be 0 or 1.
  if not params.regression and params.num_classes == 2:
    class_1_probs = array_ops.slice(logits, [0, 1], [-1, 1])
    logits = math_ops.log(
        math_ops.maximum(class_1_probs / math_ops.maximum(
            1.0 - class_1_probs, EPSILON), EPSILON))

  # labels might be None if we're doing prediction (which brings up the
  # question of why we force everything to adhere to a single model_fn).
  training_graph = None
  training_hooks = []
  if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
    with ops.control_dependencies([logits.op]):
      training_graph = control_flow_ops.group(
          graph_builder.training_graph(
              features, labels, input_weights=weights,
              num_trainers=num_trainers,
              trainer_id=trainer_id),
          state_ops.assign_add(training_util.get_global_step(), 1))

  # Put weights back in
  if weights is not None:
    features[weights_name] = weights

  # TensorForest's training graph isn't calculated directly from the loss
  # like many other models.
  def _train_fn(unused_loss):
    return training_graph

  # Ops are run in lexicographical order of their keys. Run the resource
  # clean-up op last.
  all_handles = graph_builder.get_all_resource_handles()
  ops_at_end = {
      '9: clean up resources':
          control_flow_ops.group(*[
              resource_variable_ops.destroy_resource_op(handle)
              for handle in all_handles
          ])
  }

  if report_feature_importances:
    ops_at_end['1: feature_importances'] = (
        graph_builder.feature_importances())

  training_hooks = [TensorForestRunOpAtEndHook(ops_at_end)]

  if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
    model_ops = model_head.create_model_fn_ops(
        features=features,
        labels=labels,
        mode=mode,
        train_op_fn=_train_fn,
        logits=logits,
        scope=head_scope)

    if early_stopping_rounds:
      training_hooks.append(
          TensorForestLossHook(
              early_stopping_rounds,
              early_stopping_loss_threshold=early_stopping_loss_threshold,
#......... (remainder of the code omitted) .........
Developer: AnishShah, Project: tensorflow, Lines: 101, Source: random_forest.py
Example 11: __del__
def __del__(self):
  if self._resource is not None:
    with ops.device("/device:CPU:0"):
      resource_variable_ops.destroy_resource_op(self._resource)
  self._resource = None
Developer: DjangoPeng, Project: tensorflow, Lines: 5, Source: datasets.py
Note: The tensorflow.python.ops.resource_variable_ops.destroy_resource_op examples in this article were collected by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are taken from open-source projects and copyright remains with their original authors; see each project's license before redistributing or reusing the code, and do not repost without permission.