This article collects typical usage examples of the Python function tensorflow.python.framework.ops.uid. If you have been wondering what ops.uid does, how to call it, and what it is normally used for, the curated examples below should help.
Twenty code examples of the uid function are shown, sorted by popularity by default. Before diving into them, a minimal sketch of what ops.uid() itself returns may be useful:
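The sketch below is not taken from the examples; it only illustrates the contract the snippets rely on: ops.uid() hands out a fresh, process-local integer on every call, which is why it shows up wherever a unique name, path, or shared_name is needed. The directory and table names used here are made up for illustration.

from tensorflow.python.framework import ops

first = ops.uid()
second = ops.uid()
assert first != second  # every call returns a new id (an increasing counter)

# The recurring pattern in the examples below: append the uid to a name so
# that export directories or eagerly created resources never collide.
export_dir = "saved_model_%d" % ops.uid()        # hypothetical path
table_shared_name = "table_%d" % ops.uid()       # hypothetical shared_name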
Example 1: test_asset_loading
def test_asset_loading(self):
  first_path = self._v1_asset_saved_model()
  imported = load.load(first_path)
  self.evaluate(lookup_ops.tables_initializer())
  fn = imported.signatures["serving_default"]
  self.assertAllClose({"output": [2, 0]},
                      fn(start=constant_op.constant(["gamma", "alpha"])))
  second_path = os.path.join(self.get_temp_dir(), "saved_model",
                             str(ops.uid()))
  save.save(imported, second_path, signatures=imported.signatures)
  shutil.rmtree(first_path)
  del ops.get_collection_ref(ops.GraphKeys.TABLE_INITIALIZERS)[:]
  second_import = load.load(second_path)
  self.evaluate(lookup_ops.tables_initializer())
  fn = second_import.signatures["serving_default"]
  self.assertAllClose({"output": [2, 0]},
                      fn(start=constant_op.constant(["gamma", "alpha"])))
  third_path = os.path.join(self.get_temp_dir(), "saved_model",
                            str(ops.uid()))
  save.save(second_import, third_path, signatures=second_import.signatures)
  shutil.rmtree(second_path)
  del ops.get_collection_ref(ops.GraphKeys.TABLE_INITIALIZERS)[:]
  third_import = load.load(third_path)
  self.evaluate(lookup_ops.tables_initializer())
  fn = third_import.signatures["serving_default"]
  self.assertAllClose({"output": [2, 0]},
                      fn(start=constant_op.constant(["gamma", "alpha"])))
Contributor: aritratony, Project: tensorflow, Lines: 28, Source: load_v1_in_v2_test.py
Example 2: _v1_multi_metagraph_saved_model
def _v1_multi_metagraph_saved_model(self):
  export_graph = ops.Graph()
  with export_graph.as_default():
    start = array_ops.placeholder(
        shape=[None], dtype=dtypes.float32, name="start")
    v = resource_variable_ops.ResourceVariable(21.)
    first_output = array_ops.identity(start * v, name="first_output")
    second_output = array_ops.identity(v, name="second_output")
    with session_lib.Session() as session:
      session.run(v.initializer)
      path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
      builder = builder_impl.SavedModelBuilder(path)
      builder.add_meta_graph_and_variables(
          session, tags=["first"],
          signature_def_map={
              "first_key": signature_def_utils.build_signature_def(
                  {"first_start": utils_impl.build_tensor_info(start)},
                  {"first_output": utils_impl.build_tensor_info(
                      first_output)})})
      builder.add_meta_graph(
          tags=["second"],
          signature_def_map={
              "second_key": signature_def_utils.build_signature_def(
                  {"second_start": utils_impl.build_tensor_info(start)},
                  {"second_output": utils_impl.build_tensor_info(
                      second_output)})})
      builder.save()
  return path
Contributor: adit-chandra, Project: tensorflow, Lines: 28, Source: load_v1_in_v2_test.py
Example 3: load_function_def_library
def load_function_def_library(library):
  """Load a set of functions as concrete functions without captured inputs.

  Function names are manipulated during load such that they do not overlap
  with previously created ones.

  Args:
    library: FunctionDefLibrary proto message.

  Returns:
    Map of original function names in the library to instances of
    `ConcreteFunction` without captured inputs.

  Raises:
    ValueError: if function dependencies have a cycle.
  """
  functions = {}
  load_shared_name_suffix = "_load_{}".format(ops.uid())
  for fdef in _sort_function_defs(library):
    copy = _fix_fdef(fdef, functions, load_shared_name_suffix)
    func_graph = function_def_lib.function_def_to_graph(copy)
    for dep in _list_function_deps(fdef):
      functions[dep].add_to_graph(func_graph)
    func = function_lib.ConcreteFunction(func_graph)
    func.add_to_graph()
    functions[fdef.signature.name] = func

    # Also register the gradients in the current root context.
    with ops.init_scope():
      func._register_gradient()  # pylint: disable=protected-access

  return functions
Contributor: adit-chandra, Project: tensorflow, Lines: 35, Source: function_deserialization.py
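Worth noting in the example above: the uid-based suffix "_load_<uid>" passed to _fix_fdef gives each call to load_function_def_library its own namespace, so names produced by this load cannot collide with those produced by an earlier load in the same process; this is the "do not overlap with previously created ones" guarantee from the docstring.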
Example 4: _no_trainable_variable_attribute
def _no_trainable_variable_attribute(self, trainable):
  """A SavedModel where the VariableDef has no 'trainable' (it's false)."""

  class _MissingFieldsVariable(resource_variable_ops.ResourceVariable):

    def to_proto(self, export_scope=None):
      full_proto = super(_MissingFieldsVariable, self).to_proto(export_scope)
      return variable_pb2.VariableDef(
          variable_name=full_proto.variable_name,
          initial_value_name=full_proto.initial_value_name,
          initializer_name=full_proto.snapshot_name,
          save_slice_info_def=full_proto.save_slice_info_def,
          is_resource=full_proto.is_resource)

  export_graph = ops.Graph()
  with export_graph.as_default():
    v = _MissingFieldsVariable(3., trainable=trainable)
    with session_lib.Session() as session:
      session.run([v.initializer])
      path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
      b = builder_impl.SavedModelBuilder(path)
      b.add_meta_graph_and_variables(
          session,
          tags=[tag_constants.SERVING],
          signature_def_map={})
      b.save()
  return path
Contributor: aritratony, Project: tensorflow, Lines: 28, Source: load_v1_in_v2_test.py
Example 5: _v1_single_metagraph_saved_model
def _v1_single_metagraph_saved_model(self, use_resource):
  export_graph = ops.Graph()
  with export_graph.as_default():
    start = array_ops.placeholder(
        shape=[None], dtype=dtypes.float32, name="start")
    if use_resource:
      distractor = variables.RefVariable(-1., name="distractor")
      v = resource_variable_ops.ResourceVariable(3., name="v")
    else:
      # "distractor" gets saved in the checkpoint and so used in the restore
      # function, but not in the pruned function for the signature. This tests
      # node naming: it needs to be consistent (and ideally always the same as
      # the node in the original GraphDef) for the resource manager to find
      # the right variable.
      distractor = variables.RefVariable(-1., name="distractor")
      v = variables.RefVariable(3., name="v")
    local_variable = variables.VariableV1(
        1.,
        collections=[ops.GraphKeys.LOCAL_VARIABLES],
        trainable=False,
        use_resource=True)
    output = array_ops.identity(start * v * local_variable, name="output")
    with session_lib.Session() as session:
      session.run([v.initializer, distractor.initializer,
                   local_variable.initializer])
      path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
      simple_save.simple_save(
          session,
          path,
          inputs={"start": start},
          outputs={"output": output},
          legacy_init_op=local_variable.initializer)
  return path
Contributor: adit-chandra, Project: tensorflow, Lines: 33, Source: load_v1_in_v2_test.py
Example 6: _v1_nested_while_saved_model
def _v1_nested_while_saved_model(self):
  export_graph = ops.Graph()
  with export_graph.as_default():

    def _inner_while(loop_iterations):
      _, output = control_flow_ops.while_loop(
          lambda index, accum: index <= loop_iterations,
          lambda index, accum: (index + 1, accum + index),
          [constant_op.constant(0), constant_op.constant(0)])
      return output

    loop_iterations = array_ops.placeholder(
        name="loop_iterations", shape=[], dtype=dtypes.int32)
    _, output = control_flow_ops.while_loop(
        lambda index, accum: index <= loop_iterations,
        lambda index, accum: (index + 1, accum + _inner_while(index)),
        [constant_op.constant(0), constant_op.constant(0)])
    with session_lib.Session() as session:
      path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
      simple_save.simple_save(
          session,
          path,
          inputs={"loop_iterations": loop_iterations},
          outputs={"output": output})
  return path
Contributor: adit-chandra, Project: tensorflow, Lines: 25, Source: load_v1_in_v2_test.py
Example 7: initialize
def initialize(self, table):
  """Initializes the given `table` with `keys` and `values` tensors.

  Args:
    table: The table to initialize.

  Returns:
    The operation that initializes the table.

  Raises:
    TypeError: when the keys and values data types do not match the table
      key and value data types.
  """
  _check_table_dtypes(table, self._keys.dtype, self._values.dtype)
  with ops.name_scope(
      self._name, values=(table.table_ref, self._keys,
                          self._values)) as scope:
    if context.executing_eagerly():
      # Ensure a unique name when eager execution is enabled to avoid spurious
      # sharing issues.
      scope += str(ops.uid())
    init_op = gen_lookup_ops.initialize_table_v2(
        table.table_ref, self._keys, self._values, name=scope)
  ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
  return init_op
Contributor: Huoxubeiyin, Project: tensorflow, Lines: 25, Source: lookup_ops.py
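The pattern in this initializer recurs in several of the later examples: under eager execution there is no graph to uniquify node names, so the code appends str(ops.uid()) to the scope string, keeping each initializer op distinct and avoiding the "spurious sharing" the inline comment warns about.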
Example 8: capture
def capture(self, tensor, name=None):
  """Captures `tensor` if it's external to this graph.

  If `tensor` is from a different graph, returns a placeholder for it.
  `tensor` and the placeholder will appear in self.captures, and the
  placeholder will appear in self.inputs. Multiple calls to this method with
  the same `tensor` argument will return the same placeholder. If `tensor` is
  from this graph, returns `tensor`.

  Args:
    tensor: Tensor. May be from this FuncGraph or a different graph.
    name: Optional name if a placeholder is created.

  Returns:
    Tensor from this FuncGraph.
  """
  if isinstance(tensor, ops.EagerTensor):
    if name is None:
      name = str(ops.uid())
    return self._capture_helper(tensor, name)
  if tensor.graph is not self:
    if name is None:
      name = tensor.op.name
    return self._capture_helper(tensor, name)
  return tensor
Contributor: rmlarsen, Project: tensorflow, Lines: 25, Source: func_graph.py
Example 9: initialize
def initialize(self, table):
  """Initializes the given `table` with `keys` and `values` tensors.

  Args:
    table: The table to initialize.

  Returns:
    The operation that initializes the table.

  Raises:
    TypeError: when the keys and values data types do not match the table
      key and value data types.
  """
  _check_table_dtypes(table, self._keys.dtype, self._values.dtype)
  with ops.name_scope(
      self._name, values=(table.resource_handle, self._keys,
                          self._values)) as scope:
    if context.executing_eagerly():
      # Ensure a unique name when eager execution is enabled to avoid spurious
      # sharing issues.
      scope += str(ops.uid())
    if fwd_compat.forward_compatible(2018, 9, 19):
      init_op = gen_lookup_ops.lookup_table_import_v2(
          table.resource_handle, self._keys, self._values, name=scope)
    else:
      # To maintain forward compatibility, use the old implementation.
      init_op = gen_lookup_ops.initialize_table_v2(
          table.resource_handle, self._keys, self._values, name=scope)
  ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
  return init_op
Contributor: aeverall, Project: tensorflow, Lines: 30, Source: lookup_ops.py
Example 10: opt_variable
def opt_variable(value, dtype=None, name=None, constraint=None):
  """Instantiates a variable and returns it."""
  if dtype is None:
    dtype = backend.floatx()

  variables = []
  for i in range(num_replicas):
    # Keras holds the variables in the optimizer class instance, so the name
    # does not matter here. The ResourceVariable constructor will find a
    # unique name (including name=None) for each replica.
    with ops.device("device:TPU:{}".format(i)):
      v = resource_variable_ops.ResourceVariable(
          value,
          dtype=dtypes_module.as_dtype(dtype),
          name=name,
          constraint=constraint)
      variables.append(v)
  name = "replicate_{}_{}".format("variable" if name is None else name,
                                  ops.uid())
  v = ReplicatedVariable(name, variables)

  # pylint: disable=protected-access
  if isinstance(value, np.ndarray):
    v._keras_shape = value.shape
  elif hasattr(value, "shape"):
    v._keras_shape = backend.int_shape(value)
  v._uses_learning_phase = False
  backend.track_variable(v)
  return v
Contributor: Ajaycs99, Project: tensorflow, Lines: 30, Source: keras_tpu_variables.py
Example 11: _v1_asset_saved_model
def _v1_asset_saved_model(self):
  export_graph = ops.Graph()
  vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt")
  with open(vocab_path, "w") as f:
    f.write("alpha\nbeta\ngamma\n")
  with export_graph.as_default():
    initializer = lookup_ops.TextFileInitializer(
        vocab_path,
        key_dtype=dtypes.string,
        key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
        value_dtype=dtypes.int64,
        value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
    table = lookup_ops.HashTable(
        initializer, default_value=-1)
    start = array_ops.placeholder(
        shape=None, dtype=dtypes.string, name="in")
    output = table.lookup(start, name="out")
    with session_lib.Session() as session:
      session.run([table.initializer])
      path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
      simple_save.simple_save(
          session,
          path,
          inputs={"start": start},
          outputs={"output": output},
          legacy_init_op=table.initializer)
  file_io.delete_file(vocab_path)
  return path
Contributor: adit-chandra, Project: tensorflow, Lines: 28, Source: load_v1_in_v2_test.py
Example 12: maybe_capture_tensor
def maybe_capture_tensor(self, tensor):
  if isinstance(tensor, ops.EagerTensor):
    return capture_value(
        self.captures, tensor, tensor.dtype, str(ops.uid()))
  if tensor.graph is not self:
    return capture_value(
        self.captures, tensor, tensor.dtype, tensor.op.name)
  return tensor
Contributor: Jackiefan, Project: tensorflow, Lines: 8, Source: function.py
Example 13: create_file_writer_v2
def create_file_writer_v2(logdir,
                          max_queue=None,
                          flush_millis=None,
                          filename_suffix=None,
                          name=None):
  """Creates a summary file writer for the given log directory.

  Args:
    logdir: a string specifying the directory in which to write an event file.
    max_queue: the largest number of summaries to keep in a queue; will
      flush once the queue gets bigger than this. Defaults to 10.
    flush_millis: the largest interval between flushes. Defaults to 120,000.
    filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
    name: a name for the op that creates the writer.

  Returns:
    A SummaryWriter object.
  """
  if logdir is None:
    raise ValueError("logdir cannot be None")
  inside_function = ops.inside_function()
  with ops.name_scope(name, "create_file_writer") as scope, ops.device("cpu:0"):
    # Run init inside an init_scope() to hoist it out of tf.functions.
    with ops.init_scope():
      if context.executing_eagerly():
        _check_create_file_writer_args(
            inside_function,
            logdir=logdir,
            max_queue=max_queue,
            flush_millis=flush_millis,
            filename_suffix=filename_suffix)
      logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)
      if max_queue is None:
        max_queue = constant_op.constant(10)
      if flush_millis is None:
        flush_millis = constant_op.constant(2 * 60 * 1000)
      if filename_suffix is None:
        filename_suffix = constant_op.constant(".v2")
      # Prepend the PID and a process-local UID to the filename suffix to avoid
      # filename collisions within the machine (the filename already contains
      # the hostname to avoid cross-machine collisions).
      unique_prefix = constant_op.constant(".%s.%s" % (os.getpid(), ops.uid()))
      filename_suffix = unique_prefix + filename_suffix
      # Use a unique shared_name to prevent resource sharing.
      if context.executing_eagerly():
        shared_name = context.shared_name()
      else:
        shared_name = ops.name_from_scope_name(scope)  # pylint: disable=protected-access
      return ResourceSummaryWriter(
          shared_name=shared_name,
          init_op_fn=functools.partial(
              gen_summary_ops.create_summary_file_writer,
              logdir=logdir,
              max_queue=max_queue,
              flush_millis=flush_millis,
              filename_suffix=filename_suffix),
          name=name,
          v2=True)
Contributor: adit-chandra, Project: tensorflow, Lines: 58, Source: summary_ops_v2.py
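For orientation, a minimal usage sketch of the function above; the arguments are taken from its own signature, the log directory is a made-up placeholder, and how the returned writer is used afterwards is outside this snippet:

writer = create_file_writer_v2(
    "/tmp/logs/run1",        # hypothetical logdir
    max_queue=10,
    flush_millis=10000,
    filename_suffix=".v2")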
Example 14: test_nonexistant_prefix_directory
def test_nonexistant_prefix_directory(self):
  m = keras.Model()
  v = m.add_weight(name='v', shape=[])
  self.evaluate(v.assign(42.))
  prefix = os.path.join(self.get_temp_dir(), '{}'.format(ops.uid()), 'bckpt')
  m.save_weights(prefix)
  self.evaluate(v.assign(2.))
  m.load_weights(prefix)
  self.assertEqual(42., self.evaluate(v))
Contributor: terrytangyuan, Project: tensorflow, Lines: 9, Source: hdf5_format_test.py
Example 15: _eager_safe_variable_handle
def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
  """Creates a variable handle with information to do shape inference."""
  container = ops.get_default_graph()._container  # pylint: disable=protected-access
  if container is None:
    container = ""
  if not graph_mode:
    # When in eager mode use a uid for the shared_name, to prevent accidental
    # sharing.
    shared_name = str(ops.uid())
  handle = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                   shared_name=shared_name,
                                                   name=name,
                                                   container=container)
  if graph_mode:
    return handle

  # We do not want two distinct ResourceVariable objects for the same
  # underlying resource in the runtime.
  # When in eager mode, explicitly ensure so here. When in graph mode, it's
  # ensured by always generating different variable names.
  exists = gen_resource_variable_ops.var_is_initialized_op(handle)
  if exists:
    raise ValueError("variable object with name '%s' already created. Use "
                     "get_variable() if reuse is desired." %
                     shared_name)
  with context.graph_mode(), ops.Graph().as_default() as graph:
    h = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                shared_name=shared_name,
                                                name=name,
                                                container=container)

    # Tensor._handle_data contains information for the shape-inference code to
    # know the shape and dtype of the variable pointed to by a handle. Since
    # shape inference doesn't run in eager mode we copy this data here for when
    # the handle is captured by an eager mode function.
    handle._handle_data = h._handle_data  # pylint: disable=protected-access
  # Clean up our reference cycles to avoid making the garbage collector run.
  # pylint: disable=protected-access
  # OrderedDict, constructed on Graph creation, makes a simple reference loop
  # and hides it in an __attribute in some Python versions. We don't need to
  # throw an error if we can't find it, but if we do find it we can break the
  # loop to avoid creating work for the garbage collector.
  problematic_cycle = graph._functions.__dict__.get("_OrderedDict__root", None)
  # pylint: enable=protected-access
  if problematic_cycle:
    try:
      del problematic_cycle[0][:]
    except TypeError:
      # This is probably not one of the problematic Python versions. Continue
      # with the rest of our cleanup.
      pass
  # Now clean up our own reference cycles by clearing all of the attributes for
  # the Graph and op we created.
  h.__dict__ = {}
  graph.__dict__ = {}
  return handle
Contributor: keithc61, Project: tensorflow, Lines: 56, Source: resource_variable_ops.py
Example 16: capture
def capture(self, tensor, name=None):
  if isinstance(tensor, ops.EagerTensor):
    if name is None:
      name = str(ops.uid())
    return capture_value(self.captures, tensor, tensor.dtype, name)
  if tensor.graph is not self:
    if name is None:
      name = tensor.op.name
    return capture_value(self.captures, tensor, tensor.dtype, name)
  return tensor
Contributor: KiaraStarlab, Project: tensorflow, Lines: 10, Source: function.py
Example 17: create_resource
def create_resource(self):
  if context.executing_eagerly():
    # TODO(allenl): This will leak memory due to kernel caching by the
    # shared_name attribute value (but is better than the alternative of
    # sharing everything by default when executing eagerly; hopefully creating
    # tables in a loop is uncommon).
    shared_name = "fertile_stats_variable_%d" % (ops.uid(),)
  else:
    shared_name = self._name
  return gen_stats_ops.fertile_stats_resource_handle_op(
      self._container, shared_name=shared_name, name=self._name)
Contributor: Ajaycs99, Project: tensorflow, Lines: 11, Source: stats_ops.py
Example 18: unique_fn_name
def unique_fn_name(scope, name):
  """Returns a unique name to use for a control flow function.

  Args:
    scope: A name scope string.
    name: An identifier for this function (e.g. "true", "body").

  Returns:
    A string, the name to use for the function.
  """
  return ("%s%s_%s" % (scope, name, ops.uid())).replace("/", "_")
Contributor: adit-chandra, Project: tensorflow, Lines: 11, Source: control_flow_util_v2.py
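A quick illustration of the naming scheme above (the uid value is hypothetical): with scope "while/" and name "cond", and ops.uid() returning, say, 42, the format string first yields "while/cond_42", and the replace() call then turns the slash into an underscore so the result is a valid function name.

unique_fn_name("while/", "cond")   # -> e.g. "while_cond_42"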
Example 19: __init__
def __init__(self, object_graph_proto, save_path, dtype_map=None):
  """Specify the checkpoint being loaded.

  Args:
    object_graph_proto: The CheckpointableObjectGraph protocol buffer
      associated with this checkpoint.
    save_path: A string `Tensor`. The path to the checkpoint, as returned by
      `tf.train.latest_checkpoint`.
    dtype_map: When executing eagerly, specifies dtypes for creating slot
      variables. None when graph building.
  """
  self.builder = saver_lib.BulkSaverBuilder()
  self.object_graph_proto = object_graph_proto
  self.restore_uid = ops.uid()
  # Maps from objects to lists of attributes which were in the checkpoint but
  # not loaded into any object, for error checking.
  self.unused_attributes = weakref.WeakKeyDictionary()
  # Dictionary mapping from an id in the protocol buffer flat array to
  # Checkpointable Python objects. This mapping may be deferred if a
  # checkpoint is restored before all dependencies have been tracked. Uses
  # weak references so that partial restorations don't create reference cycles
  # (as objects with deferred dependencies will generally have references to
  # this object).
  self.object_by_proto_id = weakref.WeakValueDictionary()
  # A set of all Python objects we've seen as dependencies, even if we didn't
  # use them (for example because of inconsistent references when
  # loading). Used to make status assertions fail when loading checkpoints
  # that don't quite match.
  self.all_python_objects = weakref.WeakSet()
  self.save_path = save_path
  self.dtype_map = dtype_map
  # When graph building, contains a list of ops to run to restore objects from
  # this checkpoint.
  self.restore_ops = []
  self.restore_ops_by_name = {}
  # A mapping from optimizer proto ids to lists of slot variables to be
  # restored when the optimizer is tracked. Only includes slot variables whose
  # regular variables have already been created, and only for optimizer
  # objects which have not yet been created/tracked.
  self.deferred_slot_restorations = {}
  # A mapping from variable proto ids to lists of slot variables to be
  # restored when the variable is created/tracked. These get shifted over to
  # deferred_slot_restorations if the optimizer hasn't been created when that
  # happens.
  self.slot_restorations = {}
  for node_index, node in enumerate(self.object_graph_proto.nodes):
    for slot_reference in node.slot_variables:
      # `node` refers to an `Optimizer`, since only these have slot variables.
      self.slot_restorations.setdefault(
          slot_reference.original_variable_node_id, []).append(
              checkpointable_lib._SlotVariableRestoration(  # pylint: disable=protected-access
                  optimizer_id=node_index,
                  slot_variable_id=slot_reference.slot_variable_node_id,
                  slot_name=slot_reference.slot_name))
Contributor: Jackiefan, Project: tensorflow, Lines: 54, Source: checkpointable_utils.py
Example 20: __init__
def __init__(self,
             key_dtype,
             value_dtype,
             default_value,
             shared_name=None,
             name="MutableHashTable",
             checkpoint=True):
  """Creates an empty `MutableHashTable` object.

  Creates a table, the type of its keys and values are specified by key_dtype
  and value_dtype, respectively.

  Args:
    key_dtype: the type of the key tensors.
    value_dtype: the type of the value tensors.
    default_value: The value to use if a key is missing in the table.
    shared_name: If non-empty, this table will be shared under
      the given name across multiple sessions.
    name: A name for the operation (optional).
    checkpoint: if True, the contents of the table are saved to and restored
      from checkpoints. If `shared_name` is empty for a checkpointed table, it
      is shared using the table node name.

  Returns:
    A `MutableHashTable` object.

  Raises:
    ValueError: If checkpoint is True and no name was specified.
  """
  self._default_value = ops.convert_to_tensor(default_value,
                                              dtype=value_dtype)
  self._value_shape = self._default_value.get_shape()
  self._checkpoint = checkpoint
  self._key_dtype = key_dtype
  self._value_dtype = value_dtype
  self._name = name

  if context.executing_eagerly() and shared_name is None:
    # TODO(allenl): This will leak memory due to kernel caching by the
    # shared_name attribute value (but is better than the alternative of
    # sharing everything by default when executing eagerly; hopefully creating
    # tables in a loop is uncommon).
    shared_name = "table_%d" % (ops.uid(),)
  self._shared_name = shared_name
  super(MutableHashTable, self).__init__(key_dtype, value_dtype)

  self._resource_handle = self.create_resource()
  if checkpoint:
    saveable = MutableHashTable._Saveable(self, name)
    if not context.executing_eagerly():
      ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
Contributor: Ajaycs99, Project: tensorflow, Lines: 51, Source: lookup_ops.py
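Finally, a hedged construction sketch based only on the signature above (in application code the public tf.lookup API would normally be used instead of this internal class):

table = MutableHashTable(
    key_dtype=dtypes.string,
    value_dtype=dtypes.int64,
    default_value=-1)
# Under eager execution with shared_name=None, the constructor derives a
# unique shared name of the form "table_<uid>", as the code above shows.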
Note: The tensorflow.python.framework.ops.uid examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Refer to each project's license before redistributing or reusing the code, and do not republish without permission.