This page collects typical usage examples of the Python function tensorflow.python.ops.summary_ops_v2.scalar. If you are wondering what scalar does, how to call it, or where it is used in practice, the hand-picked examples below should help.
Below are 20 code examples of the scalar function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code samples.
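Before the examples, here is a minimal sketch of the typical call pattern, distilled from the examples below. It is a sketch under stated assumptions, not authoritative usage: it assumes a TensorFlow 1.x build where the internal summary_ops_v2 module is available and eager execution can be enabled, and the logdir path '/tmp/logs' is hypothetical.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import summary_ops_v2 as summary_ops

tf.enable_eager_execution()  # in graph mode the summary ops must be run in a session instead

# max_queue=0 makes each event visible on disk immediately ('/tmp/logs' is a hypothetical logdir)
writer = summary_ops.create_file_writer('/tmp/logs', max_queue=0)
with writer.as_default(), summary_ops.always_record_summaries():
  summary_ops.scalar('loss', 0.5, step=1)  # records a scalar tagged 'loss' at step 1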
Example 1: testWriterInitAndClose
def testWriterInitAndClose(self):
  logdir = self.get_temp_dir()
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    with writer.as_default():
      summary_ops.scalar('one', 1.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    self.assertEqual(1, get_total())  # file_version Event
    # Running init() again while writer is open has no effect
    sess.run(writer.init())
    self.assertEqual(1, get_total())
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    # Running close() should do an implicit flush
    sess.run(writer.close())
    self.assertEqual(2, get_total())
    # Running init() on a closed writer should start a new file
    time.sleep(1.1)  # Ensure filename has a different timestamp
    sess.run(writer.init())
    sess.run(summary_ops.all_summary_ops())
    sess.run(writer.close())
    files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
    self.assertEqual(2, len(files))
    self.assertEqual(2, len(summary_test_util.events_from_file(files[1])))
Developer ID: Ajaycs99, Project: tensorflow, Lines of code: 27, Source file: summary_ops_graph_test.py
Example 2: _write_custom_summaries
def _write_custom_summaries(self, step, logs=None):
  """Writes metrics out as custom scalar summaries.

  Arguments:
      step: the global step to use for TensorBoard.
      logs: dict. Keys are scalar summary names, values are
          NumPy scalars.
  """
  logs = logs or {}
  if context.executing_eagerly():
    # use v2 summary ops
    with self.writer.as_default(), summary_ops_v2.always_record_summaries():
      for name, value in logs.items():
        if isinstance(value, np.ndarray):
          value = value.item()
        summary_ops_v2.scalar(name, value, step=step)
  else:
    # use FileWriter from v1 summary
    for name, value in logs.items():
      if isinstance(value, np.ndarray):
        value = value.item()
      summary = tf_summary.Summary()
      summary_value = summary.value.add()
      summary_value.simple_value = value
      summary_value.tag = name
      self.writer.add_summary(summary, step)
  self.writer.flush()
Developer ID: kylin9872, Project: tensorflow, Lines of code: 28, Source file: callbacks_v1.py
Example 3: testWriterInitAndClose
def testWriterInitAndClose(self):
  logdir = self.get_temp_dir()
  get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    self.assertEqual(1, get_total())  # file_version Event
    # Calling init() again while writer is open has no effect
    writer.init()
    self.assertEqual(1, get_total())
    try:
      # Not using .as_default() to avoid implicit flush when exiting
      writer.set_as_default()
      summary_ops.scalar('one', 1.0, step=1)
      self.assertEqual(1, get_total())
      # Calling .close() should do an implicit flush
      writer.close()
      self.assertEqual(2, get_total())
      # Calling init() on a closed writer should start a new file
      time.sleep(1.1)  # Ensure filename has a different timestamp
      writer.init()
      files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
      self.assertEqual(2, len(files))
      get_total = lambda: len(summary_test_util.events_from_file(files[1]))
      self.assertEqual(1, get_total())  # file_version Event
      summary_ops.scalar('two', 2.0, step=2)
      writer.close()
      self.assertEqual(2, get_total())
    finally:
      # Clean up by resetting default writer
      summary_ops.create_file_writer(None).set_as_default()
Developer ID: AnishShah, Project: tensorflow, Lines of code: 31, Source file: summary_ops_test.py
Example 4: testEagerMemory
def testEagerMemory(self):
  training_util.get_or_create_global_step()
  logdir = self.get_temp_dir()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t0').as_default(), summary_ops.always_record_summaries():
    summary_ops.generic('tensor', 1, '')
    summary_ops.scalar('scalar', 2.0)
    summary_ops.histogram('histogram', [1.0])
    summary_ops.image('image', [[[[1.0]]]])
    summary_ops.audio('audio', [[1.0]], 1.0, 1)
Developer ID: AnishShah, Project: tensorflow, Lines of code: 11, Source file: summary_ops_test.py
Example 5: testSummaryName
def testSummaryName(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(logdir, max_queue=0)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    sess.run(summary_ops.all_summary_ops())
  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(2, len(events))
  self.assertEqual('scalar', events[1].summary.value[0].tag)
Developer ID: Ajaycs99, Project: tensorflow, Lines of code: 11, Source file: summary_ops_graph_test.py
Example 6: testSummaryGlobalStep
def testSummaryGlobalStep(self):
  step = training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t2').as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=step)
  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(len(events), 2)
  self.assertEqual(events[1].summary.value[0].tag, 'scalar')
Developer ID: AnishShah, Project: tensorflow, Lines of code: 12, Source file: summary_ops_test.py
Example 7: testMaxQueue
def testMaxQueue(self):
  logs = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logs, max_queue=1, flush_millis=999999,
      name='lol').as_default(), summary_ops.always_record_summaries():
    get_total = lambda: len(summary_test_util.events_from_logdir(logs))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    summary_ops.scalar('scalar', 2.0, step=1)
    self.assertEqual(1, get_total())
    # Should flush after second summary since max_queue = 1
    summary_ops.scalar('scalar', 2.0, step=2)
    self.assertEqual(3, get_total())
Developer ID: AnishShah, Project: tensorflow, Lines of code: 13, Source file: summary_ops_test.py
Example 8: testSummaryGlobalStep
def testSummaryGlobalStep(self):
  training_util.get_or_create_global_step()
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(logdir, max_queue=0)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0)
  with self.cached_session() as sess:
    sess.run(variables.global_variables_initializer())
    sess.run(summary_ops.summary_writer_initializer_op())
    step, _ = sess.run(
        [training_util.get_global_step(), summary_ops.all_summary_ops()])
  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(2, len(events))
  self.assertEqual(step, events[1].step)
Developer ID: Ajaycs99, Project: tensorflow, Lines of code: 14, Source file: summary_ops_graph_test.py
Example 9: testSummaryOps
def testSummaryOps(self):
  training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t0').as_default(), summary_ops.always_record_summaries():
    summary_ops.generic('tensor', 1, '')
    summary_ops.scalar('scalar', 2.0)
    summary_ops.histogram('histogram', [1.0])
    summary_ops.image('image', [[[[1.0]]]])
    summary_ops.audio('audio', [[1.0]], 1.0, 1)
  # The working condition of the ops is tested in the C++ test so we just
  # test here that we're calling them correctly.
  self.assertTrue(gfile.Exists(logdir))
Developer ID: AnishShah, Project: tensorflow, Lines of code: 14, Source file: summary_ops_test.py
Example 10: testWriterFlush
def testWriterFlush(self):
  logdir = self.get_temp_dir()
  get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    self.assertEqual(1, get_total())  # file_version Event
    with writer.as_default():
      summary_ops.scalar('one', 1.0, step=1)
      self.assertEqual(1, get_total())
      writer.flush()
      self.assertEqual(2, get_total())
      summary_ops.scalar('two', 2.0, step=2)
    # Exiting the "as_default()" should do an implicit flush of the "two" tag
    self.assertEqual(3, get_total())
Developer ID: AnishShah, Project: tensorflow, Lines of code: 15, Source file: summary_ops_test.py
Example 11: testDbURIOpen
def testDbURIOpen(self):
  tmpdb_path = os.path.join(self.get_temp_dir(), 'tmpDbURITest.sqlite')
  tmpdb_uri = six.moves.urllib_parse.urljoin("file:", tmpdb_path)
  tmpdb_writer = summary_ops.create_db_writer(
      tmpdb_uri,
      "experimentA",
      "run1",
      "user1")
  with summary_ops.always_record_summaries():
    with tmpdb_writer.as_default():
      summary_ops.scalar('t1', 2.0)
  tmpdb = sqlite3.connect(tmpdb_path)
  num = get_one(tmpdb, 'SELECT count(*) FROM Tags WHERE tag_name = "t1"')
  self.assertEqual(num, 1)
  tmpdb.close()
Developer ID: AnishShah, Project: tensorflow, Lines of code: 15, Source file: summary_ops_test.py
Example 12: testWriterFlush
def testWriterFlush(self):
  logdir = self.get_temp_dir()
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    with writer.as_default():
      summary_ops.scalar('one', 1.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    self.assertEqual(1, get_total())  # file_version Event
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    sess.run(writer.flush())
    self.assertEqual(2, get_total())
Developer ID: Ajaycs99, Project: tensorflow, Lines of code: 15, Source file: summary_ops_graph_test.py
Example 13: testSummaryOps
def testSummaryOps(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(logdir, max_queue=0)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.generic('tensor', 1, step=1)
    summary_ops.scalar('scalar', 2.0, step=1)
    summary_ops.histogram('histogram', [1.0], step=1)
    summary_ops.image('image', [[[[1.0]]]], step=1)
    summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    sess.run(summary_ops.all_summary_ops())
  # The working condition of the ops is tested in the C++ test so we just
  # test here that we're calling them correctly.
  self.assertTrue(gfile.Exists(logdir))
Developer ID: Ajaycs99, Project: tensorflow, Lines of code: 15, Source file: summary_ops_graph_test.py
Example 14: testMaxQueue
def testMaxQueue(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(
      logdir, max_queue=1, flush_millis=999999)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    # Should flush after second summary since max_queue = 1
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(3, get_total())
Developer ID: Ajaycs99, Project: tensorflow, Lines of code: 16, Source file: summary_ops_graph_test.py
Example 15: testScalarSummaryNameScope
def testScalarSummaryNameScope(self):
  """Test record_summaries_every_n_global_steps and all_summaries()."""
  with ops.Graph().as_default(), self.cached_session() as sess:
    global_step = training_util.get_or_create_global_step()
    global_step.initializer.run()
    with ops.device('/cpu:0'):
      step_increment = state_ops.assign_add(global_step, 1)
    sess.run(step_increment)  # Increment global step from 0 to 1
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(logdir, max_queue=0,
                                        name='t2').as_default():
      with summary_ops.record_summaries_every_n_global_steps(2):
        summary_ops.initialize()
        with ops.name_scope('scope'):
          summary_op = summary_ops.scalar('my_scalar', 2.0)
        # Neither of these should produce a summary because
        # global_step is 1 and "1 % 2 != 0"
        sess.run(summary_ops.all_summary_ops())
        sess.run(summary_op)
        events = summary_test_util.events_from_logdir(logdir)
        self.assertEqual(len(events), 1)
        # Increment global step from 1 to 2 and check that the summary
        # is now written
        sess.run(step_increment)
        sess.run(summary_ops.all_summary_ops())
        events = summary_test_util.events_from_logdir(logdir)
        self.assertEqual(len(events), 2)
        self.assertEqual(events[1].summary.value[0].tag, 'scope/my_scalar')
Developer ID: Ajaycs99, Project: tensorflow, Lines of code: 31, Source file: summary_ops_graph_test.py
Example 16: define_ops
def define_ops():
  result = []
  # TF 2.0 summary ops
  result.append(summary_ops.write('write', 1, step=0))
  result.append(summary_ops.write_raw_pb(b'', step=0, name='raw_pb'))
  # TF 1.x tf.contrib.summary ops
  result.append(summary_ops.generic('tensor', 1, step=1))
  result.append(summary_ops.scalar('scalar', 2.0, step=1))
  result.append(summary_ops.histogram('histogram', [1.0], step=1))
  result.append(summary_ops.image('image', [[[[1.0]]]], step=1))
  result.append(summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1))
  return result
Developer ID: aritratony, Project: tensorflow, Lines of code: 12, Source file: summary_ops_test.py
Example 17: testFlushFunction
def testFlushFunction(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(
      logdir, max_queue=999999, flush_millis=999999)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=1)
    flush_op = summary_ops.flush()
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    sess.run(flush_op)
    self.assertEqual(2, get_total())
    # Test "writer" parameter
    sess.run(summary_ops.all_summary_ops())
    sess.run(summary_ops.flush(writer=writer))
    self.assertEqual(3, get_total())
    sess.run(summary_ops.all_summary_ops())
    sess.run(summary_ops.flush(writer=writer._resource))  # pylint:disable=protected-access
    self.assertEqual(4, get_total())
Developer ID: Ajaycs99, Project: tensorflow, Lines of code: 23, Source file: summary_ops_graph_test.py
Example 18: testFlushFunction
def testFlushFunction(self):
  logs = tempfile.mkdtemp()
  writer = summary_ops.create_file_writer(
      logs, max_queue=999999, flush_millis=999999, name='lol')
  with writer.as_default(), summary_ops.always_record_summaries():
    get_total = lambda: len(summary_test_util.events_from_logdir(logs))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    summary_ops.scalar('scalar', 2.0, step=1)
    summary_ops.scalar('scalar', 2.0, step=2)
    self.assertEqual(1, get_total())
    summary_ops.flush()
    self.assertEqual(3, get_total())
    # Test "writer" parameter
    summary_ops.scalar('scalar', 2.0, step=3)
    summary_ops.flush(writer=writer)
    self.assertEqual(4, get_total())
    summary_ops.scalar('scalar', 2.0, step=4)
    summary_ops.flush(writer=writer._resource)  # pylint:disable=protected-access
    self.assertEqual(5, get_total())
Developer ID: AnishShah, Project: tensorflow, Lines of code: 20, Source file: summary_ops_test.py
Example 19: testSharedName
def testSharedName(self):
  logdir = self.get_temp_dir()
  with summary_ops.always_record_summaries():
    # Create with default shared name (should match logdir)
    writer1 = summary_ops.create_file_writer(logdir)
    with writer1.as_default():
      summary_ops.scalar('one', 1.0, step=1)
    # Create with explicit logdir shared name (should be same resource/file)
    shared_name = 'logdir:' + logdir
    writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
    with writer2.as_default():
      summary_ops.scalar('two', 2.0, step=2)
    # Create with different shared name (should be separate resource/file)
    writer3 = summary_ops.create_file_writer(logdir, name='other')
    with writer3.as_default():
      summary_ops.scalar('three', 3.0, step=3)
  with self.cached_session() as sess:
    # Run init ops across writers sequentially to avoid race condition.
    # TODO(nickfelt): fix race condition in resource manager lookup or create
    sess.run(writer1.init())
    sess.run(writer2.init())
    time.sleep(1.1)  # Ensure filename has a different timestamp
    sess.run(writer3.init())
    sess.run(summary_ops.all_summary_ops())
    sess.run([writer1.flush(), writer2.flush(), writer3.flush()])
  event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))
  # First file has tags "one" and "two"
  events = summary_test_util.events_from_file(next(event_files))
  self.assertEqual('brain.Event:2', events[0].file_version)
  tags = [e.summary.value[0].tag for e in events[1:]]
  self.assertItemsEqual(['one', 'two'], tags)
  # Second file has tag "three"
  events = summary_test_util.events_from_file(next(event_files))
  self.assertEqual('brain.Event:2', events[0].file_version)
  tags = [e.summary.value[0].tag for e in events[1:]]
  self.assertItemsEqual(['three'], tags)
  # No more files
  self.assertRaises(StopIteration, lambda: next(event_files))
Developer ID: Ajaycs99, Project: tensorflow, Lines of code: 43, Source file: summary_ops_graph_test.py
Example 20: testSharedName
def testSharedName(self):
  logdir = self.get_temp_dir()
  with summary_ops.always_record_summaries():
    # Create with default shared name (should match logdir)
    writer1 = summary_ops.create_file_writer(logdir)
    with writer1.as_default():
      summary_ops.scalar('one', 1.0, step=1)
      summary_ops.flush()
    # Create with explicit logdir shared name (should be same resource/file)
    shared_name = 'logdir:' + logdir
    writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
    with writer2.as_default():
      summary_ops.scalar('two', 2.0, step=2)
      summary_ops.flush()
    # Create with different shared name (should be separate resource/file)
    time.sleep(1.1)  # Ensure filename has a different timestamp
    writer3 = summary_ops.create_file_writer(logdir, name='other')
    with writer3.as_default():
      summary_ops.scalar('three', 3.0, step=3)
      summary_ops.flush()
  event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))
  # First file has tags "one" and "two"
  events = iter(summary_test_util.events_from_file(next(event_files)))
  self.assertEqual('brain.Event:2', next(events).file_version)
  self.assertEqual('one', next(events).summary.value[0].tag)
  self.assertEqual('two', next(events).summary.value[0].tag)
  self.assertRaises(StopIteration, lambda: next(events))
  # Second file has tag "three"
  events = iter(summary_test_util.events_from_file(next(event_files)))
  self.assertEqual('brain.Event:2', next(events).file_version)
  self.assertEqual('three', next(events).summary.value[0].tag)
  self.assertRaises(StopIteration, lambda: next(events))
  # No more files
  self.assertRaises(StopIteration, lambda: next(event_files))
Developer ID: AnishShah, Project: tensorflow, Lines of code: 38, Source file: summary_ops_test.py
Note: The tensorflow.python.ops.summary_ops_v2.scalar examples on this page were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, refer to each project's license; do not republish without permission.