This article collects typical usage examples of the Python function tensorflow.contrib.summary.summary_test_util.events_from_logdir. If you have been wondering what exactly events_from_logdir does, how to call it, or what real-world uses look like, the curated code examples below may help.
Twenty code examples of events_from_logdir are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
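Before the examples, here is a minimal sketch of the pattern they all share: write a summary with the tf.contrib.summary API, then read the resulting event file back with events_from_logdir and assert on its contents. This is an illustrative sketch only, assuming a TF 1.x release new enough to have both tf.enable_eager_execution and tf.contrib.summary; the tag name 'my_metric' and the temporary logdir are placeholders, not part of any particular example below.

import tempfile

import tensorflow as tf
from tensorflow.contrib.summary import summary_test_util

tf.enable_eager_execution()

logdir = tempfile.mkdtemp()
writer = tf.contrib.summary.create_file_writer(logdir, max_queue=0)
with writer.as_default(), tf.contrib.summary.always_record_summaries():
  # With max_queue=0 each summary is written to disk immediately.
  tf.contrib.summary.scalar('my_metric', 0.5, step=1)

# events_from_logdir returns the tf.Event protos found in the logdir, in
# order; the first event is always the file_version record.
events = summary_test_util.events_from_logdir(logdir)
assert len(events) == 2
assert events[1].summary.value[0].tag == 'my_metric'

The same pattern appears in graph mode throughout the examples, except that sess.run(tf.contrib.summary.all_summary_ops()) triggers the writes instead of eager execution.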
Example 1: testScalarSummary

def testScalarSummary(self):
  """Test record_summaries_every_n_global_steps and all_summaries()."""
  with ops.Graph().as_default(), self.test_session() as sess:
    global_step = training_util.get_or_create_global_step()
    global_step.initializer.run()
    with ops.device('/cpu:0'):
      step_increment = state_ops.assign_add(global_step, 1)
    sess.run(step_increment)  # Increment global step from 0 to 1

    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(logdir, max_queue=0,
                                        name='t2').as_default():
      with summary_ops.record_summaries_every_n_global_steps(2):
        summary_ops.initialize()
        summary_op = summary_ops.scalar('my_scalar', 2.0)

        # Neither of these should produce a summary because
        # global_step is 1 and "1 % 2 != 0"
        sess.run(summary_ops.all_summary_ops())
        sess.run(summary_op)
        events = summary_test_util.events_from_logdir(logdir)
        self.assertEqual(len(events), 1)

        # Increment global step from 1 to 2 and check that the summary
        # is now written
        sess.run(step_increment)
        sess.run(summary_ops.all_summary_ops())
        events = summary_test_util.events_from_logdir(logdir)
        self.assertEqual(len(events), 2)
        self.assertEqual(events[1].summary.value[0].tag, 'my_scalar')

Author: AbhinavJain13, Project: tensorflow, Lines: 30, Source: summary_ops_graph_test.py
Example 2: testTrainWithSummary

def testTrainWithSummary(self):
  with tf.Graph().as_default():
    images = tf.placeholder(tf.float32, image_shape(None), name='images')
    labels = tf.placeholder(tf.float32, [None, 1000], name='labels')

    tf.train.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with tf.contrib.summary.always_record_summaries():
      with tf.contrib.summary.create_file_writer(
          logdir, max_queue=0,
          name='t0').as_default():
        model = resnet50.ResNet50(data_format())
        logits = model(images, training=True)
        loss = tf.losses.softmax_cross_entropy(
            logits=logits, onehot_labels=labels)
        tf.contrib.summary.scalar(name='loss', tensor=loss)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
        train_op = optimizer.minimize(loss)

    init = tf.global_variables_initializer()
    self.assertEqual(321, len(tf.global_variables()))

    batch_size = 32
    with tf.Session() as sess:
      sess.run(init)
      sess.run(tf.contrib.summary.summary_writer_initializer_op())
      np_images, np_labels = random_batch(batch_size)
      sess.run([train_op, tf.contrib.summary.all_summary_ops()],
               feed_dict={images: np_images, labels: np_labels})

      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].tag, 'loss')

Author: AndrewTwinz, Project: tensorflow, Lines: 33, Source: resnet50_graph_test.py
Example 3: testWriterInitAndClose

def testWriterInitAndClose(self):
  logdir = self.get_temp_dir()
  get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    self.assertEqual(1, get_total())  # file_version Event
    # Calling init() again while writer is open has no effect
    writer.init()
    self.assertEqual(1, get_total())
    try:
      # Not using .as_default() to avoid implicit flush when exiting
      writer.set_as_default()
      summary_ops.scalar('one', 1.0, step=1)
      self.assertEqual(1, get_total())
      # Calling .close() should do an implicit flush
      writer.close()
      self.assertEqual(2, get_total())
      # Calling init() on a closed writer should start a new file
      time.sleep(1.1)  # Ensure filename has a different timestamp
      writer.init()
      files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
      self.assertEqual(2, len(files))
      get_total = lambda: len(summary_test_util.events_from_file(files[1]))
      self.assertEqual(1, get_total())  # file_version Event
      summary_ops.scalar('two', 2.0, step=2)
      writer.close()
      self.assertEqual(2, get_total())
    finally:
      # Clean up by resetting default writer
      summary_ops.create_file_writer(None).set_as_default()

Author: AnishShah, Project: tensorflow, Lines: 31, Source: summary_ops_test.py
Example 4: testWriterInitAndClose

def testWriterInitAndClose(self):
  logdir = self.get_temp_dir()
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    with writer.as_default():
      summary_ops.scalar('one', 1.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    self.assertEqual(1, get_total())  # file_version Event
    # Running init() again while writer is open has no effect
    sess.run(writer.init())
    self.assertEqual(1, get_total())
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    # Running close() should do an implicit flush
    sess.run(writer.close())
    self.assertEqual(2, get_total())
    # Running init() on a closed writer should start a new file
    time.sleep(1.1)  # Ensure filename has a different timestamp
    sess.run(writer.init())
    sess.run(summary_ops.all_summary_ops())
    sess.run(writer.close())
    files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
    self.assertEqual(2, len(files))
    self.assertEqual(2, len(summary_test_util.events_from_file(files[1])))

Author: Ajaycs99, Project: tensorflow, Lines: 27, Source: summary_ops_graph_test.py
Example 5: testSummaryName

def testSummaryName(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(logdir, max_queue=0)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    sess.run(summary_ops.all_summary_ops())
  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(2, len(events))
  self.assertEqual('scalar', events[1].summary.value[0].tag)

Author: Ajaycs99, Project: tensorflow, Lines: 11, Source: summary_ops_graph_test.py
Example 6: testSummaryGlobalStep

def testSummaryGlobalStep(self):
  step = training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t2').as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=step)
  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(len(events), 2)
  self.assertEqual(events[1].summary.value[0].tag, 'scalar')

Author: AnishShah, Project: tensorflow, Lines: 12, Source: summary_ops_test.py
Example 7: testWriteSummaries

def testWriteSummaries(self):
  e = SimpleEvaluator(IdentityModel())
  e(3.0)
  e([5.0, 7.0, 9.0])
  training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  e.all_metric_results(logdir)

  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(len(events), 2)
  self.assertEqual(events[1].summary.value[0].simple_value, 6.0)

Author: AbhinavJain13, Project: tensorflow, Lines: 12, Source: evaluator_test.py
Example 8: testMaxQueue

def testMaxQueue(self):
  logs = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logs, max_queue=2, flush_millis=999999,
      name='lol').as_default(), summary_ops.always_record_summaries():
    get_total = lambda: len(summary_test_util.events_from_logdir(logs))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    summary_ops.scalar('scalar', 2.0, step=1)
    self.assertEqual(1, get_total())
    summary_ops.scalar('scalar', 2.0, step=2)
    self.assertEqual(3, get_total())

Author: dansbecker, Project: tensorflow, Lines: 12, Source: summary_ops_test.py
Example 9: testWriteSummaries

def testWriteSummaries(self):
  m = metrics.Mean()
  m([1, 10, 100])
  training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name="t0").as_default(), summary_ops.always_record_summaries():
    m.result()  # As a side-effect will write summaries.
  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(len(events), 2)
  self.assertEqual(events[1].summary.value[0].simple_value, 37.0)

Author: neuroradiology, Project: tensorflow, Lines: 13, Source: metrics_test.py
Example 10: testSummaryGlobalStep

def testSummaryGlobalStep(self):
  training_util.get_or_create_global_step()
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(logdir, max_queue=0)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0)
  with self.cached_session() as sess:
    sess.run(variables.global_variables_initializer())
    sess.run(summary_ops.summary_writer_initializer_op())
    step, _ = sess.run(
        [training_util.get_global_step(), summary_ops.all_summary_ops()])
  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(2, len(events))
  self.assertEqual(step, events[1].step)

Author: Ajaycs99, Project: tensorflow, Lines: 14, Source: summary_ops_graph_test.py
Example 11: testWriteSummariesGraph

def testWriteSummariesGraph(self):
  with context.graph_mode(), ops.Graph().as_default(), self.test_session():
    e = SimpleEvaluator(IdentityModel())
    ds = dataset_ops.Dataset.from_tensor_slices([3.0, 5.0, 7.0, 9.0])
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    init_op, call_op, results_op = e.evaluate_on_dataset(
        ds, summary_logdir=logdir)
    variables.global_variables_initializer().run()
    e.run_evaluation(init_op, call_op, results_op)

    events = summary_test_util.events_from_logdir(logdir)
    self.assertEqual(len(events), 2)
    self.assertEqual(events[1].summary.value[0].simple_value, 6.0)

Author: AbhinavJain13, Project: tensorflow, Lines: 14, Source: evaluator_test.py
Example 12: testDefunSummarys

def testDefunSummarys(self):
  training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t1').as_default(), summary_ops.always_record_summaries():

    @function.defun
    def write():
      summary_ops.scalar('scalar', 2.0)

    write()
    events = summary_test_util.events_from_logdir(logdir)
    self.assertEqual(len(events), 2)
    self.assertEqual(events[1].summary.value[0].simple_value, 2.0)

Author: AnishShah, Project: tensorflow, Lines: 15, Source: summary_ops_test.py
Example 13: testWriterFlush

def testWriterFlush(self):
  logdir = self.get_temp_dir()
  get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    self.assertEqual(1, get_total())  # file_version Event
    with writer.as_default():
      summary_ops.scalar('one', 1.0, step=1)
      self.assertEqual(1, get_total())
      writer.flush()
      self.assertEqual(2, get_total())
      summary_ops.scalar('two', 2.0, step=2)
    # Exiting the "as_default()" should do an implicit flush of the "two" tag
    self.assertEqual(3, get_total())

Author: AnishShah, Project: tensorflow, Lines: 15, Source: summary_ops_test.py
Example 14: testWriterFlush

def testWriterFlush(self):
  logdir = self.get_temp_dir()
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    with writer.as_default():
      summary_ops.scalar('one', 1.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    self.assertEqual(1, get_total())  # file_version Event
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    sess.run(writer.flush())
    self.assertEqual(2, get_total())

Author: Ajaycs99, Project: tensorflow, Lines: 15, Source: summary_ops_graph_test.py
Example 15: testMaxQueue

def testMaxQueue(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(
      logdir, max_queue=1, flush_millis=999999)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    # Should flush after second summary since max_queue = 1
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(3, get_total())

Author: Ajaycs99, Project: tensorflow, Lines: 16, Source: summary_ops_graph_test.py
Example 16: test_train

def test_train(self):
  device, data_format = device_and_data_format()
  model = resnet50.ResNet50(data_format)
  tf.train.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with tf.contrib.summary.create_file_writer(
      logdir, max_queue=0,
      name='t0').as_default(), tf.contrib.summary.always_record_summaries():
    with tf.device(device):
      optimizer = tf.train.GradientDescentOptimizer(0.1)
      images, labels = random_batch(2)
      train_one_step(model, images, labels, optimizer)
      self.assertEqual(320, len(model.variables))
  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(len(events), 2)
  self.assertEqual(events[1].summary.value[0].tag, 'loss')

Author: Fair-Child, Project: tensorflow, Lines: 16, Source: resnet50_test.py
Example 17: testSummaryGraphModeCond

def testSummaryGraphModeCond(self):
  with ops.Graph().as_default(), self.test_session():
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logdir, max_queue=0,
        name='t2').as_default(), summary_ops.always_record_summaries():
      summary_ops.initialize()
      training_util.get_or_create_global_step().initializer.run()

      def f():
        summary_ops.scalar('scalar', 2.0)
        return constant_op.constant(True)

      pred = array_ops.placeholder(dtypes.bool)
      x = control_flow_ops.cond(pred, f,
                                lambda: constant_op.constant(False))
      x.eval(feed_dict={pred: True})
    events = summary_test_util.events_from_logdir(logdir)
    self.assertEqual(len(events), 2)
    self.assertEqual(events[1].summary.value[0].tag, 'cond/scalar')

Author: AbhinavJain13, Project: tensorflow, Lines: 20, Source: summary_ops_graph_test.py
Example 18: testFlushFunction

def testFlushFunction(self):
  logs = tempfile.mkdtemp()
  writer = summary_ops.create_file_writer(
      logs, max_queue=999999, flush_millis=999999, name='lol')
  with writer.as_default(), summary_ops.always_record_summaries():
    get_total = lambda: len(summary_test_util.events_from_logdir(logs))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    summary_ops.scalar('scalar', 2.0, step=1)
    summary_ops.scalar('scalar', 2.0, step=2)
    self.assertEqual(1, get_total())
    summary_ops.flush()
    self.assertEqual(3, get_total())
    # Test "writer" parameter
    summary_ops.scalar('scalar', 2.0, step=3)
    summary_ops.flush(writer=writer)
    self.assertEqual(4, get_total())
    summary_ops.scalar('scalar', 2.0, step=4)
    summary_ops.flush(writer=writer._resource)  # pylint:disable=protected-access
    self.assertEqual(5, get_total())

Author: AnishShah, Project: tensorflow, Lines: 20, Source: summary_ops_test.py
Example 19: testRecordEveryNGlobalSteps

def testRecordEveryNGlobalSteps(self):
  step = training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()

  def run_step():
    summary_ops.scalar('scalar', i, step=step)
    step.assign_add(1)

  with summary_ops.create_file_writer(
      logdir).as_default(), summary_ops.record_summaries_every_n_global_steps(
          2, step):
    for i in range(10):
      run_step()
    # And another 10 steps as a graph function.
    run_step_fn = function.defun(run_step)
    for i in range(10):
      run_step_fn()

  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(len(events), 11)

Author: Albert-Z-Guo, Project: tensorflow, Lines: 20, Source: summary_ops_test.py
Example 20: testFlushFunction

def testFlushFunction(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(
      logdir, max_queue=999999, flush_millis=999999)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=1)
    flush_op = summary_ops.flush()
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    sess.run(flush_op)
    self.assertEqual(2, get_total())
    # Test "writer" parameter
    sess.run(summary_ops.all_summary_ops())
    sess.run(summary_ops.flush(writer=writer))
    self.assertEqual(3, get_total())
    sess.run(summary_ops.all_summary_ops())
    sess.run(summary_ops.flush(writer=writer._resource))  # pylint:disable=protected-access
    self.assertEqual(4, get_total())

Author: Ajaycs99, Project: tensorflow, Lines: 23, Source: summary_ops_graph_test.py
Note: The tensorflow.contrib.summary.summary_test_util.events_from_logdir examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For redistribution and use, please refer to the corresponding project's license. Do not reproduce without permission.