This article collects typical usage examples of the string_join function from the Python module tensorflow.python.ops.string_ops. If you have been wondering what exactly string_join does, how to call it, or where it is used in practice, the curated examples below should help.
Eleven code examples of string_join are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the site recommend better Python samples.
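Before the examples, a minimal sketch of the API may help orient the reader: string_join(inputs, separator="", name=None) joins a list of string tensors element-wise, broadcasting scalar strings against higher-rank inputs. The sketch below assumes a TF 1.x-style graph session, as the examples in this article do; its expected outputs match the unit test in Example 6.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import string_ops

tf.disable_eager_execution()

# A scalar joins with a scalar...
joined = string_ops.string_join(["key_", "42"])
# ...and a scalar broadcasts against a vector, joining element-wise.
batched = string_ops.string_join([["a", "b"], "a"], separator="--")

with tf.Session() as sess:
  print(sess.run(joined))   # b'key_42'
  print(sess.run(batched))  # [b'a--a' b'b--a']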
Example 1: input_fn
def input_fn():
  start = random_ops.random_uniform(
      (), minval=0, maxval=sequence_length, dtype=dtypes.int32, seed=seed)
  # Concatenate lyrics_list so inputs and labels wrap when start > 0.
  lyrics_list_concat = lyrics_list + lyrics_list
  inputs_dense = array_ops.slice(lyrics_list_concat, [start],
                                 [sequence_length])
  indices = array_ops.constant(
      [[i, 0] for i in range(sequence_length)], dtype=dtypes.int64)
  dense_shape = [sequence_length, 1]
  inputs = sparse_tensor.SparseTensor(
      indices=indices, values=inputs_dense, dense_shape=dense_shape)
  table = lookup.string_to_index_table_from_tensor(
      mapping=list(vocab), default_value=-1, name='lookup')
  labels = table.lookup(
      array_ops.slice(lyrics_list_concat, [start + 1], [sequence_length]))
  input_key = string_ops.string_join([
      'key_', string_ops.as_string(
          random_ops.random_uniform(
              (),
              minval=0,
              maxval=10000000,
              dtype=dtypes.int32,
              seed=seed))
  ])
  return {'lyrics': inputs, input_key_column_name: input_key}, labels
Author: Jackhuang945, Project: tensorflow, Lines: 26, Source: state_saving_rnn_estimator_test.py
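A recurring idiom worth noting: Examples 1, 2, 3, 5, and 9 all build a per-run input key by joining a literal "key_" prefix with a stringified random integer, so each graph run gets a fresh key for the state-saving machinery to track sequences by. Isolated as a self-contained sketch (imports added here; the construction itself is copied from the examples below):

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops

# Evaluates to something like b'key_1234'; a new value on every run.
key = string_ops.string_join([
    "key_", string_ops.as_string(
        math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
])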
Example 2: testStateSaverScopeNames
def testStateSaverScopeNames(self):
  batch_size = constant_op.constant(2)
  sqss_scope_name = "unique_scope_name_for_sqss"
  num_unroll = 2
  length = 3
  key = string_ops.string_join([
      "key_", string_ops.as_string(
          math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
  ])
  padded_length = 4
  sequences = {
      "seq1": np.random.rand(padded_length, 5),
      "seq2": np.random.rand(padded_length, 4, 2)
  }
  context = {"context1": [3, 4]}
  initial_states = {
      "state1": np.random.rand(6, 7),
      "state2": np.random.rand(8)
  }
  state_saver = sqss.SequenceQueueingStateSaver(
      batch_size=batch_size,
      num_unroll=num_unroll,
      input_length=length,
      input_key=key,
      input_sequences=sequences,
      input_context=context,
      initial_states=initial_states,
      name=sqss_scope_name)
  prefetch_op = state_saver.prefetch_op
  next_batch = state_saver.next_batch
  self.assertTrue(
      state_saver.barrier.barrier_ref.name.startswith("%s/" %
                                                      sqss_scope_name))
  self.assertTrue(prefetch_op.name.startswith("%s/" % sqss_scope_name))
  self.assertTrue(next_batch.key.name.startswith("%s/" % sqss_scope_name))
Author: 1000sprites, Project: tensorflow, Lines: 35, Source: sequence_queueing_state_saver_test.py
Example 3: setUp
def setUp(self):
  super(BatchSequencesWithStatesTest, self).setUp()
  self.value_length = 4
  ind1 = np.array([
      [0, 0],
      [1, 0], [1, 3], [1, 4],
      [3, 2], [3, 3]])
  val1 = np.array([0, 10, 13, 14, 32, 33])
  shape1 = np.array([self.value_length, 6])
  sp_tensor1 = sparse_tensor.SparseTensor(
      array_ops.constant(ind1, dtypes.int64),
      array_ops.constant(val1, dtypes.int64),
      array_ops.constant(shape1, dtypes.int64))
  ind2 = np.array([
      [0, 0, 1],
      [0, 1, 0],
      [0, 1, 2],
      [1, 0, 3],
      [1, 1, 0],
      [1, 1, 1],
      [1, 1, 2],
      [1, 2, 2]])
  val2 = np.array([1, 10, 12, 103, 150, 149, 150, 122])
  shape2 = np.array([self.value_length, 3, 4])
  sp_tensor2 = sparse_tensor.SparseTensor(
      array_ops.constant(ind2, dtypes.int64),
      array_ops.constant(val2, dtypes.int64),
      array_ops.constant(shape2, dtypes.int64))
  sp_tensor3 = sparse_tensor.SparseTensor(
      array_ops.constant([[1, 9], [2, 2], [2, 10]], dtypes.int64),
      array_ops.constant([7, 15, 2], dtypes.int64),
      array_ops.constant([5, 12], dtypes.int64))
  self.sp_tensor3_expected = sparse_tensor.SparseTensorValue(
      [[0, 1, 9], [0, 2, 2], [0, 2, 10], [1, 1, 9], [1, 2, 2], [1, 2, 10]],
      [7, 15, 2, 7, 15, 2],
      [2, 5, 12])
  self.batch_size = 2
  self.key = string_ops.string_join([
      "key_", string_ops.as_string(
          math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
  ])
  self.sequences = {
      "seq1": np.random.rand(self.value_length, 5),
      "seq2": np.random.rand(self.value_length, 4, 2),
      "seq3": sp_tensor1,
      "seq4": sp_tensor2}
  self.context = {
      "context1": [3, 4],
      "sp_context": sp_tensor3}
  self.initial_states = {
      "state1": np.random.rand(6, 7),
      "state2": np.random.rand(8)
  }
Author: AbhinavJain13, Project: tensorflow, Lines: 55, Source: batch_sequences_with_states_test.py
Example 4: testValidPipelineWithRangeDataset
def testValidPipelineWithRangeDataset(self, shuffle):
  dataset = dataset_ops.Dataset.range(self._num_files)
  dataset = dataset.map(lambda n: string_ops.string_join(  # pylint:disable=g-long-lambda
      [self.get_temp_dir(),
       string_ops.string_format("/tf_record.{}.txt", [n])]))
  dataset = dataset.apply(
      interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
  dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
  dataset = dataset.batch(5)
  dataset = distribute._AutoShardDataset(dataset, 5, 3)

  expected = [
      b"cord %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
      for r in range(0, 10)
      for f in (3, 8)
  ]
  self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
Author: aritratony, Project: tensorflow, Lines: 17, Source: auto_shard_dataset_test.py
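Example 4's map lambda uses string_join together with string_ops.string_format to synthesize one file path per element of a range dataset. The same pattern in isolation, using the public TF 2.x aliases tf.strings.join and tf.strings.format (a sketch; the directory name is illustrative, not from the test, which uses get_temp_dir()):

import tensorflow as tf

base_dir = tf.constant("/tmp/data")  # illustrative directory
n = tf.constant(3, dtype=tf.int64)
# Format the index into a filename, then join it onto the directory.
path = tf.strings.join(
    [base_dir, tf.strings.format("/tf_record.{}.txt", [n])])
print(path.numpy())  # b'/tmp/data/tf_record.3.txt'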
Example 5: setUp
def setUp(self):
  super(BatchSequencesWithStatesTest, self).setUp()
  self.value_length = 4
  self.batch_size = 2
  self.key = string_ops.string_join([
      "key_", string_ops.as_string(
          math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
  ])
  self.sequences = {
      "seq1": np.random.rand(self.value_length, 5),
      "seq2": np.random.rand(self.value_length, 4, 2)
  }
  self.context = {"context1": [3, 4]}
  self.initial_states = {
      "state1": np.random.rand(6, 7),
      "state2": np.random.rand(8)
  }
Author: AliMiraftab, Project: tensorflow, Lines: 17, Source: batch_sequences_with_states_test.py
Example 6: testStringJoin
def testStringJoin(self):
  input0 = ["a", "b"]
  input1 = "a"
  input2 = [["b"], ["c"]]

  with self.cached_session():
    output = string_ops.string_join([input0, input1])
    self.assertAllEqual(output.eval(), [b"aa", b"ba"])

    output = string_ops.string_join([input0, input1], separator="--")
    self.assertAllEqual(output.eval(), [b"a--a", b"b--a"])

    output = string_ops.string_join([input0, input1, input0], separator="--")
    self.assertAllEqual(output.eval(), [b"a--a--a", b"b--a--b"])

    output = string_ops.string_join([input1] * 4, separator="!")
    self.assertEqual(output.eval(), b"a!a!a!a")

    output = string_ops.string_join([input2] * 2, separator="")
    self.assertAllEqual(output.eval(), [[b"bb"], [b"cc"]])

    with self.assertRaises(ValueError):  # Inconsistent shapes
      string_ops.string_join([input0, input2]).eval()
Author: HughKu, Project: tensorflow, Lines: 23, Source: string_join_op_test.py
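For readers on TF 2.x: string_ops.string_join is exposed publicly as tf.strings.join, and the element-wise broadcasting exercised above works the same way in eager mode. A quick sketch:

import tensorflow as tf

# Scalar "a" broadcasts against the vector ["a", "b"].
print(tf.strings.join([["a", "b"], "a"], separator="--").numpy())
# [b'a--a' b'b--a']
print(tf.strings.join(["a"] * 4, separator="!").numpy())
# b'a!a!a!a'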
Example 7: _train_op_fn
def _train_op_fn(loss):
  return string_ops.string_join(
      [constant_op.constant(expected_train_result),
       string_ops.as_string(loss, precision=3)])
Author: andrewharp, Project: tensorflow, Lines: 4, Source: head_test.py
Example 8: minimize
def minimize(self, loss, global_step):
  del global_step
  return string_ops.string_join(
      [constant_op.constant(expected_train_result),
       string_ops.as_string(loss, precision=3)])
Author: syed-ahmed, Project: tensorflow, Lines: 5, Source: multi_head_test.py
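Examples 7 and 8 are a testing trick rather than production code: instead of a real train op, the fake train function returns a string tensor joining an expected marker with the loss rendered to three decimal places, so the surrounding head test can assert that the loss was threaded through to the optimizer. The formatting step in isolation, assuming TF 2.x eager mode (the marker string here is illustrative; the tests use expected_train_result):

import tensorflow as tf

loss = tf.constant(1.0)
marker = tf.constant("my_train_op:")  # illustrative marker, not from the tests
# as_string(..., precision=3) renders the float to 3 decimals, e.g. b'1.000'.
fake_train_op = tf.strings.join(
    [marker, tf.strings.as_string(loss, precision=3)])
print(fake_train_op.numpy())  # b'my_train_op:1.000'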
Example 9: testStateSaverWithTwoSimpleSteps
def testStateSaverWithTwoSimpleSteps(self):
  with self.test_session() as sess:
    batch_size_value = 2
    batch_size = constant_op.constant(batch_size_value)
    num_unroll = 2
    length = 3
    key = string_ops.string_join([
        "key_", string_ops.as_string(
            math_ops.cast(10000 * random_ops.random_uniform(()),
                          dtypes.int32))
    ])
    padded_length = 4
    sequences = {
        "seq1": np.random.rand(padded_length, 5),
        "seq2": np.random.rand(padded_length, 4, 2)
    }
    context = {"context1": [3, 4]}
    initial_states = {
        "state1": np.random.rand(6, 7),
        "state2": np.random.rand(8)
    }
    state_saver = sqss.SequenceQueueingStateSaver(
        batch_size=batch_size,
        num_unroll=num_unroll,
        input_length=length,
        input_key=key,
        input_sequences=sequences,
        input_context=context,
        initial_states=initial_states,
        capacity=100)

    initial_key_value_0, _ = sess.run((key, state_saver.prefetch_op))
    initial_key_value_1, _ = sess.run((key, state_saver.prefetch_op))

    initial_key_value_0 = initial_key_value_0.decode("ascii")
    initial_key_value_1 = initial_key_value_1.decode("ascii")

    # Step 1
    next_batch = state_saver.next_batch
    (key_value, next_key_value, seq1_value, seq2_value, context1_value,
     state1_value, state2_value, length_value, _, _) = sess.run(
         (next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
          next_batch.sequences["seq2"], next_batch.context["context1"],
          next_batch.state("state1"), next_batch.state("state2"),
          next_batch.length,
          next_batch.save_state("state1", next_batch.state("state1") + 1),
          next_batch.save_state("state2", next_batch.state("state2") - 1)))

    expected_first_keys = set(
        ("00000_of_00002:%s" % x).encode("ascii")
        for x in (initial_key_value_0, initial_key_value_1))
    expected_second_keys = set(
        ("00001_of_00002:%s" % x).encode("ascii")
        for x in (initial_key_value_0, initial_key_value_1))
    expected_final_keys = set(
        ("STOP:%s" % x).encode("ascii")
        for x in (initial_key_value_0, initial_key_value_1))

    self.assertEqual(set(key_value), expected_first_keys)
    self.assertEqual(set(next_key_value), expected_second_keys)
    self.assertAllEqual(context1_value,
                        np.tile(context["context1"], (batch_size_value, 1)))
    self.assertAllEqual(seq1_value,
                        np.tile(sequences["seq1"][np.newaxis, 0:2, :],
                                (batch_size_value, 1, 1)))
    self.assertAllEqual(seq2_value,
                        np.tile(sequences["seq2"][np.newaxis, 0:2, :, :],
                                (batch_size_value, 1, 1, 1)))
    self.assertAllEqual(state1_value,
                        np.tile(initial_states["state1"],
                                (batch_size_value, 1, 1)))
    self.assertAllEqual(state2_value,
                        np.tile(initial_states["state2"],
                                (batch_size_value, 1)))
    self.assertAllEqual(length_value, [2, 2])

    # Step 2
    (key_value, next_key_value, seq1_value, seq2_value, context1_value,
     state1_value, state2_value, length_value, _, _) = sess.run(
         (next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
          next_batch.sequences["seq2"], next_batch.context["context1"],
          next_batch.state("state1"), next_batch.state("state2"),
          next_batch.length,
          next_batch.save_state("state1", next_batch.state("state1") + 1),
          next_batch.save_state("state2", next_batch.state("state2") - 1)))

    self.assertEqual(set(key_value), expected_second_keys)
    self.assertEqual(set(next_key_value), expected_final_keys)
    self.assertAllEqual(context1_value,
                        np.tile(context["context1"], (batch_size_value, 1)))
    self.assertAllEqual(seq1_value,
                        np.tile(sequences["seq1"][np.newaxis, 2:4, :],
                                (batch_size_value, 1, 1)))
    self.assertAllEqual(seq2_value,
                        np.tile(sequences["seq2"][np.newaxis, 2:4, :, :],
                                (batch_size_value, 1, 1, 1)))
    self.assertAllEqual(state1_value, 1 + np.tile(initial_states["state1"],
                                                  (batch_size_value, 1, 1)))
    self.assertAllEqual(state2_value, -1 + np.tile(initial_states["state2"],
                                                   (batch_size_value, 1)))
    # ... (remainder of this example omitted) ...
Author: 1000sprites, Project: tensorflow, Lines: 101, Source: sequence_queueing_state_saver_test.py
Example 10: save
def save(self, file_prefix):
  """Save the saveable objects to a checkpoint with `file_prefix`.

  Args:
    file_prefix: A string or scalar string Tensor containing the prefix to
      save under.

  Returns:
    An `Operation`, or None when executing eagerly.
  """
  # IMPLEMENTATION DETAILS: most clients should skip.
  #
  # Suffix for any well-formed "checkpoint_prefix", when sharded.
  # Transformations:
  # * Users pass in "save_path" in save() and restore(). Say "myckpt".
  # * checkpoint_prefix gets fed <save_path><sharded_suffix>.
  #
  # Example:
  #   During runtime, a temporary directory is first created, which contains
  #   files
  #
  #     <train dir>/myckpt_temp/
  #        part-?????-of-?????{.index, .data-00000-of-00001}
  #
  #   Before .save() finishes, they will be (hopefully, atomically) renamed to
  #
  #     <train dir>/
  #        myckpt{.index, .data-?????-of-?????}
  #
  # Users only need to interact with the user-specified prefix, which is
  # "<train dir>/myckpt" in this case. Save() and Restore() work with the
  # prefix directly, instead of any physical pathname. (On failure and
  # subsequent restore, an outdated and orphaned temporary directory can be
  # safely removed.)
  sharded_suffix = "_temp_%s/part" % uuid.uuid4().hex
  with ops.device("cpu:0"):
    tmp_checkpoint_prefix = string_ops.string_join(
        [file_prefix, sharded_suffix])

  num_shards = len(self._single_device_savers)
  sharded_saves = []
  sharded_prefixes = []
  num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
  last_device = None
  for shard, (device, saver) in enumerate(
      sorted(self._single_device_savers.items())):
    last_device = device
    with ops.device(saveable_object_util.set_cpu0(device)):
      shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard,
                                      num_shards_tensor)
    sharded_prefixes.append(shard_prefix)
    with ops.device(device):
      # _SingleDeviceSaver will use the CPU device when necessary, but
      # initial read operations should be placed on the SaveableObject's
      # device.
      sharded_saves.append(saver.save(shard_prefix))

  with ops.control_dependencies(sharded_saves):
    # Co-locates the merge step with the last device.
    with ops.device(saveable_object_util.set_cpu0(last_device)):
      # V2 format write path consists of a metadata merge step. Once merged,
      # attempts to delete the temporary directory,
      # "<user-fed prefix>_temp".
      return gen_io_ops.merge_v2_checkpoints(
          sharded_prefixes, file_prefix, delete_old_dirs=True)
Author: adit-chandra, Project: tensorflow, Lines: 63, Source: functional_saver.py
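The single string_join call in Example 10 only builds the temporary sharded prefix; the rest is the V2 checkpoint merge-and-rename protocol described in the comment block. The prefix construction in isolation (a sketch; the path is an illustrative stand-in for the user-supplied prefix):

import uuid

from tensorflow.python.ops import string_ops

file_prefix = "/train/myckpt"  # illustrative user-supplied prefix
sharded_suffix = "_temp_%s/part" % uuid.uuid4().hex
# Result is e.g. "/train/myckpt_temp_<hex>/part"; each shard then gets its
# own part-NNNNN-of-NNNNN filename under this prefix.
tmp_prefix = string_ops.string_join([file_prefix, sharded_suffix])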
Example 11: fn
def fn(entry_lt):
  op = string_ops.string_join([entry_lt, 'world'])
  return core.LabeledTensor(op, [])
Author: Ajaycs99, Project: tensorflow, Lines: 3, Source: ops_test.py
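Example 11 comes from the contrib labeled_tensor tests: string_join appends the literal 'world' to the labeled tensor's underlying string op, and the scalar result is re-wrapped as a core.LabeledTensor with an empty axis list.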
Note: The tensorflow.python.ops.string_ops.string_join examples in this article were compiled from open-source projects hosted on GitHub, MSDocs, and similar code and documentation platforms. The snippets are selected from projects contributed by their respective authors, and copyright remains with the original authors; consult the corresponding project's License before use, and do not redistribute without permission.