This article collects typical usage examples of the tensorflow.python.ops.math_ops.to_int64 function in Python. If you are unsure what to_int64 does, how to call it, or want to see it used in real code, the curated examples below should help.
The following presents 20 code examples of to_int64, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python samples.
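Before the examples, a minimal sketch of what `to_int64` itself does: it is a thin TF 1.x helper that casts a tensor to `tf.int64`, later deprecated in favor of `tf.cast`. The snippet below is illustrative, not taken from the examples:

```python
# Minimal sketch: math_ops.to_int64(x) behaves like tf.cast(x, tf.int64).
import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([1, 2, 3], dtype=tf.int32)
y = math_ops.to_int64(x)    # internal helper used throughout the examples
z = tf.cast(x, tf.int64)    # equivalent public, forward-compatible spelling
```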
Example 1: _get_eval_ops
def _get_eval_ops(self, features, targets, metrics):
  features, _, spec = data_ops.ParseDataTensorOrDict(features)
  labels = data_ops.ParseLabelTensorOrDict(targets)
  _assert_float32(features)
  _assert_float32(labels)

  graph_builder = self.graph_builder_class(
      self.params, device_assigner=self.device_assigner, training=False,
      **self.construction_args)

  probabilities = graph_builder.inference_graph(features, data_spec=spec)

  # One-hot the labels.
  if not self.params.regression:
    labels = math_ops.to_int64(array_ops.one_hot(math_ops.to_int64(
        array_ops.squeeze(labels)), self.params.num_classes, 1, 0))

  if metrics is None:
    metrics = {self.accuracy_metric:
               eval_metrics.get_metric(self.accuracy_metric)}

  result = {}
  for name, metric in six.iteritems(metrics):
    result[name] = metric(probabilities, labels)
  return result
Author: Nishant23 | Project: tensorflow | Lines: 26 | Source file: random_forest.py
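The one-hot step above can be read in isolation. Here is a hedged standalone sketch of it with public ops; the class count and labels are made up for illustration:

```python
# Toy version of the label one-hot conversion in Example 1.
import tensorflow as tf

labels = tf.constant([[2], [0], [1]])  # shape [batch_size, 1], hypothetical
num_classes = 3
one_hot = tf.one_hot(tf.cast(tf.squeeze(labels), tf.int64),
                     num_classes, 1, 0)        # on_value=1, off_value=0
one_hot64 = tf.cast(one_hot, tf.int64)         # the outer to_int64 in the example
```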
Example 2: generate_sequence_output
def generate_sequence_output(num_encoder_symbols,
                             encoder_outputs, encoder_state, targets,
                             sequence_length, num_decoder_symbols, weights,
                             buckets, softmax_loss_function=None,
                             per_example_loss=False, name=None,
                             use_attention=False):
  if len(targets) < buckets[-1][1]:
    raise ValueError("Length of targets (%d) must be at least that of last "
                     "bucket (%d)." % (len(targets), buckets[-1][1]))

  all_inputs = encoder_outputs + targets + weights
  with ops.op_scope(all_inputs, name, "model_with_buckets"):
    with variable_scope.variable_scope("decoder_sequence_output", reuse=None):
      logits, attention_weights = attention_RNN(encoder_outputs,
                                                encoder_state,
                                                num_decoder_symbols,
                                                sequence_length,
                                                use_attention=use_attention)
      if per_example_loss is None:
        assert len(logits) == len(targets)
        # We need to make the targets int64 tensors and set their shape.
        bucket_target = [array_ops.reshape(math_ops.to_int64(x), [-1])
                         for x in targets]
        crossent = sequence_loss_by_example(
            logits, bucket_target, weights,
            softmax_loss_function=softmax_loss_function)
      else:
        assert len(logits) == len(targets)
        bucket_target = [array_ops.reshape(math_ops.to_int64(x), [-1])
                         for x in targets]
        crossent = sequence_loss(
            logits, bucket_target, weights,
            softmax_loss_function=softmax_loss_function)

  return logits, crossent
Author: bwang514 | Project: rnn-nlu | Lines: 31 | Source file: seq_labeling.py
Example 3: to_weighted_sum
def to_weighted_sum(self,
                    input_tensor,
                    num_outputs=1,
                    weight_collections=None,
                    trainable=True):
  """Returns a Tensor as linear predictions and a list of created Variables."""
  dimension = self.source_column.dimension
  batch_size = array_ops.shape(input_tensor)[0]

  if dimension > 1:
    i1 = array_ops.reshape(array_ops.tile(array_ops.expand_dims(
        math_ops.range(0, batch_size), 1), [1, dimension]), [-1])
    i2 = array_ops.tile(math_ops.range(0, dimension), [batch_size])
    # Flatten the bucket indices and make them unique across dimensions,
    # e.g. with k buckets the 2nd-dimension indices range from k to 2*k-1.
    # TODO(chapelle): move that logic to insert_transformed_feature to ensure
    # unique buckets across dimensions after crossing.
    bucket_indices = array_ops.reshape(input_tensor, [-1]) + self.length * i2
  else:
    # Simpler indices when dimension=1.
    i1 = math_ops.range(0, batch_size)
    i2 = array_ops.zeros([batch_size], dtype=dtypes.int32)
    bucket_indices = array_ops.reshape(input_tensor, [-1])

  indices = math_ops.to_int64(array_ops.transpose(array_ops.pack((i1, i2))))
  shape = math_ops.to_int64(array_ops.pack([batch_size, 1]))
  sparse_id_values = ops.SparseTensor(indices, bucket_indices, shape)
  vocab_size = self.length * self.source_column.dimension

  return _create_embedding_lookup(
      sparse_id_values, vocab_size, num_outputs,
      _add_variable_collection(weight_collections), 0., "sum",
      trainable, self.name + "_weights")
Author: YanLongDong | Project: tensorflow | Lines: 33 | Source file: feature_column.py
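To see why the `self.length * i2` shift yields unique bucket ids across dimensions, here is a toy sketch with made-up sizes (2 buckets, dimension 2, batch 2):

```python
import tensorflow as tf

input_tensor = tf.constant([[0, 1], [1, 0]])   # bucketized values, [batch, dim]
length, dimension, batch_size = 2, 2, 2        # hypothetical sizes
i2 = tf.tile(tf.range(dimension), [batch_size])           # [0, 1, 0, 1]
bucket_indices = tf.reshape(input_tensor, [-1]) + length * i2
# dim-0 ids stay in [0, 2); dim-1 ids are shifted into [2, 4) -> [0, 3, 1, 2]
```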
Example 4: _call_cell
def _call_cell(self,
               inputs,
               initial_cell_state=None,
               initial_output=None,
               dtype=None,
               sequence_length=None):
  """Run this LSTM on inputs, starting from the given state.

  Args:
    inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`.
    initial_cell_state: initial value for cell state, shape `[batch_size,
      self._num_units]`.
    initial_output: initial value of cell output, shape `[batch_size,
      self._num_units]`.
    dtype: The data type for the initial state and expected output.
    sequence_length: Specifies the length of each sequence in inputs. An
      `int32` or `int64` vector (tensor) of size `[batch_size]`, values in
      `[0, time_len)`, or None.

  Returns:
    A pair containing:

    - Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
      output_size]`.
    - Output (h): A `3-D` tensor of shape `[time_len, batch_size,
      output_size]`.
  """
  inputs_shape = inputs.get_shape().with_rank(3)
  time_len = inputs_shape[0].value
  if time_len is None:
    time_len = array_ops.shape(inputs)[0]

  if self._use_peephole:
    wci = self._w_i_diag
    wco = self._w_o_diag
    wcf = self._w_f_diag
  else:
    wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)

  if sequence_length is None:
    max_seq_len = math_ops.to_int64(time_len)
  else:
    max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))

  _, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
      seq_len_max=max_seq_len,
      x=inputs,
      cs_prev=initial_cell_state,
      h_prev=initial_output,
      w=self._kernel,
      wci=wci,
      wcf=wcf,
      wco=wco,
      b=self._bias,
      forget_bias=self._forget_bias,
      cell_clip=self._cell_clip,
      use_peephole=self._use_peephole)
  return cs, h
Author: AndrewTwinz | Project: tensorflow | Lines: 59 | Source file: lstm_ops.py
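The only `to_int64` use here is the `seq_len_max` input of the fused kernel, which expects an int64 scalar. A standalone equivalent with public ops, on made-up lengths:

```python
import tensorflow as tf

sequence_length = tf.constant([3, 5, 4], dtype=tf.int32)  # example lengths
max_seq_len = tf.cast(tf.reduce_max(sequence_length), tf.int64)
```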
Example 5: sparse_feature_cross
def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
                         name=None):
  """Crosses a list of Tensor or SparseTensor objects.

  See sparse_cross_op.cc for more details.

  Args:
    inputs: List of `SparseTensor` or `Tensor` to be crossed.
    hashed_output: If true, returns the hash of the cross instead of the
      string. This allows us to avoid string manipulations.
    num_buckets: Used only if hashed_output is true.
      output = hashed_value % num_buckets if num_buckets > 0 else hashed_value.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `SparseTensor` with the crossed features.
    The return type is string if hashed_output=False, int64 otherwise.

  Raises:
    TypeError: If the inputs aren't either SparseTensor or Tensor.
  """
  if not isinstance(inputs, list):
    raise TypeError("Inputs must be a list")
  if not all(isinstance(i, ops.SparseTensor) or
             isinstance(i, ops.Tensor) for i in inputs):
    raise TypeError("All inputs must be SparseTensor or Tensor")

  sparse_inputs = [i for i in inputs if isinstance(i, ops.SparseTensor)]
  dense_inputs = [i for i in inputs if not isinstance(i, ops.SparseTensor)]

  indices = [sp_input.indices for sp_input in sparse_inputs]
  values = [sp_input.values for sp_input in sparse_inputs]
  shapes = [sp_input.shape for sp_input in sparse_inputs]
  out_type = dtypes.int64 if hashed_output else dtypes.string

  internal_type = dtypes.string
  for i in range(len(values)):
    if values[i].dtype != dtypes.string:
      values[i] = math_ops.to_int64(values[i])
      internal_type = dtypes.int64
  for i in range(len(dense_inputs)):
    if dense_inputs[i].dtype != dtypes.string:
      dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
      internal_type = dtypes.int64

  indices_out, values_out, shape_out = (
      _sparse_feature_cross_op.sparse_feature_cross(indices,
                                                    values,
                                                    shapes,
                                                    dense_inputs,
                                                    hashed_output,
                                                    num_buckets,
                                                    out_type=out_type,
                                                    internal_type=internal_type,
                                                    name=name))
  return ops.SparseTensor(indices_out, values_out, shape_out)
Author: 285219011 | Project: hello-world | Lines: 56 | Source file: sparse_feature_cross_op.py
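The two casting loops normalize every non-string input to int64 so the cross op sees a single internal integer type. The same idea, sketched with public ops on made-up inputs:

```python
import tensorflow as tf

values = [tf.constant([1, 2], dtype=tf.int32), tf.constant(["a", "b"])]
values = [v if v.dtype == tf.string else tf.cast(v, tf.int64) for v in values]
internal_type = (tf.int64 if any(v.dtype == tf.int64 for v in values)
                 else tf.string)
```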
Example 6: testDistribution
def testDistribution(self, initial_known):
  classes = np.random.randint(5, size=(20000,))  # Uniformly sampled.
  target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
  initial_dist = [0.2] * 5 if initial_known else None
  classes = math_ops.to_int64(classes)  # Needed for the Windows build.
  dataset = dataset_ops.Dataset.from_tensor_slices(classes).shuffle(
      200, seed=21).map(lambda c: (c, string_ops.as_string(c))).repeat()

  get_next = dataset.apply(
      resampling.rejection_resample(
          target_dist=target_dist,
          initial_dist=initial_dist,
          class_func=lambda c, _: c,
          seed=27)).make_one_shot_iterator().get_next()

  with self.cached_session() as sess:
    returned = []
    while len(returned) < 4000:
      returned.append(sess.run(get_next))

    returned_classes, returned_classes_and_data = zip(*returned)
    _, returned_data = zip(*returned_classes_and_data)
    self.assertAllEqual([compat.as_bytes(str(c))
                         for c in returned_classes], returned_data)
    total_returned = len(returned_classes)
    class_counts = np.array([
        len([True for v in returned_classes if v == c])
        for c in range(5)])
    returned_dist = class_counts / total_returned
    self.assertAllClose(target_dist, returned_dist, atol=1e-2)
Author: AnishShah | Project: tensorflow | Lines: 30 | Source file: resample_test.py
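The test exercises the internal `resampling` module; a rough public counterpart (assuming `tf.data.experimental.rejection_resample` is available in your build, and noting that the transformation emits `(class, element)` pairs) might look like this:

```python
import numpy as np
import tensorflow as tf

classes = np.random.randint(5, size=(1000,)).astype(np.int64)
dataset = tf.data.Dataset.from_tensor_slices(classes)
resampled = dataset.apply(tf.data.experimental.rejection_resample(
    class_func=lambda c: c,
    target_dist=[0.9, 0.05, 0.05, 0.0, 0.0],
    initial_dist=[0.2] * 5))
```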
Example 7: make_splits
def make_splits(self, stamp_token, next_stamp_token, class_id):
  """Create the best split using the accumulated stats and flush the state."""
  # Get the aggregated gradients and hessians per <partition_id, feature_id>
  # pair.
  num_minibatches, partition_ids, feature_ids, gradients, hessians = (
      self._stats_accumulator.flush(stamp_token, next_stamp_token))

  # For sum reduction, we don't need to divide by the number of minibatches.
  num_minibatches = control_flow_ops.cond(
      ops.convert_to_tensor(self._loss_uses_sum_reduction),
      lambda: math_ops.to_int64(1), lambda: num_minibatches)
  partition_ids, gains, split_infos = (
      split_handler_ops.build_categorical_equality_splits(
          num_minibatches=num_minibatches,
          partition_ids=partition_ids,
          feature_ids=feature_ids,
          gradients=gradients,
          hessians=hessians,
          class_id=class_id,
          feature_column_group_id=self._feature_column_group_id,
          l1_regularization=self._l1_regularization,
          l2_regularization=self._l2_regularization,
          tree_complexity_regularization=self._tree_complexity_regularization,
          min_node_weight=self._min_node_weight,
          bias_feature_id=_BIAS_FEATURE_ID,
          multiclass_strategy=self._multiclass_strategy,
          weak_learner_type=self._weak_learner_type))

  # No warm-up rounds are needed in the equality column handler, so we
  # always return ready.
  are_splits_ready = constant_op.constant(True)
  return (are_splits_ready, partition_ids, gains, split_infos)
Author: AnishShah | Project: tensorflow | Lines: 31 | Source file: categorical_split_handler.py
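The `cond` around `num_minibatches` is the interesting bit: under SUM-style loss reduction the divisor is pinned to one. A minimal sketch with public ops and a hypothetical flag tensor:

```python
import tensorflow as tf

loss_uses_sum_reduction = tf.constant(True)      # hypothetical flag tensor
num_minibatches = tf.constant(8, dtype=tf.int64)
num_minibatches = tf.cond(loss_uses_sum_reduction,
                          lambda: tf.constant(1, dtype=tf.int64),
                          lambda: num_minibatches)
```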
Example 8: _process_labels
def _process_labels(self, labels):
  if labels is None:
    raise ValueError(
        'You must provide a labels Tensor. Given: None. '
        'Suggested troubleshooting steps: Check that your data contains '
        'your label feature. Check that your input_fn properly parses and '
        'returns labels.')
  if isinstance(labels, sparse_tensor.SparseTensor):
    if labels.dtype == dtypes.string:
      label_ids_values = lookup_ops.index_table_from_tensor(
          vocabulary_list=tuple(self._label_vocabulary),
          name='class_id_lookup').lookup(labels.values)
      label_ids = sparse_tensor.SparseTensor(
          indices=labels.indices,
          values=label_ids_values,
          dense_shape=labels.dense_shape)
    else:
      label_ids = labels
    return math_ops.to_int64(
        sparse_ops.sparse_to_indicator(label_ids, self._n_classes))
  msg = ('labels shape must be [batch_size, {}]. '
         'Given: ').format(self._n_classes)
  labels_shape = array_ops.shape(labels)
  check_rank_op = control_flow_ops.Assert(
      math_ops.equal(array_ops.rank(labels), 2),
      data=[msg, labels_shape])
  check_label_dim = control_flow_ops.Assert(
      math_ops.equal(labels_shape[-1], self._n_classes),
      data=[msg, labels_shape])
  with ops.control_dependencies([check_rank_op, check_label_dim]):
    return array_ops.identity(labels)
Author: alexsax | Project: tensorflow | Lines: 31 | Source file: head.py
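The sparse branch converts sparse label ids into a multi-hot indicator matrix and then widens it to int64. A hedged sketch, assuming the public `tf.sparse.to_indicator` matches the internal `sparse_to_indicator` used here:

```python
import tensorflow as tf

label_ids = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                            values=tf.constant([0, 2, 1], dtype=tf.int64),
                            dense_shape=[2, 2])
multi_hot = tf.cast(tf.sparse.to_indicator(label_ids, vocab_size=3), tf.int64)
# row 0 -> [1, 0, 1], row 1 -> [0, 1, 0]
```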
Example 9: Forward
def Forward(*args):
  """Forward pass of the recurrent net."""
  theta, state0, inputs, max_input_length, extras = _Pack(args, forward_sig)

  slen_dim = _SeqLenDim(inputs)

  # Creates accumulators for state0 and extras.
  acc_state = _EmptyAcc(slen_dim, state0)
  acc_extras = _EmptyAcc(slen_dim, extras)

  t = slen_dim - max_input_length if self._aligned_end else 0
  dev_t = math_ops.to_int32(t) if use_tpu else math_ops.to_int64(t)
  run = functional_ops.For(
      start=t,
      limit=slen_dim if self._aligned_end else max_input_length,
      delta=1,
      inputs=[dev_t] + _Flatten(
          [theta, state0, inputs, acc_state, acc_extras]),
      body=ForwardLoopBody,
      rewrite_with_while=compiled)
  _, state1, _, acc_state, acc_extras = _Pack(
      run[1:],
      [self._theta, self._state, self._inputs, self._state, self._extras])

  return _Flatten([acc_state, state1, acc_extras])
Author: Ajaycs99 | Project: tensorflow | Lines: 25 | Source file: recurrent.py
Example 10: split
def split(self, value, lengths, name=None):
  """See TensorArray."""
  with ops.name_scope(name, "TensorArraySplit", [self._flow, value, lengths]):
    value = ops.convert_to_tensor(value, name="value")
    lengths_64 = math_ops.to_int64(lengths)
    if self._infer_shape and not context.executing_eagerly():
      clengths = tensor_util.constant_value(lengths_64)
      if value.shape.dims is not None:
        if clengths is not None and clengths.max() == clengths.min():
          self._merge_element_shape(
              tensor_shape.TensorShape([clengths[0]]).concatenate(
                  value.shape[1:]))
    flow_out = list_ops.tensor_list_split(
        tensor=value,
        lengths=lengths_64,
        element_shape=self._element_shape[0] if self._element_shape else None,
        name=name)
    ta = TensorArray(
        dtype=self._dtype,
        handle=self.handle,
        flow=flow_out,
        colocate_with_first_write_call=self._colocate_with_first_write_call)
    ta._infer_shape = self._infer_shape
    ta._element_shape = self._element_shape
    ta._colocate_with = self._colocate_with
    return ta
Author: aeverall | Project: tensorflow | Lines: 26 | Source file: tensor_array_ops.py
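From the caller's side, `lengths` may be int32; the widening to int64 happens inside. A small usage sketch (TF 2.x eager, assuming equal split lengths):

```python
import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=2)
ta = ta.split(tf.constant([1., 2., 3., 4.]), lengths=tf.constant([2, 2]))
first = ta.read(0)   # [1., 2.]
```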
Example 11: _call_cell
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
               sequence_length):
  """Run this LSTM on inputs, starting from the given state.

  Args:
    inputs: `3-D` tensor with shape `[time_len x batch_size x input_size]`.
    initial_cell_state: initial value for cell state, shape `[batch_size,
      self._num_units]`.
    initial_output: initial value of cell output, shape `[batch_size,
      self._num_units]`.
    dtype: The data type for the initial state and expected output.
    sequence_length: Specifies the length of each sequence in inputs. An
      int32 or int64 vector (tensor) of size [batch_size], values in
      [0, time_len), or None.

  Returns:
    A pair containing:

    - Cell state (cs): A `3-D` tensor of shape `[time_len x batch_size x
      output_size]`.
    - Output (h): A `3-D` tensor of shape `[time_len x batch_size x
      output_size]`.
  """
  inputs_shape = inputs.get_shape().with_rank(3)
  time_len = inputs_shape[0].value
  if time_len is None:
    time_len = array_ops.shape(inputs)[0]
  input_size = inputs_shape[2].value
  w = vs.get_variable(
      "W_0", [input_size + self._num_units, self._num_units * 4], dtype=dtype)
  b = vs.get_variable(
      "B", [w.get_shape().with_rank(2)[1]],
      initializer=init_ops.constant_initializer(0.0),
      dtype=dtype)
  if self._use_peephole:
    wci = vs.get_variable("W_I_diag", [self._num_units], dtype=dtype)
    wco = vs.get_variable("W_O_diag", [self._num_units], dtype=dtype)
    wcf = vs.get_variable("W_F_diag", [self._num_units], dtype=dtype)
  else:
    wci = wco = wcf = array_ops.zeros([self._num_units], dtype=dtype)

  if sequence_length is None:
    max_seq_len = time_len
  else:
    max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))

  _, cs, _, _, _, _, h = _lstm_ops_so.block_lstm(
      seq_len_max=max_seq_len,
      x=inputs,
      cs_prev=initial_cell_state,
      h_prev=initial_output,
      w=w,
      wci=wci,
      wco=wco,
      wcf=wcf,
      b=b,
      forget_bias=self._forget_bias,
      cell_clip=self._cell_clip,
      use_peephole=self._use_peephole)
  return cs, h
Author: MostafaGazar | Project: tensorflow | Lines: 60 | Source file: lstm_ops.py
Example 12: ndlstm_base_dynamic
def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using dynamic_rnn and
  the TensorFlow LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with variable_scope.variable_scope(scope, "SeqLstm", [inputs]):
    # TODO(tmb) make batch size, sequence_length dynamic
    # example: sequence_length = tf.shape(inputs)[0]
    _, batch_size, _ = _shape(inputs)
    lstm_cell = core_rnn_cell_impl.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    sequence_length = int(inputs.get_shape()[0])
    sequence_lengths = math_ops.to_int64(
        array_ops.fill([batch_size], sequence_length))
    if reverse:
      inputs = array_ops.reverse_v2(inputs, [0])
    outputs, _ = rnn.dynamic_rnn(
        lstm_cell, inputs, sequence_lengths, state, time_major=True)
    if reverse:
      outputs = array_ops.reverse_v2(outputs, [0])
    return outputs
Author: AlbertXiebnu | Project: tensorflow | Lines: 31 | Source file: lstm1d.py
Example 13: _select_last_activations
def _select_last_activations(activations, sequence_lengths):
  """Selects the nth set of activations for each n in `sequence_length`.

  Returns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not
  `None`, then `output[i, :] = activations[i, sequence_length[i] - 1, :]`. If
  `sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.

  Args:
    activations: A `Tensor` with shape `[batch_size, padded_length, k]`.
    sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.

  Returns:
    A `Tensor` of shape `[batch_size, k]`.
  """
  with ops.name_scope(
      'select_last_activations', values=[activations, sequence_lengths]):
    activations_shape = array_ops.shape(activations)
    batch_size = activations_shape[0]
    padded_length = activations_shape[1]
    output_units = activations_shape[2]
    if sequence_lengths is None:
      sequence_lengths = padded_length
    start_indices = math_ops.to_int64(
        math_ops.range(batch_size) * padded_length)
    last_indices = start_indices + sequence_lengths - 1
    reshaped_activations = array_ops.reshape(
        activations, [batch_size * padded_length, output_units])
    last_activations = array_ops.gather(reshaped_activations, last_indices)
    last_activations.set_shape([activations.shape[0], activations.shape[2]])
    return last_activations
Author: ThunderQi | Project: tensorflow | Lines: 30 | Source file: rnn.py
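The flatten-and-gather trick above is easiest to verify on concrete shapes. A toy sketch (batch 2, padded length 3, 4 units, made-up lengths):

```python
import tensorflow as tf

activations = tf.reshape(tf.range(24, dtype=tf.float32), [2, 3, 4])
sequence_lengths = tf.constant([2, 3], dtype=tf.int64)
batch, padded, units = 2, 3, 4
start = tf.cast(tf.range(batch) * padded, tf.int64)       # [0, 3]
last = tf.gather(tf.reshape(activations, [batch * padded, units]),
                 start + sequence_lengths - 1)  # row i is activations[i, len_i - 1]
```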
Example 14: generate_single_output
def generate_single_output(encoder_state, attention_states, sequence_length,
                           targets, num_classes, buckets,
                           use_mean_attention=False,
                           softmax_loss_function=None, per_example_loss=False,
                           name=None, use_attention=False):
  all_inputs = targets
  with ops.op_scope(all_inputs, name, "model_with_buckets"):
    with variable_scope.variable_scope(variable_scope.get_variable_scope(),
                                       reuse=None):
      bucket_attention_states, bucket_attn_weights, bucket_attns, bucket_outputs = attention_single_output_decoder(
          encoder_state, attention_states, output_size=num_classes,
          num_heads=1,
          sequence_length=sequence_length,
          initial_state_attention=True,
          use_attention=use_attention)

      if softmax_loss_function is None:
        assert len(bucket_outputs) == len(targets) == 1
        # We need to make the target an int64 tensor and set its shape.
        bucket_target = array_ops.reshape(math_ops.to_int64(targets[0]), [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logits=bucket_outputs[0], labels=bucket_target)
      else:
        assert len(bucket_outputs) == len(targets) == 1
        crossent = softmax_loss_function(bucket_outputs[0], targets[0])

      batch_size = array_ops.shape(targets[0])[0]
      loss = tf.reduce_sum(crossent) / math_ops.cast(batch_size,
                                                     dtypes.float32)
  return bucket_outputs, loss
Author: bwang514 | Project: rnn-nlu | Lines: 28 | Source file: seq_classification.py
Example 15: index_to_string_table_from_tensor
def index_to_string_table_from_tensor(vocabulary_list,
                                      default_value="UNK",
                                      name=None):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The mapping is initialized from a string `vocabulary_list` 1-D
  `Tensor` where each element is a value and the corresponding index within
  the tensor is the key.

  Any input which does not have a corresponding index in `vocabulary_list`
  (an out-of-vocabulary entry) is assigned the `default_value`.

  The underlying table must be initialized by calling
  `tf.tables_initializer().run()` or `table.init.run()` once.

  Elements in `vocabulary_list` cannot have duplicates; otherwise, when
  executing the table initializer op, it will throw a
  `FailedPreconditionError`.

  Sample usage:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  indices = tf.constant([1, 5], tf.int64)
  table = tf.contrib.lookup.index_to_string_table_from_tensor(
      vocabulary_list, default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.tables_initializer().run()

  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_list: A 1-D string `Tensor` that specifies the strings to map
      from indices.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The lookup table mapping an `int64` index to its associated string value.

  Raises:
    ValueError: when `vocabulary_list` is not set.
  """
  if vocabulary_list is None:
    raise ValueError("vocabulary_list must be specified.")

  with ops.name_scope(name, "index_to_string") as scope:
    vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)
    num_elements = array_ops.size(vocabulary_list)
    keys = math_ops.to_int64(math_ops.range(num_elements))

    shared_name = ""
    init = KeyValueTensorInitializer(
        keys, vocabulary_list, dtypes.int64, dtypes.string, name="table_init")
    # TODO(yleon): Use a more efficient structure.
    return HashTable(init, default_value, shared_name=shared_name, name=scope)
Author: andrewharp | Project: tensorflow | Lines: 60 | Source file: lookup_ops.py
Example 16: split
def split(self, value, lengths, name=None):
  """See TensorArray."""
  with ops.name_scope(name, "TensorArraySplit",
                      [self._handle, value, lengths]):
    value = ops.convert_to_tensor(value, name="value")
    with self._maybe_colocate_with(value):
      lengths_64 = math_ops.to_int64(lengths)
      if self._infer_shape and context.in_graph_mode():
        clengths = tensor_util.constant_value(lengths_64)
        if value.shape.dims is not None:
          if clengths is not None and clengths.max() == clengths.min():
            self._merge_element_shape(
                tensor_shape.TensorShape([clengths[0]]).concatenate(
                    value.shape[1:]))
      flow_out = gen_data_flow_ops._tensor_array_split_v3(
          handle=self._handle,
          value=value,
          lengths=lengths_64,
          flow_in=self._flow,
          name=name)
    ta = TensorArray(
        dtype=self._dtype, handle=self._handle, flow=flow_out,
        colocate_with_first_write_call=self._colocate_with_first_write_call)
    ta._infer_shape = self._infer_shape
    ta._element_shape = self._element_shape
    ta._colocate_with = self._colocate_with
    return ta
Author: ChengYuXiang | Project: tensorflow | Lines: 27 | Source file: tensor_array_ops.py
Example 17: Backward
def Backward(*args):
  """Backward pass for the recurrent net."""
  # theta, state0, inputs are Forward's inputs.
  # acc_state is the accumulated 1st output of Forward.
  # acc_extras is the accumulated 2nd output of Forward.
  # d_acc_state is the gradient for acc_state.
  # d_state1 is the gradient for the final state computed by Forward.
  (theta, state0, inputs, max_input_length, acc_state, acc_extras,
   d_acc_state, d_state1) = _Pack(args, backward_sig)

  # Accumulators for gradients.
  d_theta = _EmptyLike(theta)
  d_inputs = _EmptyLike(inputs)

  # Loop backwards. Note that the loop's limit is open-ended, so it goes
  # through t=0.
  t = max_input_length - 1
  dev_t = math_ops.to_int32(t) if use_tpu else math_ops.to_int64(t)
  run = functional_ops.For(
      start=t,
      limit=-1,
      delta=-1,
      inputs=[dev_t] + _Flatten([
          theta, state0, inputs, acc_state, acc_extras, d_theta, d_state1,
          d_inputs, d_acc_state
      ]),
      body=BackwardLoopBody,
      rewrite_with_while=compiled)
  (theta, state0, inputs, acc_state, acc_extras, d_theta, d_state0,
   d_inputs, d_acc_state) = _Pack(run[1:], bakloop_sig)

  d_max_input_length = array_ops.constant(0, dtype=max_input_length.dtype)
  return _Flatten(
      [d_theta, d_state0, d_inputs, d_max_input_length, acc_extras])
Author: AnishShah | Project: tensorflow | Lines: 35 | Source file: recurrent.py
Example 18: crf_unary_score
def crf_unary_score(tag_indices, sequence_lengths, inputs):
  """Computes the unary scores of tag sequences.

  Args:
    tag_indices: A [batch_size, max_seq_len] matrix of tag indices.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials.

  Returns:
    unary_scores: A [batch_size] vector of unary scores.
  """
  batch_size = array_ops.shape(inputs)[0]
  max_seq_len = array_ops.shape(inputs)[1]
  num_tags = array_ops.shape(inputs)[2]

  flattened_inputs = array_ops.reshape(inputs, [-1])

  offsets = array_ops.expand_dims(
      math_ops.range(batch_size) * max_seq_len * num_tags, 1)
  offsets += array_ops.expand_dims(math_ops.range(max_seq_len) * num_tags, 0)
  # Use int32 or int64 based on tag_indices' dtype.
  if tag_indices.dtype == dtypes.int64:
    offsets = math_ops.to_int64(offsets)
  flattened_tag_indices = array_ops.reshape(offsets + tag_indices, [-1])

  unary_scores = array_ops.reshape(
      array_ops.gather(flattened_inputs, flattened_tag_indices),
      [batch_size, max_seq_len])

  masks = array_ops.sequence_mask(sequence_lengths,
                                  maxlen=array_ops.shape(tag_indices)[1],
                                  dtype=dtypes.float32)

  unary_scores = math_ops.reduce_sum(unary_scores * masks, 1)
  return unary_scores
Author: Jordan1237 | Project: tensorflow | Lines: 34 | Source file: crf.py
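The dtype-matching branch is the `to_int64` use: `tf.range` produces int32, which must be widened before adding to int64 `tag_indices`. A concrete sketch of the offset arithmetic:

```python
import tensorflow as tf

tag_indices = tf.constant([[1, 2], [0, 1]], dtype=tf.int64)   # [batch, time]
batch_size, max_seq_len, num_tags = 2, 2, 3
offsets = tf.expand_dims(tf.range(batch_size) * max_seq_len * num_tags, 1)
offsets += tf.expand_dims(tf.range(max_seq_len) * num_tags, 0)
if tag_indices.dtype == tf.int64:
    offsets = tf.cast(offsets, tf.int64)   # mirrors the to_int64 branch above
flat_indices = tf.reshape(offsets + tag_indices, [-1])   # [1, 5, 6, 10]
```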
Example 19: _process_labels
def _process_labels(self, labels):
  if isinstance(labels, sparse_tensor.SparseTensor):
    if labels.dtype == dtypes.string:
      label_ids_values = lookup_ops.index_table_from_tensor(
          vocabulary_list=tuple(self._label_vocabulary),
          name='class_id_lookup').lookup(labels.values)
      label_ids = sparse_tensor.SparseTensor(
          indices=labels.indices,
          values=label_ids_values,
          dense_shape=labels.dense_shape)
    else:
      label_ids = labels
    return math_ops.to_int64(
        sparse_ops.sparse_to_indicator(label_ids, self._n_classes))
  msg = ('labels shape must be [batch_size, {}]. '
         'Given: ').format(self._n_classes)
  labels_shape = array_ops.shape(labels)
  check_rank_op = control_flow_ops.Assert(
      math_ops.equal(array_ops.rank(labels), 2),
      data=[msg, labels_shape])
  check_label_dim = control_flow_ops.Assert(
      math_ops.equal(labels_shape[-1], self._n_classes),
      data=[msg, labels_shape])
  with ops.control_dependencies([check_rank_op, check_label_dim]):
    return array_ops.identity(labels)
Author: Crazyonxh | Project: tensorflow | Lines: 25 | Source file: head.py
Example 20: _reverse_seq
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size,
      n_features).
    lengths: A tensor of dimension batch_size, containing lengths for each
      sequence in the batch. If "None" is specified, simply reverses the list.

  Returns:
    The time-reversed sequence.
  """
  if lengths is None:
    return list(reversed(input_seq))

  input_shape = tensor_shape.unknown_shape(
      ndims=input_seq[0].get_shape().ndims)
  for input_ in input_seq:
    input_shape.merge_with(input_.get_shape())
    input_.set_shape(input_shape)

  # Join into (time, batch_size, depth).
  s_joined = array_ops.pack(input_seq)

  # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32.
  if lengths is not None:
    lengths = math_ops.to_int64(lengths)

  # Reverse along dimension 0.
  s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into a list.
  result = array_ops.unpack(s_reversed)
  for r in result:
    r.set_shape(input_shape)
  return result
Author: chemelnucfin | Project: tensorflow | Lines: 34 | Source file: rnn.py
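The cast exists because, at the time, `reverse_sequence` required int64 lengths; current `tf.reverse_sequence` accepts int32 or int64. A small public-API sketch of the same reversal, on made-up data:

```python
import tensorflow as tf

seq = tf.reshape(tf.range(12, dtype=tf.float32), [3, 2, 2])  # (time, batch, depth)
lengths = tf.constant([3, 2], dtype=tf.int64)                # per-batch lengths
reversed_seq = tf.reverse_sequence(seq, lengths, seq_axis=0, batch_axis=1)
```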
Note: The tensorflow.python.ops.math_ops.to_int64 examples on this page were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution or use should follow the corresponding project's License. Do not repost without permission.