This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.cast. If you have been wondering exactly what cast does, how to call it, or what real code that uses it looks like, the curated examples below should help.
Twenty code examples of the cast function are shown below, sorted by popularity by default.
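Before the examples, a minimal standalone sketch of what the op does. This uses the public tf.cast alias, which wraps the same op as math_ops.cast; the tensor values are made up for illustration:

import tensorflow as tf

x = tf.constant([1.8, -2.2], dtype=tf.float32)
y = tf.cast(x, tf.int32)    # float -> int truncates toward zero: [1, -2]
z = tf.cast(y, tf.float64)  # int -> float: [1.0, -2.0]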
Example 1: _spectrum_to_circulant_1d
def _spectrum_to_circulant_1d(self, spectrum, shape, dtype):
  """Creates a circulant matrix from a spectrum.

  Intentionally done in an explicit yet inefficient way. This provides a
  cross check to the main code that uses fancy reshapes.

  Args:
    spectrum: Float or complex `Tensor`.
    shape: Python list. Desired shape of returned matrix.
    dtype: Type to cast the returned matrix to.

  Returns:
    Circulant (batch) matrix of desired `dtype`.
  """
  spectrum = _to_complex(spectrum)
  spectrum_shape = self._shape_to_spectrum_shape(shape)
  domain_dimension = spectrum_shape[-1]
  if not domain_dimension:
    return array_ops.zeros(shape, dtype)

  # Explicitly compute the action of spectrum on basis vectors.
  matrix_rows = []
  for m in range(domain_dimension):
    x = np.zeros([domain_dimension])
    # x is a basis vector.
    x[m] = 1.0
    fft_x = fft_ops.fft(math_ops.cast(x, spectrum.dtype))
    h_convolve_x = fft_ops.ifft(spectrum * fft_x)
    matrix_rows.append(h_convolve_x)
  matrix = array_ops.stack(matrix_rows, axis=-1)
  return math_ops.cast(matrix, dtype)
Developer: aritratony, Project: tensorflow, Lines: 31, Source: linear_operator_circulant_test.py
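As a cross-check of the same construction outside TensorFlow, here is a NumPy sketch (the function name is mine): applying IFFT(spectrum * FFT(e_m)) to each basis vector e_m yields one column of the circulant matrix, exactly as the loop above does.

import numpy as np

def spectrum_to_circulant_1d_np(spectrum):
  # Column m is the action of the operator on basis vector e_m.
  n = len(spectrum)
  cols = []
  for m in range(n):
    e_m = np.zeros(n)
    e_m[m] = 1.0
    cols.append(np.fft.ifft(spectrum * np.fft.fft(e_m)))
  return np.stack(cols, axis=-1)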
Example 2: __init__
def __init__(self, partitioned_dim_sizes, inner_dim_sizes,
             dim_size_dtype=None):
  """Creates a RaggedTensorDynamicShape.

  Args:
    partitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for
      each partitioned dimension. If dimension `d` is uniform, then
      `partitioned_dim_sizes[d]` must be an integer scalar, specifying the
      size of all slices across dimension `d`. If dimension `d` is ragged,
      then `partitioned_dim_sizes[d]` must be an integer vector, specifying
      the size of each slice across dimension `d`.
    inner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the
      number of inner dimensions. `inner_dim_sizes[n]` is the size of all
      slices across the `n`th inner dimension (which is the
      `(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor).
    dim_size_dtype: dtype for dimension sizes. If not specified, then it
      is chosen based on the dtypes of `partitioned_dim_sizes` and
      `inner_dim_sizes`.
  """
  assert isinstance(partitioned_dim_sizes, (list, tuple))

  with ops.name_scope(None, 'RaggedTensorDynamicShape',
                      (partitioned_dim_sizes, inner_dim_sizes)):
    partitioned_dim_sizes = tuple(
        ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i)
        for (i, size) in enumerate(partitioned_dim_sizes))
    inner_dim_sizes = ops.convert_to_tensor(
        inner_dim_sizes, name='inner_dim_sizes')

    # Validate shapes.
    if partitioned_dim_sizes:
      for axis, dimension_size in enumerate(partitioned_dim_sizes):
        if dimension_size.shape.ndims is None:
          raise ValueError(
              'rank of partitioned_dim_sizes[%d] is unknown' % axis)
        dimension_size.shape.with_rank_at_most(1)
      if partitioned_dim_sizes[0].shape.ndims == 1:
        raise ValueError('outermost partitioned dimension must be uniform')
      if partitioned_dim_sizes[-1].shape.ndims == 0:
        raise ValueError('innermost partitioned dimension must be ragged')
    inner_dim_sizes.shape.assert_has_rank(1)

    # Convert dimension size tensors to a single dtype.
    if dim_size_dtype is None:
      dim_size_dtypes = set([p.dtype for p in partitioned_dim_sizes
                             if p.shape.ndims == 1])
      if not dim_size_dtypes:
        dim_size_dtype = dtypes.int64
      elif len(dim_size_dtypes) == 1:
        dim_size_dtype = dim_size_dtypes.pop()
      else:
        if not ragged_config.auto_cast_partition_dtype():
          raise ValueError('partitioned_dim_sizes must have matching dtypes')
        dim_size_dtype = dtypes.int64
    partitioned_dim_sizes = tuple(math_ops.cast(p, dim_size_dtype)
                                  for p in partitioned_dim_sizes)
    inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype)

    self._partitioned_dim_sizes = partitioned_dim_sizes
    self._inner_dim_sizes = inner_dim_sizes
Developer: aritratony, Project: tensorflow, Lines: 60, Source: ragged_tensor_shape.py
Example 3: get_beta_accumulators
def get_beta_accumulators(opt, dtype):
  local_step = math_ops.cast(opt.iterations + 1, dtype)
  beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype)
  beta_1_power = math_ops.pow(beta_1_t, local_step)
  beta_2_t = math_ops.cast(opt._get_hyper("beta_2"), dtype)
  beta_2_power = math_ops.pow(beta_2_t, local_step)
  return (beta_1_power, beta_2_power)
Developer: JonathanRaiman, Project: tensorflow, Lines: 7, Source: adam_test.py
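The accumulators returned above are just beta_1^t and beta_2^t. A plain-Python sketch (the helper name is mine) of how such powers drive Adam's bias correction:

def bias_correction(step, beta_1=0.9, beta_2=0.999):
  # Adam divides its moment estimates by these factors so that
  # early steps are not biased toward zero.
  return 1.0 - beta_1 ** step, 1.0 - beta_2 ** step

print(bias_correction(1))     # (~0.1, ~0.001): strong correction at step 1
print(bias_correction(1000))  # (~1.0, ~0.63): correction fades with t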
Example 4: weighted
def weighted(y_true, y_pred, weights, mask=None):
  """Wrapper function.

  Arguments:
    y_true: `y_true` argument of `fn`.
    y_pred: `y_pred` argument of `fn`.
    weights: Weights tensor.
    mask: Mask tensor.

  Returns:
    Scalar tensor.
  """
  # score_array has ndim >= 2
  score_array = fn(y_true, y_pred)
  if mask is not None:
    # Cast the mask to floatX to avoid float64 upcasting in theano
    mask = math_ops.cast(mask, K.floatx())
    # mask should have the same shape as score_array
    score_array *= mask
    # the loss per batch should be proportional
    # to the number of unmasked samples.
    score_array /= K.mean(mask)

  # apply sample weighting
  if weights is not None:
    # reduce score_array to same ndim as weight array
    ndim = K.ndim(score_array)
    weight_ndim = K.ndim(weights)
    score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
    score_array *= weights
    score_array /= K.mean(
        math_ops.cast(math_ops.not_equal(weights, 0), K.floatx()))
  return K.mean(score_array)
Developer: jinxin0924, Project: tensorflow, Lines: 33, Source: training_utils.py
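A self-contained sketch of the masking arithmetic above, using public TF ops rather than the Keras wrapper itself (values made up):

import tensorflow as tf

per_sample_loss = tf.constant([1.0, 3.0, 5.0, 7.0])
mask = tf.constant([1.0, 1.0, 0.0, 0.0])

# Zero out masked entries, then divide by the mean of the mask so the
# result is the mean over unmasked samples only, as in `weighted` above.
masked = per_sample_loss * mask / tf.reduce_mean(mask)
print(tf.reduce_mean(masked).numpy())  # 2.0, the mean of the two kept samples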
Example 5: _batch_norm
def _batch_norm(self, x, mean, var, offset, scale, epsilon):
  # We compute the batch norm manually in this function because
  # nn_impl.batch_normalization does not support float16 yet.
  # TODO(reedwm): Add float16 support to nn_impl.batch_normalization.
  inv = math_ops.rsqrt(var + epsilon) * scale
  y = math_ops.cast(x, scale.dtype) * inv + (offset - mean * inv)
  return math_ops.cast(y, x.dtype)
Developer: SylChan, Project: tensorflow, Lines: 7, Source: nn_fused_batchnorm_test.py
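The two-line form above is algebraically identical to the textbook batch-norm formula; a quick NumPy check (all values made up):

import numpy as np

x = np.random.randn(3, 4)
mean, var, scale, offset, epsilon = 0.1, 0.5, 1.5, 0.2, 1e-3

inv = scale / np.sqrt(var + epsilon)
y_fused_style = x * inv + (offset - mean * inv)  # the form used above
y_textbook = scale * (x - mean) / np.sqrt(var + epsilon) + offset
assert np.allclose(y_fused_style, y_textbook)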
Example 6: per_step_batch_loss
def per_step_batch_loss(self, features, mode, state):
  """Computes predictions, losses, and intermediate model states.

  Args:
    features: A dictionary with times, values, and (optionally) exogenous
      regressors. See `define_loss`.
    mode: The tf.estimator.ModeKeys mode to use (TRAIN, EVAL, INFER).
    state: Model-dependent state, each with size [batch size x ...]. The
      number and type will typically be fixed by the model (for example a
      mean and variance).

  Returns:
    A tuple of (loss, filtered_states, predictions)
      loss: Average loss values across the batch.
      filtered_states: For each Tensor in `state` with shape [batch size x
        ...], `filtered_states` has a Tensor with shape [batch size x window
        size x ...] with filtered state for each part of the batch and
        window.
      predictions: A dictionary with model-dependent one-step-ahead (or
        at-least-one-step-ahead with missing values) predictions, with keys
        indicating the type of prediction and values having shape [batch
        size x window size x ...]. For example state space models provide
        "mean", "covariance", and "log_likelihood".
  """
  self._check_graph_initialized()
  times = math_ops.cast(features[TrainEvalFeatures.TIMES], dtype=dtypes.int64)
  values = math_ops.cast(features[TrainEvalFeatures.VALUES], dtype=self.dtype)
  exogenous_regressors = self._process_exogenous_features(
      times=times,
      features={key: value for key, value in features.items()
                if key not in [TrainEvalFeatures.TIMES,
                               TrainEvalFeatures.VALUES]})

  def _batch_loss_filtering_step(step_number, current_times, state):
    """Make a prediction and update it based on data."""
    current_values = values[:, step_number, :]
    state = self._apply_exogenous_update(
        step_number=step_number, current_times=current_times, state=state,
        raw_features=features,
        embedded_exogenous_regressors=exogenous_regressors)
    predicted_state, predictions = self._prediction_step(
        current_times=current_times,
        state=state)
    filtered_state, outputs = self._filtering_step(
        current_times=current_times,
        current_values=current_values,
        state=predicted_state,
        predictions=predictions)
    return filtered_state, outputs

  state, outputs = self._state_update_loop(
      times=times, state=state, state_update_fn=_batch_loss_filtering_step,
      outputs=["loss"] + self._train_output_names)
  outputs["loss"].set_shape(times.get_shape())
  loss_sum = math_ops.reduce_sum(outputs["loss"])
  per_observation_loss = (loss_sum / math_ops.cast(
      math_ops.reduce_prod(array_ops.shape(times)), dtype=self.dtype))
  per_observation_loss += self._loss_additions(times, values, mode)
  # Since we have window-level additions to the loss, its per-step value is
  # misleading, so we avoid returning it.
  del outputs["loss"]
  return per_observation_loss, state, outputs
Developer: AutumnQYN, Project: tensorflow, Lines: 60, Source: model.py
Example 7: assert_integer_form
def assert_integer_form(
    x, data=None, summarize=None, message=None,
    int_dtype=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Floating-point `Tensor`.
    data: The tensors to print out if the condition is `False`. Defaults to
      the error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
      implies the smallest possible signed int will be used for casting.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
  """
  with ops.name_scope(name, values=[x, data]):
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_integer:
      return control_flow_ops.no_op()
    message = message or "{} has non-integer components".format(x.op.name)
    if int_dtype is None:
      try:
        int_dtype = {
            dtypes.float16: dtypes.int16,
            dtypes.float32: dtypes.int32,
            dtypes.float64: dtypes.int64,
        }[x.dtype.base_dtype]
      except KeyError:
        raise TypeError("Unrecognized type {}".format(x.dtype.name))
    return check_ops.assert_equal(
        x, math_ops.cast(math_ops.cast(x, int_dtype), x.dtype),
        data=data, summarize=summarize, message=message, name=name)
Developer: 1000sprites, Project: tensorflow, Lines: 35, Source: util.py
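The heart of the assertion is the cast round trip; a minimal standalone version using the public API (a sketch, not the helper itself):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
round_trip = tf.cast(tf.cast(x, tf.int32), x.dtype)
tf.debugging.assert_equal(x, round_trip)  # passes: every component is integral

# For x = tf.constant([1.0, 2.5]) the same check would raise
# InvalidArgumentError, since cast-and-back maps 2.5 to 2.0.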
Example 8: _fused_batch_norm
def _fused_batch_norm(self, inputs, training):
  """Returns the output of fused batch norm."""
  beta = self.beta if self.center else self._beta_const
  gamma = self.gamma if self.scale else self._gamma_const

  def _fused_batch_norm_training():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        epsilon=self.epsilon,
        data_format=self._data_format)

  def _fused_batch_norm_inference():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        mean=self.moving_mean,
        variance=self.moving_variance,
        epsilon=self.epsilon,
        is_training=False,
        data_format=self._data_format)

  output, mean, variance = tf_utils.smart_cond(
      training, _fused_batch_norm_training, _fused_batch_norm_inference)
  if not self._bessels_correction_test_only:
    # Remove Bessel's correction to be consistent with non-fused batch norm.
    # Note that the variance computed by fused batch norm is
    # with Bessel's correction.
    sample_size = math_ops.cast(
        array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
    factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
    variance *= factor

  training_value = tf_utils.constant_value(training)
  if training_value is None:
    momentum = tf_utils.smart_cond(training,
                                   lambda: self.momentum,
                                   lambda: 1.0)
  else:
    momentum = ops.convert_to_tensor(self.momentum)
  if training_value or training_value is None:
    if distribution_strategy_context.in_cross_replica_context():
      strategy = distribution_strategy_context.get_strategy()
      mean_update = strategy.extended.update(
          self.moving_mean, self._assign_moving_average,
          (mean, self.momentum))
      variance_update = strategy.extended.update(
          self.moving_variance, self._assign_moving_average,
          (variance, self.momentum))
    else:
      mean_update = self._assign_moving_average(self.moving_mean, mean,
                                                momentum)
      variance_update = self._assign_moving_average(self.moving_variance,
                                                    variance, momentum)
    self.add_update(mean_update, inputs=True)
    self.add_update(variance_update, inputs=True)

  return output
Developer: gautam1858, Project: tensorflow, Lines: 60, Source: normalization.py
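The factor applied above simply converts the Bessel-corrected (unbiased) variance into the population (biased) one; a NumPy confirmation (sketch):

import numpy as np

x = np.random.randn(100)
n = x.size
unbiased = x.var(ddof=1)  # with Bessel's correction: divide by n - 1
biased = x.var(ddof=0)    # without: divide by n
assert np.isclose(unbiased * (n - 1) / n, biased)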
Example 9: _sample_n
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  if self.total_count.get_shape().ndims is not None:
    if self.total_count.get_shape().ndims != 0:
      raise NotImplementedError(
          "Sample only supported for scalar number of draws.")
  elif self.validate_args:
    is_scalar = check_ops.assert_rank(
        n_draws, 0,
        message="Sample only supported for scalar number of draws.")
    n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
  k = self.event_shape_tensor()[0]
  # Flatten batch dims so logits has shape [B, k],
  # where B = reduce_prod(self.batch_shape_tensor()).
  x = random_ops.multinomial(
      logits=array_ops.reshape(self.logits, [-1, k]),
      num_samples=n * n_draws,
      seed=seed)
  x = array_ops.reshape(x, shape=[-1, n, n_draws])
  x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k),
                          axis=-2)  # shape: [B, n, k]
  x = array_ops.transpose(x, perm=[1, 0, 2])
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  x = array_ops.reshape(x, final_shape)
  return math_ops.cast(x, self.dtype)
Developer: DjangoPeng, Project: tensorflow, Lines: 25, Source: multinomial.py
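The reshape/one_hot/reduce_sum sequence turns raw categorical draws into per-class counts; a NumPy analogue (names and sizes are mine):

import numpy as np

rng = np.random.default_rng(0)
k, n_draws = 4, 100
draws = rng.choice(k, size=n_draws, p=[0.1, 0.2, 0.3, 0.4])

# Summing one-hot rows gives the count of each class among the draws,
# which is exactly what the reduce_sum over one_hot computes above.
counts = np.eye(k)[draws].sum(axis=0)
assert counts.sum() == n_draws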
Example 10: quantiles_ready
def quantiles_ready():
  """The subgraph for when the quantiles are ready."""
  quantized_feature = quantile_ops.quantiles([sparse_column_values], [],
                                             [quantile_buckets], [])
  quantized_feature = math_ops.cast(quantized_feature[0], dtypes.int64)
  quantized_feature = array_ops.reshape(quantized_feature, [-1])
  example_indices, _ = array_ops.split(
      sparse_column_indices, num_or_size_splits=2, axis=1)
  example_indices = array_ops.squeeze(example_indices, [1])
  filtered_gradients = array_ops.gather(gradients, example_indices)
  filtered_hessians = array_ops.gather(hessians, example_indices)
  filtered_partition_ids = array_ops.gather(example_partition_ids,
                                            example_indices)
  unique_partitions, mapped_partitions = array_ops.unique(
      example_partition_ids)

  # Compute aggregate stats for each partition.
  per_partition_gradients = math_ops.unsorted_segment_sum(
      gradients, mapped_partitions, array_ops.size(unique_partitions))
  per_partition_hessians = math_ops.unsorted_segment_sum(
      hessians, mapped_partitions, array_ops.size(unique_partitions))

  # Prepend a bias feature per partition that accumulates the stats for all
  # examples in that partition.
  bias_feature_ids = array_ops.fill(
      array_ops.shape(unique_partitions), _BIAS_FEATURE_ID)
  bias_feature_ids = math_ops.cast(bias_feature_ids, dtypes.int64)
  partition_ids = array_ops.concat(
      [unique_partitions, filtered_partition_ids], 0)
  filtered_gradients = array_ops.concat(
      [per_partition_gradients, filtered_gradients], 0)
  filtered_hessians = array_ops.concat(
      [per_partition_hessians, filtered_hessians], 0)
  bucket_ids = array_ops.concat([bias_feature_ids, quantized_feature], 0)
  return partition_ids, bucket_ids, filtered_gradients, filtered_hessians
Developer: 1000sprites, Project: tensorflow, Lines: 35, Source: ordinal_split_handler.py
Example 11: test_defining_spd_operator_by_taking_real_part
def test_defining_spd_operator_by_taking_real_part(self):
  with self.cached_session() as sess:
    # S is real and positive.
    s = linear_operator_test_util.random_uniform(
        shape=(10, 2, 3, 4), dtype=dtypes.float32, minval=1., maxval=2.)

    # Let S = S1 + S2, the Hermitian and anti-Hermitian parts.
    # S1 = 0.5 * (S + S^H), S2 = 0.5 * (S - S^H),
    # where ^H is the Hermitian transpose of the function:
    # f(n0, n1, n2)^H := ComplexConjugate[f(N0-n0, N1-n1, N2-n2)].
    # We want to isolate S1, since
    #   S1 is Hermitian by construction
    #   S1 is real since S is
    #   S1 is positive since it is the sum of two positive kernels

    # IDFT[S] = IDFT[S1] + IDFT[S2]
    #         = H1 + H2
    # where H1 is real since it is Hermitian,
    # and H2 is imaginary since it is anti-Hermitian.
    ifft_s = fft_ops.ifft3d(math_ops.cast(s, dtypes.complex64))

    # Throw away H2, keep H1.
    real_ifft_s = math_ops.real(ifft_s)

    # This is the perfect spectrum!
    # spectrum = DFT[H1]
    #          = S1,
    fft_real_ifft_s = fft_ops.fft3d(
        math_ops.cast(real_ifft_s, dtypes.complex64))

    # S1 is Hermitian ==> operator is real.
    # S1 is real ==> operator is self-adjoint.
    # S1 is positive ==> operator is positive-definite.
    operator = linalg.LinearOperatorCirculant3D(fft_real_ifft_s)

    # Allow for complex output so we can check operator has zero imag part.
    self.assertEqual(operator.dtype, dtypes.complex64)

    matrix, matrix_t = sess.run([
        operator.to_dense(),
        array_ops.matrix_transpose(operator.to_dense())
    ])
    operator.assert_positive_definite().run()  # Should not fail.
    np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
    self.assertAllClose(matrix, matrix_t)

    # Just to test the theory, get S2 as well.
    # This should create an imaginary operator.
    # S2 is anti-Hermitian ==> operator is imaginary.
    # S2 is real ==> operator is self-adjoint.
    imag_ifft_s = math_ops.imag(ifft_s)
    fft_imag_ifft_s = fft_ops.fft3d(
        1j * math_ops.cast(imag_ifft_s, dtypes.complex64))
    operator_imag = linalg.LinearOperatorCirculant3D(fft_imag_ifft_s)

    matrix, matrix_h = sess.run([
        operator_imag.to_dense(),
        array_ops.matrix_transpose(math_ops.conj(operator_imag.to_dense()))
    ])
    self.assertAllClose(matrix, matrix_h)
    np.testing.assert_allclose(0, np.real(matrix), atol=1e-7)
Developer: JonathanRaiman, Project: tensorflow, Lines: 60, Source: linear_operator_circulant_test.py
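A 1-D NumPy analogue of the test's trick (sizes are mine): keeping only the real part of IDFT[S] isolates the Hermitian part H1, and the circulant matrix built from H1 comes out real, symmetric, and positive-definite:

import numpy as np

n = 8
s = np.random.uniform(1.0, 2.0, size=n)  # real, positive spectrum S
h1 = np.real(np.fft.ifft(s))             # H1: the Hermitian part survives

# Circulant matrix with first column h1; its eigenvalues are fft(h1),
# which here equal (s[k] + s[-k]) / 2 and are therefore real and >= 1.
c = np.stack([np.roll(h1, j) for j in range(n)], axis=1)
assert np.allclose(c, c.T)                # real and symmetric
assert np.all(np.linalg.eigvalsh(c) > 0)  # positive-definite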
Example 12: _apply_sparse_shared
def _apply_sparse_shared(self, grad, var, indices,
                         scatter_add, scatter_update):
  beta1_power = self._get_beta_accumulators()
  beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
  lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
  beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
  beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
  epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
  # m_t = beta1 * m + (1 - beta1) * g_t
  m = self.get_slot(var, "m")
  m_slice = array_ops.gather(m, indices)
  m_t_slice = m_slice * beta1_t + grad * (1 - beta1_t)
  with ops.control_dependencies([m_t_slice]):
    m_t = scatter_update(m, indices, m_t_slice)
  # u_t = max(beta2 * u, abs(g_t))
  v = self.get_slot(var, "v")
  v_slice = array_ops.gather(v, indices)
  v_t_slice = math_ops.maximum(v_slice * beta2_t, math_ops.abs(grad))
  with ops.control_dependencies([v_t_slice]):
    v_t = scatter_update(v, indices, v_t_slice)
  # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t
  var_slice = -lr_t / (1 - beta1_power) * (m_t_slice /
                                           (v_t_slice + epsilon_t))
  with ops.control_dependencies([var_slice]):
    var_update = scatter_add(var, indices, var_slice)
  return control_flow_ops.group(*[var_update, m_t, v_t])
Developer: Ajaycs99, Project: tensorflow, Lines: 26, Source: adamax.py
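For reference, Example 12 is the sparse form of the AdaMax update; a single dense step in NumPy (a sketch with hypothetical names and the usual default hyperparameters):

import numpy as np

def adamax_step(theta, g, m, u, t, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
  m = beta1 * m + (1 - beta1) * g       # first-moment estimate
  u = np.maximum(beta2 * u, np.abs(g))  # infinity-norm second moment
  theta = theta - lr / (1 - beta1 ** t) * m / (u + eps)
  return theta, m, u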
Example 13: call
def call(self, values, weights=None):
  """Accumulate statistics for computing the mean.

  For example, if values is [1, 3, 5, 7] then the mean is 4.
  If the weights were specified as [1, 1, 0, 0] then the mean would be 2.

  Args:
    values: Tensor with the per-example value.
    weights: Optional weighting of each example. Defaults to 1.
  """
  if not self.built:  # False only in the first call().
    self.numer = self.add_variable(name="numer", shape=(),
                                   dtype=dtypes.float64,
                                   initializer=init_ops.zeros_initializer)
    self.denom = self.add_variable(name="denom", shape=(),
                                   dtype=dtypes.float64,
                                   initializer=init_ops.zeros_initializer)
  if weights is None:
    self.denom.assign_add(
        math_ops.cast(array_ops.size(values), dtypes.float64))
    values = math_ops.reduce_sum(values)
    self.numer.assign_add(math_ops.cast(values, dtypes.float64))
  else:
    weights = math_ops.cast(weights, dtypes.float64)
    self.denom.assign_add(math_ops.reduce_sum(weights))
    values = math_ops.cast(values, dtypes.float64) * weights
    self.numer.assign_add(math_ops.reduce_sum(values))
Developer: rajeev921, Project: tensorflow, Lines: 27, Source: metrics_impl.py
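The docstring's worked numbers are just a weighted mean, numer / denom; in NumPy (sketch):

import numpy as np

values = np.array([1.0, 3.0, 5.0, 7.0])
weights = np.array([1.0, 1.0, 0.0, 0.0])

print(values.mean())                             # 4.0: unweighted mean
print((values * weights).sum() / weights.sum())  # 2.0: weighted mean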
Example 14: call
def call(self, y_true, y_pred):
  """Invokes the `CategoricalCrossentropy` instance.

  Args:
    y_true: Ground truth values.
    y_pred: The predicted values.

  Returns:
    Categorical cross entropy losses.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = ops.convert_to_tensor(y_true)
  is_sparse = y_pred.shape != y_true.shape
  if is_sparse:
    return sparse_categorical_crossentropy(
        y_true, y_pred, from_logits=self.from_logits)
  else:
    y_true = math_ops.cast(y_true, y_pred.dtype)
    if self.label_smoothing > 0:
      num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype)
      smooth_positives = 1.0 - self.label_smoothing
      smooth_negatives = self.label_smoothing / num_classes
      y_true = y_true * smooth_positives + smooth_negatives
    return categorical_crossentropy(
        y_true, y_pred, from_logits=self.from_logits)
Developer: aeverall, Project: tensorflow, Lines: 27, Source: losses.py
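Written out for a single one-hot row, the label smoothing above does the following (NumPy sketch; the smoothing value is arbitrary):

import numpy as np

label_smoothing, num_classes = 0.1, 4
y_true = np.array([0.0, 0.0, 1.0, 0.0])

smoothed = y_true * (1.0 - label_smoothing) + label_smoothing / num_classes
print(smoothed)  # [0.025, 0.025, 0.925, 0.025]
assert np.isclose(smoothed.sum(), 1.0)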
Example 15: _apply_transform
def _apply_transform(self, input_tensors, **kwargs):
  """Applies the transformation to the `transform_input`.

  Args:
    input_tensors: a list of Tensors representing the input to
      the Transform.
    **kwargs: Additional keyword arguments, unused here.

  Returns:
    A namedtuple of Tensors representing the transformed output.
  """
  d = input_tensors[0]
  if self.strip_value is np.nan:
    strip_hot = math_ops.is_nan(d)
  else:
    strip_hot = math_ops.equal(d,
                               array_ops.constant([self.strip_value],
                                                  dtype=d.dtype))
  keep_hot = math_ops.logical_not(strip_hot)
  length = array_ops.reshape(array_ops.shape(d), [])
  indices = array_ops.boolean_mask(math_ops.range(length), keep_hot)
  values = array_ops.boolean_mask(d, keep_hot)
  sparse_indices = array_ops.reshape(
      math_ops.cast(indices, dtypes.int64), [-1, 1])
  shape = math_ops.cast(array_ops.shape(d), dtypes.int64)
  # pylint: disable=not-callable
  return self.return_type(ops.SparseTensor(sparse_indices, values, shape))
Developer: 821760408-sp, Project: tensorflow, Lines: 31, Source: sparsify.py
Example 16: _apply_sparse_shared
def _apply_sparse_shared(self, grad, var, indices, scatter_add, state):
  beta1_power, beta2_power = self._get_beta_accumulators(state)
  beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
  beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
  lr_t = state.get_hyper("learning_rate", var.dtype.base_dtype)
  beta1_t = state.get_hyper("beta1", var.dtype.base_dtype)
  beta2_t = state.get_hyper("beta2", var.dtype.base_dtype)
  epsilon_t = state.get_hyper("epsilon", var.dtype.base_dtype)
  lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
  # m_t = beta1 * m + (1 - beta1) * g_t
  m = state.get_slot(var, "m")
  m_scaled_g_values = grad * (1 - beta1_t)
  m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
  with ops.control_dependencies([m_t]):
    m_t = scatter_add(m, indices, m_scaled_g_values)
  # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
  v = state.get_slot(var, "v")
  v_scaled_g_values = (grad * grad) * (1 - beta2_t)
  v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
  with ops.control_dependencies([v_t]):
    v_t = scatter_add(v, indices, v_scaled_g_values)
  v_sqrt = math_ops.sqrt(v_t)
  var_update = state_ops.assign_sub(
      var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
  return control_flow_ops.group(*[var_update, m_t, v_t])
Developer: Ajaycs99, Project: tensorflow, Lines: 25, Source: adam.py
Example 17: _format_for_tpu_embedding_sparse_batch
def _format_for_tpu_embedding_sparse_batch(self, sparse_features):
  """Format sparse features for `enqueue_tpu_embedding_sparse_batch()`.

  Args:
    sparse_features: a `Dict` of `SparseTensor`s for embedding.

  Returns:
    Arguments for `enqueue_tpu_embedding_sparse_batch()`.
  """
  sample_idcs, embedding_idcs, aggregation_weights = list(), list(), list()
  for table in self._table_to_features_dict:
    sample_t, indices_t, weights_t = list(), list(), list()
    features = self._table_to_features_dict[table]
    for i, feature in enumerate(features):
      tensor = sparse_features[feature]
      sample_indices = tensor.indices[:, 0]
      embedding_indices = tensor.values
      weights = array_ops.ones_like(embedding_indices)
      sample_t.append(i * self._batch_size_per_core + sample_indices)
      indices_t.append(embedding_indices)
      weights_t.append(weights)

    sample_idcs.append(
        math_ops.cast(array_ops.concat(sample_t, axis=0), dtype=dtypes.int32))
    embedding_idcs.append(
        math_ops.cast(
            array_ops.concat(indices_t, axis=0), dtype=dtypes.int32))
    aggregation_weights.append(
        math_ops.cast(
            array_ops.concat(weights_t, axis=0), dtype=dtypes.float32))

  return sample_idcs, embedding_idcs, aggregation_weights
Developer: terrytangyuan, Project: tensorflow, Lines: 34, Source: tpu_embedding.py
Example 18: _entropy
def _entropy(self):
  if not self.bijector.is_constant_jacobian:
    raise NotImplementedError("entropy is not implemented")
  if not self.bijector._is_injective:  # pylint: disable=protected-access
    raise NotImplementedError("entropy is not implemented when "
                              "bijector is not injective.")
  # Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
  # can be shown that:
  #   H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
  # If is_constant_jacobian then:
  #   E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
  # where c can be anything.
  entropy = self.distribution.entropy()
  if self._is_maybe_event_override:
    # H[X] = sum_i H[X_i] if X_i are mutually independent.
    # This means that a reduce_sum is a simple rescaling.
    entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
                             dtype=entropy.dtype.base_dtype)
  if self._is_maybe_batch_override:
    new_shape = array_ops.concat([
        _ones_like(self._override_batch_shape),
        self.distribution.batch_shape_tensor()
    ], 0)
    entropy = array_ops.reshape(entropy, new_shape)
    multiples = array_ops.concat([
        self._override_batch_shape,
        _ones_like(self.distribution.batch_shape_tensor())
    ], 0)
    entropy = array_ops.tile(entropy, multiples)
  dummy = array_ops.zeros([], self.dtype)
  entropy -= math_ops.cast(
      self.bijector.inverse_log_det_jacobian(dummy),
      entropy.dtype)
  entropy.set_shape(self.batch_shape)
  return entropy
Developer: AbhinavJain13, Project: tensorflow, Lines: 35, Source: transformed_distribution.py
Example 19: call
def call(self, values, weights=None):
  """Accumulate statistics for computing the mean.

  For example, if values is [1, 3, 5, 7] then the mean is 4.
  If the weights were specified as [1, 1, 0, 0] then the mean would be 2.

  Args:
    values: Tensor with the per-example value.
    weights: Optional weighting of each example. Defaults to 1.

  Returns:
    The arguments, for easy chaining.
  """
  if weights is None:
    self.denom.assign_add(
        math_ops.cast(array_ops.identity(array_ops.size(values)), self.dtype))
    values = math_ops.reduce_sum(values)
    self.numer.assign_add(math_ops.cast(values, self.dtype))
  else:
    weights = math_ops.cast(weights, self.dtype)
    self.denom.assign_add(math_ops.reduce_sum(weights))
    values = math_ops.cast(values, self.dtype) * weights
    self.numer.assign_add(math_ops.reduce_sum(values))
  if weights is None:
    return values
  return values, weights
Developer: Jackiefan, Project: tensorflow, Lines: 26, Source: metrics_impl.py
Example 20: _testGradients
def _testGradients(self, tr_a, tr_b, sp_a, sp_b, a_dtype, b_dtype, delta,
                   name):
  with self.test_session():
    a = constant_op.constant(
        RandMatrix(
            3, 2, tr_a, round_bfloat=True), dtype=dtypes.float32)
    b = constant_op.constant(
        RandMatrix(
            2, 4, tr_b, round_bfloat=True), dtype=dtypes.float32)
    tf_a = math_ops.cast(a, a_dtype) if a_dtype != dtypes.float32 else a
    tf_b = math_ops.cast(b, b_dtype) if b_dtype != dtypes.float32 else b

    m = math_ops.matmul(
        tf_a,
        tf_b,
        name=name,
        transpose_a=tr_a,
        transpose_b=tr_b,
        a_is_sparse=sp_a,
        b_is_sparse=sp_b)
    err = (gradient_checker.compute_gradient_error(
        a, [2, 3] if tr_a else [3, 2],
        m, [3, 4],
        x_init_value=a.eval(),
        delta=delta) + gradient_checker.compute_gradient_error(
            b, [4, 2] if tr_b else [2, 4],
            m, [3, 4],
            x_init_value=b.eval(),
            delta=delta))
    self.assertLessEqual(err, delta / 2.)
Developer: AnishShah, Project: tensorflow, Lines: 30, Source: sparse_matmul_op_test.py
Note: The tensorflow.python.ops.math_ops.cast examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution and use are subject to each project's license. Do not repost without permission.