This article collects typical usage examples of the tensorflow.python.keras.backend.dtype function in Python. If you have been wondering what exactly the Python dtype function does, how to call it, or what real-world uses look like, the curated code samples here may help.
Below are 12 code examples of the dtype function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Python code samples.
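Before the collected examples, here is a minimal, hedged sketch of what the function itself returns: the dtype of a tensor or Keras variable as a plain string such as 'float32'. It assumes a TensorFlow version in which the private tensorflow.python.keras path is still importable; in recent releases tf.keras.backend is the equivalent public entry point.

import tensorflow as tf
from tensorflow.python.keras import backend as K  # tf.keras.backend in newer releases

x = tf.constant([[1.0, 2.0]])                      # defaults to float32
v = K.variable([[1, 2], [3, 4]], dtype='float64')  # backend variable

print(K.dtype(x))                   # 'float32'
print(K.dtype(v))                   # 'float64'
print(K.dtype(K.cast(x, 'int32')))  # 'int32'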
Example 1: get_updates
def get_updates(self, loss, params):
  grads = self.get_gradients(loss, params)
  accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
  self.weights = accumulators
  self.updates = [state_ops.assign_add(self.iterations, 1)]

  lr = self.lr
  if self.initial_decay > 0:
    lr = lr * (  # pylint: disable=g-no-augmented-assignment
        1. /
        (1. +
         self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))

  for p, g, a in zip(params, grads, accumulators):
    # update accumulator
    new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
    self.updates.append(state_ops.assign(a, new_a))
    new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)

    # Apply constraints.
    if getattr(p, 'constraint', None) is not None:
      new_p = p.constraint(new_p)

    self.updates.append(state_ops.assign(p, new_p))
  return self.updates
Developer: adit-chandra, Project: tensorflow, Lines: 25, Source: optimizers.py
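A small, hedged sketch of the pattern used above when creating the accumulators: each slot variable is allocated with K.zeros(K.int_shape(p), dtype=K.dtype(p)), so parameters of different floating-point precisions each get a matching accumulator. The two variables below are illustrative stand-ins, not actual model weights.

import tensorflow as tf
from tensorflow.python.keras import backend as K

params = [K.variable([[0.1, 0.2]], dtype='float32'),
          K.variable([0.5, 0.5, 0.5], dtype='float64')]

# One accumulator per parameter, mirroring its shape and dtype.
accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
for p, a in zip(params, accumulators):
  print(K.dtype(p), '->', K.dtype(a))  # float32 -> float32, float64 -> float64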
Example 2: _preprocess_symbolic_input
def _preprocess_symbolic_input(x, data_format, mode):
  """Preprocesses a tensor encoding a batch of images.

  Arguments:
      x: Input tensor, 3D or 4D.
      data_format: Data format of the image tensor.
      mode: One of "caffe", "tf" or "torch".
          - caffe: will convert the images from RGB to BGR,
              then will zero-center each color channel with
              respect to the ImageNet dataset,
              without scaling.
          - tf: will scale pixels between -1 and 1,
              sample-wise.
          - torch: will scale pixels between 0 and 1 and then
              will normalize each channel with respect to the
              ImageNet dataset.

  Returns:
      Preprocessed tensor.
  """
  global _IMAGENET_MEAN

  if mode == 'tf':
    x /= 127.5
    x -= 1.
    return x

  if mode == 'torch':
    x /= 255.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
  else:
    if data_format == 'channels_first':
      # 'RGB'->'BGR'
      if K.ndim(x) == 3:
        x = x[::-1, ...]
      else:
        x = x[:, ::-1, ...]
    else:
      # 'RGB'->'BGR'
      x = x[..., ::-1]
    mean = [103.939, 116.779, 123.68]
    std = None

  if _IMAGENET_MEAN is None:
    _IMAGENET_MEAN = constant_op.constant(-np.array(mean), dtype=K.floatx())

  # Zero-center by mean pixel
  if K.dtype(x) != K.dtype(_IMAGENET_MEAN):
    x = K.bias_add(x, math_ops.cast(_IMAGENET_MEAN, K.dtype(x)), data_format)
  else:
    x = K.bias_add(x, _IMAGENET_MEAN, data_format)
  if std is not None:
    x /= std
  return x
Developer: Huoxubeiyin, Project: tensorflow, Lines: 55, Source: imagenet_utils.py
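A hedged, standalone sketch of the dtype guard near the end of that function: the cached ImageNet-mean constant is created in K.floatx(), so when the incoming tensor uses a different float dtype the mean is cast to K.dtype(x) before K.bias_add. The tensor shapes below are illustrative only.

import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K

mean = tf.constant(-np.array([103.939, 116.779, 123.68]), dtype=K.floatx())
x = tf.zeros((1, 2, 2, 3), dtype='float64')  # a channels_last image batch

if K.dtype(x) != K.dtype(mean):
  mean = tf.cast(mean, K.dtype(x))           # align dtypes before bias_add
x = K.bias_add(x, mean, data_format='channels_last')
print(K.dtype(x), x.shape)                   # float64 (1, 2, 2, 3)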
Example 3: sparse_categorical_accuracy
def sparse_categorical_accuracy(y_true, y_pred):
  # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
  if (len(K.int_shape(y_true)) == len(K.int_shape(y_pred))):
    y_true = array_ops.squeeze(y_true, [-1])
  y_pred = math_ops.argmax(y_pred, axis=-1)

  # If the predicted output and actual output types don't match, force cast
  # them to match.
  if K.dtype(y_pred) != K.dtype(y_true):
    y_pred = math_ops.cast(y_pred, K.dtype(y_true))

  return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
Developer: JonathanRaiman, Project: tensorflow, Lines: 12, Source: metrics.py
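A hedged illustration of why that cast exists: math_ops.argmax returns int64, while labels passed through Keras are commonly float32, and tf.equal refuses to compare tensors of different dtypes. The toy batch below is made up; it assumes eager execution under TF 2.x.

import tensorflow as tf
from tensorflow.python.keras import backend as K

y_true = tf.constant([1., 0., 2.])            # float labels
y_pred = tf.constant([[0.1, 0.8, 0.1],
                      [0.7, 0.2, 0.1],
                      [0.2, 0.2, 0.6]])

pred_ids = tf.argmax(y_pred, axis=-1)         # int64 class ids
if K.dtype(pred_ids) != K.dtype(y_true):
  pred_ids = tf.cast(pred_ids, K.dtype(y_true))
acc = tf.reduce_mean(tf.cast(tf.equal(y_true, pred_ids), K.floatx()))
print(float(acc))                             # 1.0 for this toy batch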
Example 4: get_updates
def get_updates(self, loss, params):
  grads = self.get_gradients(loss, params)
  shapes = [K.int_shape(p) for p in params]
  accumulators = [K.zeros(shape) for shape in shapes]
  delta_accumulators = [K.zeros(shape) for shape in shapes]
  self.weights = accumulators + delta_accumulators
  self.updates = [state_ops.assign_add(self.iterations, 1)]

  lr = self.lr
  if self.initial_decay > 0:
    lr = lr * (  # pylint: disable=g-no-augmented-assignment
        1. / (1. + self.decay * math_ops.cast(self.iterations,
                                              K.dtype(self.decay))))

  for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
    # update accumulator
    new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
    self.updates.append(state_ops.assign(a, new_a))

    # use the new accumulator and the *old* delta_accumulator
    update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
    new_p = p - lr * update

    # Apply constraints.
    if getattr(p, 'constraint', None) is not None:
      new_p = p.constraint(new_p)

    self.updates.append(state_ops.assign(p, new_p))

    # update delta_accumulator
    new_d_a = self.rho * d_a + (1 - self.rho) * math_ops.square(update)
    self.updates.append(state_ops.assign(d_a, new_d_a))
  return self.updates
Developer: sonnyhu, Project: tensorflow, Lines: 33, Source: optimizers.py
Example 5: sparse_categorical_accuracy
def sparse_categorical_accuracy(y_true, y_pred):
  y_true = math_ops.reduce_max(y_true, axis=-1)
  y_pred = math_ops.argmax(y_pred, axis=-1)

  # If the expected labels are float, we need to cast the int returned by
  # argmax to compare.
  if K.dtype(y_true) == K.floatx():
    y_pred = math_ops.cast(y_pred, K.floatx())
  return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
Developer: gunan, Project: tensorflow, Lines: 10, Source: metrics.py
Example 6: sparse_categorical_accuracy
def sparse_categorical_accuracy(y_true, y_pred):
  # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
  if (len(K.int_shape(y_true)) == len(K.int_shape(y_pred))):
    y_true = array_ops.squeeze(y_true, [-1])
  y_pred = math_ops.argmax(y_pred, axis=-1)

  # If the expected labels are float, we need to cast the int returned by
  # argmax to compare.
  if K.dtype(y_true) == K.floatx():
    y_pred = math_ops.cast(y_pred, K.floatx())
  return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
Developer: ThunderQi, Project: tensorflow, Lines: 12, Source: metrics.py
Example 7: __init__
def __init__(self,
             input_shape=None,
             batch_size=None,
             dtype=None,
             input_tensor=None,
             sparse=False,
             name=None,
             **kwargs):
  if 'batch_input_shape' in kwargs:
    batch_input_shape = kwargs.pop('batch_input_shape')
    if input_shape and batch_input_shape:
      raise ValueError('Only provide the input_shape OR '
                       'batch_input_shape argument to '
                       'InputLayer, not both at the same time.')
    batch_size = batch_input_shape[0]
    input_shape = batch_input_shape[1:]
  if kwargs:
    raise ValueError('Unrecognized keyword arguments:', kwargs.keys())

  if not name:
    prefix = 'input'
    name = prefix + '_' + str(K.get_uid(prefix))

  if not dtype:
    if input_tensor is None:
      dtype = K.floatx()
    else:
      dtype = K.dtype(input_tensor)
  super(InputLayer, self).__init__(dtype=dtype, name=name)
  self.built = True
  self.sparse = sparse
  self.batch_size = batch_size

  if isinstance(input_shape, tensor_shape.TensorShape):
    input_shape = tuple(input_shape.as_list())

  if input_tensor is None:
    if input_shape is not None:
      batch_input_shape = (batch_size,) + tuple(input_shape)
    else:
      batch_input_shape = None

    if context.executing_eagerly():
      # In eager mode, create a temporary placeholder to call the layer on.
      input_tensor = base_layer.DeferredTensor(  # pylint: disable=protected-access
          shape=batch_input_shape,
          dtype=dtype,
          name=self.name)
    else:
      # In graph mode, create a graph placeholder to call the layer on.
      if sparse:
        input_tensor = array_ops.sparse_placeholder(
            shape=batch_input_shape,
            dtype=dtype,
            name=self.name)
      else:
        input_tensor = array_ops.placeholder(
            shape=batch_input_shape,
            dtype=dtype,
            name=self.name)

    # For compatibility with Keras API.
    self.is_placeholder = True
    self._batch_input_shape = batch_input_shape
  else:
    # For compatibility with Keras API.
    self.is_placeholder = False
    self._batch_input_shape = tuple(input_tensor.get_shape().as_list())
    if context.executing_eagerly():
      raise ValueError('You should not pass an input tensor when executing '
                       'in eager mode. For example, instead of creating an '
                       'InputLayer, you should instantiate your model and '
                       'directly call it on your input.')

  # Create an input node to add to self.outbound_node
  # and set output_tensors' _keras_history.
  input_tensor._keras_history = (self, 0, 0)  # pylint: disable=protected-access
  base_layer.Node(
      self,
      inbound_layers=[],
      node_indices=[],
      tensor_indices=[],
      input_tensors=[input_tensor],
      output_tensors=[input_tensor])
Developer: AnishShah, Project: tensorflow, Lines: 85, Source: input_layer.py
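A hedged sketch of the dtype-inference branch in that constructor: when no dtype argument is given, InputLayer falls back to backend.dtype(input_tensor) if a tensor was passed, and to backend.floatx() otherwise. The helper name infer_input_dtype is hypothetical and used only to isolate that branch.

import tensorflow as tf
from tensorflow.python.keras import backend as K

def infer_input_dtype(dtype=None, input_tensor=None):
  # Illustrative helper, not part of Keras: mirrors the fallback logic above.
  if not dtype:
    dtype = K.floatx() if input_tensor is None else K.dtype(input_tensor)
  return dtype

print(infer_input_dtype())                                  # 'float32'
print(infer_input_dtype(input_tensor=tf.constant([1, 2])))  # 'int32'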
Example 8: call
def call(self, inputs):
  dtype = K.dtype(inputs)
  if dtype != 'int32' and dtype != 'int64':
    inputs = math_ops.cast(inputs, 'int32')
  out = embedding_ops.embedding_lookup(self.embeddings, inputs)
  return out
Developer: adit-chandra, Project: tensorflow, Lines: 6, Source: embeddings.py
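A hedged, layer-free sketch of the same guard: embedding lookups need integer ids, so when K.dtype reports a non-integer input it is cast to int32 first. tf.nn.embedding_lookup is the public alias of the embedding_ops call used above; the embedding matrix here is a random stand-in.

import tensorflow as tf
from tensorflow.python.keras import backend as K

embeddings = tf.random.uniform((10, 4))     # 10 tokens, 4-dim vectors
inputs = tf.constant([[1., 3.], [5., 7.]])  # float ids, e.g. from an input pipeline

if K.dtype(inputs) not in ('int32', 'int64'):
  inputs = tf.cast(inputs, 'int32')
out = tf.nn.embedding_lookup(embeddings, inputs)
print(out.shape)                            # (2, 2, 4)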
Example 9: __init__
def __init__(self,
             input_shape=None,
             batch_size=None,
             dtype=None,
             input_tensor=None,
             sparse=False,
             name=None,
             **kwargs):
  if 'batch_input_shape' in kwargs:
    batch_input_shape = kwargs.pop('batch_input_shape')
    if input_shape and batch_input_shape:
      raise ValueError('Only provide the input_shape OR '
                       'batch_input_shape argument to '
                       'InputLayer, not both at the same time.')
    batch_size = batch_input_shape[0]
    input_shape = batch_input_shape[1:]
  if kwargs:
    raise ValueError('Unrecognized keyword arguments:', kwargs.keys())

  if not name:
    prefix = 'input'
    name = prefix + '_' + str(backend.get_uid(prefix))

  if not dtype:
    if input_tensor is None:
      dtype = backend.floatx()
    else:
      dtype = backend.dtype(input_tensor)
  super(InputLayer, self).__init__(dtype=dtype, name=name)
  self.built = True
  self.sparse = sparse
  self.batch_size = batch_size
  self.supports_masking = True

  if isinstance(input_shape, tensor_shape.TensorShape):
    input_shape = tuple(input_shape.as_list())

  if input_tensor is None:
    if input_shape is not None:
      batch_input_shape = (batch_size,) + tuple(input_shape)
    else:
      batch_input_shape = None

    graph = backend.get_graph()
    with graph.as_default():
      # In graph mode, create a graph placeholder to call the layer on.
      if sparse:
        input_tensor = backend.placeholder(
            shape=batch_input_shape,
            dtype=dtype,
            name=self.name,
            sparse=True)
      else:
        input_tensor = backend.placeholder(
            shape=batch_input_shape,
            dtype=dtype,
            name=self.name)

    self.is_placeholder = True
    self._batch_input_shape = batch_input_shape
  else:
    if not tf_utils.is_symbolic_tensor(input_tensor):
      raise ValueError('You should not pass an EagerTensor to `Input`. '
                       'For example, instead of creating an '
                       'InputLayer, you should instantiate your model and '
                       'directly call it on your input.')
    self.is_placeholder = False
    self._batch_input_shape = tuple(input_tensor.get_shape().as_list())

  # Create an input node to add to self.outbound_node
  # and set output_tensors' _keras_history.
  input_tensor._keras_history = (self, 0, 0)  # pylint: disable=protected-access
  base_layer.Node(
      self,
      inbound_layers=[],
      node_indices=[],
      tensor_indices=[],
      input_tensors=[input_tensor],
      output_tensors=[input_tensor])
Developer: aeverall, Project: tensorflow, Lines: 78, Source: input_layer.py
Example 10: call
def call(self, inputs):
  if K.dtype(inputs) != 'int32':
    inputs = math_ops.cast(inputs, 'int32')
  inputs = array_ops.one_hot(inputs, self.input_dim)
  return math_ops.tensordot(inputs, self.embeddings, 1)
Developer: Eagle732, Project: tensorflow, Lines: 6, Source: keras_support.py
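A hedged sketch of that gather-free variant (the source file name suggests a TPU context): ids are cast to int32 when K.dtype says otherwise, one-hot encoded, and contracted with the embedding matrix via tensordot instead of an integer gather. Shapes and names below are illustrative.

import tensorflow as tf
from tensorflow.python.keras import backend as K

input_dim = 10
embeddings = tf.random.uniform((input_dim, 4))
ids = tf.constant([[1., 3.]])               # float ids

if K.dtype(ids) != 'int32':
  ids = tf.cast(ids, 'int32')
one_hot = tf.one_hot(ids, input_dim)        # (1, 2, 10)
out = tf.tensordot(one_hot, embeddings, 1)  # contract last axis -> (1, 2, 4)
print(out.shape)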
Example 11: __init__
def __init__(self,
             input_shape=None,
             batch_size=None,
             dtype=None,
             input_tensor=None,
             sparse=False,
             name=None,
             **kwargs):
  strategy = distribution_strategy_context.get_strategy()
  if strategy and batch_size is not None and \
      distributed_training_utils.global_batch_size_supported(strategy):
    if batch_size % strategy.num_replicas_in_sync != 0:
      raise ValueError('The `batch_size` argument value {} cannot be '
                       'divisible by number of replicas {}'.format(
                           batch_size, strategy.num_replicas_in_sync))
    batch_size = batch_size // strategy.num_replicas_in_sync

  if 'batch_input_shape' in kwargs:
    batch_input_shape = kwargs.pop('batch_input_shape')
    if input_shape and batch_input_shape:
      raise ValueError('Only provide the input_shape OR '
                       'batch_input_shape argument to '
                       'InputLayer, not both at the same time.')
    batch_size = batch_input_shape[0]
    input_shape = batch_input_shape[1:]
  if kwargs:
    raise ValueError('Unrecognized keyword arguments:', kwargs.keys())

  if not name:
    prefix = 'input'
    name = prefix + '_' + str(backend.get_uid(prefix))

  if not dtype:
    if input_tensor is None:
      dtype = backend.floatx()
    else:
      dtype = backend.dtype(input_tensor)
  elif input_tensor is not None and input_tensor.dtype != dtype:
    raise ValueError('`input_tensor.dtype` differs from `dtype`: %s vs. %s' %
                     (input_tensor.dtype, dtype))
  super(InputLayer, self).__init__(dtype=dtype, name=name)
  self.built = True
  self.sparse = sparse
  self.batch_size = batch_size
  self.supports_masking = True

  if isinstance(input_shape, tensor_shape.TensorShape):
    input_shape = tuple(input_shape.as_list())
  elif isinstance(input_shape, int):
    input_shape = (input_shape,)

  if input_tensor is None:
    if input_shape is not None:
      batch_input_shape = (batch_size,) + tuple(input_shape)
    else:
      batch_input_shape = None

    graph = backend.get_graph()
    with graph.as_default():
      # In graph mode, create a graph placeholder to call the layer on.
      if sparse:
        input_tensor = backend.placeholder(
            shape=batch_input_shape,
            dtype=dtype,
            name=self.name,
            sparse=True)
      else:
        input_tensor = backend.placeholder(
            shape=batch_input_shape,
            dtype=dtype,
            name=self.name)

    self.is_placeholder = True
    self._batch_input_shape = batch_input_shape
  else:
    if not tf_utils.is_symbolic_tensor(input_tensor):
      raise ValueError('You should not pass an EagerTensor to `Input`. '
                       'For example, instead of creating an '
                       'InputLayer, you should instantiate your model and '
                       'directly call it on your input.')
    self.is_placeholder = False
    self._batch_input_shape = tuple(input_tensor.shape.as_list())

  # Create an input node to add to self.outbound_node
  # and set output_tensors' _keras_history.
  input_tensor._keras_history = (self, 0, 0)  # pylint: disable=protected-access
  input_tensor._keras_mask = None
  base_layer.Node(
      self,
      inbound_layers=[],
      node_indices=[],
      tensor_indices=[],
      input_tensors=[input_tensor],
      output_tensors=[input_tensor])
Developer: aritratony, Project: tensorflow, Lines: 93, Source: input_layer.py
Example 12: layer_test
def layer_test(layer_cls, kwargs={}, input_shape=None, input_dtype=None,
               input_data=None, expected_output=None,
               expected_output_dtype=None, fixed_batch_size=False):
  # generate input data
  if input_data is None:
    if not input_shape:
      raise AssertionError()
    if not input_dtype:
      input_dtype = K.floatx()
    input_data_shape = list(input_shape)
    for i, e in enumerate(input_data_shape):
      if e is None:
        input_data_shape[i] = np.random.randint(1, 4)

    if all(isinstance(e, tuple) for e in input_data_shape):
      input_data = []
      for e in input_data_shape:
        input_data.append(
            (10 * np.random.random(e)).astype(input_dtype))
    else:
      input_data = (10 * np.random.random(input_data_shape))
      input_data = input_data.astype(input_dtype)
  else:
    if input_shape is None:
      input_shape = input_data.shape
    if input_dtype is None:
      input_dtype = input_data.dtype
  if expected_output_dtype is None:
    expected_output_dtype = input_dtype

  # instantiation
  layer = layer_cls(**kwargs)

  # test get_weights, set_weights at layer level
  weights = layer.get_weights()
  layer.set_weights(weights)

  try:
    expected_output_shape = layer.compute_output_shape(input_shape)
  except Exception:
    expected_output_shape = layer._compute_output_shape(input_shape)

  # test in functional API
  if isinstance(input_shape, list):
    if fixed_batch_size:
      x = [Input(batch_shape=e, dtype=input_dtype) for e in input_shape]
    else:
      x = [Input(shape=e[1:], dtype=input_dtype) for e in input_shape]
  else:
    if fixed_batch_size:
      x = Input(batch_shape=input_shape, dtype=input_dtype)
    else:
      x = Input(shape=input_shape[1:], dtype=input_dtype)
  y = layer(x)
  if not (K.dtype(y) == expected_output_dtype):
    raise AssertionError()

  # check with the functional API
  model = Model(x, y)

  actual_output = model.predict(input_data)
  actual_output_shape = actual_output.shape
  for expected_dim, actual_dim in zip(expected_output_shape,
                                      actual_output_shape):
#......... the rest of the code is omitted here .........
Developer: SundeepMehta, Project: DeepCTR, Lines: 101, Source: utils.py
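A hedged mini-version of the dtype assertion inside layer_test, using a stock Dense layer instead of the layer under test: build the layer on a symbolic Input and compare the output's K.dtype with the expected string. It assumes a TF 2.x installation where tf.keras and the private backend path refer to the same Keras.

import tensorflow as tf
from tensorflow.python.keras import backend as K

x = tf.keras.Input(shape=(3,), dtype='float32')
y = tf.keras.layers.Dense(2)(x)       # symbolic output of the layer under test
if not (K.dtype(y) == K.floatx()):    # expected_output_dtype defaults to the input dtype
  raise AssertionError()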
Note: The tensorflow.python.keras.backend.dtype examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or reusing the code, and do not republish this article without permission.