This article collects typical usage examples of the Python function tensorflow.contrib.keras.python.keras.backend.int_shape. If you have been wondering what int_shape does, how to call it, or what real-world uses look like, the curated code samples below should help.
The article presents 9 code examples of the int_shape function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
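As a quick orientation before the collected examples: int_shape returns the static shape of a tensor or variable as a tuple of Python ints, with None for any dimension that is not known at graph-construction time. Here is a minimal sketch, assuming a TensorFlow 1.x build where tensorflow.contrib.keras is available (the shapes used are purely illustrative):

from tensorflow.contrib.keras.python.keras import backend as K

x = K.placeholder(shape=(None, 32))   # batch dimension left undefined
print(K.int_shape(x))                 # -> (None, 32)

v = K.variable([[1., 2.], [3., 4.]])  # a concrete 2x2 variable
print(K.int_shape(v))                 # -> (2, 2)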
Example 1: get_updates
def get_updates(self, loss, params):
  grads = self.get_gradients(loss, params)
  self.updates = [K.update_add(self.iterations, 1)]

  lr = self.lr
  if self.initial_decay > 0:
    lr *= (1. / (1. + self.decay * K.cast(self.iterations,
                                          K.dtype(self.decay))))

  t = K.cast(self.iterations, K.floatx()) + 1
  lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
               (1. - K.pow(self.beta_1, t)))

  ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
  vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
  self.weights = [self.iterations] + ms + vs

  for p, g, m, v in zip(params, grads, ms, vs):
    m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
    v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
    p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

    self.updates.append(K.update(m, m_t))
    self.updates.append(K.update(v, v_t))
    new_p = p_t

    # Apply constraints.
    if getattr(p, 'constraint', None) is not None:
      new_p = p.constraint(new_p)

    self.updates.append(K.update(p, new_p))
  return self.updates
Developer: jiayouwyhit, Project: tensorflow, Lines: 32, Source file: optimizers.py
Example 2: get_updates
def get_updates(self, params, constraints, loss):
  grads = self.get_gradients(loss, params)
  shapes = [K.int_shape(p) for p in params]
  accumulators = [K.zeros(shape) for shape in shapes]
  delta_accumulators = [K.zeros(shape) for shape in shapes]
  self.weights = accumulators + delta_accumulators
  self.updates = []

  lr = self.lr
  if self.initial_decay > 0:
    lr *= (1. / (1. + self.decay * self.iterations))
    self.updates.append(K.update_add(self.iterations, 1))

  for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
    # update accumulator
    new_a = self.rho * a + (1. - self.rho) * K.square(g)
    self.updates.append(K.update(a, new_a))

    # use the new accumulator and the *old* delta_accumulator
    update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)

    new_p = p - lr * update
    # apply constraints
    if p in constraints:
      c = constraints[p]
      new_p = c(new_p)
    self.updates.append(K.update(p, new_p))

    # update delta_accumulator
    new_d_a = self.rho * d_a + (1 - self.rho) * K.square(update)
    self.updates.append(K.update(d_a, new_d_a))
  return self.updates
Developer: Dr4KK, Project: tensorflow, Lines: 32, Source file: optimizers.py
Example 3: call
def call(self, inputs, mask=None):
  input_shape = K.int_shape(inputs)
  if input_shape[0]:
    # batch size matters, use rnn-based implementation
    def step(x, _):
      output = self.layer.call(x)
      return output, []

    _, outputs, _ = K.rnn(
        step,
        inputs,
        initial_states=[],
        input_length=input_shape[1],
        unroll=False)
    y = outputs
  else:
    # No batch size specified, therefore the layer will be able
    # to process batches of any size.
    # We can go with reshape-based implementation for performance.
    input_length = input_shape[1]
    if not input_length:
      input_length = K.shape(inputs)[1]
    # Shape: (num_samples * timesteps, ...)
    inputs = K.reshape(inputs, (-1,) + input_shape[2:])
    y = self.layer.call(inputs)  # (num_samples * timesteps, ...)
    # Shape: (num_samples, timesteps, ...)
    output_shape = self._compute_output_shape(input_shape).as_list()  # pylint: disable=protected-access
    y = K.reshape(y, [-1, input_length] + output_shape[2:])

  # Apply activity regularizer if any:
  if (hasattr(self.layer, 'activity_regularizer') and
      self.layer.activity_regularizer is not None):
    regularization_loss = self.layer.activity_regularizer(y)
    self.add_loss(regularization_loss, inputs)
  return y
Developer: finardi, Project: tensorflow, Lines: 35, Source file: wrappers.py
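The branch in Example 3 (and again in Example 6 below) hinges on whether K.int_shape(inputs)[0] is a concrete integer or None. A hedged sketch of that distinction, assuming the Input import path shown here is available in this contrib build and using illustrative shapes:

from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.layers import Input

fixed = Input(batch_shape=(8, 10, 16))  # batch size fixed at graph-build time
print(K.int_shape(fixed)[0])            # -> 8: the rnn-based path would be taken

free = Input(shape=(10, 16))            # batch size left unspecified
print(K.int_shape(free)[0])             # -> None: the reshape-based path would be taken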
Example 4: get_constants
def get_constants(self, inputs, training=None):
  constants = []
  if self.implementation != 0 and 0 < self.dropout < 1:
    input_shape = K.int_shape(inputs)
    input_dim = input_shape[-1]
    ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
    ones = K.tile(ones, (1, int(input_dim)))

    def dropped_inputs():
      return K.dropout(ones, self.dropout)

    dp_mask = [
        K.in_train_phase(dropped_inputs, ones, training=training)
        for _ in range(3)
    ]
    constants.append(dp_mask)
  else:
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])

  if 0 < self.recurrent_dropout < 1:
    ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
    ones = K.tile(ones, (1, self.units))

    def dropped_inputs():  # pylint: disable=function-redefined
      return K.dropout(ones, self.recurrent_dropout)

    rec_dp_mask = [
        K.in_train_phase(dropped_inputs, ones, training=training)
        for _ in range(3)
    ]
    constants.append(rec_dp_mask)
  else:
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
  return constants
Developer: AlbertXiebnu, Project: tensorflow, Lines: 34, Source file: recurrent.py
Example 5: call
def call(self, inputs, mask=None, initial_state=None, training=None):
  # input shape: `(samples, time (padded with zeros), input_dim)`
  # note that the .build() method of subclasses MUST define
  # self.input_spec and self.state_spec with complete input shapes.
  if initial_state is not None:
    if not isinstance(initial_state, (list, tuple)):
      initial_states = [initial_state]
    else:
      initial_states = list(initial_state)
  if isinstance(inputs, list):
    initial_states = inputs[1:]
    inputs = inputs[0]
  elif self.stateful:
    initial_states = self.states
  else:
    initial_states = self.get_initial_states(inputs)

  if len(initial_states) != len(self.states):
    raise ValueError('Layer has ' + str(len(self.states)) +
                     ' states but was passed ' + str(len(initial_states)) +
                     ' initial states.')
  input_shape = K.int_shape(inputs)
  if self.unroll and input_shape[1] is None:
    raise ValueError('Cannot unroll a RNN if the '
                     'time dimension is undefined. \n'
                     '- If using a Sequential model, '
                     'specify the time dimension by passing '
                     'an `input_shape` or `batch_input_shape` '
                     'argument to your first layer. If your '
                     'first layer is an Embedding, you can '
                     'also use the `input_length` argument.\n'
                     '- If using the functional API, specify '
                     'the time dimension by passing a `shape` '
                     'or `batch_shape` argument to your Input layer.')
  constants = self.get_constants(inputs, training=None)
  preprocessed_input = self.preprocess_input(inputs, training=None)

  last_output, outputs, states = K.rnn(
      self.step,
      preprocessed_input,
      initial_states,
      go_backwards=self.go_backwards,
      mask=mask,
      constants=constants,
      unroll=self.unroll)
  if self.stateful:
    updates = []
    for i in range(len(states)):
      updates.append((self.states[i], states[i]))
    self.add_update(updates, inputs)

  # Properly set learning phase
  if 0 < self.dropout + self.recurrent_dropout:
    last_output._uses_learning_phase = True
    outputs._uses_learning_phase = True

  if self.return_sequences:
    return outputs
  else:
    return last_output
Developer: AlbertXiebnu, Project: tensorflow, Lines: 59, Source file: recurrent.py
Example 6: call
def call(self, inputs, training=None, mask=None):
  kwargs = {}
  if has_arg(self.layer.call, 'training'):
    kwargs['training'] = training
  uses_learning_phase = False  # pylint: disable=redefined-outer-name

  input_shape = K.int_shape(inputs)
  if input_shape[0]:
    # batch size matters, use rnn-based implementation
    def step(x, _):
      global uses_learning_phase  # pylint: disable=global-variable-undefined
      output = self.layer.call(x, **kwargs)
      if hasattr(output, '_uses_learning_phase'):
        uses_learning_phase = (output._uses_learning_phase or
                               uses_learning_phase)
      return output, []

    _, outputs, _ = K.rnn(
        step,
        inputs,
        initial_states=[],
        unroll=False)
    y = outputs
  else:
    # No batch size specified, therefore the layer will be able
    # to process batches of any size.
    # We can go with reshape-based implementation for performance.
    input_length = input_shape[1]
    if not input_length:
      input_length = K.shape(inputs)[1]
    # Shape: (num_samples * timesteps, ...). And track the
    # transformation in self._input_map.
    input_uid = tf_base_layers._object_list_uid(inputs)
    inputs = K.reshape(inputs, (-1,) + input_shape[2:])
    self._input_map[input_uid] = inputs
    # (num_samples * timesteps, ...)
    y = self.layer.call(inputs, **kwargs)
    if hasattr(y, '_uses_learning_phase'):
      uses_learning_phase = y._uses_learning_phase
    # Shape: (num_samples, timesteps, ...)
    output_shape = self._compute_output_shape(input_shape).as_list()
    y = K.reshape(y, (-1, input_length) + tuple(output_shape[2:]))

  # Apply activity regularizer if any:
  if (hasattr(self.layer, 'activity_regularizer') and
      self.layer.activity_regularizer is not None):
    regularization_loss = self.layer.activity_regularizer(y)
    self.add_loss(regularization_loss, inputs)

  if uses_learning_phase:
    y._uses_learning_phase = True
  return y
Developer: jiayouwyhit, Project: tensorflow, Lines: 52, Source file: wrappers.py
Example 7: pop
def pop(self):
  """Removes the last layer in the model.

  Raises:
      TypeError: if there are no layers in the model.
  """
  if not self.layers:
    raise TypeError('There are no layers in the model.')

  self.layers.pop()
  if not self.layers:
    self.outputs = []
    self.inbound_nodes = []
    self.outbound_nodes = []
  else:
    self.layers[-1].outbound_nodes = []
    self.outputs = [self.layers[-1].output]
    # update self.inbound_nodes
    self.inbound_nodes[0].output_tensors = self.outputs
    self.inbound_nodes[0].output_shapes = [K.int_shape(self.outputs[0])]
  self.built = False
Developer: AutumnQYN, Project: tensorflow, Lines: 21, Source file: models.py
Example 8: add
def add(self, layer):
  """Adds a layer instance on top of the layer stack.

  Arguments:
      layer: layer instance.

  Raises:
      TypeError: If `layer` is not a layer instance.
      ValueError: In case the `layer` argument does not
          know its input shape.
      ValueError: In case the `layer` argument has
          multiple output tensors, or is already connected
          somewhere else (forbidden in `Sequential` models).
  """
  if not isinstance(layer, Layer):
    raise TypeError('The added layer must be '
                    'an instance of class Layer. '
                    'Found: ' + str(layer))
  if not self.outputs:
    # first layer in model: check that it is an input layer
    if not layer.inbound_nodes:
      # create an input layer
      if not hasattr(layer, 'batch_input_shape'):
        raise ValueError('The first layer in a '
                         'Sequential model must '
                         'get an `input_shape` or '
                         '`batch_input_shape` argument.')
      # Instantiate the input layer.
      x = Input(
          batch_shape=layer.batch_input_shape,
          dtype=layer.dtype,
          name=layer.name + '_input')
      # This will build the current layer
      # and create the node connecting the current layer
      # to the input layer we just created.
      layer(x)

    if len(layer.inbound_nodes) != 1:
      raise ValueError('A layer added to a Sequential model must '
                       'not already be connected somewhere else. '
                       'Model received layer ' + layer.name + ' which has ' +
                       str(len(layer.inbound_nodes)) +
                       ' pre-existing inbound connections.')

    if len(layer.inbound_nodes[0].output_tensors) != 1:
      raise ValueError('All layers in a Sequential model '
                       'should have a single output tensor. '
                       'For multi-output layers, '
                       'use the functional API.')

    self.outputs = [layer.inbound_nodes[0].output_tensors[0]]
    self.inputs = topology.get_source_inputs(self.outputs[0])

    # We create an input node, which we will keep updated
    # as we add more layers
    topology.Node(
        outbound_layer=self,
        inbound_layers=[],
        node_indices=[],
        tensor_indices=[],
        input_tensors=self.inputs,
        output_tensors=self.outputs,
        # no model-level masking for now
        input_masks=[None for _ in self.inputs],
        output_masks=[None])
  else:
    output_tensor = layer(self.outputs[0])
    if isinstance(output_tensor, list):
      raise TypeError('All layers in a Sequential model '
                      'should have a single output tensor. '
                      'For multi-output layers, '
                      'use the functional API.')
    self.outputs = [output_tensor]
    # update self.inbound_nodes
    self.inbound_nodes[0].output_tensors = self.outputs
    self.inbound_nodes[0].output_shapes = [K.int_shape(self.outputs[0])]

  self.layers.append(layer)
  self.built = False
Developer: AutumnQYN, Project: tensorflow, Lines: 79, Source file: models.py
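In Example 8 the recorded output_shapes come straight from K.int_shape applied to the freshly produced output tensor. A minimal sketch of what that shape looks like after adding a layer, assuming the Sequential and Dense import paths below exist in this contrib build (layer sizes are illustrative):

from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense

model = Sequential()
model.add(Dense(4, input_shape=(16,)))  # first layer fixes the feature dimension
print(K.int_shape(model.outputs[0]))    # -> (None, 4): batch dimension stays None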
Example 9: set_model
def set_model(self, model):
  self.model = model
  self.sess = K.get_session()
  if self.histogram_freq and self.merged is None:
    for layer in self.model.layers:
      for weight in layer.weights:
        mapped_weight_name = weight.name.replace(':', '_')
        tf_summary.histogram(mapped_weight_name, weight)
        if self.write_grads:
          grads = model.optimizer.get_gradients(model.total_loss, weight)
          tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
        if self.write_images:
          w_img = array_ops.squeeze(weight)
          shape = K.int_shape(w_img)
          if len(shape) == 2:  # dense layer kernel case
            if shape[0] > shape[1]:
              w_img = array_ops.transpose(w_img)
              shape = K.int_shape(w_img)
            w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
          elif len(shape) == 3:  # convnet case
            if K.image_data_format() == 'channels_last':
              # switch to channels_first to display
              # every kernel as a separate image
              w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
              shape = K.int_shape(w_img)
            w_img = array_ops.reshape(w_img,
                                      [shape[0], shape[1], shape[2], 1])
          elif len(shape) == 1:  # bias case
            w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
          else:
            # not possible to handle 3D convnets etc.
            continue

          shape = K.int_shape(w_img)
          assert len(shape) == 4 and shape[-1] in [1, 3, 4]
          tf_summary.image(mapped_weight_name, w_img)

      if hasattr(layer, 'output'):
        tf_summary.histogram('{}_out'.format(layer.name), layer.output)
  self.merged = tf_summary.merge_all()

  if self.write_graph:
    self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
  else:
    self.writer = tf_summary.FileWriter(self.log_dir)

  if self.embeddings_freq:
    embeddings_layer_names = self.embeddings_layer_names

    if not embeddings_layer_names:
      embeddings_layer_names = [
          layer.name for layer in self.model.layers
          if type(layer).__name__ == 'Embedding'
      ]

    embeddings = {
        layer.name: layer.weights[0]
        for layer in self.model.layers if layer.name in embeddings_layer_names
    }

    self.saver = saver_lib.Saver(list(embeddings.values()))

    embeddings_metadata = {}

    if not isinstance(self.embeddings_metadata, str):
      embeddings_metadata = self.embeddings_metadata
    else:
      embeddings_metadata = {
          layer_name: self.embeddings_metadata
          for layer_name in embeddings.keys()
      }

    config = projector.ProjectorConfig()
    self.embeddings_ckpt_path = os.path.join(self.log_dir,
                                             'keras_embedding.ckpt')

    for layer_name, tensor in embeddings.items():
      embedding = config.embeddings.add()
      embedding.tensor_name = tensor.name

      if layer_name in embeddings_metadata:
        embedding.metadata_path = embeddings_metadata[layer_name]

    projector.visualize_embeddings(self.writer, config)
Developer: Dr4KK, Project: tensorflow, Lines: 84, Source file: callbacks.py
Note: the tensorflow.contrib.keras.python.keras.backend.int_shape examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. When redistributing or using the code, please follow the license of the corresponding project. Do not reproduce this article without permission.