This article collects typical usage examples of the tensorflow.python.ops.state_ops.assign function in Python. If you have been wondering what exactly assign does, how to call it, or where to find working examples, the curated code samples below should help.
The following 20 code examples of the assign function are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
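Before diving into the examples, here is a minimal, self-contained sketch of what state_ops.assign does. It is ours, not taken from any of the projects below, and it targets the TF 1.x graph API that all of the examples use; state_ops.assign is the internal counterpart of the public tf.assign.

import tensorflow as tf
from tensorflow.python.ops import state_ops

v = tf.Variable([1.0, 2.0], name="v")
# assign returns an op that, when run, writes the new value into the
# variable and yields the freshly written value.
assign_op = state_ops.assign(v, [10.0, 20.0])

with tf.Session() as sess:
  sess.run(v.initializer)
  print(sess.run(v))          # [1. 2.]
  print(sess.run(assign_op))  # [10. 20.]
  print(sess.run(v))          # [10. 20.]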
Example 1: streaming_tp_fp_arrays
def streaming_tp_fp_arrays(num_gbboxes, tp, fp,
                           metrics_collections=None,
                           updates_collections=None,
                           name=None):
  """Streaming computation of True and False Positive arrays."""
  with variable_scope.variable_scope(name, 'streaming_tp_fp',
                                     [num_gbboxes, tp, fp]):
    num_gbboxes = tf.cast(num_gbboxes, tf.int32)
    tp = tf.cast(tp, tf.bool)
    fp = tf.cast(fp, tf.bool)
    # Reshape TP and FP tensors and clean away 0 class values.
    tp = tf.reshape(tp, [-1])
    fp = tf.reshape(fp, [-1])
    # Local variables accumulating information over batches.
    v_num_objects = _create_local('v_num_gbboxes', shape=[], dtype=tf.int32)
    v_tp = _create_local('v_tp', shape=[0, ], dtype=tf.bool)
    v_fp = _create_local('v_fp', shape=[0, ], dtype=tf.bool)
    # Update operations.
    num_objects_op = state_ops.assign_add(v_num_objects,
                                          tf.reduce_sum(num_gbboxes))
    tp_op = state_ops.assign(v_tp, tf.concat([v_tp, tp], axis=0),
                             validate_shape=False)
    fp_op = state_ops.assign(v_fp, tf.concat([v_fp, fp], axis=0),
                             validate_shape=False)
    # Value and update ops.
    val = (v_num_objects, v_tp, v_fp)
    with ops.control_dependencies([num_objects_op, tp_op, fp_op]):
      update_op = (num_objects_op, tp_op, fp_op)
      return val, update_op
Developer: cvtower, Project: seglink, Lines: 35, Source: metrics.py
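The key trick in Example 1 is validate_shape=False: it allows an assign to change the variable's shape, so the v_tp/v_fp accumulators can grow by one batch's worth of values on every update. Below is a stripped-down sketch of just that pattern (TF 1.x; the names acc, batch, and grow are ours, chosen for illustration):

import tensorflow as tf
from tensorflow.python.ops import state_ops

# A bool vector that starts empty; validate_shape=False at creation leaves
# its static shape unknown so later assigns may resize it.
acc = tf.Variable(tf.zeros([0], dtype=tf.bool), trainable=False,
                  validate_shape=False)
batch = tf.placeholder(tf.bool, shape=[None])
grow = state_ops.assign(acc, tf.concat([acc, batch], axis=0),
                        validate_shape=False)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(grow, feed_dict={batch: [True, False]})
  sess.run(grow, feed_dict={batch: [True]})
  print(sess.run(acc))  # [ True False  True]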
Example 2: _Update_global_variables
def _Update_global_variables():
  global_norm = []
  # a = a / t
  for g in grad_vars:
    global_norm.append(state_ops.assign(g, g / self._period))
  # apply
  with ops.control_dependencies(global_norm):
    apply_global_op = self._opt.apply_gradients(
        zip(grad_vars, global_center_vars))
  # pull
  with ops.control_dependencies([apply_global_op]):
    update_ops = []
    if global_step:
      with ops.colocate_with(global_step):
        update_ops.append(state_ops.assign_add(global_step, 1))
    for lvar in local_vars:
      g_val = self._global_map[lvar].read_value()
      update_ops.append(state_ops.assign(lvar, g_val))
    for grad_var in grad_vars:
      update_ops.append(
          state_ops.assign(grad_var, array_ops.zeros_like(grad_var)))
    variable_update = control_flow_ops.group(*(update_ops))
  return variable_update
Developer: Ajaycs99, Project: tensorflow, Lines: 25, Source: agn_optimizer.py
Example 3: _apply_sparse
def _apply_sparse(self, grad, var):
  beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
  beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
  lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
  beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
  beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
  epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
  lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
  # m_t = beta1 * m + (1 - beta1) * g_t
  m = self.get_slot(var, "m")
  m_scaled_g_values = grad.values * (1 - beta1_t)
  m_t = state_ops.assign(m, m * beta1_t,
                         use_locking=self._use_locking)
  m_t = state_ops.scatter_add(m_t, grad.indices, m_scaled_g_values,
                              use_locking=self._use_locking)
  # u_t = max(beta2 * u_{t-1}, |g_t|)
  # theta_t = theta_{t-1} - alpha / (1 - beta1) * m_t / u_t
  v = self.get_slot(var, "v")
  # Note: the snippet as published referenced the undefined names g_t,
  # beta_2 and beta_1, plus a nonexistent state_ops.assign_max; grad.values,
  # beta2_t, beta1_t and state_ops.scatter_max are the intended operations.
  g_abs_values = math_ops.abs(grad.values)
  v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
  v_t = state_ops.scatter_max(v_t, grad.indices, g_abs_values,
                              use_locking=self._use_locking)
  var_update = state_ops.assign_sub(var,
                                    lr * m_t / (v_t * (1 - beta1_t)),
                                    use_locking=self._use_locking)
  return control_flow_ops.group(*[var_update, m_t, v_t])
Developer: Faruk-Ahmed, Project: nn, Lines: 31, Source: adamax.py
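For reference, the update rule this excerpt implements is AdaMax (Kingma & Ba, Algorithm 2), where Adam's second moment is replaced by an exponentially weighted infinity norm. A plain NumPy sketch of one dense step (ours, independent of the snippet above):

import numpy as np

def adamax_step(theta, g, m, u, t, alpha=0.002, beta1=0.9, beta2=0.999):
  """One dense AdaMax update."""
  m = beta1 * m + (1 - beta1) * g        # biased first moment
  u = np.maximum(beta2 * u, np.abs(g))   # infinity-norm second moment
  theta = theta - (alpha / (1 - beta1**t)) * m / u
  return theta, m, u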
Example 4: testIsVariableInitialized
def testIsVariableInitialized(self):
  for use_gpu in [True, False]:
    with self.test_session(use_gpu=use_gpu):
      v0 = state_ops.variable_op([1, 2], dtypes.float32)
      self.assertEqual(False, variables.is_variable_initialized(v0).eval())
      state_ops.assign(v0, [[2.0, 3.0]]).eval()
      self.assertEqual(True, variables.is_variable_initialized(v0).eval())
Developer: Wajih-O, Project: tensorflow, Lines: 7, Source: variable_ops_test.py
Example 5: _f
def _f():
  # Note that there is a race condition here, so we do a best-effort
  # update here. We reset update_in_steps first so that other workers
  # don't duplicate the updates. Also we update cluster_center_vars
  # before resetting total_counts to avoid large updates to
  # cluster_centers_updated based on partially updated
  # cluster_center_vars.
  with ops.control_dependencies([
      state_ops.assign(update_in_steps,
                       self._mini_batch_steps_per_iteration - 1)
  ]):
    with ops.colocate_with(
        cluster_centers_updated, ignore_existing=True):
      if self._distance_metric == COSINE_DISTANCE:
        cluster_centers = nn_impl.l2_normalize(
            cluster_centers_updated, dim=1)
      else:
        cluster_centers = cluster_centers_updated
    with ops.colocate_with(cluster_centers_var, ignore_existing=True):
      with ops.control_dependencies(
          [state_ops.assign(cluster_centers_var, cluster_centers)]):
        with ops.colocate_with(None, ignore_existing=True):
          with ops.control_dependencies([
              state_ops.assign(total_counts,
                               array_ops.zeros_like(total_counts))
          ]):
            return array_ops.identity(update_in_steps)
Developer: AnddyWang, Project: tensorflow, Lines: 27, Source: clustering_ops.py
Example 6: get_placements
def get_placements(self, *args, **kwargs):
  num_children = self.hparams.num_children
  with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
    actions_cache = variable_scope.get_local_variable(
        "actions_cache",
        initializer=init_ops.zeros_initializer,
        dtype=dtypes.int32,
        shape=[num_children, self.num_groups],
        trainable=False)
    x = array_ops.tile(self.seq2seq_input_layer, [num_children, 1, 1])
    last_c, last_h, attn_mem = self.encode(x)
    actions, log_probs = {}, {}
    actions["sample"], log_probs["sample"] = (
        self.decode(
            x, last_c, last_h, attn_mem, mode="sample"))
    actions["target"], log_probs["target"] = (
        self.decode(
            x,
            last_c,
            last_h,
            attn_mem,
            mode="target",
            y=actions_cache))
    actions["greedy"], log_probs["greedy"] = (
        self.decode(
            x, last_c, last_h, attn_mem, mode="greedy"))
    actions["sample"] = control_flow_ops.cond(
        self.global_step < self.hparams.stop_sampling,
        lambda: state_ops.assign(actions_cache, actions["sample"]),
        lambda: state_ops.assign(actions_cache, actions["target"]))
    self.actions_cache = actions_cache
  return actions, log_probs
Developer: neuroradiology, Project: tensorflow, Lines: 34, Source: hierarchical_controller.py
Example 7: test_fn
def test_fn(a):
  state_ops.assign(a, a + 1)
  b = a + 1
  state_ops.assign(a, a + 1)
  c = b + 1
  d = c + 1
  return d
Developer: ChengYuXiang, Project: tensorflow, Lines: 7, Source: side_effect_guards_test.py
Example 8: _apply_sparse_shared
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
  beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
  beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
  lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
  beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
  beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
  epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
  lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
  # m_t = beta1 * m + (1 - beta1) * g_t
  m = self.get_slot(var, "m")
  m_scaled_g_values = grad * (1 - beta1_t)
  m_t = state_ops.assign(m, m * beta1_t,
                         use_locking=self._use_locking)
  with ops.control_dependencies([m_t]):
    m_t = scatter_add(m, indices, m_scaled_g_values)
  # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
  v = self.get_slot(var, "v")
  v_scaled_g_values = (grad * grad) * (1 - beta2_t)
  v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
  with ops.control_dependencies([v_t]):
    v_t = scatter_add(v, indices, v_scaled_g_values)
  v_sqrt = math_ops.sqrt(v_t)
  var_update = state_ops.assign_sub(var,
                                    lr * m_t / (v_sqrt + epsilon_t),
                                    use_locking=self._use_locking)
  return control_flow_ops.group(*[var_update, m_t, v_t])
Developer: AbhinavJain13, Project: tensorflow, Lines: 26, Source: adam.py
Example 9: _testDefaultGraphInThread
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
  with session.Session() as s:
    self.assertEqual(ops.get_default_graph(), s.graph)
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    c = math_ops.matmul(a, b)
    v = variables.Variable(c, name='var_%d' % i)
    # Block here until all threads have constructed their graph.
    constructed_event.set()
    continue_event.wait()
    assign_c_to_v = state_ops.assign(v, c)
    v.initializer.run()
    assign_c_to_v.eval()
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    d = constant_op.constant(3.0, shape=[2, 3])
    e = math_ops.matmul(a, d)
    assign_e_to_v = state_ops.assign(v, e)
    e_val = e.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    s.run(assign_e_to_v)
    v_val = v.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
    self.assertEqual(ops.get_default_graph(), s.graph)
Developer: agouwin, Project: udacity_deep_learning_homework, Lines: 28, Source: session_test.py
Example 10: testMultiplyInverseAgainstExplicit
def testMultiplyInverseAgainstExplicit(self):
  with ops.Graph().as_default(), self.test_session() as sess:
    random_seed.set_random_seed(200)
    input_dim, output_dim = 3, 2
    inputs = array_ops.zeros([32, input_dim])
    outputs = array_ops.zeros([32, output_dim])
    params = array_ops.zeros([input_dim, output_dim])
    block = fb.FullyConnectedKFACBasicFB(
        lc.LayerCollection(), inputs, outputs, has_bias=False)
    grads = outputs**2
    damping = 0.  # This test is only valid without damping.
    block.instantiate_factors((grads,), damping)
    sess.run(state_ops.assign(block._input_factor._cov, _make_psd(3)))
    sess.run(state_ops.assign(block._output_factor._cov, _make_psd(2)))
    sess.run(block._input_factor.make_inverse_update_ops())
    sess.run(block._output_factor.make_inverse_update_ops())
    v_flat = np.arange(6, dtype=np.float32)
    vector = utils.column_to_tensors(params, array_ops.constant(v_flat))
    output = block.multiply_inverse(vector)
    output_flat = sess.run(utils.tensors_to_column(output)).ravel()
    full = sess.run(block.full_fisher_block())
    explicit = np.dot(np.linalg.inv(full + damping * np.eye(6)), v_flat)
    self.assertAllClose(output_flat, explicit)
Developer: DjangoPeng, Project: tensorflow, Lines: 27, Source: fisher_blocks_test.py
Example 11: get_updates
def get_updates(self, loss, params):
  grads = self.get_gradients(loss, params)
  self.updates = [state_ops.assign_add(self.iterations, 1)]
  lr = self.lr
  if self.initial_decay > 0:
    lr = lr * (  # pylint: disable=g-no-augmented-assignment
        1. / (1. + self.decay * math_ops.cast(self.iterations,
                                              K.dtype(self.decay))))
  # momentum
  shapes = [K.int_shape(p) for p in params]
  moments = [K.zeros(shape) for shape in shapes]
  self.weights = [self.iterations] + moments
  for p, g, m in zip(params, grads, moments):
    v = self.momentum * m - lr * g  # velocity
    self.updates.append(state_ops.assign(m, v))
    if self.nesterov:
      new_p = p + self.momentum * v - lr * g
    else:
      new_p = p + v
    # Apply constraints.
    if getattr(p, 'constraint', None) is not None:
      new_p = p.constraint(new_p)
    self.updates.append(state_ops.assign(p, new_p))
  return self.updates
Developer: sonnyhu, Project: tensorflow, Lines: 28, Source: optimizers.py
Example 12: test_stop_based_on_num_step
def test_stop_based_on_num_step(self):
  h = basic_session_run_hooks.StopAtStepHook(num_steps=10)
  with ops.Graph().as_default():
    global_step = variables.get_or_create_global_step()
    no_op = control_flow_ops.no_op()
    h.begin()
    with session_lib.Session() as sess:
      mon_sess = monitored_session._HookedSession(sess, [h])
      sess.run(state_ops.assign(global_step, 5))
      h.after_create_session(sess, None)
      mon_sess.run(no_op)
      self.assertFalse(mon_sess.should_stop())
      sess.run(state_ops.assign(global_step, 13))
      mon_sess.run(no_op)
      self.assertFalse(mon_sess.should_stop())
      sess.run(state_ops.assign(global_step, 14))
      mon_sess.run(no_op)
      self.assertFalse(mon_sess.should_stop())
      sess.run(state_ops.assign(global_step, 15))
      mon_sess.run(no_op)
      self.assertTrue(mon_sess.should_stop())
      sess.run(state_ops.assign(global_step, 16))
      mon_sess._should_stop = False
      mon_sess.run(no_op)
      self.assertTrue(mon_sess.should_stop())
Developer: AutumnQYN, Project: tensorflow, Lines: 26, Source: basic_session_run_hooks_test.py
Example 13: get_updates
def get_updates(self, loss, params):
  grads = self.get_gradients(loss, params)
  shapes = [K.int_shape(p) for p in params]
  accumulators = [K.zeros(shape) for shape in shapes]
  self.weights = accumulators
  self.updates = [state_ops.assign_add(self.iterations, 1)]
  lr = self.lr
  if self.initial_decay > 0:
    lr = lr * (  # pylint: disable=g-no-augmented-assignment
        1. /
        (1. +
         self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
  for p, g, a in zip(params, grads, accumulators):
    new_a = a + math_ops.square(g)  # update accumulator
    self.updates.append(state_ops.assign(a, new_a))
    new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
    # Apply constraints.
    if getattr(p, 'constraint', None) is not None:
      new_p = p.constraint(new_p)
    self.updates.append(state_ops.assign(p, new_p))
  return self.updates
Developer: adit-chandra, Project: tensorflow, Lines: 25, Source: optimizers.py
Example 14: _apply_dense
def _apply_dense(self, grad, var):
  beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
  beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
  lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
  beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
  beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
  epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
  lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
  # m_t = beta1 * m + (1 - beta1) * g_t
  m = self.get_slot(var, "m")
  m_scaled_g_values = grad * (1 - beta1_t)
  m_t = state_ops.assign(m, beta1_t * m + m_scaled_g_values,
                         use_locking=self._use_locking)
  # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
  v = self.get_slot(var, "v")
  v_scaled_g_values = (grad * grad) * (1 - beta2_t)
  v_t = state_ops.assign(v, beta2_t * v + v_scaled_g_values,
                         use_locking=self._use_locking)
  # amsgrad
  vhat = self.get_slot(var, "vhat")
  vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
  v_sqrt = math_ops.sqrt(vhat_t)
  var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t),
                                    use_locking=self._use_locking)
  return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t])
Developer: zsdonghao, Project: tensorlayer, Lines: 27, Source: amsgrad.py
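The only difference from plain Adam in this excerpt is the vhat slot: taking maximum(v_t, vhat) makes the denominator non-decreasing over time, which is the AMSGrad fix for Adam's known non-convergence cases. A NumPy sketch of one dense step (ours, independent of the snippet above):

import numpy as np

def amsgrad_step(theta, g, m, v, vhat, lr=0.001, beta1=0.9, beta2=0.999,
                 eps=1e-8):
  """One dense AMSGrad update; vhat never decreases, unlike Adam's v."""
  m = beta1 * m + (1 - beta1) * g
  v = beta2 * v + (1 - beta2) * g * g
  vhat = np.maximum(vhat, v)              # the AMSGrad max step
  theta = theta - lr * m / (np.sqrt(vhat) + eps)
  return theta, m, v, vhat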
Example 15: _resource_apply_sparse
def _resource_apply_sparse(self, grad, var, indices):
  var_dtype = var.dtype.base_dtype
  lr_t = self._decayed_lr(var_dtype)
  beta_1_t = self._get_hyper('beta_1', var_dtype)
  beta_2_t = self._get_hyper('beta_2', var_dtype)
  local_step = math_ops.cast(self.iterations + 1, var_dtype)
  beta_1_power = math_ops.pow(beta_1_t, local_step)
  beta_2_power = math_ops.pow(beta_2_t, local_step)
  epsilon_t = self._get_hyper('epsilon', var_dtype)
  lr = (lr_t * math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power))
  # m_t = beta1 * m + (1 - beta1) * g_t
  m = self.get_slot(var, 'm')
  m_scaled_g_values = grad * (1 - beta_1_t)
  m_t = state_ops.assign(m, m * beta_1_t, use_locking=self._use_locking)
  with ops.control_dependencies([m_t]):
    m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
    # m_bar = (1 - beta1) * g_t + beta1 * m_t
    m_bar = m_scaled_g_values + beta_1_t * array_ops.gather(m_t, indices)
  # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
  v = self.get_slot(var, 'v')
  v_scaled_g_values = (grad * grad) * (1 - beta_2_t)
  v_t = state_ops.assign(v, v * beta_2_t, use_locking=self._use_locking)
  with ops.control_dependencies([v_t]):
    v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
  v_t_slice = array_ops.gather(v_t, indices)
  v_sqrt = math_ops.sqrt(v_t_slice)
  var_update = self._resource_scatter_add(var, indices,
                                          -lr * m_bar / (v_sqrt + epsilon_t))
  return control_flow_ops.group(*[var_update, m_bar, v_t])
Developer: aeverall, Project: tensorflow, Lines: 32, Source: nadam.py
Example 16: testMultiplyInverseAgainstExplicit
def testMultiplyInverseAgainstExplicit(self):
  with ops.Graph().as_default(), self.test_session() as sess:
    random_seed.set_random_seed(200)
    params = array_ops.zeros((2, 2, 2, 2))
    inputs = array_ops.zeros((2, 2, 2, 2))
    outputs = array_ops.zeros((2, 2, 2, 2))
    block = fb.ConvKFCBasicFB(lc.LayerCollection(), params, (1, 1, 1, 1),
                              'SAME')
    block.register_additional_minibatch(inputs, outputs)
    grads = outputs**2
    damping = 0.  # This test is only valid without damping.
    block.instantiate_factors(([grads],), damping)
    sess.run(state_ops.assign(block._input_factor._cov, _make_psd(8)))
    sess.run(state_ops.assign(block._output_factor._cov, _make_psd(2)))
    sess.run(block._input_factor.make_inverse_update_ops())
    sess.run(block._output_factor.make_inverse_update_ops())
    v_flat = np.arange(16, dtype=np.float32)
    vector = utils.column_to_tensors(params, array_ops.constant(v_flat))
    output = block.multiply_inverse(vector)
    output_flat = sess.run(utils.tensors_to_column(output)).ravel()
    full = sess.run(block.full_fisher_block())
    explicit = np.dot(np.linalg.inv(full + damping * np.eye(16)), v_flat)
    self.assertAllClose(output_flat, explicit)
Developer: ChengYuXiang, Project: tensorflow, Lines: 27, Source: fisher_blocks_test.py
Example 17: testDeferredSlotRestoration
def testDeferredSlotRestoration(self):
  checkpoint_directory = self.get_temp_dir()
  root = trackable_utils.Checkpoint()
  root.var = trackable_utils.add_variable(
      root, name="var", initializer=0.)
  optimizer = adam.AdamOptimizer(0.1)
  if context.executing_eagerly():
    optimizer.minimize(root.var.read_value)
  else:
    train_op = optimizer.minimize(root.var)
    # Note that `optimizer` has not been added as a dependency of
    # `root`. Create a one-off grouping so that slot variables for
    # `root.var` get initialized too.
    self.evaluate(trackable_utils.gather_initializers(
        trackable_utils.Checkpoint(root=root, optimizer=optimizer)))
    self.evaluate(train_op)
  self.evaluate(state_ops.assign(root.var, 12.))
  no_slots_path = root.save(os.path.join(checkpoint_directory, "no_slots"))
  root.optimizer = optimizer
  self.evaluate(state_ops.assign(root.var, 13.))
  self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
                                 14.))
  slots_path = root.save(os.path.join(checkpoint_directory, "with_slots"))
  new_root = trackable_utils.Checkpoint()
  # Load the slot-containing checkpoint (deferred), then immediately
  # overwrite the non-slot variable (also deferred).
  slot_status = new_root.restore(slots_path)
  no_slot_status = new_root.restore(no_slots_path)
  with self.assertRaises(AssertionError):
    no_slot_status.assert_consumed()
  new_root.var = trackable_utils.add_variable(
      new_root, name="var", shape=[])
  no_slot_status.assert_consumed()
  no_slot_status.run_restore_ops()
  self.assertEqual(12., self.evaluate(new_root.var))
  new_root.optimizer = adam.AdamOptimizer(0.1)
  slot_status.assert_existing_objects_matched()
  with self.assertRaisesRegexp(AssertionError, "beta1_power"):
    slot_status.assert_consumed()
  self.assertEqual(12., self.evaluate(new_root.var))
  if context.executing_eagerly():
    # Slot variables are only created with restoring initializers when
    # executing eagerly.
    self.assertEqual(14., self.evaluate(
        new_root.optimizer.get_slot(name="m", var=new_root.var)))
  else:
    self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
                  None)
  if context.executing_eagerly():
    new_root.optimizer.minimize(new_root.var.read_value)
  else:
    train_op = new_root.optimizer.minimize(new_root.var)
    # The slot variable now exists; restore() didn't create it, but we
    # should now have a restore op for it.
    slot_status.run_restore_ops()
    self.assertEqual(14., self.evaluate(
        new_root.optimizer.get_slot(name="m", var=new_root.var)))
    self.evaluate(train_op)
  slot_status.assert_consumed()
Developer: adit-chandra, Project: tensorflow, Lines: 60, Source: util_with_v1_optimizers_test.py
Example 18: testSaveRestore
def testSaveRestore(self):
  network = MyNetwork()
  optimizer = adam.AdamOptimizer(0.001)
  root_checkpointable = checkpointable_utils.Checkpoint(
      optimizer=optimizer, network=network)
  input_value = constant_op.constant([[3.]])
  if context.in_eager_mode():
    optimizer.minimize(
        lambda: network(input_value))
  else:
    train_op = optimizer.minimize(network(input_value))
    # TODO(allenl): Make initialization more pleasant when graph building.
    root_checkpointable.save_counter  # pylint: disable=pointless-statement
    self.evaluate(checkpointable_utils.gather_initializers(
        root_checkpointable))
    self.evaluate(train_op)
  prefix = os.path.join(self.get_temp_dir(), "ckpt")
  self.evaluate(state_ops.assign(network._named_dense.variables[1], [42.]))
  m_bias_slot = optimizer.get_slot(network._named_dense.variables[1], "m")
  self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
  save_path = root_checkpointable.save(file_prefix=prefix)
  self.evaluate(state_ops.assign(network._named_dense.variables[1], [43.]))
  self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3))
  optimizer_variables = self.evaluate(optimizer.variables())
  self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
  # Immediate restoration
  status = root_checkpointable.restore(save_path=save_path).assert_consumed()
  status.run_restore_ops()
  self.assertAllEqual([42.], self.evaluate(network._named_dense.variables[1]))
  self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter))
  self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
  if context.in_graph_mode():
    return  # Restore-on-create is only supported when executing eagerly
  on_create_network = MyNetwork()
  on_create_optimizer = adam.AdamOptimizer(0.001)
  on_create_root = checkpointable_utils.Checkpoint(
      optimizer=on_create_optimizer, network=on_create_network)
  # Deferred restoration
  status = on_create_root.restore(save_path=save_path)
  on_create_network(constant_op.constant([[3.]]))  # create variables
  self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
  self.assertAllEqual([42.],
                      self.evaluate(
                          on_create_network._named_dense.variables[1]))
  on_create_m_bias_slot = on_create_optimizer.get_slot(
      on_create_network._named_dense.variables[1], "m")
  # Optimizer slot variables are created when the original variable is
  # restored.
  self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
  self.assertAllEqual(optimizer_variables[2:],
                      self.evaluate(on_create_optimizer.variables()))
  on_create_optimizer._create_slots(
      [resource_variable_ops.ResourceVariable([1.])])
  status.assert_consumed()
  beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators()
  self.assertAllEqual(optimizer_variables[0], self.evaluate(beta1_power))
  self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power))
Developer: neuroradiology, Project: tensorflow, Lines: 57, Source: checkpointable_utils_test.py
Example 19: testDeferredSlotRestoration
def testDeferredSlotRestoration(self):
  checkpoint_directory = self.get_temp_dir()
  root = checkpointable.Checkpointable()
  root.var = checkpointable_utils.add_variable(
      root, name="var", initializer=0.)
  optimizer = CheckpointableAdam(0.1)
  if context.in_graph_mode():
    train_op = optimizer.minimize(root.var)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(train_op)
  else:
    optimizer.minimize(root.var.read_value)
  self.evaluate(state_ops.assign(root.var, 12.))
  no_slots_path = checkpointable_utils.Saver(root).save(
      os.path.join(checkpoint_directory, "no_slots"))
  root.optimizer = optimizer
  self.evaluate(state_ops.assign(root.var, 13.))
  self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
                                 14.))
  slots_path = checkpointable_utils.Saver(root).save(
      os.path.join(checkpoint_directory, "with_slots"))
  new_root = checkpointable.Checkpointable()
  # Load the slot-containing checkpoint (deferred), then immediately
  # overwrite the non-slot variable (also deferred).
  slot_status = checkpointable_utils.Saver(new_root).restore(slots_path)
  no_slot_status = checkpointable_utils.Saver(new_root).restore(
      no_slots_path)
  with self.assertRaises(AssertionError):
    no_slot_status.assert_consumed()
  new_root.var = checkpointable_utils.add_variable(
      new_root, name="var", shape=[])
  no_slot_status.assert_consumed()
  no_slot_status.run_restore_ops()
  self.assertEqual(12., self.evaluate(new_root.var))
  new_root.optimizer = CheckpointableAdam(0.1)
  with self.assertRaisesRegexp(AssertionError, "beta1_power"):
    slot_status.assert_consumed()
  self.assertEqual(12., self.evaluate(new_root.var))
  if context.in_eager_mode():
    # Slot variables are only created with restoring initializers when
    # executing eagerly.
    self.assertEqual(14., self.evaluate(
        new_root.optimizer.get_slot(name="m", var=new_root.var)))
  else:
    self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
                  None)
  if context.in_graph_mode():
    train_op = new_root.optimizer.minimize(new_root.var)
    # The slot variable now exists; restore() didn't create it, but we
    # should now have a restore op for it.
    slot_status.run_restore_ops()
    self.assertEqual(14., self.evaluate(
        new_root.optimizer.get_slot(name="m", var=new_root.var)))
    self.evaluate(train_op)
  else:
    new_root.optimizer.minimize(new_root.var.read_value)
  slot_status.assert_consumed()
Developer: keithc61, Project: tensorflow, Lines: 57, Source: checkpointable_utils_test.py
Example 20: test_resource_variable
def test_resource_variable(self):
  """Tests that resource variable usage is allowed."""
  a = variable_scope.get_variable(
      name='variable_a', shape=(1), use_resource=True)
  context = self.create_test_xla_compile_context()
  context.Enter()
  state_ops.assign(a, a + 1)
  context.Exit()
Developer: Ajaycs99, Project: tensorflow, Lines: 9, Source: xla_test.py
Note: The tensorflow.python.ops.state_ops.assign function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution and use are governed by each project's License. Do not reproduce without permission.