This article collects typical usage examples of the Python function theano.clone. If you have been wondering what theano.clone does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
A total of 20 code examples of the clone function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
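Before the project examples, here is a minimal, self-contained sketch of what theano.clone does (the names x, x_new and y are ours, not taken from any project below): it copies a symbolic expression while substituting some of its variables, leaving the original graph untouched.

import theano
import theano.tensor as T

x = T.scalar('x')
y = x ** 2 + 1                                 # original graph, built on x

x_new = T.scalar('x_new')
y_new = theano.clone(y, replace={x: x_new})    # same graph, rewired to x_new

f = theano.function([x_new], y_new)
print(f(3.0))                                  # 10.0; the original y is unchanged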
Example 1: get_output_for
def get_output_for(self, input, deterministic=False, **kwargs):
    if deterministic:
        # use stored mean and std
        mean = self.mean
        std = self.std
    else:
        # use this batch's mean and std
        mean = input.mean(self.axes, keepdims=True)
        std = input.std(self.axes, keepdims=True)
        # and update the stored mean and std:
        # we create (memory-aliased) clones of the stored mean and std
        running_mean = theano.clone(self.mean, share_inputs=False)
        running_std = theano.clone(self.std, share_inputs=False)
        # set a default update for them
        running_mean.default_update = (1 - self.alpha) * running_mean + self.alpha * mean
        running_std.default_update = (1 - self.alpha) * running_std + self.alpha * std
        # and include them in the graph so their default updates will be
        # applied (although the expressions will be optimized away later)
        mean += 0 * running_mean
        std += 0 * running_std
    std += self.epsilon
    mean = T.addbroadcast(mean, *self.axes)
    std = T.addbroadcast(std, *self.axes)
    beta = T.addbroadcast(self.beta, *self.axes)
    gamma = T.addbroadcast(self.gamma, *self.axes)
    normalized = (input - mean) * (gamma / std) + beta
    return self.nonlinearity(normalized)
Author: sarin1991, Project: Baseball, Lines: 27, Source: BatchNormalization.py
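Examples 1, 4, 5, 13, 14 and 15 all rely on the same trick: cloning a shared variable with share_inputs=False yields a memory-aliased copy to which a default_update can be attached. The standalone sketch below (alpha, mean_shared and batch_mean are our own names, not part of any project above) isolates that trick, assuming the usual Lasagne-style running-average update.

import numpy as np
import theano
import theano.tensor as T

alpha = 0.1
mean_shared = theano.shared(np.zeros(4, dtype=theano.config.floatX), name='mean')
batch_mean = T.vector('batch_mean')

# memory-aliased clone of the shared variable; the clone carries the
# default_update, the stored variable itself stays clean
running_mean = theano.clone(mean_shared, share_inputs=False)
running_mean.default_update = (1 - alpha) * running_mean + alpha * batch_mean

# multiplying by zero keeps the clone in the graph, so theano.function
# collects its default_update without changing the numerical result
out = batch_mean + 0 * running_mean

f = theano.function([batch_mean], out)
f(np.ones(4, dtype=theano.config.floatX))
print(mean_shared.get_value())   # moved alpha of the way towards the batch mean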
Example 2: __init__
def __init__(self, freq, activation, input, target_idx, task_loss, surrogate_loss,
             hyperparameter, learning_rate, batch_generator, n_batches,
             factor=1.5, n_updates=10):
    Extension.__init__(self, 'adapt_zloss', freq)
    self.batch_generator = batch_generator
    self.n_batches = n_batches
    self.learning_rate = learning_rate
    self.hyperparameter = hyperparameter
    self.factor = factor
    self.n_updates = n_updates

    # grad = theano.grad(surrogate_loss, activation)
    # new_activation = activation - learning_rate * grad

    self.fun_activation = theano.function([input], activation)

    activation_bis = tensor.matrix()
    surr_loss_bis = theano.clone(surrogate_loss,
                                 replace={activation: activation_bis})
    grad = theano.grad(surr_loss_bis, activation_bis)
    new_activation = activation_bis - 100 * learning_rate * grad
    task_loss_bis = theano.clone(task_loss,
                                 replace={activation: new_activation})
    self.fun_update_task_loss = theano.function(
        [activation_bis, target_idx], [task_loss_bis, new_activation])
Author: adbrebs, Project: raccoon, Lines: 27, Source: adaptative_hyper.py
Example 3: filter_and_prob
def filter_and_prob(inpt, transition, emission,
                    visible_noise_mean, visible_noise_cov,
                    hidden_noise_mean, hidden_noise_cov,
                    initial_hidden, initial_hidden_cov):
    step = forward_step(
        transition, emission,
        visible_noise_mean, visible_noise_cov,
        hidden_noise_mean, hidden_noise_cov)

    hidden_mean_0 = T.zeros_like(hidden_noise_mean).dimshuffle('x', 0)
    hidden_cov_0 = T.zeros_like(hidden_noise_cov).dimshuffle('x', 0, 1)
    f0, F0, ll0 = step(inpt[0], hidden_mean_0, hidden_cov_0)

    replace = {hidden_noise_mean: initial_hidden,
               hidden_noise_cov: initial_hidden_cov}
    f0 = theano.clone(f0, replace)
    F0 = theano.clone(F0, replace)
    ll0 = theano.clone(ll0, replace)

    (f, F, ll), _ = theano.scan(
        step,
        sequences=inpt[1:],
        outputs_info=[f0, F0, None])

    ll = ll.sum(axis=0)

    f = T.concatenate([T.shape_padleft(f0), f])
    F = T.concatenate([T.shape_padleft(F0), F])
    ll += ll0

    return f, F, ll
Author: ddofer, Project: breze, Lines: 30, Source: lds.py
Example 4: forward
def forward(self,input_org,train=True,update_batch_stat=True,finetune=False):
    print "Layer/BatchNormalization"
    ldim,cdim,rdim = self._internal_shape(input_org)
    input = input_org.reshape((ldim,cdim,rdim))
    if (train):
        mean = T.mean(input, axis=(0, 2), keepdims=True )
        var = T.mean((input-mean)**2, axis=(0, 2), keepdims=True)
        if(update_batch_stat):
            finetune_N = theano.clone(self.finetune_N, share_inputs=False)
            if(finetune):
                finetune_N.default_update = finetune_N+1
                ratio = T.cast(1-1.0/(finetune_N+1),theano.config.floatX)
            else:
                finetune_N.default_update = 0
                ratio = self.moving_avg_ratio
            m = ldim*rdim
            scale = T.cast(m/(m-1.0),theano.config.floatX)
            est_mean = theano.clone(self.est_mean, share_inputs=False)
            est_var = theano.clone(self.est_var, share_inputs=False)
            est_mean.default_update = T.cast(ratio*self.est_mean + (1-ratio)*mean,theano.config.floatX)
            est_var.default_update = T.cast(ratio*self.est_var + (1-ratio)*scale*var,theano.config.floatX)
            mean += 0 * est_mean
            var += 0 * est_var
        output = self._pbc(self.gamma) * (input - self._pbc(mean)) \
                 / T.sqrt(1e-6+self._pbc(var)) + self._pbc(self.beta)
    else:
        output = self._pbc(self.gamma) * (input - self._pbc(self.est_mean)) \
                 / T.sqrt(1e-6+self._pbc(self.est_var)) + self._pbc(self.beta)
    return output.reshape(input_org.shape)
Author: ilovecv, Project: vat, Lines: 32, Source: batch_normalization.py
Example 5: get_output_for
def get_output_for(self, input, deterministic=False,
                   batch_norm_use_averages=None,
                   batch_norm_update_averages=None, **kwargs):
    self.count = self.count + 1
    self.alpha = 5.0 / (10 + self.count)
    # self.alpha = 1.0 / (self.count^2)

    input_mean = input.mean(self.axes)
    input_inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))

    # Decide whether to use the stored averages or mini-batch statistics
    if batch_norm_use_averages is None:
        batch_norm_use_averages = deterministic
    use_averages = batch_norm_use_averages

    if use_averages:
        mean = self.mean
        inv_std = self.inv_std
    else:
        mean = input_mean
        inv_std = input_inv_std

    # Decide whether to update the stored averages
    if batch_norm_update_averages is None:
        batch_norm_update_averages = not deterministic
    update_averages = batch_norm_update_averages

    if update_averages:
        # Trick: To update the stored statistics, we create memory-aliased
        # clones of the stored statistics:
        running_mean = theano.clone(self.mean, share_inputs=False)
        running_inv_std = theano.clone(self.inv_std, share_inputs=False)
        # set a default update for them:
        running_mean.default_update = ((1 - self.alpha) * running_mean +
                                       self.alpha * input_mean)
        running_inv_std.default_update = ((1 - self.alpha) *
                                          running_inv_std +
                                          self.alpha * input_inv_std)
        # and make sure they end up in the graph without participating in
        # the computation (this way their default_update will be collected
        # and applied, but the computation will be optimized away):
        mean += 0 * running_mean
        inv_std += 0 * running_inv_std

    # prepare dimshuffle pattern inserting broadcastable axes as needed
    param_axes = iter(range(input.ndim - len(self.axes)))
    pattern = ['x' if input_axis in self.axes
               else next(param_axes)
               for input_axis in range(input.ndim)]

    # apply dimshuffle pattern to all parameters
    beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
    gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
    mean = mean.dimshuffle(pattern)
    inv_std = inv_std.dimshuffle(pattern)

    # normalize
    normalized = (input - mean) * (gamma * inv_std) + beta
    return normalized
Author: myt00seven, Project: svrg, Lines: 60, Source: my_bn_layer_5_10_m.py
Example 6: _make_loss_functions
def _make_loss_functions(self, mode=None):
    """Return pair (f_loss, f_d_loss) of functions.

     - f_loss returns the current loss,
     - f_d_loss returns the gradient of that loss wrt parameters,
    """
    rng = T.shared_randomstreams.RandomStreams()

    # Drop out inpts.
    inpt = self.exprs['inpt']
    inpt_dropped_out = corrupt.mask(inpt, self.p_dropout_inpt, rng)
    givens = {inpt: inpt_dropped_out}
    loss = theano.clone(self.exprs['loss'], givens)

    n_layers = len(self.n_hiddens)
    for i in range(n_layers - 1):
        # Drop out hidden.
        hidden = self.exprs['hidden_%i' % i]
        hidden_dropped_out = corrupt.mask(hidden, self.p_dropout_hidden, rng)
        givens = {hidden: hidden_dropped_out}
        loss = theano.clone(loss, givens)

    d_loss = T.grad(loss, self.parameters.flat)

    f_loss = self.function(['inpt', 'target'], loss, explicit_pars=True,
                           mode=mode)
    f_d_loss = self.function(['inpt', 'target'], d_loss, explicit_pars=True,
                             mode=mode)
    return f_loss, f_d_loss
Author: korhammer, Project: breze, Lines: 29, Source: mlp.py
Example 7: _init_exprs
def _init_exprs(self):
    # Here we need to replace the input with a corrupted version. If we do
    # so naively by calling clone on the loss, the targets (which are
    # identical to the inputs in the sense of identity in programming)
    # will be replaced as well. Instead, we just want to have the
    # inputs replaced. Thus we first clone the output of the model and
    # replace the input with the corrupted input. This will not change the
    # targets. Afterwards, we put that corruption into the loss as well.
    super(DenoisingAutoEncoder, self)._init_exprs()
    if self.noise_type == 'gauss':
        corrupted_inpt = corrupt.gaussian_perturb(
            self.exprs['inpt'], self.c_noise)
    elif self.noise_type == 'mask':
        corrupted_inpt = corrupt.mask(
            self.exprs['inpt'], self.c_noise)

    output_from_corrupt = theano.clone(
        self.exprs['output'],
        {self.exprs['inpt']: corrupted_inpt}
    )

    score = self.exprs['loss']
    loss = theano.clone(
        self.exprs['loss'],
        {self.exprs['output']: output_from_corrupt})

    self.exprs.update(get_named_variables(locals(), overwrite=True))
Author: RuinCakeLie, Project: breze, Lines: 27, Source: autoencoder.py
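The comment in Example 7 describes a two-step clone. The following minimal sketch (inpt, W, output and loss are hypothetical variables of ours; the additive noise is a stand-in, not breze's corrupt module) illustrates why cloning the output first keeps the target occurrence of the input untouched.

import numpy as np
import theano
import theano.tensor as T

inpt = T.matrix('inpt')
W = theano.shared(np.random.randn(5, 5).astype(theano.config.floatX))
output = T.nnet.sigmoid(T.dot(inpt, W))
loss = ((output - inpt) ** 2).mean()        # inpt also plays the target role

noise = np.asarray(0.1, dtype=theano.config.floatX)
noisy_inpt = inpt + noise                   # stand-in for a real corruption

# step 1: rebuild only the model output on the corrupted input
output_from_corrupt = theano.clone(output, {inpt: noisy_inpt})
# step 2: substitute that output into the loss; the target inpt stays clean
denoising_loss = theano.clone(loss, {output: output_from_corrupt})

f = theano.function([inpt], denoising_loss)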
Example 8: apply_replacements
def apply_replacements(self, node, deterministic=False,
                       include=None, exclude=None,
                       more_replacements=None):
    """
    Replace variables in graph with variational approximation. By default, replaces all variables.

    Parameters
    ----------
    node : Theano Variables (or Theano expressions)
        node or nodes for replacements
    deterministic : bool
        whether to use zeros as initial distribution
        if True - zero initial point will produce constant latent variables
    include : list
        latent variables to be replaced
    exclude : list
        latent variables to be excluded for replacements
    more_replacements : dict
        add custom replacements to graph, e.g. change input source

    Returns
    -------
    node(s) with replacements
    """
    replacements = self.construct_replacements(
        include, exclude, more_replacements
    )
    node = theano.clone(node, replacements, strict=False)
    posterior = self.random(no_rand=deterministic)
    return theano.clone(node, {self.input: posterior}, strict=False)
Author: taku-y, Project: pymc3, Lines: 30, Source: opvi.py
Example 9: safe_clone
def safe_clone(cost, replace):
    params = replace.keys()
    nw_vals = replace.values()
    dummy_params = [x.type() for x in params]
    dummy_cost = theano.clone(cost,
                              replace=dict(zip(params, dummy_params)))
    return theano.clone(dummy_cost,
                        replace=dict(zip(dummy_params, nw_vals)))
Author: cc13ny, Project: galatea, Lines: 8, Source: daa.py
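A brief usage sketch for safe_clone above (the shared variable w and the cost are ours): the detour through dummy variables matters when a replacement value itself depends on the variable being replaced, as it does for a gradient step.

import numpy as np
import theano
import theano.tensor as T

w = theano.shared(np.ones(3, dtype=theano.config.floatX), name='w')
cost = T.sum(w ** 2)

# replace w by an expression that depends on w (one gradient step)
cost_after_step = safe_clone(cost, {w: w - 0.1 * theano.grad(cost, w)})
f = theano.function([], cost_after_step)
print(f())   # cost evaluated at w - 0.1 * grad, i.e. at 0.8 * w: 3 * 0.8**2 = 1.92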
Example 10: __call__
def __call__(self, z):
    if z.ndim > 1:
        a = theano.scan(
            lambda z_: theano.clone(self.op.apply(self.tf), {self.op.input: z_}, strict=False),
            sequences=z, n_steps=z.shape[0])[0].mean()
    else:
        a = theano.clone(self.op.apply(self.tf), {self.op.input: z}, strict=False)
    return tt.abs_(a)
Author: taku-y, Project: pymc3, Lines: 8, Source: opvi.py
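A self-contained version of the pattern in Example 10 (the names x, expr and z are ours): an expression built once in terms of a placeholder is re-instantiated at every scan step by cloning it with the placeholder replaced by the current slice.

import theano
import theano.tensor as tt

x = tt.vector('x')
expr = (x ** 2).sum()              # graph defined once, in terms of x

z = tt.matrix('z')                 # one row per scan step
per_row, _ = theano.scan(
    lambda z_row: theano.clone(expr, {x: z_row}, strict=False),
    sequences=z)

f = theano.function([z], per_row.mean())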
Example 11: _elbo_t_new
def _elbo_t_new(logp, uw_g, uw_l, inarray_g, inarray_l,
                n_mcsamples, random_seed):
    """Return expression of approximate ELBO based on Monte Carlo sampling.
    """
    r = MRG_RandomStreams(seed=random_seed)

    if uw_l is not None:
        l_g = (uw_g.size/2).astype('int64')
        u_g = uw_g[:l_g]
        w_g = uw_g[l_g:]
        l_l = (uw_l.size/2).astype('int64')
        u_l = uw_l[:l_l]
        w_l = uw_l[l_l:]

        logp_ = lambda z_g, z_l: theano.clone(
            logp, {inarray_g: z_g, inarray_l: z_l}, strict=False
        )

        if n_mcsamples == 1:
            n_g = r.normal(size=inarray_g.tag.test_value.shape)
            z_g = n_g * tt.exp(w_g) + u_g
            n_l = r.normal(size=inarray_l.tag.test_value.shape)
            z_l = n_l * tt.exp(w_l) + u_l
            elbo = logp_(z_g, z_l) + \
                tt.sum(w_g) + 0.5 * l_g * (1 + np.log(2.0 * np.pi)) + \
                tt.sum(w_l) + 0.5 * l_l * (1 + np.log(2.0 * np.pi))
        else:
            ns_g = r.normal(size=inarray_g.tag.test_value.shape)
            zs_g = ns_g * tt.exp(w_g) + u_g
            ns_l = r.normal(size=inarray_l.tag.test_value.shape)
            zs_l = ns_l * tt.exp(w_l) + u_l
            logps, _ = theano.scan(fn=lambda z_g, z_l: logp_(z_g, z_l),
                                   outputs_info=None,
                                   sequences=zip(zs_g, zs_l))
            elbo = tt.mean(logps) + \
                tt.sum(w_g) + 0.5 * l_g * (1 + np.log(2.0 * np.pi)) + \
                tt.sum(w_l) + 0.5 * l_l * (1 + np.log(2.0 * np.pi))
    else:
        l_g = (uw_g.size/2).astype('int64')
        u_g = uw_g[:l_g]
        w_g = uw_g[l_g:]

        logp_ = lambda z_g: theano.clone(logp, {inarray_g: z_g}, strict=False)

        if n_mcsamples == 1:
            n_g = r.normal(size=inarray_g.tag.test_value.shape)
            z_g = n_g * tt.exp(w_g) + u_g
            elbo = logp_(z_g) + \
                tt.sum(w_g) + 0.5 * l_g * (1 + np.log(2.0 * np.pi))
        else:
            n_g = r.normal(size=(n_mcsamples, u_g.tag.test_value.shape[0]))
            zs_g = n_g * tt.exp(w_g) + u_g
            logps, _ = theano.scan(fn=lambda q: logp_(q),
                                   outputs_info=None,
                                   sequences=[zs_g])
            elbo = tt.mean(logps) + \
                tt.sum(w_g) + 0.5 * l_g * (1 + np.log(2.0 * np.pi))

    return elbo
Author: LeonBai, Project: pymc3, Lines: 58, Source: advi_minibatch.py
Example 12: step
def step(input, mask, cumsum_grad_att, extra_grad_h, h, h_pre, update, grad_h, C,
         *prev_grad_params):
    """
    A single timestep of the backward pass.

    Parameters
    ----------
    input: (batch_size, n_in)
    mask: (batch_size,)
    cumsum_grad_att: (batch_size, n_hidden)
    h: (batch_size, n_hidden)
    h_pre: (batch_size, n_hidden)
    update: (batch_size, n_hidden)
    grad_h: (batch_size, n_hidden)
    C: (batch_size, n_hidden, n_hidden)
    *prev_grad_params

    Returns
    -------
    grad_input: (batch_size, n_in)
    grad_h_pre: (batch_size, n_hidden)
    C_pre: (batch_size, n_hidden, n_hidden)
    gradients with respect to the params (both of the recurrent and the
        update rule)
    """
    C_pre = self.attention_update_rule.restore_previous_matrix(C, update)

    att_grads = theano.clone(
        output=[u_grad_h] + u_grad_params,
        replace={u_h: h,
                 u_mask: mask,
                 u_C_pre: C_pre,
                 u_grad_att: cumsum_grad_att,
                 u_query: h})

    grad_h_att = att_grads[0]
    grad_params_att = att_grads[1:]

    grad_h_att *= 1000 / T.sum(seq_mask, axis=0)[:, None]
    grad_h_att = T.switch(mask[:, None], grad_h_att, .0)

    rec_grads = theano.clone(
        output=[back_grad_input, back_grad_h_pre] + back_grad_params,
        replace={back_input: input,
                 back_mask: mask,
                 back_h_pre: h_pre,
                 back_grad_h: extra_grad_h + grad_h + grad_h_att})

    grad_input = rec_grads[0]
    grad_h_pre = rec_grads[1]
    grad_params_rec = rec_grads[2:]

    grad_params = grad_params_att + grad_params_rec

    scan_outputs = [grad_input, grad_h_pre, C_pre]
    for prev_grad, grad in zip(prev_grad_params, grad_params):
        scan_outputs.append(prev_grad + grad)

    return tuple(scan_outputs)
Author: adbrebs, Project: raccoon, Lines: 58, Source: blocks.py
Example 13: get_output_for
def get_output_for(self, input, deterministic=False, **kwargs):
    input_mean = input.mean(self.axes)
    input_var = input.var(self.axes)

    # Decide whether to use the stored averages or mini-batch statistics
    use_averages = kwargs.get('batch_norm_use_averages',
                              deterministic)
    if use_averages:
        mean = self.mean
        var = self.var
    else:
        mean = input_mean
        var = input_var

    # Decide whether to update the stored averages
    update_averages = kwargs.get('batch_norm_update_averages',
                                 not deterministic)
    if update_averages:
        # Trick: To update the stored statistics, we create memory-aliased
        # clones of the stored statistics:
        running_mean = theano.clone(self.mean, share_inputs=False)
        running_var = theano.clone(self.var, share_inputs=False)
        # set a default update for them:
        running_mean.default_update = ((1 - self.alpha) * running_mean +
                                       self.alpha * input_mean)
        running_var.default_update = ((1 - self.alpha) * running_var +
                                      self.alpha * input_var)
        # and make sure they end up in the graph without participating in
        # the computation (this way their default_update will be collected
        # and applied, but the computation will be optimized away):
        mean += 0 * running_mean
        var += 0 * running_var

    # prepare dimshuffle pattern inserting broadcastable axes as needed
    param_axes = iter(range(self.beta.ndim))
    pattern = ['x' if input_axis in self.axes
               else next(param_axes)
               for input_axis in range(input.ndim)]

    # apply dimshuffle pattern to all parameters
    beta = self.beta.dimshuffle(pattern)
    gamma = self.gamma.dimshuffle(pattern)
    mean = mean.dimshuffle(pattern)
    std = T.sqrt(var + self.epsilon)
    std = std.dimshuffle(pattern)

    # normalize
    # normalized = (input - mean) * (gamma / std) + beta
    normalized = T.nnet.batch_normalization(input, gamma=gamma, beta=beta,
                                            mean=mean, std=std,
                                            mode=self.mode)
    return self.nonlinearity(normalized)
Author: AdityoSanjaya, Project: kaggle-right-whale, Lines: 52, Source: layers.py
Example 14: _apply
def _apply(self, x):
    import theano
    input_shape = K.shape(x)
    is_training = K.is_training(x)
    ndim = K.ndim(x)
    self.config(input_shape=input_shape)
    # ====== training mode ====== #
    input_mean = K.mean(x, self.axes)
    input_inv_std = K.inv(K.sqrt(K.var(x, self.axes) + self.epsilon))

    # Decide whether to use the stored averages or mini-batch statistics
    if not is_training:
        mean = self.mean
        inv_std = self.inv_std
    else:  # update the stored averages
        mean = input_mean
        inv_std = input_inv_std
        # Trick: To update the stored statistics, we create memory-aliased
        # clones of the stored statistics:
        running_mean = theano.clone(self.mean, share_inputs=False)
        running_inv_std = theano.clone(self.inv_std, share_inputs=False)
        # set a default update for them:
        running_mean.default_update = ((1 - self.alpha) * running_mean +
                                       self.alpha * input_mean)
        running_inv_std.default_update = ((1 - self.alpha) *
                                          running_inv_std +
                                          self.alpha * input_inv_std)
        # and make sure they end up in the graph without participating in
        # the computation (this way their default_update will be collected
        # and applied, but the computation will be optimized away):
        mean += 0 * running_mean
        inv_std += 0 * running_inv_std

    # prepare dimshuffle pattern inserting broadcastable axes as needed
    param_axes = iter(range(ndim - len(self.axes)))
    pattern = ['x' if input_axis in self.axes
               else next(param_axes)
               for input_axis in range(ndim)]

    # apply dimshuffle pattern to all parameters
    beta = 0 if self.beta is None else K.dimshuffle(self.beta, pattern)
    gamma = 1 if self.gamma is None else K.dimshuffle(self.gamma, pattern)
    mean = K.dimshuffle(mean, pattern)
    inv_std = K.dimshuffle(inv_std, pattern)

    # normalize
    normalized = (x - mean) * (gamma * inv_std) + beta
    # set shape for output
    K.add_shape(normalized, input_shape)
    return self.activation(normalized)
Author: trungnt13, Project: blocks, Lines: 50, Source: base.py
Example 15: get_output_for
def get_output_for(self, input, deterministic=False, collect=False,
                   **kwargs):
    if collect:
        # use this batch's mean and var
        if self.stat_indices is None:
            mean = input.mean(self.axes, keepdims=True)
            var = input.var(self.axes, keepdims=True)
        else:
            mean = input[self.stat_indices].mean(self.axes, keepdims=True)
            var = input[self.stat_indices].var(self.axes, keepdims=True)
        # and update the stored mean and var:
        # we create (memory-aliased) clones of the stored mean and var
        running_mean = theano.clone(self.mean, share_inputs=False)
        running_var = theano.clone(self.var, share_inputs=False)

        # set a default update for them
        if self.alpha is not 'single_pass':
            running_mean.default_update = (
                (1 - self.alpha) * running_mean + self.alpha * mean)
            running_var.default_update = (
                (1 - self.alpha) * running_var + self.alpha * var)
        else:
            print "Collecting using single pass..."
            # this is ugly figure out what can be safely removed...
            running_mean.default_update = (0 * running_mean + 1.0 * mean)
            running_var.default_update = (0 * running_var + 1.0 * var)

        # and include them in the graph so their default updates will be
        # applied (although the expressions will be optimized away later)
        mean += 0 * running_mean
        var += 0 * running_var
    elif deterministic:
        # use stored mean and var
        mean = self.mean
        var = self.var
    else:
        # use this batch's mean and var
        mean = input.mean(self.axes, keepdims=True)
        var = input.var(self.axes, keepdims=True)

    mean = T.addbroadcast(mean, *self.axes)
    var = T.addbroadcast(var, *self.axes)

    normalized = (input - mean) / T.sqrt(var + self.epsilon)

    if self.return_stats:
        return [normalized, mean, var]
    else:
        return normalized
Author: 2020zyc, Project: parmesan, Lines: 50, Source: normalize.py
Example 16: set_size_and_deterministic
def set_size_and_deterministic(self, node, s, d):
    initial_local = self._initial_part_matrix('local', s, d)
    initial_global = self._initial_part_matrix('global', s, d)
    # optimizations
    if isinstance(s, int) and (s == 1) or s is None:
        node = theano.clone(node, {
            self.logp: self.single_symbolic_logp
        })
    out = theano.clone(node, {
        self.symbolic_initial_local_matrix: initial_local,
        self.symbolic_initial_global_matrix: initial_global,
    })
    try_to_set_test_value(node, out, None)
    return out
Author: aasensio, Project: pymc3, Lines: 14, Source: opvi.py
Example 17: clone
def clone(**new_inputs):
    new_obj = utils.copy(self)
    # Reorder inputs
    assert len(new_obj.inputs) == len(new_inputs.items())
    pairs = [(x, new_inputs[x.name]) for x in inputs]

    new_obj.inputs = new_inputs.values()
    new_obj.out = theano.clone(new_obj.out, replace=pairs)
    if hasattr(new_obj, 'cost'):
        new_obj.cost = theano.clone(new_obj.cost, replace=pairs)
    if hasattr(new_obj, 'grads'):
        new_obj.grads = theano.clone(new_obj.grads, replace=pairs)
    if hasattr(new_obj, 'sample'):
        new_obj.sample = theano.clone(new_obj.sample, replace=pairs)
    return new_obj
Author: davidtob, Project: GroundHog, Lines: 14, Source: basic.py
Example 18: clone
def clone(self, **new_inputs):
    new_obj = utils.copy(self)
    # Reorder inputs
    assert len(new_obj.inputs) == len(new_inputs.items())
    # TODO: error with inputs arg here. corrected missing self argument, this method must not be used
    pairs = [(x, new_inputs[x.name]) for x in inputs]

    new_obj.inputs = new_inputs.values()
    new_obj.out = theano.clone(new_obj.out, replace=pairs)
    if hasattr(new_obj, 'cost'):
        new_obj.cost = theano.clone(new_obj.cost, replace=pairs)
    if hasattr(new_obj, 'grads'):
        new_obj.grads = theano.clone(new_obj.grads, replace=pairs)
    if hasattr(new_obj, 'sample'):
        new_obj.sample = theano.clone(new_obj.sample, replace=pairs)
    return new_obj
Author: chrisdonnan, Project: GroundHog, Lines: 15, Source: basic.py
Example 19: __init__
def __init__(self, rng, P_input, L2_input, **kwargs):
    # symbol declaration, initialization and definition
    x_1_tm1, x_t = (\
            sparse.csr_matrix("x_1_tm1", dtype=theano.config.floatX),\
            sparse.csr_matrix("x_t", dtype=theano.config.floatX)\
        )\
        if P_input is None else P_input[:2]

    # elements of history
    shape = kwargs.get("shape")
    if shape is not None:
        dict_size = shape[0]
        if len(shape) <= 1:
            del shape["shape"]
        else:
            shape["shape"] = shape["shape"][1:]
    else:
        dict_size = (16, 1, 32, 32)
    D_1_tm1 = theano.shared(rng.normal(size=dict_size).astype(theano.config.floatX))
    Dx_1_tm1 = sparse.dot(x_1_tm1, D_1_tm1)  # array access = dot operation
    super(SequenceCNN, self).__init__(rng=rng, inputsymbol=Dx_1_tm1, **kwargs)  # attaches new elements into the fgraph
    self.L2_output_1_tm1 = self.L2_output

    # elements of current time
    D_t = theano.shared(rng.normal(size=dict_size).astype(theano.config.floatX))
    Dx_t = sparse.dot(x_t, D_t)  # array access = dot operation
    self.L2_output_t = theano.clone(self.L2_output_1_tm1, replace={Dx_1_tm1: Dx_t})

    # element preparation for model building
    self.P_input = (x_1_tm1, x_t)
    self.params += [D_1_tm1, D_t]
    self.L2_output = self.L2_output_1_tm1 * self.L2_output_t
Author: citihome, Project: utils, Lines: 32, Source: dnn.py
Example 20: fuse
def fuse(building_blocks, fuse_dim=4, input_variables=None, entry_expression=None,
         output_expressions=-1, input_dtype='float32'):
    num_blocks = len(building_blocks)

    if isinstance(output_expressions, numbers.Number):
        output_expressions = [output_expressions]

    # account for indices -1, -2 etc
    output_expressions = [oe % num_blocks for oe in output_expressions]

    if fuse_dim == 4:
        fuse_block = T.tensor4
    else:
        fuse_block = T.matrix

    if input_variables is None and entry_expression is None:
        input_variables = fuse_block(dtype=input_dtype)
        entry_expression = input_variables

    current_expression = entry_expression
    outputs = []

    for i, block in enumerate(building_blocks):
        if not hasattr(block, "expression_"):
            block._build_expression()
        current_expression = theano.clone(
            block.expression_,
            replace={block.input_: current_expression},
            strict=False)
        if i in output_expressions:
            outputs.append(current_expression)

    return outputs, input_variables
Author: bin2000, Project: sklearn-theano, Lines: 34, Source: base.py
Note: the theano.clone examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation hosting platforms. The snippets come from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before reusing or redistributing the code; do not reproduce without permission.