This article collects typical usage examples of the Python theano.tensor.power function. If you have been wondering what power does, how to use it, or what real calls look like, the hand-picked code samples below should help.
The sections below show 20 code examples of the power function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
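Before the project snippets, here is a minimal standalone sketch of theano.tensor.power itself (written for this article, not taken from any project below): it builds the elementwise symbolic expression x ** y and compiles it into a callable.

    import numpy as np
    import theano
    import theano.tensor as T

    x = T.dmatrix('x')                     # symbolic base
    y = T.dscalar('y')                     # symbolic exponent
    z = T.power(x, y)                      # elementwise x ** y
    power_fn = theano.function([x, y], z)  # compile the graph

    print(power_fn(np.array([[1., 2.], [3., 4.]]), 2.0))
    # [[  1.   4.]
    #  [  9.  16.]]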
Example 1: beta_div
def beta_div(X, W, H, beta):
    """Compute the beta divergence D(X | WH).

    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        bases
    H : Theano tensor
        activation matrix
    beta : Theano scalar

    Returns
    -------
    div : Theano scalar
        beta divergence D(X | WH)"""
    div = ifelse(
        T.eq(beta, 2),  # Euclidean case
        T.sum(1. / 2 * T.power(X - T.dot(H, W), 2)),
        ifelse(
            T.eq(beta, 0),  # Itakura-Saito case
            T.sum(X / T.dot(H, W) - T.log(X / T.dot(H, W)) - 1),
            ifelse(
                T.eq(beta, 1),  # Kullback-Leibler case
                T.sum(T.mul(X, (T.log(X) - T.log(T.dot(H, W)))) + T.dot(H, W) - X),
                # General case; the cross term of the beta divergence is
                # beta * X * (WH)**(beta - 1), not beta * (X * WH)**(beta - 1)
                # as in the original snippet.
                T.sum(1. / (beta * (beta - 1.)) * (T.power(X, beta) +
                      (beta - 1.) * T.power(T.dot(H, W), beta) -
                      beta * T.mul(X, T.power(T.dot(H, W), beta - 1)))))))
    return div
Developer: rserizel, Project: beta_nmf, Lines: 31, Source: costs.py
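For reference, the final branch of beta_div implements the standard beta divergence; for β ∉ {0, 1, 2} it is, summed over every entry x of X against the corresponding entry y of the approximation WH:

    d_\beta(x \mid y) = \frac{1}{\beta(\beta - 1)} \left( x^{\beta} + (\beta - 1)\, y^{\beta} - \beta\, x\, y^{\beta - 1} \right)

The Euclidean (β = 2), Kullback-Leibler (β = 1) and Itakura-Saito (β = 0) cases handled by the explicit ifelse branches are the limit values of this expression.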
Example 2: beta_H_Sparse
def beta_H_Sparse(X, W, H, beta, l_sp):
    """Update the activations with the beta divergence, with a sparsity penalty.

    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        bases
    H : Theano tensor
        activation matrix
    beta : Theano scalar
    l_sp : Theano scalar
        sparsity penalty weight

    Returns
    -------
    H : Theano tensor
        updated version of the activations
    """
    up = ifelse(T.eq(beta, 2),
                (T.dot(X, W)) / (T.dot(T.dot(H, W.T), W) + l_sp),
                (T.dot(T.mul(T.power(T.dot(H, W.T), (beta - 2)), X), W)) /
                (T.dot(T.power(T.dot(H, W.T), (beta - 1)), W) + l_sp))
    return T.mul(H, up)
Developer: mikimaus78, Project: groupNMF, Lines: 25, Source: updates.py
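In matrix notation, the general branch above is the classical multiplicative update for beta-NMF with an added l1 sparsity penalty l_sp on the activations:

    H \leftarrow H \otimes \frac{\left( (H W^{T})^{[\beta - 2]} \otimes X \right) W}{(H W^{T})^{[\beta - 1]} W + l_{sp}}

where ⊗ denotes elementwise multiplication and the bracketed exponents elementwise powers; the β = 2 branch is the same rule with the powers simplified away.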
Example 3: beta_H_groupSparse
def beta_H_groupSparse(X, W, H, beta, l_sp, start, stop):
    """Update the activations with the beta divergence, with a group-sparsity penalty.

    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        bases
    H : Theano tensor
        activation matrix
    beta : Theano scalar
    l_sp : Theano scalar
        group-sparsity penalty weight
    start, stop : Theano vectors
        indices delimiting each group of columns of H

    Returns
    -------
    H : Theano tensor
        updated version of the activations
    """
    # Normalize each group of columns of H by its l2 norm; the last scan
    # output accumulates the normalization term for every group.
    results, _ = theano.scan(fn=lambda start_i, stop_i, prior_results, H:
                             T.set_subtensor(
                                 prior_results[:, start_i:stop_i].T,
                                 H[:, start_i:stop_i].T /
                                 H[:, start_i:stop_i].norm(2, axis=1)).T,
                             outputs_info=T.zeros_like(H),
                             sequences=[start, stop],
                             non_sequences=H)
    cst = results[-1]
    up = ifelse(T.eq(beta, 2),
                (T.dot(X, W)) / (T.dot(T.dot(H, W.T), W) + l_sp * cst),
                (T.dot(T.mul(T.power(T.dot(H, W.T), (beta - 2)), X), W)) /
                (T.dot(T.power(T.dot(H, W.T), (beta - 1)), W) + l_sp * cst))
    return T.mul(H, up)
Developer: mikimaus78, Project: groupNMF, Lines: 34, Source: updates.py
Example 4: W_beta_sub_withcst
def W_beta_sub_withcst(X, W, Wsub, H, Hsub, beta, sum_grp, lambda_grp, card_grp):
    """Update the group bases with the beta divergence, with a group constraint.

    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        bases
    Wsub : Theano tensor
        group bases
    H : Theano tensor
        activation matrix
    Hsub : Theano tensor
        group activation matrix
    beta : Theano scalar
    sum_grp : Theano tensor
        sum of the bases over the group (constraint target)
    lambda_grp : Theano scalar
        weight of the group constraint
    card_grp : Theano scalar
        cardinality of the group

    Returns
    -------
    W : Theano tensor
        updated version of the group bases
    """
    up = ifelse(T.eq(beta, 2),
                (T.dot(X.T, Hsub) + lambda_grp * sum_grp) /
                (T.dot(T.dot(H, W.T).T, Hsub) + lambda_grp * card_grp * Wsub),
                (T.dot(T.mul(T.power(T.dot(H, W.T), (beta - 2)), X).T, Hsub) +
                 lambda_grp * sum_grp) /
                (T.dot(T.power(T.dot(H, W.T), (beta - 1)).T, Hsub) +
                 lambda_grp * card_grp * Wsub))
    return T.mul(Wsub, up)
Developer: mikimaus78, Project: groupNMF, Lines: 29, Source: updates.py
Example 5: H_beta_sub
def H_beta_sub(X, W, Wsub, H, Hsub, beta):
    """Update the group activations with the beta divergence.

    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        bases
    Wsub : Theano tensor
        group bases
    H : Theano tensor
        activation matrix
    Hsub : Theano tensor
        group activation matrix
    beta : Theano scalar

    Returns
    -------
    H : Theano tensor
        updated version of the group activations
    """
    up = ifelse(T.eq(beta, 2),
                (T.dot(X, Wsub)) / (T.dot(T.dot(H, W.T), Wsub)),
                (T.dot(T.mul(T.power(T.dot(H, W.T), (beta - 2)), X), Wsub)) /
                (T.dot(T.power(T.dot(H, W.T), (beta - 1)), Wsub)))
    return T.mul(Hsub, up)
Developer: mikimaus78, Project: groupNMF, Lines: 26, Source: updates.py
Example 6: logp
def logp(self, value):
    q = self.q
    beta = self.beta
    return bound(tt.log(tt.power(q, tt.power(value, beta)) -
                        tt.power(q, tt.power(value + 1, beta))),
                 0 <= value,
                 0 < q, q < 1,
                 0 < beta)
Developer: bballamudi, Project: pymc3, Lines: 8, Source: discrete.py
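This is the log-probability of the discrete Weibull distribution in the Nakagawa-Osaki parametrization, whose probability mass function is

    P(X = x) = q^{x^{\beta}} - q^{(x+1)^{\beta}}, \qquad x = 0, 1, 2, \ldots

with 0 < q < 1 and β > 0; the bound call simply enforces the support and parameter constraints before the logarithm is taken.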
Example 7: my_activation
def my_activation(input):
    # Softsign activation x / (1 + |x|), with the input first rounded
    # to d = 5 decimal places.
    d = 5
    input = input * T.power(10, d)
    input = T.round(input)
    x = input / T.power(10, d)
    abs_x = T.abs_(x)  # Theano's elementwise abs is T.abs_; T.abs does not exist
    return x / (1. + abs_x)
Developer: ma-ver-ick, Project: bell-recognition-training, Lines: 9, Source: test_mlp_003.py
Example 8: __init__
def __init__(self, n_inputs=1024, n_classes=10, n_hidden_nodes=100, alpha=0.1, lr=0.05, n_epoch=200,
             activation='sigmoid'):
    """
    A neural network implementation using Theano, with one hidden layer and an output layer of 10 nodes.
    :param n_hidden_nodes:
        number of nodes in the hidden layer
    :param alpha:
        coefficient for L2 weight regularization
    :param n_epoch:
        number of training epochs for SGD. Default: 200
    :param activation:
        choice of activation among ['sigmoid', 'relu', 'linear']. Default: 'sigmoid'
    :param n_inputs:
        number of inputs (hard-coded for the assignment)
    :param n_classes:
        number of output nodes (hard-coded for the assignment)
    """
    self.activation = activation
    self.n_epoch = n_epoch
    self.n_hidden_nodes = n_hidden_nodes
    self.n_inputs = n_inputs
    self.n_classes = n_classes
    # Initialize weights, Theano variables and symbolic expressions
    X = T.matrix('X')
    y = T.matrix('y')
    self.layers = [
        theano.shared(name="W_hidden", value=floatX(np.random.rand(self.n_inputs, self.n_hidden_nodes) - 0.5)),
        theano.shared(name="W_output", value=floatX(np.random.rand(self.n_hidden_nodes, self.n_classes) - 0.5))]
    self.lr = theano.shared(floatX(lr))
    self.alpha = theano.shared(floatX(alpha))
    if self.activation == 'sigmoid':
        self.fprop = T.dot(T.nnet.sigmoid(T.dot(X, self.layers[0])), self.layers[1])
    elif self.activation == 'relu':
        self.fprop = T.dot(T.nnet.relu(T.dot(X, self.layers[0])), self.layers[1])
    else:
        self.fprop = T.dot(T.dot(X, self.layers[0]), self.layers[1])
    # L2 penalty: 0.5 * alpha * (||W_hidden||^2 + ||W_output||^2)
    self.regularization = 0.5 * self.alpha * T.sum(T.power(self.layers[0], 2)) + \
                          0.5 * self.alpha * T.sum(T.power(self.layers[1], 2))
    self.loss = T.mean((T.nnet.softmax(self.fprop) - y) ** 2) + self.regularization
    gradient_hidden = T.grad(cost=self.loss, wrt=self.layers[0])
    gradient_output = T.grad(cost=self.loss, wrt=self.layers[1])
    self.update = [(self.layers[0], self.layers[0] - gradient_hidden * self.lr),
                   (self.layers[1], self.layers[1] - gradient_output * self.lr)]
    self.fit = theano.function(inputs=[X, y], outputs=self.loss, updates=self.update, allow_input_downcast=True)
    self.predict_ = theano.function(inputs=[X], outputs=T.argmax(T.nnet.softmax(self.fprop), axis=1),
                                    allow_input_downcast=True)
Developer: JonnyTran, Project: ML-algorithms, Lines: 56, Source: neural_net.py
Example 9: my_activation
def my_activation(input):
    # Softsign x / (1 + |x|), with both input and output rounded
    # to d = 2 decimal places.
    d = 2
    input = input * T.power(10, d)
    input = T.round(input)
    x = input / T.power(10, d)
    abs_x = abs(x)
    ret = x / (1. + abs_x)
    ret = T.round(ret * T.power(10, d)) / T.power(10, d)
    return ret
Developer: ma-ver-ick, Project: bell-recognition-training, Lines: 11, Source: test_mlp_003.py
Example 10: eucl_dist
def eucl_dist(X, Y):
    """Compute the Euclidean distance between X and Y.

    Parameters
    ----------
    X : Theano tensor
    Y : Theano tensor

    Returns
    -------
    out : Theano scalar
        Euclidean distance"""
    return T.sum((1.0 / 2) * (T.power(X, 2) + T.power(Y, 2) - 2 * T.mul(X, Y)))
Developer: rserizel, Project: groupNMF, Lines: 13, Source: costs.py
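The expanded form is the binomial identity applied elementwise; the returned value is

    \frac{1}{2} \sum_{i,j} \left( X_{ij}^{2} + Y_{ij}^{2} - 2 X_{ij} Y_{ij} \right) = \frac{1}{2} \sum_{i,j} \left( X_{ij} - Y_{ij} \right)^{2}

i.e. half the squared Frobenius distance, matching the β = 2 branch of beta_div in Example 1.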
Example 11: __init__
def __init__(self, rng, input, AELayerSizes, classifyLayerSizes):
    self.input = input
    self.label = T.ivector('label')
    self.params = []
    self.AEparams = []
    self.params_inc = []
    # Mirror the encoder sizes to build the decoder half
    self.AELayerSizes = AELayerSizes + AELayerSizes[::-1][1:]
    self.AELayerNum = len(self.AELayerSizes)
    self.AELayers = [input]
    for i in range(1, self.AELayerNum):
        if i == 1:
            self.AELayers.append(HiddenLayer(rng, self.input, self.AELayerSizes[0], self.AELayerSizes[1]))
        elif i != self.AELayerNum - 1:
            self.AELayers.append(HiddenLayer(rng, self.AELayers[i - 1].output, self.AELayerSizes[i - 1], self.AELayerSizes[i]))
        else:  # last layer: linear output
            self.AELayers.append(HiddenLayer(rng, self.AELayers[i - 1].output, self.AELayerSizes[i - 1], self.AELayerSizes[i], activation=None))
        self.params += self.AELayers[i].params
        self.AEparams += self.AELayers[i].params
        self.params_inc += self.AELayers[i].params_inc
    self.classifyLayerSizes = classifyLayerSizes
    self.classifyLayerNum = len(self.classifyLayerSizes)
    self.classifyLayers = []
    for i in range(self.classifyLayerNum):
        if i == 0:
            # The classifier branches off the bottleneck (middle) layer
            mid_layer = len(AELayerSizes) - 1
            last_input = self.AELayers[mid_layer].output
        else:
            last_input = self.classifyLayers[i - 1].output
        if i == 0:
            self.classifyLayers.append(HiddenLayer(rng, last_input, AELayerSizes[-1], self.classifyLayerSizes[i]))
        elif i != self.classifyLayerNum - 1:
            self.classifyLayers.append(HiddenLayer(rng, last_input, self.classifyLayerSizes[i - 1], self.classifyLayerSizes[i]))
        else:
            self.classifyLayers.append(LogisticRegression(last_input, self.classifyLayerSizes[i - 1], self.classifyLayerSizes[i]))
        self.params += self.classifyLayers[i].params
        self.params_inc += self.classifyLayers[i].params_inc
    self.loss_NLL = self.classifyLayers[-1].negative_log_likelihood
    # Squared reconstruction error between the input and the decoder output
    self.loss_L2rec = T.mean(T.sum(T.power(self.input - self.AELayers[-1].output, 2), axis=1))
    # Pairwise L2 losses between mirrored encoder/decoder layers
    self.loss_L2M = []
    for i in range(1, self.AELayerNum // 2):  # integer division, so it also runs on Python 3
        self.loss_L2M.append(T.mean(T.sum(T.power(self.AELayers[i].output - self.AELayers[-i - 1].output, 2), axis=1)))
    self.errors = self.classifyLayers[-1].errors
Developer: YajunHu, Project: Neural-network-with-theano, Lines: 51, Source: multiLossAE.py
Example 12: beta_div
def beta_div(X, W, H, beta):
    """Compute the beta divergence D(X | WH)."""
    div = ifelse(T.eq(beta, 2),
                 T.sum(1. / 2 * T.power(X - T.dot(H, W), 2)),
                 ifelse(T.eq(beta, 0),
                        T.sum(X / T.dot(H, W) - T.log(X / T.dot(H, W)) - 1),
                        ifelse(T.eq(beta, 1),
                               T.sum(T.mul(X, (T.log(X) - T.log(T.dot(H, W)))) + T.dot(H, W) - X),
                               # General case; as in Example 1, the cross term
                               # is beta * X * (WH)**(beta - 1).
                               T.sum(1. / (beta * (beta - 1.)) * (T.power(X, beta) +
                                     (beta - 1.) * T.power(T.dot(H, W), beta) -
                                     beta * T.mul(X, T.power(T.dot(H, W), beta - 1)))))))
    return div
Developer: mikimaus78, Project: groupNMF, Lines: 15, Source: costs.py
Example 13: __init__
def __init__(self, rng, layerSizes):
    self.AELayers = []
    self.ups = []
    self.downs = []
    self.params = []
    self.layerSizes = layerSizes
    self.n_layers = len(layerSizes) - 1
    assert self.n_layers > 0
    self.input = T.matrix('AE_Input')
    self.ups.append(self.input)
    # Encoder pass: stack the AE layers bottom-up
    for i in range(self.n_layers):
        if i == 0:
            self.AELayers.append(AELayer(rng, self.ups[i], self.layerSizes[i], self.layerSizes[i + 1], down_activation=None))
        else:
            self.AELayers.append(AELayer(rng, self.ups[i], self.layerSizes[i], self.layerSizes[i + 1]))
        self.params += self.AELayers[i].params
        self.ups.append(self.AELayers[i].get_hidden(self.ups[i]))
    # Decoder pass: unroll the layers in reverse for the reconstruction
    self.downs.append(self.ups[-1])
    for i in range(self.n_layers - 1, -1, -1):
        self.downs.append(self.AELayers[i].get_reconstruction(self.downs[self.n_layers - 1 - i]))
    self.loss_rec = T.mean(T.sum(T.power(self.input - self.downs[-1], 2), axis=1))
Developer: YajunHu, Project: Neural-network-with-theano, Lines: 25, Source: AutoEncoder.py
Example 14: fit
def fit(self, X, y=None):
    self.n_features = y.shape[0]
    self.weights['input'] = theano.shared(value=np.zeros((
        self.n_features, X.shape[1], self.spatial[0], self.spatial[1]),
        dtype=theano.config.floatX), name='w', borrow=True)
    input = T.tensor4(name='input')
    target = T.tensor4(name='target')
    decay = T.scalar(name='decay')
    # Correlation statistics for the regularized least-squares weight update
    xy = T.nnet.conv2d(input.transpose(1, 0, 2, 3), target.transpose(1, 0, 2, 3),
                       border_mode=self.pad, subsample=self.stride)
    xx = T.sum(T.power(input, 2), axis=(0, 2, 3))
    # k = ifelse(self.hidden_matrices['input'] is None, )
    # ^ incomplete in the original source and unused below, so left commented out
    lam = theano.shared(value=self._C, name='constrain', borrow=True)
    prediction = T.nnet.conv2d(input, self.weights['input'],
                               border_mode=self.pad,
                               subsample=self.stride)
    weights, _ = theano.scan(
        fn=lambda a, k, c: a / (k + c), outputs_info=None,
        sequences=[self.hidden_matrices['A'].transpose(1, 0, 2, 3),
                   self.hidden_matrices['K']], non_sequences=lam)
    new_weights = weights.transpose(1, 0, 2, 3)
    updates = [(self.hidden_matrices['K'],
                self.hidden_matrices['K'].dot(decay) + xx),
               (self.hidden_matrices['A'],
                self.hidden_matrices['A'].dot(decay) + xy),
               (self.weights['input'], new_weights)]
    self.conv_fct['train'] = theano.function([input, target, decay],
                                             prediction,
                                             updates=updates)
    self.conv_fct['predict'] = theano.function([input], prediction)
    return self.conv_fct['train'](X, y, 1)
Developer: maestrotf, Project: pyextremelm, Lines: 32, Source: convolution.py
Example 15: _policy_function
def _policy_function(self):
    epoch, gm, powr, step = T.scalars('epoch', 'gm', 'powr', 'step')
    if self.lr_policy == 'inv':
        decay = T.power(1.0 + gm * epoch, -powr)
    elif self.lr_policy == 'exp':
        decay = gm ** epoch
    elif self.lr_policy == 'step':
        # Multiply by gm every `step` epochs, keep the rate unchanged otherwise
        decay = T.switch(T.eq(T.mod_check(epoch, step), 0.0),
                         T.power(gm, T.floor_div(epoch, step)),
                         1.0)
    elif self.lr_policy == 'fixed':
        decay = T.constant(1.0, name='fixed', dtype=theano.config.floatX)
    return theano.function([gm, epoch, powr, step],
                           decay,
                           allow_input_downcast=True,
                           on_unused_input='ignore')
Developer: hmendozap, Project: master-arbeit-projects, Lines: 17, Source: LogisticRegression.py
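As a quick sanity check (hypothetical numbers, not from the project): with lr_policy == 'inv', the compiled function evaluates decay = (1 + gm * epoch) ** -powr, so a caller might use it as follows.

    # decay_fn = self._policy_function()       # built with self.lr_policy == 'inv'
    # factor = decay_fn(0.001, 100, 0.75, 0)   # argument order: gm, epoch, powr, step
    # factor == (1 + 0.001 * 100) ** -0.75 ~= 0.93, the factor applied to the base
    # learning rate; `step` is ignored here thanks to on_unused_input='ignore'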
Example 16: _ppf
def _ppf(self, p):
    """
    The percentile point function (the inverse of the cumulative
    distribution function) of the discrete Weibull distribution.
    """
    q = self.q
    beta = self.beta
    return (tt.ceil(tt.power(tt.log(1 - p) / tt.log(q), 1. / beta)) - 1).astype('int64')
Developer: bballamudi, Project: pymc3, Lines: 9, Source: discrete.py
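The expression follows from inverting the CDF of the distribution in Example 6, F(x) = 1 - q^{(x+1)^{\beta}}: setting F(x) = p and solving for the smallest admissible integer x gives

    x = \left\lceil \left( \frac{\log(1 - p)}{\log q} \right)^{1/\beta} \right\rceil - 1

which is exactly the ceil/power/log chain above, cast to int64 at the end.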
Example 17: policy_update
def policy_update(self, lr_policy):
    epoch, gm, powr, step = T.scalars('epoch', 'gm', 'powr', 'step')
    if lr_policy == 'inv':
        decay = T.power(1 + gm * epoch, -powr)
    elif lr_policy == 'exp':
        decay = gm ** epoch
    elif lr_policy == 'step':
        decay = T.switch(T.eq(T.mod_check(epoch, step), 0),
                         T.power(gm, T.floor_div(epoch, step)),
                         1.0)
    elif lr_policy == 'fixed':
        decay = T.constant(1.0, name='fixed', dtype='float32')
    # Unlike Example 15, this variant also rescales the shared learning
    # rate in place via the updates list.
    return theano.function([gm, epoch, powr, step],
                           decay,
                           updates=[(self.shared_lr,
                                     self.shared_lr * decay)],
                           on_unused_input='ignore')
Developer: hmendozap, Project: master-arbeit-projects, Lines: 18, Source: test_policies_updates.py
Example 18: integrand_w_flat
def integrand_w_flat(z, Om, w):
    """
    :param z: redshift
    :param Om: matter density
    :param w: dark-energy equation of state (EOS)
    :return: Theano array of 1/H(z)
    """
    zp = 1 + z
    Ode = 1 - Om - Or  # flatness fixes the dark-energy density; Or is the
    # radiation density defined at module level
    return T.power((T.pow(zp, 3) * (Or * zp + Om) + Ode * T.pow(zp, 3.0 * (1 + w))), -0.5)
Developer: drJfunk, Project: supernova, Lines: 11, Source: cosmo.py
Example 19: integrand_constant_flat
def integrand_constant_flat(z, Om):
    """
    :param z: redshift
    :param Om: matter density
    :return: Theano array of 1/H(z)
    """
    zp = 1 + z
    Ode = 1 - Om - Or  # flatness fixes the dark-energy density
    return T.power(T.pow(zp, 3) * Om + Ode, -0.5)
Developer: drJfunk, Project: supernova, Lines: 11, Source: cosmo.py
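Both integrands are special cases of the dimensionless Hubble function of a flat universe,

    E(z) = \sqrt{\Omega_r (1+z)^{4} + \Omega_m (1+z)^{3} + \Omega_{de} (1+z)^{3(1+w)}}, \qquad \Omega_{de} = 1 - \Omega_m - \Omega_r

and both return 1/E(z), the quantity integrated over redshift to obtain comoving distances. Example 19 is the cosmological-constant case (w = -1), with the radiation term dropped from E(z) itself but kept in the flatness budget for Ode.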
Example 20: get_rbfnet_predict_function
def get_rbfnet_predict_function(metric_name):
    X_matrix = T.dmatrix('X')
    W_matrix = T.dmatrix('W')
    beta = T.dvector('beta')
    b = T.scalar('b')
    # Pairwise distances between the inputs and the RBF centers, per the chosen metric
    H_matrix = metric_theano[metric_name](X_matrix, W_matrix)
    # Gaussian RBF; T.exp (not np.exp as in the original) keeps the graph symbolic
    H_rbf = T.exp(T.power(H_matrix, 2) * (-b))
    s = T.sgn(T.dot(H_rbf, beta))
    rbfnet_predict_function = theano.function([X_matrix, W_matrix, beta, b], s)
    return rbfnet_predict_function
Developer: alexander-myronov, Project: RNN, Lines: 12, Source: twelm_theano.py
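A minimal usage sketch (hypothetical: it assumes metric_theano maps metric names to symbolic pairwise-distance builders, stubbed here with a Euclidean one; X_test, centers and beta_weights are placeholder arrays):

    import theano.tensor as T

    # Hypothetical stand-in for the module-level metric_theano registry
    metric_theano = {
        'euclidean': lambda X, W: T.sqrt(T.maximum(
            T.sum(X ** 2, axis=1).dimshuffle(0, 'x')   # ||x||^2 as a column
            + T.sum(W ** 2, axis=1)                    # ||w||^2 broadcast as a row
            - 2 * T.dot(X, W.T), 0))                   # -2 <x, w> pairwise
    }

    predict = get_rbfnet_predict_function('euclidean')
    labels = predict(X_test, centers, beta_weights, 0.5)  # entries in {-1, 0, 1}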
Note: the theano.tensor.power examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce without permission.