This page collects typical usage examples of the Python function theano.tensor.pow. If you have been wondering what pow does, how to call it, or what it looks like in real code, the curated examples below should help.
Twenty code examples of the pow function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
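All of the examples assume the usual Theano imports (import theano, import theano.tensor as T, import numpy). As a quick orientation before the real-world snippets, here is a minimal, self-contained sketch (not drawn from any of the projects below) showing that T.pow is the elementwise power operator, equivalent to ** on tensors:

import numpy as np
import theano
import theano.tensor as T

a = T.fvector('a')
b = T.fvector('b')
f = theano.function([a, b], T.pow(a, b))  # elementwise a ** b

aval = np.array([1., 2., 3.], dtype='float32')
bval = np.array([2., 0.5, 3.], dtype='float32')
print(f(aval, bval))  # -> [ 1.  1.4142135  27.]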
Example 1: get_testing_function
def get_testing_function(self, test_data, test_mask, pct_blackout=0.5):
    raise NotImplementedError("fix me!")  # flagged as unfinished by the original author
    i, batch_size = T.iscalars('i', 'batch_size')
    self.test_noise = T.shared_randomstreams.RandomStreams(1234).binomial(
        (self.inputs.shape), n=1, p=1 - pct_blackout,
        dtype=theano.config.floatX)
    self.test_noisy = self.test_noise * self.inputs
    self.test_active_hidden = T.nnet.sigmoid(T.dot(self.test_noisy, self.W) + self.b_in)
    self.test_output = T.nnet.sigmoid(T.dot(self.test_active_hidden, self.W.T) + self.b_out)
    # Root mean squared error over the unknown entries only: use the input
    # vector's mask (which beers had no rating) to zero out predicted ratings
    # wherever the original input carried no information, so that dimensions
    # with no meaningful input do not contribute to the error.
    # flattened output = (mask of items withheld from the network, so only
    # non-inputted answers are scored) . (inputs_mask . full output vector)
    self.only_originally_unknown = T.dot(1 - self.test_noise, T.dot(self.inputs_mask, self.test_output))
    self.test_error = T.pow(T.mean(T.pow(T.dot(self.inputs_mask, self.test_output) - self.inputs, 2)), 0.5)
    self.testing_function = theano.function([i, batch_size], self.test_error,
                                            givens={self.inputs: test_data[i:i + batch_size],
                                                    self.inputs_mask: test_mask[i:i + batch_size]})
    return self.testing_function
Author: lauraskelton, Project: rbm-awesomeness, Lines: 25, Source: autoencoder.py
Example 2: __init__
def __init__(self, inputData, image_shape):
    self.input = inputData
    num_out = image_shape[1]
    epsilon = 0.01
    self.image_shape = image_shape
    gamma_values = numpy.ones((num_out,), dtype=theano.config.floatX)
    self.gamma_vals = theano.shared(value=gamma_values, borrow=True)
    beta_values = numpy.zeros((num_out,), dtype=theano.config.floatX)
    self.beta_vals = theano.shared(value=beta_values, borrow=True)
    batch_mean = T.mean(self.input, keepdims=True, axis=(0, 2, 3))
    batch_var = T.var(self.input, keepdims=True, axis=(0, 2, 3)) + epsilon
    self.batch_mean = self.adjustVals(batch_mean)
    batch_var = self.adjustVals(batch_var)
    self.batch_var = T.pow(batch_var, 0.5)  # standard deviation
    # self.batch_var already holds the standard deviation, so divide by it
    # directly; the original divided by T.pow(self.batch_var, 0.5), i.e. var**0.25.
    batch_normalize = (inputData - self.batch_mean) / self.batch_var
    self.beta = self.beta_vals.dimshuffle('x', 0, 'x', 'x')
    self.gamma = self.gamma_vals.dimshuffle('x', 0, 'x', 'x')
    self.output = batch_normalize * self.gamma + self.beta
    # self.output = inputData - self.batch_mean
    self.params = [self.gamma_vals, self.beta_vals]
Author: RedHenLab, Project: Gesture, Lines: 28, Source: layers.py
Example 3: test_0
def test_0():
    N = 16 * 1000 * 10 * 1
    if 1:
        aval = abs(numpy.random.randn(N).astype('float32')) + .1
        bval = numpy.random.randn(N).astype('float32')
        a = T.fvector()
        b = T.fvector()
    else:
        aval = abs(numpy.random.randn(N)) + .1
        bval = numpy.random.randn(N)
        a = T.dvector()
        b = T.dvector()
    f = theano.function([a, b], T.pow(a, b), mode='LAZY')
    theano_opencl.elemwise.swap_impls = False
    g = theano.function([a, b], T.pow(a, b), mode='LAZY')
    print('ocl time', timeit.Timer(lambda: f(aval, bval)).repeat(3, 3))
    print('gcc time', timeit.Timer(lambda: g(aval, bval)).repeat(3, 3))
    print('numpy time', timeit.Timer(lambda: aval ** bval).repeat(3, 3))
    assert ((f(aval, bval) - aval ** bval) ** 2).sum() < 1.1
    assert ((g(aval, bval) - aval ** bval) ** 2).sum() < 1.1
Author: jaberg, Project: TheanoWS, Lines: 27, Source: test_elemwise.py
Example 4: _step
def _step(self, xg_t, xo_t, xc_t, mask_tm1, h_tm1, c_tm1, u_g, u_o, u_c):
    h_mask_tm1 = mask_tm1 * h_tm1
    c_mask_tm1 = mask_tm1 * c_tm1
    act = T.tensordot(xg_t + h_mask_tm1, u_g, [[1], [2]])
    gate = T.nnet.softmax(act.reshape((-1, act.shape[-1]))).reshape(act.shape)
    c_tilda = self.activation(xc_t + T.dot(h_mask_tm1, u_c))
    sigma_se = self.k_parameters[0]
    sigma_per = self.k_parameters[1]
    sigma_b_lin = self.k_parameters[2]
    sigma_v_lin = self.k_parameters[3]
    sigma_rq = self.k_parameters[4]
    l_se = self.k_parameters[5]
    l_per = self.k_parameters[6]
    l_lin = self.k_parameters[7]
    l_rq = self.k_parameters[8]
    alpha_rq = self.k_parameters[9]
    p_per = self.k_parameters[10]
    # squared-exponential, periodic, linear and rational-quadratic kernels
    k_se = T.pow(sigma_se, 2) * T.exp(-T.pow(c_mask_tm1 - c_tilda, 2) / (2 * T.pow(l_se, 2) + self.EPS))
    k_per = T.pow(sigma_per, 2) * T.exp(-2 * T.pow(T.sin(math.pi * (c_mask_tm1 - c_tilda) / (p_per + self.EPS)), 2) / (T.pow(l_per, 2) + self.EPS))
    k_lin = T.pow(sigma_b_lin, 2) + T.pow(sigma_v_lin, 2) * (c_mask_tm1 - l_lin) * (c_tilda - l_lin)
    k_rq = T.pow(sigma_rq, 2) * T.pow(1 + T.pow(c_mask_tm1 - c_tilda, 2) / (2 * alpha_rq * T.pow(l_rq, 2) + self.EPS), -alpha_rq)
    ops = [c_mask_tm1, c_tilda, k_se, k_per, k_lin, k_rq]
    yshuff = T.as_tensor_variable(ops, name='yshuff').dimshuffle(1, 2, 0)
    c_t = (gate.reshape((-1, gate.shape[-1])) * yshuff.reshape((-1, yshuff.shape[-1]))).sum(axis=1).reshape(gate.shape[:2])
    o_t = self.inner_activation(xo_t + T.dot(h_mask_tm1, u_o))
    h_t = o_t * self.activation(c_t)
    return h_t, c_t
Author: hongyuanzhu, Project: keras, Lines: 34, Source: recurrentpp_soft.py
Example 5: _model_setup
def _model_setup(self):
    with self._model:
        # COSMOLOGY
        omega_m = pm.Uniform("OmegaM", lower=0, upper=1.)
        # dark energy EOS
        w = pm.Normal("w", mu=-1, sd=1)
        # My custom distance modulus function to enable
        # ADVI and HMC sampling.
        dm = distmod_w_flat(omega_m, self._h0, w, self._zcmb)
        # PHILLIPS PARAMETERS
        # M0 is the location parameter for the distribution;
        # sys_scat is the scale parameter for the M0 distribution,
        # rather than "unexplained variance"
        M0 = pm.Normal("M0", mu=-19.3, sd=2.)
        sys_scat = pm.HalfCauchy('sys_scat', beta=2.5)  # Gelman recommendation for variance parameter
        M_true = pm.Normal('M_true', M0, sys_scat, shape=self._n_SN)
        # following Rubin's Unity model... best idea? not sure
        taninv_alpha = pm.Uniform("taninv_alpha", lower=-.2, upper=.3)
        taninv_beta = pm.Uniform("taninv_beta", lower=-1.4, upper=1.4)
        # Transform variables
        alpha = pm.Deterministic('alpha', T.tan(taninv_alpha))
        beta = pm.Deterministic('beta', T.tan(taninv_beta))
        # Again using Rubin's Unity model. After discussion with Rubin,
        # the idea is that these parameters are ideally sampled from a
        # Gaussian, but we know they are not entirely correct. So instead,
        # the Cauchy is less informative around the mean, while still
        # having informative tails.
        xm = pm.Cauchy('xm', alpha=0, beta=1)
        cm = pm.Cauchy('cm', alpha=0, beta=1)
        Rx_log = pm.Uniform('Rx_log', lower=-0.5, upper=0.5)
        Rc_log = pm.Uniform('Rc_log', lower=-1.5, upper=1.5)
        # Transformed variables
        Rx = pm.Deterministic("Rx", T.pow(10., Rx_log))
        Rc = pm.Deterministic("Rc", T.pow(10., Rc_log))
        x_true = pm.Normal('x_true', mu=xm, sd=Rx, shape=self._n_SN)
        c_true = pm.Normal('c_true', mu=cm, sd=Rc, shape=self._n_SN)
        # Apply the correction
        mb = pm.Deterministic("mb", M_true + dm - alpha * x_true + beta * c_true)
        # Likelihood and measurement error
        obsc = pm.Normal("obsc", mu=c_true, sd=self._dcolor, observed=self._color)
        obsx = pm.Normal("obsx", mu=x_true, sd=self._dx1, observed=self._x1)
        obsm = pm.Normal("obsm", mu=mb, sd=self._dmb_obs, observed=self._mb_obs)
Author: drJfunk, Project: supernova, Lines: 60, Source: sn_bayes_models.py
Example 6: lp_norm
def lp_norm(self, n, k, r, c, z):
    '''
    Lp = (1/n * sum(|x_i|^p, 1..n))^(1/p) where p = 1 + ln(1+e^P)
    :param n: batch index
    :param k: feature-map index
    :param r: row index of the output element
    :param c: column index of the output element
    :param z: output tensor the pooled value is written into
    :return: z with z[n, k, r, c] set to the Lp norm of the pooling window
    '''
    ds0, ds1 = self.pool_size
    st0, st1 = self.stride
    pad_h = self.pad[0]
    pad_w = self.pad[1]
    row_st = r * st0
    row_end = T.minimum(row_st + ds0, self.img_rows)
    row_st = T.maximum(row_st, self.pad[0])
    row_end = T.minimum(row_end, self.x_m2d + pad_h)
    col_st = c * st1
    col_end = T.minimum(col_st + ds1, self.img_cols)
    col_st = T.maximum(col_st, self.pad[1])
    col_end = T.minimum(col_end, self.x_m1d + pad_w)
    Lp = T.pow(
        T.mean(T.pow(
            T.abs_(T.flatten(self.y[n, k, row_st:row_end, col_st:col_end], 1)),
            1 + T.log(1 + T.exp(self.P))
        )),
        1 / (1 + T.log(1 + T.exp(self.P)))
    )
    return T.set_subtensor(z[n, k, r, c], Lp)
Author: alxrsngrtn, Project: LearnedNormPooling, Lines: 34, Source: layer.py
Example 7: get_reg_ind
def get_reg_ind(self):
    drop_ax, drop_ay = T.pow(T.exp(self.params[-2]), 2), T.pow(T.exp(self.params[-1]), 2)
    constant = np.cast[theano.config.floatX](.5 * np.log(self.noise_lvl) + c1 * self.noise_lvl + c2 * (self.noise_lvl ** 2) + c3 * (self.noise_lvl ** 3))
    reg_indx = .5 * T.log(drop_ax) + c1 * drop_ax + c2 * T.pow(drop_ax, 2) + c3 * T.pow(drop_ax, 3) - constant
    reg_indy = .5 * T.log(drop_ay) + c1 * drop_ay + c2 * T.pow(drop_ay, 2) + c3 * T.pow(drop_ay, 3) - constant
    reg_ind = T.cast(T.prod(self.params[3].shape), theano.config.floatX) * reg_indx + T.cast(T.prod(self.params[4].shape), theano.config.floatX) * reg_indy
    return reg_ind
Author: AMLab-Amsterdam, Project: SEVDL_MGP, Lines: 7, Source: matrix_layers.py
Example 8: finetune_cost_updates
def finetune_cost_updates(self, center, mu, learning_rate):
    """ This function computes the cost and the updates. """
    # note: we sum over the size of a datapoint; if we are using
    # minibatches, L will be a vector, with one entry per
    # example in the minibatch
    network_output = self.get_output()
    temp = T.pow(center - network_output, 2)
    L = T.sum(temp, axis=1)
    # Add the network reconstruction error
    z = self.get_network_reconst()
    reconst_err = T.sum(T.pow(self.x - z, 2), axis=1)
    L = self.beta * L + self.lbd * reconst_err
    cost1 = T.mean(L)
    cost2 = self.lbd * T.mean(reconst_err)
    cost3 = cost1 - cost2
    # compute the gradients of the cost of the `dA` with respect
    # to its parameters
    gparams = T.grad(cost1, self.params)
    # generate the list of updates
    updates = []
    grad_values = []
    param_norm = []
    for param, delta, gparam in zip(self.params, self.delta, gparams):
        updates.append((delta, mu * delta - learning_rate * gparam))
        updates.append((param, param + mu * mu * delta - (1 + mu) * learning_rate * gparam))
        grad_values.append(gparam.norm(L=2))
        param_norm.append(param.norm(L=2))
    grad_ = T.stack(*grad_values)
    param_ = T.stack(*param_norm)
    return ((cost1, cost2, cost3, grad_, param_), updates)
Author: WenjunJiang, Project: DCN, Lines: 35, Source: multi_layer_km.py
Example 9: __init__
def __init__(self, n_in):
    self.X = theanoTensor.matrix('X', dtype=theano.config.floatX)
    self.y = theanoTensor.vector('y', dtype=theano.config.floatX)
    self.W = theano.shared(name='W',
                           value=np.ones((n_in,), dtype=theano.config.floatX),
                           borrow=True)
    self.b = theano.shared(name='b',
                           value=np.cast[theano.config.floatX](0.0),
                           borrow=True)
    y_pred = theanoTensor.dot(self.X, self.W) + self.b
    self.MSe = theanoTensor.mean(theanoTensor.pow(y_pred - self.y, 2))
    self.MSy = theanoTensor.mean(theanoTensor.pow(self.y, 2))
    self.R2 = 1 - (self.MSe / self.MSy)
    paramList = [self.W, self.b]
    grad_wrtParams = theanoTensor.grad(self.getMSE(), wrt=paramList)
    learning_rate = 1e-3
    updates = [(p[0], p[0] - learning_rate * p[1]) for p in zip(paramList, grad_wrtParams)]
    self.train_model = theano.function(
        inputs=[self.X, self.y],
        outputs=[self.getMSE()],
        updates=updates
    )
    self.test_model = theano.function(
        inputs=[self.X, self.y],
        outputs=[self.getR2()],
    )
Author: bmcmenamin, Project: theanoStocks, Lines: 32, Source: theanoLstm.py
Example 10: __call__
def __call__(self, loss):
    loss += self.l1 * T.sum(T.mean(abs(self.layer.get_output(True)), axis=0))
    loss += self.l2 * T.sum(T.mean(self.layer.get_output(True) ** 2, axis=0))
    loss += self.l_bin * T.sum(
        T.mean(T.pow(self.layer.get_output(True), self.k) * T.pow(1 - self.layer.get_output(True), self.k), axis=0)
    )
    return loss
Author: wolet, Project: keras, Lines: 7, Source: regularizers.py
Example 11: objective
def objective(x):
    """
    objective function
    @param x: input vector
    @return: value of objective function
    """
    z = x - objective.offset
    return T.sum(T.pow(z, 4) - 16 * T.pow(z, 2) + 5 * z, axis=1) / 2
Author: murbard, Project: vectornet, Lines: 8, Source: vectornet.py
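For context, here is a minimal sketch of how such an objective might be compiled and evaluated; the offset value and input shapes are illustrative assumptions, not taken from the project:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
objective.offset = np.zeros(2)  # hypothetical offset; must be set before building the graph
f = theano.function([x], objective(x), allow_input_downcast=True)
print(f(np.ones((3, 2))))  # one value per row: (1 - 16 + 5) * 2 / 2 = -10 -> [-10. -10. -10.]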
Example 12: evolve
def evolve(self, x, n, k, gamma):
    """ Compute time-derivative at current state
    Model: dx/dt = x^n / (x^n + K^n) - gamma*x
    This leads to single-species bistability.
    """
    dxdt = T.pow(x, n) / (T.pow(x, n) + T.pow(k, n)) - gamma * x
    return dxdt
Author: martinholub, Project: martinholub.github.io, Lines: 8, Source: sde_theano.py
Example 13: get_box_mask_se
def get_box_mask_se(a, b):
    '''
    Return a (batch_size, grid_num, box_num, 1) tensor as a mask.
    '''
    se = T.pow(T.pow(a - b, 2).sum(axis=-1), .5)
    sem = se.min(axis=-1, keepdims=True)  # find the box with the lowest error
    se_mask = T.eq(se, sem).reshape((a.shape[0], a.shape[1], a.shape[2], 1))
    return se_mask
Author: lyf910919, Project: Darknet.keras, Lines: 8, Source: custom_loss.py
Example 14: gamma_params
def gamma_params(mode=10., sd=10.):
    '''
    Converts the mode and sd of a gamma distribution to its shape and rate.
    '''
    var = Tns.pow(sd, 2)
    rate = (mode + Tns.pow(Tns.pow(mode, 2) + 4 * var, 0.5)) / (2 * var)
    shape = 1 + mode * rate
    return shape, rate
Author: nwilming, Project: mcmodels, Lines: 8, Source: appc.py
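Since gamma_params builds symbolic expressions, the results can be inspected with eval(); a quick sanity check (illustrative, not from the project):

shape, rate = gamma_params(mode=10., sd=10.)
print(shape.eval(), rate.eval())  # approximately 2.618 and 0.162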
Example 15: get_input_vectors
def get_input_vectors(shape, phases, scaling, offset):
    x = T.repeat(offset[0] + T.arange(shape[0]) / scaling, shape[1] * phases).reshape(
        (shape[0], shape[1], phases)) * T.pow(2, T.arange(phases))
    y = T.repeat(T.tile(offset[1] + T.arange(shape[1]) / scaling, shape[0]).reshape(
        (shape[0], shape[1], 1)), phases, axis=2) * T.pow(2, T.arange(phases))
    z = T.tile(offset[2] + 10 * T.arange(phases), shape[0] * shape[1]).reshape((shape[0], shape[1], phases, 1))
    x = x.reshape((shape[0], shape[1], phases, 1))
    y = y.reshape((shape[0], shape[1], phases, 1))
    return T.concatenate([x, y, z], axis=3).reshape((shape[0] * shape[1] * phases, 3)).astype('float32')
Author: pinae, Project: simplexnoise, Lines: 9, Source: theano-simplex-matrix.py
Example 16: _loopoverallballallbatch
def _loopoverallballallbatch(self, ballid):
    # Render one Gaussian blob per batch element, centred at the (x, y)
    # stored in self.middle and scaled by the per-ball multiplier.
    ox = self.middle[:, (ballid) * 3].reshape((self.batchsize, 1))
    x = T.tile(ox, (1, self.height * self.width)).reshape((self.batchsize, self.height, self.width))
    oy = self.middle[:, (ballid) * 3 + 1].reshape((self.batchsize, 1))
    y = T.tile(oy, (1, self.height * self.width)).reshape((self.batchsize, self.height, self.width))
    w = T.tile(T.tile(T.arange(0, self.width), (self.height,)), (self.batchsize,)).reshape((self.batchsize, self.height, self.width))
    h = T.tile(T.tile(T.arange(0, self.height).reshape((self.height, 1)), (1, self.width)), (self.batchsize, 1)).reshape((self.batchsize, self.height, self.width))
    multiply = T.tile(self.middle[:, (ballid) * 3 + 2].reshape((self.batchsize, 1)), (1, self.height * self.width)).reshape((self.batchsize, self.height, self.width))
    results = multiply * T.exp((T.pow(x - w, 2) + T.pow(y - h, 2)) * (-1.0 / self.sigma))
    return results
Author: caomw, Project: CNNHandPoseEstimationTotal, Lines: 10, Source: customscan.py
Example 17: init_fun_
def init_fun_(self, dim_state, batch_size, gamma, learning_rate, momentum, reward_scaling, reward_scaling_update):
    """Define and compile functions to train and evaluate the network
    :param dim_state: dimensions of a single state tensor
    :param batch_size: number of transitions per mini-batch
    :param gamma: future reward discount factor
    :param learning_rate: SGD learning rate
    :param momentum: Nesterov momentum coefficient
    :param reward_scaling: initial reward scaling factor
    :param reward_scaling_update: whether to adapt the reward scaling
    :return:
    """
    if len(dim_state) != 3:
        raise ValueError("We only support 3 dimensional states.")
    # inputs
    old_states, new_states = T.tensor4s('old_states', 'new_states')  # (BATCH_SIZE, MEMORY_LENGTH, DIM_STATE[0], DIM_STATE[1])
    actions = T.ivector('actions')  # (BATCH_SIZE,)
    rewards = T.vector('rewards')  # (BATCH_SIZE,)
    rs = shared(value=reward_scaling * 1.0, name='reward_scaling')
    # intermediates
    predict_q = lasagne.layers.get_output(layer_or_layers=self.qnn, inputs=old_states)
    predict_next_q = lasagne.layers.get_output(layer_or_layers=self.qnn_target, inputs=new_states)
    target_q = rewards / rs + gamma * T.max(predict_next_q, axis=1)
    # penalty
    singularity = 1 + 1e-3
    penalty = T.mean(
        1 / T.pow(predict_q[T.arange(batch_size), actions] - singularity, 2) +
        1 / T.pow(predict_q[T.arange(batch_size), actions] + singularity, 2) - 2)
    # outputs
    loss = T.mean((predict_q[T.arange(batch_size), actions] - target_q) ** 2) + (1e-5) * penalty
    # weight update formulas (mini-batch SGD with momentum)
    params = lasagne.layers.get_all_params(self.qnn, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=learning_rate, momentum=momentum)
    updates_rs = lasagne.updates.nesterov_momentum(loss, [rs], learning_rate=learning_rate, momentum=momentum)
    # functions
    fun_train_qnn = theano.function([old_states, actions, rewards, new_states], loss, updates=updates, allow_input_downcast=True)
    fun_adapt_rs = theano.function([old_states, actions, rewards, new_states], loss, updates=updates_rs, allow_input_downcast=True)
    def fun_clone_target():
        lasagne.layers.helper.set_all_param_values(
            self.qnn_target,
            lasagne.layers.helper.get_all_param_values(self.qnn)
        )
    fun_q_lookup = theano.function([old_states], predict_q, allow_input_downcast=True)
    fun_rs_lookup = rs.get_value
    return fun_train_qnn, fun_adapt_rs, fun_clone_target, fun_q_lookup, fun_rs_lookup
Author: zaxliu, Project: dqn4wirelesscontrol, Lines: 55, Source: qnn.py
Example 18: integrand_w_flat
def integrand_w_flat(z, Om, w):
    """
    :param z: redshift
    :param Om: matter content
    :param w: DE EOS
    :return: theano array of 1/H(z)
    """
    zp = 1 + z
    Ode = 1 - Om - Or  # dark energy density from the flatness condition (Or is a module-level radiation density)
    return T.power((T.pow(zp, 3) * (Or * zp + Om) + Ode * T.pow(zp, 3.0 * (1 + w))), -0.5)
Author: drJfunk, Project: supernova, Lines: 11, Source: cosmo.py
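As a sanity check, the integrand at z = 0 should equal 1 for any flat cosmology, since H(0) = H0. A minimal sketch, assuming Or is available as a small module-level constant (the value below is hypothetical):

import theano
import theano.tensor as T

Or = 8.4e-5  # hypothetical radiation density; defined at module level in the project
z = T.dscalar('z')
f = theano.function([z], integrand_w_flat(z, 0.3, -1.0))
print(f(0.0))  # -> 1.0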
Example 19: _loopoverallball
def _loopoverallball(self, ballid, batchid):
    ox = self.middle[batchid][ballid * 2].reshape((1, 1))
    print("ox:", ox.ndim)
    x = T.tile(ox, (self.height, self.width))
    oy = self.middle[batchid][ballid * 2 + 1].reshape((1, 1))
    y = T.tile(oy, (self.height, self.width))
    w = T.tile(T.arange(0, self.width), (self.height,)).reshape((self.height, self.width))
    h = T.tile(T.arange(0, self.height).reshape((self.height, 1)), (1, self.width))
    cof = (T.pow(x - w, 2) + T.pow(y - h, 2)) * (-1.0 / self.sigma)
    print(T.exp(cof).ndim)
    return T.exp(cof)
Author: caomw, Project: CNNHandPoseEstimationTotal, Lines: 11, Source: customtest.py
Example 20: evolve_system
def evolve_system(self, x, n, k, gamma):
    """ Compute time-derivative at current state
    Model: dx/dt = k^n / (x^n + K^n) - gamma*x
    This leads to sustained oscillations for 3+ species. Note that x is a matrix.
    Each species depends only on the preceding one, which can be implemented
    efficiently by rolling the matrix by `shift=-1` along the corresponding axis.
    """
    temp = T.pow(k, n) / (T.pow(x, n) + T.pow(k, n))
    dxdt = T.roll(temp, shift=-1, axis=1) - gamma * x
    return dxdt
Author: martinholub, Project: martinholub.github.io, Lines: 12, Source: sde_theano.py
Note: the theano.tensor.pow examples on this page were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many programmers; copyright remains with the original authors, and use or redistribution is subject to each project's license. Please do not repost without permission.