This article collects and summarizes typical usage examples of the theano.tensor.iscalar function in Python. If you have been wondering what exactly the Python iscalar function does and how to use it, the curated code examples here may help.
The following presents 20 code examples of the iscalar function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code examples.
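Before the examples, a quick orientation: T.iscalar creates a 0-dimensional symbolic variable of dtype int32, which is why it shows up throughout these examples as a minibatch index, a batch size, or an entity index. A minimal sketch, assuming only a standard Theano installation:

import theano
import theano.tensor as T

# iscalar: a 0-d symbolic variable of dtype int32; the optional name aids debugging
i = T.iscalar('i')
j = T.iscalar('j')

# compile and call a tiny function of the two integer scalars
f = theano.function([i, j], i * j + 1)
print(f(3, 4))  # prints 13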
Example 1: build_model
def build_model(self, train_set, test_set, validation_set):
"""
Building the model should be done prior to training. It will implement the training, testing and validation
functions.
This method should be called from any subsequent inheriting model.
:param loss: The loss funciton applied to training (cf. updates.py), e.g. mse.
:param update: The update function (optimization framework) used for training (cf. updates.py), e.g. sgd.
:param update_args: The args for the update function applied to training, e.g. (0.001,).
"""
print "### BUILDING MODEL ###"
self.train_args = {}
self.train_args['inputs'] = OrderedDict({})
self.train_args['outputs'] = OrderedDict({})
self.test_args = {}
self.test_args['inputs'] = OrderedDict({})
self.test_args['outputs'] = OrderedDict({})
self.validate_args = {}
self.validate_args['inputs'] = OrderedDict({})
self.validate_args['outputs'] = OrderedDict({})
self.sym_index = T.iscalar('index')
self.sym_batchsize = T.iscalar('batchsize')
self.sym_lr = T.scalar('learningrate')
self.batch_slice = slice(self.sym_index * self.sym_batchsize, (self.sym_index + 1) * self.sym_batchsize)
self.sh_train_x = theano.shared(np.asarray(train_set[0], dtype=theano.config.floatX), borrow=True)
self.sh_train_t = theano.shared(np.asarray(train_set[1], dtype=theano.config.floatX), borrow=True)
self.sh_test_x = theano.shared(np.asarray(test_set[0], dtype=theano.config.floatX), borrow=True)
self.sh_test_t = theano.shared(np.asarray(test_set[1], dtype=theano.config.floatX), borrow=True)
if validation_set is not None:
self.sh_valid_x = theano.shared(np.asarray(validation_set[0], dtype=theano.config.floatX), borrow=True)
self.sh_valid_t = theano.shared(np.asarray(validation_set[1], dtype=theano.config.floatX), borrow=True)
Developer ID: Britefury, Project: auxiliary-deep-generative-models, Lines: 35, Source: base.py
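The sym_index/sym_batchsize pair above is the standard Theano minibatch idiom: the compiled function receives only two integers and slices the shared-variable data via givens, so no array is copied into the function per call. A minimal sketch of that pattern, using random placeholder data and a dummy cost (both are assumptions, not taken from the project above):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
index = T.iscalar('index')          # which minibatch
batchsize = T.iscalar('batchsize')  # rows per minibatch
batch_slice = slice(index * batchsize, (index + 1) * batchsize)

# placeholder data and cost, for illustration only
sh_train_x = theano.shared(np.random.rand(100, 5).astype(theano.config.floatX))
cost = T.mean(x ** 2)

f_cost = theano.function([index, batchsize], cost,
                         givens={x: sh_train_x[batch_slice]})
print(f_cost(0, 10))  # cost over rows 0..9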
Example 2: getMinibatchTrainer
def getMinibatchTrainer(self, costFunction, variableToData, rms=True):
# define params
lr = T.fscalar('lr')
start = T.iscalar('start')
end = T.iscalar('end')
# Get the cost and its parameters.
params = costFunction[0]
cost = costFunction[1]
# Get the updates.
updates = self.getUpdates(cost, params, lr, rms)
# Store all state variables.
stateManager = StateManager([u[0] for u in updates])
    # Slice the data.
    givens = dict()
    for item in variableToData:
        givens[item.variable] = item.slice(start, end)
    # Define the training function.
    # (theano.Param was renamed theano.In in later Theano releases.)
    train_model = theano.function(inputs=[theano.Param(start, borrow=True),
                                          theano.Param(end, borrow=True),
                                          theano.Param(lr, borrow=True)],
                                  outputs=theano.Out(cost, borrow=True),
                                  updates=updates,
                                  givens=givens)
return train_model, stateManager
Developer ID: eseaflower, Project: SectraML, Lines: 31, Source: Trainer.py
Example 3: f_train
def f_train(self, t_x, t_corrupt = 0.2, t_rate = 0.1):
""" return training function of the following signiture:
input:
lower and upper indices on training data
alternative training data
return:
likelihood based cost
square distance between training data and prediction
"""
    x = T.matrix('x')  # pipe data through this symbol
q = self.t_corrupt(x, t_corrupt)
h = self.t_encode(q)
z = self.t_decode(h)
L = - T.sum(x * T.log(z) + (1 - x) * T.log(1 - z), axis=1)
cost = T.mean(L) # to be returned
dist = T.mean(T.sqrt(T.sum((x - z) ** 2, axis = 1))) # to be returned
grad = T.grad(cost, self.parm)
diff = [(p, p - t_rate * g) for p, g in zip(self.parm, grad)]
t_fr = T.iscalar()
t_to = T.iscalar()
return theano.function(
[t_fr, t_to],
[cost, dist],
updates = diff,
givens = {x : t_x[t_fr:t_to]},
name = "DA_trainer")
Developer ID: xiaoran831213, Project: az, Lines: 32, Source: da.py
Example 4: getTrainer
def getTrainer(self,lossType="NLL"):
    '''
    Return a function that performs minibatch SGD (MBSGD) on (trainX, trainY).
    '''
trainY = T.ivector('y')
alpha = T.dscalar('a')
    lowIdx = T.iscalar()   # batch index bounds (declared here but not used by this trainer)
    highIdx = T.iscalar()
trainX = T.matrix()
if lossType=="aNLL":
loss = self.aNLL(trainY)
elif lossType=='MSE':
loss = self.MSE(trainY)
else:
loss = self.NLL(trainY)
dW = T.grad(cost = loss, wrt = self.W)
db = T.grad(cost = loss, wrt = self.b)
updates = [(self.W,self.W - alpha * dW), (self.b,self.b - alpha * db)]
trainer = theano.function(
inputs = [trainX,trainY,alpha],
outputs = loss,
updates=updates,
givens = {
self.input : trainX,
},
allow_input_downcast=True
)
return trainer
Developer ID: sidnarayanan, Project: RelativisticML, Lines: 28, Source: Logistic.py
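Because lowIdx and highIdx are left unused, the returned trainer expects the caller to slice the minibatches itself; a hedged usage sketch in which model, train_x and train_y are hypothetical names:

# hypothetical driver loop for the trainer returned by getTrainer
trainer = model.getTrainer(lossType="NLL")
for start in range(0, len(train_x), 128):
    loss = trainer(train_x[start:start + 128],  # minibatch features
                   train_y[start:start + 128],  # minibatch labels
                   0.01)                        # alpha: the learning rate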
Example 5: SimFnIdx
def SimFnIdx(fnsim, embeddings, leftop, rightop):
"""
This function returns a Theano function to measure the similarity score
for a given triplet of entity indexes.
:param fnsim: similarity function (on Theano variables).
:param embeddings: an Embeddings instance.
:param leftop: class for the 'left' operator.
:param rightop: class for the 'right' operator.
"""
embedding, relationl, relationr = parse_embeddings(embeddings)
# Inputs
idxo = T.iscalar('idxo')
idxr = T.iscalar('idxr')
idxl = T.iscalar('idxl')
# Graph
lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))
rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))
rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
simi = fnsim(leftop(lhs, rell), rightop(rhs, relr))
"""
Theano function inputs.
:input idxl: index value of the 'left' member.
:input idxr: index value of the 'right' member.
:input idxo: index value of the relation member.
Theano function output.
:output simi: score value.
"""
return theano.function([idxl, idxr, idxo], [simi],
on_unused_input='ignore')
Developer ID: DevSinghSachan, Project: SME, Lines: 33, Source: model.py
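For orientation, the compiled scorer takes the three integer indexes in the order [idxl, idxr, idxo] and returns a one-element list; a short hedged sketch with placeholder index values:

simfn = SimFnIdx(fnsim, embeddings, leftop, rightop)
score, = simfn(0, 1, 2)  # left entity 0, right entity 1, relation 2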
Example 6: multMatVect
def multMatVect(v, A, m1, B, m2):
# TODO : need description for parameter and return
"""
Multiply the first half of v by A with a modulo of m1 and the second half
by B with a modulo of m2.
Notes
-----
The parameters of dot_modulo are passed implicitly because passing them
explicitly takes more time than running the function's C-code.
"""
if multMatVect.dot_modulo is None:
A_sym = tensor.lmatrix('A')
s_sym = tensor.ivector('s')
m_sym = tensor.iscalar('m')
A2_sym = tensor.lmatrix('A2')
s2_sym = tensor.ivector('s2')
m2_sym = tensor.iscalar('m2')
o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
multMatVect.dot_modulo = function(
[A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o, profile=False)
# This way of calling the Theano fct is done to bypass Theano overhead.
f = multMatVect.dot_modulo
f.input_storage[0].storage[0] = A
f.input_storage[1].storage[0] = v[:3]
f.input_storage[2].storage[0] = m1
f.input_storage[3].storage[0] = B
f.input_storage[4].storage[0] = v[3:]
f.input_storage[5].storage[0] = m2
f.fn()
r = f.output_storage[0].storage[0]
    return r

# The compiled function is cached on the function object itself and must be
# initialised once before the first call (as in the original Theano source):
multMatVect.dot_modulo = None
Developer ID: bbudescu, Project: Theano, Lines: 35, Source: rng_mrg.py
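The input_storage manipulation above skips theano.function's per-call argument checking, which pays off because this function is tiny and called very often. For comparison, a sketch of the conventional call path, with placeholder values and DotModulo imported as in Theano's own test suite:

import numpy
import theano
from theano import tensor
from theano.sandbox import rng_mrg

A_sym = tensor.lmatrix('A')
s_sym = tensor.ivector('s')
m_sym = tensor.iscalar('m')
A2_sym = tensor.lmatrix('A2')
s2_sym = tensor.ivector('s2')
m2_sym = tensor.iscalar('m2')
o = rng_mrg.DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
f = theano.function([A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o)

# a plain call validates and refills the input containers on every invocation;
# that per-call overhead is exactly what the input_storage trick avoids
A = numpy.random.randint(0, 2 ** 16, (3, 3)).astype('int64')
v = numpy.random.randint(0, 2 ** 16, 6).astype('int32')
m1 = numpy.int32(2 ** 31 - 1)
m2 = numpy.int32(2 ** 31 - 87)
r = f(A, v[:3], m1, A, v[3:], m2)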
Example 7: RankRightFnIdx_filtered
def RankRightFnIdx_filtered(fnsim, embeddings, leftop, rightop, subtensorspec=None):
"""
This function returns a Theano function to measure the similarity score of
all 'right' entities given couples of relation and 'left' entities (as
index values).
"""
embedding, relationl, relationr = parse_embeddings(embeddings)
# Inputs
idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo')
rightparts = T.ivector('rightparts')
# Graph
lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) # lhs: 1xD vector containing the embedding of idxl
if subtensorspec is not None:
# We compute the score only for a subset of entities
rhs = (embedding.E[:, :subtensorspec]).T
else:
rhs = embedding.E.T # rhs: NxD embedding matrix
rhs = rhs[rightparts, :] # select the right parts not appearing
# in the train/valid/test sets
rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) # rell: 1xD vector containing the embedding of idxo (relationl)
relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) # relr: 1xD vector containing the embedding of idxo (relationr)
tmp = leftop(lhs, rell) # a = rell(lhs)
# b = relr(rhs)
simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi = fnsim(a, b)
return theano.function([idxl, idxo, rightparts], [simi], on_unused_input='ignore')
Developer ID: pminervini, Project: ebemkg, Lines: 29, Source: evaluation.py
Example 8: RankRightFnIdx_Schema
def RankRightFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None):
embedding, relationl, relationr = parse_embeddings(embeddings)
# Inputs
idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo')
g = T.matrix('g')
# Graph
lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) # lhs: 1xD vector containing the embedding of idxl
if subtensorspec is not None:
# We compute the score only for a subset of entities
rhs = (embedding.E[:, :subtensorspec]).T
else:
rhs = embedding.E.T # rhs: NxD embedding matrix
rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) # rell: 1xD vector containing the embedding of idxo (relationl)
relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) # relr: 1xD vector containing the embedding of idxo (relationr)
tmp = leftop(lhs, rell) # a = rell(lhs)
# b = relr(rhs)
# Negative Energy
simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi = fnsim(a, b)
pen_simi = g[0, :].T * prior.P[idxo, 0].T + g[1, :].T * prior.P[idxo, 1].T
simi = simi - pen_simi
return theano.function([idxl, idxo, g], [simi], on_unused_input='ignore')
Developer ID: emir-munoz, Project: schema, Lines: 29, Source: evaluation.py
Example 9: compile_bn
def compile_bn(data_set, model, make_updates):
"""
データをsharedにして、modelとoptimizerを使ってcomputational graphを作って、
コンパイルする。
Parameters
-----------
data_set : list of numpy.ndarray
feature_vec : ndarray
(n_pixels, D, n_tensors)
gt_vec : ndarray
(n_pixels, D)
test_feature_vec, test_gt_vec
model : models.Rcn1layerとか
optimizer : optimizers.SGDとか
"""
s_input, s_target, s_test_input, s_test_target = share_data_sets(*data_set)
nn, obj, train_mse, model_updates, model_param_l = model.make_graph_train()
test_mse, test_out = model.make_graph_test()
updates, opt_param_list = make_updates(loss=obj, param_list=nn.param_l)
i_batch = T.iscalar("i_batch")
index_list = T.ivector("index_list")
batch_size = T.iscalar("batch_size")
    od = OrderedDict()
    # dict.items() returns a view in Python 3, so materialize before concatenating
    for k, e in list(updates.items()) + list(model_updates.items()):
        od[k] = e
f_train = theano.function(
inputs=[i_batch, index_list, batch_size]+opt_param_list+model_param_l,
updates=od,
givens=[(nn.x_t3, s_input[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]]),
(nn.t_mat, s_target[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]])],
on_unused_input='warn')
f_training_error = theano.function(
inputs=[i_batch, index_list, batch_size]+model_param_l,
outputs=[train_mse],
givens=[(nn.x_t3, s_input[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]]),
(nn.t_mat, s_target[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]])],
on_unused_input='warn')
f_test_error = theano.function(
inputs=[i_batch, index_list, batch_size],
outputs=[test_mse],
givens=[(nn.x_t3, s_test_input[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]]),
(nn.t_mat, s_test_target[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]])])
f_output = theano.function(
inputs=[nn.x_t3],
outputs=[test_out])
result = [f_train, f_training_error, f_test_error, f_output, s_input,
s_target, s_test_input, s_test_target, nn.param_l]
return result
Developer ID: matsui-k20xx, Project: d-rcn, Lines: 60, Source: compiling.py
Example 10: test_compute_lnZ
def test_compute_lnZ(self):
v = T.matrix('v')
z = T.iscalar('z')
V = cartesian([(0, 1)] * self.input_size, dtype=config.floatX)
#H = cartesian([(0, 1)] * self.hidden_size, dtype=config.floatX)
# We simulate having an infinite number of hidden units by adding lot of hidden units with parameters set to 0.
nb_hidden_units_to_add = 10000
model = iRBM(input_size=self.model.input_size,
hidden_size=self.model.hidden_size + nb_hidden_units_to_add,
beta=self.model.beta.get_value())
model.W.set_value(np.r_[self.model.W.get_value(), np.zeros((nb_hidden_units_to_add, model.input_size), dtype=theano.config.floatX)])
model.b.set_value(np.r_[self.model.b.get_value(), np.zeros((nb_hidden_units_to_add,), dtype=theano.config.floatX)])
model.c.set_value(self.model.c.get_value())
    F_vz = theano.function([v, z], model.F(v, z))
energies = []
for z in range(1, model.hidden_size+1):
energies.append(F_vz(V, z))
lnZ = logsumexp(-np.array(energies)).eval()
lnZ_using_free_energy = theano.function([v], logsumexp(-self.model.free_energy(v)))
assert_almost_equal(lnZ_using_free_energy(V), lnZ, decimal=5) # decimal=5 needed for float32
Developer ID: MarcCote, Project: iRBM, Lines: 29, Source: test_irbm.py
Example 11: compile_functions
def compile_functions(self, opt, **args):
    print('... compiling training functions')
gen_cost, gen_show_cost, dis_cost, cost_pfake, cost_ptrue = self.get_cost()
self.opt = opt
gen_updates = self.opt.get_updates(gen_cost, self.gen_params)
dis_updates = self.opt.get_updates(dis_cost, self.dis_params)
self.get_noise = theano.function([],
self.theano_rng.uniform(size=(self.batch_size, self.num_z),
low=-1, high=1)
)
start_index = T.iscalar('start_index')
end_index = T.iscalar('end_index')
if self.uint8_data:
given_train_x = T.cast(self.shared_train[start_index:end_index], dtype='float32')
else:
given_train_x = self.shared_train[start_index:end_index]
self.train_gen_model = theano.function(
[self.z],
gen_show_cost,
updates=gen_updates,
)
self.train_dis_model = theano.function(
[start_index, end_index, self.z],
[cost_pfake, cost_ptrue],
updates=dis_updates,
givens={self.x: given_train_x}
)
Developer ID: ybzhou, Project: Gemini, Lines: 33, Source: generative_adversarial_net.py
Example 12: multMatVect
def multMatVect(v, A, m1, B, m2):
"""
multiply the first half of v by A with a modulo of m1
and the second half by B with a modulo of m2
Note: The parameters of dot_modulo are passed implicitly because passing
them explicitly takes more time then running the function's C-code.
"""
if multMatVect.dot_modulo is None:
A_sym = tensor.lmatrix("A")
s_sym = tensor.ivector("s")
m_sym = tensor.iscalar("m")
A2_sym = tensor.lmatrix("A2")
s2_sym = tensor.ivector("s2")
m2_sym = tensor.iscalar("m2")
o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
multMatVect.dot_modulo = function([A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o)
# This way of calling the Theano fct is done to bypass Theano overhead.
f = multMatVect.dot_modulo
f.input_storage[0].storage[0] = A
f.input_storage[1].storage[0] = v[:3]
f.input_storage[2].storage[0] = m1
f.input_storage[3].storage[0] = B
f.input_storage[4].storage[0] = v[3:]
f.input_storage[5].storage[0] = m2
f.fn()
r = f.output_storage[0].storage[0]
    return r

# The compiled function is cached on the function object itself and must be
# initialised once before the first call (as in the original Theano source):
multMatVect.dot_modulo = None
Developer ID: Tanjay94, Project: Theano, Lines: 30, Source: rng_mrg.py
Example 13: init_nnet
def init_nnet(W, n_classes, vec_dim):
"""Initialize neural network.
Args:
W (theano.shared): embedding matrix
n_classes: number of classes to be predicted
vec_dim: dimensionality of the embeddings
"""
w_idx = TT.iscalar(name="w_idx")
y_gold = TT.iscalar(name="y_gold")
embs = W[w_idx]
Theta = theano.shared(value=ORTHOGONAL.sample((n_classes, vec_dim)),
name="Theta")
beta = theano.shared(value=HE_UNIFORM.sample((1, n_classes)), name="beta")
y_probs = TT.nnet.softmax(TT.dot(Theta, embs.T).flatten() + beta).flatten()
params = [Theta]
cost = -TT.mean(TT.log(y_probs[y_gold]))
updates = sgd_updates_adadelta(params, cost)
train = theano.function([w_idx, y_gold], cost, updates=updates)
y_pred = TT.argmax(y_probs)
y_score = y_probs[y_pred]
predict = theano.function([w_idx], (y_pred, y_score))
acc = TT.eq(y_gold, y_pred)
validate = theano.function([w_idx, y_gold], acc)
return (train, validate, predict, params)
Developer ID: WladimirSidorenko, Project: SentiLex, Lines: 26, Source: tang.py
Example 14: build_model
def build_model(shared_params, options, other_params):
"""
Build the complete neural network model and return the symbolic variables
"""
# symbolic variables
x = tensor.matrix(name="x", dtype=floatX)
y1 = tensor.iscalar(name="y1")
y2 = tensor.iscalar(name="y2")
# lstm cell
(ht, ct) = lstm_cell(x, shared_params, options, other_params) # gets the ht, ct
# softmax 1 i.e. frame type prediction
activation = tensor.dot(shared_params['softmax1_W'], ht).transpose() + shared_params['softmax1_b']
frame_pred = tensor.nnet.softmax(activation) # .transpose()
# softmax 2 i.e. gesture class prediction
#
# predicted probability for frame type
f_pred_prob = theano.function([x], frame_pred, name="f_pred_prob")
# predicted frame type
f_pred = theano.function([x], frame_pred.argmax(), name="f_pred")
# cost
cost = ifelse(tensor.eq(y1, 1), -tensor.log(frame_pred[0, 0] + options['log_offset'])
* other_params['begin_cost_factor'],
ifelse(tensor.eq(y1, 2), -tensor.log(frame_pred[0, 1] + options['log_offset'])
* other_params['end_cost_factor'],
ifelse(tensor.eq(y1, 3), -tensor.log(frame_pred[0, 2] + options['log_offset']),
tensor.abs_(tensor.log(y1)))), name='ifelse_cost')
    # function for output of the current lstm cell and softmax prediction
f_model_cell_output = theano.function([x], (ht, ct, frame_pred), name="f_model_cell_output")
# return the model symbolic variables and theano functions
return x, y1, y2, f_pred_prob, f_pred, cost, f_model_cell_output
Developer ID: inblueswithu, Project: Theano_Trail, Lines: 35, Source: lstm_model_3b.py
Example 15: __init__
def __init__(self, in_size, out_size, dim_y, dim_pos, hidden_size_encoder, hidden_size_decoder, cell = "gru", optimizer = "rmsprop", p = 0.5, num_sents = 1):
self.X = T.matrix("X")
self.Y_y = T.matrix("Y_y")
self.Y_pos = T.matrix("Y_pos")
self.in_size = in_size
self.out_size = out_size
self.dim_y = dim_y
self.dim_pos = dim_pos
self.hidden_size_encoder = hidden_size_encoder
self.hidden_size_decoder = hidden_size_decoder
self.cell = cell
self.drop_rate = p
self.num_sents = num_sents
self.is_train = T.iscalar('is_train') # for dropout
self.batch_size = T.iscalar('batch_size') # for mini-batch training
self.mask = T.matrix("mask")
self.mask_y = T.matrix("mask_y")
self.optimizer = optimizer
print "seq2seq out size ", self.out_size
if self.out_size == self.dim_y + self.dim_pos:
print "size right !"
self.define_layers()
self.define_train_test_funcs()
Developer ID: luochuwei, Project: POS_tag_NN, Lines: 25, Source: Seq2Seq.py
Example 16: compile
def compile(self, log_pxz, log_qpz, cost, a_pxz):
batch_idx = T.iscalar()
learning_rate = T.fscalar()
updates, norm_grad = self.hp.optimizer(cost, self.params.values(), lr=learning_rate)
self.outidx = {'cost':0, 'cost_p':1, 'cost_q':2, 'norm_grad':3}
outputs = [cost, log_pxz, log_qpz]
self.train = theano.function(inputs=[batch_idx, learning_rate],
givens={self.X:self.data['tr_X'][batch_idx * self.hp.batch_size :
(batch_idx+1) * self.hp.batch_size]},
outputs=outputs + [norm_grad], updates=updates)
self.validate = theano.function(inputs=[batch_idx],
givens={self.X:self.data['tr_X'][batch_idx * self.hp.test_batch_size :
(batch_idx+1) * self.hp.test_batch_size]},
outputs=outputs)
self.test = theano.function(inputs=[batch_idx],
givens={self.X:self.data['te_X'][batch_idx * self.hp.test_batch_size :
(batch_idx+1) * self.hp.test_batch_size]},
outputs=outputs)
n_samples = T.iscalar()
if self.resample_z:
self.data['ge_Z'] = srnd.normal((self.max_gen_samples, self.n_z), dtype=theano.config.floatX)
else:
self.data['ge_Z'] = shared(np.random.randn(self.max_gen_samples, self.n_z))
self.decode = theano.function(inputs=[n_samples],
givens={self.Z:self.data['ge_Z'][:n_samples]},
outputs=a_pxz)
Developer ID: ronvohra, Project: Theano-Lights, Lines: 34, Source: modelbase.py
Example 17: test_multMatVect
def test_multMatVect():
A1 = tensor.lmatrix('A1')
s1 = tensor.ivector('s1')
m1 = tensor.iscalar('m1')
A2 = tensor.lmatrix('A2')
s2 = tensor.ivector('s2')
m2 = tensor.iscalar('m2')
g0 = rng_mrg.DotModulo()(A1, s1, m1, A2, s2, m2)
f0 = theano.function([A1, s1, m1, A2, s2, m2], g0)
i32max = numpy.iinfo(numpy.int32).max
A1 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
s1 = numpy.random.randint(0, i32max, 3).astype('int32')
m1 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")
A2 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
s2 = numpy.random.randint(0, i32max, 3).astype('int32')
m2 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")
f0.input_storage[0].storage[0] = A1
f0.input_storage[1].storage[0] = s1
f0.input_storage[2].storage[0] = m1
f0.input_storage[3].storage[0] = A2
f0.input_storage[4].storage[0] = s2
f0.input_storage[5].storage[0] = m2
r_a1 = rng_mrg.matVecModM(A1, s1, m1)
r_a2 = rng_mrg.matVecModM(A2, s2, m2)
f0.fn()
r_b = f0.output_storage[0].value
assert numpy.allclose(r_a1, r_b[:3])
assert numpy.allclose(r_a2, r_b[3:])
Developer ID: gyenney, Project: Tools, Lines: 34, Source: test_rng_mrg.py
Example 18: RankLeftFnIdx_Schema
def RankLeftFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None):
embedding, relationl, relationr = parse_embeddings(embeddings)
# Inputs
idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo')
g = T.matrix('g')
# Graph
if subtensorspec is not None:
# We compute the score only for a subset of entities
lhs = (embedding.E[:, :subtensorspec]).T
else:
lhs = embedding.E.T
rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))
rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
tmp = rightop(rhs, relr)
simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1])))
pen_simi = g[0, :].T * prior.P[idxo, 0].T + g[1, :].T * prior.P[idxo, 1].T
simi = simi - pen_simi
return theano.function([idxr, idxo, g], [simi], on_unused_input='ignore')
Developer ID: emir-munoz, Project: schema, Lines: 26, Source: evaluation.py
Example 19: compile
def compile(self, model):
assert isinstance(model, Model)
self.model = model
dataset = self.dataset
X, Y = dataset.preproc(dataset.X, dataset.Y)
self.X = theano.shared(X, "X")
self.Y = theano.shared(Y, "Y")
self.logger.info("compiling do_loglikelihood")
n_samples = T.iscalar("n_samples")
batch_idx = T.iscalar("batch_idx")
batch_size = T.iscalar("batch_size")
first = batch_idx * batch_size
last = first + batch_size
X_batch, Y_batch = dataset.late_preproc(self.X[first:last], self.Y[first:last])
log_PX, _, _, _, KL, Hp, Hq = model.log_likelihood(X_batch, n_samples=n_samples)
batch_L = T.sum(log_PX)
batch_L2 = T.sum(log_PX ** 2)
batch_KL = [T.sum(kl) for kl in KL]
batch_Hp = [T.sum(hp) for hp in Hp]
batch_Hq = [T.sum(hq) for hq in Hq]
self.do_loglikelihood = theano.function(
inputs=[batch_idx, batch_size, n_samples],
outputs=[batch_L, batch_L2] + batch_KL + batch_Hp + batch_Hq,
name="do_likelihood",
)
Developer ID: yburda, Project: reweighted-ws, Lines: 30, Source: __init__.py
Example 20: test_clip_grad_int
def test_clip_grad_int():
# test that integers don't crash clip gradient
x = tensor.iscalar()
y = tensor.iscalar()
z = tensor.iscalar()
c = tensor.clip(x, y, z)
tensor.grad(c, [x, y, z])
Developer ID: AI-Cdrone, Project: Theano, Lines: 8, Source: test_elemwise.py
Note: The theano.tensor.iscalar examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many programmers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License. Do not repost without permission.