This article collects typical usage examples of the theano.tensor.fmatrix function in Python. If you have been wondering what exactly fmatrix does, how to use it, or what real-world code calling it looks like, the curated examples below should help.
A total of 20 code examples of the fmatrix function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code samples.
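Before the examples, here is a minimal, self-contained sketch of what T.fmatrix does: it declares a symbolic 2-D float32 variable, which theano.function can then compile into a callable. (The variable names here are illustrative only.)

import numpy as np
import theano
import theano.tensor as T

x = T.fmatrix('x')                       # symbolic 2-D float32 matrix
y = T.fmatrix('y')
f = theano.function([x, y], x * y + 1)   # compile an elementwise expression

a = np.ones((2, 3), dtype='float32')
b = np.full((2, 3), 2.0, dtype='float32')
print(f(a, b))                           # prints a 2x3 array of 3.0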
Example 1: __init__
def __init__(self, input_layers, *args, **kwargs):
    super(LogLossObjective, self).__init__(input_layers, *args, **kwargs)
    self.input_systole = input_layers["systole:onehot"]
    self.input_diastole = input_layers["diastole:onehot"]
    self.target_vars["systole:onehot"] = T.fmatrix("systole_target_onehot")
    self.target_vars["diastole:onehot"] = T.fmatrix("diastole_target_onehot")
Author: 317070, Project: kaggle-heart, Lines: 7, Source: objectives.py
Example 2: cmp
def cmp(a_shp, b_shp):
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    scalar = tensor.fscalar()
    av = my_rand(*a_shp)
    bv = my_rand(*b_shp)

    f = theano.function(
        [a, b],
        tensor.dot(a, b) * numpy.asarray(4, 'float32'),
        mode=mode_with_gpu)
    f2 = theano.function(
        [a, b],
        tensor.dot(a, b) * numpy.asarray(4, 'float32'))
    t = f.maker.fgraph.toposort()
    assert len(t) == 4
    assert isinstance(t[0].op, tcn.GpuFromHost)
    assert isinstance(t[1].op, tcn.GpuFromHost)
    assert isinstance(t[2].op, tcn.blas.GpuDot22Scalar)
    assert isinstance(t[3].op, tcn.HostFromGpu)
    assert numpy.allclose(f(av, bv), f2(av, bv))

    f = theano.function([a, b, scalar], tensor.dot(a, b) * scalar,
                        mode=mode_with_gpu)
    f2 = theano.function([a, b, scalar], tensor.dot(a, b) * scalar)
    t = f.maker.fgraph.toposort()
    assert len(t) == 4
    assert isinstance(t[0].op, tcn.GpuFromHost)
    assert isinstance(t[1].op, tcn.GpuFromHost)
    assert isinstance(t[2].op, tcn.blas.GpuDot22Scalar)
    assert isinstance(t[3].op, tcn.HostFromGpu)
    assert numpy.allclose(f(av, bv, 0.5), f2(av, bv, 0.5))
Author: gyenney, Project: Tools, Lines: 32, Source: test_blas.py
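As a usage note: within its test module this helper is presumably invoked with concrete, dot-compatible shapes, roughly as in the hypothetical call below (my_rand and mode_with_gpu come from that module, not from Theano itself).

# hypothetical invocation of the helper above
cmp((3, 4), (4, 5))   # inner dimensions must match for tensor.dot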
Example 3: test_pickle_unpickle_without_reoptimization
def test_pickle_unpickle_without_reoptimization():
    mode = theano.config.mode
    if mode in ["DEBUG_MODE", "DebugMode"]:
        mode = "FAST_RUN"
    x1 = T.fmatrix('x1')
    x2 = T.fmatrix('x2')
    x3 = theano.shared(numpy.ones((10, 10), dtype=floatX))
    x4 = theano.shared(numpy.ones((10, 10), dtype=floatX))
    y = T.sum(T.sum(T.sum(x1**2 + x2) + x3) + x4)

    updates = OrderedDict()
    updates[x3] = x3 + 1
    updates[x4] = x4 + 1
    f = theano.function([x1, x2], y, updates=updates, mode=mode)

    # now pickle the compiled theano fn
    string_pkl = pickle.dumps(f, -1)

    # inputs used to evaluate f
    in1 = numpy.ones((10, 10), dtype=floatX)
    in2 = numpy.ones((10, 10), dtype=floatX)

    # test unpickling without re-optimization
    default = theano.config.reoptimize_unpickled_function
    try:
        # the default is True
        theano.config.reoptimize_unpickled_function = False
        f_ = pickle.loads(string_pkl)
        assert f(in1, in2) == f_(in1, in2)
    finally:
        theano.config.reoptimize_unpickled_function = default
Author: ALISCIFP, Project: Segmentation, Lines: 31, Source: test_pickle_unpickle_theano_fn.py
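The same round trip can be seen in isolation. The sketch below (a minimal example assuming only theano and numpy are installed) pickles a compiled function and reloads it with reoptimize_unpickled_function left at its default of True.

import pickle
import numpy
import theano
import theano.tensor as T

x = T.fmatrix('x')
f = theano.function([x], x ** 2)

s = pickle.dumps(f, -1)   # serialize the compiled function
g = pickle.loads(s)       # by default, the graph is re-optimized here

xv = numpy.ones((2, 2), dtype='float32')
assert numpy.allclose(f(xv), g(xv))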
Example 4: test_elemwise_composite_float64
def test_elemwise_composite_float64():
    # Test that we don't fuse composite elemwise ops with float64 somewhere
    # inside: nvcc by default downcasts float64 to float32. We would need to
    # tell it not to do so, but that is only possible on some devices.
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    def get_all_basic_scalar(composite_op):
        l = []
        for i in composite_op.env.toposort():
            if isinstance(i, theano.scalar.Composite):
                l += get_all_basic_scalar(i)
            else:
                l.append(i)
        return l

    for mode in [mode_with_gpu, mode_with_gpu.excluding('gpu_after_fusion'),
                 mode_with_gpu.excluding('elemwise_fusion')]:
        f = pfunc([a, b],
                  tensor.cast(tensor.lt(tensor.cast(a, 'float64') ** 2,
                                        b),
                              'float32'), mode=mode)

        out = f(av, bv)
        assert numpy.all(out == ((av ** 2) < bv))
        for node in f.maker.env.toposort():
            if isinstance(node.op, cuda.GpuElemwise):
                if isinstance(node.op.scalar_op, theano.scalar.Composite):
                    scals = get_all_basic_scalar(node.op.scalar_op)
                    for s in scals:
                        assert not any([i.type.dtype == 'float64'
                                        for i in s.inputs + s.outputs])
Author: gexarcha, Project: Theano, Lines: 33, Source: test_basic_ops.py
Example 5: test_local_gpu_elemwise_0
def test_local_gpu_elemwise_0():
    """
    Test local_gpu_elemwise_0 when there is a dtype upcastable to float32.
    """
    a = tensor.bmatrix()
    b = tensor.fmatrix()
    c = tensor.fmatrix()

    a_v = (numpy.random.rand(4, 5) * 10).astype("int8")
    b_v = (numpy.random.rand(4, 5) * 10).astype("float32")
    c_v = (numpy.random.rand(4, 5) * 10).astype("float32")

    # Due to optimization order, this composite is created when all
    # the ops are on the gpu.
    f = theano.function([a, b, c], [a + b + c], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
    assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 1
    f(a_v, b_v, c_v)

    # Now test with the composite already on the cpu before we move it
    # to the gpu.
    a_s = theano.scalar.int8()
    b_s = theano.scalar.float32()
    c_s = theano.scalar.float32()
    out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s + c_s])
    out_op = tensor.Elemwise(out_s)
    f = theano.function([a, b, c], [out_op(a, b, c)], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
    assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 1
    f(a_v, b_v, c_v)
Author: OlafLee, Project: Theano, Lines: 32, Source: test_opt.py
Example 6: build_loss_graph
def build_loss_graph(self, saved_graph=None):
    print("Building loss graph...")
    for l in self.layers:
        l.set_training(False)

    Sentence = T.fmatrix('Sentence')
    Characters = T.ftensor3('Characters')
    WordLengths = T.ivector('WordLengths')
    GoldPredictions = T.fmatrix('GoldPredictions')

    weight_list = self.get_theano_weight_list()
    if self.feature_mode == 'character':
        result = self.theano_sentence_loss(Characters, WordLengths, GoldPredictions)
        input_list = [Characters, WordLengths, GoldPredictions] + list(weight_list)
    elif self.feature_mode == 'sentence':
        result = self.theano_sentence_loss(Sentence, GoldPredictions)
        input_list = [Sentence, GoldPredictions] + list(weight_list)
    elif self.feature_mode == 'both':
        result = self.theano_sentence_loss(Sentence, Characters, WordLengths, GoldPredictions)
        input_list = [Sentence, Characters, WordLengths, GoldPredictions] + list(weight_list)

    cgraph = theano.function(inputs=input_list, outputs=result, mode='FAST_RUN', allow_input_downcast=True)
    print("Done building graph.")
    return cgraph
Author: MichSchli, Project: Speciale, Lines: 28, Source: abstract_rnn.py
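The example does not show how the compiled graph is consumed. A hypothetical call in 'sentence' feature mode might look like the sketch below; model, sentence_matrix, gold_matrix, and the get_weight_values helper are all assumptions, not part of the project above.

# hypothetical usage in 'sentence' feature mode
loss_fn = model.build_loss_graph()
loss = loss_fn(sentence_matrix, gold_matrix, *model.get_weight_values())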
Example 7: __theano_build__
def __theano_build__(self):
    params = self.params
    param_names = self.param_names
    hidden_dim = self.hidden_dim

    x1 = T.imatrix('x1')            # first sentence
    x2 = T.imatrix('x2')            # second sentence
    x1_mask = T.fmatrix('x1_mask')  # mask for the first sentence
    x2_mask = T.fmatrix('x2_mask')  # mask for the second sentence
    y = T.ivector('y')              # label
    y_c = T.ivector('y_c')          # class weights

    # Embedding the words
    _E1 = params["E"].dot(params["W"][0]) + params["B"][0]
    _E2 = params["E"].dot(params["W"][1]) + params["B"][1]
    statex1 = _E1[x1.flatten(), :].reshape([x1.shape[0], x1.shape[1], hidden_dim])
    statex2 = _E2[x2.flatten(), :].reshape([x2.shape[0], x2.shape[1], hidden_dim])

    def rnn_cell(x, mx, ph, Wh):
        h = T.tanh(ph.dot(Wh) + x)
        # keep the previous hidden state where the mask is 0 (padding)
        h = mx[:, None] * h + (1 - mx[:, None]) * ph
        return [h]

    [h1], updates = theano.scan(
        fn=rnn_cell,
        sequences=[statex1, x1_mask],
        truncate_gradient=self.truncate,
        outputs_info=[dict(initial=T.zeros([self.batch_size, self.hidden_dim]))],
        non_sequences=params["W"][2])

    [h2], updates = theano.scan(
        fn=rnn_cell,
        sequences=[statex2, x2_mask],
        truncate_gradient=self.truncate,
        outputs_info=[dict(initial=h1[-1])],
        non_sequences=params["W"][3])

    # Prediction
    _s = T.nnet.softmax(h1[-1].dot(params["lrW"][0]) + h2[-1].dot(params["lrW"][1]) + params["lrb"])
    _p = T.argmax(_s, axis=1)
    _c = T.nnet.categorical_crossentropy(_s, y)
    _c = T.sum(_c * y_c)
    _l = T.sum(params["lrW"]**2)
    _cost = _c + 0.01 * _l

    # SGD parameters
    learning_rate = T.scalar('learning_rate')
    decay = T.scalar('decay')

    # Gradients and updates
    _grads, _updates = rms_prop(_cost, param_names, params, learning_rate, decay)

    # Assign functions
    self.bptt = theano.function([x1, x2, x1_mask, x2_mask, y, y_c], _grads)
    self.loss = theano.function([x1, x2, x1_mask, x2_mask, y, y_c], _c)
    self.weights = theano.function([x1, x2, x1_mask, x2_mask], _s)
    self.predictions = theano.function([x1, x2, x1_mask, x2_mask], _p)
    self.sgd_step = theano.function(
        [x1, x2, x1_mask, x2_mask, y, y_c, learning_rate, decay],
        updates=_updates)
Author: wangxggc, Project: rnn-theano, Lines: 60, Source: rnn.py
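A hypothetical training loop over the compiled functions might then look like the following; the minibatch iterator and the hyperparameter values are assumptions, not taken from the project above.

# hypothetical driver loop; all batch arrays are assumptions
for x1_b, x2_b, m1_b, m2_b, y_b, yc_b in minibatches:
    model.sgd_step(x1_b, x2_b, m1_b, m2_b, y_b, yc_b, 0.001, 0.9)
    print(model.loss(x1_b, x2_b, m1_b, m2_b, y_b, yc_b))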
Example 8: test_does_not_crash
def test_does_not_crash():
    Z = T.ftensor3('Z')
    W_re = T.fmatrix('W_re')
    W_att_in = T.fmatrix('W_att_in')
    c = T.fmatrix('c')    # initial state
    y0 = T.fmatrix('y0')  # initial activation
    i = T.matrix('i', dtype='int8')
    Y, H, d = LSTMCustomTestOpNoInplaceInstance(Z, c, y0, i, W_re, W_att_in)
    f = theano.function(inputs=[Z, c, y0, i, W_re, W_att_in], outputs=Y)

    n_T = 5
    n_batch = 4
    n_inp_dim = 3
    n_cells = 8
    numpy.random.seed(1234)
    Z_val = numpy.random.ranf((n_T, n_batch, 4 * n_cells)).astype('float32')
    W_re_val = numpy.random.ranf((n_cells, 4 * n_cells)).astype('float32')
    W_att_in_val = numpy.random.ranf((n_cells, 4 * n_cells)).astype('float32')
    c_val = numpy.random.ranf((n_batch, n_cells)).astype('float32')
    y0_val = numpy.random.ranf((n_batch, n_cells)).astype('float32')
    # i_val = numpy.ones((n_T, n_batch), dtype='int8')
    i_val = numpy.array([[1, 1, 1, 1, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 1], [0, 0, 1, 0, 0]], dtype='int8').T
    Y_val = numpy.asarray(f(Z_val, c_val, y0_val, i_val, W_re_val, W_att_in_val))
    # print(Y_val)
    print("success")
Author: rwth-i6, Project: returnn, Lines: 27, Source: test_OpLSTMCustom.py
Example 9: test_fwd_pass_compatible_with_OpLSTM
def test_fwd_pass_compatible_with_OpLSTM():
    Z = T.ftensor3('Z')
    W_re = T.fmatrix('W_re')
    W_att_in = T.fmatrix('W_att_in')
    c = T.fmatrix('c')    # initial state
    y0 = T.fmatrix('y0')  # initial activation
    i = T.matrix('i', dtype='int8')
    Y, H, d = LSTMCustomTestOpNoInplaceInstance(Z, c, y0, i, W_re, W_att_in)
    W_re_modified = W_re + W_att_in
    Z_modified = T.inc_subtensor(Z[0], T.dot(y0, W_re_modified))
    Y2, H2, d2 = LSTMOpInstance(Z_modified, W_re_modified, c, i)
    f = theano.function(inputs=[Z, c, y0, i, W_re, W_att_in], outputs=Y)
    g = theano.function(inputs=[Z, W_re, c, y0, i, W_att_in], outputs=Y2)

    n_T = 5
    n_batch = 4
    n_inp_dim = 3
    n_cells = 8
    numpy.random.seed(1234)
    Z_val = numpy.random.ranf((n_T, n_batch, 4 * n_cells)).astype('float32')
    W_re_val = numpy.random.ranf((n_cells, 4 * n_cells)).astype('float32')
    W_att_in_val = numpy.random.ranf((n_cells, 4 * n_cells)).astype('float32')
    c_val = numpy.random.ranf((n_batch, n_cells)).astype('float32')
    y0_val = numpy.random.ranf((n_batch, n_cells)).astype('float32')
    # i_val = numpy.ones((n_T, n_batch), dtype='int8')
    i_val = numpy.array([[1, 1, 1, 1, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 1], [0, 0, 1, 0, 0]], dtype='int8').T
    Y_val = numpy.asarray(f(Z_val, c_val, y0_val, i_val, W_re_val, W_att_in_val))
    Y2_val = numpy.asarray(g(Z_val, W_re_val, c_val, y0_val, i_val, W_att_in_val))
    assert numpy.allclose(Y_val, Y2_val)
    print("success")
Author: rwth-i6, Project: returnn, Lines: 33, Source: test_OpLSTMCustom.py
Example 10: predict_df
def predict_df(self, input_df=None):
    f = open('/tmp/obj.save', 'rb')
    neural_model = cPickle.load(f)
    f.close()
    X, y = neural_model['enc'].transform(input_df)
    trX, teX, Y_train, Y_test = train_test_split(X, y, test_size=0.33, random_state=42)
    trY = one_hot(Y_train, n=2)
    teY = one_hot(Y_test, n=2)

    X = T.fmatrix()
    Y = T.fmatrix()
    h, h2, py_x = model(X, neural_model['w_h'], neural_model['w_h2'], neural_model['w_o'], 0., 0.)
    y_pred = T.argmax(py_x, axis=1)

    # Note: the training graph below is unused by this prediction path;
    # `w` is presumably defined elsewhere (as the model weights) in the
    # original project.
    cost = T.mean(T.nnet.categorical_crossentropy(py_x, Y))
    gradient = T.grad(cost=cost, wrt=w)
    update = [[w, w - gradient * 0.05]]
    train = theano.function(inputs=[X, Y], outputs=cost, updates=update, allow_input_downcast=True)

    predict = theano.function(inputs=[X], outputs=y_pred, allow_input_downcast=True)
    print('Loaded precision:', np.mean(np.argmax(teY, axis=1) == predict(teX)))
    return predict(teX)
Author: qiyangduan, Project: stanmo, Lines: 29, Source: neuralnet2layer.py
Example 11: train
def train(self, trX, teX, trY, teY, plot=True, epochs=TIMES, shortcard=SHORTCARD, speed=SPEED, drop_input=DROP_INPUT, drop_hidden=DROP_HIDDEN, step_show=STEP_SHOW, rho=RHO, epsilon=EPSILON):
    X = T.fmatrix()
    Y = T.fmatrix()
    train_set_n = len(trY)
    test_set_n = len(teY)
    accuracy_arr = []
    diff_arr = []
    i_arr = []

    noise_py_x = self._model(X, drop_input, drop_hidden)
    cost = T.mean(T.nnet.categorical_crossentropy(noise_py_x, Y))
    updates = self._RMSprop(cost, lr=speed, rho=rho, epsilon=epsilon)
    train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)

    for i in range(epochs):
        for start, end in zip(range(0, train_set_n, shortcard), range(shortcard, train_set_n, shortcard)):
            cost = train(trX[start:end], trY[start:end])
        if i % step_show == 0:
            acc = np.mean(np.argmax(teY, axis=1) == self.predict(teX))
            accuracy_arr.append(acc)
            di = self.get_diff(teX, teY)
            diff_arr.append(di)
            i_arr.append(i)
            print("{0} {1:.3f}% {2:.1f}".format(i, acc * 100, di))

    if plot:
        self._name = "Epochs: {0}, Shortcard: {1}, Speed: {2:.5f}\n Structure: {3}\n Train: {4}, Test: {5}".format(epochs, shortcard, speed, self._struct, train_set_n, test_set_n)
        self._name_f = "epochs_{0}_shortcard_{1}_speed_{2:.5f}_structure_{3}_train_{4}_test_{5}".format(epochs, shortcard, speed, self._struct, train_set_n, test_set_n)
        self._plot(i_arr, accuracy_arr, diff_arr)
Author: chepiga1995, Project: deep-learning-with-python, Lines: 29, Source: neural.py
Example 12: test_pycuda_elemwise_kernel
def test_pycuda_elemwise_kernel():
    x = T.fmatrix('x')
    y = T.fmatrix('y')
    f = theano.function([x, y], x + y, mode=mode_with_gpu)
    print(f.maker.env.toposort())
    f2 = theano.function([x, y], x + y, mode=mode_with_gpu.including("local_pycuda_gpu_elemwise_kernel"))
    print(f2.maker.env.toposort())
    assert any([isinstance(node.op, theano.sandbox.cuda.GpuElemwise) for node in f.maker.env.toposort()])
    assert any([isinstance(node.op, PycudaElemwiseKernelOp) for node in f2.maker.env.toposort()])

    val1 = numpy.asarray(numpy.random.rand(5, 5), dtype='float32')
    val2 = numpy.asarray(numpy.random.rand(5, 5), dtype='float32')
    # val1 = numpy.ones((5, 5))
    # val2 = numpy.arange(25).reshape(5, 5)
    assert (f(val1, val2) == f2(val1, val2)).all()
    print(f(val1, val2))
    print(f2(val1, val2))

    x3 = T.ftensor3('x')
    y3 = T.ftensor3('y')
    z3 = T.ftensor3('z')
    f4 = theano.function([x3, y3, z3], x3 * y3 + z3, mode=mode_with_gpu.including("local_pycuda_gpu_elemwise_kernel"))
    print(f4.maker.env.toposort())
    assert any([isinstance(node.op, PycudaElemwiseKernelOp) for node in f4.maker.env.toposort()])
    val1 = numpy.random.rand(2, 2, 2)
    print(val1)
    print(f4(val1, val1, val1))
    assert numpy.allclose(f4(val1, val1, val1), val1 * val1 + val1)
Author: HaniAlmousli, Project: Theano, Lines: 32, Source: test_pycuda_example.py
Example 13: get_adadelta_trainer
def get_adadelta_trainer(self, debug=False):
    batch_x1 = T.fmatrix('batch_x1')
    batch_x2 = T.fmatrix('batch_x2')
    batch_y = T.ivector('batch_y')

    # compute the gradients with respect to the model parameters
    cost = self.cost
    gparams = T.grad(cost, self.params)

    # compute the list of weight updates
    updates = OrderedDict()
    for accugrad, accudelta, param, gparam in zip(self._accugrads,
            self._accudeltas, self.params, gparams):
        # c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)
        agrad = self._rho * accugrad + (1 - self._rho) * gparam * gparam
        dx = - T.sqrt((accudelta + self._eps) / (agrad + self._eps)) * gparam
        updates[accudelta] = self._rho * accudelta + (1 - self._rho) * dx * dx
        updates[param] = param + dx
        updates[accugrad] = agrad

    outputs = cost
    if debug:
        outputs = [cost] + self.params + gparams +\
            [updates[param] for param in self.params]

    train_fn = theano.function(inputs=[theano.Param(batch_x1),
                                       theano.Param(batch_x2), theano.Param(batch_y)],
                               outputs=outputs,
                               updates=updates,
                               givens={self.x1: batch_x1, self.x2: batch_x2, self.y: batch_y})
    return train_fn
Author: RolT, Project: timit_tools, Lines: 31, Source: nnet_archs.py
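A sketch of how the returned trainer might be driven follows; net and the minibatch iterator are assumptions. Note that, unlike the plain SGD trainer in Example 17, Adadelta needs no external learning rate.

# hypothetical usage of the trainer above
train_fn = net.get_adadelta_trainer()
for x1_batch, x2_batch, y_batch in minibatches:
    cost = train_fn(x1_batch, x2_batch, y_batch)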
Example 14: test_graph_opt_caching
def test_graph_opt_caching():
    opt_db_file = os.path.join(theano.config.compiledir, 'optimized_graphs.pkl')
    if os.path.exists(opt_db_file):
        os.remove(opt_db_file)

    mode = theano.config.mode
    if mode in ["DEBUG_MODE", "DebugMode"]:
        mode = "FAST_RUN"
    default = theano.config.cache_optimizations
    try:
        theano.config.cache_optimizations = True
        a = T.fmatrix('a')
        b = T.fmatrix('b')
        c = theano.shared(np.ones((10, 10), dtype=floatX))
        d = theano.shared(np.ones((10, 10), dtype=floatX))
        e = T.sum(T.sum(T.sum(a ** 2 + b) + c) + d)
        f1 = theano.function([a, b], e, mode=mode)

        m = T.fmatrix('x1')
        n = T.fmatrix('x2')
        p = theano.shared(np.ones((10, 10), dtype=floatX))
        q = theano.shared(np.ones((10, 10), dtype=floatX))
        j = T.sum(T.sum(T.sum(m ** 2 + n) + p) + q)
        f2 = theano.function([m, n], j, mode=mode)

        in1 = np.ones((10, 10), dtype=floatX)
        in2 = np.ones((10, 10), dtype=floatX)
        assert f1(in1, in2) == f2(in1, in2)
    finally:
        theano.config.cache_optimizations = default
Author: DEVESHTARASIA, Project: Theano, Lines: 30, Source: test_graph_opt_caching.py
Example 15: __init__
def __init__(self, dimX, dimZ, hls, acts):
    self.dimZ = dimZ
    self.f = MLP(dimX, dimZ, [1200], [tanh, tanh])
    self.g = MLP(dimZ, dimX, [1200], [tanh, sigm])
    self.generator = MLP(dimZ, dimX, [1200, 1200], [tanh, tanh, sigm])
    self.params = self.f.params + self.g.params + self.generator.params

    x = T.fmatrix('x')
    lr = T.scalar('lr')
    noise = T.scalar('noise')
    z = self.f(2 * x - 1)
    rx = self.g(z)
    cost_recons = ce(rx, x).mean(axis=1).mean(axis=0)

    rand = rng_theano.uniform(low=0, high=1, size=z.shape)
    nz = self.nearest_neighbour_of_in(rand, z)  # nearest neighbour of rand in z
    xnz = self.g(nz)
    rxx = self.generator(rand)
    cost_gen = ce(rxx, xnz).mean(axis=1).mean(axis=0)

    grads_f = T.grad(cost_recons, self.f.params)
    grads_g = T.grad(cost_recons, self.g.params)
    grads_gen = T.grad(cost_gen, self.generator.params)
    grads = grads_f + grads_g + grads_gen
    # tuple-unpacking lambdas are Python 2 only; use a comprehension instead
    updates = [(param, param - lr * grad) for param, grad in zip(self.params, grads)]

    nnd = self.nearest_neighbour_distances(z)
    self.train_fn = theano.function([x, lr], [cost_recons, cost_gen, nnd.mean(), nnd.std()], updates=updates)

    z = T.fmatrix('z')
    self.sample_fn = theano.function([z], self.g(z), allow_input_downcast=True)
    self.infer_fn = theano.function([x], self.f(2 * x - 1), allow_input_downcast=True)
    self.generator_fn = theano.function([z], self.g(z), allow_input_downcast=True)
Author: sherjilozair, Project: daedalus, Lines: 30, Source: __init__.py
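Once trained, samples might be drawn by feeding random codes to sample_fn; a hypothetical sketch (model and the batch size are assumptions):

import numpy as np

# hypothetical sampling call: decode 16 random codes through g
z = np.random.uniform(size=(16, model.dimZ)).astype('float32')
images = model.sample_fn(z)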
Example 16: setup_theano
def setup_theano(self):
    self.vocab_mat = T.fmatrix('vocab')
    self.sample = T.fmatrix('sample')
    b = T.fvector('b')
    W = T.fmatrix('W')

    f = self.transform_function(
        W,
        b,
        self.wordvec_transform(self.sample, self.vocab_mat))
    s = T.sum(f)

    self.corrupt_sample = T.fmatrix('corrupt-sample')
    f_corrupt = self.transform_function(
        W,
        b,
        self.wordvec_transform(self.corrupt_sample, self.vocab_mat))
    s_corrupt = T.sum(f_corrupt)

    # hinge loss: the true sample should score at least 1 above the corrupt one
    J = T.largest(0, 1 - s + s_corrupt)
    self.grad = theano.grad(J, [b, W, self.vocab_mat])
    self.grad_fn = theano.function(
        [self.sample, self.corrupt_sample, b, W, self.vocab_mat],
        self.grad,
        allow_input_downcast=True)
    self.exec_fn = theano.function([self.sample, b, W, self.vocab_mat],
                                   f,
                                   allow_input_downcast=True)
Author: ririw, Project: autoencoder-experiments, Lines: 28, Source: autoencoder.py
Example 17: get_SGD_trainer
def get_SGD_trainer(self, debug=False):
    """ Returns a plain SGD minibatch trainer with the learning rate as a parameter. """
    batch_x1 = T.fmatrix('batch_x1')
    batch_x2 = T.fmatrix('batch_x2')
    batch_y = T.ivector('batch_y')
    learning_rate = T.fscalar('lr')  # learning rate to use

    # compute the gradients with respect to the model parameters,
    # using mean_cost so that the learning rate is not too dependent
    # on the batch size
    cost = self.mean_cos_sim_cost
    gparams = T.grad(cost, self.params)

    # compute the list of weight updates
    updates = OrderedDict()
    for param, gparam in zip(self.params, gparams):
        updates[param] = param - gparam * learning_rate

    outputs = cost
    if debug:
        outputs = [cost] + self.params + gparams +\
            [updates[param] for param in self.params]

    train_fn = theano.function(inputs=[theano.Param(batch_x1),
                                       theano.Param(batch_x2), theano.Param(batch_y),
                                       theano.Param(learning_rate)],
                               outputs=outputs,
                               updates=updates,
                               givens={self.x1: batch_x1, self.x2: batch_x2, self.y: batch_y})
    return train_fn
Author: Verderey, Project: timit_tools, Lines: 30, Source: nnet_archs.py
Example 18: __init__
def __init__(self, name, input_neurons, output_neurons):
    self.input_neurons = input_neurons
    self.output_neurons = output_neurons
    self.name = name

    # Initialize theano variables:
    self.W_forget_theano = T.fmatrix(self.name + '_forget_weight')
    self.W_input_theano = T.fmatrix(self.name + '_input_weight')
    self.W_candidate_theano = T.fmatrix(self.name + '_candidate_weight')
    self.W_output_theano = T.fmatrix(self.name + '_output_weight')

    # Initialize python variables:
    high_init = np.sqrt(6) / np.sqrt(self.input_neurons + 2 * self.output_neurons)
    low_init = -high_init
    s = (self.output_neurons, self.input_neurons + self.output_neurons + 1)
    self.W_forget = np.random.uniform(low=low_init, high=high_init, size=s).astype(np.float32)
    self.W_input = np.random.uniform(low=low_init, high=high_init, size=s).astype(np.float32)
    self.W_candidate = np.random.uniform(low=low_init, high=high_init, size=s).astype(np.float32)
    self.W_output = np.random.uniform(low=low_init, high=high_init, size=s).astype(np.float32)

    # Initialize the forget-gate bias to one:
    self.W_forget[-1] = np.ones_like(self.W_forget[-1], dtype=np.float32)
Author: MichSchli, Project: Speciale, Lines: 25, Source: network_ops.py
Example 19: create_encoder_decoder_func
def create_encoder_decoder_func(layers, apply_updates=False):
    X = T.fmatrix('X')
    X_batch = T.fmatrix('X_batch')

    X_hat = get_output(layers['l_decoder_out'], X, deterministic=False)

    # reconstruction loss
    encoder_decoder_loss = T.mean(
        T.mean(T.sqr(X - X_hat), axis=1)
    )

    if apply_updates:
        # all layers that participate in the forward pass should be updated
        encoder_decoder_params = get_all_params(
            layers['l_decoder_out'], trainable=True)
        encoder_decoder_updates = nesterov_momentum(
            encoder_decoder_loss, encoder_decoder_params, 0.01, 0.9)
    else:
        encoder_decoder_updates = None

    encoder_decoder_func = theano.function(
        inputs=[theano.In(X_batch)],
        outputs=encoder_decoder_loss,
        updates=encoder_decoder_updates,
        givens={
            X: X_batch,
        },
    )
    return encoder_decoder_func
Author: hjweide, Project: adversarial-autoencoder, Lines: 31, Source: theano_funcs.py
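A hypothetical way to wire this into a training loop follows; layers and X_train are assumptions, not shown in the project above.

# hypothetical usage: one function updates the weights, one only evaluates
train_fn = create_encoder_decoder_func(layers, apply_updates=True)
val_fn = create_encoder_decoder_func(layers, apply_updates=False)
for epoch in range(10):
    loss = train_fn(X_train.astype('float32'))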
Example 20: show_patches_on_frames
def show_patches_on_frames(ims, locations_, scales_,
                           image_shape=(100, 100), patch_shape=(16, 16)):
    hyperparameters = {}
    hyperparameters["cutoff"] = 3
    hyperparameters["batched_window"] = True

    location = T.fmatrix()
    scale = T.fmatrix()
    x = T.fvector()
    cropper = LocallySoftRectangularCropper(
        patch_shape=patch_shape,
        hyperparameters=hyperparameters,
        kernel=Gaussian())
    patch = cropper.apply(
        x.reshape((1, 1,) + image_shape),
        np.array([list(image_shape)]),
        location,
        scale)
    get_patch = theano.function([x, location, scale], patch,
                                allow_input_downcast=True)

    final_shape = (image_shape[0], image_shape[0] + patch_shape[0] + 5)
    ret = np.ones((ims.shape[0], ) + final_shape + (3,), dtype=np.float32)
    for i in range(ims.shape[0]):
        im = ims[i]
        location_ = locations_[i]
        scale_ = scales_[i]
        patch_on_frame = show_patch_on_frame(im, location_, scale_)
        ret[i, :, :image_shape[1], :] = patch_on_frame
        ret[i, -patch_shape[0]:, image_shape[1] + 5:, :] = to_rgb1(
            get_patch(im, [location_], [scale_])[0, 0])
    return ret
Author: BinbinBian, Project: LSTM-Attention, Lines: 30, Source: utils.py
Note: The theano.tensor.fmatrix examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's License before distributing or reusing the code. Do not reproduce this article without permission.