
Python theano.pp Function Code Examples


This article collects typical usage examples of the Python theano.pp function: what pp does, how to call it, and what it looks like in real code. If those are the questions you are asking, the curated examples below should help.

Twenty code examples of the pp function are shown below, sorted by popularity by default.
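
For orientation, here is a minimal sketch of what theano.pp does (an illustrative snippet, not one of the collected examples): it renders a symbolic Theano expression as a human-readable string, before any graph optimization runs.

import theano
import theano.tensor as T
from theano import pp

x = T.dscalar('x')       # symbolic double-precision scalar
y = x ** 2 + 3 * x       # build a symbolic expression
print(pp(y))             # prints something like '((x ** 2) + (3 * x))'

gy = theano.grad(y, x)   # symbolic gradient dy/dx
print(pp(gy))            # the unoptimized gradient graph, also as a string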

Example 1: custom_svrg2

def custom_svrg2(loss, params, m, learning_rate=0.01, objective=None, data=None, target=None, getpred=None):

    print(theano.pp(loss))  # pp returns a string; print it to display the symbolic loss
    
    grads = theano.grad(loss, params)
    n = data.shape[0]

    updates = OrderedDict()
    rng = T.shared_randomstreams.RandomStreams(seed=149)

    for param, grad in zip(params, grads):
        value = param.get_value(borrow=True)  # fetched but unused in this update rule
        mu = grad / n

        def oneStep(w):
            t = rng.choice(size=(1,), a=n)

            loss_part_tilde = objective(getpred(data[t], param), target[t])
            loss_part_tilde = loss_part_tilde.mean()
            g_tilde = theano.grad(loss_part_tilde, param)
        
            loss_part = objective(getpred(data[t], w), target[t])
            loss_part = loss_part.mean()
            g = theano.grad(loss_part, w)

            w = w - learning_rate * (g - g_tilde + mu)
            return w

        w_tilde, scan_updates = theano.scan(fn=oneStep, outputs_info=param, n_steps=m)

        updates.update(scan_updates)
        updates[param] = w_tilde[-1]

    return updates
Developer: myt00seven | Project: svrg | Lines: 34 | Source: large_gpu_cifar10_ffn.py


Example 2: compute_gradients

    def compute_gradients(self):
        # maybe doesn't need to be a class variable
        self.grads = T.grad(self.cost, wrt=self.tparams.values())

        #lrate: learning rate
        self.f_populate_gradients, self.f_update_params = self.optimizer()


        # =====================================================================
        # print out the computational graph and make an image of it too
        if self.debug and False:  # graph dumping is disabled; change False to True to enable
            # util.colorprint("Following is the graph of the final hidden layer:", "blue")
            # final_activation_fn = theano.function([self.input], final_activation)
            # theano.printing.debugprint(final_activation_fn.maker.fgraph.outputs[0])   
            # util.colorprint("Also, saving png of computational graph:", "blue")
            # theano.printing.pydotprint(final_activation_fn, 
            #     outfile="output/lmlp_final_act_viz.png", 
            #     compact=True,
            #     scan_graphs=True,
            #     var_with_name_simple=True)
            util.colorprint("Following is the graph of the first of the derivatives:", "blue")
            final_grad_fn = theano.function([self.input, self.y], self.grads[0])
            theano.printing.debugprint(final_grad_fn.maker.fgraph.outputs[0]) 
            util.colorprint("Yay colorprinted:", "blue")
            print(theano.pp(self.final_activation))
            util.colorprint("Also, saving png of computational graph:", "blue")
            theano.printing.pydotprint(final_grad_fn, 
                outfile="output/lmlp_final_grad_viz.png", 
                compact=True,
                scan_graphs=True,
                var_with_name_simple=True)            
Developer: icaswell | Project: deep_learning_projects | Lines: 31 | Source: loopy_mlp.py


Example 3: bsgd1

def bsgd1(nn, data, name='sgd', lr=0.022, alpha=0.3, batch_size=500, epochs = 10):
	train_set_x, train_set_y = data[0]
	valid_set_x, valid_set_y = data[1]
	test_set_x, test_set_y = data[2]

	# valid_y_numpy and test_y_numpy are assumed to be defined in an enclosing
	# scope (e.g. valid_y_numpy = y_numpy[0]; test_y_numpy = y_numpy[1]);
	# map the 48 TIMIT phone classes down to 39
	test_y_numpy = map_48_to_39(test_y_numpy)
	valid_y_numpy = map_48_to_39(valid_y_numpy)
	print test_y_numpy

	num_samples = train_set_x.get_value(borrow=True).shape[0] 
	num_batches = num_samples / batch_size 

	layers = nn.layers
	x = T.matrix('x')
	y = T.ivector('y')
	y_eval = T.ivector('y_eval')

	cost = nn.cost(x, y)
	accuracy = nn.calcAccuracy(x, y)
	params = nn.params
	delta_params = nn.delta_params

	print theano.pp(cost)
	# theano.pp(accuracy)

	p_grads = [T.grad(cost=cost, wrt = p) for p in params]  
	# implementing gradient descent with momentum 
	print p_grads
	updates = OrderedDict()
	for dp, gp in zip(delta_params, p_grads):
		updates[dp] = dp*alpha - gp*lr
	for p, dp in zip(params, delta_params):
		updates[p] = p + updates[dp]

	# updates = [(p, p - lr*gp) for p, gp in zip(params, p_grads)]
	index = T.ivector('index')
	batch_sgd_train = theano.function(inputs=[index], outputs=[cost, accuracy], updates=updates, givens={x: train_set_x[index], y:train_set_y[index]})

	batch_sgd_valid = theano.function(inputs=[], outputs=[nn.calcAccuracy(x, y), nn.calcAccuracyTimit(x,y)], givens={x: valid_set_x, y:valid_set_y})

	batch_sgd_test = theano.function(inputs=[], outputs=nn.calcAccuracy(x, y), givens={x: test_set_x, y:test_set_y})

	indices = np.arange(num_samples,  dtype=np.dtype('int32'))
	np.random.shuffle(indices)

	for n in xrange(epochs):
		np.random.shuffle(indices)
		for i in xrange(num_batches):
			batch = indices[i*batch_size: (i+1)*batch_size]
			batch_sgd_train(batch)

		# y_np = y.get_value()
		# print y.eval()

		print "epoch:", n,  "	validation accuracy:",  batch_sgd_valid()


	print "test accuracy:", batch_sgd_test()
Developer: adhaka | Project: kthasrdnn | Lines: 60 | Source: sgd.py


Example 4: test_examples_4

    def test_examples_4(self):
        from theano import pp
        x = T.dscalar('x')
        y = x**2
        gy = T.grad(y, x)
        # pp renders the gradient graph prior to optimization:
        assert pp(gy) == '((fill((x ** 2), 1.0) * 2) * (x ** (2 - 1)))'
        f = function([x], gy)
        assert f(4) == array(8.0)
        assert f(94.2) == array(188.40000000000001)
Developer: AI-Cdrone | Project: Theano | Lines: 10 | Source: test_tutorial.py


Example 5: derivative

def derivative():
    x = T.dscalar('x')
    y = x ** 2
    gy = T.grad(y, x)
    print(pp(gy))

    f = function([x], gy)
    print(f(4))
    print(np.allclose(f(94.2), 94.2 * 2))
    print(pp(f.maker.fgraph.outputs[0]))
Developer: fyabc | Project: TheanoProject | Lines: 10 | Source: derivatives.py


Example 6: gradient

def gradient(a):
    x = T.dscalar('x')
    y = x**2
    z = 1/x
    gy = T.grad(y, x)
    gz = T.grad(z, x)
    print(th.pp(gy))
    print(th.pp(gz))
    f = th.function([x], gy)
    g = th.function([x], gz)
    print(f(a))
    print(g(a))
Developer: thbeucher | Project: DQN | Lines: 12 | Source: theanoL.py


Example 7: ppth

def ppth(obj, fancy=True, graph=False, fid="/Users/keithd/temp/pydot_graph", fmt="pdf"):
    # graph=True renders obj to an image file via pydot; fancy=True prints the
    # verbose debugprint form; otherwise return theano.pp's compact string.
    if graph:
        theano.printing.pydotprint(obj, outfile=fid, format=fmt)
    elif fancy:
        theano.printing.debugprint(obj)
    else:
        return theano.pp(obj)
Developer: Bergalerga | Project: AIProg | Lines: 7 | Source: theanobasics.py
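
A usage sketch for the helper above (hypothetical calls; x and gy are set up as in the earlier gradient examples):

import theano
import theano.tensor as T

x = T.dscalar('x')
gy = theano.grad(x ** 2, x)
print(ppth(gy, fancy=False))   # compact theano.pp string of the gradient graph
ppth(gy)                       # verbose debugprint written to stdout
ppth(theano.function([x], gy), graph=True, fid="grad_graph", fmt="png")  # image file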


Example 8: cached_function

def cached_function(inputs, outputs):
    import theano

    with Message("Hashing theano fn"):
        if hasattr(outputs, "__len__"):
            hash_content = tuple(map(theano.pp, outputs))
        else:
            hash_content = theano.pp(outputs)
    cache_key = hex(hash(hash_content) & (2 ** 64 - 1))[:-1]
    cache_dir = Path("~/.hierctrl_cache")
    cache_dir = cache_dir.expanduser()
    cache_dir.mkdir_p()
    cache_file = cache_dir / ("%s.pkl" % cache_key)
    if cache_file.exists():
        with Message("unpickling"):
            with open(cache_file, "rb") as f:
                try:
                    return pickle.load(f)
                except Exception:
                    pass
    with Message("compiling"):
        fun = compile_function(inputs, outputs)
    with Message("picking"):
        with open(cache_file, "wb") as f:
            pickle.dump(fun, f, protocol=pickle.HIGHEST_PROTOCOL)
    return fun
Developer: yenchenlin | Project: rllab | Lines: 26 | Source: ext.py
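
Why theano.pp works as a cache key here: structurally identical graphs pretty-print to identical strings, so rebuilding an unchanged expression hits the cache instead of triggering recompilation. A minimal illustration of that property (not from the project above):

import theano.tensor as T
from theano import pp

a1 = T.dscalar('a')
a2 = T.dscalar('a')   # a distinct variable object carrying the same name
assert pp(a1 ** 2 + 1) == pp(a2 ** 2 + 1)   # same structure -> same string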


Example 9: bsgd

def bsgd(nn, data, name='sgd', lr=0.03, epochs=120, batch_size=500, momentum=0):
	train_set_x, train_set_y = data[0]
	valid_set_x, valid_set_y = data[1]
	test_set_x, test_set_y = data[2]
	# exit()

	num_samples = train_set_x.get_value(borrow=True).shape[0]
	
	num_batches = num_samples / batch_size
	layers = nn.layers

	x = T.matrix('x')
	y = T.ivector('y')
	cost = nn.cost(x, y)
	accuracy = nn.calcAccuracy(x, y)

	params = nn.params
	print theano.pp(cost)

	p_grads = [T.grad(cost=cost, wrt = p) for p in params] 

	print p_grads
	updates = [(p, p - lr*gp) for p, gp in zip(nn.params, p_grads)]


	index = T.ivector('index')

	batch_sgd_train = theano.function(inputs=[index], outputs=[cost, accuracy], updates=updates, givens={x: train_set_x[index], y: train_set_y[index]})	

	batch_sgd_valid = theano.function(inputs=[], outputs=nn.calcAccuracy(x, y), givens={x: valid_set_x, y: valid_set_y})
	
	batch_sgd_test = theano.function(inputs=[], outputs=nn.calcAccuracy(x, y), givens={x: test_set_x, y: test_set_y})

	# indices = range(num_samples)
	indices = np.arange(num_samples, dtype=np.dtype('int32'))

	np.random.shuffle(indices)

	for n in xrange(epochs):
		np.random.shuffle(indices)
		for nb in xrange(num_batches):
			batch = indices[nb*batch_size : (nb+1) * batch_size ]
			batch_sgd_train(batch)

		print "Validation Accuracy:", batch_sgd_valid()

	print "Final Test Accuracy:", batch_sgd_test()
Developer: adhaka | Project: summers | Lines: 47 | Source: optimise.py


Example 10: getp

 def getp(si, tli, tri, tai, x_tm1, e, l, Wl, Wr, Wv):
     xx = T.concatenate([x_tm1, [self.x0]], axis=0)
     xsi = T.dot(e[si], Wv)
     xsi = xsi[0]
     pl, pl_ = theano.scan(lambda j, Wl, x, l, tli: T.dot(x[tli[j]], Wl[j]) * l[tli[j]],
               sequences=T.arange(tli.shape[0]), non_sequences=[Wl, xx, l, tli])
     xsi += T.sum(pl, axis=0)[0]
     pr, pr_ = theano.scan(lambda j, Wr, x, l, tri: T.dot(x[tri[j]], Wr[j]) * l[tri[j]],
               sequences=T.arange(tri.shape[0]), non_sequences=[Wr, xx, l, tri])
     xsi += T.sum(pr, axis=0)[0]
     pa, pa_ = theano.scan(lambda j, x, l, tai: x[tai[j]] * l[tai[j]],
               sequences=T.arange(tai.shape[0]), non_sequences=[xx, l, tai])
     xsi += T.sum(pa, axis=0)[0]
     xsi /= l[si]
     print(pp(xsi))    # show the symbolic expression for the updated node
     print(pp(x_tm1))
     x_t = T.set_subtensor(x_tm1[si], T.tanh(xsi))
     return x_t
Developer: HornHehhf | Project: Scope-detection | Lines: 18 | Source: tree_blstm.py


Example 11: main

def main():
    x = T.dscalar('x')
    y = T.dscalar('y')
    z = x + y
    f = function([x, y], z)

    xm = T.dmatrix('x')
    ym = T.dmatrix('y')
    fm = function([xm, ym], xm * ym)

    print(pp(xm * ym + 4 / ym))
    print(f(2, 3), fm([[1, 2], [3, 4]], [[5, 6], [7, 8]]))

    xv = T.vector()
    yv = T.vector()
    fv = function([xv, yv], xv ** 2 + yv ** 2 + 2 * xv * yv)

    print(fv([1, 2], [3, 4]))
Developer: fyabc | Project: TheanoProject | Lines: 18 | Source: algebra.py


Example 12: test_subtensor

def test_subtensor():
    x = theano.tensor.dvector()
    y = x[1]
    assert theano.pp(y) == "<TensorType(float64, vector)>[Constant{1}]"
Developer: Faruk-Ahmed | Project: Theano | Lines: 4 | Source: test_printing.py
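
As the test shows, a variable created without a name pretty-prints as its bare type. Giving the variable a name makes the output far more readable (a small sketch; the exact string is an assumption extrapolated from the test above):

import theano

x = theano.tensor.dvector('x')
print(theano.pp(x[1]))   # prints something like "x[Constant{1}]"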


Example 13: f

# coding: utf-8
import numpy
import theano
import theano.tensor as T

x = T.dscalar('x')

y = (T.sqrt(x) + 1) ** 3

dy = T.grad(cost=y, wrt=x)

f = theano.function(inputs=[x], outputs=dy)

print theano.pp(f.maker.fgraph.outputs[0])

print f(2)
print f(3)
Developer: MasazI | Project: Theano_Exercise | Lines: 17 | Source: theano_grad_sqrt.py


Example 14: function

import numpy
import theano.tensor as T
from theano import function
from theano import pp
from theano import In
from theano import shared

x = numpy.asarray([[1, 2], [3, 4], [5, 6]])
x.shape

x = T.dscalar()
y = T.dscalar()
w = T.dscalar()
z = (x + y) * w
g = 10
f = function([x, In(y, value=1), In(w, value=2, name='w_by_name')], z)
f(2, 3, w_by_name=g)
numpy.allclose(f(16.3, 12.1), 28.4)
print(pp(z))

a = T.vector()
b = T.vector()
target = a ** 2 + b ** 2 + 2 * a * b
f1 = function([a, b], target)
print(f1([1, 2], [4, 5]))

x = T.dmatrix()
s = 1 / (1 + T.exp(-x))
logistic = function([x], s)
m = [[1, 2], [3, 4], [5, 6]]
logistic(m)

s2 = (1 + T.tanh(x/2))/2
logistic2 = function([x], s2)
Developer: Aileenshanhong | Project: pythonExercise | Lines: 31 | Source: numpyEx1.py


Example 15: TensorType

# >>> x.type
# TensorType(float64, scalar)
# >>> T.dscalar
# TensorType(float64, scalar)
# >>> x.type is T.dscalar
# True

# By calling T.dscalar with a string argument, you create a Variable
# representing a floating-point scalar quantity with the given name.

x = T.dmatrix('x')
y = T.dmatrix('y')
z = x + y
f = function([x, y], z)
print(f([[1, 2], [3, 4]], [[10, 20], [30, 40]]))
print(pp(z))

a = T.vector()           # declare a symbolic vector
out = a + a ** 10        # build symbolic expression
f = function([a], out)   # compile function
print(f([0, 1, 2]))


x = T.dmatrix('x')
s = 1 / (1 + T.exp(-x))
logistic = function([x], s)
print(logistic([[0, 1], [-1, -2]]))

s2 = (1 + T.tanh(x / 2)) / 2
logistic2 = function([x], s2)
print(logistic2([[0, 1], [-1, -2]]))
Developer: jingriver | Project: testPython | Lines: 31 | Source: testtheano.py


Example 16: print

# Because of this, applying the chain rule from the output backwards yields the
# derivative of an expression with respect to its input variables, itself as an
# expression.

# Consequently, derivatives of undefined or implicit functions are impossible in
# principle: only expressions built explicitly from Theano operations qualify.
# This is where Theano differs from computer algebra systems such as Maxima.

print("Define a double-precision scalar x and its square y")
x = T.dscalar('x')
y = x ** 2

print("gy is the derivative of y with respect to x")
gy = T.grad(y, x)

print("Display gy before compilation and optimization")
print("pp(gy) = %s\n" % pp(gy))

# '((fill((x ** 2), 1.0) * 2) * (x ** (2 - 1)))'
print("fill(x ** 2, 1.0) is a tensor of the same shape as x ** 2 (here a scalar) with every element 1.0,")
print("so the expression is 1 * 2 * (x ** (2 - 1)), i.e. 2 * x.")

print("f is gy compiled and optimized; debugprint shows it reduces to 2 * x.")
f = function([x], gy)
print(debugprint(f))

print("Pretty-printing f.maker.fgraph.outputs[0] shows the same result.")
print("pp(f.maker.fgraph.outputs[0]) = %s" % pp(f.maker.fgraph.outputs[0]))

print("f(4) = %f" % f(4))
# array(8.0)
Developer: GM3D | Project: LearningTheano | Lines: 30 | Source: derivative.py


Example 17: function

# Inspired by the tutorial available at:
# https://www.analyticsvidhya.com/blog/2016/04/neural-networks-python-theano/

import numpy as np
import theano.tensor as T
from theano import function
from theano import shared
from theano import pp  # pretty-print

# multiple outputs
a = T.dscalar('a')
f = function([a], [a ** 2, a ** 3])
print(f(3))

# computing gradients
x = T.dscalar('x')
y = x ** 3
qy = T.grad(y, x)  # qy = 3x^2
f = function([x], qy)
g = function([x], y)
print(f(2))
print(g(3))
print(pp(qy))
Developer: gustavoem | Project: bcc | Lines: 23 | Source: functions.py


Example 18: getUpdateParams

	def getUpdateParams(self):
		update = []
		aux = []

		# Update state
		update.append( (self.params[0], input_layer.output) )

		# Update output
		print 'Length: ' + str(len(self.connections))
		for i, c in enumerate(self.connections):
			aux.append(sparse.structured_dot(
						sparse.transpose(c.input), 
						self.params[2][i] * c.inhibition
						))
		aux2 = aux.pop()
		for a in range(len(aux)):
			aux2 = sparse.add(aux2, aux.pop())
			print aux2
		from theano import pp
		print 'out: '
		print pp(aux2)
		update.append((self.params[1],sparse.transpose(sparse.structured_sigmoid(aux2))))
		# Hardcoded!!
		'''update.append((self.params[1],
			sparse.transpose(
				sparse.structured_sigmoid(sparse.structured_dot(
						sparse.transpose(self.connections[0].input), 
						self.params[2][0])))))
		'''
		'''
		update.append((self.params[1], 
		  sparse.transpose(
			sparse.structured_sigmoid(
				sparse.structured_dot(
					sparse.transpose(self.connections[0].input), 	# Input
					self.params[2][0]))))) 							# Weights
		'''
		# Update weights
		''' #Old ones (OJA)
		for i, w in enumerate(self.params[2]):
			update.append( (w,  
				#layer.params[0]))
				sparse.add( 
					w, 
					self.LR[i]*sparse.transpose(
						sparse.structured_dot(self.params[1], self.x_yw[i])
						)
					)
				))
		'''
		for i, w in enumerate(self.params[2]):
			update.append( (w, #w))
				#layer.params[0]))
					sparse.structured_maximum(
						sparse.add(
							w,
							sparse.add(self.xy[i], 
							self.AWW[i])),
					0)
				) )

		return update
Developer: jypuigbo | Project: TheanoNNs | Lines: 62 | Source: theanoCortex04.py


Example 19: bsgd_partition

def bsgd_partition(nn, data, name='sgd', lr=0.025, alpha=0.3, batch_size=500, epochs = 10):
	# train_set is a list of training sets divided into partitions

	train_set_x, train_set_y = data[0]
	valid_set_x, valid_set_y = data[1]
	test_set_x, test_set_y = data[2]


	num_partitions = len(train_set_x)
	print "number of partitions:", num_partitions
	train_set_x = np.asarray(train_set_x)

	num_samples = train_set_x[0].get_value(borrow=True).shape[0] 
	num_batches = num_samples / batch_size 

	layers = nn.layers
	x = T.matrix('x')
	y = T.ivector('y')

	cost = nn.cost(x, y)
	accuracy = nn.calcAccuracy(x, y)
	params = nn.params
	delta_params = nn.delta_params

	print theano.pp(cost)
	# theano.pp(accuracy)

	p_grads = [T.grad(cost=cost, wrt = p) for p in params]  
	# implementing gradient descent with momentum 
	print p_grads
	updates = OrderedDict()
	for dp, gp in zip(delta_params, p_grads):
		updates[dp] = dp*alpha - gp*lr
	for p, dp in zip(params, delta_params):
		updates[p] = p + updates[dp]

	# updates = [(p, p - lr*gp) for p, gp in zip(params, p_grads)]
	index = T.ivector('index')
	ii = T.ivector('ii')
	y_eval = T.ivector('y_eval')

	batch_sgd_train = theano.function(inputs=[ii, index], outputs=[cost, accuracy], updates=updates, givens={x: train_set_x[index], y:train_set_y[index]})

	batch_sgd_valid = theano.function(inputs=[], outputs=nn.calcAccuracy(x, y), givens={x: valid_set_x, y:valid_set_y})

	batch_sgd_test = theano.function(inputs=[], outputs=nn.calcAccuracy(x, y), givens={x: test_set_x, y:test_set_y})

	indices = np.arange(num_samples,  dtype=np.dtype('int32'))
	np.random.shuffle(indices)

	for n in xrange(epochs):
		np.random.shuffle(indices)
		# sup_indices = random.randrange(0, num_partitions)  # dead code: overwritten below
		sup_indices = np.arange(num_partitions, dtype=np.dtype('int32'))
		for j in xrange(num_partitions):
			sup_index = sup_indices[j]
			for i in xrange(num_batches):
				# batch = [sup_index]
				batch = indices[i*batch_size: (i+1)*batch_size]
				batch_sgd_train([sup_index], batch)  # pass the two declared inputs separately

		print "validation accuracy:",  batch_sgd_valid()


	print "test accuracy:", batch_sgd_test()
Developer: adhaka | Project: kthasrdnn | Lines: 65 | Source: sgd.py


Example 20: theano_jac

    return g  # tail of theano_grad, whose definition is truncated in this excerpt

def theano_jac(u, eps_x=1, eps_y=1):
    J = T.concatenate([theano_grad(u[:,:,0], eps_x, eps_y)[:,:,:,None], theano_grad(u[:,:,1], eps_x, eps_y)[:,:,:,None]], axis=3)
    return J.dimshuffle(0, 1, 3, 2)

def np_jac(u):
    J_np = np.empty((u.shape[0], u.shape[1], 2, 2))
    J_np[:,:,0,:] = np.dstack(np.gradient(u[:,:,0]))
    J_np[:,:,1,:] = np.dstack(np.gradient(u[:,:,1]))
    return J_np

u = T.dtensor3('u')

cost_u_norm = (u**2).sum()
print 'cost_u_norm', pp(cost_u_norm)



#test_u = np.random.rand(2, 3, 2)
#print 'yeah'
#print theano.function([u], T.grad((theano_grad(u[:,:,0])**2).sum(), u))(test_u)



J_u = theano_jac(u)
J_u_func = theano.function([u], J_u)

print pp(u)
test_u = np.random.rand(2, 3, 2)
print 'arr', test_u
Developer: hojonathanho | Project: timb | Lines: 31 | Source: test_theano.py



Note: The theano.pp examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by their respective developers; copyright remains with the original authors. Follow each project's license for distribution and use, and do not reproduce without permission.

