This article collects typical usage examples of the theano.tensor.round function in Python. If you have been wondering what exactly T.round does, how to use it, or what real calling code looks like, the hand-picked examples below may help.
The article presents 20 code examples of the round function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
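Before diving into the examples, here is a minimal self-contained sketch of what T.round computes. Passing mode='half_to_even' explicitly matches NumPy's behaviour and silences the default-rounding-mode warning in older Theano releases (see Example 20):

import theano
import theano.tensor as T

x = T.vector('x')
f = theano.function([x], T.round(x, mode='half_to_even'), allow_input_downcast=True)
print(f([0.4, 0.5, 1.5, 2.5]))  # [ 0.  0.  2.  2.] -- halves round to the nearest even integer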
Example 1: compile
def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
    self.optimizer = optimizers.get(optimizer)
    self.loss = objectives.get(loss)
    weighted_loss = weighted_objective(objectives.get(loss))

    # input of model
    self.X_train = self.get_input(train=True)
    self.X_test = self.get_input(train=False)

    self.y_train = self.get_output(train=True)
    self.y_test = self.get_output(train=False)

    # target of model
    self.y = T.zeros_like(self.y_train)
    self.weights = T.ones_like(self.y_train)

    train_loss = weighted_loss(self.y, self.y_train, self.weights)
    test_loss = weighted_loss(self.y, self.y_test, self.weights)

    train_loss.name = 'train_loss'
    test_loss.name = 'test_loss'
    self.y.name = 'y'

    if class_mode == "categorical":
        train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
        test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))
    elif class_mode == "binary":
        train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
        test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
    else:
        raise Exception("Invalid class mode:" + str(class_mode))
    self.class_mode = class_mode
    self.theano_mode = theano_mode

    for r in self.regularizers:
        train_loss = r(train_loss)
    updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)

    if type(self.X_train) == list:
        train_ins = self.X_train + [self.y, self.weights]
        test_ins = self.X_test + [self.y, self.weights]
        predict_ins = self.X_test
    else:
        train_ins = [self.X_train, self.y, self.weights]
        test_ins = [self.X_test, self.y, self.weights]
        predict_ins = [self.X_test]

    self._train = theano.function(train_ins, train_loss,
                                  updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
                                           updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._predict = theano.function(predict_ins, self.y_test,
                                    allow_input_downcast=True, mode=theano_mode)
    self._test = theano.function(test_ins, test_loss,
                                 allow_input_downcast=True, mode=theano_mode)
    self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
                                          allow_input_downcast=True, mode=theano_mode)
Author: 0xa-saline, Project: CAPTCHA-breaking, Lines: 60, Source: models.py
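The binary branch is where T.round does the work: predicted probabilities are rounded to hard 0/1 labels and compared element-wise with the targets. A minimal standalone sketch of that accuracy expression (not part of the original model class):

import theano
import theano.tensor as T

y_true = T.matrix('y_true')
y_prob = T.matrix('y_prob')
accuracy = T.mean(T.eq(y_true, T.round(y_prob)))
acc_fn = theano.function([y_true, y_prob], accuracy, allow_input_downcast=True)
print(acc_fn([[1, 0, 1]], [[0.9, 0.2, 0.4]]))  # 0.666...: the 0.4 rounds to 0, not 1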
Example 2: build
def build(self, output_type):
    #### set up parameters
    self.params += [self.W_hy, self.b_hy]
    for param in self.params:
        self.updates[param] = theano.shared(
            value=np.zeros(
                param.get_value(borrow=True).shape,
                dtype=theano.config.floatX),
            name='updates')

    ### set up regularizers
    self.L1 += T.sum(abs(self.W_hy))
    self.L2_sqr += T.sum(self.W_hy**2)

    ### final prediction formula
    self.y_pred = T.dot(self.get_output(), self.W_hy) + self.b_hy

    self.output_type = output_type
    if self.output_type == 'real':
        self.y = T.matrix(name='y', dtype=theano.config.floatX)
        self.loss = lambda y: Loss.mse(self.y_pred, y)  # y is the input and self.mse(y) is the output
        self.predict = theano.function(inputs=[self.x, ],
                                       outputs=self.y_pred,
                                       mode=mode)
    elif self.output_type == 'binary':
        self.y = T.matrix(name='y', dtype='int32')
        self.p_y_given_x = T.nnet.sigmoid(self.y_pred)
        self.y_out = T.round(self.p_y_given_x)  # round to {0,1}
        self.loss = lambda y: Loss.nll_binary(self.p_y_given_x, y)
        self.predict_proba = theano.function(inputs=[self.x, ],
                                             outputs=self.p_y_given_x,
                                             mode=mode)
        self.predict = theano.function(inputs=[self.x, ],
                                       outputs=T.round(self.p_y_given_x),
                                       mode=mode)
    elif self.output_type == 'softmax':
        self.y = T.vector(name='y', dtype='int32')
        self.p_y_given_x = T.nnet.softmax(self.y_pred)
        self.y_out = T.argmax(self.p_y_given_x, axis=-1)
        self.loss = lambda y: Loss.nll_multiclass(self.p_y_given_x, y)
        self.predict_proba = theano.function(inputs=[self.x, ],
                                             outputs=self.p_y_given_x,
                                             mode=mode)
        self.predict = theano.function(inputs=[self.x, ],
                                       outputs=self.y_out,  # y_out is computed with argmax
                                       mode=mode)
    else:
        raise NotImplementedError
Author: chuckgu, Project: RNN, Lines: 55, Source: Models.py
Example 3: my_activation
def my_activation(input):
    d = 2  # number of decimal places kept
    # quantize the input to d decimal places
    input = input * T.power(10, d)
    input = T.round(input)
    x = input / T.power(10, d)
    # softsign nonlinearity: x / (1 + |x|)
    abs_x = abs(x)
    ret = x / (1. + abs_x)
    # quantize the output to d decimal places as well
    ret = T.round(ret * T.power(10, d)) / T.power(10, d)
    return ret
Author: ma-ver-ick, Project: bell-recognition-training, Lines: 11, Source: test_mlp_003.py
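A usage sketch, assuming my_activation from the example is in scope; the function acts as a softsign squashed to two decimal places on both input and output:

import theano
import theano.tensor as T

z = T.vector('z')
f = theano.function([z], my_activation(z), allow_input_downcast=True)
print(f([0.333, -1.0]))  # [ 0.25 -0.5 ]: 0.33/1.33 rounds to 0.25, and -1/2 is exactly -0.5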
Example 4: compile
def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
    self.optimizer = optimizers.get(optimizer)
    self.loss = objectives.get(loss)

    # input of model
    self.X_train = self.get_input(train=True)
    self.X_test = self.get_input(train=False)

    self.y_train = self.get_output(train=True)
    self.y_test = self.get_output(train=False)

    # target of model
    self.y = T.zeros_like(self.y_train)

    train_loss = self.loss(self.y, self.y_train)
    test_score = self.loss(self.y, self.y_test)

    if class_mode == "categorical":
        # just compare whether the most probable class matches the target
        train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
        test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))
    elif class_mode == "binary":
        # round predictions to a 0/1 vector such as [0,0,1,0], compare each class of
        # each sample, then accumulate and divide by n*k
        train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
        test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
    else:
        raise Exception("Invalid class mode:" + str(class_mode))
    self.class_mode = class_mode

    updates = self.optimizer.get_updates(self.params, self.regularizers, self.constraints, train_loss)

    if type(self.X_train) == list:
        train_ins = self.X_train + [self.y]
        test_ins = self.X_test + [self.y]
        predict_ins = self.X_test
    else:
        train_ins = [self.X_train, self.y]
        test_ins = [self.X_test, self.y]
        predict_ins = [self.X_test]

    # inputs are [[x1,x2,x3...],[y1,y2,y3]]; x1 and y1 are both vectors
    self._train = theano.function(train_ins, train_loss,
                                  updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
                                           updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._predict = theano.function(predict_ins, self.y_test,
                                    allow_input_downcast=True, mode=theano_mode)
    self._test = theano.function(test_ins, test_score,
                                 allow_input_downcast=True, mode=theano_mode)
    self._test_with_acc = theano.function(test_ins, [test_score, test_accuracy],
                                          allow_input_downcast=True, mode=theano_mode)
Author: OlafLee, Project: keras, Lines: 52, Source: models.py
Example 5: __init__
def __init__(self, input, n_cents, centers, n_dims, reg):
    bias_init = randn(n_dims)
    cents_init = centers
    sigmas_init = np.abs(randn(n_cents).reshape((n_cents,)))
    weights_init = randn(n_cents * n_dims).reshape((n_cents, n_dims))

    # regularization
    self.reg = reg

    self.b = theano.shared(bias_init, name='b', borrow=True)  # bias
    self.c = theano.shared(cents_init, name='c', borrow=True)
    self.s = theano.shared(sigmas_init, name='s', borrow=True)
    self.w = theano.shared(weights_init, name='w', borrow=True)

    # thanks to comments by Pascal on the theano-users group,
    # the idea is to use 3d tensors
    C = self.c[np.newaxis, :, :]
    X = input[:, np.newaxis, :]
    difnorm = T.sum((C - X)**2, axis=-1)
    a = T.exp(-difnorm * (self.s**2))

    self.prob = T.nnet.sigmoid(T.dot(a, self.w) + self.b)
    self.pred = T.round(self.prob)
    self.pred_func = theano.function([input], outputs=self.pred)
    self.prob_func = theano.function([input], outputs=self.prob)
Author: ChenglongChen, Project: RBFnet, Lines: 28, Source: rbf_theano_2.py
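A hypothetical usage sketch. Only the constructor is shown above, so the class name RBFNet, the shapes, and the call pattern are assumptions; randn is numpy.random.randn, as used inside the constructor:

import theano
import theano.tensor as T
from numpy.random import randn

X = T.matrix('X')
centers = randn(10, 5)  # 10 RBF centers in a 5-D input space
net = RBFNet(X, n_cents=10, centers=centers, n_dims=3, reg=0.01)  # hypothetical class name
data = randn(4, 5).astype(theano.config.floatX)
print(net.pred_func(data))  # (4, 3) matrix of sigmoid outputs rounded to hard 0/1 labels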
Example 6: get_pseudo_likelihood_cost
def get_pseudo_likelihood_cost(self, updates):
    """Stochastic approximation to the pseudo-likelihood.
    I have no idea why to do this.
    """
    # index of bit i in expression p(x_i | x_{\i})
    bit_i_idx = theano.shared(value=0, name='bit_i_idx')

    # binarize the input image by rounding to nearest integer
    xi = T.round(self.input)  # input? It seems that the sample result has nothing to do with the cost...

    # calculate free energy for the given bit configuration
    fe_xi = self.free_energy(xi)

    # flip bit x_i of matrix xi and preserve all other bits x_{\i}
    xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])

    # calculate free energy with bit flipped
    fe_xi_flip = self.free_energy(xi_flip)

    cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip - fe_xi)))

    updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
    return cost
Author: playcoin, Project: Python_study, Lines: 25, Source: rbm_test.py
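For context (standard RBM theory rather than anything stated in the snippet): for binary units, p(x_i | x_{\i}) = sigmoid(FE(x_flip) - FE(x)), where x_flip is x with bit i flipped, so the log pseudo-likelihood is sum_i log sigmoid(FE(x_flip_i) - FE(x)). The code estimates that sum by evaluating a single bit per call and scaling by n_visible, while the update cycles bit_i_idx through every position; T.round appears only to binarize a possibly real-valued input before the free energies are compared. Example 18 below is the same function with the original Deep Learning Tutorials comments intact.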
Example 7: sigmoid_readout_old
def sigmoid_readout_old(operators, v_in, h_L, g):
    """Sigmoid readout layer. Cost is the binary crossentropy and
    monitor is RMSE.
    :param operators: list of [weight, bias] with shapes (n_hidden, n_visible)
        and (n_visible, )
    :param h_L: shape (timesteps, n_hidden)
    :return: shape (timesteps, n_visible)
    """
    weight = operators[0]
    bias = operators[1]
    v_pred = g(T.dot(h_L, weight) + bias)  # broadcastable bias??
    v_pred_c = T.clip(v_pred, 1.0e-7, 1.0 - 1.0e-7)
    v_in_c = T.clip(v_in, 1.0e-7, 1.0 - 1.0e-7)

    # Cost:
    cost = -T.xlogx.xlogy0(v_in_c[1:], v_pred_c[:-1]) - T.xlogx.xlogy0(1 - v_in_c[1:], 1 - v_pred_c[:-1])
    cost = cost.sum() / v_in.shape[0]

    # Sample is just rounded to nearest integer:
    v_sample = T.round(v_pred)
    v_sample_c = T.clip(v_sample, 1.0e-7, 1.0 - 1.0e-7)

    # Monitor (needs to return something... for now):
    monitor = -T.xlogx.xlogy0(v_in_c[1:], v_sample_c[:-1]) - T.xlogx.xlogy0(1 - v_in_c[1:], 1 - v_sample_c[:-1])
    monitor = monitor.sum() / v_in.shape[0]

    return v_sample, cost, monitor, None
Author: harpone, Project: DerpRNN, Lines: 27, Source: layers.py
Example 8: sigmoid_readout
def sigmoid_readout(operators, v_in, h_L, external):
    """Sigmoid readout layer. Cost is the binary crossentropy and
    monitor is RMSE.
    :param operators: list of [weight, bias] with shapes (n_hidden, n_visible)
        and (n_visible, )
    :param h_L: shape (timesteps, n_hidden)
    :return: shape (timesteps, n_visible)
    """
    weight = operators[0]
    bias = operators[1]
    v_pred = sigmoid(T.dot(h_L, weight) + bias)  # broadcastable bias??
    v_pred_c = T.clip(v_pred, 1.0e-7, 1.0 - 1.0e-7)
    v_in_c = T.clip(v_in, 1.0e-7, 1.0 - 1.0e-7)

    # Sample is just rounded to nearest integer:
    v_sample = T.round(v_pred)
    v_sample_c = T.clip(v_sample, eps, 1.0 - eps)  # `eps` comes from the surrounding module (not shown here)

    # Cost (`crossent` is the module's cross-entropy helper):
    # cost = 1000 * ((v_pred[:-1] - v_in[1:]) ** 2).mean()
    # cost = -T.xlogx.xlogy0(v_in_c[1:], v_pred_c[:-1]) - \
    #        T.xlogx.xlogy0(1 - v_in_c[1:], 1 - v_pred_c[:-1])
    cost = crossent(v_pred_c[:-1], v_in_c[1:])  # TODO: v_sample_c !!!
    cost = cost.mean()

    # Monitor:
    # monitor = -T.xlogx.xlogy0(v_in_c[1:], v_sample_c[:-1]) - \
    #           T.xlogx.xlogy0(1 - v_in_c[1:], 1 - v_sample_c[:-1])
    monitor = crossent(v_sample_c[:-1], v_in_c[1:])
    monitor = monitor.mean()

    return v_sample, cost, monitor, None
Author: harpone, Project: DerpRNN, Lines: 32, Source: layers.py
Example 9: __init__
def __init__(self, n, p, *args, **kwargs):
    super(Multinomial, self).__init__(*args, **kwargs)

    p = p / tt.sum(p, axis=-1, keepdims=True)
    n = np.squeeze(n)  # works also if n is a tensor

    if len(self.shape) > 1:
        m = self.shape[-2]
        try:
            assert n.shape == (m,)
        except (AttributeError, AssertionError):
            n = n * tt.ones(m)
        self.n = tt.shape_padright(n)
        self.p = p if p.ndim > 1 else tt.shape_padleft(p)
    elif n.ndim == 1:
        self.n = tt.shape_padright(n)
        self.p = p if p.ndim > 1 else tt.shape_padleft(p)
    else:
        # n is a scalar, p is a 1d array
        self.n = tt.as_tensor_variable(n)
        self.p = tt.as_tensor_variable(p)

    self.mean = self.n * self.p
    mode = tt.cast(tt.round(self.mean), 'int32')
    diff = self.n - tt.sum(mode, axis=-1, keepdims=True)
    inc_bool_arr = tt.abs_(diff) > 0
    mode = tt.inc_subtensor(mode[inc_bool_arr.nonzero()],
                            diff[inc_bool_arr.nonzero()])
    self.mode = mode
Author: bballamudi, Project: pymc3, Lines: 29, Source: multivariate.py
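The last few lines implement a round-and-correct trick: the mode is approximated by rounding n*p, and any count lost to rounding is added back so the result still sums to n. A plain NumPy illustration of the same idea (an illustration only, not pymc3 code):

import numpy as np

n, p = 10, np.array([0.25, 0.25, 0.5])
mode = np.round(n * p).astype(int)  # banker's rounding gives [2, 2, 5], which sums to 9
diff = n - mode.sum()               # 1 count was lost in rounding
if diff != 0:
    mode[0] += diff                 # push the residual back into one component
print(mode, mode.sum())             # [3 2 5] 10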
Example 10: _glimpse_sensor
def _glimpse_sensor(self, x_t, l_p):
    """
    Parameters:
        x_t - 28x28 image
        l_p - 2x1 focus vector
    Returns:
        12x4 matrix (three 4x4 glimpses stacked along axis 0)
    """
    # Turn l_p into the top-left point of the rectangle
    l_p = l_p * 14 + 14 - 2
    l_p = T.cast(T.round(l_p), "int32")

    l_p = l_p * (l_p >= 0)
    l_p = l_p * (l_p < 24) + (l_p >= 24) * 23

    l_p2 = l_p - 2
    l_p2 = l_p2 * (l_p2 >= 0)
    l_p2 = l_p2 * (l_p2 < 20) + (l_p2 >= 20) * 19

    l_p3 = l_p - 6
    l_p3 = l_p3 * (l_p3 >= 0)
    l_p3 = l_p3 * (l_p3 < 16) + (l_p3 >= 16) * 15

    glimpse_1 = x_t[l_p[0]: l_p[0] + 4][:, l_p[1]: l_p[1] + 4]
    glimpse_2 = x_t[l_p2[0]: l_p2[0] + 8][:, l_p2[1]: l_p2[1] + 8]
    glimpse_2 = theano.tensor.signal.downsample.max_pool_2d(glimpse_2, (2, 2))
    glimpse_3 = x_t[l_p3[0]: l_p3[0] + 16][:, l_p3[1]: l_p3[1] + 16]
    glimpse_3 = theano.tensor.signal.downsample.max_pool_2d(glimpse_3, (4, 4))
    return T.concatenate([glimpse_1, glimpse_2, glimpse_3])
Author: JunjieHu, Project: deepy, Lines: 26, Source: baseline_model.py
Example 11: __init__
def __init__(self, data, n_in, srng, p, train_flag):
    """
    This implements the dropout layer in a neural network.
    :type data: theano.tensor.dmatrix
    :param data: a symbolic tensor of shape (n_examples, n_in)
    :type srng: theano.sandbox.rng_mrg.MRG_RandomStreams
    :param srng: symbolic random number generator
    :type n_in: int
    :param n_in: dimensionality of input
    :type p: float
    :param p: the probability of dropping out
    :type train_flag: symbolic boolean
    :param train_flag: whether or not it's training
    """
    self.input = data
    self.in_shape = n_in
    self.params = []
    # rounding a U(0,1) draw keeps each unit with probability 0.5
    rand = T.round(srng.uniform(size=(n_in,), ndim=1))
    multiplier = 1.0 / p
    self.output = T.switch(train_flag, data * rand, data * multiplier)
Author: suixin661014, Project: Project-5168, Lines: 31, Source: layers.py
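The core trick here is that rounding a U(0,1) draw yields a Bernoulli mask with p = 0.5. A self-contained sketch of just the mask:

import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=42)
mask = T.round(srng.uniform(size=(8,)))  # each entry is 0 or 1 with probability 0.5
f = theano.function([], mask)
print(f())  # e.g. [ 1.  0.  1.  1.  0.  0.  1.  0.]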
Example 12: compile
def compile(self, optimizer, loss, class_mode='categorical'):
    self.optimizer = optimizer
    self.loss = objectives.get(loss)

    self.X_train = self.get_input()      # symbolic variable
    self.y_train = self.get_output()     # symbolic variable
    self.y = T.zeros_like(self.y_train)  # symbolic variable

    train_loss = self.loss(self.y, self.y_train)

    if class_mode == 'categorical':
        train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
    elif class_mode == 'binary':
        train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
    else:
        raise Exception("Invalid class mode: " + str(class_mode))
    self.class_mode = class_mode

    # updates = self.optimizer.get_updates(train_loss, self.params)
    self.grad = T.grad(cost=train_loss, wrt=self.params, disconnected_inputs='raise')
    updates = []
    for p, g in zip(self.params, self.grad):
        # note: this draws one Python-level random constant per parameter at
        # compile time instead of using the gradient g
        updates.append((p, p - random.uniform(-0.3, 1)))

    if type(self.X_train) == list:
        train_ins = self.X_train + [self.y]
    else:
        train_ins = [self.X_train, self.y]

    self._train = theano.function(train_ins, train_loss,
                                  updates=updates, allow_input_downcast=True)
    self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
                                           updates=updates, allow_input_downcast=True)
Author: punitshah11, Project: diabetic_retinopathy, Lines: 34, Source: core.py
Example 13: __init__
def __init__(self, input, n_in, n_out, W=None, b=None):
    """ Initialize the parameters of the logistic regression
    :type input: theano.tensor.TensorType
    :param input: symbolic variable that describes the input of the
                  architecture (one minibatch)
    :type n_in: int
    :param n_in: number of input units, the dimension of the space in
                 which the datapoints lie
    :type n_out: int
    :param n_out: number of output units, the dimension of the space in
                  which the labels lie
    :type W: theano.tensor.TensorType or None
    :param W: optional initial weight matrix of shape (n_in, n_out)
    :type b: theano.tensor.TensorType or None
    :param b: optional initial bias vector of length n_out
    """
    if W is None:
        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        self.W = theano.shared(
            value=np.zeros(
                (n_in, n_out),
                dtype=theano.config.floatX
            ),
            name='W',
            borrow=True
        )
    else:
        self.W = W

    if b is None:
        # initialize the biases b as a vector of n_out 0s
        self.b = theano.shared(
            value=np.zeros(
                (n_out,),
                dtype=theano.config.floatX
            ),
            name='b',
            borrow=True
        )
    else:
        self.b = b

    # output
    self.output = T.nnet.sigmoid(T.dot(input, self.W) + self.b)  # batch_size x 1024
    self.thresh = T.round(self.output)

    # parameters of the model
    self.params = [self.W, self.b]  # W: 1024 x 8100, b: 1024 x 1

    # keep track of model input
    self.input = input
Author: odysszis, Project: AML, Lines: 60, Source: logisticReg.py
Example 14: __init__
def __init__(self, input, n_in, n_out):
    # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
    self.W = theano.shared(
        value=numpy.zeros(
            (n_in, n_out),
            dtype=theano.config.floatX
        ),
        name='W',
        borrow=True
    )
    # initialize the biases b as a vector of n_out 0s
    self.b = theano.shared(
        value=numpy.zeros(
            (n_out,),
            dtype=theano.config.floatX
        ),
        name='b',
        borrow=True
    )
    self.p_y_given_x = (T.dot(input, self.W) + self.b)
    self.y_pred = T.round(self.p_y_given_x)
    self.params = [self.W, self.b]
    self.input = input
Author: karishma14, Project: DAE, Lines: 27, Source: svm.py
Example 15: to_fixed_point_theano
def to_fixed_point_theano(input, no_bits, no_int_bits):
    # scale by the number of fractional bits, round, clip, and rescale
    scale = T.cast(2. ** (no_bits - no_int_bits), theano.config.floatX)
    max_val = T.cast((2. ** no_bits) - 1, theano.config.floatX)
    scaled = input * scale
    scaled = T.round(scaled)
    scaled = T.clip(scaled, -max_val, max_val)
    return scaled / scale
Author: gplhegde, Project: theano-xnor-net, Lines: 7, Source: fxp_helper.py
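A usage sketch, assuming to_fixed_point_theano from the example is in scope. With 8 total bits and 1 integer bit, values snap to a grid of 2**-7 = 0.0078125:

import theano
import theano.tensor as T

x = T.vector('x')
quantize = theano.function([x], to_fixed_point_theano(x, no_bits=8, no_int_bits=1),
                           allow_input_downcast=True)
print(quantize([0.1234, 1.9]))  # ~[0.125, 1.8984375]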
Example 16: computeOutput
def computeOutput(self, y_pred):
    if self.otype == Connection.Output_Type_Binary:
        self.dst.output = T.round(y_pred)
    if self.otype == Connection.Output_Type_SoftMax:
        self.dst.output = T.argmax(y_pred, axis=1)
Author: quynhdtn, Project: DL, Lines: 7, Source: Connection.py
Example 17: hamming_loss
def hamming_loss(y_true, y_predicted):
    """
    note - works on n-dim arrays, means across the final axis
    note - we round predicted because float probabilities would not work
    """
    return T.neq(y_true, T.round(y_predicted)).astype(theano.config.floatX).mean(axis=-1)
Author: fdoperezi, Project: santander, Lines: 7, Source: classification.py
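A usage sketch, assuming hamming_loss from the example is in scope; it returns the fraction of mismatched labels per sample:

import theano
import theano.tensor as T

y_true = T.matrix('y_true')
y_prob = T.matrix('y_prob')
loss_fn = theano.function([y_true, y_prob], hamming_loss(y_true, y_prob),
                          allow_input_downcast=True)
print(loss_fn([[1, 0, 1, 0]], [[0.9, 0.2, 0.4, 0.1]]))  # [ 0.25]: one of four labels is wrong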
Example 18: get_pseudo_likelihood_cost
def get_pseudo_likelihood_cost(self, updates):
    """Stochastic approximation to the pseudo-likelihood"""
    # index of bit i in expression p(x_i | x_{\i})
    bit_i_idx = theano.shared(value=0, name='bit_i_idx')

    # binarize the input image by rounding to nearest integer
    xi = T.round(self.input)

    # calculate free energy for the given bit configuration
    fe_xi = self.free_energy(xi)

    # flip bit x_i of matrix xi and preserve all other bits x_{\i}
    # Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
    # the result to xi_flip, instead of working in place on xi.
    xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])

    # calculate free energy with bit flipped
    fe_xi_flip = self.free_energy(xi_flip)

    # equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
    cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip -
                                                        fe_xi)))

    # increment bit_i_idx % number as part of updates
    updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible

    return cost
Author: JakeMick, Project: kaggle, Lines: 28, Source: rbm-norb.py
Example 19: binarization
def binarization(W, H, binary=True, deterministic=False, stochastic=False, srng=None):
    # (deterministic == True) <-> test-time <-> inference-time
    if not binary or (deterministic and stochastic):
        # print("not binary")
        Wb = W
    else:
        # [-1,1] -> [0,1]
        Wb = hard_sigmoid(W / H)
        # Wb = T.clip(W/H,-1,1)

        # Stochastic BinaryConnect
        if stochastic:
            # print("stoch")
            Wb = T.cast(srng.binomial(n=1, p=Wb, size=T.shape(Wb)), theano.config.floatX)
        # Deterministic BinaryConnect (round to nearest)
        else:
            # print("det")
            Wb = T.round(Wb)

        # 0 or 1 -> -1 or 1
        Wb = T.cast(T.switch(Wb, H, -H), theano.config.floatX)

    return Wb
Author: TianweiXing, Project: BNN, Lines: 28, Source: binary_net.py
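A minimal sketch of the deterministic path. The hard_sigmoid helper below is an assumption modelled on the BinaryConnect code base (clip((x+1)/2, 0, 1)); only binarization itself appears in the snippet above:

import theano
import theano.tensor as T

def hard_sigmoid(x):
    return T.clip((x + 1.) / 2., 0., 1.)  # assumption: matches the project's helper

W = T.matrix('W')
H = 1.0
Wb = T.round(hard_sigmoid(W / H))                        # [-1,1] -> {0,1}
Wb = T.cast(T.switch(Wb, H, -H), theano.config.floatX)   # {0,1} -> {-H,+H}
f = theano.function([W], Wb, allow_input_downcast=True)
print(f([[0.3, -0.7]]))  # [[ 1. -1.]]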
Example 20: tround
def tround(*args, **kwargs):
    """
    Temporary function to silence round warning in Theano. Please remove
    when the warning disappears.
    """
    kwargs['mode'] = 'half_to_even'
    return tt.round(*args, **kwargs)
Author: alexander-belikov, Project: pymc3, Lines: 7, Source: math.py
Note: The theano.tensor.round examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors, and distribution and use should follow each project's license. Do not repost without permission.