This article collects typical usage examples of the Python function theano.tensor.floor. If you have been wondering what exactly floor does, how to use it, or what real code that calls it looks like, the hand-picked examples below should help.
Twenty code examples of the floor function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python examples.
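Before the project examples, a quick standalone demonstration may help. This is a minimal sketch (variable names chosen here for illustration, not taken from any project below) showing that T.floor rounds every element toward negative infinity, which is why many of the examples that follow pair it with T.cast to build integer indices:

import theano
import theano.tensor as T

x = T.vector('x')
floor_fn = theano.function([x], T.floor(x), allow_input_downcast=True)
print(floor_fn([1.7, -1.7, 2.0]))  # [ 1. -2.  2.] -- note that -1.7 floors to -2, not -1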
Example 1: quantized_bprop

def quantized_bprop(self, cost):
    """
    bprop equals:
    (activation_prime) *elementwise-multiply* error_signal_in * (rep of previous layer)
    (rep of previous layer) is recorded as self.x during the fprop() pass.
    Here we quantize (rep of previous layer) and leave the rest as it is.
    """
    # the lower 2**(integer power)
    index_low = T.switch(self.x > 0., T.floor(T.log2(self.x)), T.floor(T.log2(-self.x)))
    index_low = T.clip(index_low, -4, 3)
    sign = T.switch(self.x > 0., 1., -1.)
    # index_up = index_low + 1  # the upper 2**(integer power), not used explicitly
    p_up = sign * self.x / 2 ** (index_low) - 1  # probability of rounding up to the upper power
    srng = theano.sandbox.rng_mrg.MRG_RandomStreams(self.rng.randint(999999))
    index_random = index_low + srng.binomial(n=1, p=p_up, size=T.shape(self.x), dtype=theano.config.floatX)
    quantized_rep = sign * 2 ** index_random
    # NOTE: there is something wrong with this hand-rolled backprop: the code
    # uses batch normalization, but this explicit computation does not account
    # for the gradients introduced by BN.
    # error = self.activation_prime(self.z) * error_signal_in
    error = T.grad(cost=cost, wrt=self.z)
    self.dEdW = T.dot(quantized_rep.T, error)
    # self.dEdW = T.dot(self.x.T, error)
    self.dEdb = T.grad(cost=cost, wrt=self.b)
    if self.BN == True:
        self.dEda = T.grad(cost=cost, wrt=self.a)

Author: hantek | Project: binary_conv | Lines: 29 | Source: layer_m.py
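Example 1 rounds each activation stochastically to one of its two neighboring powers of two, choosing the upper power with probability equal to the fractional distance, so the quantization is unbiased in expectation. A standalone NumPy sketch of the same trick (the function name and use of np.random are mine; the [-4, 3] exponent clip mirrors the code above):

import numpy as np

def quantize_pow2(x, rng=np.random):
    sign = np.where(x > 0., 1., -1.)
    index_low = np.clip(np.floor(np.log2(np.abs(x))), -4, 3)
    p_up = np.abs(x) / 2.0 ** index_low - 1.0  # fractional distance to the upper power
    index = index_low + (rng.uniform(size=x.shape) < p_up)  # round up with prob. p_up
    return sign * 2.0 ** index

x = np.array([0.3, -0.75, 1.6])
print(quantize_pow2(x))  # e.g. [ 0.25 -1.    2.  ]; each entry is +/- a power of two

For instance, 0.3 lies between 2**-2 = 0.25 and 2**-1 = 0.5 and is rounded up with probability 0.3/0.25 - 1 = 0.2, so the expected output is 0.8*0.25 + 0.2*0.5 = 0.3.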
Example 2: ShiftConv

def ShiftConv(w_t_g, s_t, N):
    # map the shift signal from [0, 1] to [-1, 1]
    shift = 2. * s_t - 1.
    Z = T.mod(shift + N, N)
    # the fractional part decides how to blend the two integer rotations
    simj = 1 - (Z - T.floor(Z))
    imj = T.mod(T.arange(N) + T.iround(T.floor(Z)), N)  # index map, unused below
    w_t_g_roll_1 = T.roll(w_t_g, -T.iround(T.floor(Z)))
    w_t_g_roll_2 = T.roll(w_t_g, -(T.iround(T.floor(Z)) + 1))
    w_t_s = w_t_g_roll_1 * simj + w_t_g_roll_2 * (1 - simj)
    return w_t_s

Author: chiggum | Project: Neural-Turing-Machines | Lines: 9 | Source: ntm_v1.py
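ShiftConv realizes the circular shift used in Neural Turing Machine addressing: the integer part of the shift rotates the attention vector with T.roll, and the fractional part linearly blends two adjacent rotations. A hedged usage sketch, assuming the ShiftConv function above is in scope (the values are illustrative):

import theano
import theano.tensor as T

w = T.vector('w')   # attention weights over N memory slots
s = T.scalar('s')   # shift signal in [0, 1]; 2*s - 1 maps it to [-1, 1]
shift_fn = theano.function([w, s], ShiftConv(w, s, 5), allow_input_downcast=True)

# s = 0.5 maps to a shift of zero, so the weights should come back unchanged
print(shift_fn([1., 0., 0., 0., 0.], 0.5))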
Example 3: _interpolate

def _interpolate(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, 'float32')
    width_f = T.cast(width, 'float32')
    zero = T.zeros([], dtype='int64')
    max_y = im.shape[1] - 1
    max_x = im.shape[2] - 1

    # scale indices from [-1, 1] to [0, width/height]
    x = (x + 1.0) * width_f / 2.0
    y = (y + 1.0) * height_f / 2.0

    x0 = T.cast(T.floor(x), 'int64')
    x1 = x0 + 1
    y0 = T.cast(T.floor(y), 'int64')
    y1 = y0 + 1

    # clip indices to ensure they are not out of bounds
    x0 = T.clip(x0, zero, max_x)
    x1 = T.clip(x1, zero, max_x)
    y0 = T.clip(y0, zero, max_y)
    y1 = T.clip(y1, zero, max_y)

    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e. [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version.
    dim2 = width
    dim1 = width * height
    base = _repeat(
        T.arange(num_batch, dtype='int32') * dim1, out_height * out_width)
    base_y0 = base + y0 * dim2
    base_y1 = base + y1 * dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1

    # use the indices to look up pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]

    # calculate the interpolated values
    x0_f = T.cast(x0, 'float32')
    x1_f = T.cast(x1, 'float32')
    y0_f = T.cast(y0, 'float32')
    y1_f = T.cast(y1, 'float32')
    wa = ((x1_f - x) * (y1_f - y)).dimshuffle(0, 'x')
    wb = ((x1_f - x) * (y - y0_f)).dimshuffle(0, 'x')
    wc = ((x - x0_f) * (y1_f - y)).dimshuffle(0, 'x')
    wd = ((x - x0_f) * (y - y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa * Ia, wb * Ib, wc * Ic, wd * Id], axis=0)
    return output

Author: LamDang | Project: Lasagne | Lines: 56 | Source: special.py
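The four weights in Example 3 implement standard bilinear interpolation: with x1 = x0 + 1 and y1 = y0 + 1 they form a convex combination, so they always sum to one. A quick NumPy check with illustrative coordinates:

import numpy as np

x, y = 2.3, 4.6
x0, y0 = np.floor(x), np.floor(y)
x1, y1 = x0 + 1, y0 + 1
wa = (x1 - x) * (y1 - y)   # weight of the top-left neighbor
wb = (x1 - x) * (y - y0)   # bottom-left
wc = (x - x0) * (y1 - y)   # top-right
wd = (x - x0) * (y - y0)   # bottom-right
print(wa + wb + wc + wd)   # 1.0 -- a convex combination of the four pixels

(At the image border, where Example 3 clips the indices, the weights no longer sum exactly to one.)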
Example 4: _interpolate

def _interpolate(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)

    # clip coordinates to [-1, 1]
    x = T.clip(x, -1, 1)
    y = T.clip(y, -1, 1)
    # scale coordinates from [-1, 1] to [0, width/height - 1]
    x = (x + 1) / 2 * (width_f - 1)
    y = (y + 1) / 2 * (height_f - 1)

    # Obtain the indices of the 2x2 pixel neighborhood surrounding the
    # coordinates; we need those in floatX for interpolation and in int64
    # for indexing. For indexing, take care they do not extend past the image.
    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x1_f = x0_f + 1
    y1_f = y0_f + 1
    x0 = T.cast(x0_f, 'int64')
    y0 = T.cast(y0_f, 'int64')
    x1 = T.cast(T.minimum(x1_f, width_f - 1), 'int64')
    y1 = T.cast(T.minimum(y1_f, height_f - 1), 'int64')

    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e. [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version.
    dim2 = width
    dim1 = width * height
    base = T.repeat(
        T.arange(num_batch, dtype='int64') * dim1, out_height * out_width)
    base_y0 = base + y0 * dim2
    base_y1 = base + y1 * dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1

    # use the indices to look up pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]

    # calculate the interpolated values
    wa = ((x1_f - x) * (y1_f - y)).dimshuffle(0, 'x')
    wb = ((x1_f - x) * (y - y0_f)).dimshuffle(0, 'x')
    wc = ((x - x0_f) * (y1_f - y)).dimshuffle(0, 'x')
    wd = ((x - x0_f) * (y - y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa * Ia, wb * Ib, wc * Ic, wd * Id], axis=0)
    assert str(output.dtype) == theano.config.floatX, str(output.dtype)
    return output

Author: BioroboticsLab | Project: diktya | Lines: 56 | Source: image_transform.py
Example 5: _interpolate

def _interpolate(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)

    # scale indices from [-1, 1] to [0, width/height]
    x = (x + 1) / 2 * width_f
    y = (y + 1) / 2 * height_f

    # clip indices to ensure they are not out of bounds
    max_x = width_f - 1
    max_y = height_f - 1
    x0 = T.clip(x, 0, max_x)
    x1 = T.clip(x + 1, 0, max_x)
    y0 = T.clip(y, 0, max_y)
    y1 = T.clip(y + 1, 0, max_y)

    # we need floatX for interpolation and int64 for indexing
    x0_f = T.floor(x0)
    x1_f = T.floor(x1)
    y0_f = T.floor(y0)
    y1_f = T.floor(y1)
    x0 = T.cast(x0, 'int64')
    x1 = T.cast(x1, 'int64')
    y0 = T.cast(y0, 'int64')
    y1 = T.cast(y1, 'int64')

    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e. [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version.
    dim2 = width
    dim1 = width * height
    base = T.repeat(
        T.arange(num_batch, dtype='int64') * dim1, out_height * out_width)
    base_y0 = base + y0 * dim2
    base_y1 = base + y1 * dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1

    # use the indices to look up pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]

    # calculate the interpolated values
    wa = ((x1_f - x) * (y1_f - y)).dimshuffle(0, 'x')
    wb = ((x1_f - x) * (y - y0_f)).dimshuffle(0, 'x')
    wc = ((x - x0_f) * (y1_f - y)).dimshuffle(0, 'x')
    wd = ((x - x0_f) * (y - y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa * Ia, wb * Ib, wc * Ic, wd * Id], axis=0)
    return output

Author: rmanor | Project: Lasagne | Lines: 56 | Source: special.py
Example 6: MASK_blanking

def MASK_blanking(x_i):
    # indices of the first and last non-zero values in x_i
    idxs = T.nonzero(x_i)[0][[1, -1]]
    # their difference = the number of non-zero values
    no_values = idxs[1] - idxs[0]
    # move each index inwards by a proportion of that count
    # (blank_proportion is captured from the enclosing scope)
    idxs0 = T.cast(T.floor(idxs[0] + no_values * blank_proportion), 'int32')
    idxs1 = T.cast(T.floor(idxs[1] - no_values * blank_proportion), 'int32')
    # return a vector with a tighter mask than x_i
    return T.set_subtensor(T.zeros_like(x_i)[idxs0:idxs1], T.alloc(1., idxs1 - idxs0))

Author: LarsHH | Project: reconstructionAE | Lines: 10 | Source: non_fixed_crops_vae.py
Example 7: _interpolate

def _interpolate(im, x, y, out_height, out_width, dtype='float32'):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, dtype=dtype)
    width_f = T.cast(width, dtype=dtype)

    # keep only the coordinates that fall inside the [0, 1] x [0, 1] square
    idx = ((x >= 0) & (x <= 1) & (y >= 0) & (y <= 1)).nonzero()[0]
    # scale coordinates from [0, 1] to [0, width/height - 1]
    # (the [-1, 1] variant is kept commented out)
    # x = (x + 1) / 2 * (width_f - 1)
    # y = (y + 1) / 2 * (height_f - 1)
    x = x * (width_f - 1)
    y = y * (height_f - 1)

    # Obtain the indices of the 2x2 pixel neighborhood surrounding the
    # coordinates; we need those in floatX for interpolation and in int64
    # for indexing. For indexing, take care they do not extend past the image.
    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x1_f = x0_f + 1
    y1_f = y0_f + 1
    x0 = T.cast(x0_f, 'int64')
    y0 = T.cast(y0_f, 'int64')
    x1 = T.cast(T.minimum(x1_f, width_f - 1), 'int64')
    y1 = T.cast(T.minimum(y1_f, height_f - 1), 'int64')

    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e. [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version.
    dim2 = width
    dim1 = width * height
    base = T.repeat(
        T.arange(num_batch, dtype='int64') * dim1, out_height * out_width)
    base_y0 = base + y0 * dim2
    base_y1 = base + y1 * dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1

    # use the indices to look up pixels, restricted to the in-bounds samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a[idx]]
    Ib = im_flat[idx_b[idx]]
    Ic = im_flat[idx_c[idx]]
    Id = im_flat[idx_d[idx]]

    # calculate the interpolated values
    wa = ((x1_f - x) * (y1_f - y)).dimshuffle(0, 'x')[idx, :]
    wb = ((x1_f - x) * (y - y0_f)).dimshuffle(0, 'x')[idx, :]
    wc = ((x - x0_f) * (y1_f - y)).dimshuffle(0, 'x')[idx, :]
    wd = ((x - x0_f) * (y - y0_f)).dimshuffle(0, 'x')[idx, :]
    output = T.sum([wa * Ia, wb * Ib, wc * Ic, wd * Id], axis=0)

    # scatter the interpolated values back into a zero canvas
    # out = T.zeros_like(((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x'))
    out = T.zeros_like(im_flat)
    return T.set_subtensor(out[idx, :], output)

Author: yancz1989 | Project: text_warper | Lines: 55 | Source: layers.py
Example 8: _interpolate

def _interpolate(self, im, x, y, downsample_factor):
    # constants
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, floatX)
    width_f = T.cast(width, floatX)
    out_height = T.cast(height_f // downsample_factor, 'int64')
    out_width = T.cast(width_f // downsample_factor, 'int64')
    zero = T.zeros([], dtype='int64')
    max_y = T.cast(im.shape[1] - 1, 'int64')
    max_x = T.cast(im.shape[2] - 1, 'int64')

    # scale indices from [-1, 1] to [0, width/height]
    x = (x + 1.0) * width_f / 2.0
    y = (y + 1.0) * height_f / 2.0

    # do the sampling
    x0 = T.cast(T.floor(x), 'int64')
    x1 = x0 + 1
    y0 = T.cast(T.floor(y), 'int64')
    y1 = y0 + 1
    x0 = T.clip(x0, zero, max_x)
    x1 = T.clip(x1, zero, max_x)
    y0 = T.clip(y0, zero, max_y)
    y1 = T.clip(y1, zero, max_y)
    dim2 = width
    dim1 = width * height
    base = self._repeat(
        T.arange(num_batch, dtype='int32') * dim1, out_height * out_width)
    base_y0 = base + y0 * dim2
    base_y1 = base + y1 * dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1

    # use the indices to look up pixels in the flat image
    # and restore the channels dimension
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]

    # and finally calculate the interpolated values
    x0_f = T.cast(x0, floatX)
    x1_f = T.cast(x1, floatX)
    y0_f = T.cast(y0, floatX)
    y1_f = T.cast(y1, floatX)
    wa = ((x1_f - x) * (y1_f - y)).dimshuffle(0, 'x')
    wb = ((x1_f - x) * (y - y0_f)).dimshuffle(0, 'x')
    wc = ((x - x0_f) * (y1_f - y)).dimshuffle(0, 'x')
    wd = ((x - x0_f) * (y - y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa * Ia, wb * Ib, wc * Ic, wd * Id], axis=0)
    return output

Author: berleon | Project: seya | Lines: 55 | Source: attention.py
Example 9: _interpolate_bicubic

def _interpolate_bicubic(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)
    grid = _meshgrid(out_height, out_width)
    x_grid_flat = grid[0].flatten()
    y_grid_flat = grid[1].flatten()

    # clip coordinates to [-1, 1]
    x = T.clip(x, -1, 1)
    y = T.clip(y, -1, 1)
    # scale coordinates from [-1, 1] to [0, width/height - 1]
    x = (x + 1) / 2 * (width_f - 1)
    y = (y + 1) / 2 * (height_f - 1)

    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x0 = T.cast(x0_f, "int64")
    y0 = T.cast(y0_f, "int64")
    # return T.concatenate(((x0-x).dimshuffle(0, 'x')**2, 0.0*dg2(x.dimshuffle(0, 'x')), 0.0*dg2(x0.dimshuffle(0, 'x'))), 1)

    # a 4x4 neighborhood: offsets -1, 0, 1, 2 around the floor index
    offsets = np.arange(-1, 3).astype(int)
    dim2 = width
    dim1 = width * height
    base = T.repeat(T.arange(num_batch, dtype="int64") * dim1, out_height * out_width)

    # convert (x, y) pairs to linear indices into the flattened image
    def _flat_idx(xx, yy, dim2=dim2):
        return base + yy * dim2 + xx

    y_locs = [y0 + offset for offset in offsets]
    ys = [T.clip(loc, 0, height - 1) for loc in y_locs]

    def _cubic_interp_dim(im_flat, other_idx):
        """Cubic interpolation along a dimension"""
        neighbor_locs = [x0 + offset for offset in offsets]
        neighbor_idx = [T.clip(nloc, 0, width - 1) for nloc in neighbor_locs]
        xidxs = neighbor_idx
        yidxs = [other_idx] * len(neighbor_idx)
        neighbor_idxs = [_flat_idx(xidx, yidx) for xidx, yidx in zip(xidxs, yidxs)]
        values = [im_flat[idx] for idx in neighbor_idxs]
        weights = [_cubic_conv_weights(dg2(nloc) - x).dimshuffle(0, "x") for nloc in neighbor_locs]
        # interpolate along the x direction
        out = T.sum([dg2(v) * w for w, v in zip(weights, values)], axis=0) / T.sum(weights, axis=0)
        return out

    im_flat = im.reshape((-1, channels))
    ims = [_cubic_interp_dim(im_flat, yidx) for yidx in ys]
    yweights = [_cubic_conv_weights(dg2(yloc) - y).dimshuffle(0, "x") for yloc in y_locs]
    out = T.sum(
        [v * _cubic_conv_weights(dg2(yloc) - y).dimshuffle(0, "x") for v, yloc in zip(ims, y_locs)], axis=0
    ) / T.sum(yweights, axis=0)
    return out

Author: poolio | Project: deepinterp | Lines: 54 | Source: bicubic_interp.py
Example 10: process

def process(self, input, tparams, BNparams):
    b, f, h0, w0 = input.shape
    result = []
    for h, w in self.pymamid:
        # window size rounds up, stride rounds down, so the bins cover the input
        win_h = T.ceil(h0 / h).astype('int32')
        win_w = T.ceil(w0 / w).astype('int32')
        str_h = T.floor(h0 / h).astype('int32')
        str_w = T.floor(w0 / w).astype('int32')
        result.append(dnn_pool(
            img=input, ws=(win_h, win_w), mode=self.mode,
            stride=(str_h, str_w), pad=(0, 0)).reshape([b, -1]))
    return T.concatenate(result, axis=1)

Author: wufangjie | Project: dnn | Lines: 12 | Source: layers.py
Example 11: pool_2d_nxn_regions

def pool_2d_nxn_regions(inputs, output_size, mode='max'):
    """
    Performs a pooling operation that results in a fixed size:
    output_size x output_size.

    Used by SpatialPyramidPoolingLayer. Refer to appendix A in [1].

    Parameters
    ----------
    inputs : a tensor with 4 dimensions (N x C x H x W)
    output_size : integer
        The output size of the pooling operation.
    mode : string
        Pooling mode, one of 'max', 'average_inc_pad', 'average_exc_pad'.
        Defaults to 'max'.

    Returns a list of tensors, one for each output bin.
    The list contains output_size*output_size elements, where
    each element is a 3D tensor (N x C x 1).

    References
    ----------
    .. [1] He, Kaiming et al (2015):
           Spatial Pyramid Pooling in Deep Convolutional Networks
           for Visual Recognition.
           http://arxiv.org/pdf/1406.4729.pdf.
    """
    if mode == 'max':
        pooling_op = T.max
    elif mode in ['average_inc_pad', 'average_exc_pad']:
        pooling_op = T.mean
    else:
        msg = "Mode must be either 'max', 'average_inc_pad' or "
        msg += "'average_exc_pad'. Got '{0}'"
        raise ValueError(msg.format(mode))

    h, w = inputs.shape[2:]
    result = []
    n = float(output_size)
    for row in range(output_size):
        for col in range(output_size):
            # bin boundaries: floor for the start, ceil for the end
            start_h = T.floor(row / n * h).astype('int32')
            end_h = T.ceil((row + 1) / n * h).astype('int32')
            start_w = T.floor(col / n * w).astype('int32')
            end_w = T.ceil((col + 1) / n * w).astype('int32')
            pooling_region = inputs[:, :, start_h:end_h, start_w:end_w]
            this_result = pooling_op(pooling_region, axis=(2, 3))
            result.append(this_result.dimshuffle(0, 1, 'x'))
    return result

Author: HapeMask | Project: Lasagne | Lines: 52 | Source: pool.py
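To use pool_2d_nxn_regions as a spatial pyramid level, concatenate the returned bins into one fixed-length descriptor per sample. A sketch, assuming the function above is in scope (shapes and names are illustrative):

import numpy as np
import theano
import theano.tensor as T

images = T.tensor4('images')                        # N x C x H x W
bins = pool_2d_nxn_regions(images, 2, mode='max')   # 2*2 = 4 tensors, each N x C x 1
features = T.concatenate(bins, axis=2)              # N x C x 4
f = theano.function([images], features)

out = f(np.random.rand(3, 8, 9, 9).astype(theano.config.floatX))
print(out.shape)  # (3, 8, 4): one max per bin, regardless of the 9x9 input size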
Example 12: discretized_logistic

def discretized_logistic(mean, logscale, binsize, sample=None):
    scale = T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape)
        _y = T.log(-u / (u - 1))             # inverse CDF of the logistic
        sample = mean + scale * _y           # sample from the actual logistic
        sample = T.floor(sample / binsize) * binsize  # discretize the sample
    # standardized left edge of the bin containing the sample
    _sample = (T.floor(sample / binsize) * binsize - mean) / scale
    # probability mass of the bin, with a small constant to avoid log(0)
    logps = T.log(T.nnet.sigmoid(_sample + binsize / scale) - T.nnet.sigmoid(_sample) + 1e-7)
    logp = logps.flatten(2).sum(axis=1)
    entr = logscale.flatten(2)
    entr = entr.sum(axis=1) + 2. * entr.shape[1].astype(G.floatX)
    return RandomVariable(sample, logp, entr, mean=mean, logscale=logscale, logps=logps)

Author: gburt | Project: iaf | Lines: 14 | Source: rand.py
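In symbols: writing b for binsize, mu for mean, and s = exp(logscale), the per-element log-probability computed above is the logistic probability mass of the bin containing the discretized sample. A LaTeX restatement of what the code evaluates:

\log p(x) = \log\left[
    \sigma\!\left(\frac{\bar{x} - \mu + b}{s}\right)
  - \sigma\!\left(\frac{\bar{x} - \mu}{s}\right) + 10^{-7}
\right],
\qquad
\bar{x} = b \left\lfloor \frac{x}{b} \right\rfloor,

where \sigma is the logistic sigmoid and the 10^{-7} term guards against taking the log of zero.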
Example 13: discretized_gaussian

def discretized_gaussian(mean, logvar, binsize, sample=None):
    scale = T.exp(.5 * logvar)
    if sample is None:
        _y = G.rng_curand.normal(size=mean.shape)
        sample = mean + scale * _y  # sample from the actual Gaussian
        sample = T.floor(sample / binsize) * binsize  # discretize the sample
    # standardized left edge of the bin containing the sample
    _sample = (T.floor(sample / binsize) * binsize - mean) / scale

    def _erf(x):
        return T.erf(x / T.sqrt(2.))

    # Gaussian probability mass of the bin, with a small constant to avoid log(0)
    logp = T.log(_erf(_sample + binsize / scale) - _erf(_sample) + 1e-7) + T.log(.5)
    logp = logp.flatten(2).sum(axis=1)
    entr = (.5 * (T.log(2 * math.pi) + 1 + logvar)).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, logvar=logvar)

Author: gburt | Project: iaf | Lines: 14 | Source: rand.py
Example 14: create_learning_rate_func

def create_learning_rate_func(solver_params):
    base = tt.fscalar('base')
    gamma = tt.fscalar('gamma')
    power = tt.fscalar('power')
    itrvl = tt.fscalar('itrvl')
    iter = tt.scalar('iter')
    if solver_params['lr_type'] == 'inv':
        lr_ = base * tt.pow(1 + gamma * iter, -power)
        lr = t.function(
            inputs=[iter,
                    t.Param(base, default=solver_params['base']),
                    t.Param(gamma, default=solver_params['gamma']),
                    t.Param(power, default=solver_params['power'])],
            outputs=lr_)
    elif solver_params['lr_type'] == 'fixed':
        lr_ = base
        lr = t.function(
            inputs=[iter, t.Param(base, default=solver_params['base'])],
            outputs=lr_,
            on_unused_input='ignore')
    elif solver_params['lr_type'] == 'episodic':
        lr_ = base / (tt.floor(iter / itrvl) + 1)
        lr = t.function(
            inputs=[iter,
                    t.Param(base, default=solver_params['base']),
                    t.Param(itrvl, default=solver_params['interval'])],
            outputs=lr_,
            on_unused_input='ignore')
    return lr

Author: bentzinir | Project: Buffe | Lines: 30 | Source: learning_rate.py
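The 'episodic' branch is the one that needs T.floor: it produces a staircase decay in which the learning rate is divided by k + 1 throughout the k-th interval. A quick worked example with base = 0.1 and interval = 100 (illustrative values):

lr(iter) = base / (floor(iter / interval) + 1)
lr(0)    = 0.1 / 1 = 0.1
lr(150)  = 0.1 / 2 = 0.05
lr(250)  = 0.1 / 3 ≈ 0.0333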
Example 15: get_output_for

def get_output_for(self, input, **kwargs):
    p = self.p
    k = self.k
    nbatches = input.shape[0]
    x_len = self.x_len
    x = input.reshape((nbatches, x_len))
    p_floor = T.floor(p)
    p_ceil = T.ceil(p)
    # deltas: distances of p to the three candidate integer positions
    p_delta = p - p_floor
    ep_delta = T.exp(k * -p_delta)
    p2_delta = 1 - p_delta
    ep2_delta = T.exp(k * -p2_delta)
    p0_delta = 1 + p_delta
    ep0_delta = T.exp(k * -p0_delta)
    ep_sum = ep_delta + ep2_delta + ep0_delta
    # gather the three candidate permutations (wrapping around x_len)
    perm1 = x[:, (T.cast(p_floor, 'int32')) % x_len]
    perm2 = x[:, (T.cast(p_ceil, 'int32') + 1) % x_len]
    perm0 = x[:, (T.cast(p_floor, 'int32') - 1) % x_len]
    # softmax-like weighted blend of the three permutations
    perm1_factor = ep_delta * perm1
    perm2_factor = ep2_delta * perm2
    perm3_factor = ep0_delta * perm0
    res = (perm1_factor + perm2_factor + perm3_factor) / ep_sum
    return res.reshape(input.shape)

Author: zenna | Project: ig | Lines: 33 | Source: permute.py
Example 16: matrix_noise3d

def matrix_noise3d(input_vectors, perm, grad3, vertex_table):
    # skew the input points onto the simplex grid and snap to cell origins
    skew_factors = (input_vectors[:, 0] + input_vectors[:, 1] + input_vectors[:, 2]) * 1.0 / 3.0
    skewed_vectors = T.floor(input_vectors + skew_factors[:, np.newaxis])
    unskew_factors = (skewed_vectors[:, 0] + skewed_vectors[:, 1] + skewed_vectors[:, 2]) * 1.0 / 6.0
    offsets_0 = input_vectors - (skewed_vectors - unskew_factors[:, np.newaxis])
    # pick the simplex (ordering of coordinates) each point falls into
    vertex_table_x_index = T.ge(offsets_0[:, 0], offsets_0[:, 1])
    vertex_table_y_index = T.ge(offsets_0[:, 1], offsets_0[:, 2])
    vertex_table_z_index = T.ge(offsets_0[:, 0], offsets_0[:, 2])
    simplex_vertices = vertex_table[
        vertex_table_x_index,
        vertex_table_y_index,
        vertex_table_z_index].reshape((input_vectors.shape[0], 2, 3))
    offsets_1 = offsets_0 - simplex_vertices[:, 0] + 1.0 / 6.0
    offsets_2 = offsets_0 - simplex_vertices[:, 1] + 1.0 / 3.0
    offsets_3 = offsets_0 - 0.5
    # hash the cell coordinates through the permutation table to pick gradients
    masked_skewed_vectors = T.bitwise_and(skewed_vectors.astype('int32'), 255)
    gi0s = perm[masked_skewed_vectors[:, 0] + perm[
        masked_skewed_vectors[:, 1] + perm[
            masked_skewed_vectors[:, 2]].astype('int32')].astype('int32')] % 12
    gi1s = perm[masked_skewed_vectors[:, 0] + simplex_vertices[:, 0, 0] + perm[
        masked_skewed_vectors[:, 1] + simplex_vertices[:, 0, 1] + perm[
            masked_skewed_vectors[:, 2] + simplex_vertices[:, 0, 2]].astype('int32')].astype('int32')] % 12
    gi2s = perm[masked_skewed_vectors[:, 0] + simplex_vertices[:, 1, 0] + perm[
        masked_skewed_vectors[:, 1] + simplex_vertices[:, 1, 1] + perm[
            masked_skewed_vectors[:, 2] + simplex_vertices[:, 1, 2]].astype('int32')].astype('int32')] % 12
    gi3s = perm[masked_skewed_vectors[:, 0] + 1 + perm[
        masked_skewed_vectors[:, 1] + 1 + perm[
            masked_skewed_vectors[:, 2] + 1].astype('int32')].astype('int32')] % 12
    n0s = calculate_gradient_contribution(offsets_0, gi0s, grad3)
    n1s = calculate_gradient_contribution(offsets_1, gi1s, grad3)
    n2s = calculate_gradient_contribution(offsets_2, gi2s, grad3)
    n3s = calculate_gradient_contribution(offsets_3, gi3s, grad3)
    return 23.0 * (n0s + n1s + n2s + n3s)

Author: zheng-xq | Project: simplexnoise | Lines: 33 | Source: theano-simplex-matrix.py
Example 17: compute_hard_windows

def compute_hard_windows(self, image_shape, location, scale):
    # find the top-left (front) and bottom-right (back) corners for each patch
    a = location - 0.5 * (T.cast(self.patch_shape, theano.config.floatX) / scale)
    b = location + 0.5 * (T.cast(self.patch_shape, theano.config.floatX) / scale)

    # grow by three patch pixels
    a -= self.kernel.k_sigma_radius(self.cutoff, scale)
    b += self.kernel.k_sigma_radius(self.cutoff, scale)

    # clip to fit inside the image and keep the window nonempty
    a = T.clip(a, 0, image_shape - 1)
    b = T.clip(b, a + 1, image_shape)

    if self.batched_window:
        # take the bounding box of all windows; the slices then have the
        # same length for each sample and scan can be avoided, at the cost
        # of typically selecting more of the input.
        a = a.min(axis=0, keepdims=True)
        b = b.max(axis=0, keepdims=True)

    # make integer
    a = T.cast(T.floor(a), 'int16')
    b = T.cast(T.ceil(b), 'int16')
    return a, b

Author: mohammadpz | Project: rna | Lines: 26 | Source: crop.py
Example 18: _warp_times

def _warp_times(self, t):
    # phase of each time relative to each body's period
    delta = tt.shape_padleft(t) / tt.shape_padright(self.period, t.ndim)
    delta += tt.shape_padright(self._base_time, t.ndim)
    # transit index: floor of the phase, clipped into the TTV table
    ind = tt.cast(tt.floor(delta), "int64")
    dt = tt.stack([ttv[tt.clip(ind[i], 0, ttv.shape[0] - 1)]
                   for i, ttv in enumerate(self.ttvs)], -1)
    return tt.shape_padright(t) + dt

Author: dfm | Project: exoplanet | Lines: 7 | Source: ttv.py
Example 19: inv

def inv(self, output):
    # upsample by repeating each pooled value over its pooling window
    output = (output.dimshuffle(0, 1, 2, 'x', 3, 'x')
                    .repeat(self.pool_shape[1], axis=5)
                    .repeat(self.pool_shape[0], axis=3))
    if self.depooler == 'random':
        unpooled = (
            self.input_shape[0], self.input_shape[1],
            self.input_shape[2] // self.pool_shape[0], self.pool_shape[0],
            self.input_shape[3] // self.pool_shape[1], self.pool_shape[1])
        pooled = (
            self.input_shape[0], self.input_shape[1],
            self.input_shape[2] // self.pool_shape[0], 1,
            self.input_shape[3] // self.pool_shape[1], 1)
        # build a random one-hot mask per window: dividing by the window max
        # makes exactly one entry equal to 1, and T.floor zeroes the rest
        output_mask = self.theano_rng.uniform(size=unpooled, dtype=theano.config.floatX)
        output_mask = output_mask / output_mask.max(axis=5).max(axis=3).dimshuffle(0, 1, 2, 'x', 3, 'x')
        output_mask = T.floor(output_mask)
        return (output_mask * output).reshape(self.input_shape)
    else:
        output = self.depooler(output, axis=5)
        output = self.depooler(output, axis=3)
        return output

Author: Brimborough | Project: deep-motion-analysis | Lines: 26 | Source: Pool2DLayer.py
Example 20: generate_forward_diffusion_sample

def generate_forward_diffusion_sample(self, X_noiseless):
    """
    Corrupt a training image with t steps worth of Gaussian noise, and
    return the corrupted image, as well as the mean and covariance of the
    posterior q(x^{t-1}|x^t, x^0).
    """
    X_noiseless = X_noiseless.reshape(
        (-1, self.n_colors, self.spatial_width, self.spatial_width))
    n_images = X_noiseless.shape[0].astype('int16')
    rng = Random().theano_rng

    # Choose a timestep in [1, self.trajectory_length-1]; the reverse process
    # is fixed for the very first timestep, so we skip it.
    # TODO for some reason random_integer is missing from the Blocks
    # theano random number generator.
    t = T.floor(rng.uniform(size=(1, 1), low=1, high=self.trajectory_length,
                            dtype=theano.config.floatX))
    t_weights = self.get_t_weights(t)
    N = rng.normal(size=(n_images, self.n_colors, self.spatial_width, self.spatial_width),
                   dtype=theano.config.floatX)

    # noise added this time step
    beta_forward = self.get_beta_forward(t)
    # decay in noise variance due to the original signal this step
    alpha_forward = 1. - beta_forward
    # compute the total decay in the fraction of the variance due to X_noiseless
    alpha_arr = 1. - self.beta_arr
    alpha_cum_forward_arr = T.extra_ops.cumprod(alpha_arr).reshape((self.trajectory_length, 1))
    alpha_cum_forward = T.dot(t_weights.T, alpha_cum_forward_arr)
    # total fraction of the variance due to noise being mixed in
    beta_cumulative = 1. - alpha_cum_forward
    # total fraction of the variance due to noise mixed in one step ago
    beta_cumulative_prior_step = 1. - alpha_cum_forward / alpha_forward

    # generate the corrupted training data
    X_uniformnoise = X_noiseless + (
        rng.uniform(size=(n_images, self.n_colors, self.spatial_width, self.spatial_width),
                    dtype=theano.config.floatX)
        - T.constant(0.5, dtype=theano.config.floatX)) * T.constant(self.uniform_noise, dtype=theano.config.floatX)
    X_noisy = X_uniformnoise * T.sqrt(alpha_cum_forward) + N * T.sqrt(1. - alpha_cum_forward)

    # compute the mean and covariance of the posterior distribution
    mu1_scl = T.sqrt(alpha_cum_forward / alpha_forward)
    mu2_scl = 1. / T.sqrt(alpha_forward)
    cov1 = 1. - alpha_cum_forward / alpha_forward
    cov2 = beta_forward / alpha_forward
    lam = 1. / cov1 + 1. / cov2
    mu = (
        X_uniformnoise * mu1_scl / cov1 +
        X_noisy * mu2_scl / cov2
    ) / lam
    sigma = T.sqrt(1. / lam)
    sigma = sigma.reshape((1, 1, 1, 1))

    mu.name = 'mu q posterior'
    sigma.name = 'sigma q posterior'
    X_noisy.name = 'X_noisy'
    t.name = 't'
    return X_noisy, t, mu, sigma

Author: Sohl-Dickstein | Project: Diffusion-Probabilistic-Models | Lines: 60 | Source: model.py
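Reading off the last block of Example 20: with alpha_t = 1 - beta_t and alpha-bar_t the cumulative product of the alphas, the code appears to evaluate the usual Gaussian posterior of the forward process. In LaTeX:

\mu = \frac{ x_0 \sqrt{\bar\alpha_t/\alpha_t}\,/\,c_1 \;+\; x_t \,(1/\sqrt{\alpha_t})\,/\,c_2 }{ 1/c_1 + 1/c_2 },
\qquad
\sigma^2 = \frac{1}{1/c_1 + 1/c_2},

with c_1 = 1 - \bar\alpha_t/\alpha_t (the variance accumulated up to step t-1, cov1 in the code) and c_2 = \beta_t/\alpha_t (cov2), where x_0 plays the role of X_uniformnoise and x_t of X_noisy.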
Note: the theano.tensor.floor examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright in each fragment remains with its original author. Consult the corresponding project's License before redistributing or reusing the code; do not reproduce this compilation without permission.