Python tensor.as_tensor Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.as_tensor, gathered from open-source projects. If you are unsure what as_tensor does, how to call it, or what real code that uses it looks like, the selected examples below should help.



Below are 20 code examples of the as_tensor function, ordered by popularity.
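
Before the project-specific examples, here is a minimal, self-contained sketch of what as_tensor does: it wraps Python lists, NumPy arrays, or existing variables into symbolic Theano tensors (it behaves essentially like theano.tensor.as_tensor_variable).

import numpy
import theano
import theano.tensor as T

# Wrapping a Python list yields a symbolic constant tensor.
a = T.as_tensor([1, 2, 3])
print(type(a), a.ndim)                      # TensorConstant, ndim == 1

# Wrapping a NumPy array keeps its dtype and shape.
x = T.as_tensor(numpy.ones((2, 3), dtype='float32'))
print((x * 2).sum().eval())                 # 12.0

# An existing tensor variable is passed through unchanged.
v = T.fvector('v')
assert T.as_tensor(v) is v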

Example 1: maxpool_3D

# imports assumed by this excerpt (old-Theano location of DownsampleFactorMax)
import theano.tensor as T
from theano.tensor.signal.downsample import DownsampleFactorMax

def maxpool_3D(input, ds, ignore_border=False):
   
    # input.dimshuffle(0, 2, 1, 3, 4) would move the time axis behind the spatial axes,
    # but no reshuffle is needed here.
    if input.ndim < 3:
        raise NotImplementedError('max_pool_3d requires a dimension >= 3')

    # extract nr dimensions
    vid_dim = input.ndim
    # max pool in two different steps, so we can use the 2d implementation of 
    # downsamplefactormax. First maxpool frames as usual. 
    # Then maxpool the time dimension. Shift the time dimension to the third 
    # position, so rows and cols are in the back


    # extract dimensions
    frame_shape = input.shape[-2:]
    
    # count the number of "leading" dimensions, store as dmatrix
    batch_size = T.prod(input.shape[:-2])
    batch_size = T.shape_padright(batch_size,1)
    
    # store as 4D tensor with shape: (batch_size,1,height,width)
    new_shape = T.cast(T.join(0, batch_size,
                                        T.as_tensor([1,]), 
                                        frame_shape), 'int32')
    input_4D = T.reshape(input, new_shape, ndim=4)

    # downsample mini-batch of videos in rows and cols
    op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)          # so second and third dimensions of ds are for height and width
    output = op(input_4D)
    # restore to original shape                                     
    outshape = T.join(0, input.shape[:-2], output.shape[-2:])
    out = T.reshape(output, outshape, ndim=input.ndim)

    # now maxpool time
    # output (time, rows, cols), reshape so that time is in the back
    shufl = (list(range(vid_dim-3)) + [vid_dim-2]+[vid_dim-1]+[vid_dim-3])
    input_time = out.dimshuffle(shufl)
    # reset dimensions
    vid_shape = input_time.shape[-2:]
    
    # count the number of "leading" dimensions, store as dmatrix
    batch_size = T.prod(input_time.shape[:-2])
    batch_size = T.shape_padright(batch_size,1)
    
    # store as 4D tensor with shape: (batch_size,1,width,time)
    new_shape = T.cast(T.join(0, batch_size,
                                        T.as_tensor([1,]), 
                                        vid_shape), 'int32')
    input_4D_time = T.reshape(input_time, new_shape, ndim=4)
    # downsample mini-batch of videos in time
    op = DownsampleFactorMax((1,ds[0]), ignore_border)            # Here the time dimension is downsampled. 
    outtime = op(input_4D_time)
    # output 
    # restore to original shape (xxx, rows, cols, time)
    outshape = T.join(0, input_time.shape[:-2], outtime.shape[-2:])
    shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
    #rval = T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
    return T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
Author: kli-nlpr | Project: Convolutional-Neural-Networks | Lines: 60 | Source: core.py
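
As a quick, hypothetical sanity check of the two-step trick above (2D-pool the rows/cols, then shuffle time to the back and 2D-pool it as well), the function can be called on a 5D video batch like this; the sketch assumes the imports added above and that all shapes divide evenly by ds:

import numpy
import theano
import theano.tensor as T

vid = T.TensorType('float32', (False,) * 5)('vid')   # (batch, channels, time, height, width)
pooled = maxpool_3D(vid, ds=(2, 2, 2))               # halve time, height and width

f = theano.function([vid], pooled)
out = f(numpy.random.randn(2, 3, 8, 16, 16).astype('float32'))
print(out.shape)   # (2, 3, 4, 8, 8)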


Example 2: _initial_part_matrix

 def _initial_part_matrix(self, part, size, deterministic):
     if size is None:
         size = 1
     length, dist_name, dist_map = self._choose_alternative(
         part,
         (self.local_size, self.initial_dist_local_name, self.initial_dist_local_map),
         (self.global_size, self.initial_dist_global_name, self.initial_dist_global_map)
     )
     dtype = self.symbolic_initial_global_matrix.dtype
     if length == 0:  # in this case theano fails to compute sample of correct size
         return tt.ones((size, 0), dtype)
     length = tt.as_tensor(length)
     size = tt.as_tensor(size)
     shape = tt.stack((size, length))
     # apply optimizations if possible
     if not isinstance(deterministic, tt.Variable):
         if deterministic:
             return tt.ones(shape, dtype) * dist_map
         else:
             return getattr(self._rng, dist_name)(shape)
     else:
         sample = getattr(self._rng, dist_name)(shape)
         initial = tt.switch(
             deterministic,
             tt.ones(shape, dtype) * dist_map,
             sample
         )
         return initial
Author: aasensio | Project: pymc3 | Lines: 28 | Source: opvi.py


Example 3: theano_scan_color

def theano_scan_color(writer, draw_fn):
	with writer as writer_buf:
		writer_buf_reshaped = writer_buf.reshape((Screen.screen_vane_count, Screen.screen_max_magnitude, 3))
		vane_matrix = [[[float(vane), float(vane), float(vane)] for px in range(Screen.screen_max_magnitude)]
					   for vane in range(Screen.screen_vane_count)]
		px_matrix =   [[[float(px),float(px),float(px)] for px in range(Screen.screen_max_magnitude)]
					   for vane in range(Screen.screen_vane_count)]
		col_matrix =  [[[float(0), float(1), float(2)] for px in range(Screen.screen_max_magnitude)]
					   for vane in range(Screen.screen_vane_count)]
		vane_vec = T.as_tensor(vane_matrix)
		px_vec = T.as_tensor(px_matrix)
		col_vec = T.as_tensor(col_matrix)
		step = T.fscalar('step')

		draw_fn_with_step = draw_fn(step)
		f, _ = theano.map(draw_fn_with_step, [vane_vec, px_vec, col_vec])

		fn_actual = theano.function([step], f, allow_input_downcast=True, on_unused_input='ignore')

		step_actual = 0
		while True:
			writer.frame_ready()
			start = time.time()
			writer_buf_reshaped[:] = fn_actual(step_actual)
			step_actual -= 1
			done = time.time()
			fps = 1.0/(done - start)
			if fps < TARGET_FPS:
				logging.warning('Frame rate is %f, which is lower than target %d', fps, TARGET_FPS)
Author: jarrahl | Project: skyscreen | Lines: 29 | Source: theano_examples.py


Example 4: get_aggregator

    def get_aggregator(self):
        initialized = shared_like(0.)
        numerator_acc = shared_like(self.numerator)
        denominator_acc = shared_like(self.denominator)

        # Dummy default expression to use as the previously-aggregated
        # value, that has the same shape as the new result
        numerator_zeros = tensor.as_tensor(self.numerator).zeros_like()
        denominator_zeros = tensor.as_tensor(self.denominator).zeros_like()

        conditional_update_num = self.numerator + ifelse(initialized,
                                                         numerator_acc,
                                                         numerator_zeros)
        conditional_update_den = self.denominator + ifelse(initialized,
                                                           denominator_acc,
                                                           denominator_zeros)

        initialization_updates = [(numerator_acc,
                                   tensor.zeros_like(numerator_acc)),
                                  (denominator_acc,
                                   tensor.zeros_like(denominator_acc)),
                                  (initialized, 0.)]
        accumulation_updates = [(numerator_acc,
                                 conditional_update_num),
                                (denominator_acc,
                                 conditional_update_den),
                                (initialized, 1.)]
        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=(numerator_acc /
                                                  denominator_acc))
        return aggregator
Author: AdityoSanjaya | Project: blocks | Lines: 33 | Source: aggregation.py


Example 5: test_chunk_unchunk_grad2

def test_chunk_unchunk_grad2():
  n_time = 101
  n_batch = 3
  n_dim = 5
  numpy.random.seed(1234)
  _x = numpy.random.randn(n_time, n_batch, n_dim).astype(f32)
  _Dx2 = numpy.random.randn(n_time, n_batch, n_dim).astype(f32)
  _index = numpy.ones((n_time, n_batch), dtype="int8")
  x = T.as_tensor(_x)
  Dx2 = T.as_tensor(_Dx2)
  index = T.as_tensor(_index)
  chunk_size = 11
  chunk_step = 7

  out, oindex = chunk(x, index=index, chunk_size=chunk_size, chunk_step=chunk_step)
  chunk_op = NativeOp.Chunking().make_op()
  assert type(out.owner.op) is type(chunk_op)

  x2, index2, factors = unchunk(out, index=oindex, chunk_size=chunk_size, chunk_step=chunk_step, n_time=x.shape[0], n_batch=x.shape[1])
  unchunk_op = NativeOp.UnChunking().make_op()
  assert type(x2.owner.op) is type(unchunk_op)

  Dout, _, _, _, _, _ = unchunk_op.grad(x2.owner.inputs, (Dx2, None, None))
  Dx, _, _, _, _ = chunk_op.grad(out.owner.inputs, (Dout, None))
  _Dx = Dx.eval()
  assert_almost_equal(_Dx, _Dx2)
Author: rwth-i6 | Project: returnn | Lines: 26 | Source: test_NativeOp_chunk.py


Example 6: dynamic_kmaxPooling

    def dynamic_kmaxPooling(self, curConv_out, k):
        neighborsForPooling = TSN.images2neibs(ten4=curConv_out, neib_shape=(1,curConv_out.shape[3]), mode='ignore_borders')
        self.neighbors = neighborsForPooling

        neighborsArgSorted = T.argsort(neighborsForPooling, axis=1)
        kNeighborsArg = neighborsArgSorted[:,-k:]
        #self.bestK = kNeighborsArg
        kNeighborsArgSorted = T.sort(kNeighborsArg, axis=1)

        ii = T.repeat(T.arange(neighborsForPooling.shape[0]), k)
        jj = kNeighborsArgSorted.flatten()
        pooledkmaxTmp = neighborsForPooling[ii, jj]
        new_shape = T.cast(T.join(0, 
                           T.as_tensor([neighborsForPooling.shape[0]]),
                           T.as_tensor([k])),
                           'int64')
        pooledkmax_matrix = T.reshape(pooledkmaxTmp, new_shape, ndim=2)

        rightWidth=self.unifiedWidth-k            
        right_padding = T.zeros((neighborsForPooling.shape[0], rightWidth), dtype=theano.config.floatX)
        matrix_padded = T.concatenate([pooledkmax_matrix, right_padding], axis=1)      
        #recover tensor form
        new_shape = T.cast(T.join(0, curConv_out.shape[:-2],
                           T.as_tensor([curConv_out.shape[2]]),
                           T.as_tensor([self.unifiedWidth])),
                           'int64')

        curPooled_out = T.reshape(matrix_padded, new_shape, ndim=4)
                
        return curPooled_out
Author: rgtjf | Project: DeepLearning | Lines: 30 | Source: HKDefined.py


Example 7: max_pool

def max_pool(images, imgshp, maxpoolshp):
    """Implements a max pooling layer

    Takes as input a 2D tensor of shape batch_size x img_size and
    performs max pooling.  Max pooling downsamples by taking the max
    value in a given area, here defined by maxpoolshp. Outputs a 2D
    tensor of shape batch_size x output_size.

    :param images: 2D tensor containing images on which to apply convolution.
                   Assumed to be of shape batch_size x img_size
    :param imgshp: tuple containing image dimensions
    :param maxpoolshp: tuple containing shape of area to max pool over

    :return: out1, symbolic result (2D tensor)
    :return: out2, logical shape of the output
    """
    N = numpy
    poolsize = N.int64(N.prod(maxpoolshp))

    # imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
    # in the first case, default nfeatures to 1
    if N.size(imgshp) == 2:
        imgshp = (1,) + imgshp

    # construct indices and index pointers for sparse matrix, which,
    # when multiplied with input images will generate a stack of image
    # patches
    indices, indptr, spmat_shape, sptype, outshp = \
            convolution_indices.conv_eval(imgshp, maxpoolshp,
                                          maxpoolshp, mode='valid')

#    print 'XXXXXXXXXXXXXXXX MAX POOLING LAYER XXXXXXXXXXXXXXXXXXXX'
#    print 'imgshp = ', imgshp
#    print 'maxpoolshp = ', maxpoolshp
#    print 'outshp = ', outshp

    # build sparse matrix, then generate stack of image patches
    csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices,
                                    indptr, spmat_shape)
    patches = sparse.structured_dot(csc, images.T).T

    pshape = tensor.stack([images.shape[0] *\
                               tensor.as_tensor(N.prod(outshp)),
                           tensor.as_tensor(imgshp[0]),
                           tensor.as_tensor(poolsize)])
    patch_stack = tensor.reshape(patches, pshape, ndim=3)

    out1 = tensor.max(patch_stack, axis=2)

    pshape = tensor.stack([images.shape[0],
                           tensor.as_tensor(N.prod(outshp)),
                           tensor.as_tensor(imgshp[0])])
    out2 = tensor.reshape(out1, pshape, ndim=3)

    out3 = tensor.DimShuffle(out2.broadcastable, (0, 2, 1))(out2)

    return tensor.flatten(out3, 2), outshp
Author: 12190143 | Project: Theano | Lines: 57 | Source: sp.py
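
A hedged usage sketch for this sparse-matrix-based variant (in Theano it lives in theano.sparse.sandbox.sp; here it simply assumes max_pool as defined above is in scope). It takes flattened images plus their logical shape and returns both the pooled result and the logical output shape:

import numpy
import theano
import theano.tensor as tensor

images = tensor.fmatrix('images')   # (batch_size, img_size), images stored flattened
out, outshp = max_pool(images, imgshp=(6, 6), maxpoolshp=(2, 2))

f = theano.function([images], out)
batch = numpy.random.rand(4, 36).astype('float32')   # four 6x6 single-channel images
print(f(batch).shape)   # expected (4, 9): each 6x6 image pooled down to a 3x3 grid
print(outshp)           # logical spatial shape of the pooled output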


Example 8: _grad_single

    def _grad_single(self, ct, s, lnC2, GAMMI2):
        lnC = lnC2
        GAMMI = GAMMI2
        v = self.v#T.as_tensor(self.v)[:,ct:]
        v0 = T.as_tensor(v[v[:,0]==0, :])
        v1 = T.as_tensor(v[v[:,0]==1, :])

        cnp = v.shape[0]

        # Gradient of fE wrt the priors over final state
        [ofE, oxS], upd_fE_single = th.scan(fn=self._free_energy,
                                   sequences=v,
                                   non_sequences=[s,self.h,lnC,self.b])
        ofE0 = ofE[v0].sum()
        ofE1 = ofE[v1].sum()

        dFE0dlnC = T.jacobian(ofE0, lnC)
        dFE1dlnC = T.jacobian(ofE1, lnC)
        dFEdlnC  = T.jacobian(ofE,  lnC)
        ofE_ = T.vector()
        ofE_.tag.test_value = ofE.tag.test_value

        # Gradient of Gamma with respect to its initial condition:
        GAMMA, upd_GAMMA = th.scan(fn=self._upd_gamma,
               outputs_info=[GAMMI],
               non_sequences=[ofE, self.lambd, self.alpha, self.beta, cnp],
               n_steps=4)
        dGdg = T.grad(GAMMA[-1], GAMMI)

        dGdfE = T.jacobian(GAMMA[-1], ofE)
        dGdlnC = dGdfE.dot(dFEdlnC)

        out1 = ofE0
        out2 = ofE1
        maxout = T.max([out1, out2])

        exp_out1 = T.exp(GAMMA[-1]*(out1 - maxout))
        exp_out2 = T.exp(GAMMA[-1]*(out2 - maxout))
        norm_const = exp_out1 + exp_out2

        # Derivative wrt the second output (gammi):
        Jac1_gammi = (-(out1-out2)*dGdg*
                T.exp(GAMMA[-1]*(out1+out2 - 2*maxout))/(norm_const**2))
        Jac2_gammi = -Jac1_gammi
#        dfd1_tZ = Jac1_gammi*dCdf[1][0]+ Jac2_gammi*dCdf[1][1]

        # Derivative wrt first input (lnc)
        Jac1_lnC = (T.exp(GAMMA[-1]*(out1 + out2 - 2*maxout))/(norm_const**2)*
                  (-dGdlnC*(out1 - out2) - GAMMA[-1]*(dFE0dlnC - dFE1dlnC)))
        Jac2_lnC = -Jac1_lnC

        Jac1 = T.concatenate([T.stack(Jac1_gammi), Jac1_lnC])
        Jac2 = T.concatenate([T.stack(Jac2_gammi), Jac2_lnC])
        self.debug = [Jac1_lnC, Jac2_lnC, Jac2_gammi, Jac1_gammi, dFE0dlnC,
                      dFE1dlnC, dGdg, out1, out2, v0, v1, v, ct]
        return Jac1, Jac2
Author: dcuevasr | Project: actinf | Lines: 56 | Source: actinfThClass.py


Example 9: symbolic_g

 def symbolic_g(self, symbolic_X_list,t):
     '''
     For every state x, gx must be a matrix of shape [number_of_rollouts, x_dim, control_dim],
     given x.shape = [number_of_rollouts, x_dim].
     '''
     x = symbolic_X_list[0]
     y = symbolic_X_list[1]
     gx = T.as_tensor(np.ones([1,self.control_dimensions]))
     gy = T.as_tensor(np.ones([1,self.control_dimensions]))
     return [gx,gy]
Author: DoTha | Project: ParallelPice | Lines: 10 | Source: Theano_LQ1.py


Example 10: __init__

 def __init__(self, n_units, **kwargs):
   super(LSTMS, self).__init__(
     n_units=n_units,
     n_in=n_units * 4,  # input gate, forget gate, output gate, net input
     n_out=n_units,
     n_re=n_units * 4,
     n_act=2  # output, cell state
   )
   self.o_output = T.as_tensor(numpy.ones((n_units,), dtype='float32'))
   self.o_h = T.as_tensor(numpy.ones((n_units,), dtype='float32'))
Author: rwth-i6 | Project: returnn | Lines: 10 | Source: NetworkRecurrentLayer.py


Example 11: max_pool_3d

# imports assumed by this excerpt (old-Theano location of DownsampleFactorMax)
import theano.tensor as T
from theano.tensor.signal.downsample import DownsampleFactorMax

def max_pool_3d(input, ds, ignore_border=False):
	"""
		Takes as input a N-D tensor, where N >= 3. It downscales the input video by
		the specified factor, by keeping only the maximum value of non-overlapping
		patches of size (ds[0],ds[1],ds[2]) (time, height, width)  
		
		:type input: N-D theano tensor of input images.
		:param input: input images. Max pooling will be done over the 3 last dimensions.
		:type ds: tuple of length 3
		:param ds: factor by which to downscale. (2,2,2) will halve the video in each dimension.
		:param ignore_border: boolean value. When True, a (5,5,5) input with ds=(2,2,2) will generate a
		(2,2,2) output; (3,3,3) otherwise.
	"""
	if input.ndim < 3:
		raise NotImplementedError('max_pool_3d requires a dimension >= 3')
		
	vid_dim = input.ndim
	#Maxpool frame
	frame_shape = input.shape[-2:]

	# count the number of "leading" dimensions, store as dmatrix
	batch_size = T.prod(input.shape[:-2])
	batch_size = T.shape_padright(batch_size,1)
	new_shape = T.cast(T.join(0, batch_size,T.as_tensor([1,]),frame_shape), 'int32')
	
	input_4D = T.reshape(input, new_shape, ndim=4)
	# downsample mini-batch of videos in rows and cols
	op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)
	output = op(input_4D)
	# restore to original shape
	outshape = T.join(0, input.shape[:-2], output.shape[-2:])
	out = T.reshape(output, outshape, ndim=input.ndim)
	
	#Maxpool time 
	# output (time, rows, cols), reshape so that time is in the back
	shufl = (list(range(vid_dim-4)) + list(range(vid_dim-3,vid_dim))+[vid_dim-4])
	input_time = out.dimshuffle(shufl)
	# reset dimensions
	vid_shape = input_time.shape[-2:]
	# count the number of "leading" dimensions, store as dmatrix
	batch_size = T.prod(input_time.shape[:-2])
	batch_size = T.shape_padright(batch_size,1)
	# store as 4D tensor with shape: (batch_size,1,width,time)
	new_shape = T.cast(T.join(0, batch_size,T.as_tensor([1,]),vid_shape), 'int32')
	input_4D_time = T.reshape(input_time, new_shape, ndim=4)
	# downsample mini-batch of videos in time
	op = DownsampleFactorMax((1,ds[0]), ignore_border)
	outtime = op(input_4D_time)
	# restore to original shape (xxx, rows, cols, time)
	outshape = T.join(0, input_time.shape[:-2], outtime.shape[-2:])
	shufl = (list(range(vid_dim-4)) + [vid_dim-1] + list(range(vid_dim-4,vid_dim-1)))
	#shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
	return T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
Author: IITM-DONLAB | Project: python-dnn | Lines: 53 | Source: max_pool.py


Example 12: make_node

 def make_node(self, frames, n, axis):
     """ compute an n-point fft of frames along given axis """
     _frames = tensor.as_tensor(frames, ndim=2)
     _n = tensor.as_tensor(n, ndim=0)
     _axis = tensor.as_tensor(axis, ndim=0)
     if self.half and _frames.type.dtype.startswith('complex'):
         raise TypeError('Argument to HalfFFT must not be complex', frames)
     spectrogram = tensor.zmatrix()
     buf = generic()
     # The `buf` output is present for future work
     # when we call FFTW directly and re-use the 'plan' that FFTW creates.
     # In that case, buf would store a CObject encapsulating the plan.
     rval = Apply(self, [_frames, _n, _axis], [spectrogram, buf])
     return rval
Author: ChienliMa | Project: Theano | Lines: 14 | Source: fourier.py


Example 13: updates

    def updates(self, gradients):
        """
        Return symbolic updates to apply given a set of gradients
        on the parameters being optimized.

        Parameters
        ----------
        gradients : list of tensor_likes
            List of symbolic gradients for the parameters contained
            in self.params, in the same order as in self.params.

        Returns
        -------
        updates : dict
            A dictionary with the shared variables in self.params as keys
            and a symbolic expression of how they are to be updated each
            SGD step as values.

        Notes
        -----
        `cost_updates` is a convenient helper function that takes all
        necessary gradients with respect to a given symbolic cost.
        """
        ups = {}
        # Add the learning rate/iteration updates
        l_ups, learn_rates = self.learning_rate_updates()
        safe_update(ups, l_ups)

        # Get the updates from sgd_updates, a PyLearn library function.
        p_up = dict(sgd_updates(self.params, gradients, learn_rates))

        # Add the things in p_up to ups
        safe_update(ups, p_up)

        # Clip the values if needed.
        # We do not want the clipping values to force an upcast
        # of the update: updates should have the same type as params
        for param, (p_min, p_max) in self.clipping_values.iteritems():
            p_min = tensor.as_tensor(p_min)
            p_max = tensor.as_tensor(p_max)
            dtype = param.dtype
            if p_min.dtype != dtype:
                p_min = tensor.cast(p_min, dtype)
            if p_max.dtype != dtype:
                p_max = tensor.cast(p_max, dtype)
            ups[param] = tensor.clip(ups[param], p_min, p_max)

        # Return the updates dictionary.
        return ups
Author: jaberg | Project: pylearn | Lines: 49 | Source: optimizer.py
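
The clipping block at the end is where as_tensor appears: Python-float bounds are wrapped into symbolic constants, cast to the parameter's dtype, and applied with tensor.clip. A standalone sketch of the same idea, with hypothetical values and outside the optimizer class:

import numpy
import theano
from theano import tensor

param = theano.shared(numpy.zeros(3, dtype='float32'), name='param')
grad = tensor.fvector('grad')
lr = numpy.float32(0.1)

# wrap the Python-float bounds as symbolic constants, matching the parameter dtype
p_min = tensor.cast(tensor.as_tensor(-1.0), param.dtype)
p_max = tensor.cast(tensor.as_tensor(1.0), param.dtype)

step = theano.function([grad],
                       updates={param: tensor.clip(param - lr * grad, p_min, p_max)})
step(numpy.full(3, 100.0, dtype='float32'))
print(param.get_value())   # [-1. -1. -1.]: the raw update of -10 was clipped to -1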


Example 14: test_2

def test_2():
  n_time = 11
  n_dim = 5
  numpy.random.seed(1234)
  _x = numpy.random.randn(n_time, n_dim).astype(f32)
  _idx = numpy.random.randint(0, n_dim, (n_time,))
  assert _idx.shape == (n_time,)
  x = T.as_tensor(_x)
  idx = T.as_tensor(_idx)
  y = subtensor_batched_index(x, idx)
  ts = T.arange(x.shape[0])
  y2 = x[ts, idx[ts]]
  _y = y.eval()
  _y2 = y2.eval()
  assert_almost_equal(_y, _y2)
Author: rwth-i6 | Project: returnn | Lines: 15 | Source: test_NativeOp_subtensor_batched_index.py


Example 15: grad

    def grad(self, inputs, dCdf):
        CT = T.as_tensor(self.ct)
        S = T.as_tensor(self.s)
        (jac1, jac2), _ = th.scan(fn=self._grad_single,
                                  sequences=[CT, S],
                                  non_sequences=[inputs[0][1:], inputs[0][0]])

#        for t in self.ct:
#            out = self._grad_single(t, s)

#        Jac1 = T.reshape(jac1, newshape=(1,-1))
#        Jac2 = T.reshape(jac2, newshape=(1,-1))
        Jac = T.concatenate([jac1, jac2], axis=0)
#        return Jac1*dCdf[0][0] + Jac2*dCdf[0][1],
        return Jac.T.dot(dCdf[0]),
Author: dcuevasr | Project: actinf | Lines: 15 | Source: actinfThClass.py


Example 16: max_pool_3d

# imports assumed by this excerpt (old-Theano location of DownsampleFactorMax)
import theano.tensor as T
from theano.tensor.signal.downsample import DownsampleFactorMax

def max_pool_3d(input, ds, ignore_border=False):
    # accept only 5D input of shape [n, c, x, y, z]
    if input.ndim != 5:
        raise NotImplementedError(
            'max_pool_3d requires a input [n, c, x, y, z]')

    # number of input dimensions
    vid_dim = input.ndim

    # shape of the trailing [y, z] frame dimensions
    frame_shape = input.shape[-2:]

    # batch size: the product of all dimensions except the two frame dimensions
    batch_size = T.prod(input.shape[:-2])
    # http://deeplearning.net/software/theano/library/tensor/basic.html#theano.tensor.shape_padright
    batch_size = T.shape_padright(batch_size, 1)


    new_shape = T.cast(T.join(0, batch_size, T.as_tensor([1, ]), frame_shape),
                       'int32')
    input_4D = T.reshape(input, new_shape, ndim=4)

    op = DownsampleFactorMax((ds[1], ds[2]), ignore_border)
    output = op(input_4D)
    outshape = T.join(0, input.shape[:-2], output.shape[-2:])
    out = T.reshape(output, outshape, ndim=input.ndim)

    shufl = (
        list(range(vid_dim - 3)) + [vid_dim - 2] + [vid_dim - 1] + [
            vid_dim - 3])
    input_time = out.dimshuffle(shufl)
    vid_shape = input_time.shape[-2:]

    batch_size = T.prod(input_time.shape[:-2])
    batch_size = T.shape_padright(batch_size, 1)

    new_shape = T.cast(T.join(0, batch_size,
                              T.as_tensor([1, ]),
                              vid_shape), 'int32')
    input_4D_time = T.reshape(input_time, new_shape, ndim=4)
    op = DownsampleFactorMax((1, ds[0]), ignore_border)
    outtime = op(input_4D_time)
    outshape = T.join(0, input_time.shape[:-2], outtime.shape[-2:])
    shufl = (
        list(range(vid_dim - 3)) + [vid_dim - 1] + [vid_dim - 3] + [
            vid_dim - 2])
    return T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
Author: kanairen | Project: CubicCNN | Lines: 48 | Source: __pool.py


Example 17: max_pool_switch_2d

def max_pool_switch_2d(input, ds, ignore_border=None, st=None, padding=(0, 0),
            index_type='flattened', index_scope='local'):

    if input.ndim < 2:
        raise NotImplementedError('max_pool_switched_2d requires a dimension >= 2')
    if ignore_border is None:
        ignore_border = False
    if input.ndim == 4:
        op = MaxPoolSwitch(ds, ignore_border, st=st, padding=padding,
                  index_type=index_type, index_scope=index_scope)
        output = op(input)
        return output

    # extract image dimensions
    img_shape = input.shape[-2:]

    # count the number of "leading" dimensions, store as dmatrix
    batch_size = T.prod(input.shape[:-2])
    batch_size = T.shape_padright(batch_size, 1)

    # store as 4D tensor with shape: (batch_size,1,height,width)
    new_shape = T.cast(T.join(0, batch_size,
                                        T.as_tensor([1]),
                                        img_shape), 'int64')
    input_4D = T.reshape(input, new_shape, ndim=4)

    # downsample mini-batch of images
    op = MaxPoolSwitch(ds, ignore_border, st=st, padding=padding,
              index_type=index_type, index_scope=index_scope)
    output = op(input_4D)

    # restore to original shape
    outshp = T.join(0, input.shape[:-2], output.shape[-2:])
    return T.reshape(output, outshp, ndim=input.ndim)
Author: bokorn | Project: Keras-and-Theano-layers-for-Switched-Pooling | Lines: 34 | Source: theano_switched_pooling.py


Example 18: max_pool_2d

# imports assumed by this excerpt (old-Theano location of DownsampleFactorMax)
from theano import tensor
from theano.tensor.signal.downsample import DownsampleFactorMax

def max_pool_2d(input, ds, ignore_border=False):
    """
    Takes as input a N-D tensor, where N >= 2. It downscales the input image by
    the specified factor, by keeping only the maximum value of non-overlapping
    patches of size (ds[0],ds[1])

    :type input: N-D theano tensor of input images.
    :param input: input images. Max pooling will be done over the 2 last dimensions.
    :type ds: tuple of length 2
    :param ds: factor by which to downscale. (2,2) will halve the image in each dimension.
    :param ignore_border: boolean value. When True, (5,5) input with ds=(2,2) will generate a
      (2,2) output. (3,3) otherwise.
    """
    if input.ndim < 2:
        raise NotImplementedError("max_pool_2d requires a dimension >= 2")

    # extract image dimensions
    img_shape = input.shape[-2:]

    # count the number of "leading" dimensions, store as dmatrix
    batch_size = tensor.prod(input.shape[:-2])
    batch_size = tensor.shape_padright(batch_size, 1)

    # store as 4D tensor with shape: (batch_size,1,height,width)
    new_shape = tensor.cast(tensor.join(0, batch_size, tensor.as_tensor([1]), img_shape), "int64")
    input_4D = tensor.reshape(input, new_shape, ndim=4)

    # downsample mini-batch of images
    op = DownsampleFactorMax(ds, ignore_border)
    output = op(input_4D)

    # restore to original shape
    outshp = tensor.join(0, input.shape[:-2], output.shape[-2:])
    return tensor.reshape(output, outshp, ndim=input.ndim)
Author: igul222 | Project: Theano | Lines: 34 | Source: downsample.py
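
And a minimal call of this function on a 3D input, to show the leading-dimension handling; a sketch, assuming the imports added above:

import numpy
import theano
from theano import tensor

x = tensor.ftensor3('x')            # e.g. (channels, height, width)
pooled = max_pool_2d(x, ds=(2, 2))

f = theano.function([x], pooled)
img = numpy.arange(2 * 6 * 6, dtype='float32').reshape(2, 6, 6)
print(f(img).shape)   # (2, 3, 3): leading dim kept, last two dims halved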


Example 19: test_1

def test_1():
  n_time = 11
  n_batch = 3
  n_dim = 5
  numpy.random.seed(1234)
  _x = numpy.random.randn(n_time, n_batch, n_dim).astype(f32)
  _idx = numpy.random.randint(0, n_dim, (n_time, n_batch))
  assert _idx.shape == (n_time, n_batch)
  x = T.as_tensor(_x)
  idx = T.as_tensor(_idx)
  y = subtensor_batched_index(x, idx)
  ts = T.arange(x.shape[0] * x.shape[1])
  y2 = x.reshape((ts.shape[0], x.shape[2]))[ts, idx.flatten()[ts]].reshape(idx.shape)
  _y = y.eval()
  _y2 = y2.eval()
  assert_almost_equal(_y, _y2)
Author: rwth-i6 | Project: returnn | Lines: 16 | Source: test_NativeOp_subtensor_batched_index.py


Example 20: __init__

  def __init__(self, factor=numpy.sqrt(2), decay=1.0, min_factor=None, padding=False, **kwargs):
    super(ConvFMPLayer, self).__init__(**kwargs)
    if min_factor is None:
      min_factor = factor
    factor = T.maximum(factor * (decay ** self.network.epoch), numpy.float32(min_factor))
    sizes_raw = self.source.output_sizes

    # handle size problems
    if not padding:
      padding = T.min(self.source.output_sizes / factor) <= 0
      padding = theano.printing.Print(global_fn=maybe_print_pad_warning)(padding)

    fixed_sizes = T.maximum(sizes_raw, T.cast(T.as_tensor(
      [factor + self.filter_height - 1, factor + self.filter_width - 1]), 'float32'))
    sizes = ifelse(padding, fixed_sizes, sizes_raw)
    X_size = T.cast(T.max(sizes, axis=0), "int32")

    def pad_fn(x_t, s):
      x = T.alloc(numpy.cast["float32"](0), X_size[0], X_size[1], self.X.shape[3])
      x = T.set_subtensor(x[:s[0], :s[1]], x_t[:s[0], :s[1]])
      return x

    fixed_X, _ = theano.scan(pad_fn, [self.X.dimshuffle(2, 0, 1, 3), T.cast(sizes_raw, "int32")])
    fixed_X = fixed_X.dimshuffle(1, 2, 0, 3)
    self.X = ifelse(padding, T.unbroadcast(fixed_X, 3), self.X)

    conv_out = CuDNNConvHWBCOpValidInstance(self.X, self.W, self.b)
    conv_out_sizes = self.conv_output_size_from_input_size(sizes)
    self.output, self.output_sizes = fmp(conv_out, conv_out_sizes, T.cast(factor,'float32'))
Author: rwth-i6 | Project: returnn | Lines: 29 | Source: NetworkTwoDLayer.py



Note: the theano.tensor.as_tensor examples above were compiled from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from projects contributed by many developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not repost without permission.

