
Python tensor.flatten Function Code Examples


This article collects typical usage examples of the theano.tensor.flatten function in Python. If you have been wondering what flatten does, how to call it, and what real usage looks like, the curated code examples below should help.



The following shows 20 code examples of the flatten function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
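Before diving into the examples, it helps to recall the signature: T.flatten(x, outdim=1) collapses the trailing dimensions of x so that outdim dimensions remain; outdim=1 yields a vector, while outdim=2 keeps the leading (batch) axis and flattens the rest. The short sketch below is illustrative only (it assumes a standard Theano installation; note that later Theano releases rename the outdim keyword to ndim):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')                 # symbolic 3-d input
flat_vec = T.flatten(x)            # outdim=1: collapse everything to a vector
flat_mat = T.flatten(x, outdim=2)  # keep the first axis, flatten the rest
f = theano.function([x], [flat_vec, flat_mat])

val = np.arange(24, dtype=theano.config.floatX).reshape((2, 3, 4))
vec, mat = f(val)
print(vec.shape)  # (24,)
print(mat.shape)  # (2, 12)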

Example 1: unet_crossentropy_loss_sampled

def unet_crossentropy_loss_sampled(y_true, y_pred):
    print('unet_crossentropy_loss_sampled')
    epsilon = 1.0e-4
    y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0-epsilon))
    y_true = T.flatten(y_true)
    # this seems to work
    # it is super ugly though and I am sure there is a better way to do it
    # but I am struggling with theano to cooperate
    # filter the right indices
    indPos = T.nonzero(y_true)[0] # no idea why this is a tuple (T.nonzero returns a tuple of index arrays)
    indNeg = T.nonzero(1-y_true)[0]
    # shuffle (srng is assumed to be a RandomStreams instance defined elsewhere in the source file)
    n = indPos.shape[0]
    indPos = indPos[srng.permutation(n=n)]
    n = indNeg.shape[0]
    indNeg = indNeg[srng.permutation(n=n)]
    # take an equal number of samples from whichever class has fewer
    n_samples = T.cast(T.min([T.sum(y_true), T.sum(1-y_true)]), dtype='int64')

    indPos = indPos[:n_samples]
    indNeg = indNeg[:n_samples]
    loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1-y_pred_clipped[indNeg]))
    average_loss = T.mean(loss_vector)
    print('average_loss:', average_loss)
    return average_loss
Author: Rhoana, Project: icon, Lines: 25, Source: oldunet2.py


Example 2: loop

        def loop(i, x, p, t):
            p_class_t = p[i, t[i]]

            return T.dot(
                T.flatten(T.grad(p_class_t, x)[i]),
                T.flatten(x[i])
            )
Author: AndreasMadsen, Project: course-02460, Lines: 7, Source: scale_invariant.py


Example 3: __call__

    def __call__(self, x, leak):
        f1 = 0.5 * (1 + leak)
        f2 = 0.5 * (1 - leak)
        if leak.ndim == 1:
            return T.flatten(f1, 1)[0] * x + T.flatten(f2, 1)[0] * abs(x)
        else:
            return f1 * x + f2 * abs(x)
Author: IndicoDataSolutions, Project: Foxhound, Lines: 7, Source: activations.py


Example 4: build_model

def build_model(tparams, options, Wemb):
	trng = RandomStreams(123)
	use_noise = theano.shared(numpy_floatX(0.))

	x = T.matrix('x', dtype='int32')
	t = T.matrix('t', dtype=config.floatX)
	mask = T.matrix('mask', dtype=config.floatX)
	y = T.vector('y', dtype='int32')

	n_timesteps = x.shape[0]
	n_samples = x.shape[1]

	x_emb = Wemb[x.flatten()].reshape([n_timesteps,n_samples,options['embDimSize']])
	x_t_emb = T.concatenate([t.reshape([n_timesteps,n_samples,1]), x_emb], axis=2) #Adding the time element to the embedding

	proj = gru_layer(tparams, x_t_emb, options, mask=mask)
	if options['use_dropout']: proj = dropout_layer(proj, use_noise, trng)

	p_y_given_x = T.nnet.sigmoid(T.dot(proj, tparams['W_logistic']) + tparams['b_logistic'])
	L = -(y * T.flatten(T.log(p_y_given_x)) + (1 - y) * T.flatten(T.log(1 - p_y_given_x)))
	cost = T.mean(L)

	if options['L2_reg'] > 0.: cost += options['L2_reg'] * (tparams['W_logistic'] ** 2).sum()

	return use_noise, x, t, mask, y, p_y_given_x, cost
Author: mp2893, Project: rnn_predict, Lines: 25, Source: gru_emb_time.py


Example 5: _recurrence

        def _recurrence(v_h_, x_h_, v_t_, x_t_, a_t_, is_aggressive):

            state = tt.concatenate([v_h_, x_h_, tt.flatten(v_t_), tt.flatten(x_t_), tt.flatten(a_t_)])

            h0 = tt.dot(state, self.W_a_0) + self.b_a_0
            relu0 = tt.nnet.relu(h0)

            h1 = tt.dot(relu0, self.W_a_1) + self.b_a_1
            relu1 = tt.nnet.relu(h1)

            h2 = tt.dot(relu1, self.W_a_2) + self.b_a_2
            relu2 = tt.nnet.relu(h2)

            a = tt.dot(relu2, self.W_a_c)

            v_h, x_h, v_t, x_t, a_t, cost_transition = _step_state(v_h_, x_h_, v_t_, x_t_, a_t_, a, is_aggressive)

            # cost:

            # 0. smooth acceleration policy
            cost_accel = tt.abs_(a)

            # 1. forcing the host to move forward (until the top point of the roundabout)
            cost_progress = tt.nnet.relu(0.5*self.two_pi_r-x_h)

            # 2. keeping distance from close vehicles
            x_abs_diffs = tt.abs_(x_h - x_t)

            cost_accident = tt.mean(3 * tt.nnet.relu(self.require_distance - x_abs_diffs)) * (x_h > -0.5*self.host_length)  # tt.nnet.sigmoid(x_h + 0.5*self.host_length)

            cost = self.alpha_accel * cost_accel + self.alpha_progress * cost_progress + self.alpha_accident * cost_accident

            return (v_h, x_h, v_t, x_t, a_t, cost, cost_transition), t.scan_module.until(x_h[0] >= 0.45*self.two_pi_r)  # 't' here presumably refers to the theano module imported in the original source
Author: bentzinir, Project: Buffe, Lines: 33, Source: controller.py


Example 6: apply

    def apply(self, dataset, can_fit=True):
        x = dataset.get_design_matrix()

        denseX = T.matrix(dtype=x.dtype)

        image_shape = (len(x),) + self.img_shape
        X = denseX.reshape(image_shape)
        filters = gaussian_filter_9x9().reshape((1,1,9,9))

        convout = conv.conv2d(input=X,
                              filters=filters,
                              image_shape=image_shape,
                              filter_shape=(1, 1, 9, 9),
                              border_mode='full')

        # For each pixel, remove mean of 9x9 neighborhood
        centered_X = X - convout[:,:,4:-4,4:-4]
        
        # Scale down norm of 9x9 patch if norm is bigger than 1
        sum_sqr_XX = conv.conv2d(input=centered_X**2,
                                 filters=filters,
                                 image_shape=image_shape,
                                 filter_shape=(1, 1, 9, 9),
                                 border_mode='full')
        denom = T.sqrt(sum_sqr_XX[:,:,4:-4,4:-4])
        per_img_mean = T.mean(T.flatten(denom, outdim=3), axis=2)
        divisor = T.largest(per_img_mean.dimshuffle((0,1,'x','x')), denom)

        new_X = centered_X / divisor
        new_X = T.flatten(new_X, outdim=2)

        f = theano.function([denseX], new_X)
        dataset.set_design_matrix(f(x))
Author: HoldenCaulfieldRye, Project: caffe-1, Lines: 33, Source: preproc.py


Example 7: unet_crossentropy_loss_sampled

    def unet_crossentropy_loss_sampled(y_true, y_pred):
        epsilon = 1.0e-4
        y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0-epsilon))
        y_true = T.flatten(y_true)
        # this seems to work
        # it is super ugly though and I am sure there is a better way to do it
        # but I am struggling with theano to cooperate
        # filter the right indices
        classPos = 1
        classNeg = 0
        indPos   = T.eq(y_true, classPos).nonzero()[0]
        indNeg   = T.eq(y_true, classNeg).nonzero()[0]
        #pos      = y_true[ indPos ]
        #neg      = y_true[ indNeg ]

        # shuffle (UNET.srng is assumed to be a RandomStreams instance
        # defined on the enclosing class)
        n = indPos.shape[0]
        indPos = indPos[UNET.srng.permutation(n=n)]
        n = indNeg.shape[0]
        indNeg = indNeg[UNET.srng.permutation(n=n)]
        # take an equal number of samples from whichever class has fewer
        n_samples = T.cast(T.min([ indPos.shape[0], indNeg.shape[0]]), dtype='int64')
        #n_samples = T.cast(T.min([T.sum(y_true), T.sum(1-y_true)]), dtype='int64')

        indPos = indPos[:n_samples]
        indNeg = indNeg[:n_samples]
        #loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1-y_pred_clipped[indNeg]))
        loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(y_pred_clipped[indNeg]))
        loss_vector = T.clip(loss_vector, epsilon, 1.0-epsilon)
        average_loss = T.mean(loss_vector)
        # guard against NaN; a Python `if` on a symbolic value does not work,
        # so the check has to stay in the computation graph via T.switch
        average_loss = T.switch(T.isnan(average_loss),
                                T.mean(y_pred_clipped[indPos]),
                                average_loss)
        return average_loss
Author: Rhoana, Project: icon, Lines: 33, Source: unet.py


Example 8: test_shape

def test_shape():
    x = T.tensor3()
    x_flat_2_mat = T.flatten(x, 2)
    x_flat_2_vec = T.flatten(x, 1)
    flat_f = theano.function([x], [x_flat_2_mat, x_flat_2_vec])
    # tensor3_val is assumed to be a (2, 2, 2) numpy array defined at module level
    flat_mat_val, flat_vec_val = flat_f(tensor3_val)
    print('flatten to 2-d array:')
    print(flat_mat_val)
    print('flatten to 1-d array:')
    print(flat_vec_val)

    x_mat = T.matrix()
    x_mat_2_t3 = T.reshape(x_mat, (2, 2, 2))
    x_mat_2_vec = T.reshape(x_mat, (8,))
    reshape_f = theano.function([x_mat], [x_mat_2_t3, x_mat_2_vec])
    """
    t3_shape = T.lvector()
    vec_shape = T.lvector()
    x_mat_2_t3 = T.reshape(x_mat, t3_shape, 3)
    x_mat_2_vec = T.reshape(x_mat, vec_shape, 1)
    reshape_f = theano.function([x_mat, t3_shape, vec_shape], [x_mat_2_t3, x_mat_2_vec])
    """
    mat_2_t3_val, mat_2_vec_val = reshape_f(flat_mat_val)
    print('reshape 2-d array to 3-d array:')
    print(mat_2_t3_val)
    print('reshape 2-d array to 1-d array:')
    print(mat_2_vec_val)
Author: guomxin, Project: learn_theano, Lines: 27, Source: test_shape.py
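
As excerpted, the test depends on a module-level tensor3_val that is not shown. A minimal driver under that assumption (the (2, 2, 2) shape is implied by the reshape calls in the function) could be:

import numpy as np
import theano
import theano.tensor as T

# hypothetical definition of the missing module-level value
tensor3_val = np.arange(8, dtype=theano.config.floatX).reshape((2, 2, 2))
test_shape()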


Example 9: __create_node_set

    def __create_node_set(self, n_features, n_output, data_in, note_set_name, weightsFunc=None):
        prev_out = data_in
        prev_dim = n_features
        layers = []
        n_weights = 0
        weights_list = []
        state = None
        for i_h_layer in range(0,len(self.hidden_dimensions)):
            n_hidden_nodes = self.hidden_dimensions[i_h_layer]
            weights = None
            #weights = np.ones((prev_dim,n_hidden_nodes)) - 0.5
            bias = None
            if weightsFunc is not None:
                weights,bias,state = weightsFunc(i_h_layer,state)
            # actual hidden layer
            hidden_layer = Layer(data_in=prev_out,
                                    n_input=prev_dim,
                                    n_output=n_hidden_nodes,
                                    link_function=self.link_function_hidden,
                                    weights=weights,
                                    bias=bias,
                                    name=note_set_name + " Hidden Layer")
            weights_list.append(hidden_layer.weights)
            weights_list.append(hidden_layer.bias)
            layers.append(hidden_layer)
            n_weights += (prev_dim+1)*n_hidden_nodes
            prev_out = hidden_layer.output
            prev_dim = n_hidden_nodes

        weights = None
        #weights = np.ones((prev_dim,n_output)) - 0.5
        bias = None
        if weightsFunc is not None:
            weights,bias,state = weightsFunc(len(self.hidden_dimensions),state)
        output_layer = Layer(
            data_in=prev_out,
            n_input=prev_dim,
            n_output=n_output,
            link_function=self.link_function_output,
            weights=weights,
            bias=bias,
            name=note_set_name + " Output Layer")
        weights_list.append(output_layer.weights)
        weights_list.append(output_layer.bias)
        layers.append(output_layer)
        n_weights += (prev_dim+1)*n_output

        #concatenate weights into one huge vector
        flat_weights = T.concatenate([T.flatten(item) for item in weights_list])
        flat_weights.name = "Network " + note_set_name + " Weights"
        #compute MSE
        y = self.__y
        errors = y - output_layer.output
        mse = T.mean(T.sqr(errors))
        normalized_mse = mse / 2.0
        normalized_mse.name = note_set_name + " MSE"
        grads = T.concatenate([T.flatten(item) for item in T.grad(normalized_mse, weights_list)])
        grads.name = note_set_name + " Gradients"
        return layers,grads,normalized_mse,weights_list, n_weights, flat_weights
Author: Algomorph, Project: PySCG, Lines: 59, Source: neural_net.py


Example 10: lower_bound

    def lower_bound(self):
        mu = T.flatten(self.trunc_output, outdim=2)
        inp = T.flatten(self.inpt, outdim=2)
        if self.out_distribution == True:
            sigma = T.mean(T.flatten(self.trunk_sigma, outdim=2))
        else:
            sigma = 0
        # log_gauss = 0.5*np.log(2 * np.pi) + 0.5*sigma + 0.5 * ((inp - mu) / T.exp(sigma))**2.
        log_gauss = T.sum(0.5 * np.log(2 * np.pi) + 0.5 * sigma + 0.5 * ((inp - mu) / T.exp(sigma)) ** 2.0, axis=1)
        return T.mean(log_gauss - self.latent_layer.prior)
Author: KyriacosShiarli, Project: SingNet, Lines: 10, Source: convVAE2_basic_avgPool.py


Example 11: model

def model(X1, X2, w1, w2, w3, p_drop_conv):
    # first half of the first layer
    l1a = T.flatten(dropout(T.mean(rectify(conv2d(X1, w1, border_mode='valid')), axis=3), p_drop_conv), outdim=2)
    # second half of the first layer
    l1b = T.flatten(dropout(T.mean(rectify(conv2d(X2, w2, border_mode='valid')), axis=3), p_drop_conv), outdim=2)
    # combine the two parts into the first layer
    l1 = T.concatenate([l1a, l1b], axis=1)
    # output layer
    pyx = T.dot(l1, w3)
    return pyx
Author: r3fang, Project: foo, Lines: 10, Source: DL_enhancer.py


Example 12: t_unroll_ae

def t_unroll_ae(wts, bs, tied_wts=False):
    ''' Flattens matrices and concatenates to a vector - specifically for autoencoders '''

    # if we have tied weights, this vector will be comprised of a single matrix and two
    # distinct bias vectors
    if tied_wts:
        v = np.array([], dtype=theano.config.floatX)  # 'dtype', not 'type'
        v = T.concatenate(
            (v, T.flatten(wts[0]), T.flatten(bs[0]), T.flatten(bs[1])))
        return v
    return t_unroll(wts, bs)
Author: avasbr, Project: nnet_theano, Lines: 11, Source: nnetutils.py


Example 13: model

def model(X,
    h2_u, h3_u,
    h2_s, h3_s,
    w, w2, g2, b2, w3, g3, b3, wy
    ):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2, u=h2_u, s=h2_s))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3, u=h3_u, s=h3_s))
    h = T.flatten(dnn_pool(h, (4, 4), (4, 4), mode='max'), 2)
    h2 = T.flatten(dnn_pool(h2, (2, 2), (2, 2), mode='max'), 2)
    h3 = T.flatten(dnn_pool(h3, (1, 1), (1, 1), mode='max'), 2)
    f = T.concatenate([h, h2, h3], axis=1)
    return [f]
Author: 10sun, Project: dcgan_code, Lines: 13, Source: svhn_semisup_analysis.py


Example 14: model

def model(X, w1, w2, w3, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w1, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    # NOTE: the following line computes a value that is never used;
    # it appears to be leftover code in the original source
    dropout(T.flatten(max_pool_2d(rectify(conv2d(X, w2)), (2, 2)), outdim=2), 0.3)

    l2a = rectify(conv2d(l1, w2))
    l2b = max_pool_2d(l2a, (2, 2))
    l2 = T.flatten(l2b, outdim=2)
    l2 = dropout(l2, p_drop_conv)

    pyx = softmax(T.dot(l2, w3))
    return l1, l2, pyx
Author: kawasakin, Project: foo, Lines: 14, Source: 5_convolutional_net.py


Example 15: set_sampling_function

def set_sampling_function(decoder_feature_function,
                          decoder_red_function,
                          decoder_green_function,
                          decoder_blue_function):

    hidden_data = T.matrix(name='hidden_data',
                           dtype=theano.config.floatX)

    # decoder
    decoder_outputs = decoder_feature_function(hidden_data)
    decoder_feature = decoder_outputs[1]
    decoder_red     = decoder_red_function(decoder_feature)
    decoder_green   = decoder_green_function(decoder_feature)
    decoder_blue    = decoder_blue_function(decoder_feature)

    num_samples = decoder_red.shape[0]
    num_rows    = decoder_red.shape[2]
    num_cols    = decoder_red.shape[3]
    num_pixels  = num_rows*num_cols

    # shape = (num_samples, num_intensity, num_pixels)
    decoder_red   = T.flatten(decoder_red, 3)
    decoder_green = T.flatten(decoder_green, 3)
    decoder_blue  = T.flatten(decoder_blue, 3)
    # shape = (num_samples, num_pixels, num_intensity)
    decoder_red   = T.swapaxes(decoder_red, axis1=1, axis2=2)
    decoder_green = T.swapaxes(decoder_green, axis1=1, axis2=2)
    decoder_blue  = T.swapaxes(decoder_blue, axis1=1, axis2=2)
    # shape = (num_samples*num_pixels, num_intensity)
    decoder_red   = decoder_red.reshape((num_samples*num_pixels, -1))
    decoder_green = decoder_green.reshape((num_samples*num_pixels, -1))
    decoder_blue  = decoder_blue.reshape((num_samples*num_pixels, -1))
    # softmax
    decoder_red   = T.argmax(T.nnet.softmax(decoder_red),axis=1)
    decoder_green = T.argmax(T.nnet.softmax(decoder_green),axis=1)
    decoder_blue  = T.argmax(T.nnet.softmax(decoder_blue),axis=1)

    decoder_red   = decoder_red.reshape((num_samples, 1, num_rows, num_cols))
    decoder_green = decoder_green.reshape((num_samples, 1, num_rows, num_cols))
    decoder_blue  = decoder_blue.reshape((num_samples, 1, num_rows, num_cols))

    decoder_image = T.concatenate([decoder_red, decoder_green, decoder_blue], axis=1)

    function_inputs = [hidden_data,]
    function_outputs = [decoder_image,]

    function = theano.function(inputs=function_inputs,
                               outputs=function_outputs,
                               on_unused_input='ignore')
    return function
Author: taesupkim, Project: dcgan_code, Lines: 50, Source: moment_ae_face_0.py


Example 16: gauss_style_loss

def gauss_style_loss(x_truth, x_guess, log_var=0., scale=1., use_huber=False):
    # compute gram matrices for the two batches of convolutional features
    g_t = T.flatten(gram_matrix(x_truth), 2)
    g_g = T.flatten(gram_matrix(x_guess), 2)
    # get normalization factors based on the size of feature maps
    # N = T.cast(x_truth.shape[1], 'floatX')
    # M = T.cast(x_truth.shape[2] * x_truth.shape[3], 'floatX')
    # compute a pseudo-Gaussian loss on difference between gram matrices
    loss = log_prob_gaussian(g_t, g_g, log_vars=log_var, do_sum=False,
                             use_huber=use_huber, mask=None)
    # take sum over gram matrix entries and normalize for feature map size
    # loss = (scale / (N**2. * M)) * T.sum(loss, axis=1, keepdims=False)
    loss = T.sum(loss, axis=1, keepdims=False)
    return loss
Author: smajida, Project: MatryoshkaNetworks, Lines: 14, Source: costs.py
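
Here gram_matrix and log_prob_gaussian are helpers defined elsewhere in the MatryoshkaNetworks project and are not shown. For orientation only, a typical gram_matrix for a batch of convolutional feature maps might look like the following sketch (an assumption, not the project's actual code):

import theano.tensor as T

def gram_matrix(x):
    # x: (batch, channels, rows, cols) batch of convolutional feature maps
    f = T.flatten(x, 3)  # -> (batch, channels, rows*cols)
    # channel-by-channel inner products for each item in the batch
    return T.batched_dot(f, f.dimshuffle(0, 2, 1))  # -> (batch, channels, channels)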


Example 17: L2SVMcost

    def L2SVMcost(self, y):
        """Return the mean L2-SVM cost of the predictions of this model
        for a given set of targets.

        .. math::

            z = \frac{1}{2}\|W\|_2^2 + \frac{1}{2}\|b\|_2^2
                + \frac{0.6}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
                  \sum_{k} \max(0,\, 1 - p^{(i)}_{k} y^{(i)}_{k})

        :type y: theano.tensor.TensorType
        :param y: the targets; the hinge term expects a +/-1 indicator
                  matrix with one row per example

        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """

        '''p = -T.ones_like((y.shape[0],7))
        
        result, updates = theano.scan(fn = lambda p,y: T.basic.set_subtensor(p[i,y[i]]=1),
                                                                outputs_info = -T.ones_like((y.shape[0],7)),
                                                                non_sequences = y,
                                                                n_steps = y.shape[0])
        final_result = result[-1]
        f = theano.function([y,p],final_result,updates = updates)
                                                                
        for i in xrange(500):
                p = T.basic.set_subtensor(p[i,y[i]]=1)
        print p.shape
        print f(y,p)
        print f(y,p).shape'''
        # y.shape[0] is (symbolically) the number of examples in the minibatch.
        # The cost is an L2 penalty on the weights and biases plus a hinge
        # loss on the margins; (1 - self.p_y_given_x * y) implements the
        # margin term under the +/-1 target encoding described above.
        z = (0.5 * T.dot(T.flatten(self.W, outdim=1), T.flatten(self.W, outdim=1))
             + 0.5 * T.dot(T.flatten(self.b, outdim=1), T.flatten(self.b, outdim=1))
             + 0.6 * T.sum(T.maximum(0, (1 - self.p_y_given_x * y)), axis=1).mean())
        return z
Author: DeeperCS, Project: mitosis-detection, Lines: 49, Source: mlp_bak.py


Example 18: create_model

def create_model():
    """Create the deep autoencoder model with Blocks, and load MNIST."""
    mlp = MLP(activations=[Logistic(), Logistic(), Logistic(), None,
                           Logistic(), Logistic(), Logistic(), Logistic()],
              dims=[784, 1000, 500, 250, 30, 250, 500, 1000, 784],
              weights_init=Sparse(15, IsotropicGaussian()),
              biases_init=Constant(0))
    mlp.initialize()

    x = tensor.matrix('features')
    x_hat = mlp.apply(tensor.flatten(x, outdim=2))
    squared_err = SquaredError().apply(tensor.flatten(x, outdim=2), x_hat)
    cost = BinaryCrossEntropy().apply(tensor.flatten(x, outdim=2), x_hat)

    return x, cost, squared_err
Author: mohammadpz, Project: DAE_Experiments, Lines: 15, Source: mnist_autoencoder.py


Example 19: set_updater_function

def set_updater_function(feature_extractor,
                         sample_generator,
                         generator_parameters,
                         generator_optimizer):
    # set input data, hidden data
    input_data  = T.tensor4(name='input_data',
                            dtype=theano.config.floatX)
    hidden_data = T.matrix(name='hidden_data',
                           dtype=theano.config.floatX)

    # extract feature from input data
    positive_features = feature_extractor(input_data)

    # sample data
    negative_features = sample_generator(hidden_data)
    negative_data     = negative_features[-1]
    negative_features = negative_features[:-1]

    # moment matching
    moment_match_cost = 0
    for i in range(len(positive_features)):
        pos_feat = positive_features[i]
        neg_feat = negative_features[i]
        moment_match_cost += T.mean(T.sqr(T.mean(pos_feat, axis=0)-T.mean(neg_feat, axis=0)))
        moment_match_cost += T.mean(T.sqr(T.mean(T.sqr(pos_feat), axis=0)-T.mean(T.sqr(neg_feat), axis=0)))

    pos_feat = T.flatten(input_data, 2)
    neg_feat = T.flatten(negative_data, 2)
    moment_match_cost += T.mean(T.sqr(T.mean(pos_feat, axis=0)-T.mean(neg_feat, axis=0)))
    moment_match_cost += T.mean(T.sqr(T.mean(T.sqr(pos_feat), axis=0)-T.mean(T.sqr(neg_feat), axis=0)))

    generator_updates = generator_optimizer(generator_parameters,
                                            moment_match_cost)

    # updater function input
    updater_function_inputs  = [input_data,
                               hidden_data]

    # updater function output
    updater_function_outputs = [moment_match_cost,
                                negative_data]

    # updater function
    updater_function = theano.function(inputs=updater_function_inputs,
                                       outputs=updater_function_outputs,
                                       updates=generator_updates,
                                       on_unused_input='ignore')
    return updater_function
Author: taesupkim, Project: dcgan_code, Lines: 48, Source: vgg_moment_match_face_0.py


Example 20: lp_norm

    def lp_norm(self, n, k, r, c, z):
        '''
        Lp = ( 1/n * sum(|x_i|^p, 1..n))^(1/p) where p = 1 + ln(1+e^P)
        :param n: batch (example) index
        :param k: feature-map (channel) index
        :param r: row index of the pooling output
        :param c: column index of the pooling output
        :param z: output tensor to write the pooled value into
        :return: z with the Lp-pooled value set at position [n, k, r, c]
        '''
        ds0, ds1 = self.pool_size
        st0, st1 = self.stride
        pad_h = self.pad[0]
        pad_w = self.pad[1]

        row_st = r * st0
        row_end = T.minimum(row_st + ds0, self.img_rows)
        row_st = T.maximum(row_st, self.pad[0])
        row_end = T.minimum(row_end, self.x_m2d + pad_h)

        col_st = c * st1
        col_end = T.minimum(col_st + ds1, self.img_cols)
        col_st = T.maximum(col_st, self.pad[1])
        col_end = T.minimum(col_end, self.x_m1d + pad_w)

        Lp = T.pow(
                T.mean(T.pow(
                        T.abs_(T.flatten(self.y[n, k, row_st:row_end, col_st:col_end], 1)),
                        1 + T.log(1 + T.exp(self.P))
                )),
                1 / (1 + T.log(1 + T.exp(self.P)))
        )

        return T.set_subtensor(z[n, k, r, c], Lp)
Author: alxrsngrtn, Project: LearnedNormPooling, Lines: 34, Source: layer.py



Note: The theano.tensor.flatten examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many programmers, and copyright remains with the original authors. For redistribution and use, please refer to each project's License; do not reproduce without permission.

