Python tensor.any Function Code Examples


This article collects and summarizes typical usage examples of the theano.tensor.any function in Python. If you are wondering exactly what the any function does, how to call it, or what real-world uses look like, the curated code examples below should help.



A total of 20 code examples of the any function are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
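
Before working through the examples, here is a minimal illustrative sketch (not taken from any of the projects below) of what theano.tensor.any computes: a logical-OR reduction over a tensor, either over all elements or along a chosen axis.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
any_at_all = T.any(T.gt(x, 0))            # scalar: 1 if any element of x is > 0
any_per_row = T.any(T.gt(x, 0), axis=1)   # logical OR along each row

f = theano.function([x], [any_at_all, any_per_row])
data = np.array([[0., 0., 3.],
                 [0., 0., 0.]], dtype=theano.config.floatX)
print(f(data))  # expected: overall -> 1, per row -> [1, 0]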

Example 1: accurate_pixels_class

    def accurate_pixels_class(self, y):
        """
        Returns number of correctly classified pixels per class
        and total number of pixels per class.
        (pair of numpy 1d arrays)

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if not y.dtype.startswith('int'):
            raise NotImplementedError()

        correct = T.zeros((self.n_classes), dtype='int32')
        total = T.zeros((self.n_classes), dtype='int32')
        for i in range(self.n_classes):
            correct = T.set_subtensor(
                correct[i],
                T.switch(
                    T.any(T.eq(y, i)),
                    T.sum(T.eq(y[T.eq(y, i).nonzero()],
                               self.y_pred[T.eq(y, i).nonzero()])),
                    0)
                )
            total = T.set_subtensor(total[i], T.sum(T.eq(y, i)))
        return correct, total
Developer ID: corba777, Project: theano-conv-semantic, Lines of code: 33, Source file: log_reg.py


Example 2: __init__

    def __init__(self, n_comp=10, verbose=False):

        # Theano initialization
        self.T_weights = shared(np.eye(n_comp, dtype=np.float32))
        self.T_bias = shared(np.ones((n_comp, 1), dtype=np.float32))

        T_p_x_white = T.fmatrix()
        T_lrate = T.fscalar()
        T_block = T.fscalar()
        T_unmixed = T.dot(self.T_weights,T_p_x_white) + T.addbroadcast(self.T_bias,1)
        T_logit = 1 - 2 / (1 + T.exp(-T_unmixed))

        T_out =  self.T_weights +  T_lrate * T.dot(T_block * T.identity_like(self.T_weights) + T.dot(T_logit, T.transpose(T_unmixed)), self.T_weights)
        T_bias_out = self.T_bias + T_lrate * T.reshape(T_logit.sum(axis=1), (-1,1))
        T_max_w = T.max(self.T_weights)
        T_isnan = T.any(T.isnan(self.T_weights))

        self.w_up_fun = theano.function([T_p_x_white, T_lrate, T_block],
                                        [T_max_w, T_isnan],
                                        updates=[(self.T_weights, T_out),
                                                 (self.T_bias, T_bias_out)],
                                        allow_input_downcast=True)

        T_matrix = T.fmatrix()
        T_cov = T.dot(T_matrix,T.transpose(T_matrix))/T_block
        self.cov_fun = theano.function([T_matrix, T_block], T_cov, allow_input_downcast=True)
        
        self.loading = None
        self.sources = None
        self.weights = None
        self.n_comp = n_comp
        self.verbose = verbose
Developer ID: edamaraju, Project: ica, Lines of code: 32, Source file: ica_gpu.py


Example 3: cost

 def cost(self):
   """
   :rtype: (theano.Variable | None, dict[theano.Variable,theano.Variable] | None)
   :returns: cost, known_grads
   """
   known_grads = None
   if self.loss == 'ce' or self.loss == 'priori':
     if self.attrs.get("target", "").endswith("[sparse:coo]"):
       assert isinstance(self.y, tuple)
       assert len(self.y) == 3
       from NativeOp import crossentropy_softmax_and_gradient_z_sparse
       y_mask = self.network.j[self.attrs.get("target", "").replace("[sparse:coo]", "[sparse:coo:2:0]")]
       ce, grad_z = crossentropy_softmax_and_gradient_z_sparse(
         self.z, self.index, self.y[0], self.y[1], self.y[2], y_mask)
       return self.norm * T.sum(ce), {self.z: grad_z}
     if self.y_data_flat.type == T.ivector().type:
       # Use crossentropy_softmax_1hot to have a more stable and more optimized gradient calculation.
       # Theano fails to use it automatically; I guess our self.i indexing is too confusing.
       #idx = self.index.flatten().dimshuffle(0,'x').repeat(self.y_m.shape[1],axis=1) # faster than line below
       #nll, pcx = T.nnet.crossentropy_softmax_1hot(x=self.y_m * idx, y_idx=self.y_data_flat * self.index.flatten())
       nll, pcx = T.nnet.crossentropy_softmax_1hot(x=self.y_m[self.i], y_idx=self.y_data_flat[self.i])
       #nll, pcx = T.nnet.crossentropy_softmax_1hot(x=self.y_m, y_idx=self.y_data_flat)
       #nll = -T.log(T.nnet.softmax(self.y_m)[self.i,self.y_data_flat[self.i]])
       #z_c = T.exp(self.z[:,self.y])
       #nll = -T.log(z_c / T.sum(z_c,axis=2,keepdims=True))
       #nll, pcx = T.nnet.crossentropy_softmax_1hot(x=self.y_m, y_idx=self.y_data_flat)
       #nll = T.set_subtensor(nll[self.j], T.constant(0.0))
     else:
       nll = -T.dot(T.log(T.clip(self.p_y_given_x[self.i], 1.e-38, 1.e20)), self.y_data_flat[self.i].T)
     return self.norm * T.sum(nll), known_grads
   elif self.loss == 'entropy':
     h_e = T.exp(self.y_m) #(TB)
     pcx = T.clip((h_e / T.sum(h_e, axis=1, keepdims=True)).reshape((self.index.shape[0],self.index.shape[1],self.attrs['n_out'])), 1.e-6, 1.e6) # TBD
     ee = -T.sum(pcx[self.i] * T.log(pcx[self.i])) # TB
     #nll, pcxs = T.nnet.crossentropy_softmax_1hot(x=self.y_m[self.i], y_idx=self.y[self.i])
     nll, _ = T.nnet.crossentropy_softmax_1hot(x=self.y_m, y_idx=self.y_data_flat) # TB
     ce = nll.reshape(self.index.shape) * self.index # TB
     y = self.y_data_flat.reshape(self.index.shape) * self.index # TB
     f = T.any(T.gt(y,0), axis=0) # B
     return T.sum(f * T.sum(ce, axis=0) + (1-f) * T.sum(ee, axis=0)), known_grads
     #return T.sum(T.switch(T.gt(T.sum(y,axis=0),0), T.sum(ce, axis=0), -T.sum(ee, axis=0))), known_grads
     #return T.switch(T.gt(T.sum(self.y_m[self.i]),0), T.sum(nll), -T.sum(pcx * T.log(pcx))), known_grads
   elif self.loss == 'priori':
     pcx = self.p_y_given_x[self.i, self.y_data_flat[self.i]]
     pcx = T.clip(pcx, 1.e-38, 1.e20)  # For pcx near zero, the gradient will likely explode.
     return -T.sum(T.log(pcx)), known_grads
   elif self.loss == 'sse':
     if self.y_data_flat.dtype.startswith('int'):
       y_f = T.cast(T.reshape(self.y_data_flat, (self.y_data_flat.shape[0] * self.y_data_flat.shape[1]), ndim=1), 'int32')
       y_oh = T.eq(T.shape_padleft(T.arange(self.attrs['n_out']), y_f.ndim), T.shape_padright(y_f, 1))
       return T.mean(T.sqr(self.p_y_given_x[self.i] - y_oh[self.i])), known_grads
     else:
       #return T.sum(T.sum(T.sqr(self.y_m - self.y.reshape(self.y_m.shape)), axis=1)[self.i]), known_grads
       return T.sum(T.sqr(self.y_m[self.i] - self.y_data_flat.reshape(self.y_m.shape)[self.i])), known_grads
       #return T.sum(T.sum(T.sqr(self.z - (self.y.reshape((self.index.shape[0], self.index.shape[1], self.attrs['n_out']))[:self.z.shape[0]])), axis=2).flatten()[self.i]), known_grads
       #y_z = T.set_subtensor(T.zeros((self.index.shape[0],self.index.shape[1],self.attrs['n_out']), dtype='float32')[:self.z.shape[0]], self.z).flatten()
       #return T.sum(T.sqr(y_z[self.i] - self.y[self.i])), known_grads
       #return T.sum(T.sqr(self.y_m - self.y[:self.z.shape[0]*self.index.shape[1]]).flatten()[self.i]), known_grads
   else:
     assert False, "unknown loss: %s" % self.loss
Developer ID: chagge, Project: returnn, Lines of code: 60, Source file: NetworkOutputLayer.py


Example 4: in_transit

    def in_transit(self, t, r=0.0, texp=None):
        """Get a list of timestamps that are in transit

        Args:
            t (vector): A vector of timestamps to be evaluated.
            r (Optional): The radii of the planets.
            texp (Optional[float]): The exposure time.

        Returns:
            The indices of the timestamps that are in transit.

        """

        z = tt.zeros_like(self.a)
        r = tt.as_tensor_variable(r) + z
        R = self.r_star + z

        # Wrap the times into time since transit
        hp = 0.5 * self.period
        dt = tt.mod(self._warp_times(t) - self.t0 + hp, self.period) - hp

        if self.ecc is None:
            # Equation 14 from Winn (2010)
            k = r / R
            arg = tt.square(1 + k) - tt.square(self.b)
            factor = R / (self.a * self.sin_incl)
            hdur = hp * tt.arcsin(factor * tt.sqrt(arg)) / np.pi
            t_start = -hdur
            t_end = hdur
            flag = z

        else:
            M_contact = self.contact_points_op(
                self.a, self.ecc, self.cos_omega, self.sin_omega,
                self.cos_incl + z, self.sin_incl + z, R + r)
            flag = M_contact[2]

            t_start = (M_contact[0] - self.M0) / self.n
            t_start = tt.mod(t_start + hp, self.period) - hp
            t_end = (M_contact[1] - self.M0) / self.n
            t_end = tt.mod(t_end + hp, self.period) - hp

            t_start = tt.switch(tt.gt(t_start, 0.0),
                                t_start - self.period, t_start)
            t_end = tt.switch(tt.lt(t_end, 0.0),
                              t_end + self.period, t_end)

        if texp is not None:
            t_start -= 0.5*texp
            t_end += 0.5*texp

        mask = tt.any(tt.and_(dt >= t_start, dt <= t_end), axis=-1)
        result = ifelse(tt.all(tt.eq(flag, 0)),
                        tt.arange(t.size)[mask],
                        tt.arange(t.size))

        return result
Developer ID: dfm, Project: exoplanet, Lines of code: 57, Source file: keplerian.py


Example 5: _step

 def _step(input, *states):
     output, new_states = step_function(input, states)
     if masking:
         # if all-zero input timestep, return
         # all-zero output and unchanged states
         switch = T.any(input, axis=-1, keepdims=True)
         output = T.switch(switch, output, 0. * output)
         return_states = []
         for state, new_state in zip(states, new_states):
             return_states.append(T.switch(switch, new_state, state))
         return [output] + return_states
     else:
         return [output] + new_states
Developer ID: lxastro, Project: dlx, Lines of code: 13, Source file: keras_utils.py


Example 6: grad

    def grad(self, inputs, gradients):
        """
        Cholesky decomposition reverse-mode gradient update.

        Symbolic expression for reverse-mode Cholesky gradient taken from [0]_

        References
        ----------
        .. [0] I. Murray, "Differentiation of the Cholesky decomposition",
           http://arxiv.org/abs/1602.07527

        """

        x = inputs[0]
        dz = gradients[0]
        chol_x = self(x)

        # Replace the cholesky decomposition with 1 if there are nans
        # or solve_upper_triangular will throw a ValueError.
        if self.on_error == 'nan':
            ok = ~tensor.any(tensor.isnan(chol_x))
            chol_x = tensor.switch(ok, chol_x, 1)
            dz = tensor.switch(ok, dz, 1)

        # deal with upper triangular by converting to lower triangular
        if not self.lower:
            chol_x = chol_x.T
            dz = dz.T

        def tril_and_halve_diagonal(mtx):
            """Extracts lower triangle of square matrix and halves diagonal."""
            return tensor.tril(mtx) - tensor.diag(tensor.diagonal(mtx) / 2.)

        def conjugate_solve_triangular(outer, inner):
            """Computes L^{-T} P L^{-1} for lower-triangular L."""
            return solve_upper_triangular(
                outer.T, solve_upper_triangular(outer.T, inner.T).T)

        s = conjugate_solve_triangular(
            chol_x, tril_and_halve_diagonal(chol_x.T.dot(dz)))

        if self.lower:
            grad = tensor.tril(s + s.T) - tensor.diag(tensor.diagonal(s))
        else:
            grad = tensor.triu(s + s.T) - tensor.diag(tensor.diagonal(s))

        if self.on_error == 'nan':
            return [tensor.switch(ok, grad, np.nan)]
        else:
            return [grad]
Developer ID: HapeMask, Project: Theano, Lines of code: 50, Source file: slinalg.py


Example 7: compile_prop_f

    def compile_prop_f(self, signals, has_input, min_tau=0.0):
        tau_in = T.scalar('min_tau', dtype=FLOATX)
        inputs = [tau_in]
        x = self.signal(signals)

        # Get estimate of the state from layer above
        estimate = self.estimate(signals)

        # Feedforward originates from previous layer's state or given input
        if not has_input:
            feedforward = self.feedforward(signals)
            has_nans = T.as_tensor_variable(0)
            nans = 0.0
        else:
            input_t = T.matrix('input', dtype=FLOATX)
            inputs += [input_t]
            nans = T.isnan(input_t)
            has_nans = T.any(nans)
            feedforward = T.where(nans, 0.0, input_t)

        self.info('Compiling propagation: [%6s] -> %4s <- [%6s]' %
                  (",".join([p.name for p in self.prev] if self.prev else 'u/y'),
                   self.name,
                   ",".join([p.name for p in self.next] if self.next else '')))

        # Apply nonlinearity to feedforward path only
        if self.nonlin:
            feedforward = self.nonlin(feedforward)

        if self.merge_op:
            assert not self.persistent, 'cannot combine with merge_op'
            new_value = self.merge_op(feedforward, estimate)
        elif self.persistent:
            new_value = feedforward
        else:
            new_value = feedforward - estimate

        # If predicting missing values, force them to zero in residual so
        # that they don't influence learning
        new_value = ifelse(has_nans, T.where(nans, 0.0, new_value), new_value)

        (new_X, t, d) = lerp(x.var, new_value, tau_in)
        d = T.max(d)
        updates = [(x.var, ifelse(self.enabled, new_X, x.var))]

        return theano.function(inputs=inputs,
                               outputs=d,
                               updates=updates)
Developer ID: arasmus, Project: eca, Lines of code: 48, Source file: eca.py


Example 8: _step

 def _step(*args):
     global single_result
     input = args[0]
     states = args[1:]
     output, new_states = step_function(input, states)
     if masking:
         # if all-zero input timestep, return
         # all-zero output and unchanged states
         switch = T.any(input)
         output = T.switch(switch, output, 0. * output)
         return_states = []
         for state, new_state in zip(states, new_states):
             return_states.append(T.switch(switch, new_state, state))
         return [output] + return_states
     else:
         return [output] + new_states
Developer ID: sfwlily, Project: keras, Lines of code: 16, Source file: theano_backend.py


Example 9: _step

 def _step(input, *args): 
     # separate states and contexts
     states = args[0:nb_states]
     output, other_outputs, new_states = step_function(input, args)
     if masking:
         # if all-zero input timestep, return
         # all-zero output and unchanged states
         switch = T.any(input, axis=-1, keepdims=True)
         output = T.switch(switch, output, 0. * output)
         for other_output in other_outputs:
             other_output = T.switch(switch, other_output, 0. * other_output)      
         return_states = []
         for state, new_state in zip(states, new_states):
             return_states.append(T.switch(switch, new_state, state))
         return [output] + other_outputs + return_states
     else:
         return [output] + other_outputs + new_states
Developer ID: lxastro, Project: lxnn, Lines of code: 17, Source file: theano_backend_lx.py


Example 10: in_transit

    def in_transit(self, t, r=None, texp=None):
        """Get a list of timestamps that are in transit

        Args:
            t (vector): A vector of timestamps to be evaluated.
            r (Optional): The radii of the planets.
            texp (Optional[float]): The exposure time.

        Returns:
            The indices of the timestamps that are in transit.

        """
        dt = tt.mod(tt.shape_padright(t) - self._ref_time, self.period)
        dt -= self._half_period
        if self.r is None:
            tol = 0.5 * self.duration
        else:
            x = (r + self.r_star)**2 - self._b_norm**2
            tol = tt.sqrt(x) / self.speed
        if texp is not None:
            tol += 0.5 * texp
        mask = tt.any(tt.abs_(dt) < tol, axis=-1)
        return tt.arange(t.size)[mask]
Developer ID: dfm, Project: exoplanet, Lines of code: 23, Source file: simple.py


Example 11: __init__


#......... (part of the code omitted here) .........
                if ed_type == "VED":
                    seq2seq_model = DistributedSequenceEncoder(rng, layer_input, dur_input)
                    layer_input   = T.concatenate((seq2seq_model.encoded_output, frame_feat_input), axis=1)
                    input_size    = input_size+4
                # hierarchical encoder-decoder
                elif ed_type == "HED":
                    seg_len       = layer_input.size//input_size
                    seg_dur_input = dur_input[prev_seg_end: prev_seg_end+seg_len]
                    num_of_segs   = T.sum(seg_dur_input)
                    seq2seq_model = DistributedSequenceEncoder(rng, layer_input, seg_dur_input)
                    addfeat_input = frame_feat_input[0:num_of_segs, MLU_div[encoder_count]:MLU_div[encoder_count+1]]  
                    layer_input   = T.concatenate((seq2seq_model.encoded_output, addfeat_input), axis=1)
                    input_size    = input_size + (MLU_div[encoder_count+1]-MLU_div[encoder_count])
                    prev_seg_end  = prev_seg_end + seg_len
                    encoder_count = encoder_count + 1

            # hidden layer activation
            if hidden_layer_type[i] in self.list_of_activations:
                hidden_activation = hidden_layer_type[i].lower()
                hidden_layer = GeneralLayer(rng, layer_input, input_size, hidden_layer_size[i], activation=hidden_activation, p=self.dropout_rate, training=self.is_train)
            elif hidden_layer_type[i] == 'TANHE' or hidden_layer_type[i] == 'SIGMOIDE':
                hidden_activation = hidden_layer_type[i][0:-1].lower()
                hidden_layer = GeneralLayer(rng, layer_input, input_size, hidden_layer_size[i], activation=hidden_activation, p=self.dropout_rate, training=self.is_train)
            elif hidden_layer_type[i] == 'TANH_LHUC':
                hidden_layer = SigmoidLayer_LHUC(rng, layer_input, input_size, hidden_layer_size[i], activation=T.tanh, p=self.dropout_rate, training=self.is_train)
            elif hidden_layer_type[i] == 'SLSTM' or hidden_layer_type[i] == 'SLSTME':
                hidden_layer = SimplifiedLstm(rng, layer_input, input_size, hidden_layer_size[i], p=self.dropout_rate, training=self.is_train, rnn_batch_training=self.rnn_batch_training)
            elif hidden_layer_type[i] == 'SLSTMD':
                hidden_layer = SimplifiedLstmDecoder(rng, layer_input, input_size, hidden_layer_size[i], self.n_out, p=self.dropout_rate, training=self.is_train, rnn_batch_training=self.rnn_batch_training)
            elif hidden_layer_type[i] == 'SGRU':
                hidden_layer = SimplifiedGRU(rng, layer_input, input_size, hidden_layer_size[i], p=self.dropout_rate, training=self.is_train, rnn_batch_training=self.rnn_batch_training)
            elif hidden_layer_type[i] == 'GRU':
                hidden_layer = GatedRecurrentUnit(rng, layer_input, input_size, hidden_layer_size[i], p=self.dropout_rate, training=self.is_train, rnn_batch_training=self.rnn_batch_training)
            elif hidden_layer_type[i] == 'LSTM' or hidden_layer_type[i] == 'LSTME':
                hidden_layer = VanillaLstm(rng, layer_input, input_size, hidden_layer_size[i], p=self.dropout_rate, training=self.is_train, rnn_batch_training=self.rnn_batch_training)
            elif hidden_layer_type[i] == 'LSTMD':
                hidden_layer = VanillaLstmDecoder(rng, layer_input, input_size, hidden_layer_size[i], self.n_out, p=self.dropout_rate, training=self.is_train, rnn_batch_training=self.rnn_batch_training)
            elif hidden_layer_type[i] == 'BSLSTM' or hidden_layer_type[i] == 'BSLSTME':
                hidden_layer = BidirectionSLstm(rng, layer_input, input_size, hidden_layer_size[i], hidden_layer_size[i], p=self.dropout_rate, training=self.is_train, rnn_batch_training=self.rnn_batch_training)
            elif hidden_layer_type[i] == 'BLSTM' or hidden_layer_type[i] == 'BLSTME':
                hidden_layer = BidirectionLstm(rng, layer_input, input_size, hidden_layer_size[i], hidden_layer_size[i], p=self.dropout_rate, training=self.is_train, rnn_batch_training=self.rnn_batch_training)
            elif hidden_layer_type[i] == 'RNN' or hidden_layer_type[i] == 'RNNE':
                hidden_layer = VanillaRNN(rng, layer_input, input_size, hidden_layer_size[i], p=self.dropout_rate, training=self.is_train, rnn_batch_training=self.rnn_batch_training)
            elif hidden_layer_type[i] == 'RNND':
                hidden_layer = VanillaRNNDecoder(rng, layer_input, input_size, hidden_layer_size[i], self.n_out, p=self.dropout_rate, training=self.is_train, rnn_batch_training=self.rnn_batch_training)
            elif hidden_layer_type[i] == 'LSTM_LHUC':
                hidden_layer = VanillaLstm_LHUC(rng, layer_input, input_size, hidden_layer_size[i], p=self.dropout_rate, training=self.is_train, rnn_batch_training=self.rnn_batch_training)
            else:
                logger.critical("This hidden layer type: %s is not supported right now! \n Please use one of the following: SLSTM, BSLSTM, TANH, SIGMOID\n" %(hidden_layer_type[i]))
                sys.exit(1)

            self.rnn_layers.append(hidden_layer)
            self.params.extend(hidden_layer.params)

        input_size = hidden_layer_size[-1]
        if hidden_layer_type[-1] in BLSTM_variants:
            input_size = hidden_layer_size[-1]*2

        if hidden_layer_type[-1] in Decoder_variants:
            self.final_layer = self.rnn_layers[-1]
        else:
            output_activation = output_type.lower()
            if output_activation == 'linear':
                self.final_layer = LinearLayer(rng, self.rnn_layers[-1].output, input_size, self.n_out)
            elif output_activation == 'recurrent':
                self.final_layer = RecurrentOutputLayer(rng, self.rnn_layers[-1].output, input_size, self.n_out, rnn_batch_training=self.rnn_batch_training)
            elif output_type.upper() in self.list_of_activations:
                self.final_layer = GeneralLayer(rng, self.rnn_layers[-1].output, input_size, self.n_out, activation=output_activation)
            else:
                logger.critical("This output layer type: %s is not supported right now! \n Please use one of the following: LINEAR, BSLSTM\n" %(output_type))
                sys.exit(1)

            self.params.extend(self.final_layer.params)

        self.updates = {}
        for param in self.params:
            self.updates[param] = theano.shared(value = np.zeros(param.get_value(borrow = True).shape,
                                                dtype = theano.config.floatX), name = 'updates')

        if self.loss_function == 'CCE':
            self.finetune_cost = self.categorical_crossentropy_loss(self.final_layer.output, self.y) 
            self.errors        = self.categorical_crossentropy_loss(self.final_layer.output, self.y) 
        elif self.loss_function == 'Hinge':    
            self.finetune_cost = self.multiclass_hinge_loss(self.final_layer.output, self.y)
            self.errors        = self.multiclass_hinge_loss(self.final_layer.output, self.y)
        elif self.loss_function == 'MMSE':
            if self.rnn_batch_training:
                self.y_mod = T.reshape(self.y, (-1, n_out))
                self.final_layer_output = T.reshape(self.final_layer.output, (-1, n_out))

                nonzero_rows = T.any(self.y_mod, 1).nonzero()
            
                self.y_mod = self.y_mod[nonzero_rows]
                self.final_layer_output = self.final_layer_output[nonzero_rows]
            
                self.finetune_cost = T.mean(T.sum((self.final_layer_output - self.y_mod) ** 2, axis=1))
                self.errors = T.mean(T.sum((self.final_layer_output - self.y_mod) ** 2, axis=1))
            else:
                self.finetune_cost = T.mean(T.sum((self.final_layer.output - self.y) ** 2, axis=1))
                self.errors = T.mean(T.sum((self.final_layer.output - self.y) ** 2, axis=1))
Developer ID: CSTR-Edinburgh, Project: merlin, Lines of code: 101, Source file: hed_rnn.py


Example 12: get_reward

    def get_reward(self,session_states,session_actions,batch_i):
        """
        WARNING! this runs on a single session, not on a batch
        reward given for taking the action in current environment state
        arguments:
            session_states float[batch_id, memory_id]: environment state before taking action
            session_actions int[batch_id]: agent action at this tick
        returns:
            reward float[batch_id]: reward for taking action from the given state
        """
        #unpack states and actions
        session_states = check_list(session_states)[0]
        session_actions = check_list(session_actions)[0]
        
        
        time_range = T.arange(session_actions.shape[0])
        

        has_tried_already = session_states[time_range,session_actions]
        session_is_active = T.eq(session_states[:,self.end_action_id],0)
        
        has_finished_now = T.eq(session_actions,self.end_action_id)
        has_finished_now = T.set_subtensor(has_finished_now[-1],1)
        end_tick = has_finished_now.nonzero()[0][0]
        
        action_is_categorical = in1d(session_actions, self.category_action_ids)
                
        response = self.joint_data[batch_i,session_actions].ravel()
        
        at_least_one_category_guessed = T.any(action_is_categorical[:end_tick] & (response[:end_tick]>0))

        
        #categorical and attributes
        reward_for_intermediate_action = T.switch(
            action_is_categorical,
            response*(self.rw["category_positive"]-self.rw["category_negative"]) + self.rw["category_negative"],
            response*(self.rw["attribute_positive"]-self.rw["attribute_negative"]) + self.rw["attribute_negative"]
        )
        reward_for_intermediate_action_first_time = T.switch(
                has_tried_already,
                self.rw["repeated_poll"],
                reward_for_intermediate_action,
            )

        #ending session
        reward_for_end_action = T.switch(at_least_one_category_guessed, #if chosen at least 1 category
                                          self.rw["end_action"],   #do not penalize
                                          self.rw["end_action_if_no_category_predicted"])  #else punish
        
        #include end action
        reward_for_action = T.switch(
            has_finished_now,
            reward_for_end_action,
            reward_for_intermediate_action_first_time,
        )
        
        
        final_reward = T.switch(
            session_is_active,
            reward_for_action,
            0,

            
        )
        
        
        return final_reward.astype(theano.config.floatX)
Developer ID: AndreySheka, Project: AgentNet, Lines of code: 67, Source file: __init__.py


Example 13: logpow

def logpow(x, m):
    """
    Calculates log(x**m) since m*log(x) will fail when m, x = 0.
    """
    # return m * log(x)
    return T.switch(T.any(T.eq(x, 0)), -np.inf, m * T.log(x))
Developer ID: DukasGuo, Project: pymc3, Lines of code: 6, Source file: dist_math.py
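
As a quick illustration (not part of the pymc3 source), the hypothetical check below evaluates the logpow defined in Example 13, assuming that module's usual aliases import theano.tensor as T and import numpy as np. When any element of x is zero, the whole result is clamped to -inf instead of producing nan from 0 * log(0).

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
m = T.dvector('m')
f = theano.function([x, m], logpow(x, m))  # logpow as defined in Example 13

print(f(np.array([2., 3.]), np.array([1., 2.])))  # m * log(x): roughly [0.693, 2.197]
print(f(np.array([0., 3.]), np.array([0., 2.])))  # guard fires: every entry becomes -inf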


Example 14: get_output_mask

 def get_output_mask(self, train=False):
     X = self.get_input(train)
     return T.any(T.ones_like(X) * (1.0 - T.eq(X, self.mask_value)), axis=-1)
Developer ID: nehz, Project: keras, Lines of code: 3, Source file: core.py


Example 15: compute_step

    def compute_step(self, param, previous_step):
        not_finite = tensor.any(tensor.or_(
            tensor.isnan(previous_step), tensor.isinf(previous_step)))
        step = tensor.switch(not_finite, self.scaler * param, previous_step)

        return step, []
Developer ID: Fdenpc, Project: blocks, Lines of code: 6, Source file: __init__.py


Example 16: logpow

def logpow(x, m):
    """
    Calculates log(x**m) since m*log(x) will fail when m, x = 0.
    """
    # return m * log(x)
    return switch(any(eq(x, 0)), -inf, m * log(x))
Developer ID: MogeiWang, Project: pymc3, Lines of code: 6, Source file: dist_math.py


Example 17: unroll_scan

def unroll_scan(fn, sequences=(), outputs_info=(), non_sequences=(), n_steps=None,
                go_backwards=False):
  """
  Helper function to unroll for loops. Can be used to unroll theano.scan.
  The parameter names are identical to those of theano.scan; please refer to
  the theano.scan documentation for more information.

  Note that this function does not support the truncate_gradient
  setting from theano.scan.

  Code adapted from https://github.com/Lasagne/Lasagne.
  Thank you!

  Parameters
  ----------

  fn : function
      Function that defines calculations at each step.

  sequences : TensorVariable or list of TensorVariables
      List of TensorVariable with sequence data. The function iterates
      over the first dimension of each TensorVariable.

  outputs_info : list of TensorVariables
      List of tensors specifying the initial values for each recurrent
      value.

  non_sequences: list of TensorVariables
      List of theano.shared variables that are used in the step function.

  n_steps: int
      Number of steps to unroll.

  go_backwards: bool
      If true the recursion starts at sequences[-1] and iterates
      backwards.

  Returns
  -------
  Tuple of the form (outputs, updates).
  outputs is a list of TensorVariables. Each element in the list gives the recurrent
  values at each time step.
  updates is an empty dict for now.
  """
  if not isinstance(sequences, (list, tuple)):
    sequences = [sequences]
  sequences = list(sequences)
  outputs_info = list(outputs_info)
  non_sequences = list(non_sequences)

  # When backwards reverse the recursion direction
  counter = range(n_steps)
  if go_backwards:
    counter = counter[::-1]

  output = []
  prev_vals = outputs_info
  until = []
  for i in counter:
    assert len(prev_vals) == len(outputs_info)
    prev_vals = [prev for prev, out_info in zip(prev_vals, outputs_info) if out_info is not None]
    step_input = [s[i] for s in sequences] + prev_vals + non_sequences
    out_ = fn(*step_input)
    # The returned values from step can be either a TensorVariable,
    # a list, or a tuple.  Below, we force it to always be a list.
    if isinstance(out_, T.TensorVariable):
      out_ = [out_]
    if isinstance(out_, tuple):
      if len(out_) >= 1 and isinstance(out_[0], (list, tuple)):
        if len(out_) >= 2:
          assert not out_[1], "shared var updates not supported"
        if len(out_) >= 3:
          assert isinstance(out_[2], theano.scan_module.until)
          until.append(T.neq(out_[2].condition, 0))
        out_ = list(out_[0])
      else:
        out_ = list(out_)
    output.append(out_)

    prev_vals = output[-1]

  # iterate over each scan output and convert it to same format as scan:
  # [[output11, output12,...output1n],
  # [output21, output22,...output2n],...]
  output_scan = []
  for i in range(len(output[0])):
    l = map(lambda x: x[i], output)
    output_scan.append(T.stack(*l))

  if until:
    assert len(until) == n_steps
    until_conds = T.stack(*until)
    new_len = T.switch(T.any(until_conds),
                       T.minimum(T.argmax(until_conds) + 1, n_steps),
                       n_steps)
    output_scan = [out[:new_len] for out in output_scan]

  if len(output_scan) == 1:
    output_scan = output_scan[0]
  updates = {}
#......... (part of the code omitted here) .........
Developer ID: atuxhe, Project: returnn, Lines of code: 101, Source file: TheanoUtil.py
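
To make Example 17 concrete, here is a hypothetical usage sketch: a running sum computed over a fixed number of unrolled steps. It assumes the omitted tail of unroll_scan returns (output_scan, updates) in the same way theano.scan does; the names xs, acc0 and step are illustrative only.

import numpy as np
import theano
import theano.tensor as T

xs = T.matrix('xs')                               # sequence data, shape (n_steps, dim)
acc0 = T.zeros((3,), dtype=theano.config.floatX)  # initial accumulator value

def step(x_t, acc_prev):
    # one unrolled step: add the current sequence element to the running sum
    return acc_prev + x_t

# assumed to return (outputs, updates), mirroring theano.scan
outputs, updates = unroll_scan(step, sequences=[xs], outputs_info=[acc0], n_steps=4)

f = theano.function([xs], outputs)
print(f(np.ones((4, 3), dtype=theano.config.floatX)))  # rows of 1s, 2s, 3s, 4s (running sums)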


Example 18: pgrad

 def pgrad(g_out):
     g_out = T.clip(g_out, self.clip_lower_bound, self.clip_upper_bound)
     g_out = ifelse(T.any(T.isnan(g_out)), T.ones_like(g_out)*0.00001, g_out)
     return g_out
Developer ID: npow, Project: visual_qa, Lines of code: 4, Source file: main.py


Example 19: setup_backprop

  def setup_backprop(self):
    eta = T.scalar('eta_for_backprop')
    x = T.lvector('x_for_backprop')
    y = T.lvector('y_for_backprop')
    y_in_x_inds = T.lmatrix('y_in_x_inds_for_backprop')
    dec_init_state, annotations = self._symb_encoder(x)

    def decoder_recurrence(y_t, cur_y_in_x_inds, h_prev, annotations, *params):
      h_for_write = self.spec.decoder.get_h_for_write(h_prev)
      scores = self.spec.get_attention_scores(h_for_write, annotations)
      alpha = self.spec.get_alpha(scores)
      c_t = self.spec.get_context(alpha, annotations)
      write_dist = self.spec.f_write(h_for_write, c_t, scores)
      base_p_y_t = write_dist[y_t] 
      if self.spec.attention_copying:
        copying_p_y_t = T.dot(
            write_dist[self.out_vocabulary.size():],
            cur_y_in_x_inds)
        p_y_t = base_p_y_t + copying_p_y_t
      else:
        p_y_t = base_p_y_t
      h_t = self.spec.f_dec(y_t, c_t, h_prev)
      return (h_t, p_y_t)

    dec_results, _ = theano.scan(
        fn=decoder_recurrence, sequences=[y, y_in_x_inds],
        outputs_info=[dec_init_state, None],
        non_sequences=[annotations] + self.spec.get_all_shared())
    p_y_seq = dec_results[1]
    log_p_y = T.sum(T.log(p_y_seq))
    gradients = T.grad(log_p_y, self.params)

    # Do the updates here
    updates = []
    if self.spec.step_rule in ('adagrad', 'rmsprop'):
      # Adagrad updates
      for p, g, c in zip(self.params, gradients, self.grad_cache):
        grad_norm = g.norm(2)
        clipped_grad = ifelse(grad_norm >= CLIP_THRESH, 
                              g * CLIP_THRESH / grad_norm, g)
        if self.spec.step_rule == 'adagrad':
          new_c = c + clipped_grad ** 2
        else:  # rmsprop
          decay_rate = 0.9  # Use fixed decay rate of 0.9
          new_c = decay_rate * c + (1.0 - decay_rate) * clipped_grad ** 2
        new_p = p + eta * clipped_grad / T.sqrt(new_c + 1e-4)
        has_non_finite = T.any(T.isnan(new_p) + T.isinf(new_p))
        updates.append((p, ifelse(has_non_finite, p, new_p)))
        updates.append((c, ifelse(has_non_finite, c, new_c)))
    elif self.spec.step_rule == 'nesterov':
      # Nesterov momentum
      for p, g, v in zip(self.params, gradients, self.grad_cache):
        grad_norm = g.norm(2)
        clipped_grad = ifelse(grad_norm >= CLIP_THRESH, 
                              g * CLIP_THRESH / grad_norm, g)
        new_v = NESTEROV_MU * v + eta * clipped_grad
        new_p = p - NESTEROV_MU * v + (1 + NESTEROV_MU) * new_v
        has_non_finite = (T.any(T.isnan(new_p) + T.isinf(new_p)) +
                          T.any(T.isnan(new_v) + T.isinf(new_v)))
        updates.append((p, ifelse(has_non_finite, p, new_p)))
        updates.append((v, ifelse(has_non_finite, v, new_v)))
    else:
      # Simple SGD updates
      for p, g in zip(self.params, gradients):
        grad_norm = g.norm(2)
        clipped_grad = ifelse(grad_norm >= CLIP_THRESH, 
                              g * CLIP_THRESH / grad_norm, g)
        new_p = p + eta * clipped_grad
        has_non_finite = T.any(T.isnan(new_p) + T.isinf(new_p))
        updates.append((p, ifelse(has_non_finite, p, new_p)))
        #updates.append((p, new_p))

    self._backprop = theano.function(
        inputs=[x, y, eta, y_in_x_inds],
        outputs=[p_y_seq, log_p_y],
        updates=updates)
Developer ID: arunchaganty, Project: nn-semparse, Lines of code: 76, Source file: attention.py


Example 20: any

def any(x, axis=None, keepdims=False):
    '''Bitwise reduction (logical OR).
    '''
    return T.any(x, axis=axis, keepdims=keepdims)
Developer ID: sfwlily, Project: keras, Lines of code: 4, Source file: theano_backend.py



Note: The theano.tensor.any function examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please follow the corresponding project's License when distributing or using the code; do not reproduce without permission.

