
Python tensorflow.div Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.div function in Python. If you are wondering exactly what tf.div does, how it is called, or what real-world uses look like, the curated code samples below should help.



The following presents 20 code examples of the div function, sorted by popularity by default.
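Before diving into the examples, a minimal sketch of what tf.div does may help: it divides two tensors element-wise. The snippet below is illustrative only (the tensor values are made up) and assumes the TensorFlow 1.x API; tf.div was deprecated late in the 1.x line and is absent from the tf namespace in 2.x, where tf.divide / tf.math.divide should be used instead.

import tensorflow as tf  # assumes TensorFlow 1.x

# Two tensors of the same shape; tf.div divides them element-wise.
a = tf.constant([10.0, 20.0, 30.0])
b = tf.constant([2.0, 4.0, 5.0])
q = tf.div(a, b)  # -> [5.0, 5.0, 6.0]

with tf.Session() as sess:
    print(sess.run(q))  # prints [5. 5. 6.]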

Example 1: __init__

 def __init__(self, label, clauses, save_path=""):
     print "defining the knowledge base", label
     self.label = label
     self.clauses = clauses
     self.parameters = [par for cl in self.clauses for par in cl.parameters]
     if not self.clauses:
         self.tensor = tf.constant(1.0)
     else:
         clauses_value_tensor = tf.concat(0, [cl.tensor for cl in clauses])
         if default_clauses_aggregator == "min":
             print "clauses aggregator is min"
             self.tensor = tf.reduce_min(clauses_value_tensor)
         if default_clauses_aggregator == "mean":
             print "clauses aggregator is mean"
             self.tensor = tf.reduce_mean(clauses_value_tensor)
         if default_clauses_aggregator == "hmean":
             print "clauses aggregator is hmean"
             self.tensor = tf.div(tf.to_float(tf.size(clauses_value_tensor)), tf.reduce_sum(tf.inv(clauses_value_tensor), keep_dims=True))
         if default_clauses_aggregator == "wmean":
             print "clauses aggregator is weighted mean"
             weights_tensor = tf.constant([cl.weight for cl in clauses])
             self.tensor = tf.div(tf.reduce_sum(tf.mul(weights_tensor, clauses_value_tensor)), tf.reduce_sum(weights_tensor))
     if default_positive_fact_penality != 0:
         self.loss = smooth(self.parameters) + \
                     tf.mul(default_positive_fact_penality, self.penalize_positive_facts()) - \
                     PR(self.tensor)
     else:
         self.loss = smooth(self.parameters) - PR(self.tensor)
     self.save_path = save_path
     self.train_op = train_op(self.loss, default_optimizer)
     self.saver = tf.train.Saver(max_to_keep=20)
     print "knowledge base", label, "is defined"
Author: ivanDonadello | Project: knowPic | Lines of code: 32 | Source file: logictensornetworks.py


Example 2: loglik_discrete

def loglik_discrete(a, b, y_, u_, output_collection=(), name=None):
    """Returns element-wise Weibull censored discrete log-likelihood.

    Unit-discretized weibull log-likelihood. loss=-loglikelihood.

    .. note::
        All input values must be of same type and shape.

    :param a: alpha. Positive nonzero `Tensor`.
    :type a: `float32` or `float64`.
    :param b: beta. Positive nonzero `Tensor`.
    :type b: `float32` or `float64`.
    :param y_: time to event. Positive nonzero `Tensor`.
    :type y_: `float32` or `float64`.
    :param u_: indicator. 0.0 if right censored, 1.0 if uncensored `Tensor`.
    :type u_: `float32` or `float64`.
    :param output_collection: name of the collection to collect result of this op.
    :type output_collection: Tuple of Strings.
    :param String name: name of the operation.
    :return: A `Tensor` of log-likelihoods of same shape as a, b, y_, u_.
    """

    with tf.name_scope(name, "weibull_loglik_discrete", [a, b, y_, u_]):
        hazard0 = tf.pow(tf.div(y_ + 1e-35, a), b)  # 1e-9 safe, really
        hazard1 = tf.pow(tf.div(y_ + 1.0, a), b)
        loglik = tf.multiply(u_, tf.log(
            tf.exp(hazard1 - hazard0) - 1.0)) - hazard1

        tf.add_to_collection(output_collection, loglik)
    return loglik
Author: g6t | Project: wtte-rnn | Lines of code: 30 | Source file: tensorflow.py
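For orientation, here is the math this example implements (a reading of the code, not part of the original source). With the cumulative hazard Λ(t) corresponding to hazard0 and hazard1 in the code, the returned value is

\log L = u \cdot \log\!\left(e^{\Lambda(y+1)-\Lambda(y)} - 1\right) - \Lambda(y+1), \qquad \Lambda(t) = \left(\frac{t+\varepsilon}{a}\right)^{b}

so an uncensored point (u_ = 1) contributes the log probability mass of the interval [y, y+1), while a right-censored point (u_ = 0) contributes only the log survival term -Λ(y+1).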


Example 3: scaled_squared_distance

    def scaled_squared_distance(self, X, Y):
        """ Computes the squared distance.

        Parameters
        ----------
        X : np or tf nd.array. shape = (x_samples, n_dim)
            One of the design matrices
        Y : np or tf nd.array. shape = (y_samples, n_dim)
            One of the design matrices
        
        Returns
        -------
        NA : tf nd.array. shape = (x_samples, y_samples)
            Scaled squared distance matrix M where M[i, j] is the squared distance
            between X[i] and Y[j]
        """
        # Scale X and Y accordingly
        Xs, Ys = (tf.div(X, self.length_scales), tf.div(Y, self.length_scales))
        # Create matrix of ones
        Xo = tf.ones(tf.pack([tf.shape(X)[0], 1]))
        Yo = tf.ones(tf.pack([1, tf.shape(Y)[0]]))
        # Precompute squared norms for rows of each matrix
        Xsqn = tf.reshape(tf.reduce_sum(tf.square(Xs), 1), tf.shape(Xo))
        Ysqn = tf.reshape(tf.reduce_sum(tf.square(Ys), 1), tf.shape(Yo))
        # Precompute "interaction" norm
        XYn = tf.matmul(Xs, tf.transpose(Ys))
        # Return the matrix of squared distances
        return tf.matmul(Xsqn, Yo) + tf.matmul(Xo, Ysqn) - 2*XYn
Author: kashizui | Project: automated-statistician | Lines of code: 28 | Source file: kernels.py
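The return line is the standard expansion of the squared Euclidean distance; with the scaled rows x = Xs[i] and y = Ys[j]:

\|x - y\|^2 = \|x\|^2 + \|y\|^2 - 2\,x^\top y

The multiplications by the all-ones matrices Xo and Yo merely broadcast the row norms Xsqn and Ysqn across columns and rows (tf.pack is the pre-1.0 name of tf.stack).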


Example 4: cell_locate

def cell_locate(size, bbox, S):

    """ 
    locate the center of ground truth in which grid cell

    """
    x = tf.cast(tf.slice(bbox, [0,0], [-1,1]), tf.float32)
    y = tf.cast(tf.slice(bbox, [0,1], [-1,1]), tf.float32)
    w = tf.cast(tf.slice(bbox, [0,2], [-1,1]), tf.float32)
    h = tf.cast(tf.slice(bbox, [0,3], [-1,1]), tf.float32)


    height, width = size

    cell_w = width / S
    cell_h = height / S

    center_y = tf.add(y, tf.mul(h, 0.5))
    center_x = tf.add(x, tf.mul(w, 0.5))

    cell_coord_x = tf.cast(tf.div(center_x, cell_w), tf.int32)
    cell_coord_y = tf.cast(tf.div(center_y, cell_h), tf.int32)

    cell_num = tf.add(tf.mul(cell_coord_y, S), cell_coord_x)

    return cell_num
Author: Johannes-brahms | Project: Yolo | Lines of code: 26 | Source file: yolo_utils.py
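In row-major order over the S x S grid, the index computed above is (reading it off the code):

\text{cell\_num} = \left\lfloor \frac{y_c}{h/S} \right\rfloor \cdot S + \left\lfloor \frac{x_c}{w/S} \right\rfloor, \qquad x_c = x + \tfrac{w_{box}}{2}, \quad y_c = y + \tfrac{h_{box}}{2}

where (w, h) are the image width and height from size, and the tf.int32 cast truncates, acting as a floor for in-image coordinates.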


Example 5: sample

    def sample(self, projected_output):
        """Return integer ID tensor representing the sampled word.
        
        Args:
            projected_output: Tensor [1, 1, state_size], representing a single
                decoder timestep output. 
        """
        # TODO: We really need a tf.control_dependencies check here (for rank).
        with tf.name_scope('decoder_sampler', values=[projected_output]):

            # Protect against extra size-1 dimensions; grab the 1D tensor
            # of size state_size.
            logits = tf.squeeze(projected_output)
            if self.temperature < 0.02:
                return tf.argmax(logits, axis=0)

            # Convert logits to probability distribution.
            probabilities = tf.div(logits, self.temperature)
            projected_output = tf.div(
                tf.exp(probabilities),
                tf.reduce_sum(tf.exp(probabilities), axis=-1))

            # Sample 1 time from the probability distribution.
            sample_ID = tf.squeeze(
                tf.multinomial(tf.expand_dims(probabilities, 0), 1))
        return sample_ID
Author: laurii | Project: DeepChatModels | Lines of code: 26 | Source file: decoders.py
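The two tf.div calls implement the usual temperature softmax:

p_i = \frac{e^{z_i / T}}{\sum_j e^{z_j / T}}

with T = self.temperature: small T sharpens the distribution toward the argmax (which the code short-circuits for T < 0.02), and large T flattens it. Note that tf.multinomial expects unnormalized log-probabilities, so sampling from the temperature-scaled logits (probabilities in the code) is already valid even without the explicit normalization step.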


Example 6: __init__

    def __init__(self, action1_bounds, action2_bounds, session):
        self.graph = session.graph
        with self.graph.as_default():
            self.sess = session

            self.action_bounds = [[action1_bounds[1], action2_bounds[1]],
                                  [action1_bounds[0], action2_bounds[0]]]

            self.action_size = len(self.action_bounds[0])
            self.action_input = tf.placeholder(tf.float32, [None, self.action_size])

            self.p_max = tf.constant(self.action_bounds[0], dtype=tf.float32)
            self.p_min = tf.constant(self.action_bounds[1], dtype=tf.float32)

            self.p_range = tf.constant([x - y for x, y in zip(self.action_bounds[0], self.action_bounds[1])],
                                       dtype=tf.float32)

            self.p_diff_max = tf.div(-self.action_input + self.p_max, self.p_range)
            self.p_diff_min = tf.div(self.action_input - self.p_min, self.p_range)

            self.zeros_act_grad_filter = tf.zeros([self.action_size])
            self.act_grad = tf.placeholder(tf.float32, [None, self.action_size])

            self.grad_inverter = tf.select(tf.greater(self.act_grad, self.zeros_act_grad_filter),
                                           tf.mul(self.act_grad, self.p_diff_max),
                                           tf.mul(self.act_grad, self.p_diff_min))
Author: JakobBreuninger | Project: neurobotics | Lines of code: 26 | Source file: grad_inverter.py
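For reference, this appears to be the "inverting gradients" bounding scheme for continuous action spaces (Hausknecht & Stone, 2016); rewritten from the code:

\nabla_p \leftarrow \nabla_p \cdot \begin{cases} (p_{\max} - p) / (p_{\max} - p_{\min}) & \text{if } \nabla_p > 0, \\ (p - p_{\min}) / (p_{\max} - p_{\min}) & \text{otherwise,} \end{cases}

which scales a gradient down as the action p approaches the bound it is being pushed toward (tf.select and tf.mul are the pre-1.0 names of tf.where and tf.multiply).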


Example 7: compute_auc

 def compute_auc(tp, fn, tn, fp, name):
   """Computes the roc-auc or pr-auc based on confusion counts."""
   rec = tf.div(tp + epsilon, tp + fn + epsilon)
   if curve == 'ROC':
     fp_rate = tf.div(fp, fp + tn + epsilon)
     x = fp_rate
     y = rec
   elif curve == 'R':  # recall auc
     x = tf.linspace(1., 0., num_thresholds)
     y = rec
   else:  # curve == 'PR'.
     prec = tf.div(tp + epsilon, tp + fp + epsilon)
     x = rec
     y = prec
   if summation_method == 'trapezoidal':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   (y[:num_thresholds - 1] + y[1:]) / 2.),
       name=name)
   elif summation_method == 'minoring':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   tf.minimum(y[:num_thresholds - 1], y[1:])),
       name=name)
   elif summation_method == 'majoring':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   tf.maximum(y[:num_thresholds - 1], y[1:])),
       name=name)
   else:
     raise ValueError('Invalid summation_method: %s' % summation_method)
Author: fossabot | Project: SiamFC-TensorFlow | Lines of code: 31 | Source file: track_metrics.py


Example 8: cosineface_losses

def cosineface_losses(embedding, labels, out_num, w_init=None, s=30., m=0.4):
    '''
    :param embedding: the input embedding vectors
    :param labels: the input labels; the shape should be e.g. (batch_size, 1)
    :param s: scalar value, default is 30
    :param out_num: output class num
    :param m: the margin value, default is 0.4
    :return: the final calculated output; this output is sent into tf.nn.softmax directly
    '''
    with tf.variable_scope('cosineface_loss'):
        # inputs and weights norm
        embedding_norm = tf.norm(embedding, axis=1, keep_dims=True)
        embedding = tf.div(embedding, embedding_norm, name='norm_embedding')
        weights = tf.get_variable(name='embedding_weights', shape=(embedding.get_shape().as_list()[-1], out_num),
                                  initializer=w_init, dtype=tf.float32)
        weights_norm = tf.norm(weights, axis=0, keep_dims=True)
        weights = tf.div(weights, weights_norm, name='norm_weights')
        # cos_theta - m
        cos_t = tf.matmul(embedding, weights, name='cos_t')
        cos_t_m = tf.subtract(cos_t, m, name='cos_t_m')

        mask = tf.one_hot(labels, depth=out_num, name='one_hot_mask')
        inv_mask = tf.subtract(1., mask, name='inverse_mask')

        output = tf.add(s * tf.multiply(cos_t, inv_mask), s * tf.multiply(cos_t_m, mask), name='cosineface_loss_output')
    return output
Author: xy694942097 | Project: InsightFace_TF | Lines of code: 26 | Source file: face_losses.py
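Read directly off the code, the returned logits are the large-margin-cosine (CosFace-style) logits: the margin m is applied only at the true-class position selected by the one-hot mask, and everything is scaled by s:

\text{output}_{ij} = \begin{cases} s\,(\cos\theta_{ij} - m) & \text{if } j = y_i, \\ s\,\cos\theta_{ij} & \text{otherwise,} \end{cases} \qquad \cos\theta_{ij} = \hat{e}_i^\top \hat{w}_j

where ê and ŵ are the L2-normalized embedding rows and weight columns; softmax cross-entropy is applied downstream, as the docstring says.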


Example 9: cross_entropy

    def cross_entropy(u, label_u, alpha=0.5, normed=False):

        label_ip = tf.cast(
            tf.matmul(label_u, tf.transpose(label_u)), tf.float32)
        s = tf.clip_by_value(label_ip, 0.0, 1.0)

        # compute balance param
        # s_t \in {-1, 1}
        s_t = tf.multiply(tf.add(s, tf.constant(-0.5)), tf.constant(2.0))
        sum_1 = tf.reduce_sum(s)
        sum_all = tf.reduce_sum(tf.abs(s_t))
        balance_param = tf.add(tf.abs(tf.add(s, tf.constant(-1.0))),
                               tf.multiply(tf.div(sum_all, sum_1), s))

        if normed:
            # ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
            ip_1 = tf.matmul(u, tf.transpose(u))

            def reduce_shaper(t):
                return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])
            mod_1 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(u)),
                                      reduce_shaper(tf.square(u)), transpose_b=True))
            ip = tf.div(ip_1, mod_1)
        else:
            ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
        ones = tf.ones([tf.shape(u)[0], tf.shape(u)[0]])
        return tf.reduce_mean(tf.multiply(tf.log(ones + tf.exp(alpha * ip)) - s * alpha * ip, balance_param))
Author: AllenMao | Project: DeepHash | Lines of code: 27 | Source file: dhn.py


Example 10: getBandWidth

    def getBandWidth(self,input_x,input_y,n_source,n_target,dim):
        ''' Calculate the kernel bandwidth
        gamma = 1 / E(||x - y||)
        :param input_x: source matrix
        :param input_y: target matrix
        :param n_source: number of source samples
        :param n_target: number of target samples
        :param dim: number of input dimensions (features)
        :return: gamma
        '''
        x = tf.cast(input_x, tf.float32)
        y = tf.cast(input_y, tf.float32)
        counter = tf.constant(float(n_source))
        sum_up = tf.constant(.0)
        shape = [1, dim]
        for s in range(n_source):
            list1 = tf.slice(x, [s, 0], shape)
            list2 = tf.slice(y, [s, 0], shape)

            # get ||x-y||
            squared = tf.square(tf.sub(list1, list2))
            norm = tf.reduce_sum(tf.sqrt(squared))
            norm = tf.div(norm,tf.constant(float(dim)))

            sum_up = tf.add(sum_up, tf.to_float(norm))


        gamma = tf.div(counter,sum_up)

        return gamma
Author: IreneZihuiLi | Project: deeplearning | Lines of code: 31 | Source file: Maximum_Mean_Discrepancy.py
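Since tf.sqrt(tf.square(.)) is just an element-wise absolute value, the bandwidth implemented here is the reciprocal of an empirical mean distance (an L1-based estimate of the E(||x - y||) in the docstring):

\gamma = \left( \frac{1}{n} \sum_{s=1}^{n} \frac{\lVert x_s - y_s \rVert_1}{d} \right)^{-1}

with n = n_source paired rows and d = dim features.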


Example 11: filters_bank

def filters_bank(M, N, J, L=8):
    filters = {}
    filters['psi'] = []

    offset_unpad = 0
    for j in range(J):
        for theta in range(L):
            psi = {}
            psi['j'] = j
            psi['theta'] = theta
            psi_signal = morlet_2d(M, N, 0.8 * 2**j, (int(L - L / 2 - 1) - theta) * np.pi / L, 3.0 / 4.0 * np.pi / 2**j,offset=offset_unpad)  # The 5 is here just to match the LUA implementation :)
            psi_signal_fourier = fft.fft2(psi_signal)
            for res in range(j + 1):
                psi_signal_fourier_res = crop_freq(psi_signal_fourier, res)
                psi[res] = tf.constant(np.stack((np.real(psi_signal_fourier_res), np.imag(psi_signal_fourier_res)), axis=2))
                psi[res] = tf.div(psi[res], (M * N // 2**(2 * j)), name="psi_theta%s_j%s" % (theta, j))
            filters['psi'].append(psi)

    filters['phi'] = {}
    phi_signal = gabor_2d(M, N, 0.8 * 2**(J - 1), 0, 0, offset=offset_unpad)
    phi_signal_fourier = fft.fft2(phi_signal)
    filters['phi']['j'] = J
    for res in range(J):
        phi_signal_fourier_res = crop_freq(phi_signal_fourier, res)
        filters['phi'][res] = tf.constant(np.stack((np.real(phi_signal_fourier_res), np.imag(phi_signal_fourier_res)), axis=2))
        filters['phi'][res] = tf.div(filters['phi'][res], (M * N // 2 ** (2 * J)), name="phi_res%s" % res)

    return filters
Author: MiG-Kharkov | Project: DeepLearningImplementations | Lines of code: 28 | Source file: filters_bank.py


Example 12: allreduce

def allreduce(tensor, average=True):
  """Perform an MPI allreduce on a tf.Tensor or tf.IndexedSlices.

  Arguments:
  tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
          The shape of the input must be identical across all ranks.
  average: If True, computes the average over all ranks.
           Otherwise, computes the sum over all ranks.

  This function performs a bandwidth-optimal ring allreduce on the input
  tensor. If the input is a tf.IndexedSlices, the function instead does an
  allgather on the values and the indices, effectively doing an allreduce on
  the represented tensor.
  """
  if isinstance(tensor, tf.IndexedSlices):
    # For IndexedSlices, do two allgathers instead of an allreduce.
    mpi_size = tf.cast(size(), tensor.values.dtype)
    values = allgather(tensor.values)
    indices = allgather(tensor.indices)

    # To make this operation into an average, divide all gathered values by
    # the MPI size.
    new_values = tf.div(values, mpi_size) if average else values
    return tf.IndexedSlices(new_values, indices,
                            dense_shape=tensor.dense_shape)
  else:
    mpi_size = tf.cast(size(), tensor.dtype)
    summed_tensor = _allreduce(tensor)
    new_tensor = (tf.div(summed_tensor, mpi_size)
                  if average else summed_tensor)
    return new_tensor
Author: Crazyonxh | Project: tensorflow | Lines of code: 31 | Source file: __init__.py


Example 13: tf_bivariate_normal

def tf_bivariate_normal(y, mu, sigma, rho, n_mixtures, batch_size):
    mu = tf.verify_tensor_all_finite(mu, "Mu not finite!")
    y = tf.verify_tensor_all_finite(y, "Y not finite!")
    delta = tf.sub(tf.tile(tf.expand_dims(y, 1), [1, n_mixtures, 1]), mu)
    delta = tf.verify_tensor_all_finite(delta, "Delta not finite!")
    sigma = tf.verify_tensor_all_finite(sigma, "Sigma not finite!")
    s = tf.reduce_prod(sigma, 2)
    s = tf.verify_tensor_all_finite(s, "S not finite!")
    # -1 <= rho <= 1
    z = tf.reduce_sum(tf.square(tf.div(delta, sigma + epsilon) + epsilon), 2) - \
        2 * tf.div(tf.mul(rho, tf.reduce_prod(delta, 2)), s + epsilon)
    
    z = tf.verify_tensor_all_finite(z, "Z not finite!")
    # 0 < negRho <= 1
    rho = tf.verify_tensor_all_finite(rho, "rho in bivariate normal not finite!")
    negRho = tf.clip_by_value(1 - tf.square(rho), epsilon, 1.0)
    negRho = tf.verify_tensor_all_finite(negRho, "negRho not finite!")
    # Note that if negRho goes near zero, or z goes really large, this explodes.
    negRho = tf.verify_tensor_all_finite(negRho, "negRho in bivariate normal not finite!")
    
    result = tf.clip_by_value(tf.exp(tf.div(-z, 2 * negRho)), 1.0e-8, 1.0e8)
    result = tf.verify_tensor_all_finite(result, "Result in bivariate normal not finite!")
    denom = 2 * np.pi * tf.mul(s, tf.sqrt(negRho))
    denom = tf.verify_tensor_all_finite(denom, "Denom in bivariate normal not finite!")
    result = tf.clip_by_value(tf.div(result, denom + epsilon), epsilon, 1.0)
    result = tf.verify_tensor_all_finite(result, "Result2 in bivariate normal not finite!")
    return result, delta
Author: cybercom-finland | Project: location_tracking_ml | Lines of code: 27 | Source file: model.py
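Epsilon terms and clipping aside, z and negRho assemble the standard bivariate normal density, evaluated per mixture component:

z = \left(\frac{\Delta_1}{\sigma_1}\right)^{2} + \left(\frac{\Delta_2}{\sigma_2}\right)^{2} - \frac{2\rho\,\Delta_1\Delta_2}{\sigma_1\sigma_2}, \qquad f(y) = \frac{\exp\!\left(-z / (2(1-\rho^2))\right)}{2\pi\,\sigma_1\sigma_2\sqrt{1-\rho^2}}

with Δ = y - μ; in the code s = σ₁σ₂ comes from tf.reduce_prod(sigma, 2) and negRho = 1 - ρ², while the tf.clip_by_value calls guard against the blow-up the comment warns about as negRho approaches zero.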


Example 14: batch_sample_with_temperature

def batch_sample_with_temperature(a, temperature=1.0):
    '''This function is like sample_with_temperature, except it can handle batch input a of [batch_size x logits].
        It takes logits as input and produces a specific number from the array. This is all done on the GPU,
        because this function uses tensorflow.
        As you increase the temperature, you will get more diversified output, but with more errors (usually grammatical, if you're
            doing text).
    args:
        Logits -- this must be a 2d array [batch_size x logits]
        Temperature -- how much variance you want in output
    returns:
        Selected number from distribution
    '''

    '''
    Equation can be found here: https://en.wikipedia.org/wiki/Softmax_function (under reinforcement learning)
        Karpathy did it here as well: https://github.com/karpathy/char-rnn/blob/4297a9bf69726823d944ad971555e91204f12ca8/sample.lua'''
    '''a is [batch_size x logits]'''
    with tf.op_scope([a,temperature], "batch_sample_with_temperature"):

        exponent_raised = tf.exp(tf.div(a, temperature)) #start by reduction of temperature, and get rid of negative numbers with exponent
        matrix_X = tf.div(exponent_raised, tf.reduce_sum(exponent_raised, reduction_indices = 1)) #this will yield probabilities!
        matrix_U = tf.random_uniform(tf.shape(a), minval = 0, maxval = 1)
        final_number = tf.argmax(tf.sub(matrix_X, matrix_U), dimension = 1) #you want dimension = 1 because you are argmaxing across rows.

    return final_number
Author: viswajithiii | Project: cs224d-project | Lines of code: 25 | Source file: seq2seq_custom.py


Example 15: getkernel

    def getkernel(self,input_x,input_y,n_source,n_target,dim,sigma):
        '''

        :param x: sourceMatrix
        :param y: targetMatrix
        :param n_source: # of source samples
        :param n_target: # of target samples
        :param dim: # of input dimension(features)
        :return: a scalar showing the MMD
        '''
        # ---------------------------------------
        # x = tf.convert_to_tensor(input_x,dtype=tf.float32)
        # y = tf.convert_to_tensor(input_y, dtype=tf.float32)


        x = tf.cast(input_x,tf.float32)
        y = tf.cast(input_y, tf.float32)


        k_ss = k_st = k_tt = tf.constant(0.)
        n_ss = n_st = n_tt = tf.constant(0.)
        flag = tf.constant(1.)
        signal = tf.constant(-2.0)
        shape = [1,dim]
        for s in range(n_source):
            for s_ in range(n_source):
                list1 = tf.slice(x, [s, 0], shape)
                list2 = tf.slice(x, [s_, 0], shape)
                k_ss = tf.add(self.gaussiankernel(list1,list2,sigma),k_ss)
                n_ss = tf.add(n_ss,flag)


        for t in range(n_target):
            for t_ in range(n_target):
                list1 = tf.slice(y, [t, 0], shape)
                list2 = tf.slice(y, [t_, 0], shape)
                k_tt = tf.add(self.gaussiankernel(list1, list2, sigma), k_tt)
                n_tt = tf.add(n_tt, flag)  # count target-target pairs


        for s in range(n_source):
            for t in range(n_target):
                list1 = tf.slice(x, [s, 0], shape)
                list2 = tf.slice(y, [t, 0], shape)
                k_st = tf.add(self.gaussiankernel(list1, list2, sigma), k_st)
                n_st = tf.add(n_st, flag)  # count source-target pairs




        term1 = tf.div(k_ss, n_ss)
        term2 = tf.div(k_tt, n_tt)
        term3 = tf.mul(signal, tf.div(k_st, n_st))
        term4 = tf.add(term1,term2)

        kernel = tf.add(term3, term4)


        return kernel
Author: IreneZihuiLi | Project: deeplearning | Lines of code: 59 | Source file: Maximum_Mean_Discrepancy.py
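The returned kernel value is the biased empirical estimate of the squared maximum mean discrepancy between the source and target samples:

\widehat{\mathrm{MMD}}^2 = \frac{1}{n_s^2}\sum_{s,s'} k(x_s, x_{s'}) + \frac{1}{n_t^2}\sum_{t,t'} k(y_t, y_{t'}) - \frac{2}{n_s n_t}\sum_{s,t} k(x_s, y_t)

matching term1 + term2 + term3 above, with k the Gaussian kernel and the n_* counters tallying the number of pairs in each double loop.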


Example 16: gaussian_cost

 def gaussian_cost(t, o):
     s = 1.0  # For now take unit variance
     norm = tf.sub(o, t)
     z = tf.square(tf.div(norm, s))
     result = tf.exp(tf.div(-z, 2.0))
     denom = 2.0 * np.pi * s
     p = tf.div(result, denom)
     return -tf.log(p)
Author: RobRomijnders | Project: DRAW_1D | Lines of code: 8 | Source file: DRAW_ecg.py
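Written out, the cost is the negative log of a Gaussian-shaped likelihood with unit variance s = 1, exactly as the code computes it:

\text{cost} = -\log p, \qquad p = \frac{\exp\!\left(-\tfrac{1}{2}\left((o - t)/s\right)^{2}\right)}{2\pi s}

(as written, the normalizer is 2πs, whereas a properly normalized univariate Gaussian uses √(2π)·s; since s is fixed, this only shifts the loss by an additive constant and does not affect gradients).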


Example 17: build_search_images

  def build_search_images(self):
    """Crop search images from the input image based on the last target position

    1. The input image is scaled such that the area of the target plus context takes up (scale_factor * z_image_size) ^ 2
    2. Crop an image patch as large as x_image_size centered at the target center.
    3. If the cropped image region is beyond the boundary of the input image, mean values are padded.
    """
    model_config = self.model_config
    track_config = self.track_config

    size_z = model_config['z_image_size']
    size_x = track_config['x_image_size']
    context_amount = 0.5

    num_scales = track_config['num_scales']
    scales = np.arange(num_scales) - get_center(num_scales)
    assert np.sum(scales) == 0, 'scales should be symmetric'
    search_factors = [track_config['scale_step'] ** x for x in scales]

    frame_sz = tf.shape(self.image)
    target_yx = self.target_bbox_feed[0:2]
    target_size = self.target_bbox_feed[2:4]
    avg_chan = tf.reduce_mean(self.image, axis=(0, 1), name='avg_chan')

    # Compute base values
    base_z_size = target_size
    base_z_context_size = base_z_size + context_amount * tf.reduce_sum(base_z_size)
    base_s_z = tf.sqrt(tf.reduce_prod(base_z_context_size))  # Canonical size
    base_scale_z = tf.div(tf.to_float(size_z), base_s_z)
    d_search = (size_x - size_z) / 2.0
    base_pad = tf.div(d_search, base_scale_z)
    base_s_x = base_s_z + 2 * base_pad
    base_scale_x = tf.div(tf.to_float(size_x), base_s_x)

    boxes = []
    for factor in search_factors:
      s_x = factor * base_s_x
      frame_sz_1 = tf.to_float(frame_sz[0:2] - 1)
      topleft = tf.div(target_yx - get_center(s_x), frame_sz_1)
      bottomright = tf.div(target_yx + get_center(s_x), frame_sz_1)
      box = tf.concat([topleft, bottomright], axis=0)
      boxes.append(box)
    boxes = tf.stack(boxes)

    scale_xs = []
    for factor in search_factors:
      scale_x = base_scale_x / factor
      scale_xs.append(scale_x)
    self.scale_xs = tf.stack(scale_xs)

    # Note we use different padding values for each image
    # while the original implementation uses only the average value
    # of the first image for all images.
    image_minus_avg = tf.expand_dims(self.image - avg_chan, 0)
    image_cropped = tf.image.crop_and_resize(image_minus_avg, boxes,
                                             box_ind=tf.zeros((track_config['num_scales']), tf.int32),
                                             crop_size=[size_x, size_x])
    self.search_images = image_cropped + avg_chan
Author: fossabot | Project: SiamFC-TensorFlow | Lines of code: 58 | Source file: inference_wrapper.py


Example 18: __init__

    def __init__(self, num_neurons, norm_constants, file_restore=None):
        """
        Creates variables for a three layer network
        :param num_neurons: Tuple of number of neurons per layer
        :param norm_constants: List of normalization constants for (x, y, force)
        """
        self.sess = sess = tf.InteractiveSession()
        with tf.name_scope('Input'):
            self.x = x = tf.placeholder(tf.float32, shape=[None, num_neurons[0]], name='X')
            norm_x = tf.constant(norm_constants, name='normX')
            norm_input = tf.div(x, norm_x)
            self.y_ = y_ = tf.placeholder(tf.float32, shape=[None, num_neurons[2]], name='y_')
            norm_y = tf.constant(norm_constants[0:2] * int(num_neurons[2]/2), name='normY')
            norm_desired_output = tf.div(y_, norm_y)
            tf.histogram_summary('Input/x', x)
            tf.histogram_summary('Input/normalized_x', norm_input)
            tf.histogram_summary('Input/y_', y_)
            tf.histogram_summary('Input/normalized_y_', norm_desired_output)

        with tf.name_scope('Hidden'):
            W1 = tf.Variable(tf.random_uniform([num_neurons[0], num_neurons[1]], -1.0, 1.0), name='W1')
            b1 = tf.Variable(tf.constant(0.1, shape=(num_neurons[1],)), name='b1')
            h = tf.nn.sigmoid(tf.matmul(norm_input, W1) + b1, name='h')
            tf.histogram_summary('Hidden/W1', W1)
            tf.histogram_summary('Hidden/b1', b1)
            tf.histogram_summary('Hidden/h', h)

        with tf.name_scope('Output'):
            W2 = tf.Variable(tf.random_uniform([num_neurons[1], num_neurons[2]], -1.0, 1.0), name='W2')
            b2 = tf.Variable(tf.constant(0.1, shape=(num_neurons[2],)), name='b2')
            self.y = y = tf.nn.sigmoid(tf.matmul(h, W2) + b2, name='y')
            self.out = tf.mul(y, norm_y)
            tf.histogram_summary('Output/W2', W2)
            tf.histogram_summary('Output/b2', b2)
            tf.histogram_summary('Output/y', y)
            tf.histogram_summary('Output/out', self.out)

        with tf.name_scope('Error'):
            self.error = tf.reduce_mean(tf.nn.l2_loss(tf.sub(y, norm_desired_output)), name='Error')
            #tf.summary.scalar("Error", self.error)
            self.error_summary = tf.scalar_summary("Error", self.error)

        # Merge all the summaries
        #self.merged = tf.summary.merge_all()
        self.merged = tf.merge_all_summaries()
        self.train_writer = tf.train.SummaryWriter(LOG_DIR + '/train', sess.graph)
        self.val_writer = tf.train.SummaryWriter(LOG_DIR + '/val', sess.graph)

        # Prepare for saving network state
        self.saver = tf.train.Saver()
        if file_restore is None:
            sess.run(tf.initialize_all_variables())
        else:
            self.saver.restore(self.sess, file_restore)
            print_msg("Model restored from ", file_restore)

        self.trained_cycles = 0
Author: blackzafiro | Project: SmallDemos | Lines of code: 57 | Source file: FFPredict.py


Example 19: Moment

def Moment(k, tensor, standardize=False, reduction_indices=None, mask=None):
  """Compute the k-th central moment of a tensor, possibly standardized.

  Args:
    k: Which moment to compute. 1 = mean, 2 = variance, etc.
    tensor: Input tensor.
    standardize: If True, returns the standardized moment, i.e. the central
      moment divided by the n-th power of the standard deviation.
    reduction_indices: Axes to reduce across. If None, reduce to a scalar.
    mask: Mask to apply to tensor.

  Returns:
    The mean and the requested moment.
  """
  warnings.warn("Moment is deprecated. "
                "Will be removed in DeepChem 1.4.", DeprecationWarning)
  if reduction_indices is not None:
    reduction_indices = np.atleast_1d(reduction_indices).tolist()

  # get the divisor
  if mask is not None:
    tensor = Mask(tensor, mask)
    ones = tf.constant(1, dtype=tf.float32, shape=tensor.get_shape())
    divisor = tf.reduce_sum(
        Mask(ones, mask), axis=reduction_indices, keep_dims=True)
  elif reduction_indices is None:
    divisor = tf.constant(np.prod(tensor.get_shape().as_list()), tensor.dtype)
  else:
    divisor = 1.0
    for i in range(len(tensor.get_shape())):
      if i in reduction_indices:
        divisor *= tensor.get_shape()[i].value
    divisor = tf.constant(divisor, tensor.dtype)

  # compute the requested central moment
  # note that mean is a raw moment, not a central moment
  mean = tf.div(
      tf.reduce_sum(tensor, axis=reduction_indices, keep_dims=True), divisor)
  delta = tensor - mean
  if mask is not None:
    delta = Mask(delta, mask)
  moment = tf.div(
      tf.reduce_sum(
          math_ops.pow(delta, k), axis=reduction_indices, keep_dims=True),
      divisor)
  moment = tf.squeeze(moment, reduction_indices)
  if standardize:
    moment = tf.multiply(
        moment,
        math_ops.pow(
            tf.rsqrt(Moment(2, tensor, reduction_indices=reduction_indices)[1]),
            k))

  return tf.squeeze(mean, reduction_indices), moment
Author: AhlamMD | Project: deepchem | Lines of code: 54 | Source file: utils.py
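The two tf.div calls compute the (optionally masked) mean and the k-th central moment; standardization then divides by the k-th power of the standard deviation, via the rsqrt of the second moment:

\bar{x} = \frac{1}{n}\sum_i x_i, \qquad m_k = \frac{1}{n}\sum_i \left(x_i - \bar{x}\right)^{k}, \qquad \tilde{m}_k = \frac{m_k}{\sigma^{k}} = m_k \cdot \left(m_2^{-1/2}\right)^{k}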


Example 20: content_loss

 def content_loss(self, layers):
     activations = [self.activations_for_layer(i) for i in layers]
     activation_diffs = [
         tf.sub(
             tf.tile(tf.slice(a, [self.num_style, 0, 0, 0], [self.num_content, -1, -1, -1]), [self.num_synthesized - self.num_content + 1, 1, 1, 1]),
             tf.slice(a, [self.num_style + self.num_content, 0, 0, 0], [self.num_content, -1, -1, -1]))
         for a in activations]
     # This normalizer is in JCJohnson's paper, but not Gatys' I think?
     Ns = [a.get_shape().as_list()[1] * a.get_shape().as_list()[2] * a.get_shape().as_list()[3] for a in activations]
     content_loss = tf.div(tf.add_n([tf.div(tf.reduce_sum(tf.square(a)), n) for a, n in zip(activation_diffs, Ns)]), 2.0)
     return content_loss
Author: ProofByConstruction | Project: texture-networks | Lines of code: 11 | Source file: vgg_network.py
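The returned value is the normalized content loss read directly from the code: per layer, the summed squared difference between synthesized and content activations, divided by the layer size N_l = H_l · W_l · C_l, summed over layers and halved:

L_{\text{content}} = \frac{1}{2} \sum_{l} \frac{1}{N_l} \sum \left(a_l - \hat{a}_l\right)^{2}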



Note: The tensorflow.div function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, refer to the License of the corresponding project; do not reproduce without permission.

