
Python tensorflow.sub Function Code Examples


This article collects typical usage examples of the Python function tensorflow.sub. If you have been wondering what exactly tf.sub does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following sections present 20 code examples of the sub function, sorted by popularity by default.
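
Before the collected examples, here is a minimal sketch of tf.sub itself. All snippets on this page assume `import tensorflow as tf` and target pre-1.0 TensorFlow: tf.sub (like tf.mul and tf.neg) was renamed in TensorFlow 1.0, and current releases expose the op only as tf.subtract. The expected outputs in the comments assume a TF 0.x environment.

import tensorflow as tf

# Element-wise subtraction with NumPy-style broadcasting.
a = tf.constant([3.0, 5.0, 10.0])
b = tf.constant([1.0, 2.0, 4.0])
diff = tf.sub(a, b)       # -> [2.0, 3.0, 6.0]
shifted = tf.sub(a, 1.0)  # scalar broadcast -> [2.0, 4.0, 9.0]

with tf.Session() as sess:
    print(sess.run(diff))
    print(sess.run(shifted))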

Example 1: IoU

def IoU(bbox, gt):

    # bbox = [x, y, w, h]  ((x, y) is the upper-left corner)
    shape = [-1, 1]

    x1 = tf.maximum(tf.cast(bbox[0], tf.float32), tf.reshape(tf.cast(gt[:,0], tf.float32), shape))
    y1 = tf.maximum(tf.cast(bbox[1], tf.float32), tf.reshape(tf.cast(gt[:,1], tf.float32), shape))
    x2 = tf.minimum(tf.cast(bbox[2] + bbox[0], tf.float32), tf.reshape(tf.cast(gt[:,2] + gt[:,0], tf.float32), shape))
    y2 = tf.minimum(tf.cast(bbox[3] + bbox[1], tf.float32), tf.reshape(tf.cast(gt[:,3] + gt[:,1], tf.float32), shape))

    # intersection extents (negative when the boxes do not overlap on an axis)
    inter_w = tf.sub(x2, x1)
    inter_h = tf.sub(y2, y1)
    inter = tf.cast(inter_w * inter_h, tf.float32)

    bounding_box = tf.cast(tf.mul(bbox[2], bbox[3]), tf.float32)
    ground_truth = tf.reshape(tf.cast(tf.mul(gt[:,2], gt[:,3]), tf.float32), shape)

    #iou = tf.div(inter,tf.sub(tf.add(bounding_box,tf.reshape(ground_truth,shape)),inter))
    iou = inter / (bounding_box + ground_truth - inter)

    # limit the iou range between 0 and 1
    
    mask_less = tf.cast(tf.logical_not(tf.less(iou, tf.zeros_like(iou))), tf.float32)
    #mask_great = tf.cast(tf.logical_not(tf.greater(iou, tf.ones_like(iou))), tf.float32)
    
    iou = tf.mul(iou, mask_less)
    #iou = tf.mul(iou, positive_mask)
    
    return iou
Developer: Johannes-brahms, Project: Yolo, Lines: 35, Source: utils.py
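
For reference, the quantity computed above is the standard intersection-over-union between the predicted box b and each ground-truth box g:

$$\mathrm{IoU}(b, g) = \frac{\mathrm{area}(b \cap g)}{\mathrm{area}(b) + \mathrm{area}(g) - \mathrm{area}(b \cap g)}$$

The final mask only zeroes negative IoU values; when the boxes are disjoint on both axes, inter_w and inter_h are both negative and their product is positive, a case this mask does not catch.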


Example 2: __init__

    def __init__(self, num_features, num_output, l2_reg_lambda=0.0, neg_output=False):
        self.input_x = tf.placeholder(tf.float32, [None, num_features], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_output], name="input_y")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        with tf.name_scope("softmax"):
            filter_shape = [num_features, num_output]
            W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1))
            b = tf.Variable(tf.constant(0.1, shape=[num_output]))

            self.raw_scores = tf.nn.xw_plus_b(self.input_x, W, b, name="scores")
            if neg_output:
                self.scores = tf.nn.elu(self.raw_scores, name="elu")

            else:
                self.scores = tf.nn.relu(self.raw_scores, name="relu")


            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)

        with tf.name_scope("loss"):
            self.losses = tf.square(tf.sub(self.scores, self.input_y))
            self.avgloss = tf.reduce_mean(tf.abs(tf.sub(self.scores, self.input_y)))
            self.loss = tf.reduce_mean(self.losses) + l2_reg_lambda * l2_loss
Developer: bgshin, Project: cnntweets, Lines: 27, Source: regression.py
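
In symbols, with s the network output and y the target, the objective being minimized is

$$\mathcal{L} = \frac{1}{N}\sum_{i}(s_i - y_i)^2 + \frac{\lambda_2}{2}\left(\lVert W \rVert_2^2 + \lVert b \rVert_2^2\right),$$

where the 1/2 comes from tf.nn.l2_loss, which computes sum(t**2) / 2; avgloss additionally tracks the mean absolute error for reporting.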


Example 3: lossFunction

def lossFunction(logits, labels, scale_factor):
    print "TrackNet:  building loss function..."
    logit_trans, logit_rot = tf.split(1,2,logits)
    label_trans, label_rot = tf.split(1,2,labels)
    trans_loss = tf.nn.l2_loss(tf.sub(logit_trans, label_trans))
    rot_loss = tf.mul(scale_factor, tf.nn.l2_loss(tf.sub(logit_rot, label_rot)))  # scaled rotation error
    return tf.add(trans_loss,rot_loss)
Developer: qenops, Project: RGBDAugmentedReality, Lines: 7, Source: trackNet.py


Example 4: r_loss

def r_loss(communities = 2, group_size = 10, seed=None, p=0.4, q=0.05, r=1.0, projection_dim=2):
    """testing to see if the loss will decrease backproping through very simple function"""
    B = np.asarray(balanced_stochastic_blockmodel(communities, group_size, p, q, seed)).astype(np.double)
    B = tf.cast(B, tf.float64)
    Diag = tf.diag(tf.reduce_sum(B,0))
    Diag = tf.cast(Diag, tf.float64)

    #r_grid = tf.linspace(r_min, r_max, grid_size)
    r = tf.cast(r, tf.float64)
    
    BH = (tf.square(r)-1)*tf.diag(tf.ones(shape=[communities*group_size], dtype=tf.float64))-tf.mul(r, B)+Diag 
    
    with tf.Session() as sess:
        eigenval, eigenvec = tf.self_adjoint_eig(BH)
        eigenvec_proj = tf.slice(eigenvec, [0,0], [communities*group_size, projection_dim])
                
        true_assignment_a = tf.concat(0, [-1*tf.ones([group_size], dtype=tf.float64),
                                      tf.ones([group_size], dtype=tf.float64)])
        true_assignment_b = -1*true_assignment_a
        true_assignment_a = tf.expand_dims(true_assignment_a, 1)
        true_assignment_b = tf.expand_dims(true_assignment_b, 1)

            
        projected_a = tf.matmul(tf.matmul(eigenvec_proj, tf.transpose(eigenvec_proj)), true_assignment_a)#tf.transpose(true_assignment_a))
        projected_b = tf.matmul(tf.matmul(eigenvec_proj, tf.transpose(eigenvec_proj)), true_assignment_b)#tf.transpose(true_assignment_b))
            
        loss = tf.minimum(tf.reduce_sum(tf.square(tf.sub(projected_a, true_assignment_a))),
                              tf.reduce_sum(tf.square(tf.sub(projected_b, true_assignment_b))))
            

        d = sess.run(loss)
    return d
Developer: lishali, Project: clusternet, Lines: 32, Source: error_bars_loss.py
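
The matrix built here is the Bethe Hessian used in spectral community detection, with B the adjacency matrix and D = diag(B·1) the degree matrix:

$$H(r) = (r^2 - 1)\,I - r\,B + D.$$

The loss then measures how well the fixed ±1 community labels are captured by the span of the leading eigenvectors of H(r).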


Example 5: convert_to_one

def convert_to_one(bbox, width, height, S):

    x, y, w, h = bbox

    x = tf.cast(x, tf.float32)
    y = tf.cast(y, tf.float32)
    w = tf.cast(w, tf.float32)
    h = tf.cast(h, tf.float32)

    global_center_x = tf.mul(tf.add(tf.mul(x, 2), w), 0.5)
    global_center_y = tf.mul(tf.add(tf.mul(y, 2), h), 0.5)

    w = tf.div(w, width)
    h = tf.div(h, height)

    cell_w = tf.cast(tf.div(tf.cast(width, tf.int32), S), tf.float32)
    cell_h = tf.cast(tf.div(tf.cast(height, tf.int32), S), tf.float32)

    cell_coord_x = tf.cast(tf.cast(tf.div(global_center_x, cell_w), tf.int32), tf.float32)
    cell_coord_y = tf.cast(tf.cast(tf.div(global_center_y, cell_h), tf.int32), tf.float32)

    offset_x = tf.div(tf.sub(global_center_x, tf.mul(cell_coord_x, cell_w)), cell_w)
    offset_y = tf.div(tf.sub(global_center_y, tf.mul(cell_coord_y, cell_h)), cell_h)

    assert offset_x.dtype == tf.float32 and \
            offset_y.dtype == tf.float32 and \
            w.dtype == tf.float32 and \
            h.dtype == tf.float32

    bbox = [offset_x, offset_y, w, h]

    return bbox
Developer: Johannes-brahms, Project: Yolo, Lines: 32, Source: yolo_utils.py
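
The encoding is the usual YOLO cell-relative parameterization: with cell_w = width / S, the responsible cell index is ⌊center_x / cell_w⌋ and

$$\text{offset}_x = \frac{\text{center}_x - \text{cell}_x \cdot \text{cell}_w}{\text{cell}_w} \in [0, 1),$$

while w and h are stored relative to the full image size. Example 19 below implements the inverse mapping.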


Example 6: _build_loss

    def _build_loss(self):

        with tf.variable_scope("loss"):

            # Compute y_j = r_j + discount * best_qvalue (no bootstrap on terminal states)
            self.tf_discount = tf.constant(self.discount)
            self.qtarget = tf.add(self.pl_rewards, tf.mul(1.0-self.pl_terminals, tf.mul(self.tf_discount, self.pl_qtargets)))

            # Select Q-values for given actions
            self.actions_one_hot = tf.one_hot(self.pl_actions, self.num_actions, 1.0, 0.0)
            self.qvalue_pred = tf.reduce_sum(tf.mul(self.qvalues, self.actions_one_hot), reduction_indices=1)

            # Difference between target and predicted Q-network output
            self.delta = tf.sub(self.qtarget, self.qvalue_pred)

            if self.clip_delta > 0:
                # Perform clipping of the error term, default clipping is to (-1, +1) range
                self.quadratic_part = tf.minimum(tf.abs(self.delta), tf.constant(self.clip_delta))
                self.linear_part    = tf.sub(tf.abs(self.delta), self.quadratic_part)
                self.delta_square   = tf.mul(tf.constant(0.5), tf.square(self.quadratic_part)) + (self.clip_delta*self.linear_part)
                #self.delta_clipped = tf.clip_by_value(self.delta, -1.0*self.clip_delta, self.clip_delta)
                #self.delta_square  = tf.square(self.delta_clipped)
            else:
                # No error clipping
                self.delta_square  = tf.square(self.delta)

        # Actual loss
        if self.batch_accumulator == "sum":
            self.loss = tf.reduce_sum(self.delta_square)
        else:
            self.loss = tf.reduce_mean(self.delta_square)

        # Running average of the loss for TensorBoard
        self.loss_moving_avg    = tf.train.ExponentialMovingAverage(decay=0.999)
        self.loss_moving_avg_op = self.loss_moving_avg.apply([self.loss])
Developer: tomrunia, Project: DeepReinforcementLearning-Atari, Lines: 35, Source: qnetwork.py
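
The clipped branch is exactly the Huber loss applied to the TD error δ, which keeps gradients bounded for large errors:

$$\ell_c(\delta) = \begin{cases} \tfrac{1}{2}\delta^2 & |\delta| \le c, \\ c\left(|\delta| - \tfrac{c}{2}\right) & |\delta| > c, \end{cases}$$

with c = clip_delta: for |δ| > c the code's 0.5 * quadratic_part**2 + clip_delta * linear_part expands to c|δ| − c²/2, matching the second case.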


Example 7: __init__

    def __init__(self, inputX, C=None, hidden_dims=[300,150,300], lambda1=0.01, lambda2=0.01, activation='tanh', \
                weight_init='uniform', noise=None, learning_rate=0.1, optimizer='Adam'):

        self.noise = noise
        n_sample, n_feat = inputX.shape

        # The number of hidden layers must be even: len(hidden_dims) is odd here
        # and becomes even once the output layer is appended below.
        assert len(hidden_dims) % 2 == 1

        # Add the end layer
        hidden_dims.append(n_feat)

        # self.depth = len(dims)

        # This is the actual data matrix, not a TensorFlow symbolic variable.
        self.inputX = inputX

        if C is None:
            # Transpose the matrix first, and get the whole matrix of C
            self.inputC = sparseCoefRecovery(inputX.T)
        else:
            self.inputC = C

        self.C = tf.placeholder(dtype=tf.float32, shape=[None, None], name='C')

        self.hidden_layers = []
        self.X = self._add_noise(tf.placeholder(dtype=tf.float32, shape=[None, n_feat], name='X'))

        input_hidden = self.X
        weights, biases = init_layer_weight(hidden_dims, inputX, weight_init)

        # J3 regularization term
        J3_list = []
        for init_w, init_b in zip(weights, biases):
            self.hidden_layers.append(DenseLayer(input_hidden, init_w, init_b, activation=activation))
            input_hidden = self.hidden_layers[-1].output
            J3_list.append(tf.reduce_mean(tf.square(self.hidden_layers[-1].w)))
            J3_list.append(tf.reduce_mean(tf.square(self.hidden_layers[-1].b)))

        J3 = lambda2 * tf.add_n(J3_list)

        self.H_M = self.hidden_layers[-1].output
        # H(M/2) the output of the mid layer
        self.H_M_2 = self.hidden_layers[(len(hidden_dims)-1)/2].output

        # calculate loss J1
        # J1 = tf.nn.l2_loss(tf.sub(self.X, self.H_M))

        J1 = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(self.X, self.H_M))))

        # calculate loss J2
        J2 = lambda1 * tf.sqrt(tf.reduce_mean(tf.square(tf.sub(tf.transpose(self.H_M_2), \
                                     tf.matmul(tf.transpose(self.H_M_2), self.C)))))

        self.cost = J1 + J2 + J3

        self.optimizer = optimize(self.cost, learning_rate, optimizer)
Developer: tonyabracadabra, Project: Deep-Subspace-Clustering, Lines: 57, Source: dsc.py


Example 8: metric_single

def metric_single(training, test, scale_frac, scales):
    """Calculates the distance between a training and test instance."""
    if scale_frac == 0:
        distance = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(training, test)), reduction_indices=1, keep_dims=True))
    else:
        distance = tf.sqrt(
            tf.reduce_sum(tf.square(tf.div(tf.sub(training, test), scales)), reduction_indices=1, keep_dims=True)
        )
    return distance
Developer: AidanGG, Project: tensorflow_tmva, Lines: 9, Source: knn.py
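
With s the per-feature scale vector, this is the scaled Euclidean distance used for the k-nearest-neighbour lookup:

$$d(x, y) = \sqrt{\sum_j \left(\frac{x_j - y_j}{s_j}\right)^2},$$

which reduces to the plain Euclidean distance when scale_frac is 0.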


Example 9: binary_cross_entropy

def binary_cross_entropy(prediction, target):
    """
    let o=prediction, t=target
    -(t*log(o) + (1-t)*log(1-o))
    
    Adds a small (1e-12) value to the logarithms to avoid log(0)
    """
    op1 = tf.mul(target, tf.log(prediction + 1e-12))
    op2 = tf.mul(tf.sub(1., target), tf.log(tf.sub(1., prediction) + 1e-12))
    return tf.neg(tf.add(op1, op2))
Developer: CellProfiling, Project: AutomaticProteinLocalization, Lines: 10, Source: tensordnn.py
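
Written out, with o the prediction and t the target, the returned value is the element-wise binary cross-entropy

$$\mathrm{BCE}(o, t) = -\big(t \log(o + \varepsilon) + (1 - t)\log(1 - o + \varepsilon)\big), \qquad \varepsilon = 10^{-12},$$

where ε guards against log(0) when the prediction saturates at exactly 0 or 1.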


Example 10: comU

def comU(a, b, tag = 2):

    fea = []
    fea.append(cosine_distance(a, b))
    #fea.append(tf.sqrt(tf.reduce_sum(tf.square(tf.sub(a,b)), axis=1)))
    fea.append(tf.sqrt(tf.reduce_sum(tf.square(tf.sub(a,b)), axis=1)))
    if tag == 2:
        fea.append(tf.reduce_max(tf.abs(tf.sub(a, b)), axis=1))
    #print 'fea=', fea
    return tf.pack(fea, axis=1)
Developer: QuickyFinger, Project: Attention-Based-Multi-Perspective-Convolutional-Neural-Networks-for-Textual-Similarity-Measurement, Lines: 10, Source: train.py


Example 11: norm

def norm(name, input_layer):
    """
    Batch-normalizes the layer as in http://arxiv.org/abs/1502.03167
    This is important since it allows the different scales to talk to each other when they get joined.
    """
    mean, variance = tf.nn.moments(input_layer, [0, 1, 2])
    variance_epsilon = 0.01  # TODO: Check what this value should be
    inv = tf.rsqrt(variance + variance_epsilon)
    scale = tf.Variable(tf.random_uniform([1]), name="scale")  # TODO: How should these initialize?
    offset = tf.Variable(tf.random_uniform([1]), name="offset")
    return tf.sub(tf.mul(tf.mul(scale, inv), tf.sub(input_layer, mean)), offset, name=name)
Developer: fgeorg, Project: texture-networks, Lines: 11, Source: texture_network.py
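
Both this function and Example 14 apply the batch-normalization transform of Ioffe & Szegedy:

$$y = \gamma\,\frac{x - \mu}{\sqrt{\sigma^2 + \epsilon}} - \beta,$$

with batch moments μ, σ² taken over the (batch, height, width) axes and learned scale γ and offset β. Subtracting β rather than adding it (as the paper does) is equivalent up to the sign of the learned offset.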


Example 12: tf_2d_normal

 def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
   # eq # 24 and 25 of http://arxiv.org/abs/1308.0850
   norm1 = tf.sub(x1, mu1)
   norm2 = tf.sub(x2, mu2)
   s1s2 = tf.mul(s1, s2)
   z = tf.square(tf.div(norm1, s1))+tf.square(tf.div(norm2, s2))-2*tf.div(tf.mul(rho, tf.mul(norm1, norm2)), s1s2)
   negRho = 1-tf.square(rho)
   result = tf.exp(tf.div(-z,2*negRho))
   denom = 2*np.pi*tf.mul(s1s2, tf.sqrt(negRho))
   result = tf.div(result, denom)
   return result
Developer: DanialBahrami, Project: write-rnn-tensorflow, Lines: 11, Source: model.py
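
This snippet (and the nearly identical Example 13) implements the bivariate Gaussian density of eqs. 24-25 in http://arxiv.org/abs/1308.0850:

$$\mathcal{N}(x \mid \mu, \sigma, \rho) = \frac{1}{2\pi\sigma_1\sigma_2\sqrt{1-\rho^2}}\exp\!\left(\frac{-Z}{2(1-\rho^2)}\right),$$

$$Z = \frac{(x_1-\mu_1)^2}{\sigma_1^2} + \frac{(x_2-\mu_2)^2}{\sigma_2^2} - \frac{2\rho(x_1-\mu_1)(x_2-\mu_2)}{\sigma_1\sigma_2}.$$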


Example 13: tf_2d_normal

def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
  #Inspired by Hardmaru's implementation on GitHub
  norm1 = tf.sub(x1, mu1)
  norm2 = tf.sub(x2, mu2)
  s1s2 = tf.mul(s1, s2)
  z = tf.square(tf.div(norm1, s1))+tf.square(tf.div(norm2, s2))-2*tf.div(tf.mul(rho, tf.mul(norm1, norm2)), s1s2)
  negRho = 1-tf.square(rho)
  result = tf.exp(tf.div(-z,2*negRho))
  denom = 2*np.pi*tf.mul(s1s2, tf.sqrt(negRho))
  result = tf.div(result, denom)
  return result
Developer: RobRomijnders, Project: attention, Lines: 11, Source: attention_main_gauss.py


Example 14: spatial_batch_norm

def spatial_batch_norm(input_layer, name='spatial_batch_norm'):
    """
    Batch-normalizes the layer as in http://arxiv.org/abs/1502.03167
    This is important since it allows the different scales to talk to each other when they get joined.
    """
    mean, variance = tf.nn.moments(input_layer, [0, 1, 2])
    variance_epsilon = 0.01  # TODO: Check what this value should be
    inv = tf.rsqrt(variance + variance_epsilon)
    num_channels = input_layer.get_shape().as_list()[3]  # TODO: Clean this up
    scale = tf.Variable(tf.random_uniform([num_channels]), name='scale')  # TODO: How should these initialize?
    offset = tf.Variable(tf.random_uniform([num_channels]), name='offset')
    return_val = tf.sub(tf.mul(tf.mul(scale, inv), tf.sub(input_layer, mean)), offset, name=name)
    return return_val
Developer: ProofByConstruction, Project: texture-networks, Lines: 13, Source: network_helpers.py


Example 15: loss_with_step

 def loss_with_step(self):
     margin = 5.0
     labels_t = self.y_
     labels_f = tf.sub(1.0, self.y_, name="1-yi")          # labels_ = !labels;
     eucd2 = tf.pow(tf.sub(self.o1, self.o2), 2)
     eucd2 = tf.reduce_sum(eucd2, 1)
     eucd = tf.sqrt(eucd2+1e-6, name="eucd")
     C = tf.constant(margin, name="C")
     pos = tf.mul(labels_t, eucd, name="y_x_eucd")
     neg = tf.mul(labels_f, tf.maximum(0.0, tf.sub(C, eucd)), name="Ny_C-eucd")
     losses = tf.add(pos, neg, name="losses")
     loss = tf.reduce_mean(losses, name="loss")
     return loss
Developer: koosyong, Project: siamese_tf_mnist, Lines: 13, Source: inference.py
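
In the snippet's notation (y = 1 for matching pairs, d the Euclidean distance between the two embeddings o1 and o2, C the margin), this is a contrastive loss:

$$L = \frac{1}{N}\sum_{i} \Big[\, y_i\, d_i + (1 - y_i)\max(0,\, C - d_i) \Big],$$

a variant of Hadsell, Chopra & LeCun (2006); the original squares both terms, while this version uses the unsquared distances.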


Example 16: mem_body

	def mem_body(self, step, story_len, facts, q_double, mem_state_double):
		print ("!!!!!!!!!!!!!!!!!!!!!")
		z = tf.concat(1, [tf.mul(tf.gather(facts, step), q_double), tf.mul(tf.gather(facts, step), mem_state_double), 
			tf.abs(tf.sub(tf.gather(facts, step), q_double)), tf.abs(tf.sub(tf.gather(facts, step), mem_state_double))])
		# record Z (all episodic memory states)
		def f1(): return seq2seq.feedforward_nn(z, self.attention_ff_size, self.attention_ff_l1_size, self.attention_ff_l2_size)
		def f2(): return tf.concat(0, [tf.reshape(tf.to_float(self.episodic_array),[-1]), tf.reshape(seq2seq.feedforward_nn(z, self.attention_ff_size, self.attention_ff_l1_size, self.attention_ff_l2_size),[-1])])
		
		self.episodic_array = tf.cond(tf.less(step,1), f1, f2)
		print (self.episodic_array)
		print ('=-=-=-=-=', tf.to_float(self.episodic_array), seq2seq.feedforward_nn(z, self.attention_ff_size, self.attention_ff_l1_size, self.attention_ff_l2_size))
		step = tf.add(step, 1)
		return step, story_len, facts, q_double, mem_state_double
Developer: sufengniu, Project: DMN-tensorflow, Lines: 13, Source: DMN_BACKUP.py
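
The concatenated vector is a subset of the interaction features used by the Dynamic Memory Network attention gate, with f the current fact, q the question encoding, and m the memory state:

$$z = \big[\, f \circ q;\;\; f \circ m;\;\; |f - q|;\;\; |f - m| \,\big].$$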


Example 17: getNeighborWeights

    def getNeighborWeights(self, transformedCoordinates, clampedCoordinatesList):
        flooredCoordinates = tf.slice(clampedCoordinatesList[0], [0, 1], [tf.shape(clampedCoordinatesList[0])[0], 3])

        if self.isVerbose:
            transformedCoordinates = tf.Print(transformedCoordinates, [transformedCoordinates], summarize=1000)
            flooredCoordinates     = tf.Print(flooredCoordinates, [flooredCoordinates], summarize=1000)
        
        deltas = tf.sub(transformedCoordinates, flooredCoordinates)
        
        if self.isVerbose:
            deltas = tf.Print(deltas, [deltas], summarize=1000)

        deltaW = self.sliceIndex(deltas, 2)
        deltaH = self.sliceIndex(deltas, 1)
        deltaC = self.sliceIndex(deltas, 0)

        if self.isVerbose:
            deltaW = tf.Print(deltaW, [deltaW], summarize=1000)
            deltaH = tf.Print(deltaH, [deltaH], summarize=1000)
            deltaC = tf.Print(deltaC, [deltaC], summarize=1000)

        # declared once so the weight expressions below read concisely
        ConstantOne = tf.constant([1], dtype=tf.float32)

        W_lll = tf.mul(tf.mul(tf.sub(ConstantOne, deltaW) , tf.sub(ConstantOne, deltaH)) , tf.sub(ConstantOne, deltaC))
        W_llu = tf.mul(tf.mul(tf.sub(ConstantOne, deltaW) , tf.sub(ConstantOne, deltaH)) , deltaC                     )
        W_lul = tf.mul(tf.mul(tf.sub(ConstantOne, deltaW) , deltaH                     ) , tf.sub(ConstantOne, deltaC))
        W_luu = tf.mul(tf.mul(tf.sub(ConstantOne, deltaW) , deltaH                     ) , deltaC                     )
        W_ull = tf.mul(tf.mul(deltaW                      , tf.sub(ConstantOne, deltaH)) , tf.sub(ConstantOne, deltaC))
        W_ulu = tf.mul(tf.mul(deltaW                      , tf.sub(ConstantOne, deltaH)) , deltaC                     )
        W_uul = tf.mul(tf.mul(deltaW                      , deltaH                     ) , tf.sub(ConstantOne, deltaC))
        W_uuu = tf.mul(tf.mul(deltaW                      , deltaH                     ) , deltaC                     )

        if self.isVerbose:
            W_lll = tf.Print(W_lll, [W_lll], summarize=1000)
            W_llu = tf.Print(W_llu, [W_llu], summarize=1000)
            W_lul = tf.Print(W_lul, [W_lul], summarize=1000)
            W_luu = tf.Print(W_luu, [W_luu], summarize=1000)
            W_ull = tf.Print(W_ull, [W_ull], summarize=1000)
            W_ulu = tf.Print(W_ulu, [W_ulu], summarize=1000)
            W_uul = tf.Print(W_uul, [W_uul], summarize=1000)
            W_uuu = tf.Print(W_uuu, [W_uuu], summarize=1000)

        weightList = []

        weightList.append(W_lll) 
        weightList.append(W_llu) 
        weightList.append(W_lul) 
        weightList.append(W_luu) 
        weightList.append(W_ull) 
        weightList.append(W_ulu) 
        weightList.append(W_uul) 
        weightList.append(W_uuu) 
       

        return weightList
Developer: sudnya, Project: misc, Lines: 56, Source: SpatialTransformerLayer.py
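
The eight products are the standard trilinear-interpolation weights: on each axis k ∈ {W, H, C}, a corner contributes the factor (1 − δ_k) if it is the lower ('l') neighbor and δ_k if it is the upper ('u') one, e.g.

$$W_{ull} = \delta_W\,(1 - \delta_H)\,(1 - \delta_C),$$

and the eight weights sum to 1 by construction.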


Example 18: kMeansCluster

def kMeansCluster(vector_values, num_clusters, max_num_steps, stop_coefficient = 0.0):
  vectors = tf.constant(vector_values)
  centroids = tf.Variable(tf.slice(tf.random_shuffle(vectors),
                                   [0,0],[num_clusters,-1]))
  old_centroids = tf.Variable(tf.zeros([num_clusters,2]))
  centroid_distance = tf.Variable(tf.zeros([num_clusters,2]))

  expanded_vectors = tf.expand_dims(vectors, 0)
  expanded_centroids = tf.expand_dims(centroids, 1)

  print expanded_vectors.get_shape()
  print expanded_centroids.get_shape()

  distances = tf.reduce_sum(
    tf.square(tf.sub(expanded_vectors, expanded_centroids)), 2)
  assignments = tf.argmin(distances, 0)

  means = tf.concat(0, [
    tf.reduce_mean(
        tf.gather(vectors,
                  tf.reshape(
                    tf.where(
                      tf.equal(assignments, c)
                    ),[1,-1])
                 ),reduction_indices=[1])
    for c in xrange(num_clusters)])

  save_old_centroids = tf.assign(old_centroids, centroids)

  update_centroids = tf.assign(centroids, means)
  init_op = tf.initialize_all_variables()

  performance = tf.assign(centroid_distance, tf.sub(centroids, old_centroids))
  check_stop = tf.reduce_sum(tf.abs(performance))

  with tf.Session() as sess:
    sess.run(init_op)
    for step in xrange(max_num_steps):
      print "Running step " + str(step)
      sess.run(save_old_centroids)
      _, centroid_values, assignment_values = sess.run([update_centroids,
                                                        centroids,
                                                        assignments])
      sess.run(check_stop)
      current_stop_coefficient = check_stop.eval()
      print "coefficient:", current_stop_coefficient
      if current_stop_coefficient <= stop_coefficient:
        break

    return centroid_values, assignment_values
Developer: forfish, Project: cestlavie, Lines: 50, Source: test_K-means.py
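
A minimal way to drive this function, with hypothetical random 2-D data (the hard-coded [num_clusters, 2] variable shapes above mean it only supports 2-D points), assuming the same Python 2 / TF 0.x environment as the snippet:

import numpy as np

# 100 random points in the unit square, two clusters, at most 50 iterations
points = np.random.rand(100, 2).astype(np.float32)
centroids, assignments = kMeansCluster(points, num_clusters=2, max_num_steps=50)
print centroids      # final cluster centers, shape (2, 2)
print assignments    # cluster index per point, shape (100,)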


Example 19: convert_to_reality

def convert_to_reality(bbox, width, height, S):

    relative_center_x, relative_center_y, global_w, global_h = bbox

    w = tf.cast(tf.cast(tf.mul(global_w, width), tf.int32), tf.float32)
    h = tf.cast(tf.cast(tf.mul(global_h, height), tf.int32), tf.float32)

    index = tf.reshape(tf.range(S * S),[-1,1])

    cell_coord_y = tf.cast(tf.div(index, S), tf.float32)
    cell_coord_x = tf.cast(tf.mod(index, S), tf.float32)


    S = tf.cast(S, tf.float32)

    width = tf.cast(width, tf.float32)
    height = tf.cast(height, tf.float32)


    cell_w = tf.cast(width / S, tf.float32)
    cell_h = tf.cast(height / S, tf.float32)

    
    #real_x_left_up = tf.reshape((cell_coord_x + relative_center_x) * cell_w - w / 2,[-1])

    #real_y_left_up = tf.reshape((cell_coord_y + relative_center_y) * cell_h - h / 2, [-1])

    real_x_left_up = tf.sub(tf.add(tf.reshape(tf.mul(cell_coord_x, cell_w), [-1]), relative_center_x * cell_w), tf.cast(w * 0.5, tf.float32))
    real_y_left_up = tf.sub(tf.add(tf.reshape(tf.mul(cell_coord_y, cell_h), [-1]), relative_center_y * cell_h), tf.cast(h * 0.5, tf.float32))


    real_x_left_up = tf.cast(tf.nn.relu(real_x_left_up), tf.int32)
    real_y_left_up = tf.cast(tf.nn.relu(real_y_left_up), tf.int32)
    w = tf.cast(w, tf.int32)
    h = tf.cast(h, tf.int32)

    print 'real x', relative_center_x.get_shape()
    print 'real w', w.get_shape()



    """
    assert real_x_left_up.dtype == tf.int32 and \
           real_y_left_up.dtype == tf.int32 and \
            w.dtype == tf.int32 and \
            h.dtype == tf.int32
    """
    bbox = [real_x_left_up, real_y_left_up, w, h]

    return bbox
Developer: Johannes-brahms, Project: Yolo, Lines: 50, Source: yolo_utils.py
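
This is the inverse of the Example 5 encoding: the global upper-left corner is recovered per cell as

$$x = (\text{cell}_x + \text{offset}_x)\,\text{cell}_w - \tfrac{w}{2}, \qquad y = (\text{cell}_y + \text{offset}_y)\,\text{cell}_h - \tfrac{h}{2},$$

clamped to non-negative values by the tf.nn.relu calls.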


Example 20: _multichannel_image_summary

def _multichannel_image_summary(name, images, perm=[0, 3, 1, 2], max_summary_images=16):
    _min = tf.reduce_min(images)
    _max = tf.reduce_max(images)
    _ = tf.mul(tf.div(tf.sub(images, _min), tf.sub(_max, _min)), 255.0)  # min-max normalize to [0, 255]
    _ = tf.transpose(_, perm=perm)
    shape = _.get_shape().as_list()
    tf.image_summary(name, tf.reshape(tf.transpose(_, perm=perm), [reduce(lambda x,y:x*y, shape)/(shape[3]*shape[2]), shape[2], shape[3], 1]), max_images=max_summary_images)
Developer: wbaek, Project: tensorflow-tutorials, Lines: 7, Source: helper.py



Note: the tensorflow.sub examples above were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution or use must follow each project's License. Do not reproduce without permission.

