
Python tensorflow.abs Function Code Examples


This article collects typical usage examples of the tensorflow.abs function in Python. If you are struggling with questions such as what exactly tf.abs does, how to call it, or what real-world uses of it look like, the hand-picked code examples here may help.



Below are 20 code examples of the abs function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code examples.
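
As a warm-up before the collected examples, here is a minimal sketch of tf.abs itself, written for TensorFlow 1.x graph mode to match the style of the snippets below (the tensor values are illustrative):

import tensorflow as tf

# tf.abs computes the element-wise absolute value; on complex tensors it
# returns the magnitude sqrt(real^2 + imag^2), as Example 11 relies on.
x = tf.constant([-1.5, 0.0, 2.0, -3.0])
y = tf.abs(x)

with tf.Session() as sess:
    print(sess.run(y))  # [1.5 0.  2.  3. ]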

Example 1: get_loss

    def get_loss(self, x1, x2,
                 tau=0.25,  # time step
                 lbda=0.15,  # weight parameter for the data term
                 theta=0.3,  # weight parameter for (u - v)^2
                 warps=5,  # number of warpings per scale
                 zfactor=0.5,  # factor for building the image pyramid
                 max_scales=5,  # maximum number of scales for image pyramid
                 max_iterations=5  # maximum number of iterations for optimization
                 ):

        u1, u2, rho = self.tvnet_flow(x1, x2,
                                      tau=tau, lbda=lbda, theta=theta, warps=warps,
                                      zfactor=zfactor, max_scales=max_scales,
                                      max_iterations=max_iterations)

        # computing loss
        u1x, u1y = self.forward_gradient(u1, 'u1')
        u2x, u2y = self.forward_gradient(u2, 'u2')


        u1_flat = tf.reshape(u1, (tf.shape(x2)[0], 1, x2.shape[1].value * x2.shape[2].value))
        u2_flat = tf.reshape(u2, (tf.shape(x2)[0], 1, x2.shape[1].value * x2.shape[2].value))

        x2_warp = self.warp_image(x2, u1_flat, u2_flat)
        x2_warp = tf.reshape(x2_warp, tf.shape(x2))
        loss = lbda * tf.reduce_mean(tf.abs(x2_warp - x1)) + tf.reduce_mean(
            tf.abs(u1x) + tf.abs(u1y) + tf.abs(u2x) + tf.abs(u2y))
        return loss, u1, u2
Developer: jiujing23333, Project: tvnet, Lines: 28, Source: tvnet.py


Example 2: huber_loss

def huber_loss(y, y_predicted, m=1.0):
  """Huber loss."""
  t = y - y_predicted
  # Note that enabling eager execution lets you use Python control flow and
  # specify dynamic TensorFlow computations. Contrast this implementation
  # to the graph-construction one found in `utils`, which uses `tf.cond`.
  return t ** 2 if tf.abs(t) <= m else m * (2 * tf.abs(t) - m)
Developer: XJTUeducation, Project: stanford-tensorflow-tutorials, Lines: 7, Source: 04_linreg_eager.py
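
A hedged usage sketch for the eager version above: the Python conditional converts tf.abs(t) <= m to a bool, so this only works for scalar tensors. tf.enable_eager_execution is the TF >= 1.7 entry point (eager is on by default in 2.x):

import tensorflow as tf

tf.enable_eager_execution()  # TF 1.x only; not needed in TF 2.x

# |t| = 0.5 <= m, so the quadratic branch fires: 0.5**2 = 0.25
print(huber_loss(tf.constant(3.0), tf.constant(2.5)))
# |t| = 3.0 > m, so the linear branch fires: 1.0 * (2*3.0 - 1.0) = 5.0
print(huber_loss(tf.constant(3.0), tf.constant(0.0)))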


Example 3: huber_loss

def huber_loss(x, delta=1.0):
    # https://en.wikipedia.org/wiki/Huber_loss
    # Note: tf.select is the pre-1.0 TensorFlow API; on TF >= 1.0 use tf.where
    # (see Example 13 for the modern form of this same function).
    return tf.select(
        tf.abs(x) < delta,
        tf.square(x) * 0.5,
        delta * (tf.abs(x) - 0.5 * delta)
    )
Developer: Neithardt-zn, Project: homework, Lines: 7, Source: dqn_utils.py


Example 4: __init__

    def __init__(self,
                 sess,
                 dataset_name='facades',
                 checkpoint_dir=None):
        self.sess = sess
        self.dataset_name = dataset_name
        self.checkpoint_dir = checkpoint_dir

        self.real_data = tf.placeholder(tf.float32,
                                        [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3 + 3],
                                        name='input_images')
        self.real_A = self.real_data[:, :, :, :3]
        self.real_B = self.real_data[:, :, :, 3:6]

        self.fake_B = generator(self.real_A, name="generatorA2B")
        self.fake_A = generator(self.real_B, name="generatorB2A")
        self.fake_B_fake_A = generator(self.fake_B, reuse=True, name="generatorB2A")
        self.fake_A_fake_B = generator(self.fake_A, reuse=True, name="generatorA2B")

        self.DA_real = discriminator(self.real_A, reuse=False, name="descriminatorA")
        self.DB_real = discriminator(self.real_B, reuse=False, name="descriminatorB")
        self.DA_fake = discriminator(self.fake_A, reuse=True, name="descriminatorA")
        self.DB_fake = discriminator(self.fake_B, reuse=True, name="descriminatorB")

        self.g_loss_a2b = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DB_fake, labels=tf.ones_like(self.DB_fake))) + 100 * tf.reduce_mean(
            tf.abs(self.real_A - self.fake_B_fake_A)) + 100 * tf.reduce_mean(
            tf.abs(self.real_B - self.fake_B))
        self.g_loss_b2a = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DA_fake, labels=tf.ones_like(self.DA_fake))) + 100 * tf.reduce_mean(
            tf.abs(self.real_B - self.fake_A_fake_B)) + 100 * tf.reduce_mean(
            tf.abs(self.real_A - self.fake_A))
        self.g_loss = self.g_loss_a2b + self.g_loss_b2a

        self.d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DB_fake, labels=tf.zeros_like(self.DB_fake))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DB_real, labels=tf.ones_like(self.DB_real))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DA_fake, labels=tf.zeros_like(self.DA_fake))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DA_real, labels=tf.ones_like(self.DA_real)))

        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        self.g_loss_a2b_sum = tf.summary.scalar("g_loss_a2b", self.g_loss_a2b)
        self.g_loss_b2a_sum = tf.summary.scalar("g_loss_b2a", self.g_loss_b2a)
        self.real_A_sum = tf.summary.image("real_A", self.real_A)
        self.real_B_sum = tf.summary.image("real_B", self.real_B)
        self.fake_A_sum = tf.summary.image("fake_A", self.fake_A)
        self.fake_B_sum = tf.summary.image("fake_B", self.fake_B)
        self.fake_AB_sum = tf.summary.image("fake_AB", self.fake_A_fake_B)
        self.fake_BA_sum = tf.summary.image("fake_BA", self.fake_B_fake_A)

        self.d_sum = tf.summary.merge([self.d_loss_sum])
        self.g_sum = tf.summary.merge([self.g_loss_sum, self.g_loss_a2b_sum, self.g_loss_b2a_sum,
                                       self.real_A_sum, self.real_B_sum, self.fake_A_sum,
                                       self.fake_B_sum, self.fake_AB_sum, self.fake_BA_sum])

        training_vars = tf.trainable_variables()
        self.d_vars = [var for var in training_vars if 'd_' in var.name]
        self.g_vars = [var for var in training_vars if 'g_' in var.name]
        self.saver = tf.train.Saver(max_to_keep=5)
Developer: yaoyaowd, Project: tensorflow_demo, Lines: 60, Source: cycle_model.py


Example 5: add_dyprune

def add_dyprune(weights):
    crate = config.crate[weights.name[:-2]]  # hyperparameter: per-layer pruning rate c
    prune_mask = tf.Variable(tf.ones_like(weights),name=weights.name[:-2]+'mask', trainable=False)

    #calculate mask
    mean = tf.divide(tf.reduce_sum(tf.multiply(tf.abs(weights),prune_mask)),tf.reduce_sum(prune_mask))
    var = tf.multiply(weights,prune_mask)
    var = tf.square(var)
    mean_q = tf.square(mean)*tf.reduce_sum(prune_mask)
    var = tf.reduce_sum(var) - mean_q
    var = tf.divide(var,tf.reduce_sum(prune_mask))
    var = tf.sqrt(var)
    t1_lower = (mean+var*crate)*0.25 # hyperparameter a (lower threshold)
    t1_upper = (mean+var*crate)*0.45 # hyperparameter b (upper threshold)
    
    indicator_lower1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_lower)    
    indicator_upper1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_upper)
    indicator_matrix1 = tf.greater_equal(prune_mask, tf.zeros_like(weights))
    indicator_matrix1 = tf.logical_and(indicator_matrix1,indicator_lower1)
    indicator_matrix1 = tf.logical_or(indicator_matrix1,indicator_upper1)
    indicator_matrix1 = tf.to_float(indicator_matrix1)
    update = prune_mask.assign(indicator_matrix1)

    prune_fc = tf.multiply(weights, prune_mask)
    return prune_fc
Developer: Ewenwan, Project: Project, Lines: 25, Source: densenetfinalDNS.py
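
A hedged usage sketch: the module-level `config` object with a per-layer `crate` dict is an assumption inferred from the function body, and the layer name is illustrative. Note the mask-update op (`prune_mask.assign`) created inside the function is not returned, so in practice it would need to be collected separately and run during training for the mask to change:

import tensorflow as tf

class config:  # hypothetical stand-in for the real config module
    crate = {'fc1': 1.0}  # pruning-rate hyperparameter c per layer

weights = tf.get_variable('fc1', shape=[256, 128])  # weights.name is 'fc1:0'
pruned = add_dyprune(weights)  # use `pruned` in place of `weights` in the forward pass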


Example 6: sampled_softmax

    def sampled_softmax(tensor, weights):
        max_val = tf.reduce_max(tensor * tf.abs(weights), 1, keep_dims=True)
        tensor_rescaled = tensor - max_val
        tensor_exp = tf.exp(tensor_rescaled)
        tensor_sum = tf.reduce_sum(tensor_exp * tf.abs(weights), 1, keep_dims=True)

        return (tensor_exp / tensor_sum) * tf.abs(weights)  # all ignored elements will have a prob of 0.
Developer: bxshi, Project: ProjE, Lines: 7, Source: ProjE_sigmoid.py


Example 7: NTanh

def NTanh(x,
          use_noise,
          alpha=1.05,
          c=0.5, half_normal=False):
    """
    Noisy Hard Tanh Units: NAN without learning p
    ----------------------------------------------------
    Arguments:
        x: tensorflow tensor variable, input of the function.
        use_noise: bool, whether or not to add noise to the activations; this is
        useful in particular at test time, to disable the noise injection.
        c: float, standard deviation of the noise
        alpha: the leaking rate from the linearized function to the nonlinear one.
    """


    threshold = 1.0
    signs = tf.sign(x)
    delta = tf.abs(x) - threshold

    scale = c * (tf.sigmoid(delta ** 2) - 0.5) ** 2
    if alpha > 1.0 and half_normal:
        scale *= -1.0
    zeros = tf.zeros(tf.shape(x), dtype=tf.float32)

    def noise_func():
        return tf.abs(tf.random_normal(tf.shape(x), mean=0.0, stddev=1.0, dtype=tf.float32))

    def zero_func():
        # 0.797 ~ sqrt(2/pi), the mean of a standard half-normal distribution
        return zeros + 0.797 if half_normal else zeros

    noise = tf.cond(use_noise, noise_func, zero_func)

    eps = scale * noise + alpha * delta
    z = x - signs * eps
    test = tf.cast(tf.greater_equal(tf.abs(x), threshold), tf.float32)
    res = test * z + (1. - test) * HardTanh(x)
    return res
Developer: caglar, Project: noisy_units, Lines: 35, Source: nunits.py


Example 8: _update_lipschitz

  def _update_lipschitz(self,v,i):
    config = self.config
    if len(v.shape) > 1:
      k = self.config.weight_constraint_k or 100.0000
      wi_hat = v
      if len(v.shape) == 4:
        #fij = tf.reduce_sum(tf.abs(wi_hat),  axis=[0,1])
        fij = wi_hat
        fij = tf.reduce_sum(tf.abs(fij),  axis=[1])
        fij = tf.reduce_max(fij,  axis=[0])
      else:
        fij = wi_hat

      if self.config.ortho_pnorm == "inf":
        wp = tf.reduce_max(tf.reduce_sum(tf.abs(fij), axis=0), axis=0)
      else:
        # conv
        wp = tf.reduce_max(tf.reduce_sum(tf.abs(fij), axis=1), axis=0)
      ratio = (1.0/tf.maximum(1.0, wp/k))
      
      if self.config.weight_bounce:
        bounce = tf.minimum(1.0, tf.ceil(wp/k-0.999))
        ratio -= tf.maximum(0.0, bounce) * 0.2

      if self.config.weight_scaleup:
        up = tf.minimum(1.0, tf.ceil(0.02-wp/k))
        ratio += tf.maximum(0.0, up) * k/wp * 0.2

      wi = ratio*(wi_hat)
      #self.gan.metrics['wi'+str(i)]=wp
      #self.gan.metrics['wk'+str(i)]=ratio
      #self.gan.metrics['bouce'+str(i)]=bounce
      return tf.assign(v, wi)
    return None
Developer: 255BITS, Project: hyperchamber-gan, Lines: 34, Source: weight_constraint_train_hook.py


Example 9: attention_mechanism_parallel

    def attention_mechanism_parallel(self,c_full,m,q,i):
        """ parallel implemtation of gate function given a list of candidate sentence, a query, and previous memory.
        Input:
           c_full: candidate fact. shape:[batch_size,story_length,hidden_size]
           m: previous memory. shape:[batch_size,hidden_size]
           q: question. shape:[batch_size,hidden_size]
        Output: a scalar score (in batch). shape:[batch_size,story_length]
        """
        q=tf.expand_dims(q,axis=1) #[batch_size,1,hidden_size]
        m=tf.expand_dims(m,axis=1) #[batch_size,1,hidden_size]

        # 1.define a large feature vector that captures a variety of similarities between input,memory and question vector: z(c,m,q)
        c_q_elementwise=tf.multiply(c_full,q)          #[batch_size,story_length,hidden_size]
        c_m_elementwise=tf.multiply(c_full,m)          #[batch_size,story_length,hidden_size]
        c_q_minus=tf.abs(tf.subtract(c_full,q))        #[batch_size,story_length,hidden_size]
        c_m_minus=tf.abs(tf.subtract(c_full,m))        #[batch_size,story_length,hidden_size]
        # c^T W q and c^T W m
        c_w_q=self.x1Wx2_parallel(c_full,q,"c_w_q"+str(i))   #[batch_size,story_length,hidden_size]
        c_w_m=self.x1Wx2_parallel(c_full,m,"c_w_m"+str(i))   #[batch_size,story_length,hidden_size]
        q_tile=tf.tile(q,[1,self.story_length,1])     #[batch_size,story_length,hidden_size]
        m_tile=tf.tile(m,[1,self.story_length,1])     #[batch_size,story_length,hidden_size]
        z=tf.concat([c_full,m_tile,q_tile,c_q_elementwise,c_m_elementwise,c_q_minus,c_m_minus,c_w_q,c_w_m],2) #[batch_size,story_length,hidden_size*9]
        # 2. two-layer feed-forward
        g=tf.layers.dense(z,self.hidden_size*3,activation=tf.nn.tanh)  #[batch_size,story_length,hidden_size*3]
        g=tf.layers.dense(g,1,activation=tf.nn.sigmoid)                #[batch_size,story_length,1]
        g=tf.squeeze(g,axis=2)                                         #[batch_size,story_length]
        return g
Developer: AmjadHisham, Project: text_classification, Lines: 28, Source: a8_dynamic_memory_network.py


Example 10: cycle_consistency_loss

    def cycle_consistency_loss(self, G, F, x, y):
        """ cycle consistency loss (L1 norm)
        """
        forward_loss = tf.reduce_mean(tf.abs(F(G(x))-x))
        backward_loss = tf.reduce_mean(tf.abs(G(F(y))-y))
        loss = self.lambda1*forward_loss + self.lambda2*backward_loss
        return loss
Developer: jnmaomao, Project: CycleGAN-TensorFlow, Lines: 7, Source: model.py


Example 11: __loss__

    def __loss__(self):
        """
        Calculate loss
        :return:
        """

        # Context loss L2
        predict_image = tf.abs(tf.complex(real=self.predict_g2['real'], imag=self.predict_g2['imag']))
        label_image = tf.abs(tf.complex(real=self.labels['real'], imag=self.labels['imag']))
        self.context_loss = tf.reduce_mean(tf.square(tf.contrib.layers.flatten(predict_image - label_image)))

        # self.context_loss = tf.reduce_mean(tf.square(real_diff) + tf.square(imag_diff), name='Context_loss_mean')
        print("You are using L2 loss")

        tf.summary.scalar('g_loss_context_only', self.context_loss, collections='G2')

        self.g_loss = self.FLAGS.gen_loss_context * self.context_loss
        # self.g_loss = self.FLAGS.gen_loss_adversarial * g_loss + self.FLAGS.gen_loss_context * context_loss
        tf.summary.scalar('g_loss_plus_context', self.g_loss, collections='G2')

        # if len(self.regularization_values) > 0:
        # reg_loss_g = self.reg_w * tf.reduce_sum(self.regularization_values)
        self.reg_loss_g = self.get_weights_regularization(dump=self.FLAGS.dump_debug, collection='G2')
        self.g_loss_no_reg = self.g_loss
        self.g_loss += self.reg_loss_g
        if self.FLAGS.dump_debug:
            tf.summary.scalar('g_loss_plus_context_plus_reg', self.g_loss, collections='G2')
            tf.summary.scalar('g_loss_reg_only', self.reg_loss_g, collections='D')
Developer: shohad25, Project: thesis, Lines: 28, Source: k_space_gl_g2_unet_DabsGloss.py


Example 12: almost_equal

def almost_equal(a, b):
    """
    :param a: tensor
    :param b: tensor
    :returns: equivalent to numpy a == b, if a and b were ndarrays
    """
    not_almost_equal = tf.abs(tf.sign(tf.round(a - b)))
    return tf.abs(not_almost_equal - 1)
Developer: lobachevzky, Project: movies, Lines: 7, Source: ops.py
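
A hedged check of the trick above: round(a - b) is zero when |a - b| is at most 0.5 (tf.round rounds half to even), so after the sign/abs steps the result is 1.0 where a ≈ b and 0.0 elsewhere (TF 1.x session assumed, values illustrative):

import tensorflow as tf

a = tf.constant([1.0, 2.0, 3.0])
b = tf.constant([1.2, 2.0, 4.0])

with tf.Session() as sess:
    print(sess.run(almost_equal(a, b)))  # [1. 1. 0.]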


Example 13: huber_loss

def huber_loss(x, delta=1.0):
    """Reference: https://en.wikipedia.org/wiki/Huber_loss"""
    return tf.where(
        tf.abs(x) < delta,
        tf.square(x) * 0.5,
        delta * (tf.abs(x) - 0.5 * delta)
    )
Developer: Divyankpandey, Project: baselines, Lines: 7, Source: tf_util.py


Example 14: get_train

def get_train(train_ph_dict,var_dict,var_ph_dict):
    mid0 = tf.one_hot(train_ph_dict['choice_0'], 9, axis=-1, dtype=tf.float32)
    mid0 = mid0 * get_q(train_ph_dict['state_0'],var_dict)
    mid0 = tf.reduce_sum(mid0, reduction_indices=[1])

    mid1 = get_q(train_ph_dict['state_1'],var_ph_dict)
    mid1 = tf.reduce_max(mid1, reduction_indices=[1])  
    mid1 = mid1 * train_ph_dict['cont']
    mid1 = mid1 * tf.constant(TRAIN_BETA)

    l2r = tf.constant(0.0)
    cell_count = tf.constant(0.0)
    for v in var_dict.values():
        l2r = l2r + get_l2(v)
        cell_count = cell_count + tf.to_float(tf.size(v))
    l2r = l2r / cell_count
    l2r = l2r / tf.constant(ELEMENT_L2_FACTOR*ELEMENT_L2_FACTOR)
    l2r = l2r * tf.constant(L2_WEIGHT)
    
    mid = mid0-mid1-train_ph_dict['reward_1']
#    mid = mid * mid
    mid = tf.abs(mid)
    mid = tf.reduce_mean(mid)
    score_diff = mid
    mid = mid + l2r
    mid = mid + ( tf.abs( tf.reduce_mean(var_dict['b5']) ) * tf.constant(L2_WEIGHT) )

    loss = mid

    mid = tf.train.GradientDescentOptimizer(0.5).minimize(mid,var_list=var_dict.values())
    train = mid
    
    return train, loss, score_diff
Developer: luzi82, Project: codelog.tensorflow.tictactoe, Lines: 33, Source: deeplearn2.py


Example 15: reshape_stft

def reshape_stft(stfts, num_mel_bins):
    magnitude_spectrograms = tf.abs(stfts)
    num_spectrogram_bins = magnitude_spectrograms.shape[-1].value

    # scale frequency to mel scale and put into bins to reduce dimensionality
    lower_edge_hertz, upper_edge_hertz = 30.0, 17000.0

    linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
        num_mel_bins, num_spectrogram_bins, utils.sample_rate, lower_edge_hertz,
        upper_edge_hertz)
    mel_spectrograms = tf.tensordot(magnitude_spectrograms, linear_to_mel_weight_matrix, 1)
    mel_spectrograms.set_shape(
        magnitude_spectrograms.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))

    # log scale the mel bins to better represent human loudness perception
    log_offset = 1e-6
    log_mel_spectrograms = tf.log(mel_spectrograms + log_offset)

    # compute first order differential and concat. "It indicates a raise or reduction of the energy for each
    # frequency bin at a frame relative to its predecessor"
    first_order_diff = tf.abs(
        tf.subtract(log_mel_spectrograms, tf.manip.roll(log_mel_spectrograms, shift=1, axis=1)))
    mel_fod = tf.concat([log_mel_spectrograms, first_order_diff], 1)

    return mel_fod
Developer: nearlyeveryone, Project: bpm, Lines: 25, Source: bpm_estimator.py
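
A hedged sketch of how the `stfts` input might be produced with tf.contrib.signal.stft (TF 1.x); the frame sizes are illustrative, and a fixed fft_length keeps the last dimension static, which the `.shape[-1].value` lookup above requires:

import tensorflow as tf

signals = tf.placeholder(tf.float32, [None, None])  # [batch, samples]
stfts = tf.contrib.signal.stft(signals, frame_length=1024, frame_step=512,
                               fft_length=1024)  # complex64, last dim = 513
mel_fod = reshape_stft(stfts, num_mel_bins=64)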


Example 16: cross_entropy

    def cross_entropy(u, label_u, alpha=0.5, normed=False):

        label_ip = tf.cast(
            tf.matmul(label_u, tf.transpose(label_u)), tf.float32)
        s = tf.clip_by_value(label_ip, 0.0, 1.0)

        # compute balance param
        # s_t \in {-1, 1}
        s_t = tf.multiply(tf.add(s, tf.constant(-0.5)), tf.constant(2.0))
        sum_1 = tf.reduce_sum(s)
        sum_all = tf.reduce_sum(tf.abs(s_t))
        balance_param = tf.add(tf.abs(tf.add(s, tf.constant(-1.0))),
                               tf.multiply(tf.div(sum_all, sum_1), s))

        if normed:
            # ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
            ip_1 = tf.matmul(u, tf.transpose(u))

            def reduce_shaper(t):
                return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])
            mod_1 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(u)),
                                      reduce_shaper(tf.square(u)), transpose_b=True))
            ip = tf.div(ip_1, mod_1)
        else:
            ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
        ones = tf.ones([tf.shape(u)[0], tf.shape(u)[0]])
        return tf.reduce_mean(tf.multiply(tf.log(ones + tf.exp(alpha * ip)) - s * alpha * ip, balance_param))
Developer: AllenMao, Project: DeepHash, Lines: 27, Source: dhn.py


Example 17: check_convergence

    def check_convergence(self, new_T0, new_transition, new_emission):
        
        delta_T0 = tf.reduce_max(tf.abs(self.T0 - new_T0)) < self.epsilon
        delta_T = tf.reduce_max(tf.abs(self.T - new_transition)) < self.epsilon
        delta_E = tf.reduce_max(tf.abs(self.E - new_emission)) < self.epsilon

        return tf.logical_and(tf.logical_and(delta_T0, delta_T), delta_E)
Developer: aliziaei, Project: HiddenMarkovModel_TensorFlow, Lines: 7, Source: HiddenMarkovModel.py
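
The test above reduces each parameter delta to a scalar with tf.reduce_max(tf.abs(...)) and compares it to epsilon; a hedged standalone illustration with made-up values:

import tensorflow as tf

old = tf.constant([[0.5, 0.5], [0.3, 0.7]])
new = tf.constant([[0.5001, 0.4999], [0.3, 0.7]])
converged = tf.reduce_max(tf.abs(old - new)) < 1e-3  # scalar bool: True here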


Example 18: crop_or_pad

def crop_or_pad(image, curr_height, curr_width, new, height=True, crop=True):
  """Crops or pads an image.

  Args:
    image: 3-D float32 `Tensor` image.
    curr_height: Int, current height.
    curr_width: Int, current width.
    new: Int, new width or height.
    height: Boolean, cropping or padding for height.
    crop: Boolean, True if we're cropping, False if we're padding.
  Returns:
    image: 3-D float32 `Tensor` image.
  """
  # Crop the image to fit the new shape.
  abs_diff = tf.abs(new-curr_height)//2 if height else tf.abs(new-curr_width)//2
  offset_x = 0 if height else abs_diff
  offset_y = abs_diff if height else 0

  # We process height first, so always pad/crop to new height.
  target_height = new
  # We process height first, so pad/crop to new width only if not doing height.
  target_width = curr_width if height else new

  if crop:
    image = tf.image.crop_to_bounding_box(
        image, offset_y, offset_x, target_height, target_width)
  else:
    image = tf.image.pad_to_bounding_box(
        image, offset_y, offset_x, target_height, target_width)
  return image
Developer: NoPointExc, Project: models, Lines: 30, Source: preprocessing.py
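
A hedged usage sketch (shapes illustrative): cropping the height of a 224x200 image down to 200 centers the crop with an offset of (224 - 200) // 2 = 12 rows from the top:

import tensorflow as tf

image = tf.zeros([224, 200, 3])
cropped = crop_or_pad(image, curr_height=224, curr_width=200, new=200,
                      height=True, crop=True)  # -> shape [200, 200, 3]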


Example 19: __init__

    def __init__(self, shape, lambda1 = 0.1, lambda2 = 0.1, mu = 0.1):
        """Initialize the ChanVese segmenter

        Arguments:
        shape (required) -- size of the image to segment

        lambda1 (default : 0.1) -- The cost of labeling pixels type 1 (check the Class docstring). This argument (as well as lambda2) can be used if the segmentation should be biased in one direction or the other. It's not deterministic what bits of the image get labeled with either lambda though -- this (as well as lambda2) will likely be a bit of a guess and check parameter.

        lambda2 (default : 0.1) -- The cost of labeling pixels type 2 (check the Class docstring)

        mu (default : 0.1) -- This is the cost of having a boundary. A higher value will mean less boundaries
        """
        xs = range(3)
        ys = range(3)
        Xs, Ys = numpy.meshgrid(xs, ys)
        Rs = numpy.sqrt((Xs - 1.0)**2 + (Ys - 1.0)**2)

        kernelBlurCpu = numpy.exp(-Rs / (2.0 * 0.75**2)).astype('float32')
        kernelBlurCpu /= numpy.linalg.norm(kernelBlurCpu.flatten())
        
        self.kernel = tf.constant(kernelBlurCpu.reshape([3, 3, 1, 1]))

        self.I = tf.Variable(tf.truncated_normal(shape = [1, shape[0], shape[1], 1], mean = 0.0, stddev = 0.1))
        
        self.u1 = tf.Variable(1.0)
        self.u2 = tf.Variable(-1.0)

        self.G = tf.placeholder(tf.float32, shape = shape)

        self.Gv = tf.Variable(numpy.zeros([1, shape[0], shape[1], 1]).astype('float32'))
        self.initialize = self.Gv.assign(tf.reshape(self.G, shape = [1, shape[0], shape[1], 1]))
        self.initialize2 = self.I.assign(tf.reshape(self.G, shape = [1, shape[0], shape[1], 1]))

        self.blur = tf.nn.conv2d(self.I, self.kernel, strides = [1, 1, 1, 1], padding = 'SAME')

        self.u1m = tf.abs(self.blur - self.u1)
        self.u2m = tf.abs(self.blur - self.u2)

        ones = numpy.ones((1, shape[0], shape[1], 1)).astype('float32')
        zeros = numpy.zeros((1, shape[0], shape[1], 1)).astype('float32')

        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.mu = mu

        eta = 0.1
        self.conv = eta / (numpy.pi * (eta**2 + self.blur**2))

        # Note: tf.select is the pre-1.0 TensorFlow API; on TF >= 1.0 use tf.where.
        self.u1t = self.lambda1 * tf.reduce_sum(tf.select(self.u2m > self.u1m, (self.Gv - self.u1)**2, zeros))
        self.u2t = self.lambda2 * tf.reduce_sum(tf.select(self.u2m <= self.u1m, (self.Gv - self.u2)**2, zeros))

        self.edgeLoss = self.mu * tf.reduce_sum(tf.abs(self.conv))

        self.loss = self.u1t + self.u2t + self.edgeLoss

        self.shape = shape

        self.train_step = tf.train.AdamOptimizer(1.0e-1).minimize(self.loss, var_list = [self.I, self.u1, self.u2])
Developer: bbbales2, Project: microstructure_python, Lines: 60, Source: chanvese.py
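
A hedged driver sketch for the class above (the image is synthetic, the iteration count is illustrative, and on TF >= 1.0 the tf.select calls would need to become tf.where first):

import numpy
import tensorflow as tf

image = numpy.random.rand(64, 64).astype('float32')  # stand-in for a real image
cv = ChanVese(image.shape)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Load the image into Gv and use it to initialize the level-set estimate I.
    sess.run([cv.initialize, cv.initialize2], feed_dict={cv.G: image})
    for _ in range(200):
        sess.run(cv.train_step)
    u1, u2, blurred = sess.run([cv.u1, cv.u2, cv.blur])
    labels = blurred > (u1 + u2) / 2.0  # threshold between the two level values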


Example 20: __init__

    def __init__(self, inpt, n_in, n_hidden, n_out):
        """
        inpt: tf.Tensor, shape [n_examples, n_in]
        n_in: int, the dimensionality of input
        n_hidden: int, number of hidden units
        n_out: int, number of output units
        """
        # hidden layer
        self.hiddenLayer = HiddenLayer(inpt, n_in=n_in, n_out=n_hidden)
        # output layer (logistic layer)
        self.outputLayer = LogisticRegression(self.hiddenLayer.output, n_in=n_hidden,
                                              n_out=n_out)
        # L1 norm
        self.L1 = tf.reduce_sum(tf.abs(self.hiddenLayer.W)) + \
                  tf.reduce_sum(tf.abs(self.outputLayer.W))
        # L2 norm
        self.L2 = tf.reduce_sum(tf.square(self.hiddenLayer.W)) + \
                  tf.reduce_sum(tf.square(self.outputLayer.W))
        # cross_entropy cost function
        self.cost = self.outputLayer.cost
        # accuracy function (misspelling kept to match the attribute defined by LogisticRegression)
        self.accuracy = self.outputLayer.accuarcy

        # params
        self.params = self.hiddenLayer.params + self.outputLayer.params
        # keep track of input
        self.input = inpt
Developer: dyz-zju, Project: MVision, Lines: 27, Source: 02_mlp.py



Note: The tensorflow.abs examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.

