Python tensorflow.cond Function Code Examples


This article collects typical usage examples of Python's tensorflow.cond function. If you have been struggling with questions such as how exactly to use the cond function, how to call it, or what real examples look like, then the curated code examples below should help.



The following shows 20 code examples of the cond function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
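
Before the examples, a quick refresher on the API: tf.cond(pred, true_fn, false_fn) takes a scalar boolean tensor and two zero-argument callables, builds both branches into the graph, and executes only the selected branch at run time. Below is a minimal sketch in TensorFlow 1.x graph mode, the style all of the examples use; the tensor names are purely illustrative.

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[], name='x')
# Both lambdas are traced when the graph is built, but only the branch
# selected by the predicate is executed when the session runs.
y = tf.cond(x > 0.0, lambda: x * 2.0, lambda: x - 1.0)

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: 3.0}))   # 6.0
    print(sess.run(y, feed_dict={x: -3.0}))  # -4.0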

Example 1: gibbs_step

 def gibbs_step(count, k, xk, raw_xk, W=W):
     hk = gibbs_forward(xk, W, bh, binary)
     C1 = tf.cond(count + 1 >= k, lambda: tf.constant(c_1), lambda: tf.constant(0.5))
     C2 = tf.cond(count + 1 >= k, lambda: tf.constant(c_2), lambda: tf.constant(0.5))
     raw_xk = tf.sigmoid(tf.matmul(hk, tf.transpose(W)) + bv)
     xk = sample(raw_xk, binary=binary, c_1=c_1, c_2=c_2)
     return count+1, k, xk, raw_xk
Developer: dshieble, Project: Musical_Matrices, Lines of code: 7, Source file: RBM.py


Example 2: perform

  def perform(self, observ):
    """Compute batch of actions and a summary for a batch of observation.

    Args:
      observ: Tensor of a batch of observations for all agents.

    Returns:
      Tuple of action batch tensor and summary tensor.
    """
    with tf.name_scope('perform/'):
      observ = self._observ_filter.transform(observ)
      network = self._network(observ[:, None], tf.ones(observ.shape[0]), self._last_state)
      action = tf.cond(self._is_training, network.policy.sample, lambda: network.mean)
      logprob = network.policy.log_prob(action)[:, 0]
      # pylint: disable=g-long-lambda
      summary = tf.cond(
          self._should_log, lambda: tf.summary.merge([
              tf.summary.histogram('mean', network.mean[:, 0]),
              tf.summary.histogram('std', tf.exp(network.logstd[:, 0])),
              tf.summary.histogram('action', action[:, 0]),
              tf.summary.histogram('logprob', logprob)
          ]), str)
      # Remember current policy to append to memory in the experience callback.
      with tf.control_dependencies([
          utility.assign_nested_vars(self._last_state, network.state),
          self._last_action.assign(action[:, 0]),
          self._last_mean.assign(network.mean[:, 0]),
          self._last_logstd.assign(network.logstd[:, 0])
      ]):
        return tf.check_numerics(action[:, 0], 'action'), tf.identity(summary)
Developer: bulletphysics, Project: bullet3, Lines of code: 30, Source file: algorithm.py


Example 3: batchnorm

 def batchnorm(self, Ylogits, offset, convolutional=False):
     """batchnormalization.
     Args:
         Ylogits: 1D向量或者是3D的卷积结果。
         num_updates: 迭代的global_step
         offset:表示beta,全局均值;在 RELU 激活中一般初始化为 0.1。
         scale:表示lambda,全局方差;在 sigmoid 激活中需要,这 RELU 激活中作用不大。
         m: 表示batch均值;v:表示batch方差。
         bnepsilon:一个很小的浮点数,防止除以 0.
     Returns:
         Ybn: 和 Ylogits 的维度一样,就是经过 Batch Normalization 处理的结果。
         update_moving_everages:更新mean和variance,主要是给最后的 test 使用。
     """
     exp_moving_avg = tf.train.ExponentialMovingAverage(0.999,
                                                        self._global_step)  # adding the iteration prevents from averaging across non-existing iterations
     bnepsilon = 1e-5
     if convolutional:
         mean, variance = tf.nn.moments(Ylogits, [0, 1, 2])
     else:
         mean, variance = tf.nn.moments(Ylogits, [0])
     update_moving_everages = exp_moving_avg.apply([mean, variance])
     m = tf.cond(self.tst, lambda: exp_moving_avg.average(mean), lambda: mean)
     v = tf.cond(self.tst, lambda: exp_moving_avg.average(variance), lambda: variance)
     Ybn = tf.nn.batch_normalization(Ylogits, m, v, offset, None, bnepsilon)
     return Ybn, update_moving_everages
Developer: brucexia6116, Project: zhihu-text-classification, Lines of code: 25, Source file: network.py
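
The same moving-average pattern appears again in Example 14 below. What the excerpt does not show is how the two return values are driven at run time: the update op has to be run explicitly during training, and the boolean flag (self.tst here) switches tf.cond to the accumulated averages at test time. A condensed, self-contained sketch of that usage, with all sizes and names illustrative:

import tensorflow as tf

is_test = tf.placeholder(tf.bool, name='is_test')
x = tf.placeholder(tf.float32, shape=[None, 4])

mean, variance = tf.nn.moments(x, [0])
ema = tf.train.ExponentialMovingAverage(0.99)
update_moving_averages = ema.apply([mean, variance])

# Training: use the batch statistics; test: use the moving averages.
m = tf.cond(is_test, lambda: ema.average(mean), lambda: mean)
v = tf.cond(is_test, lambda: ema.average(variance), lambda: variance)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = [[1., 2., 3., 4.], [5., 6., 7., 8.]]
    # During training, run the update op alongside the training step.
    sess.run([update_moving_averages, m], feed_dict={x: batch, is_test: False})
    # At test time, flip the flag so tf.cond picks the averaged statistics.
    print(sess.run(m, feed_dict={x: batch, is_test: True}))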


Example 4: _augment

    def _augment(self, image, bboxes=None, default_prob=0.5):
        """Applies different data augmentation techniques.

        Uses the list of data augmentation configurations, each data
        augmentation technique has a probability assigned to it (or just uses
        the default value for the dataset).

        Procedures are applied sequentially on top of each other according to
        the order defined in the config.

        TODO: We need a better way to ensure order using YAML config without
        ending up with a list of single-key dictionaries.

        Args:
            image: A Tensor of shape (height, width, 3).
            bboxes: A Tensor of shape (total_bboxes, 5).

        Returns:
            image: A Tensor of shape (height, width, 3).
            bboxes: A Tensor of shape (total_bboxes, 5) of type tf.int32.
        """
        applied_data_augmentation = []
        for aug_config in self._data_augmentation:
            if len(aug_config.keys()) != 1:
                raise ValueError(
                    'Invalid data_augmentation definition: "{}"'.format(
                        aug_config))

            aug_type = list(aug_config.keys())[0]
            if aug_type not in DATA_AUGMENTATION_STRATEGIES:
                tf.logging.warning(
                    'Invalid data augmentation strategy "{}". Ignoring'.format(
                        aug_type))
                continue

            aug_config = aug_config[aug_type]
            aug_fn = DATA_AUGMENTATION_STRATEGIES[aug_type]

            random_number = tf.random_uniform([], seed=self._seed)
            prob = tf.to_float(aug_config.pop('prob', default_prob))
            apply_aug_strategy = tf.less(random_number, prob)

            augmented = aug_fn(image, bboxes, **aug_config)

            image = tf.cond(
                apply_aug_strategy,
                lambda: augmented['image'],
                lambda: image
            )

            if bboxes is not None:
                bboxes = tf.cond(
                    apply_aug_strategy,
                    lambda: augmented.get('bboxes'),
                    lambda: bboxes
                )

            applied_data_augmentation.append({aug_type: apply_aug_strategy})

        return image, bboxes, applied_data_augmentation
Developer: quan821223, Project: pulmonary-nodules-MaskRCNN, Lines of code: 60, Source file: object_detection_dataset.py


Example 5: _define_step

  def _define_step(self, done, score, summary):
    """Combine operations of a phase.

    Keeps track of the mean score and when to report it.

    Args:
      done: Tensor indicating whether current score can be used.
      score: Tensor holding the current, possibly intermediate, score.
      summary: Tensor holding summary string to write if not an empty string.

    Returns:
      Tuple of summary tensor, mean score, and new global step. The mean score
      is zero for non-reporting steps.
    """
    if done.shape.ndims == 0:
      done = done[None]
    if score.shape.ndims == 0:
      score = score[None]
    score_mean = streaming_mean.StreamingMean((), tf.float32)
    with tf.control_dependencies([done, score, summary]):
      done_score = tf.gather(score, tf.where(done)[:, 0])
      submit_score = tf.cond(tf.reduce_any(done), lambda: score_mean.submit(done_score), tf.no_op)
    with tf.control_dependencies([submit_score]):
      mean_score = tf.cond(self._report, score_mean.clear, float)
      steps_made = tf.shape(score)[0]
      next_step = self._step.assign_add(steps_made)
    with tf.control_dependencies([mean_score, next_step]):
      return tf.identity(summary), mean_score, next_step, steps_made
Developer: bulletphysics, Project: bullet3, Lines of code: 28, Source file: loop.py


Example 6: encoder_body

        def encoder_body(time, old_state, output_ta_t):
            x_t = input_ta.read(time)

            con = tf.concat(1, [x_t, old_state])
            z = tf.sigmoid(tf.matmul(con, W_z) + b_z)
            r = tf.sigmoid(tf.matmul(con, W_r) + b_r)
            con = tf.concat(1, [x_t, r*old_state])
            h = tf.tanh(tf.matmul(con, W_h) + b_h)
            new_state = (1-z)*h + z*old_state

            output_ta_t = output_ta_t.write(time, new_state)

            def updateall():
                return new_state

            def updatesome():
                if reverse:
                    return tf.select(
                        tf.greater_equal(time, max_sequence_length-lengths),
                        new_state,
                        old_state)
                else:
                    return tf.select(tf.less(time, lengths), new_state, old_state)

            if reverse:
                state = tf.cond(
                    tf.greater_equal(time, max_sequence_length-min_sequence_length),
                    updateall,
                    updatesome)
            else:
                state = tf.cond(tf.less(time, min_sequence_length), updateall, updatesome)

            return (time + 1, state, output_ta_t)
Developer: Styrke, Project: master-code, Lines of code: 33, Source file: rnn_dot.py


Example 7: while_exit_cond

    def while_exit_cond(result, logits, loss):  # pylint: disable=unused-argument
      """Exit the loop either if reach decode_length or EOS."""
      length = common_layers.shape_list(result)[1]

      not_overflow = length < decode_length

      if self._problem_hparams.stop_at_eos:

        def fn_not_eos():
          return tf.not_equal(  # Check if the last predicted element is an EOS
              tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID)

        not_eos = tf.cond(
            # We only check for early stopping if there is at least 1 element (
            # otherwise not_eos will crash)
            tf.not_equal(length, 0),
            fn_not_eos,
            lambda: True,
        )

        return tf.cond(
            tf.equal(batch_size, 1),
            # If batch_size == 1, we check EOS for early stopping
            lambda: tf.logical_and(not_overflow, not_eos),
            # Else, just wait for max length
            lambda: not_overflow)
      return not_overflow
Developer: AranKomat, Project: tensor2tensor, Lines of code: 27, Source file: t2t_model.py
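
An exit condition like this is meant to be passed as the cond argument of tf.while_loop during autoregressive decoding. A simplified, self-contained sketch of the same idea (stop at decode_length, or earlier once an EOS token appears), with all names and the dummy loop body purely illustrative:

import tensorflow as tf

decode_length = tf.constant(8)
eos_id = 1

def while_exit_cond(i, last_token):
    not_overflow = tf.less(i, decode_length)
    # As in the excerpt, only test for EOS once at least one token has
    # been produced; otherwise just check the length bound.
    return tf.cond(tf.greater(i, 0),
                   lambda: tf.logical_and(not_overflow,
                                          tf.not_equal(last_token, eos_id)),
                   lambda: not_overflow)

def body(i, last_token):
    # Dummy decoder step: pretend EOS (token 1) is produced at step 5.
    next_token = tf.cond(tf.equal(i, 5),
                         lambda: tf.constant(eos_id),
                         lambda: tf.constant(0))
    return i + 1, next_token

steps, _ = tf.while_loop(while_exit_cond, body,
                         loop_vars=[tf.constant(0), tf.constant(0)])

with tf.Session() as sess:
    print(sess.run(steps))  # 6: decoding stopped right after EOS appeared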


Example 8: enc_step

 def enc_step(step):
   """Encoder step."""
   if autoenc_decay < 1.0:
     quant_step = autoenc_quantize(step, 16, nmaps, self.do_training)
     if backward:
       exp_glob = tf.train.exponential_decay(1.0, self.global_step - 10000,
                                             1000, autoenc_decay)
       dec_factor = 1.0 - exp_glob  # * self.do_training
       dec_factor = tf.cond(tf.less(self.global_step, 10500),
                            lambda: tf.constant(0.05), lambda: dec_factor)
     else:
       dec_factor = 1.0
     cur = tf.cond(tf.less(tf.random_uniform([]), dec_factor),
                   lambda: quant_step, lambda: step)
   else:
     cur = step
   if dropout > 0.0001:
     cur = tf.nn.dropout(cur, keep_prob)
   if act_noise > 0.00001:
     cur += tf.truncated_normal(tf.shape(cur)) * act_noise_scale
   # Do nconvs-many CGRU steps.
   if do_jit and tf.get_variable_scope().reuse:
     with jit_scope():
       for layer in xrange(nconvs):
         cur = conv_gru([], cur, kw, kh, nmaps, conv_rate(layer),
                        cutoff, "ecgru_%d" % layer, do_layer_norm)
   else:
     for layer in xrange(nconvs):
       cur = conv_gru([], cur, kw, kh, nmaps, conv_rate(layer),
                      cutoff, "ecgru_%d" % layer, do_layer_norm)
   return cur
Developer: Jmq14, Project: models, Lines of code: 31, Source file: neural_gpu.py


Example 9: loop_fn

    def loop_fn(time, cell_output, cell_state, loop_state):
        emit_output = cell_output  # == None for time == 0
        
        if cell_output is None:  # time == 0
            next_cell_state = initial_state
        else:
            next_cell_state = cell_state
        
        elements_finished = (time >= sequence_length)
        
        finished = tf.reduce_all(elements_finished)
        
        next_input = tf.cond(
            finished,
            lambda: tf.zeros([batch_size, input_size], dtype=tf.float32),
            lambda: inputs_ta.read(time))

        next_target = tf.cond(
            finished,
            lambda: tf.zeros([batch_size, target_size], dtype=tf.float32),
            lambda: targets_ta.read(time))

        
        if loop_state is None:
            next_input = tf.expand_dims(next_input, 1)
            next_input = tf.pad(next_input, [[0,0], [window-1, 0], [0,0]])
        else:
            next_input = cat_hist(loop_state, next_input, 1)
        
        next_loop_state = next_input

        return (elements_finished, RnnHistInputTuple(next_input, next_target), next_cell_state, emit_output, next_loop_state)
Developer: alexeyche, Project: alexeyche-junk, Lines of code: 32, Source file: hist_rnn.py
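
A loop_fn with this signature is consumed by tf.nn.raw_rnn, which calls it once per step to decide what to feed next and when to stop; the tf.cond guards keep the TensorArray reads inside the live branch so nothing is read past the end of the sequence. A simplified, self-contained variant of the same pattern, closely following the tf.nn.raw_rnn documentation (sizes and names are illustrative, and the history window of the excerpt is omitted):

import tensorflow as tf

batch_size, max_time, input_size, num_units = 2, 5, 3, 4
inputs = tf.random_normal([max_time, batch_size, input_size])
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time).unstack(inputs)
sequence_length = tf.constant([5, 3])
cell = tf.nn.rnn_cell.GRUCell(num_units)

def loop_fn(time, cell_output, cell_state, loop_state):
    emit_output = cell_output  # None on the first call
    next_cell_state = (cell.zero_state(batch_size, tf.float32)
                       if cell_output is None else cell_state)
    elements_finished = time >= sequence_length
    finished = tf.reduce_all(elements_finished)
    # tf.cond keeps the read inside the live branch, so the TensorArray is
    # never read past its last element.
    next_input = tf.cond(
        finished,
        lambda: tf.zeros([batch_size, input_size], dtype=tf.float32),
        lambda: inputs_ta.read(time))
    return elements_finished, next_input, next_cell_state, emit_output, None

outputs_ta, final_state, _ = tf.nn.raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()  # [max_time, batch_size, num_units]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(outputs).shape)  # (5, 2, 4)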


Example 10: _g_recurrence_2

        def _g_recurrence_2(i, x_t, gen_x, h_tm1, h_tm1_manager, last_goal, real_goal):
            # with tf.device('/cpu:0'):
            cur_sen = tf.cond(
                i > 0,
                lambda: tf.split(
                    tf.concat([tf.transpose(gen_x.stack(), perm=[1, 0]), self.padding_array], 1),
                    [self.sequence_length, i - 1], 1)[0],
                lambda: self.padding_array)
            with tf.variable_scope(self.scope):
                feature = self.FeatureExtractor_unit(cur_sen, self.drop_out)
            h_t_Worker = self.g_worker_recurrent_unit(x_t, h_tm1)  # hidden_memory_tuple
            o_t_Worker = self.g_worker_output_unit(h_t_Worker)  # batch x vocab , logits not prob

            o_t_Worker = tf.reshape(o_t_Worker, [self.batch_size, self.num_vocabulary, self.goal_size])

            h_t_manager = self.g_manager_recurrent_unit(feature, h_tm1_manager)
            sub_goal = self.g_manager_output_unit(h_t_manager)
            sub_goal = tf.nn.l2_normalize(sub_goal, 1)

            real_sub_goal = tf.add(last_goal, sub_goal)
            w_g = tf.matmul(real_goal, self.g_change)  # batch x goal_size
            w_g = tf.nn.l2_normalize(w_g, 1)
            w_g = tf.expand_dims(w_g, 2)  # batch x goal_size x 1

            x_logits = tf.matmul(o_t_Worker, w_g)
            x_logits = tf.squeeze(x_logits)

            log_prob = tf.log(tf.nn.softmax(x_logits))
            next_token = tf.cast(tf.reshape(tf.multinomial(log_prob, 1), [self.batch_size]), tf.int32)
            x_tp1 = tf.nn.embedding_lookup(self.g_embeddings, next_token)  # batch x emb_dim
            with tf.control_dependencies([cur_sen]):
                gen_x = gen_x.write(i - 1, next_token)  # indices, batch_size
            return i + 1, x_tp1, gen_x, h_t_Worker, h_t_manager, \
                   tf.cond(((i) % self.step_size) > 0, lambda: real_sub_goal,
                           lambda: tf.constant(0.0, shape=[self.batch_size, self.goal_out_size])), \
                   tf.cond(((i) % self.step_size) > 0, lambda: real_goal, lambda: real_sub_goal)
Developer: IshJ, Project: Texygen, Lines of code: 33, Source file: LeakganGenerator.py


Example 11: _create_learning_rate

def _create_learning_rate(hyperparams, step_var):
  """Creates learning rate var, with decay and switching for CompositeOptimizer.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    step_var: tf.Variable, global training step.

  Raises:
    ValueError: If the composite optimizer is set, but not correctly configured.

  Returns:
    a scalar `Tensor`, the learning rate based on current step and hyperparams.
  """
  if hyperparams.learning_method != 'composite':
    base_rate = hyperparams.learning_rate
    adjusted_steps = step_var
  else:
    spec = hyperparams.composite_optimizer_spec
    switch = tf.less(step_var, spec.switch_after_steps)
    base_rate = tf.cond(switch, lambda: tf.constant(spec.method1.learning_rate),
                        lambda: tf.constant(spec.method2.learning_rate))
    if spec.reset_learning_rate:
      adjusted_steps = tf.cond(switch, lambda: step_var,
                               lambda: step_var - spec.switch_after_steps)
    else:
      adjusted_steps = step_var

  return tf.train.exponential_decay(
      learning_rate=base_rate,
      global_step=adjusted_steps,
      decay_steps=hyperparams.decay_steps,
      decay_rate=hyperparams.decay_base,
      staircase=hyperparams.decay_staircase)
Developer: ALISCIFP, Project: models, Lines of code: 34, Source file: graph_builder.py


Example 12: apply_stats

    def apply_stats(self, statsUpdates):
        """ compute stats and update/apply the new stats to the running average
        """

        def updateAccumStats():
            if self._full_stats_init:
                return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op)
            else:
                return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter))

        def updateRunningAvgStats(statsUpdates, fac_iter=1):
            # return tf.cond(tf.greater_equal(self.factor_step,
            # tf.convert_to_tensor(fac_iter)), lambda:
            # tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)
            return tf.group(*self._apply_stats(statsUpdates))

        if self._async_stats:
            # asynchronous stats update
            update_stats = self._apply_stats(statsUpdates)

            queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[
                                 item.get_shape() for item in update_stats])
            enqueue_op = queue.enqueue(update_stats)

            def dequeue_stats_op():
                return queue.dequeue()
            self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
            update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(
                0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))
        else:
            # synchronous stats update
            update_stats_op = tf.cond(tf.greater_equal(
                self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)
        self._update_stats_op = update_stats_op
        return update_stats_op
Developer: IcarusTan, Project: baselines, Lines of code: 35, Source file: kfac.py


Example 13: __imagenet_data_process_function

 def __imagenet_data_process_function(self, x, y):
     with tf.name_scope("imagenet_data_aug") as scope:
         #random scale
         #apparently, this works better than what we have:
         #https://github.com/facebook/fb.resnet.torch
         #but let's use the 'original' formulation for now
         #randomly sample a size in specified range
         random_size = tf.squeeze(tf.random_uniform((1, 1), 256, 480, dtype=tf.int32, name="random_scale_size"))
         #rescale smaller size with this factor
         tf.cond(tf.greater(tf.shape(x)[0], tf.shape(x)[1]), 
             lambda: tf.image.resize_images(x, [tf.shape(x)[0] * (tf.shape(x)[1] / random_size), random_size]),
             lambda: tf.image.resize_images(x, [random_size, tf.shape(x)[1] * (tf.shape(x)[0] / random_size)]))
         x = tf.image.resize_images(x, [224, 224])
         #random flip
         x = tf.image.flip_left_right(x)
         #random crop
         x = tf.random_crop(x, [224, 224, 3])
         #colour augmentation
         #this is a little more involved than I first thought
         #lets pick the inception colour distortion
         #https://github.com/tensorflow/models/blob/master/inception/inception/image_processing.py
         x = tf.image.random_brightness(x, max_delta=32. / 255.)
         x = tf.image.random_saturation(x, lower=0.5, upper=1.5)
         x = tf.image.random_hue(x, max_delta=0.2)
         x = tf.image.random_contrast(x, lower=0.5, upper=1.5)
         x = tf.clip_by_value(x, 0.0, 1.0)
         #normalisation
         x = tf.image.per_image_standardization(x)
     return [x, y]
Developer: deworrall92, Project: groupConvolutions, Lines of code: 29, Source file: settings.py


Example 14: batchnorm

def batchnorm(Ylogits, Offset, Scale, is_test, iteration):
    exp_moving_avg = tf.train.ExponentialMovingAverage(0.998, iteration) # adding the iteration prevents from averaging across non-existing iterations
    bnepsilon = 1e-5
    mean, variance = tf.nn.moments(Ylogits, [0])
    update_moving_averages = exp_moving_avg.apply([mean, variance])
    m = tf.cond(is_test, lambda: exp_moving_avg.average(mean), lambda: mean)
    v = tf.cond(is_test, lambda: exp_moving_avg.average(variance), lambda: variance)
    Ybn = tf.nn.batch_normalization(Ylogits, m, v, Offset, Scale, bnepsilon)
    return Ybn, update_moving_averages
Developer: obaidpervaizgill, Project: tensorflow-without-a-phd, Lines of code: 9, Source file: mnist_4.0_batchnorm_five_layers_sigmoid.py


Example 15: _get_valid_sample_fraction

def _get_valid_sample_fraction(labels, p=0):
    """return fraction of non-negative examples, the ignored examples have been marked as negative"""
    num_valid = tf.reduce_sum(tf.cast(tf.greater_equal(labels, p), tf.float32))
    num_example = tf.cast(tf.size(labels), tf.float32)
    frac = tf.cond(tf.greater(num_example, 0), lambda:num_valid / num_example,  
            lambda: tf.cast(0, tf.float32))
    frac_ = tf.cond(tf.greater(num_valid, 0), lambda:num_example / num_valid, 
            lambda: tf.cast(0, tf.float32))
    return frac, frac_
Developer: imyourm8, Project: FastMaskRCNN, Lines of code: 9, Source file: pyramid_network.py


Example 16: __init__

    def __init__(self, input_size, output_size):
        self.graph = tf.Graph()
        self.hyper_cnt = input_size
        self.save_path = "fit_trend.ckpt"

        self.collect_counter = 0
        self.fit_loss_collect = list()
        self.stable_loss_predict_collect = list()
        self.hp_collect = [list() for _ in range(self.hyper_cnt)]
        self.gradient_collect = [list() for _ in range(self.hyper_cnt)]
        self.stable_loss_label_collect = list()

        self.hp_norms = list()
        self.has_init = False

        with self.graph.as_default():
            # Receive the inputs
            self.ph_hypers = tf.placeholder(tf.float32, shape=[self.hyper_cnt], name='ph_hypers')
            self.tf_hypers, self.reset_vars = assign_diffable_vars2tensor(self.ph_hypers, self.hyper_cnt)
            rnn_step = 5
            trend_input = tf.concat(0, [self.tf_hypers for _ in range(rnn_step)])
            # Pass through an RNN
            trend_outputs = rnn(trend_input, n_hidden=128)
            print('rnn output')
            print(tf.concat(0, trend_outputs))
            # Feed the RNN output into a DNN
            trend_output = dnn(tf.concat(0, trend_outputs), [1, output_size])
            print('dnn output')
            print(trend_output)
            self.predict = trend_output
            # The actual trend
            self.train_label = tf.placeholder(tf.float32, shape=[output_size], name='train_label')
            # Prediction accuracy: the Euclidean distance between predict and trend
            predict_accuracy = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(trend_output, self.train_label)))) / output_size
            # predict_accuracy /= tf.reduce_mean(tf.concat(0, self.train_label))
            # Loss at convergence, i.e. the last loss value
            stable_loss = tf.unpack(tf.unpack(trend_output)[0])[-1]
            print(stable_loss)
            self.is_fit = tf.placeholder(tf.bool, name='is_fit')
            self.loss = tf.cond(self.is_fit, lambda: predict_accuracy, lambda: stable_loss)

            # Optimizer
            self.var_s = tf.trainable_variables()
            self.v_hp_s = self.var_s[0: self.hyper_cnt]
            self.v_fit_s = [v for v in self.var_s if v not in self.v_hp_s]
            self.grads = var_gradient(self.v_hp_s, self.loss, start_rate=0.1, lrd=False)

            def optimize_fit():
                optimizer_fit = var_optimizer(self.v_fit_s, self.loss)
                return optimizer_fit

            def optimize_hp():
                optimizer_hp = var_optimizer(self.v_hp_s, self.loss, start_rate=0.1, lrd=False)
                return optimizer_hp

            self.optimizer = tf.cond(self.is_fit, optimize_fit, optimize_hp)
            self.saver = tf.train.Saver()
Developer: ahangchen, Project: NN, Lines of code: 57, Source file: hp2trend.py


Example 17: style_loss

def style_loss(CNN_structure, const_layers, var_layers, content_segs, style_segs, weight):
    loss_styles = []
    layer_count = float(len(const_layers))
    layer_index = 0

    _, content_seg_height, content_seg_width, _ = content_segs[0].get_shape().as_list()
    _, style_seg_height, style_seg_width, _ = style_segs[0].get_shape().as_list()
    for layer_name in CNN_structure:
        layer_name = layer_name[layer_name.find("/") + 1:]

        # downsampling segmentation
        if "pool" in layer_name:
            content_seg_width, content_seg_height = int(math.ceil(content_seg_width / 2)), int(math.ceil(content_seg_height / 2))
            style_seg_width, style_seg_height = int(math.ceil(style_seg_width / 2)), int(math.ceil(style_seg_height / 2))

            for i in xrange(len(content_segs)):
                content_segs[i] = tf.image.resize_bilinear(content_segs[i], tf.constant((content_seg_height, content_seg_width)))
                style_segs[i] = tf.image.resize_bilinear(style_segs[i], tf.constant((style_seg_height, style_seg_width)))

        elif "conv" in layer_name:
            for i in xrange(len(content_segs)):
                # have some differences on border with torch
                content_segs[i] = tf.nn.avg_pool(tf.pad(content_segs[i], [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT"), \
                ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='VALID')
                style_segs[i] = tf.nn.avg_pool(tf.pad(style_segs[i], [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT"), \
                ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='VALID')

        if layer_name == var_layers[layer_index].name[var_layers[layer_index].name.find("/") + 1:]:
            print("Setting up style layer: <{}>".format(layer_name))
            const_layer = const_layers[layer_index]
            var_layer = var_layers[layer_index]

            layer_index = layer_index + 1

            layer_style_loss = 0.0
            for content_seg, style_seg in zip(content_segs, style_segs):
                gram_matrix_const = gram_matrix(tf.multiply(const_layer, style_seg))
                style_mask_mean   = tf.reduce_mean(style_seg)
                gram_matrix_const = tf.cond(tf.greater(style_mask_mean, 0.),
                                        lambda: gram_matrix_const / (tf.to_float(tf.size(const_layer)) * style_mask_mean),
                                        lambda: gram_matrix_const
                                    )

                gram_matrix_var   = gram_matrix(tf.multiply(var_layer, content_seg))
                content_mask_mean = tf.reduce_mean(content_seg)
                gram_matrix_var   = tf.cond(tf.greater(content_mask_mean, 0.),
                                        lambda: gram_matrix_var / (tf.to_float(tf.size(var_layer)) * content_mask_mean),
                                        lambda: gram_matrix_var
                                    )

                diff_style_sum    = tf.reduce_mean(tf.squared_difference(gram_matrix_const, gram_matrix_var)) * content_mask_mean

                layer_style_loss += diff_style_sum

            loss_styles.append(layer_style_loss * weight)
    return loss_styles
Developer: 4ever911, Project: deep-photo-styletransfer-tf, Lines of code: 56, Source file: photo_style.py


Example 18: distort_image

def distort_image(image, input_width, input_height, output_side):
    """Applies random distortion to the image.
    The output image is output_side x output_side x 3
    """

    def random_crop_it():
        """Random crops image, after resizing it to output_side +10 x output_side+10"""
        resized_img = resize_bl(image, output_side + 10)
        return tf.random_crop(resized_img, [output_side, output_side, 3])

    def resize_it():
        """Resize the image using resize_bl"""
        return resize_bl(image, output_side)

    # if input.width >= output.side + 10 and input.height >= output.side + 10
    #   resize it to output.side + 10 x output.size + 10 and random crop it
    # else resize it
    increased_output_side = tf.constant(output_side + 10, dtype=tf.int64)
    image = tf.cond(
        tf.logical_and(
            tf.greater_equal(input_width, increased_output_side),
            tf.greater_equal(input_height, increased_output_side)),
        random_crop_it, resize_it)

    # Apply random distortions to the image
    flipped_image = tf.image.random_flip_left_right(image)

    # randomize the order of the random distortions
    def fn1():
        """Applies random brightness, saturation, hue, contrast"""
        distorted_image = tf.image.random_brightness(
            flipped_image, max_delta=32. / 255.)
        distorted_image = tf.image.random_saturation(
            distorted_image, lower=0.5, upper=1.5)
        distorted_image = tf.image.random_hue(distorted_image, max_delta=0.2)
        distorted_image = tf.image.random_contrast(
            distorted_image, lower=0.5, upper=1.5)
        return distorted_image

    def fn2():
        """Applies random brightness, contrast, saturation, hue"""
        distorted_image = tf.image.random_brightness(
            flipped_image, max_delta=32. / 255.)
        distorted_image = tf.image.random_contrast(
            distorted_image, lower=0.5, upper=1.5)
        distorted_image = tf.image.random_saturation(
            distorted_image, lower=0.5, upper=1.5)
        distorted_image = tf.image.random_hue(distorted_image, max_delta=0.2)

        return distorted_image

    p_order = tf.random_uniform(
        shape=[], minval=0.0, maxval=1.0, dtype=tf.float32)
    distorted_image = tf.cond(tf.less(p_order, 0.5), fn1, fn2)
    distorted_image = tf.clip_by_value(distorted_image, 0.0, 1.0)
    return distorted_image
Developer: galeone, Project: pgnet, Lines of code: 56, Source file: image_processing.py


Example 19: optimOp

 def optimOp():
     def updateOptimOp():
         if self._full_stats_init:
             return tf.cond(tf.greater(self.factor_step, tf.convert_to_tensor(0)), lambda: optim.apply_gradients(list(zip(u, varlist))), tf.no_op)
         else:
             return optim.apply_gradients(list(zip(u, varlist)))
     if self._full_stats_init:
         return tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter), updateOptimOp, tf.no_op)
     else:
         return tf.cond(tf.greater_equal(self.sgd_step, self._cold_iter), updateOptimOp, tf.no_op)
Developer: IcarusTan, Project: baselines, Lines of code: 10, Source file: kfac.py


Example 20: summary

  def summary(self):
    """Summary string of mean and standard deviation.

    Returns:
      Summary tensor.
    """
    with tf.name_scope(self._name + '/summary'):
      mean_summary = tf.cond(self._count > 0, lambda: self._summary('mean', self._mean), str)
      std_summary = tf.cond(self._count > 1, lambda: self._summary('stddev', self._std()), str)
      return tf.summary.merge([mean_summary, std_summary])
Developer: bulletphysics, Project: bullet3, Lines of code: 10, Source file: normalize.py



Note: the tensorflow.cond examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.

