
Python tensorflow.norm Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.norm function in Python. If you have been struggling with questions such as: What exactly does Python's norm function do? How is norm called? What do real uses of norm look like? Then the hand-picked code examples below may be just the help you need.



Below, a total of 20 code examples of the norm function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
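Before the project examples, here is a minimal self-contained sketch of tf.norm's basic behaviour (written in the TF 1.x style the snippets below use; the tensor values are purely illustrative):

import numpy as np
import tensorflow as tf

x = tf.constant([[3.0, 4.0], [0.0, 12.0]])

v2   = tf.norm(x, axis=1)                          # row-wise L2 norms  -> [5.0, 12.0]
v1   = tf.norm(x, ord=1, axis=1)                   # row-wise L1 norms  -> [7.0, 12.0]
vinf = tf.norm(x, ord=np.inf, axis=1)              # row-wise max-abs   -> [4.0, 12.0]
fro  = tf.norm(x, ord='euclidean', axis=[-2, -1])  # Frobenius norm     -> 13.0

with tf.Session() as sess:
    print(sess.run([v2, v1, vinf, fro]))

Passing axis=None (the default) flattens the tensor and returns a single scalar norm; a 2-tuple axis such as [-2, -1] requests a matrix norm over those two dimensions.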

Example 1: cosineface_losses

def cosineface_losses(embedding, labels, out_num, w_init=None, s=30., m=0.4):
    '''
    :param embedding: the input embedding vectors
    :param labels: the input labels; the shape should be e.g. (batch_size, 1)
    :param out_num: number of output classes
    :param w_init: initializer for the class-weight matrix
    :param s: scale factor, default 30
    :param m: margin value, default 0.4
    :return: the final calculated output; it is fed directly into tf.nn.softmax
    '''
    with tf.variable_scope('cosineface_loss'):
        # inputs and weights norm
        embedding_norm = tf.norm(embedding, axis=1, keep_dims=True)
        embedding = tf.div(embedding, embedding_norm, name='norm_embedding')
        weights = tf.get_variable(name='embedding_weights', shape=(embedding.get_shape().as_list()[-1], out_num),
                                  initializer=w_init, dtype=tf.float32)
        weights_norm = tf.norm(weights, axis=0, keep_dims=True)
        weights = tf.div(weights, weights_norm, name='norm_weights')
        # cos_theta - m
        cos_t = tf.matmul(embedding, weights, name='cos_t')
        cos_t_m = tf.subtract(cos_t, m, name='cos_t_m')

        mask = tf.one_hot(labels, depth=out_num, name='one_hot_mask')
        inv_mask = tf.subtract(1., mask, name='inverse_mask')

        output = tf.add(s * tf.multiply(cos_t, inv_mask), s * tf.multiply(cos_t_m, mask), name='cosineface_loss_output')
    return output
Developer: xy694942097 | Project: InsightFace_TF | Lines: 26 | Source: face_losses.py
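A hedged call sketch for the function above (the placeholder shapes, NUM_CLASSES, and the cross-entropy wiring are assumptions, not taken from InsightFace_TF): the returned scaled cosine logits are meant to be fed into a softmax loss.

import tensorflow as tf

NUM_CLASSES = 10  # hypothetical class count
embedding = tf.placeholder(tf.float32, [None, 512], name='embedding')  # assumed 512-D embeddings
labels = tf.placeholder(tf.int64, [None], name='labels')               # sparse class indices

logits = cosineface_losses(embedding, labels, out_num=NUM_CLASSES)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))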


Example 2: tf_summary

 def tf_summary(self):
     tf.summary.scalar('cost', self.cost)
     tf.summary.scalar('w_fnorm', tf.norm(self.W, ord='euclidean', axis=[-2,-1]))   # Frobenius Norm
     tf.summary.scalar('b_1norm', tf.norm(self.b, ord=1))
     tf.summary.scalar('b_2norm', tf.norm(self.b, ord=2))
     self.summary = tf.summary.merge_all()   # for saving in the epoch/iteration
     self.sw = tf.summary.FileWriter(self.result_dir, self.sess.graph)
Developer: jamescfli | Project: PythonTest | Lines: 7 | Source: make_logistic_regression_model.py


Example 3: rothk_penalty

    def rothk_penalty(self, d_real, d_fake):
        config = self.config
        g_sample = self.gan.uniform_sample
        x = self.gan.inputs.x
        gradx = tf.gradients(d_real, [x])[0]
        gradg = tf.gradients(d_fake, [g_sample])[0]
        gradx = tf.reshape(gradx, [self.ops.shape(gradx)[0], -1])
        gradg = tf.reshape(gradg, [self.ops.shape(gradg)[0], -1])
        gradx_norm = tf.norm(gradx, axis=1, keep_dims=True)
        gradg_norm = tf.norm(gradg, axis=1, keep_dims=True)
        if int(gradx_norm.get_shape()[0]) != int(d_real.get_shape()[0]):
            print("Condensing along batch for rothk")
            gradx_norm = tf.reduce_mean(gradx_norm, axis=0)
            gradg_norm = tf.reduce_mean(gradg_norm, axis=0)
        gradx = tf.square(gradx_norm) * tf.square(1-tf.nn.sigmoid(d_real))
        gradg = tf.square(gradg_norm) * tf.square(tf.nn.sigmoid(d_fake))
        loss = gradx + gradg
        loss *= config.rothk_lambda or 1
        if config.rothk_decay:
            decay_function = config.decay_function or tf.train.exponential_decay
            decay_steps = config.decay_steps or 50000
            decay_rate = config.decay_rate or 0.9
            decay_staircase = config.decay_staircase or False
            global_step = tf.train.get_global_step()
            loss = decay_function(loss, global_step, decay_steps, decay_rate, decay_staircase)

        return loss
Developer: 255BITS | Project: hyperchamber-gan | Lines: 27 | Source: base_loss.py


Example 4: p_norm

def p_norm(tensor,order):
    if type(order) in [int,float]:
        return tf.norm(tensor,ord=order)
    elif type(order) in [list,tuple]:
        return [tf.norm(tensor,ord=order_item) for order_item in order]
    else:
        raise ValueError('Unrecognized order of p_norm: %s'%str(order))
Developer: liuchen11 | Project: SSDForRNN | Lines: 7 | Source: norm.py
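A quick usage sketch (the tensor values are illustrative, not from the source project): a scalar order returns a single tensor, while a list of orders returns a list of tensors.

import tensorflow as tf

t = tf.constant([3.0, -4.0])
single  = p_norm(t, 2)        # L2 norm tensor: 5.0
several = p_norm(t, [1, 2])   # [L1, L2] = [7.0, 5.0]

with tf.Session() as sess:
    print(sess.run([single] + several))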


Example 5: find_best_k

def find_best_k(X, Z):
    best_k = 1
    best_valid_loss = float("inf")
    for k in [1, 3, 5, 50]:
        sess = tf.InteractiveSession()

        dist = calculate_euclidean_distance(X, Z)
        # print(sess.run(dist, feed_dict={X: trainData, Z: testData}))
        r = calculate_responsibilities(dist, k=k)
        prediction = tf.matmul(r, casted_train_target)

        train_losses = tf.norm(trainTarget - prediction)
        valid_losses = tf.norm(validTarget - prediction)
        valid_losses = sess.run(valid_losses, feed_dict={X: trainData, Z: validData})
        test_losses = tf.norm(testTarget - prediction)
        print("Training/Validation/Testing loss for k={:d} is {:f}/{:f}/{:f}"
              .format(k, sess.run(train_losses, feed_dict={X: trainData, Z: trainData}),
                      valid_losses,
                      sess.run(test_losses, feed_dict={X: trainData, Z: testData})))

        if valid_losses < best_valid_loss:
            best_k = k
            best_valid_loss = valid_losses

    return best_k, best_valid_loss
Developer: mchenchen | Project: Course-Work | Lines: 25 | Source: a1_2.py


Example 6: _l1_loss

 def _l1_loss(self, hparams):
     l1_loss = tf.zeros([1], dtype=tf.float32)
      # embedding_layer l1 loss
     for param in self.embed_params:
         l1_loss = tf.add(l1_loss, tf.multiply(hparams.embed_l1, tf.norm(param, ord=1)))
     params = self.layer_params
     for param in params:
         l1_loss = tf.add(l1_loss, tf.multiply(hparams.layer_l1, tf.norm(param, ord=1)))
     return l1_loss
Developer: zeroToAll | Project: tensorflow_practice | Lines: 9 | Source: base_model.py
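In effect this accumulates the scalar regularizer l1_loss = embed_l1 * Σ‖p‖₁ over the embedding parameters plus layer_l1 * Σ‖p‖₁ over the layer parameters, where ‖p‖₁ = Σᵢ|pᵢ| is what tf.norm(param, ord=1) computes over the flattened parameter tensor.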


Example 7: apply_gradients

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    var_list = [ v for _,v in grads_and_vars]
    with ops.init_scope():
        zt = [self._get_or_make_slot(v, v, "zt", self._name) for _,v in grads_and_vars]
        slots_list = []
        for name in self.optimizer.get_slot_names():
            for var in self.optimizer.variables():
                self._get_or_make_slot(var, var, "zt", "zt")
    self._prepare()

    def _name(post, s):
        ss = s.split(":")
        return ss[0] + "_" + post + "_dontsave"
    zt = [self.get_slot(v, "zt") for _,v in grads_and_vars]
    xt = [tf.Variable(v, name=_name("gigaxt",v.name)) for _,v in grads_and_vars]
    tmp = [tf.Variable(v, name=_name("gigatmp",v.name)) for _,v in grads_and_vars]
    xslots_list = []
    zslots_list = []
    tmpslots_list = []
    slots_vars = []
    for name in self.optimizer.get_slot_names():
        for var in self.optimizer.variables():
            slots_vars += [var]
            xslots_list.append(tf.Variable(var))
            zslots_list.append(self._get_or_make_slot(var, var, "zt", "zt"))
            tmpslots_list.append(tf.Variable(var, name=_name("gigaslottmp", var.name)))


    restored_vars = var_list + slots_vars
    zt_vars = zt + zslots_list
    xt_vars = xt + xslots_list
    tmp_vars = tmp + tmpslots_list
    all_grads = [ g for g, _ in grads_and_vars ]
    # store variables for resetting

    op1 = tf.group(*[tf.assign(w, v) for w,v in zip(tmp_vars, restored_vars)]) # store tmp_vars

    with tf.get_default_graph().control_dependencies([op1]):
        op2 = self.optimizer.apply_gradients(grads_and_vars.copy(), global_step=global_step, name=name)
        with tf.get_default_graph().control_dependencies([op2]):
            op3 = tf.group(*[tf.assign(w, v) for w,v in zip(xt_vars, restored_vars)]) # store xt^+1 in xt_vars
            with tf.get_default_graph().control_dependencies([op3]):
                op4 = tf.group(*[tf.assign(w, v) for w,v in zip(restored_vars, zt_vars)]) # restore vars to zt (different weights)
                with tf.get_default_graph().control_dependencies([op4]):
                    op5 = self.optimizer2.apply_gradients(grads_and_vars.copy(), global_step=global_step, name=name) # zt+1
                    with tf.get_default_graph().control_dependencies([op5]):
                        zt1_xt1 = [_restored_vars - _xt1_vars for _restored_vars, _xt1_vars in zip(restored_vars, xt_vars)]
                        St1 = [tf.minimum(1.0, tf.norm(_zt1_vars-_zt_vars) / tf.norm(_zt1_xt1)) for _zt1_vars, _zt_vars, _zt1_xt1 in zip(restored_vars, zt_vars, zt1_xt1)]
                        self.gan.add_metric('st1',tf.reduce_mean(tf.add_n(St1)/len(St1)))
                        #self.gan.add_metric('xzt1',tf.norm(xt_vars[0]-zt_vars[0]))
                        nextw = [_xt_t1 + _St1 * _zt1_xt1 for _xt_t1, _St1, _zt1_xt1 in zip(xt_vars, St1, zt1_xt1)]
                        op6 = tf.group(*[tf.assign(w, v) for w,v in zip(zt_vars, restored_vars)]) # set zt+1
                        with tf.get_default_graph().control_dependencies([op6]):
                            op7 = tf.group(*[tf.assign(w, v) for w,v in zip(restored_vars, nextw)]) # set xt+1
                            with tf.get_default_graph().control_dependencies([op7]):
                                return tf.no_op()
Developer: 255BITS | Project: hyperchamber-gan | Lines: 56 | Source: giga_wolf_optimizer.py


Example 8: s_norm

def s_norm(tensor,order):
    s,U,V=tf.svd(tensor,full_matrices=False)
    result=None
    if type(order) in [int,float]:
        result=tf.norm(s,ord=order)
    elif type(order) in [list,tuple]:
        result=[tf.norm(s,ord=order_item) for order_item in order]
    else:
        raise ValueError('Unrecognized order of s_norm: %s'%str(order))
    return s,result
Developer: liuchen11 | Project: SSDForRNN | Lines: 10 | Source: norm.py
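For reference, these are Schatten p-norms: the ordinary p-norm applied to the vector of singular values, ‖A‖_Sp = (Σᵢ σᵢᵖ)^(1/p). p=1 gives the nuclear (trace) norm, p=2 the Frobenius norm, and p=∞ (handled as order 'Si' in Example 9 below) the spectral norm.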


Example 9: __tensor_norm__

 def __tensor_norm__(self,tensor,order):
     if order in ['Si']:           # Schatten inf norm
         s,U,V=tf.svd(tensor,full_matrices=False)
         return tf.norm(s,ord=np.inf)
     elif order[0]=='S':           # Schatten norm
         s,U,V=tf.svd(tensor,full_matrices=False)
         sub_order=int(order[1:])
         return tf.norm(s,ord=sub_order)
     else:
         sub_order=int(order)
         return tf.norm(tensor,ord=sub_order)
Developer: liuchen11 | Project: SSDForRNN | Lines: 11 | Source: RNNs.py


Example 10: body

  def body(self, features):
    hp = self.hparams
    # pylint: disable=eval-used
    if hp.image_input_type == "image":
      image_feat = vqa_layers.image_embedding(
          features["inputs"],
          model_fn=eval(hp.image_model_fn),
          trainable=hp.train_resnet,
          is_training=hp.mode == tf.estimator.ModeKeys.TRAIN)
    else:
      image_feat = features["inputs"]

    image_feat = common_layers.flatten4d3d(image_feat)
    image_feat = common_layers.dense(image_feat, hp.hidden_size)
    utils.collect_named_outputs("norms", "image_feat_after_proj",
                                tf.norm(image_feat, axis=-1))

    question = common_layers.flatten4d3d(features["question"])
    utils.collect_named_outputs("norms", "question_embedding",
                                tf.norm(question, axis=-1))
    (encoder_input, encoder_self_attention_bias,
     encoder_decoder_attention_bias) = prepare_image_question_encoder(
         image_feat, question, hp)

    encoder_input = tf.nn.dropout(
        encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout)

    encoder_output, _ = recurrent_transformer_decoder(
        encoder_input, None, encoder_self_attention_bias, None,
        hp, name="encoder")
    utils.collect_named_outputs(
        "norms", "encoder_output", tf.norm(encoder_output, axis=-1))

    # scale query by sqrt(hidden_size)
    query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size **0.5
    query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0)
    batch_size = common_layers.shape_list(encoder_input)[0]
    query = tf.tile(query, [batch_size, 1, 1])
    query = tf.nn.dropout(
        query, keep_prob=1.-hp.layer_prepostprocess_dropout)

    decoder_output, _ = recurrent_transformer_decoder(
        query, encoder_output, None, encoder_decoder_attention_bias,
        hp, name="decoder")
    utils.collect_named_outputs("norms", "decoder_output",
                                tf.norm(decoder_output, axis=-1))

    norm_tensors = utils.convert_collection_to_dict("norms")
    vqa_layers.summarize_tensors(norm_tensors, tag="norms/")

    # Expand dimension 1 and 2
    return tf.expand_dims(decoder_output, axis=1)
Developer: qixiuai | Project: tensor2tensor | Lines: 52 | Source: vqa_recurrent_self_attention.py


Example 11: image_encoder

def image_encoder(image_feat,
                  hparams,
                  name="image_encoder",
                  save_weights_to=None,
                  make_image_summary=True):
  """A stack of self attention layers."""

  x = image_feat
  image_hidden_size = hparams.image_hidden_size or hparams.hidden_size
  image_filter_size = hparams.image_filter_size or hparams.filter_size
  with tf.variable_scope(name):
    for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
      with tf.variable_scope("layer_%d" % layer):
        with tf.variable_scope("self_attention"):
          y = vqa_layers.multihead_attention(
              common_layers.layer_preprocess(x, hparams),
              None,
              None,
              hparams.attention_key_channels or image_hidden_size,
              hparams.attention_value_channels or image_hidden_size,
              image_hidden_size,
              hparams.num_heads,
              hparams.attention_dropout,
              attention_type=hparams.image_self_attention_type,
              save_weights_to=save_weights_to,
              make_image_summary=make_image_summary,
              scale_dotproduct=hparams.scale_dotproduct,
          )
          utils.collect_named_outputs(
              "norms", "image_feat_self_attention_%d"%(layer),
              tf.norm(y, axis=-1))
          x = common_layers.layer_postprocess(x, y, hparams)
          utils.collect_named_outputs(
              "norms", "image_feat_self_attention_postprocess_%d"%(layer),
              tf.norm(x, axis=-1))
        with tf.variable_scope("ffn"):
          y = common_layers.dense_relu_dense(
              common_layers.layer_preprocess(x, hparams),
              image_filter_size,
              image_hidden_size,
              dropout=hparams.relu_dropout,
          )
          utils.collect_named_outputs(
              "norms", "image_feat_ffn_%d"%(layer), tf.norm(y, axis=-1))
          x = common_layers.layer_postprocess(x, y, hparams)
          utils.collect_named_outputs(
              "norms", "image_feat_ffn_postprocess_%d"%(layer),
              tf.norm(x, axis=-1))
    # if normalization is done in layer_preprocess, then it should also be done
    # on the output, since the output can grow very large, being the sum of
    # a whole stack of unnormalized layer outputs.
    return common_layers.layer_preprocess(x, hparams)
Developer: qixiuai | Project: tensor2tensor | Lines: 52 | Source: vqa_self_attention.py


Example 12: project_gradient_layer

 def project_gradient_layer(gs):
     if self.config.norm == 'softmax':
         return tf.nn.softmax(gs)
     elif self.config.norm == 'euclidean':
         return gs / (tf.sqrt(tf.reduce_sum(tf.square(gs)))+1e-8)
     elif self.config.norm == 'inf':
         return gs / (tf.norm(gs, ord=np.inf)+1e-8)
     elif self.config.norm == 'max':
         return gs / (tf.reduce_max(tf.abs(gs))+1e-8)
     elif self.config.norm == False:
         return gs
     else:
         return gs / (tf.norm(gs, ord=self.config.norm)+1e-8)
Developer: 255BITS | Project: hyperchamber-gan | Lines: 13 | Source: gradient_magnitude_optimizer.py
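Each norm branch rescales the gradient to unit size under the configured norm, g ↦ g / (‖g‖ + ε), with ε = 1e-8 guarding against division by zero; 'softmax' instead maps the gradient through a softmax, and False leaves it untouched.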


Example 13: nearest

def nearest(x, means, hparams):
  """Find the nearest means to elements in x."""
  x, means = tf.stop_gradient(x), tf.stop_gradient(means)
  x_flat = tf.reshape(x, [-1, hparams.hidden_size])
  x_norm = tf.norm(x_flat, axis=-1, keep_dims=True)
  means_norm = tf.norm(means, axis=-1, keep_dims=True)
  dist = x_norm + tf.transpose(means_norm) - 2 * tf.matmul(x_flat, means,
                                                           transpose_b=True)
  _, nearest_idx = tf.nn.top_k(- dist, k=1)
  nearest_hot = tf.one_hot(tf.squeeze(nearest_idx, axis=1), hparams.v_size)
  shape = common_layers.shape_list(x)
  shape[-1] = hparams.v_size
  nearest_hot = tf.reshape(nearest_hot, shape=shape)
  return tf.stop_gradient(nearest_hot)
Developer: AranKomat | Project: tensor2tensor | Lines: 14 | Source: transformer_vae.py
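The pairwise search relies on the expansion ‖x − μ‖² = ‖x‖² + ‖μ‖² − 2xᵀμ, evaluated for all (x, mean) pairs at once via the matmul. Strictly, the identity needs squared norms; the snippet feeds unsquared tf.norm values into the first two terms, so dist is a proxy for (not exactly equal to) the squared distances.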


Example 14: _cross_l_loss

 def _cross_l_loss(self):
     """Construct L1-norm and L2-norm on cross network parameters for loss function.
     Returns:
         obj: Regular loss value on cross network parameters.
     """
     cross_l_loss = tf.zeros([1], dtype=tf.float32)
     for param in self.cross_params:
         cross_l_loss = tf.add(
             cross_l_loss, tf.multiply(self.hparams.cross_l1, tf.norm(param, ord=1))
         )
         cross_l_loss = tf.add(
             cross_l_loss, tf.multiply(self.hparams.cross_l2, tf.norm(param, ord=2))
         )
     return cross_l_loss
Developer: David-Li-L | Project: recommenders | Lines: 14 | Source: base_model.py


Example 15: _PerCentroidNormalization

  def _PerCentroidNormalization(self, unnormalized_vector):
    """Perform per-centroid normalization.

    Args:
      unnormalized_vector: [KxD] float tensor.

    Returns:
      per_centroid_normalized_vector: [KxD] float tensor, with normalized
        aggregated residuals. Some residuals may be all-zero.
      visual_words: Int tensor containing indices of visual words which are
        present for the set of features.
    """
    unnormalized_vector = tf.reshape(
        unnormalized_vector,
        [self._codebook_size, self._feature_dimensionality])
    per_centroid_norms = tf.norm(unnormalized_vector, axis=1)

    visual_words = tf.reshape(
        tf.where(
            tf.greater(per_centroid_norms, tf.sqrt(_NORM_SQUARED_TOLERANCE))),
        [-1])

    per_centroid_normalized_vector = tf.math.l2_normalize(
        unnormalized_vector, axis=1, epsilon=_NORM_SQUARED_TOLERANCE)

    return per_centroid_normalized_vector, visual_words
Developer: rder96 | Project: models | Lines: 26 | Source: feature_aggregation_extractor.py


Example 16: _make_activity_op

    def _make_activity_op(self, input_tensor):
        """ Creates the op for calculating the activity of a SOM
        :param input_tensor: A tensor to calculate the activity of. Must be of shape `[batch_size, dim]` where `dim` is
        the dimensionality of the SOM's weights.
        :return: A handle to the newly created activity op
        """
        with self._graph.as_default():
            with tf.name_scope("Activity"):
                # This constant controls the width of the gaussian.
                # The closer to 0 it is, the wider it is.
                c = tf.constant(self._c, dtype="float32")
                # Get the euclidean distance between each neuron and the input vectors
                dist = tf.norm(tf.subtract(
                        tf.expand_dims(self._weights, axis=0),
                        tf.expand_dims(input_tensor, axis=1)),
                    name="Distance")  # [batch_size, neurons]

                # Calculate the Gaussian of the activity. Units with distances closer to 0 will have activities
                # closer to 1.
                activity = tf.exp(tf.multiply(tf.pow(dist, 2), c), name="Gaussian")

                # Convert the activity into a softmax probability distribution
                if self._softmax_activity:
                    activity = tf.divide(tf.exp(activity),
                                         tf.expand_dims(tf.reduce_sum(tf.exp(activity), axis=1), axis=-1),
                                         name="Softmax")

                return tf.identity(activity, name="Output")
Developer: alexander-gabriel | Project: tensorflow-som | Lines: 28 | Source: tf_som.py
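The activity is a Gaussian of the neuron–input distance, aᵢⱼ = exp(c · dᵢⱼ²); with c = self._c negative, a distance of zero yields activity 1 and larger distances decay toward 0. When _softmax_activity is set, these activities are additionally normalized with a softmax across the neuron axis.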


Example 17: dia

def dia(model, config, scope, connectsegment, connectfeature):
	with tf.variable_scope(scope), tf.name_scope(scope):
		with tf.variable_scope('inputs'), tf.name_scope('inputs'):
			model['%s_in0length_segment' %scope] = model['%s_out0length' %connectsegment]
			model['%s_in1length_segment' %scope] = model['%s_out1length' %connectsegment]
			model['%s_in2length_segment' %scope] = model['%s_out2length' %connectsegment]
			model['%s_maxin2length_segment' %scope] = model['%s_maxout2length' %connectsegment]
			model['%s_in0length_feature' %scope] = model['%s_out0length' %connectfeature]
			model['%s_in1length_feature' %scope] = model['%s_out1length' %connectfeature]
			model['%s_in2length_feature' %scope] = model['%s_out2length' %connectfeature]
			model['%s_maxin2length_feature' %scope] = model['%s_maxout2length' %connectfeature]
			model['%s_inputs_segment' %scope] = tf.squeeze(model['%s_outputs' %connectsegment], 2, '%s_inputs_segment' %scope)
			model['%s_inputs_feature' %scope] = tf.unstack(tf.transpose(model['%s_outputs' %connectfeature], [1, 0, 2]), name = '%s_inputs_feature' %scope)
			model['%s_out0length' %scope] = model['%s_in0length_feature' %scope]
			model['%s_out1length' %scope] = config.getint('global', 'speaker_size')
			model['%s_out2length' %scope] = tf.stack([config.getint('global', 'speaker_size') for _ in xrange(model['%s_out0length' %scope])])
			model['%s_maxout2length' %scope] = config.getint('global', 'speaker_size')

		with tf.variable_scope('outputs'), tf.name_scope('outputs'):
			model['%s_topsegmentvalues' %scope], model['%s_topsegmentindices' %scope] = tf.nn.top_k(tf.transpose(model['%s_inputs_segment' %scope], [1, 0]), config.getint('global', 'speaker_size'))
			model['%s_scores' %scope] = [tf.gather(feature, index) for feature, index in zip(model['%s_inputs_feature' %scope], tf.unstack(model['%s_topsegmentindices' %scope]))]
			model['%s_normalizedscores' %scope]  = [tf.divide(score, tf.norm(score, 2, 1, True)) for score in model['%s_scores' %scope]]
			model['%s_outputs' %scope] = tf.add(0.5, tf.multiply(0.5, tf.stack([tf.matmul(score, score, transpose_b = True) for score in model['%s_normalizedscores' %scope]], name = '%s_outputs' %scope)))

	return model
Developer: aaiijmrtt | Project: DEEPSPEECH | Lines: 25 | Source: dia.py


Example 18: step

    def step(self, inputs, states):
        # Split the hidden state into blocks (each U, V, W are shared across blocks).
        state = tf.split(states[0], self._num_blocks, axis=1)
        print('state after split', state)

        next_states = []
        for j, state_j in enumerate(state):  # Hidden State (j)
            key_j = tf.expand_dims(self._keys[j], axis=0)
            gate_j = self.get_gate(state_j, key_j, inputs)
            candidate_j = self.get_candidate(state_j, key_j, inputs, self.U, self.V, self.W, self.U_bias)

            # Equation 4: h_j <- h_j + g_j * h_j^~
            # Perform an update of the hidden state (memory).
            state_j_next = state_j + tf.expand_dims(gate_j, -1) * candidate_j

            # Equation 5: h_j <- h_j / \norm{h_j}
            # Forget previous memories by normalization.
            state_j_next_norm = tf.norm(
                tensor=state_j_next,
                ord='euclidean',
                axis=-1,
                keep_dims=True)
            state_j_next_norm = tf.where(
                tf.greater(state_j_next_norm, 0.0),
                state_j_next_norm,
                tf.ones_like(state_j_next_norm))
            state_j_next = state_j_next / state_j_next_norm
            next_states.append(state_j_next)
        state_next = tf.concat(next_states, axis=1)
        return state_next, [state_next]
Developer: hsakas | Project: Recurrent-Entity-Network-EntNet | Lines: 30 | Source: RENLayer.py


Example 19: build_graph

 def build_graph(self, left, right, gt_flow):
     x = self.preprocess(left, right)
     prediction = self.graph_structure(x)
     prediction = self.postprocess(prediction)
     tf.identity(prediction, name="prediction")
     # endpoint error
     tf.reduce_mean(tf.norm(prediction - gt_flow, axis=1), name='epe')
Developer: quanlzheng | Project: tensorpack | Lines: 7 | Source: flownet_models.py
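The logged metric is the average endpoint error, EPE = (1/N) Σᵢ ‖fᵢ − gᵢ‖₂: the mean Euclidean distance between predicted and ground-truth flow vectors. Taking the norm over axis=1 suggests a channels-first [batch, 2, H, W] flow layout, with the remaining axes averaged by reduce_mean.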


Example 20: build_arch

def build_arch(input, is_train, num_classes):
    data_size = int(input.get_shape()[1])
    # initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
    # bias_initializer = tf.constant_initializer(0.0)
    # weights_regularizer = tf.contrib.layers.l2_regularizer(5e-04)

    with slim.arg_scope([slim.conv2d], trainable=is_train):#, activation_fn=None, , , biases_initializer=bias_initializer, weights_regularizer=weights_regularizer
        with tf.variable_scope('conv1') as scope:
            output = slim.conv2d(input, num_outputs=256, kernel_size=[9, 9], stride=1, padding='VALID', scope=scope)
            data_size = data_size-8
            assert output.get_shape() == [cfg.batch_size, data_size, data_size, 256]
            tf.logging.info('conv1 output shape: {}'.format(output.get_shape()))

        with tf.variable_scope('primary_caps_layer') as scope:
            output = slim.conv2d(output, num_outputs=32*8, kernel_size=[9, 9], stride=2, padding='VALID', scope=scope)#, activation_fn=None
            output = tf.reshape(output, [cfg.batch_size, -1, 8])
            output = squash(output)
            data_size = int(np.floor((data_size-8)/2))
            assert output.get_shape() == [cfg.batch_size, data_size*data_size*32, 8]
            tf.logging.info('primary capsule output shape: {}'.format(output.get_shape()))

        with tf.variable_scope('digit_caps_layer') as scope:
            with tf.variable_scope('u') as scope:
                u_hats = vec_transform(output, num_classes, 16)
                assert u_hats.get_shape() == [cfg.batch_size, num_classes, data_size*data_size*32, 16]
                tf.logging.info('digit_caps_layer u_hats shape: {}'.format(u_hats.get_shape()))

            with tf.variable_scope('routing') as scope:
                output = dynamic_routing(u_hats)
                assert output.get_shape() == [cfg.batch_size, num_classes, 16]
                tf.logging.info('the output capsule has shape: {}'.format(output.get_shape()))

        output_len = tf.norm(output, axis=-1)

    return output, output_len
Developer: lzqkean | Project: deep_learning | Lines: 35 | Source: capsnet_dynamic_routing.py
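As in dynamic-routing capsule networks (Sabour et al.), the class score for each digit capsule is the length of its 16-D output vector, so tf.norm(output, axis=-1) turns the [batch_size, num_classes, 16] capsule tensor into [batch_size, num_classes] class activations.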



Note: The tensorflow.norm function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before redistributing or using the code, and do not reproduce without permission.

