
Python tensorflow.squeeze Function Code Examples


This article collects typical usage examples of the Python function tensorflow.squeeze. If you have been wondering what tf.squeeze does, how to call it, or where it is used in practice, the curated code examples below should help.



The following 20 code examples of the squeeze function are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
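As a quick orientation, here is a minimal sketch of what tf.squeeze does, assuming TensorFlow 1.x (which these snippets target): it removes size-1 dimensions from a tensor's shape, either all of them or only the axes named explicitly.

    import tensorflow as tf

    x = tf.zeros([1, 3, 1, 4])      # shape (1, 3, 1, 4)
    a = tf.squeeze(x)               # all size-1 dims removed -> shape (3, 4)
    b = tf.squeeze(x, axis=[0])     # only axis 0 removed -> shape (3, 1, 4)

Naming an axis whose size is not 1 raises an error, so the examples below only squeeze dimensions they know to be singleton. Older releases spelled the axis argument squeeze_dims, which is why several examples pass squeeze_dims=[...].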

Example 1: step_loss

    def step_loss(self, state, action, time):
        # cost:
        x_h = tf.slice(state, [0, self.x_h_field[0]], [-1, 1])
        x_t = tf.slice(state, [0, self.x_t_field[0]], [-1, self.n_t])

        # 0. smooth acceleration policy
        cost_accel = tf.square(action)
        cost_accel_d = tf.mul(tf.pow(self.gamma, time), cost_accel)

        # 1. forcing the host to move forward (until the right point of the roundabout)
        cost_prog = tf.square(self.x_goal - x_h)
        cost_prog_d = tf.mul(tf.pow(self.gamma, time), cost_prog)
        cost_prog_d = tf.squeeze(cost_prog_d, squeeze_dims=[1])

        # 2. keeping distance from vehicles ahead
        # distance to other vehicles
        x_abs_diffs = tf.abs(x_h - x_t)

        # punish only vehicles closer than "require distance"
        cost_acci = tf.nn.relu(self.require_distance - x_abs_diffs)

        # punish only w.r.t vehicles ahead
        cost_acci = tf.mul(cost_acci, tf.to_float(x_h < x_t))

        # sum over all vehicles
        cost_acci = tf.reduce_sum(cost_acci)

        # punish only when host is inside the roundabout (or very close to enter)
        cost_acci = tf.mul(cost_acci, tf.to_float(x_h > -0.5 * self.host_length))

        cost_acci_d = tf.mul(tf.pow(self.gamma, time), cost_acci)
        cost_acci_d = tf.squeeze(cost_acci_d, squeeze_dims=[1])

        return tf.transpose(tf.pack(values=[cost_accel_d, cost_prog_d, cost_acci_d], name='scan_return'))
Author: bentzinir | Project: Buffe | Lines: 34 | Source: roundabout.py
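Several snippets in this collection, including Example 1 above, use TensorFlow 0.x names that were renamed in 1.0. For readers adapting them, a minimal sketch of the modern spellings; the tensors here are illustrative placeholders, not part of the original project:

    import tensorflow as tf

    a = tf.ones([4, 1])
    prod    = tf.multiply(a, 2.0)         # tf.mul  -> tf.multiply
    diff    = tf.subtract(a, 0.5)         # tf.sub  -> tf.subtract
    stacked = tf.stack([a, a])            # tf.pack -> tf.stack
    flat    = tf.squeeze(a, axis=[1])     # squeeze_dims= -> axis=
    mask    = tf.cast(a > 0, tf.float32)  # tf.to_float(...) -> tf.cast(..., tf.float32)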


Example 2: entropy

    def entropy(self, n, p):
        # Note that given n and p where p is a probability vector of
        # length k, the entropy requires a sum over all
        # possible configurations of a k-vector which sums to n. It's
        # expensive.
        # http://stackoverflow.com/questions/36435754/generating-a-numpy-array-with-all-combinations-of-numbers-that-sum-to-less-than
        sess = tf.Session()
        n = sess.run(tf.cast(tf.squeeze(n), dtype=tf.int32))
        sess.close()
        p = tf.cast(tf.squeeze(p), dtype=tf.float32)
        if isinstance(n, np.int32):
            k = get_dims(p)[0]
            max_range = np.zeros(k, dtype=np.int32) + n
            x = np.array([i for i in product(*(range(i+1) for i in max_range))
                                 if sum(i)==n])
            logpmf = self.logpmf(x, n, p)
            return tf.reduce_sum(tf.mul(tf.exp(logpmf), logpmf))
        else:
            out = []
            for j in range(n.shape[0]):
                k = get_dims(p)[0]
                max_range = np.zeros(k, dtype=np.int32) + n[j]
                x = np.array([i for i in product(*(range(i+1) for i in max_range))
                                     if sum(i)==n[j]])
                logpmf = self.logpmf(x, n[j], p[j, :])
                out += [tf.reduce_sum(tf.mul(tf.exp(logpmf), logpmf))]

            return tf.pack(out)
Author: crack521 | Project: edward | Lines: 28 | Source: distributions.py


Example 3: construct_embedding

  def construct_embedding(self):
    """Builds an embedding function on top of images.

    Method to be overridden by implementations.

    Returns:
      embeddings: A 2-d float32 `Tensor` of shape [batch_size, embedding_size]
        holding the embedded images.
    """
    with tf.variable_scope('tcn_net', reuse=self._reuse) as vs:
      self._adaptation_scope = vs.name
      net = self._pretrained_output

      # Define some adaptation blocks on top of the pre-trained resnet output.
      adaptation_blocks = []
      adaptation_block_params = [map(
          int, i.split('_')) for i in self._config.adaptation_blocks.split('-')]
      for i, (depth, num_units) in enumerate(adaptation_block_params):
        block = resnet_v2.resnet_v2_block(
            'adaptation_block_%d' % i, base_depth=depth, num_units=num_units,
            stride=1)
        adaptation_blocks.append(block)

      # Stack them on top of the resent output.
      net = resnet_utils.stack_blocks_dense(
          net, adaptation_blocks, output_stride=None)

      # Average pool the output.
      net = tf.reduce_mean(net, [1, 2], name='adaptation_pool', keep_dims=True)

      if self._config.emb_connection == 'fc':
        # Use fully connected layer to project to embedding layer.
        fc_hidden_sizes = self._config.fc_hidden_sizes
        if fc_hidden_sizes == 'None':
          fc_hidden_sizes = []
        else:
          fc_hidden_sizes = map(int, fc_hidden_sizes.split('_'))
        fc_hidden_keep_prob = self._config.dropout.keep_fc
        net = tf.squeeze(net)
        for fc_hidden_size in fc_hidden_sizes:
          net = slim.layers.fully_connected(net, fc_hidden_size)
          if fc_hidden_keep_prob < 1.0:
            net = slim.dropout(net, keep_prob=fc_hidden_keep_prob,
                               is_training=self._is_training)

        # Connect last FC layer to embedding.
        embedding = slim.layers.fully_connected(net, self._embedding_size,
                                                activation_fn=None)
      else:
        # Use 1x1 conv layer to project to embedding layer.
        embedding = slim.conv2d(
            net, self._embedding_size, [1, 1], activation_fn=None,
            normalizer_fn=None, scope='embedding')
        embedding = tf.squeeze(embedding)

      # Optionally L2 normalize the embedding.
      if self._embedding_l2:
        embedding = tf.nn.l2_normalize(embedding, dim=1)

      return embedding
Author: ALISCIFP | Project: models | Lines: 60 | Source: model.py


Example 4: testSlowVsFast

  def testSlowVsFast(self):
    model, features = get_model(transformer.transformer_small())

    decode_length = 3

    out_logits, _ = model(features)
    out_logits = tf.squeeze(out_logits, axis=[2, 3])
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
        labels=tf.reshape(features["targets"], [-1]))
    loss = tf.reduce_mean(loss)
    apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)

    with self.test_session():
      tf.global_variables_initializer().run()
      for _ in range(100):
        apply_grad.run()

    model.set_mode(tf.estimator.ModeKeys.PREDICT)

    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      greedy_result = model._slow_greedy_infer(
          features, decode_length)["outputs"]
      greedy_result = tf.squeeze(greedy_result, axis=[2, 3])

      fast_result = model._greedy_infer(features, decode_length)["outputs"]

    with self.test_session():
      greedy_res = greedy_result.eval()
      fast_res = fast_result.eval()

    self.assertEqual(fast_res.shape, (BATCH_SIZE, INPUT_LENGTH + decode_length))
    self.assertAllClose(greedy_res, fast_res)
Author: qixiuai | Project: tensor2tensor | Lines: 33 | Source: transformer_test.py


Example 5: _inference

    def _inference(self, x, site, dropout):
        # Get each image from the pair
        print(x.get_shape())
        x_0 = tf.squeeze(x[:, :, :, 0])
        x_1 = tf.squeeze(x[:, :, :, 1])

        # Share weights between the two models of the pair
        with tf.variable_scope("siamese") as scope:
            model0 = self.build_model(x_0)
            scope.reuse_variables()
            model1 = self.build_model(x_1)

        # Dot product layer
        x = self.corr_layer(model0, model1)

        N, M, F = x.get_shape()
        x = tf.reshape(x, [int(N), int(M*F)])

        site = tf.expand_dims(site, axis=1)
        x = tf.concat(1, [x, site])

        for i, M in enumerate(self.M[:-1]):
            with tf.variable_scope('fc{}'.format(i + 1)):
                x = tf.nn.dropout(x, dropout)
                x = self.fc(x, M)

        # Logits linear layer
        with tf.variable_scope('logits'):
            x = tf.nn.dropout(x, dropout)
            x = self.fc(x, self.M[-1], relu=False)

        return tf.squeeze(x)  # tf.sigmoid(x)
Author: parisots | Project: gcn_metric_learning | Lines: 32 | Source: models_siamese.py


Example 6: preprocess_for_test

def preprocess_for_test(image, gt_boxes, gt_masks):


    ih, iw = tf.shape(image)[0], tf.shape(image)[1]

    ## min size resizing
    new_ih, new_iw = preprocess_utils._smallest_size_at_least(ih, iw, cfg.FLAGS.image_min_size)
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, [new_ih, new_iw], align_corners=False)
    image = tf.squeeze(image, axis=[0])

    gt_masks = tf.expand_dims(gt_masks, -1)
    gt_masks = tf.cast(gt_masks, tf.float32)
    gt_masks = tf.image.resize_nearest_neighbor(gt_masks, [new_ih, new_iw], align_corners=False)
    gt_masks = tf.cast(gt_masks, tf.int32)
    gt_masks = tf.squeeze(gt_masks, axis=[-1])

    scale_ratio = tf.to_float(new_ih) / tf.to_float(ih)
    gt_boxes = preprocess_utils.resize_gt_boxes(gt_boxes, scale_ratio)
    
    ## zero mean image
    image = tf.cast(image, tf.float32)
    image = image / 256.0
    image = (image - 0.5) * 2.0
    image = tf.expand_dims(image, axis=0)

    ## rgb to bgr
    image = tf.reverse(image, axis=[-1])

    return image, gt_boxes, gt_masks 
Author: Kairobo | Project: FastMaskRCNN | Lines: 30 | Source: coco_v1.py


Example 7: inference_input

def inference_input():
    """Returns ops that convert raw image data to a 4D tensor representing a single image.

    Taken from:
    https://github.com/tensorflow/serving/blob/master/tensorflow_serving/example/inception_export.py

    The input to the first op can be read using:
    tf.gfile.FastGFile(image_filename, 'r').read()

    """
    # Decode image into float range [0,1]
    jpegs = tf.placeholder(tf.string, shape=(1), name='input')
    image_buffer = tf.squeeze(jpegs, [0])
    image = tf.image.decode_jpeg(image_buffer, channels=3)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = tf.image.central_crop(image, central_fraction=0.875)
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, [FLAGS.image_size, FLAGS.image_size], align_corners=False)
    image = tf.squeeze(image, [0])

    # Rescale the image to [-1,1]
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    images = tf.expand_dims(image, 0)

    return images, jpegs
Author: andreesteva | Project: library | Lines: 26 | Source: make_inference_graph.py


Example 8: randomly_scale_image_and_label

def randomly_scale_image_and_label(image, label=None, scale=1.0):
  """Randomly scales image and label.

  Args:
    image: Image with shape [height, width, 3].
    label: Label with shape [height, width, 1].
    scale: The value to scale image and label.

  Returns:
    Scaled image and label.
  """
  # No random scaling if scale == 1.
  if scale == 1.0:
    return image, label
  image_shape = tf.shape(image)
  new_dim = tf.cast(
      tf.cast([image_shape[0], image_shape[1]], tf.float32) * scale,
      tf.int32)

  # Need squeeze and expand_dims because image interpolation takes
  # 4D tensors as input.
  image = tf.squeeze(tf.image.resize_bilinear(
      tf.expand_dims(image, 0),
      new_dim,
      align_corners=True), [0])
  if label is not None:
    label = tf.squeeze(tf.image.resize_nearest_neighbor(
        tf.expand_dims(label, 0),
        new_dim,
        align_corners=True), [0])

  return image, label
Author: Exscotticus | Project: models | Lines: 32 | Source: preprocess_utils.py
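Examples 6 and 8 both rely on the standard round-trip for TensorFlow's image-resizing ops, which only accept 4-D batched input: expand_dims adds a batch axis, the resize runs, and squeeze removes the axis again. A minimal sketch of the shape flow, with illustrative sizes:

    import tensorflow as tf

    img = tf.zeros([224, 224, 3])         # (height, width, channels)
    batched = tf.expand_dims(img, 0)      # (1, 224, 224, 3)
    resized = tf.image.resize_bilinear(batched, [112, 112], align_corners=True)
    img_out = tf.squeeze(resized, [0])    # back to 3-D: (112, 112, 3)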


Example 9: build_network

    def build_network(self):
        net_tensors = self.net_tensors
        with self.net_graph.as_default(), tf.device(self.net_device):
            logits = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, self.image_classes))
            labels = tf.placeholder(dtype=tf.int32, shape=(self.batch_size,))
            lambs = tf.placeholder(dtype=tf.float32, shape=(self.image_classes,))
            # apply a sigmoid function to the logits, then transpose
            logits = tf.transpose(framwork.sig_func(logits))
            # according to the labels, drop rows that do not occur in labels

            labels_unique = tf.constant(range(self.image_classes), dtype=tf.int32)
            labels_num = self.image_classes
            logits = tf.gather(logits, indices=labels_unique)
            lambs = tf.gather(lambs, indices=labels_unique)
            # set the value of each row to True when it occurs in labels
            templete = tf.tile(tf.expand_dims(labels_unique, dim=1), [1, self.batch_size])
            labels_expand = tf.tile(tf.expand_dims(labels, dim=0), [labels_num, 1])
            indict_logic = tf.equal(labels_expand, templete)
            # split the tensor along rows
            logit_list = tf.split(0, labels_num, logits)
            indict_logic_list = tf.split(0, labels_num, indict_logic)
            lamb_list = tf.split(0, self.image_classes, lambs)
            logit_list = [tf.squeeze(item) for item in logit_list]
            indict_logic_list = [tf.squeeze(item) for item in indict_logic_list]
            left_right_tuples = list()
            for i in range(self.image_classes):
                left_right_tuples.append(framwork.lamb_func(logit_list[i], indict_logic_list[i], lamb=lamb_list[i]))
            # func = framwork.lamb_func()
            # left_right_tuples = map(func, logit_list, indict_logic_list, lamb_list)
            net_tensors.update({'left_right_tuples': left_right_tuples, 'logits': logits, 'labels': labels,
                                'lambs': lambs})
Author: chengyang317 | Project: information_pursuit | Lines: 31 | Source: infor_net.py


Example 10: build_detector

    def build_detector(self):
        img_size = self.config['image_size']
        self.image_ph = tf.placeholder(shape=[None, None, 3],
                                       dtype=tf.float32, name='img_ph')
        self.seg_ph = tf.placeholder(shape=[None, None], dtype=tf.int32, name='seg_ph')

        img = tf.image.resize_bilinear(tf.expand_dims(self.image_ph, 0),
                                       (img_size, img_size))
        self.net.create_trunk(img)

        if args.detect:
            self.net.create_multibox_head(self.loader.num_classes)
            confidence = tf.nn.softmax(tf.squeeze(self.net.outputs['confidence']))
            location = tf.squeeze(self.net.outputs['location'])
            self.nms(location, confidence, self.bboxer.tiling)

        if args.segment:
            self.net.create_segmentation_head(self.loader.num_classes)
            self.segmentation = self.net.outputs['segmentation']
            seg_shape = tf.shape(self.image_ph)[:2]
            self.segmentation = tf.image.resize_bilinear(self.segmentation, seg_shape)

            self.segmentation = tf.cast(tf.argmax(tf.squeeze(self.segmentation), axis=-1), tf.int32)
            self.segmentation = tf.reshape(self.segmentation, seg_shape)
            self.segmentation.set_shape([None, None])

            if not self.no_gt:
                easy_mask = self.seg_ph <= self.loader.num_classes
                predictions = tf.boolean_mask(self.segmentation, easy_mask)
                labels = tf.boolean_mask(self.seg_ph, easy_mask)
                self.mean_iou, self.iou_update = mean_iou(predictions, labels, self.loader.num_classes)
            else:
                self.mean_iou = tf.constant(0)
                self.iou_update = tf.constant(0)
Author: heidongxianhau | Project: blitznet | Lines: 34 | Source: detector.py


Example 11: buildConvolution

	def buildConvolution(self):
		q_embedding = self.tensors['q_embedding']
		a_embedding = self.tensors['a_embedding']
		with tf.name_scope('convolution'):
			filter_shape = (self.params['filters'][0], self.wdim, 1, self.params['nb_filter'])
			W = glorot_normal(filter_shape, name="W")
			b = tf.Variable(tf.constant(0.0, shape=(self.params['nb_filter'],)), name="b")
			q_conv = tf.nn.conv2d(
				tf.expand_dims(q_embedding, -1),
				W,
				strides=[1,1,1,1],
				padding="VALID",
				name="q_conv"
			)
			a_conv = tf.nn.conv2d(
				tf.expand_dims(a_embedding, -1),
				W,
				strides=[1,1,1,1],
				padding="VALID",
				name = "a_conv"
			)
			q_conv = tf.squeeze(q_conv, [2])
			a_conv = tf.squeeze(a_conv, [2])
			# shape = (batch, q_length, NUM_FILTERS)
			q_relu = tf.nn.relu(tf.nn.bias_add(q_conv, b), name="q_relu")
			# shape = (batch, a_length, NUM_FILTERS)
			a_relu = tf.nn.relu(tf.nn.bias_add(a_conv, b), name="q_relu")
		self.tensors['q_conv'] = q_conv
		self.tensors['a_conv'] = a_conv
		self.tensors['q_relu'] = q_relu
		self.tensors['a_relu'] = a_relu
		self.tensors.setdefault('weights', []).append(b)
		self.tensors.setdefault('summary', []).append(tf.nn.zero_fraction(a_relu))
Author: 3rduncle | Project: knowledgeflow | Lines: 33 | Source: apn.py


Example 12: _add_box_predictions_to_feature_maps

  def _add_box_predictions_to_feature_maps(self, feature_maps):
    """Adds box predictors to each feature map and returns concatenated results.

    Args:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]

    Returns:
      box_encodings: 4-D float tensor of shape [batch_size, num_anchors,
          box_code_dimension] containing predicted boxes.
      class_predictions_with_background: 2-D float tensor of shape
          [batch_size, num_anchors, num_classes+1] containing class predictions
          (logits) for each of the anchors.  Note that this tensor *includes*
          background class predictions (at class index 0).

    Raises:
      RuntimeError: if the number of feature maps extracted via the
        extract_features method does not match the length of the
        num_anchors_per_locations list that was passed to the constructor.
      RuntimeError: if box_encodings from the box_predictor does not have
        shape of the form  [batch_size, num_anchors, 1, code_size].
    """
    num_anchors_per_location_list = [1]  # one anchor per location per feature map
    if len(feature_maps) != len(num_anchors_per_location_list):
      raise RuntimeError('the number of feature maps must match the '
                         'length of self.anchors.NumAnchorsPerLocation().')
    box_encodings_list = []
    mask_encodings_list = []
    for idx, (feature_map, num_anchors_per_location
             ) in enumerate(zip(feature_maps, num_anchors_per_location_list)):
      box_predictor_scope = 'BoxPredictor_{}'.format(idx)
      box_predictions = self._box_predictor.predict(feature_map,
                                                    num_anchors_per_location,
                                                    box_predictor_scope)
      box_encodings = box_predictions[bpredictor.BOX_ENCODINGS]
      mask_encodings = box_predictions[bpredictor.MASK_PREDICTIONS]

      box_encodings_shape = box_encodings.get_shape().as_list()
      if len(box_encodings_shape) != 5 or box_encodings_shape[2] != 1:
        raise RuntimeError('box_encodings from the box_predictor must be of '
                           'shape `[batch_size, num_anchors, 1, code_size]`; '
                           'actual shape', box_encodings_shape)
      box_encodings = tf.squeeze(box_encodings, axis=2)
      mask_encodings = tf.squeeze(mask_encodings, axis=2)
      box_encodings_list.append(box_encodings)
      mask_encodings_list.append(mask_encodings)
    
    """ 
    num_predictions = sum(
        [tf.shape(box_encodings)[1] for box_encodings in box_encodings_list])
    num_anchors = self.anchors.num_boxes()
    anchors_assert = tf.assert_equal(num_anchors, num_predictions, [
        'Mismatch: number of anchors vs number of predictions', num_anchors,
        num_predictions
    ])
    """
    
    box_encodings = tf.concat(box_encodings_list, 1)
    mask_encodings = tf.concat(mask_encodings_list, 1)
    return box_encodings, mask_encodings
Author: chenxiang204 | Project: code | Lines: 60 | Source: east_meta_architectures.py


Example 13: _match

  def _match(self, similarity_matrix, valid_rows):
    """Bipartite matches a collection rows and columns. A greedy bi-partite.

    TODO(rathodv): Add num_valid_columns options to match only that many columns
    with all the rows.

    Args:
      similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
        where higher values mean more similar.
      valid_rows: A boolean tensor of shape [N] indicating the rows that are
        valid.

    Returns:
      match_results: int32 tensor of shape [M] with match_results[i]=-1
        meaning that column i is not matched and otherwise that it is matched to
        row match_results[i].
    """
    valid_row_sim_matrix = tf.gather(similarity_matrix,
                                     tf.squeeze(tf.where(valid_rows), axis=-1))
    invalid_row_sim_matrix = tf.gather(
        similarity_matrix,
        tf.squeeze(tf.where(tf.logical_not(valid_rows)), axis=-1))
    similarity_matrix = tf.concat(
        [valid_row_sim_matrix, invalid_row_sim_matrix], axis=0)
    # Convert similarity matrix to distance matrix as tf.image.bipartite tries
    # to find minimum distance matches.
    distance_matrix = -1 * similarity_matrix
    num_valid_rows = tf.reduce_sum(tf.to_float(valid_rows))
    _, match_results = image_ops.bipartite_match(
        distance_matrix, num_valid_rows=num_valid_rows)
    match_results = tf.reshape(match_results, [-1])
    match_results = tf.cast(match_results, tf.int32)
    return match_results
Author: pcm17 | Project: models | Lines: 33 | Source: bipartite_matcher.py
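The tf.where/tf.squeeze pairing in Example 13 is a common idiom worth isolating: tf.where on a boolean vector returns indices of shape [k, 1], and squeezing the last axis yields the flat index vector that tf.gather expects. A minimal sketch with illustrative values:

    import tensorflow as tf

    valid = tf.constant([True, False, True, True])
    idx = tf.where(valid)                    # shape (3, 1): [[0], [2], [3]]
    flat = tf.squeeze(idx, axis=-1)          # shape (3,): [0, 2, 3]
    valid_rows = tf.gather(tf.eye(4), flat)  # keep only the valid rows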


Example 14: conpute_loss

def conpute_loss(scores, target):
    """ Compute the perplexity of the batch 
    
    Args:
        scores: 4D tensor, shape=(BATCH_SIZE, 1, S_FRENCH, T_FRENCH)
        target: 4D tensor, shape=(BATCH_SIZE, 1, S_FRENCH, T_FRENCH)
        
    Returns:
        tf.float32 tensor
    """
    
    with tf.name_scope('Loss_computation'):
        sortie_loss = tf.squeeze(target)    
        scores = tf.squeeze(scores) 
        
        loss = tf.reduce_sum(tf.mul(scores, sortie_loss), reduction_indices=2) # Get the activation of the target token
        #loss = tf.reduce_sum(loss,reduction_indices=2)
        loss = tf.clip_by_value(loss, clip_value_min=1e-10, clip_value_max=1.0)
        #loss = 
        loss = tf.reduce_sum(tf.log(loss), reduction_indices=1)
        loss = -tf.reduce_mean(loss)
        
        l2_weights = 0.00
        with tf.variable_scope('Embeddings', reuse=True):
            w = tf.get_variable('weights')
            b = tf.get_variable('biases')
            loss = loss + l2_weights*tf.nn.l2_loss(w) + l2_weights*tf.nn.l2_loss(b)
            
        with tf.variable_scope('Decoding', reuse=True):
            w = tf.get_variable('weights')
            b = tf.get_variable('biases')
            loss = loss + l2_weights*tf.nn.l2_loss(w) + l2_weights*tf.nn.l2_loss(b)
        
        return loss
Author: alexisrosuel | Project: NMT | Lines: 34 | Source: NMT.py


Example 15: convolution

 def convolution(self, inputs, num_units):
     x = tf.expand_dims(inputs, 3)
     chan_in = 1
     
     #Bigram
     w_bigram = tf.get_variable("w_bigram", shape= [2,50,chan_in,num_units],
                              initializer= tf.contrib.layers.xavier_initializer_conv2d())
     b_bigram = tf.get_variable("b_bigram", shape= [num_units])
     y_bigram = self.nonlin(tf.nn.conv2d(x, w_bigram, strides= [1,1,1,1], padding='VALID') + b_bigram)
     h_bigram = tf.reduce_max(tf.squeeze(y_bigram) , 1)
     
     #Trigram
     w_trigram = tf.get_variable("w_trigram", shape= [3,50,chan_in,num_units],
                              initializer= tf.contrib.layers.xavier_initializer_conv2d())
     b_trigram = tf.get_variable("b_trigram", shape= [num_units])
     y_trigram = self.nonlin(tf.nn.conv2d(x, w_trigram, strides= [1,1,1,1], padding='VALID') + b_trigram)
     h_trigram = tf.reduce_max(tf.squeeze(y_trigram) , 1)
     
     #Quin-gram (5-gram)
     w_quingram = tf.get_variable("w_quingram", shape= [5,50,chan_in,num_units],
                              initializer= tf.contrib.layers.xavier_initializer_conv2d())
     b_quingram = tf.get_variable("b_quingram", shape= [num_units])
     y_quingram = self.nonlin(tf.nn.conv2d(x, w_quingram, strides= [1,1,1,1], padding='VALID') + b_quingram)
     h_quingram = tf.reduce_max(tf.squeeze(y_quingram) , 1)
     
     if self.hyperparams['conv_type'] == 'bigram':
         h = h_bigram
     elif self.hyperparams['conv_type'] == 'trigram':
         h = h_trigram
     elif self.hyperparams['conv_type'] == 'quingram':
         h = h_quingram            
     elif self.hyperparams['conv_type'] == 'inception':
         h = tf.concat(1, [h_bigram, h_trigram, h_quingram])
         
     return h
Author: akuefler | Project: GenerativeModels | Lines: 35 | Source: networks.py


Example 16: unpool_layer2x2

    def unpool_layer2x2(self, x, raveled_argmax, out_shape):
        argmax = self.unravel_argmax(raveled_argmax, tf.to_int64(out_shape))
        output = tf.zeros([out_shape[1], out_shape[2], out_shape[3]])

        height = tf.shape(output)[0]
        width = tf.shape(output)[1]
        channels = tf.shape(output)[2]

        t1 = tf.to_int64(tf.range(channels))
        t1 = tf.tile(t1, [((width + 1) // 2) * ((height + 1) // 2)])
        t1 = tf.reshape(t1, [-1, channels])
        t1 = tf.transpose(t1, perm=[1, 0])
        t1 = tf.reshape(t1, [channels, (height + 1) // 2, (width + 1) // 2, 1])

        t2 = tf.squeeze(argmax)
        t2 = tf.pack((t2[0], t2[1]), axis=0)
        t2 = tf.transpose(t2, perm=[3, 1, 2, 0])

        t = tf.concat(3, [t2, t1])
        indices = tf.reshape(t, [((height + 1) // 2) * ((width + 1) // 2) * channels, 3])

        x1 = tf.squeeze(x)
        x1 = tf.reshape(x1, [-1, channels])
        x1 = tf.transpose(x1, perm=[1, 0])
        values = tf.reshape(x1, [-1])

        delta = tf.SparseTensor(indices, values, tf.to_int64(tf.shape(output)))
        return tf.expand_dims(tf.sparse_tensor_to_dense(tf.sparse_reorder(delta)), 0)
Author: BenJamesbabala | Project: Tensorflow-DeconvNet-Segmentation | Lines: 28 | Source: DeconvNet.py


Example 17: _log_unnormalized_prob

 def _log_unnormalized_prob(self, x):
   mean = tf.squeeze(tf.gather(x, [0], axis=-1), axis=-1)
   precision = self._maybe_assert_valid_sample(
       tf.squeeze(tf.gather(x, [1], axis=-1), axis=-1))
   return (tf.math.xlogy(self.concentration - 0.5, precision)
           - self.rate * precision
           - 0.5 * self._lambda * precision * tf.square(mean - self.loc))
Author: imito | Project: odin | Lines: 7 | Source: normal_gamma.py


Example 18: get_content_based_attention_vectors

            def get_content_based_attention_vectors(query_vectors):
                '''
                    function that returns the alpha_content vector using the yt-1 (query vectors)
                '''
                # use the W_f and b_f to transform the query_vectors to the shape of f_values
                f_trans_query_vectors = tf.matmul(W_f, tf.transpose(query_vectors)) + b_f
                # use the W_c and b_c to transform the query_vectors to the shape of h_values
                h_trans_query_vectors = tf.matmul(W_c, tf.transpose(query_vectors)) + b_c

                # transpose and expand the dims of the f_trans_query_vectors
                f_trans_query_matrices = tf.expand_dims(tf.transpose(f_trans_query_vectors), axis=-1)
                # obtain the field attention_values by using the matmul operation
                field_attention_values = tf.matmul(tf_field_embedded, f_trans_query_matrices)

                # perform the same process for the h_trans_query_vectors
                h_trans_query_matrices = tf.expand_dims(tf.transpose(h_trans_query_vectors), axis=-1)
                hidden_attention_values = tf.matmul(encoded_input, h_trans_query_matrices)

                # drop the last dimension (1 sized)
                field_attention_values = tf.squeeze(field_attention_values, axis=[-1])
                hidden_attention_values = tf.squeeze(hidden_attention_values, axis=[-1])


                # softmax over the element-wise product of the two attention values
                ret_value = tf.nn.softmax(field_attention_values * hidden_attention_values, name="softmax")

                # return the element wise multiplied values followed by softmax
                return ret_value
Author: codealphago | Project: natural-language-summary-generation-from-structured-data | Lines: 28 | Source: order_planner_without_copynet.py


Example 19: tf_format_mnist_images

def tf_format_mnist_images(X, Y, Y_, n=100, lines=10):
    correct_prediction = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))
    correctly_recognised_indices = tf.squeeze(tf.where(correct_prediction), [1])  # indices of correctly recognised images
    incorrectly_recognised_indices = tf.squeeze(tf.where(tf.logical_not(correct_prediction)), [1]) # indices of incorrectly recognised images
    everything_incorrect_first = tf.concat([incorrectly_recognised_indices, correctly_recognised_indices], 0) # images reordered with indices of unrecognised images first
    everything_incorrect_first = tf.slice(everything_incorrect_first, [0], [n]) # compute first 100 only - no space to display more anyway
    # compute n=100 digits to display only
    Xs = tf.gather(X, everything_incorrect_first)
    Ys = tf.gather(Y, everything_incorrect_first)
    Ys_ = tf.gather(Y_, everything_incorrect_first)
    correct_prediction_s = tf.gather(correct_prediction, everything_incorrect_first)

    digits_left = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_left())
    correct_tags = tf.gather(digits_left, tf.argmax(Ys_, 1)) # correct digits to be printed on the images
    digits_right = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_right())
    computed_tags = tf.gather(digits_right, tf.argmax(Ys, 1)) # computed digits to be printed on the images
    #superimposed_digits = correct_tags+computed_tags
    superimposed_digits = tf.where(correct_prediction_s, tf.zeros_like(correct_tags),correct_tags+computed_tags) # only print the correct and computed digits on unrecognised images
    correct_bkg   = tf.reshape(tf.tile([1.3,1.3,1.3], [28*28]), [1, 28,28,3]) # white background
    incorrect_bkg = tf.reshape(tf.tile([1.3,1.0,1.0], [28*28]), [1, 28,28,3]) # red background
    recognised_bkg = tf.gather(tf.concat([incorrect_bkg, correct_bkg], 0), tf.cast(correct_prediction_s, tf.int32)) # pick either the red or the white background depending on recognised status

    I = tf.image.grayscale_to_rgb(Xs)
    I = ((1-(I+superimposed_digits))*recognised_bkg)/1.3 # stencil extra data on top of images and reorder them unrecognised first
    I = tf.image.convert_image_dtype(I, tf.uint8, saturate=True)
    Islices = [] # 100 images => 10x10 image block
    for imslice in range(lines):
        Islices.append(tf.concat(tf.unstack(tf.slice(I, [imslice*n//lines,0,0,0], [n//lines,28,28,3])), 1))
    I = tf.concat(Islices, 0)
    return I
Author: Spandyie | Project: tensorflow-mnist-tutorial | Lines: 30 | Source: tensorflowvisu.py


Example 20: compute_accuracy

  def compute_accuracy(x, l, mask):
    """Compute model accuracy."""
    preds = ch_model.get_probs(x)
    preds = tf.squeeze(preds)
    preds = tf.argmax(preds, -1, output_type=l.dtype)

    _, acc_update_op = tf.metrics.accuracy(l, preds, weights=mask)

    if FLAGS.surrogate_attack:
      preds = sur_ch_model.get_probs(x)
      preds = tf.squeeze(preds)
      preds = tf.argmax(preds, -1, output_type=l.dtype)
      acc_update_op = tf.tuple((acc_update_op,
                                tf.metrics.accuracy(l, preds, weights=mask)[1]))

    sess.run(tf.initialize_local_variables())
    for i in range(FLAGS.eval_steps):
      tf.logging.info(
          "\tEvaluating batch [%d / %d]" % (i + 1, FLAGS.eval_steps))
      acc = sess.run(acc_update_op)
    if FLAGS.surrogate_attack:
      tf.logging.info("\tFinal acc: (%.4f, %.4f)" % (acc[0], acc[1]))
    else:
      tf.logging.info("\tFinal acc: %.4f" % acc)
    return acc
Author: qixiuai | Project: tensor2tensor | Lines: 25 | Source: t2t_attack.py



Note: The tensorflow.squeeze examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by many programmers; copyright belongs to the original authors, and any further use must follow the corresponding project's license. Do not reproduce without permission.

