
Python tensorflow.split Function Code Examples


This article collects and summarizes typical usage examples of Python's tensorflow.split function. If you are wrestling with questions such as: what exactly does split do? how is it called? what does it look like in real code? then the hand-picked code examples below may help.



The following presents 20 code examples of the split function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
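Before the examples, here is a minimal standalone sketch of the two calling patterns of tf.split (assuming TensorFlow 2.x; variable names are illustrative):

import tensorflow as tf

x = tf.reshape(tf.range(12, dtype=tf.float32), [3, 4])

# Pattern 1: an integer count yields equal-sized pieces along `axis`.
a, b = tf.split(x, num_or_size_splits=2, axis=1)       # two (3, 2) tensors

# Pattern 2: a list of sizes yields unequal pieces; the sizes must sum to the axis length.
c, d = tf.split(x, num_or_size_splits=[1, 3], axis=1)  # (3, 1) and (3, 3)

print(a.shape, b.shape)  # (3, 2) (3, 2)
print(c.shape, d.shape)  # (3, 1) (3, 3)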

Example 1: _composition_function

 def _composition_function(self, inputs, length, init_state=None):
     if self._composition == "GRU":
         cell = GRUCell(self._size)
         return dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                            initial_state=init_state, dtype=tf.float32)[0]
     elif self._composition == "LSTM":
         cell = BasicLSTMCell(self._size)
         init_state = tf.concat(1, [tf.zeros_like(init_state, tf.float32), init_state]) if init_state else None
         outs = dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                            initial_state=init_state, dtype=tf.float32)[0]
         return outs
     elif self._composition == "BiGRU":
         cell = GRUCell(self._size // 2, self._size)
         init_state_fw, init_state_bw = tf.split(1, 2, init_state) if init_state else (None, None)
         with tf.variable_scope("forward"):
             fw_outs = dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                                   initial_state=init_state_fw, dtype=tf.float32)[0]
         with tf.variable_scope("backward"):
             rev_inputs = tf.reverse_sequence(tf.pack(inputs), length, 0, 1)
             rev_inputs = [tf.reshape(x, [-1, self._size]) for x in tf.split(0, len(inputs), rev_inputs)]
             bw_outs = dynamic_rnn(cell, rev_inputs, sequence_length=length, time_major=True,
                                   initial_state=init_state_bw, dtype=tf.float32)[0]
             bw_outs = tf.reverse_sequence(tf.pack(bw_outs), length, 0, 1)
             bw_outs = [tf.reshape(x, [-1, self._size]) for x in tf.split(0, len(inputs), bw_outs)]
         return [tf.concat(1, [fw_out, bw_out]) for fw_out, bw_out in zip(fw_outs, bw_outs)]
     else:
         raise NotImplementedError("Other compositions not implemented yet.")
Developer: MorLong, Project: qa_network, Lines: 27, Source: qa_network.py
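Note that Example 1 was written against the pre-1.0 TensorFlow API, in which tf.split took positional arguments as tf.split(split_dim, num_split, value), and tf.pack (now tf.stack) and tf.concat(axis, values) were current. A rough modern equivalent of its split call, sketched with an illustrative stand-in tensor:

import tensorflow as tf

init_state = tf.zeros([4, 6])  # illustrative stand-in for the RNN initial state

# Legacy (TF <= 0.12) call, as written in Example 1:
#   init_state_fw, init_state_bw = tf.split(1, 2, init_state)
# Modern equivalent: the value comes first, followed by keyword arguments.
init_state_fw, init_state_bw = tf.split(init_state, num_or_size_splits=2, axis=1)
print(init_state_fw.shape, init_state_bw.shape)  # (4, 3) (4, 3)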


Example 2: _g_recurrence_1

        def _g_recurrence_1(i, x_t, input_x, gen_x, h_tm1, h_tm1_manager, last_goal, real_goal, give_num):
            cur_sen = tf.split(
                tf.concat([tf.split(input_x, [i, self.sequence_length - i], 1)[0], self.padding_array], 1),
                [self.sequence_length, i], 1)[0]
            with tf.variable_scope(self.scope):
                feature = self.FeatureExtractor_unit(cur_sen, self.drop_out)

            h_t_manager = self.g_manager_recurrent_unit(feature, h_tm1_manager)
            sub_goal = self.g_manager_output_unit(h_t_manager)
            sub_goal = tf.nn.l2_normalize(sub_goal, 1)

            h_t_Worker = tf.cond(i > 0, lambda: self.g_worker_recurrent_unit(x_t, h_tm1),
                                 lambda: h_tm1)  # hidden_memory_tuple

            real_sub_goal = tf.cond(i > 0, lambda: tf.add(last_goal, sub_goal), lambda: real_goal)
            # real_goal_array = real_goal_array.write(i, real_sub_goal)

            x_tp1 = tf.cond(i > 0, lambda: ta_emb_x.read(i - 1), lambda: x_t)

            # hidden_memory_tuple
            with tf.control_dependencies([cur_sen]):
                gen_x = tf.cond(i > 0, lambda: gen_x.write(i - 1, ta_x.read(i - 1)), lambda: gen_x)
            return i + 1, x_tp1, input_x, gen_x, h_t_Worker, h_t_manager, \
                   tf.cond(((i) % self.step_size) > 0, lambda: real_sub_goal,
                           lambda: tf.constant(0.0, shape=[self.batch_size, self.goal_out_size])), \
                   tf.cond(((i) % self.step_size) > 0, lambda: real_goal, lambda: real_sub_goal), give_num
Developer: IshJ, Project: Texygen, Lines: 26, Source: LeakganGenerator.py
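The nested split/concat at the top of Example 2 keeps the first i tokens of a sequence and zero-pads it back to full length. A standalone sketch of that pattern with concrete shapes (names and sizes here are illustrative, not from the original project):

import tensorflow as tf

batch, seq_len, i = 2, 8, 3
input_x = tf.reshape(tf.range(batch * seq_len), [batch, seq_len])
padding_array = tf.zeros_like(input_x)

# Keep the first i tokens, append padding, then trim back to seq_len.
prefix = tf.split(input_x, [i, seq_len - i], axis=1)[0]    # (batch, i)
cur_sen = tf.split(tf.concat([prefix, padding_array], axis=1),
                   [seq_len, i], axis=1)[0]                # (batch, seq_len)
print(cur_sen.shape)  # (2, 8)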


Example 3: build

    def build(self):
        """None
        Build the model graph
        :return:
        """
        with tf.name_scope('G_'):
            self.predict_g = self.__G__()
            self.predict_g2 = self.__G2__()

        with tf.name_scope('D_'):

            # Create reference examples
            # Input d holds real&imaginary values. The discriminative decision based on reconstructed image
            self.reconstructed_image_reference = self.get_reconstructed_image(real=self.input_d['real'],
                                                                              imag=self.input_d['imag'], name='Both_gt')

            predict_g2_stacked = tf.stack([self.predict_g2['real'][:,0,:,:], self.predict_g2['imag'][:,0,:,:]], axis=1)

            self.predict, self.predict_logits = self.__D__([self.reconstructed_image_reference, predict_g2_stacked])

            self.predict_d, self.predict_d_for_g = tf.split(value=self.predict, num_or_size_splits=2, axis=0)
            self.predict_d_logits, self.predict_d_logits_for_g = tf.split(value=self.predict_logits,
                                                                          num_or_size_splits=2, axis=0)
            self.clip_weights = self.__clip_weights__()

        with tf.name_scope('loss'):
            # self.loss_g = self.__loss_g__(predict=self.predict_g, self.labels, reg=self.regularization_sum)
            self.__loss__()

        with tf.name_scope('training'):
            self.train_op_d, self.train_op_g = self.__training__(learning_rate=self.FLAGS.learning_rate)

        with tf.name_scope('evaluation'):
            # Calculate accuracy L2 norm
            self.evaluation = self.__evaluation__(predict=self.predict_g, labels=self.labels)
Developer: shohad25, Project: thesis, Lines: 35, Source: k_space_wgan_gl_g2_unet_Gloss.py


Example 4: backward_grads

  def backward_grads(self, y, dy, training=True):
    """Manually compute backward gradients given input and output grads."""
    dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=self.axis)
    y1, y2 = tf.split(y, num_or_size_splits=2, axis=self.axis)

    with tf.GradientTape() as gtape:
      gtape.watch(y1)
      gy1 = self.g(y1, training=training)
    grads_combined = gtape.gradient(
        gy1, [y1] + self.g.trainable_variables, output_gradients=dy2)
    dg = grads_combined[1:]
    dx1 = dy1 + grads_combined[0]
    # This doesn't affect eager execution, but improves memory efficiency with
    # graphs
    with tf.control_dependencies(dg + [dx1]):
      x2 = y2 - gy1

    with tf.GradientTape() as ftape:
      ftape.watch(x2)
      fx2 = self.f(x2, training=training)
    grads_combined = ftape.gradient(
        fx2, [x2] + self.f.trainable_variables, output_gradients=dx1)
    df = grads_combined[1:]
    dx2 = dy2 + grads_combined[0]
    # Same behavior as above
    with tf.control_dependencies(df + [dx2]):
      x1 = y1 - fx2

    x = tf.concat([x1, x2], axis=self.axis)
    dx = tf.concat([dx1, dx2], axis=self.axis)
    grads = df + dg

    return x, dx, grads
Developer: dan-lennox, Project: tensorflow, Lines: 33, Source: blocks.py


Example 5: test_backward_grads_with_nativepy

  def test_backward_grads_with_nativepy(self):
    if not tf.test.is_gpu_available():
      self.skipTest("GPU not available")

    input_shape = (128, 8, 8)
    data_shape = (16,) + input_shape
    x = tf.random_normal(shape=data_shape, dtype=tf.float64)
    dy = tf.random_normal(shape=data_shape, dtype=tf.float64)
    dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=1)
    block = blocks.RevBlock(
        n_res=3,
        filters=128,
        strides=(1, 1),
        input_shape=input_shape,
        fused=False,
        dtype=tf.float64)
    with tf.GradientTape() as tape:
      tape.watch(x)
      x1, x2 = tf.split(x, num_or_size_splits=2, axis=1)
      y1, y2 = block((x1, x2), training=True)
      y = tf.concat((y1, y2), axis=1)

    # Compute true grads
    dx_true = tape.gradient(y, x, output_gradients=dy)

    # Compute grads from reconstruction
    (dx1, dx2), _ = block.backward_grads(
        x=(x1, x2), y=(y1, y2), dy=(dy1, dy2), training=True)
    dx = tf.concat((dx1, dx2), axis=1)

    thres = 1e-5
    diff_abs = tf.reshape(abs(dx - dx_true), [-1])
    assert all(diff_abs < thres)
Developer: Ajaycs99, Project: tensorflow, Lines: 33, Source: blocks_test.py


Example 6: add_training_loss

 def add_training_loss(self, final_loss, logits):
   """Computes loss using logits."""
   loss_fn = get_loss_fn(final_loss)  # Get loss function
   task_losses = []
   # label_placeholder of shape (batch_size, n_tasks). Split into n_tasks
   # tensors of shape (batch_size,)
   task_labels = tf.split(
       axis=1, num_or_size_splits=self.n_tasks, value=self.label_placeholder)
   task_weights = tf.split(
       axis=1, num_or_size_splits=self.n_tasks, value=self.weight_placeholder)
   for task in range(self.n_tasks):
     task_label_vector = task_labels[task]
     task_weight_vector = task_weights[task]
     # Convert the labels into one-hot vector encodings.
     one_hot_labels = tf.to_float(
         tf.one_hot(tf.to_int32(tf.squeeze(task_label_vector)), 2))
     # Since we use tf.nn.softmax_cross_entropy_with_logits note that we pass in
     # un-softmaxed logits rather than softmax outputs.
     task_loss = loss_fn(logits[task], one_hot_labels, task_weight_vector)
     task_losses.append(task_loss)
   # It's ok to divide by just the batch_size rather than the number of nonzero
   # examples (effect averages out)
   total_loss = tf.add_n(task_losses)
   total_loss = tf.div(total_loss, self.batch_size)
   return total_loss
Developer: joegomes, Project: deepchem, Lines: 25, Source: multitask_classifier.py
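The splits in Example 6 follow a common multitask pattern: a (batch_size, n_tasks) tensor becomes a list of n_tasks per-task columns. In isolation (a sketch with illustrative shapes):

import tensorflow as tf

batch_size, n_tasks = 4, 3
labels = tf.zeros([batch_size, n_tasks])

# One (batch_size, 1) slice per task, as in add_training_loss above.
task_labels = tf.split(labels, num_or_size_splits=n_tasks, axis=1)
print(len(task_labels), task_labels[0].shape)  # 3 (4, 1)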


Example 7: call

  def call(self, x, mask=None):
    """Execute this layer on input tensors.
    
    x = [atom_features, atom_mask]
    
    Parameters
    ----------
    x: list
      Tensors as listed above
    mask: bool, optional
      Ignored. Present only to shadow superclass call() method.

    Returns
    -------
    outputs: Tensor
      Tensor of concatenated atom features
    """
    self.build()
    atom_features = x[0]
    atom_masks = x[1]
    A = tf.split(atom_features, self.batch_size, axis=0)
    A_mask = tf.split(
        tf.cast(atom_masks, dtype=tf.bool), self.batch_size, axis=0)
    outputs = tf.concat(
        [tf.boolean_mask(A[i], A_mask[i]) for i in range(len(A))], axis=0)
    outputs = tf.matmul(outputs, self.W) + self.b
    outputs = self.activation(outputs)
    return outputs
Developer: joegomes, Project: deepchem, Lines: 28, Source: weave_layers.py


Example 8: decode_bbox_target

def decode_bbox_target(box_predictions, anchors):
    """
    Args:
        box_predictions: (..., 4), logits
        anchors: (..., 4), floatbox. Must have the same shape

    Returns:
        box_decoded: (..., 4), float32. With the same shape.
    """
    orig_shape = tf.shape(anchors)
    box_pred_txtytwth = tf.reshape(box_predictions, (-1, 2, 2))
    box_pred_txty, box_pred_twth = tf.split(box_pred_txtytwth, 2, axis=1)
    # each is (...)x1x2
    anchors_x1y1x2y2 = tf.reshape(anchors, (-1, 2, 2))
    anchors_x1y1, anchors_x2y2 = tf.split(anchors_x1y1x2y2, 2, axis=1)

    waha = anchors_x2y2 - anchors_x1y1
    xaya = (anchors_x2y2 + anchors_x1y1) * 0.5

    clip = np.log(config.PREPROC.MAX_SIZE / 16.)
    wbhb = tf.exp(tf.minimum(box_pred_twth, clip)) * waha
    xbyb = box_pred_txty * waha + xaya
    x1y1 = xbyb - wbhb * 0.5
    x2y2 = xbyb + wbhb * 0.5    # (...)x1x2
    out = tf.concat([x1y1, x2y2], axis=-2)
    return tf.reshape(out, orig_shape)
Developer: tobyma, Project: tensorpack, Lines: 26, Source: model_box.py
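Example 8 pairs up the four box coordinates by reshaping to (-1, 2, 2) before splitting, so a single tf.split separates the (tx, ty) and (tw, th) halves. That reshape-then-split step in isolation (a sketch; the input tensor is illustrative):

import tensorflow as tf

box_predictions = tf.zeros([10, 4])  # illustrative (N, 4) box logits

pairs = tf.reshape(box_predictions, (-1, 2, 2))
txty, twth = tf.split(pairs, 2, axis=1)  # each (N, 1, 2)
print(txty.shape, twth.shape)  # (10, 1, 2) (10, 1, 2)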


Example 9: call

  def call(self, x, h):
    channels = x.shape[self._feature_axis].value

    with tf.variable_scope('gates'):
      inputs = tf.concat([x, h], axis=self._feature_axis)
      n = channels + self._filters
      m = 2 * self._filters if self._filters > 1 else 2
      W = tf.get_variable('kernel', self._kernel + [n, m])
      y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
      if self._normalize:
        r, u = tf.split(y, 2, axis=self._feature_axis)
        r = tf.contrib.layers.layer_norm(r)
        u = tf.contrib.layers.layer_norm(u)
      else:
        y += tf.get_variable('bias', [m], initializer=tf.ones_initializer())
        r, u = tf.split(y, 2, axis=self._feature_axis)
      r, u = tf.sigmoid(r), tf.sigmoid(u)

      # TODO
      #tf.summary.histogram('reset_gate', r)
      #tf.summary.histogram('update_gate', u)

    with tf.variable_scope('candidate'):
      inputs = tf.concat([x, r * h], axis=self._feature_axis)
      n = channels + self._filters
      m = self._filters
      W = tf.get_variable('kernel', self._kernel + [n, m])
      y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
      if self._normalize:
        y = tf.contrib.layers.layer_norm(y)
      else:
        y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
      h = u * h + (1 - u) * self._activation(y)

    return h, h
Developer: ascenoputing, Project: SemanticSegmentation_DL, Lines: 35, Source: ConvLSTM_Cell.py


Example 10: __call__

    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(self, scope or "basic_lstm_cell", reuse=self._reuse):
            # Parameters of gates are concatenated into one multiply for
            # efficiency.
            if self._state_is_tuple:
                c_prev, h_prev = state
            else:
                c_prev, h_prev = tf.split(
                    value=state, num_or_size_splits=2, axis=1)
            concat = tf.contrib.rnn._linear(
                [inputs, h_prev], 4 * self._num_units, True)

            # i = input_gate, g = new_input, f = forget_gate, o = output_gate
            i, g, f, o = tf.split(value=concat, num_or_size_splits=4, axis=1)

            c = (c_prev * tf.sigmoid(f + self._forget_bias) +
                 tf.sigmoid(i) * tf.tanh(g))
            h = tf.tanh(c) * tf.sigmoid(o)

            if self._state_is_tuple:
                new_state = LSTMStateTuple(c, h)
            else:
                new_state = tf.concat([c, h], 1)
            return h, new_state
Developer: seasky100, Project: tensorflow_end2end_speech_recognition, Lines: 25, Source: basic_lstm.py
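The four-way split in Example 10 is the standard fused-LSTM trick: one matrix multiply produces the pre-activations of all four gates, which are then cut apart. The split step in isolation (a sketch, assuming TensorFlow 2.x):

import tensorflow as tf

batch, num_units = 2, 5
concat = tf.random.normal([batch, 4 * num_units])  # fused gate pre-activations

# i = input gate, g = new input, f = forget gate, o = output gate
i, g, f, o = tf.split(concat, num_or_size_splits=4, axis=1)
print(i.shape, g.shape, f.shape, o.shape)  # each (2, 5)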


Example 11: conv

 def conv(self,
          input,
          k_h,
          k_w,
          c_o,
          s_h,
          s_w,
          name,
          relu=True,
          padding=DEFAULT_PADDING,
          group=1):
     self.validate_padding(padding)
     c_i = input.get_shape()[-1]
     assert c_i % group == 0
     assert c_o % group == 0
     convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
     with tf.variable_scope(name) as scope:
         kernel = self.make_var('weights', shape=[k_h, k_w, c_i / group, c_o])
         biases = self.make_var('biases', [c_o])
         if group == 1:
             conv = convolve(input, kernel)
         else:
             input_groups = tf.split(3, group, input)
             kernel_groups = tf.split(3, group, kernel)
             output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
             conv = tf.concat(3, output_groups)
         if relu:
             bias = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape().as_list())
             return tf.nn.relu(bias, name=scope.name)
         return tf.reshape(
             tf.nn.bias_add(conv, biases),
             conv.get_shape().as_list(),
             name=scope.name)
Developer: MichaelXin, Project: caffe-tensorflow, Lines: 33, Source: network.py


Example 12: build_loss

 def build_loss(self, logits, labels, lambs):
     # apply the sigmoid function (sig_func) to the logits, then transpose
     logits = tf.transpose(framwork.sig_func(logits))
     # according to the labels, drop rows whose class is not in labels
     labels_unique = tf.constant(range(self.image_classes), dtype=tf.int32)
     labels_num = self.image_classes
     logits = tf.gather(logits, indices=labels_unique)
     lambs = tf.gather(lambs, indices=labels_unique)
     # set the value of each row to True when it occurs in labels
     template = tf.tile(tf.expand_dims(labels_unique, dim=1), [1, self.batch_size])
     labels_expand = tf.tile(tf.expand_dims(labels, dim=0), [labels_num, 1])
     indict_logic = tf.equal(labels_expand, template)
     # split the tensor along rows
     logit_list = tf.split(0, labels_num, logits)
     indict_logic_list = tf.split(0, labels_num, indict_logic)
     lambda_list = tf.split(0, self.image_classes, lambs)
     # loss_list = list()
     # for i in range(self.image_classes):
     #     loss_list.append(framwork.loss_func(logit_list[i], indict_logic_list[i], lambda_list[i]))
     loss_list = map(framwork.loss_func, logit_list, indict_logic_list, lambda_list)
     loss = tf.add_n(loss_list)
     tensors_dict = {'labels_unique': labels_unique, 'template': template, 'logits_sig_trans': logits,
                     'loss': loss, 'indict_logic': indict_logic}
     self.tensors_names.extend(tensors_dict.keys())
     self.net_tensors.update(tensors_dict)
Developer: chengyang317, Project: information_pursuit, Lines: 25, Source: infor_net.py


Example 13: build_network

    def build_network(self):
        net_tensors = self.net_tensors
        with self.net_graph.as_default(), tf.device(self.net_device):
            logits = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, self.image_classes))
            labels = tf.placeholder(dtype=tf.int32, shape=(self.batch_size,))
            lambs = tf.placeholder(dtype=tf.float32, shape=(self.image_classes,))
            # apply the sigmoid function (sig_func) to the logits, then transpose
            logits = tf.transpose(framwork.sig_func(logits))
            # according to the labels, drop rows whose class is not in labels

            labels_unique = tf.constant(range(self.image_classes), dtype=tf.int32)
            labels_num = self.image_classes
            logits = tf.gather(logits, indices=labels_unique)
            lambs = tf.gather(lambs, indices=labels_unique)
            # set the value of each row to True when it occurs in labels
            template = tf.tile(tf.expand_dims(labels_unique, dim=1), [1, self.batch_size])
            labels_expand = tf.tile(tf.expand_dims(labels, dim=0), [labels_num, 1])
            indict_logic = tf.equal(labels_expand, template)
            # split the tensor along rows
            logit_list = tf.split(0, labels_num, logits)
            indict_logic_list = tf.split(0, labels_num, indict_logic)
            lamb_list = tf.split(0, self.image_classes, lambs)
            logit_list = [tf.squeeze(item) for item in logit_list]
            indict_logic_list = [tf.squeeze(item) for item in indict_logic_list]
            left_right_tuples = list()
            for i in range(self.image_classes):
                left_right_tuples.append(framwork.lamb_func(logit_list[i], indict_logic_list[i], lamb=lamb_list[i]))
            # func = framwork.lamb_func()
            # left_right_tuples = map(func, logit_list, indict_logic_list, lamb_list)
            net_tensors.update({'left_right_tuples': left_right_tuples, 'logits': logits, 'labels': labels,
                                'lambs': lambs})
Developer: chengyang317, Project: information_pursuit, Lines: 31, Source: infor_net.py


Example 14: __call__

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = tf.split(1, 2, state)
      concat = linear.linear([inputs, h], 4 * self._num_units, True)

      fs = []

      # This can be made more efficient since we're doing more than needs to be
      # done, but for now w/e
      for child_state in child_states:
          c_k, h_k = tf.split(1, 2, child_state)
          concat = linear.linear([inputs, h_k], 4 * self._num_units, True)
          i_k, j_k, f_k, o_k = tf.split(1, 4, concat)
          fs.append(f_k)


      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      # TODO: forget gate for each child, probably need to split by number
      # of child states or something
      i, j, f, o = tf.split(1, 4, concat)

      # If no children just treat it like a regular lstm
      if not fs:
        fs.append(f)

      new_c = sum(c * tf.sigmoid(fs + self._forget_bias)) + tf.sigmoid(i) * tf.tanh(j)
      new_h = tf.tanh(new_c) * tf.sigmoid(o)

    return new_h, tf.concat(1, [new_c, new_h])
Developer: StevenLOL, Project: LSTMRelatedness, Lines: 31, Source: treelstm.py


Example 15: testSymbolModalityTargets

 def testSymbolModalityTargets(self):
   batch_size = 10
   num_datashards = 5
   length = 6
   height = 7
   hidden_size = 9
   vocab_size = 11
   model_hparams = tf.contrib.training.HParams(
       symbol_modality_num_shards=4,
       hidden_size=hidden_size,
       label_smoothing=0.2,
       shared_embedding_and_softmax_weights=0)
   body_output = -1 + np.random.random_integers(
       100, size=(batch_size, length, height, hidden_size))
   targets = -1 + np.random.random_integers(
       vocab_size, size=(batch_size, length, height, 1))
   m = modalities.SymbolModality(model_hparams, vocab_size)
   data_parallelism = expert_utils.Parallelism(
       ["/device:CPU:0"] * num_datashards, reuse=True)
   with self.test_session() as session:
     sharded_body_output = tf.split(tf.to_float(body_output), num_datashards)
     sharded_targets = tf.split(targets, num_datashards)
     sharded_logits, train_loss = m.top_sharded(
         sharded_body_output, sharded_targets, data_parallelism)
     logits = tf.concat(sharded_logits, 0)
     session.run(tf.global_variables_initializer())
     res1, res2 = session.run((logits, train_loss))
   self.assertEqual(res1.shape, (batch_size, length, height, 1, vocab_size))
   self.assertEqual(res2.shape, ())
Developer: TrunksLegendary, Project: tensor2tensor, Lines: 29, Source: modalities_test.py


Example 16: _read

 def _read(self, keys, redundant_states):
     read = _comp_mul(keys, redundant_states)
     if self._num_copies > 1:
         xs_real = tf.split(1, self._num_copies, _comp_real(read))
         xs_imag = tf.split(1, self._num_copies, _comp_imag(read))
         read = (tf.add_n(xs_real)/self._num_copies, tf.add_n(xs_imag)/self._num_copies)
     return read
Developer: BinbinBian, Project: dual_am_rnn, Lines: 7, Source: dual_assoc_rnn.py


Example 17: lnlstm

def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
        bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))

        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
        bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))

        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

        gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
        bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(_ln(c, gc, bc))
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
Developer: MrGoogol, Project: baselines, Lines: 31, Source: utils.py


Example 18: build_loss

    def build_loss(self, out, out_tensor):
        """Build a loss function and accuracy for the model."""
        print('  Building loss and accuracy')

        with tf.variable_scope('accuracy'):
            argmax = tf.to_int32(tf.argmax(out_tensor, 2))
            correct = tf.to_float(tf.equal(argmax, self.ts)) * self.t_mask
            accuracy = tf.reduce_sum(correct) / tf.reduce_sum(self.t_mask)

        with tf.variable_scope('loss'):
            with tf.variable_scope('split_t_and_mask'):
                split_kwargs = { 'split_dim': 1,
                                 'num_split': self.max_t_seq_len }
                ts     = tf.split(value=self.ts,     **split_kwargs)
                t_mask = tf.split(value=self.t_mask, **split_kwargs)
                t_mask = [tf.squeeze(weight) for weight in t_mask]

            loss = seq2seq.sequence_loss(out, ts, t_mask,
                                         self.max_t_seq_len)

            with tf.variable_scope('regularization'):
                regularize = tf.contrib.layers.l2_regularizer(self.reg_scale)
                params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
                reg_term = sum([regularize(param) for param in params])

            loss += reg_term

        return loss, accuracy
Developer: Styrke, Project: master-code, Lines: 28, Source: model.py


Example 19: infer

  def infer(self, features, *args, **kwargs):  # pylint: disable=arguments-differ
    """Produce predictions from the model."""
    del args, kwargs
    # Inputs and features preparation needed to handle edge cases.
    if not features:
      features = {}
    inputs_old = None
    if "inputs" in features and len(features["inputs"].shape) < 4:
      inputs_old = features["inputs"]
      features["inputs"] = tf.expand_dims(features["inputs"], 2)

    # Set targets to input size first.
    features["targets"] = tf.zeros_like(features["inputs"])
    self._encode_on_predict = True
    logits, _ = self(features)  # pylint: disable=not-callable
    if self.hparams.gan_loss_factor != 0:
      logits, _ = tf.split(logits, 2, axis=0)  # Remove GAN.
    logits, _ = tf.split(logits, 2, axis=0)  # Targets and inputs from encoding.
    # Uncomment the line below to get reconstructed inputs instead of targets.
    # (and comment out the line above at the same time).
    # _, logits = tf.split(logits, 2, axis=0)
    samples = tf.argmax(logits, axis=-1)

    # Restore inputs to not confuse Estimator in edge cases.
    if inputs_old is not None:
      features["inputs"] = inputs_old

    # Return samples.
    return samples
Developer: qixiuai, Project: tensor2tensor, Lines: 29, Source: autoencoders.py


Example 20: minibatch

 def minibatch(self, dataset, subset, use_datasets, cache_data,
               shift_ratio=-1):
     """Get synthetic image batches.
     """
     del subset, use_datasets, cache_data, shift_ratio
     input_shape = [self.batch_size, self.height, self.width, self.depth]
     images = tf.truncated_normal(
         input_shape,
         dtype=self.dtype,
         stddev=1e-1,
         name='synthetic_images')
     labels = tf.random_uniform(
         [self.batch_size],
         minval=0,
         maxval=dataset.num_classes - 1,
         dtype=tf.int32,
         name='synthetic_labels')
     # Note: This results in a H2D copy, but no computation
     # Note: This avoids recomputation of the random values, but still
     #         results in a H2D copy.
     images = tf.contrib.framework.local_variable(images, name='images')
     labels = tf.contrib.framework.local_variable(labels, name='labels')
     if self.num_splits == 1:
         images_splits = [images]
         labels_splits = [labels]
     else:
         images_splits = tf.split(images, self.num_splits, 0)
         labels_splits = tf.split(labels, self.num_splits, 0)
     return images_splits, labels_splits
Developer: bowrian, Project: tf-imagenet, Lines: 29, Source: preprocessing_synthetic.py
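The final branch of Example 20 is the canonical data-parallel use of tf.split: cut the batch along axis 0 into one shard per device. A minimal sketch (assuming TensorFlow 2.x; shapes are illustrative):

import tensorflow as tf

num_splits = 4
images = tf.zeros([8, 32, 32, 3])
labels = tf.zeros([8], dtype=tf.int32)

images_splits = tf.split(images, num_splits, axis=0)  # four (2, 32, 32, 3) shards
labels_splits = tf.split(labels, num_splits, axis=0)  # four (2,) shards
print(len(images_splits), images_splits[0].shape)     # 4 (2, 32, 32, 3)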



Note: The tensorflow.split examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code. Do not reproduce without permission.

