
Python tensorflow.batch_matmul Function Code Examples


This article collects typical usage examples of the Python function tensorflow.batch_matmul. If you have been wondering what batch_matmul does, how to call it, or what real code that uses it looks like, the curated examples below should help.



The following presents 20 code examples of the batch_matmul function, sorted by popularity by default.
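As a quick orientation before the examples, here is a minimal sketch of the op's semantics (assuming TensorFlow 0.x, where batch_matmul still exists as a standalone op): it multiplies the innermost two dimensions of two tensors whose leading batch dimensions match, and adj_x/adj_y first take the adjoint (transpose) of the corresponding operand. In TensorFlow 1.0 and later the op was removed; tf.matmul handles batched inputs directly.

import tensorflow as tf

# x: [batch, n, m] times y: [batch, m, p] -> z: [batch, n, p]
x = tf.ones([4, 3, 2])
y = tf.ones([4, 2, 5])
z = tf.batch_matmul(x, y)                # shape [4, 3, 5]

# adj_y=True multiplies by the adjoint of y's innermost matrix,
# so y may be given as [batch, p, m] instead:
y2 = tf.ones([4, 5, 2])
z2 = tf.batch_matmul(x, y2, adj_y=True)  # shape [4, 3, 5]

# TensorFlow >= 1.0 removed batch_matmul; tf.matmul batches natively:
#   z  = tf.matmul(x, y)
#   z2 = tf.matmul(x, y2, adjoint_b=True)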

Example 1: auto_regressive_model

def auto_regressive_model(input, target, weights, bias):
    """
    Builds the auto regressive model. For details on the model, refer to the written report
    """

    hidden01 = tf.matmul(normalize(input), weights['M1']) # V_d

    hidden01 = tf.batch_matmul(tf.expand_dims(hidden01,2),tf.ones([batch_size,1,NUM_NOTES])) # V_d augmented to D across  dimension 2

    hidden02 = cumsum_weights(normalize(target), weights['M2'],D)  # V_c

    hidden = hidden01 + hidden02

    split = tf.split(0, batch_size, hidden)

    y = tf.batch_matmul(tf.expand_dims(tf.transpose(tf.squeeze(split[0])), 1), tf.expand_dims(tf.transpose(weights['W']), 2))

    for i in range(1, len(split)):
        y = tf.concat(0, [y, tf.batch_matmul(tf.expand_dims(tf.transpose(tf.squeeze(split[i])), 1),
                                                     tf.expand_dims(tf.transpose(weights['W']), 2))])
    y = tf.squeeze(y)

    output = tf.reshape(y,[batch_size,NUM_NOTES])

    return output
Developer: Sephora-M | Project: chord2vec | Lines: 26 | Source: auto_regressive.py


Example 2: _define_distance_to_clusters

  def _define_distance_to_clusters(self, data):
    """Defines the Mahalanobis distance to the assigned Gaussian."""
    # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
    # mean) from log probability function.
    self._all_scores = []
    for shard in data:
      all_scores = []
      shard = tf.expand_dims(shard, 0)
      for c in xrange(self._num_classes):
        if self._covariance_type == FULL_COVARIANCE:
          cov = self._covs[c, :, :]
        elif self._covariance_type == DIAG_COVARIANCE:
          cov = tf.diag(self._covs[c, :])
        inverse = tf.matrix_inverse(cov + self._min_var)
        inv_cov = tf.tile(
            tf.expand_dims(inverse, 0),
            tf.pack([self._num_examples, 1, 1]))
        diff = tf.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
        m_left = tf.batch_matmul(diff, inv_cov)
        all_scores.append(tf.sqrt(tf.batch_matmul(
            m_left, tf.transpose(diff, perm=[0, 2, 1])
        )))
      self._all_scores.append(tf.reshape(
          tf.concat(1, all_scores),
          tf.pack([self._num_examples, self._num_classes])))

    # Distance to the associated class.
    self._all_scores = tf.concat(0, self._all_scores)
    assignments = tf.concat(0, self.assignments())
    rows = tf.to_int64(tf.range(0, self._num_examples))
    indices = tf.concat(1, [tf.expand_dims(rows, 1),
                            tf.expand_dims(assignments, 1)])
    self._scores = tf.gather_nd(self._all_scores, indices)
Developer: DavidNemeskey | Project: tensorflow | Lines: 33 | Source: gmm_ops.py


Example 3: test_lanczos_bidiag

  def test_lanczos_bidiag(self):
    np.random.seed(1)
    a_np = np.random.uniform(
        low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    tol = 1e-12 if dtype_ == np.float64 else 1e-5

    with self.test_session() as sess:
      if use_static_shape_:
        a = tf.constant(a_np)
      else:
        a = tf.placeholder(dtype_)
      operator = util.create_operator(a)
      lbd = lanczos.lanczos_bidiag(
          operator, steps_, orthogonalize=orthogonalize_)

      # The computed factorization should satisfy the equations
      #  A * V = U * B
      #  A' * U[:, :-1] = V * B[:-1, :]'
      av = tf.batch_matmul(a, lbd.v)
      ub = lanczos.bidiag_matmul(lbd.u, lbd.alpha, lbd.beta, adjoint_b=False)
      atu = tf.batch_matmul(a, lbd.u[:, :-1], adj_x=True)
      vbt = lanczos.bidiag_matmul(lbd.v, lbd.alpha, lbd.beta, adjoint_b=True)

      if use_static_shape_:
        av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt])
      else:
        av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt],
                                                    feed_dict={a: a_np})
      self.assertAllClose(av_val, ub_val, atol=tol, rtol=tol)
      self.assertAllClose(atu_val, vbt_val, atol=tol, rtol=tol)
Developer: tonydeep | Project: tensorflow | Lines: 30 | Source: lanczos_test.py


Example 4: write

  def write(self, M0, write_w0s, write_heads):
    write_w1s = []
    for i in xrange(self.n_heads):
      head = write_heads[i]
      w0 = write_w0s[i]
      w1 = NTMCell.address(M0, w0, head)
      # For analysis
      #w1 = tf.Print(w1, [w1], "write", summarize=1000)
      write_w1s.append(w1)

    M1 = M0
    # Erase: each head scales memory by (1 - outer(w, erase))
    for w1, head in zip(write_w1s, write_heads):
      we = 1 - tf.batch_matmul(
        tf.expand_dims(w1, 2),
        tf.expand_dims(head["erase"], 1)
      )
      M1 = M1 * we

    # Write: each head adds outer(w, add)
    for w1, head in zip(write_w1s, write_heads):
      add = tf.batch_matmul(
        tf.expand_dims(w1, 2),
        tf.expand_dims(head["add"], 1)
      )
      M1 = M1 + add

    return M1, write_w1s
Developer: yeoedward | Project: Neural-Turing-Machine | Lines: 28 | Source: ntm.py


Example 5: build_memory

  def build_memory(self):
    self.global_step = tf.Variable(0, name="global_step")

    # Linear Projection Layer
    self.T = tf.Variable(tf.random_normal([self.idim, self.edim],
                                          stddev=self.init_std),
                         name="projection")

    reshape = tf.reshape(self.story, [-1, self.idim])
    m = tf.matmul(reshape, self.T)   # [batch_size * nstory, edim]
    m = tf.reshape(m, [self.batch_size, self.nstory, -1])

    reshape = tf.reshape(self.query, [-1, self.idim])
    u = tf.matmul(reshape, self.T)   # [batch_size * 1, edim]
    u = tf.reshape(u, [self.batch_size, 1, -1])

    reshape = tf.reshape(self.answer, [-1, self.idim])
    g = tf.matmul(reshape, self.T)  # [batch_size * nanswer, edim]
    g = tf.reshape(g, [self.batch_size, self.nanswer, -1])

    for h in xrange(self.nhop):
      p = tf.batch_matmul(m, u, adj_y=True)  # [batch_size, nstory, 1]
      p = tf.reshape(p, [self.batch_size, -1])
      p = tf.nn.softmax(p)  # [batch_size, nstory]

      reshape = tf.reshape(p, [self.batch_size, -1, 1])
      o = tf.reduce_sum(tf.mul(m, reshape), 1)
      u = tf.add(o, u)

    logits = tf.batch_matmul(g, u, adj_y=True)  # [batch_size, nanswer, 1]
    logits = tf.reshape(logits, [self.batch_size, -1])
    self.logits = logits
    self.probs = tf.nn.softmax(logits)
Developer: BinbinBian | Project: MovieQAMemNet | Lines: 33 | Source: model.py


Example 6: lstm_cell

 def lstm_cell(i, o, state):
   """
   Create an LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
   Note that in this formulation, we omit the various connections between the
   previous state and the gates.
   """
   i_list = tf.pack([i, i, i, i])
   #print i_list.get_shape().as_list()
   o_list = tf.pack([o, o, o, o])
                         
   ins = tf.batch_matmul(i_list, fico_x)
   outs = tf.batch_matmul(o_list, fico_m)
   
   h_x = ins + outs + fico_b
   #print h_x.get_shape().as_list()
   
   #forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
   forget_gate = tf.sigmoid(h_x[0,:,:])
   
   #input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
   input_gate = tf.sigmoid(h_x[1,:,:])
   
   #update = tf.tanh(tf.matmul(i, cx) + tf.matmul(o, cm) + cb)
   update = tf.tanh(h_x[2,:,:])
   
   state = forget_gate*state + input_gate*update
   
   #output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
   output_gate = tf.sigmoid(h_x[3,:,:])
   
   h = output_gate * tf.tanh(state)
   #print 'h', h.get_shape().as_list()
   return h, state
Developer: kcbighuge | Project: tensorflow-deeplearning | Lines: 33 | Source: 6_lstm.py


Example 7: write

 def write(self, lstm_h, Fx, Fy, gamma):
      with tf.variable_scope("writeW",reuse=self.share):
           w = self.linear(lstm_h, self.N * self.N) # batch x (write_n*write_n)
      w = tf.reshape(w, [-1, self.N, self.N])
      Fyt = tf.transpose(Fy, perm=[0, 2, 1])
      wr = tf.batch_matmul(Fyt, tf.batch_matmul(w, Fx))
      return wr*tf.reshape(1.0/gamma, [-1,1,1])
Developer: thudzj | Project: mem_printer | Lines: 7 | Source: memory.py


Example 8: log_likelihood

def log_likelihood(batch):

	#batch is an NxD matrix, where N is the batch length and D is the sample dimension
	#P(D|w) = prod_n( sum_k( pi_k * N(x_n|mu_k) ) )
	#with N(x|mu) proportional to exp(-square(mu - x))

	#multiplying by ones replicates the matrix, becomes (N,D,K)
	tmp1 = tf.batch_matmul(tf.reshape(batch, [N,D,1]), tf.ones([N,1,K]))
	#same but with the means matrix
	tmp2 = tf.batch_matmul(means, tf.ones([K,1,N]))
	tmp2 = tf.transpose(tmp2, [2,1,0])
	# (x - mu)
	tmp3 = tmp1 - tmp2
	# (x - mu).T(x - mu)
	tmp3 = tf.batch_matmul(tf.transpose(tmp3, [0,2,1]), tmp3)
	tmp3 = tf.reduce_sum(tmp3,2)
	# -(x - mu).T(x - mu)
	tmp3 = -tmp3
	# exp(-(x - mu).T(x - mu))
	tmp3 = tf.exp(tmp3)
	#multiply by mixture weights
	tmp3 = tf.matmul(tmp3, mixture_weights)
	#log
	tmp3 = tf.log(tmp3)
	#sum over all samples of the batch
	tmp3 = tf.reduce_sum(tmp3,0)

	return tmp3
Developer: chriscremer | Project: Other_Code | Lines: 29 | Source: online_mixed_model_mar28.py


Example 9: __init__

    def __init__(self, memory_cells, query, project_query=False):
        """Define Attention.

        Args:
            memory_cells (SequenceBatch): a SequenceBatch containing a Tensor of shape (batch_size, num_cells, cell_dim)
            query (Tensor): a tensor of shape (batch_size, query_dim).
            project_query (bool): defaults to False. If True, the query goes through an extra projection layer to
                coerce it to cell_dim.
        """
        cell_dim = memory_cells.values.get_shape().as_list()[2]
        if project_query:
            # project the query up/down to cell_dim
            self._projection_layer = Dense(cell_dim, activation='linear')
            query = self._projection_layer(query)  # (batch_size, cell_dim)

        memory_values, memory_mask = memory_cells.values, memory_cells.mask

        # batch matrix multiply to compute logit scores for all choices in all batches
        query = tf.expand_dims(query, 2)  # (batch_size, cell_dim, 1)
        logit_values = tf.batch_matmul(memory_values, query)  # (batch_size, num_cells, 1)
        logit_values = tf.squeeze(logit_values, [2])  # (batch_size, num_cells)

        # set all pad logits to negative infinity
        logits = SequenceBatch(logit_values, memory_mask)
        logits = logits.with_pad_value(-float('inf'))

        # normalize to get probs
        probs = tf.nn.softmax(logits.values)  # (batch_size, num_cells)

        retrieved = tf.batch_matmul(tf.expand_dims(probs, 1), memory_values)  # (batch_size, 1, cell_dim)
        retrieved = tf.squeeze(retrieved, [1])  # (batch_size, cell_dim)

        self._logits = logits.values
        self._probs = probs
        self._retrieved = retrieved
Developer: siddk | Project: lang2program | Lines: 35 | Source: model.py


Example 10: Test

  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    a = np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
    a += a.T
    a = np.tile(a, batch_shape + (1, 1))
    if dtype_ == np.float32:
      atol = 1e-4
    else:
      atol = 1e-12
    for compute_v in False, True:
      np_e, np_v = np.linalg.eig(a)
      with self.test_session():
        if compute_v:
          tf_e, tf_v = tf.self_adjoint_eig(tf.constant(a))

          # Check that V*diag(E)*V^T is close to A.
          a_ev = tf.batch_matmul(
              tf.batch_matmul(tf_v, tf.batch_matrix_diag(tf_e)),
              tf_v,
              adj_y=True)
          self.assertAllClose(a_ev.eval(), a, atol=atol)

          # Compare to numpy.linalg.eig.
          CompareEigenDecompositions(self, np_e, np_v, tf_e.eval(), tf_v.eval(),
                                     atol)
        else:
          tf_e = tf.self_adjoint_eigvals(tf.constant(a))
          self.assertAllClose(
              np.sort(np_e, -1), np.sort(tf_e.eval(), -1), atol=atol)
Developer: apollos | Project: tensorflow | Lines: 32 | Source: self_adjoint_eig_op_test.py


Example 11: build_node

    def build_node(self, x_in, c_in, h_in, scope="lstm_cell"):
        #print (x_in, c_in, h_in, scope)
        #print [type(thing) for thing in (x_in, c_in, h_in, scope)]
        # print [(item.name, item.dtype) for thing in (h_in, c_in) for item in thing]
        # print (x_in.name, x_in.dtype)

        with tf.variable_scope(scope):
            # print x.shape
            # print h_in.get_shape()
            x_with_h = tf.concat(2, [x_in, h_in])

            ones_for_bias = tf.constant(np.ones([batch_size,1,1]), name="b", dtype=tf.float32)
            x_h_concat = tf.concat(2, [ones_for_bias, x_with_h])

            # forget gate layer
            # print "w_f: ", self.w_f.get_shape()
            # print "x_h_concat: ", x_h_concat.get_shape()
            f = tf.sigmoid(tf.batch_matmul(x_h_concat, self.w_f))

            # candidate values
            i = tf.sigmoid(tf.batch_matmul(x_h_concat, self.w_i))
            candidate_c = tf.tanh(tf.batch_matmul(x_h_concat, self.w_c))

            # new cell state (hidden)
            # forget old values of c
            old_c_to_keep = tf.mul(f, c_in)
            # scaled candidate values of c
            new_c_to_keep = tf.mul(i, candidate_c)
            c = tf.add(old_c_to_keep, new_c_to_keep)

            # new scaled output
            o = tf.sigmoid(tf.batch_matmul(x_h_concat, self.w_o))
            h = tf.mul(o, tf.tanh(c))
            return (c, h)
Developer: liangkai | Project: char-rnn-tf | Lines: 34 | Source: rnn.py


Example 12: extract_patch

def extract_patch(x, f_y, f_x, nchannels):
    """
    Args:
        x: [B, H, W, D]
        f_y: [B, H, FH]
        f_x: [B, W, FW]
        nchannels: D

    Returns:
        patch: [B, FH, FW, D]
    """
    patch = [None] * nchannels
    fsize_h = tf.shape(f_y)[2]
    fsize_w = tf.shape(f_x)[2]
    hh = tf.shape(x)[1]
    ww = tf.shape(x)[2]

    for dd in xrange(nchannels):
        # [B, H, W]
        x_ch = tf.reshape(
            tf.slice(x, [0, 0, 0, dd], [-1, -1, -1, 1]),
            tf.pack([-1, hh, ww]))
        patch[dd] = tf.reshape(tf.batch_matmul(
            tf.batch_matmul(f_y, x_ch, adj_x=True),
            f_x), tf.pack([-1, fsize_h, fsize_w, 1]))

    return tf.concat(3, patch)
Developer: lrjconan | Project: img-count | Lines: 27 | Source: ris_model_base.py


Example 13: copy_net

 def copy_net(decoder_out):
     with tf.variable_scope('copy_net') as scope:
         decoder_out = tf.reshape(decoder_out, [-1, decoder_hidden, 1])
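         # score each source position against the current decoder state:
         # [batch, src_len, 1], assuming rnn_encoder_temp is [batch, src_len, decoder_hidden]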
         source_prob = tf.batch_matmul(rnn_encoder_temp, decoder_out)
         source_prob = tf.reshape(source_prob, [-1, 1, source_prob.get_shape().as_list()[1]])
         voc_prob = tf.batch_matmul(source_prob, one_hot)
         voc_prob = tf.reshape(voc_prob, [-1, voc_prob.get_shape().as_list()[-1]])
         return voc_prob
Developer: xuwenshen | Project: flask_demo | Lines: 8 | Source: copy_mechanism.py


Example 14: build_memory

    def build_memory(self):
        self.global_step = tf.Variable(0, name="global_step")


        # embedding matrix A of dimension d*V, 
        # converting x_i into memory vectors v_i
        self.A = tf.Variable(tf.random_normal([self.nwords, self.edim], stddev=self.init_std))
        # embedding matrix B with the same dimension as A
        # converting q to obtain an internal state u
        self.B = tf.Variable(tf.random_normal([self.nwords, self.edim], stddev=self.init_std))
        # C converts x into o
        self.C = tf.Variable(tf.random_normal([self.edim, self.edim], stddev=self.init_std))

        # Temporal Encoding
        self.T_A = tf.Variable(tf.random_normal([self.mem_size, self.edim], stddev=self.init_std))
        self.T_B = tf.Variable(tf.random_normal([self.mem_size, self.edim], stddev=self.init_std))

        # m_i = sum A_ij * x_ij + T_A_i
        # this embedding_lookup function retrieves rows of self.A
        Ain_c = tf.nn.embedding_lookup(self.A, self.context) # context is the previous words
        Ain_t = tf.nn.embedding_lookup(self.T_A, self.time) # time is for temporal encoding
        Ain = tf.add(Ain_c, Ain_t)

        # c_i = sum B_ij * u + T_B_i
        # NOTE: is it B or C? B looks correct, but the notation differs from the paper
        Bin_c = tf.nn.embedding_lookup(self.B, self.context)
        Bin_t = tf.nn.embedding_lookup(self.T_B, self.time)
        Bin = tf.add(Bin_c, Bin_t)

        # nhop hops to go through
        for h in range(self.nhop):
            # reshape hid to be 3-dimensional
            self.hid3dim = tf.reshape(self.hid[-1], [-1, 1, self.edim]) # -1 is used to infer the shape
            # inner product of the memory units and the input vector
            # A_in stores the memory units, i.e., the context and temporal encodings
            # hid represents the hidden state
            Aout = tf.batch_matmul(self.hid3dim, Ain, adj_y=True) 
            Aout2dim = tf.reshape(Aout, [-1, self.mem_size])
            P = tf.nn.softmax(Aout2dim)

            probs3dim = tf.reshape(P, [-1, 1, self.mem_size])
            Bout = tf.batch_matmul(probs3dim, Bin) # the output vector
            Bout2dim = tf.reshape(Bout, [-1, self.edim])

            Cout = tf.matmul(self.hid[-1], self.C)
            Dout = tf.add(Cout, Bout2dim) # W(o + u)

            self.share_list[0].append(Cout)

            if self.lindim == self.edim:
                self.hid.append(Dout)
            elif self.lindim == 0:
                self.hid.append(tf.nn.relu(Dout))
            else:
                F = tf.slice(Dout, [0, 0], [self.batch_size, self.lindim])
                G = tf.slice(Dout, [0, self.lindim], [self.batch_size, self.edim-self.lindim])
                K = tf.nn.relu(G)
                self.hid.append(tf.concat(1, [F, K]))
Developer: ZeweiChu | Project: MemN2N-tensorflow | Lines: 58 | Source: model.py


Example 15: write

def write(windows, N, center_x, center_y, delta, sigma, gamma):
    tol = 1e-5
    W = tf.reshape(windows, [-1, N, N])
    FX, FY = banks(center_x, center_y, sigma, delta, N, (28,28))

    I = tf.batch_matmul(W, FY)
    I = tf.batch_matmul(tf.transpose(FX, [0,2,1]), I)

    return tf.expand_dims(1/(gamma + tol),1)*tf.reshape(I, [-1, 28*28])
Developer: wagnermarkd | Project: draw | Lines: 9 | Source: filterbank.py


Example 16: get_function

    def get_function(points, mu, sigma): # f_ik [n,k]
        div = coef*tf.rsqrt(tf.batch_matrix_determinant(sigma)) # ((2pi)^p*|S_k|)^-1/2  [k]
        div = tf.tile(tf.reshape(div, [1,k]), [n,1]) # [n,k]
        diff = tf.sub(tf.tile(points, [k,1,1]), tf.tile(mu, [n,1,1])) # x_i-u_k [n*k, p, 1]
        sigma = tf.tile(sigma, [n,1,1]) # [n*k,p,p]
        exp = tf.exp(-0.5*tf.batch_matmul( tf.transpose(diff,perm=[0,2,1]), tf.batch_matmul(tf.batch_matrix_inverse(sigma), diff) )) # e^(d'*S^-1*d)_ik [n*k, 1, 1]
        exp = tf.reshape(exp, [n,k])

        return tf.mul(div, exp) # Multivariate normal distribution evaluated for each vector, for each cluster parameter. Hence the [n,k] shape.
Developer: PFAWeb2Control | Project: machine-learning | Lines: 9 | Source: basic_CEM.py


Example 17: read

 def read(self, x, Fx, Fy, gamma):
    Fxr = tf.reshape(Fx, [-1, 1, self.N, self.shape[1]])
    Fyr = tf.reshape(Fy, [-1, 1, self.N, self.shape[2]])
    Fxr3 = tf.concat(1, [Fxr, Fxr, Fxr]) # batch * 3 * N * A
    Fyr3 = tf.concat(1, [Fyr, Fyr, Fyr])
    Fxt3 = tf.transpose(Fxr3, perm=[0, 1, 3, 2])
    glimpse = tf.batch_matmul(Fyr3, tf.batch_matmul(x, Fxt3))
    glimpse = tf.reshape(glimpse, [-1, self.att_size])
    return glimpse * tf.reshape(gamma, [-1,1])
Developer: thudzj | Project: mem_printer | Lines: 9 | Source: memory.py


Example 18: threee_tensor_mul

 def threee_tensor_mul(A, B, C, res):
   # for example 
   # A = tf.ones([4, 3, 2], tf.int32)
   # B = tf.ones([4, 2, 5, 3], tf.int32)
   # C = tf.ones([4, 5, 6], tf.int32)
   # return: (4, 3, 6), which combines 3 channels of matrix multiplication
   c = B.get_shape().as_list()[-1]
   res += tf.batch_matmul(tf.batch_matmul(A, B), C)
   
   return res
Developer: Lmaths | Project: Matrix-neural-network | Lines: 10 | Source: layers.py


Example 19: read

def read(images, N, delta, gamma, sigma, center_x, center_y):
    #TODO: Make configurable shape
    FX, FY = banks(center_x, center_y, sigma, delta, N, (28,28))

    I = tf.reshape(images, [-1, 28, 28])

    I = tf.batch_matmul(FY, I)
    I = tf.batch_matmul(I, tf.transpose(FX, [0,2,1]))

    return tf.expand_dims(gamma,1)*tf.reshape(I, [-1, N*N])
Developer: wagnermarkd | Project: draw | Lines: 10 | Source: filterbank.py


Example 20: model

def model(input1, gating_network):

	# return tf.nn.softmax(tf.matmul(tf.transpose(gating_network), (tf.reshape(tf.batch_matmul(w, input_aa), [L-1, n_aa]) + b)))
	input_times_w = tf.reshape(tf.batch_matmul(w, input1), [L, L, n_aa])
	input_times_w_plus_b = input_times_w + b
	activation_function = tf.nn.relu(input_times_w_plus_b)
	# activation_function = tf.sigmoid(input_times_w_plus_b)
	use_gate = tf.batch_matmul(tf.transpose(activation_function, perm=[0, 2, 1]), tf.transpose(gating_network, perm=[0,1,2])) #perm=[1,0,2]
	softmax_output = tf.nn.softmax(tf.reshape(use_gate, [L, n_aa]))
	return softmax_output
Developer: chriscremer | Project: Other_Code | Lines: 10 | Source: protein_model8.py



Note: the tensorflow.batch_matmul examples above were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.

