
Python tensorflow.imag Function Code Examples


This article collects typical usage examples of the tensorflow.imag function in Python. If you are wondering exactly what the imag function does, how to call it, or where to find real examples of it in use, the curated code examples below may help.



A total of 18 code examples of the imag function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
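
Before diving into the examples, here is a minimal, self-contained sketch of what tf.imag (together with its counterpart tf.real) returns, written against the TF 1.x API that the examples below use; in TF 2.x the same ops are available as tf.math.imag and tf.math.real.

import tensorflow as tf

# Hedged sketch: extract the real and imaginary parts of a complex tensor.
cplx = tf.constant([1.0 + 2.0j, 3.0 - 4.0j], dtype=tf.complex64)
real_part = tf.real(cplx)  # -> [1.0, 3.0]
imag_part = tf.imag(cplx)  # -> [2.0, -4.0]

with tf.Session() as sess:
    print(sess.run([real_part, imag_part]))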

Example 1: antenna_jones

    def antenna_jones(lm, stokes, alpha, ref_freq):
        """
        Compute the jones terms for each antenna.

        lm, stokes and alpha are the source variables.
        """

        # Compute the complex phase
        cplx_phase = rime.phase(lm, D.uvw, D.frequency, CT=CT)

        # Check for nans/infs in the complex phase
        phase_msg = ("Check that '1 - l**2  - m**2 >= 0' holds "
                    "for all your lm coordinates. This is required "
                    "for 'n = sqrt(1 - l**2 - m**2) - 1' "
                    "to be finite.")

        phase_real = tf.check_numerics(tf.real(cplx_phase), phase_msg)
        phase_imag = tf.check_numerics(tf.imag(cplx_phase), phase_msg)

        # Compute the square root of the brightness matrix
        # (as well as the sign)
        bsqrt, sgn_brightness = rime.b_sqrt(stokes, alpha,
            D.frequency, ref_freq, CT=CT,
            polarisation_type=polarisation_type)

        # Check for nans/infs in the bsqrt
        bsqrt_msg = ("Check that your stokes parameters "
                    "satisfy I**2 >= Q**2 + U**2 + V**2. "
                    "Montblanc performs a cholesky decomposition "
                    "of the brightness matrix and the above must "
                    "hold for this to produce valid values.")

        bsqrt_real = tf.check_numerics(tf.real(bsqrt), bsqrt_msg)
        bsqrt_imag = tf.check_numerics(tf.imag(bsqrt), bsqrt_msg)

        # Compute the direction dependent effects from the beam
        ejones = rime.e_beam(lm, D.frequency,
            D.pointing_errors, D.antenna_scaling,
            beam_sin, beam_cos,
            D.beam_extents, D.beam_freq_map, D.ebeam)

        deps = [phase_real, phase_imag, bsqrt_real, bsqrt_imag]
        deps = [] # Do nothing for now

        # Combine the brightness square root, complex phase,
        # feed rotation and beam dde's
        with tf.control_dependencies(deps):
            antenna_jones = rime.create_antenna_jones(bsqrt, cplx_phase,
                                                    feed_rotation, ejones, FT=FT)
            return antenna_jones, sgn_brightness
Developer ID: ska-sa, Project: montblanc, Lines of code: 50, Source file: RimeSolver.py


Example 2: __call__

    def __call__(self, inputs, state, scope=None ):
        with tf.variable_scope(scope or type(self).__name__):
            unitary_hidden_state, secondary_cell_hidden_state = tf.split(1,2,state)


            mat_in = tf.get_variable('mat_in', [self.input_size, self.state_size*2])
            mat_out = tf.get_variable('mat_out', [self.state_size*2, self.output_size])
            in_proj = tf.matmul(inputs, mat_in)            
            in_proj_c = tf.complex(*tf.split(1, 2, in_proj))
            out_state = modReLU( in_proj_c + 
                ulinear(unitary_hidden_state, self.state_size),
                tf.get_variable(name='bias', dtype=tf.float32, shape=tf.shape(unitary_hidden_state), initializer=tf.constant_initializer(0.)),
                scope=scope)


        with tf.variable_scope('unitary_output'):
            '''computes data linear, unitary linear and summation -- TODO: should be complex output'''
            unitary_linear_output_real = linear.linear([tf.real(out_state), tf.imag(out_state), inputs], True, 0.0)
        

        with tf.variable_scope('scale_nonlinearity'):
            modulus = tf.complex_abs(unitary_linear_output_real)
            rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-7)

        #transition to data shortcut connection


        #out_ = tf.matmul(tf.concat(1,[tf.real(out_state), tf.imag(out_state), ] ), mat_out) + out_bias

        #hidden state is complex but output is completely real
        return out_, out_state #complex 
Developer ID: Liubinggunzu, Project: tensorflow_with_latest_papers, Lines of code: 31, Source file: unitary_rnn_cell_modern.py


Example 3: sparse_dot_product0

def sparse_dot_product0(emb, tuples, use_matmul=True, output_type='real'):
    """
    Compute the dot product of complex vectors.
    It uses complex vectors but tensorflow does not optimize in the complex space (or there is a bug in the gradient
    propagation with complex numbers...)
    :param emb: embeddings
    :param tuples: indices at which we compute dot products
    :return: scores (dot products)
    """
    n_t = tuples.get_shape()[0].value
    rk = emb.get_shape()[1].value
    emb_sel_a = tf.gather(emb, tuples[:, 0])
    emb_sel_b = tf.gather(emb, tuples[:, 1])
    if use_matmul:
        pred_cplx = tf.squeeze(tf.batch_matmul(
                tf.reshape(emb_sel_a, [n_t, rk, 1]),
                tf.reshape(emb_sel_b, [n_t, rk, 1]), adj_x=True))
    else:
        pred_cplx = tf.reduce_sum(tf.mul(tf.conj(emb_sel_a), emb_sel_b), 1)
    if output_type == 'complex':
        return pred_cplx
    elif output_type == 'real':
        return tf.real(pred_cplx) + tf.imag(pred_cplx)
    elif output_type == 'abs':
        return tf.abs(pred_cplx)
    elif output_type == 'angle':
        raise NotImplementedError('No argument or inverse-tanh function for complex number in Tensorflow')
    else:
        raise NotImplementedError()
Developer ID: Peratham, Project: factorix, Lines of code: 29, Source file: hermitian.py
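
As a side note on the docstring's remark about complex gradients: the Hermitian dot product computed above decomposes into purely real arithmetic. A hedged NumPy sketch of that identity, on hypothetical vectors a and b:

import numpy as np

a = np.random.randn(5) + 1j * np.random.randn(5)
b = np.random.randn(5) + 1j * np.random.randn(5)

hermitian = np.sum(np.conj(a) * b)
re = np.sum(a.real * b.real + a.imag * b.imag)  # real part of the Hermitian product
im = np.sum(a.real * b.imag - a.imag * b.real)  # imaginary part
assert np.allclose(hermitian, re + 1j * im)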


Example 4: get_reconstructed_image

    def get_reconstructed_image(self, real, imag, name=None):
        """
        :param real:
        :param imag:
        :param name:
        :return:
        """
        complex_k_space_label = tf.complex(real=tf.squeeze(real), imag=tf.squeeze(imag), name=name+"_complex_k_space")
        rec_image_complex = tf.expand_dims(tf.ifft2d(complex_k_space_label), axis=1)
        
        rec_image_real = tf.reshape(tf.real(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        rec_image_imag = tf.reshape(tf.imag(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])

        # Shifting
        top, bottom = tf.split(rec_image_real, num_or_size_splits=2, axis=2)
        top_left, top_right = tf.split(top, num_or_size_splits=2, axis=3)
        bottom_left, bottom_right = tf.split(bottom, num_or_size_splits=2, axis=3)

        top_shift = tf.concat(axis=3, values=[bottom_right, bottom_left])
        bottom_shift = tf.concat(axis=3, values=[top_right, top_left])
        shifted_image = tf.concat(axis=2, values=[top_shift, bottom_shift])


        # Shifting
        top_imag, bottom_imag = tf.split(rec_image_imag, num_or_size_splits=2, axis=2)
        top_left_imag, top_right_imag = tf.split(top_imag, num_or_size_splits=2, axis=3)
        bottom_left_imag, bottom_right_imag = tf.split(bottom_imag, num_or_size_splits=2, axis=3)

        top_shift_imag = tf.concat(axis=3, values=[bottom_right_imag, bottom_left_imag])
        bottom_shift_imag = tf.concat(axis=3, values=[top_right_imag, top_left_imag])
        shifted_image_imag = tf.concat(axis=2, values=[top_shift_imag, bottom_shift_imag])

        shifted_image_two_channels = tf.stack([shifted_image[:,0,:,:], shifted_image_imag[:,0,:,:]], axis=1)
        return shifted_image_two_channels
Developer ID: shohad25, Project: thesis, Lines of code: 34, Source file: k_space_wgan_gl_g2_unet_Gloss.py
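
The two "Shifting" blocks above swap image quadrants, which for even-sized images is exactly an FFT shift over the spatial axes. A hedged NumPy sketch of the equivalence, on hypothetical data laid out as [batch, channel, height, width]:

import numpy as np

img = np.random.randn(1, 1, 8, 8)
top, bottom = np.split(img, 2, axis=2)
top_left, top_right = np.split(top, 2, axis=3)
bottom_left, bottom_right = np.split(bottom, 2, axis=3)
top_shift = np.concatenate([bottom_right, bottom_left], axis=3)
bottom_shift = np.concatenate([top_right, top_left], axis=3)
manual_shift = np.concatenate([top_shift, bottom_shift], axis=2)
assert np.allclose(manual_shift, np.fft.fftshift(img, axes=(2, 3)))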


Example 5: _compareMulGradient

  def _compareMulGradient(self, data):
    # data is a float matrix of shape [n, 4].  data[:, 0], data[:, 1],
    # data[:, 2], data[:, 3] are real parts of x, imaginary parts of
    # x, real parts of y and imaginary parts of y.
    with self.test_session():
      inp = tf.convert_to_tensor(data)
      xr, xi, yr, yi = tf.split(1, 4, inp)

      def vec(x):  # Reshape to a vector
        return tf.reshape(x, [-1])
      xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)

      def cplx(r, i):  # Combine to a complex vector
        return tf.complex(r, i)
      x, y = cplx(xr, xi), cplx(yr, yi)
      # z is x times y in complex plane.
      z = x * y
      # Defines the loss function as the sum of all coefficients of z.
      loss = tf.reduce_sum(tf.real(z) + tf.imag(z))
      epsilon = 0.005
      jacob_t, jacob_n = tf.test.compute_gradient(inp,
                                                  list(data.shape),
                                                  loss,
                                                  [1],
                                                  x_init_value=data,
                                                  delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Developer ID: BranYang, Project: tensorflow, Lines of code: 27, Source file: cwise_ops_test.py
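
For reference, the loss above differentiates through an ordinary complex product; a tiny NumPy sketch of the real and imaginary parts of z = x * y in terms of the four input columns (values are made up):

import numpy as np

xr, xi, yr, yi = 1.0, 2.0, 3.0, 4.0
z = (xr + 1j * xi) * (yr + 1j * yi)
assert np.isclose(z.real, xr * yr - xi * yi)  # real part of x * y
assert np.isclose(z.imag, xr * yi + xi * yr)  # imaginary part of x * y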


Example 6: _compareRealImag

 def _compareRealImag(self, cplx, use_gpu):
   np_real, np_imag = np.real(cplx), np.imag(cplx)
   with self.test_session(use_gpu=use_gpu) as sess:
     inx = tf.convert_to_tensor(cplx)
     tf_real = tf.real(inx)
     tf_imag = tf.imag(inx)
     tf_real_val, tf_imag_val = sess.run([tf_real, tf_imag])
   self.assertAllEqual(np_real, tf_real_val)
   self.assertAllEqual(np_imag, tf_imag_val)
   self.assertShapeEqual(np_real, tf_real)
   self.assertShapeEqual(np_imag, tf_imag)
Developer ID: adeelzaman, Project: tensorflow, Lines of code: 11, Source file: cwise_ops_test.py


Example 7: __call__

 def __call__(self, inputs, state, scope=None ):
     zero_initer = tf.constant_initializer(0.)
     with tf.variable_scope(scope or type(self).__name__):
         mat_in = tf.get_variable('W_in', [self.input_size, self.state_size*2])
         mat_out = tf.get_variable('W_out', [self.state_size*2, self.output_size])
         in_proj = tf.matmul(inputs, mat_in)
         in_proj_c = tf.complex( in_proj[:, :self.state_size], in_proj[:, self.state_size:] )
         out_state = modrelu_c( in_proj_c + 
             ulinear_c(state,transform=self.transform),
             tf.get_variable(name='B', dtype=tf.float32, shape=[self.state_size], initializer=zero_initer)
             )
         out_bias = tf.get_variable(name='B_out', dtype=tf.float32, shape=[self.output_size], initializer = zero_initer)
         out = tf.matmul( tf.concat(1,[tf.real(out_state), tf.imag(out_state)] ), mat_out ) + out_bias
     return out, out_state
Developer ID: khaotik, Project: char-rnn-tensorflow, Lines of code: 14, Source file: urnn.py


Example 8: c2q1d

def c2q1d(x):
    """ An internal function to convert a 1D Complex vector back to a real
    array,  which is twice the height of x.
    """
    # Input is complex-valued with shape [batch, r, c]
    r, c = x.get_shape().as_list()[1:3]
    x1 = tf.real(x)
    x2 = tf.imag(x)
    # Stack 2 inputs of shape [batch, r, c] to [batch, r, 2, c]
    y = tf.stack([x1, x2], axis=-2)
    # Reshaping interleaves the results
    y = tf.reshape(y, [-1, 2 * r, c])

    return y
Developer ID: rjw57, Project: dtcwt, Lines of code: 14, Source file: transform1d.py
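
A hedged NumPy sketch of the interleaving that c2q1d performs: the real and imaginary rows of a complex [batch, r, c] input end up alternating along the height of the real-valued output.

import numpy as np

x = np.random.randn(1, 2, 3) + 1j * np.random.randn(1, 2, 3)
y = np.stack([x.real, x.imag], axis=-2).reshape(-1, 2 * x.shape[1], x.shape[2])
# Rows alternate: real row 0, imag row 0, real row 1, imag row 1, ...
assert np.allclose(y[0, 0], x[0, 0].real) and np.allclose(y[0, 1], x[0, 0].imag)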


Example 9: _compareGradient

 def _compareGradient(self, x):
     # x[:, 0] is real, x[:, 1] is imag.  We combine real and imag into
     # complex numbers. Then, we extract real and imag parts and
     # computes the squared sum. This is obviously the same as sum(real
     # * real) + sum(imag * imag). We just want to make sure the
     # gradient function is checked.
     with self.test_session():
         inx = tf.convert_to_tensor(x)
         real, imag = tf.split(1, 2, inx)
         real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
         cplx = tf.complex(real, imag)
         cplx = tf.conj(cplx)
         loss = tf.reduce_sum(tf.square(tf.real(cplx))) + tf.reduce_sum(tf.square(tf.imag(cplx)))
         epsilon = 1e-3
         jacob_t, jacob_n = tf.test.compute_gradient(inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
     self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Developer ID: peace195, Project: tensorflow, Lines of code: 16, Source file: cwise_ops_test.py


Example 10: get_mu_tensor

  def get_mu_tensor(self):
    const_fact = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    coef = tf.Variable([-1.0, 3.0, 0.0, 1.0], dtype=tf.float32, name="cubic_solver_coef")
    coef = tf.scatter_update(coef, tf.constant(2), -(3 + const_fact) )        
    roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, stateful=False)
    
    # filter out the correct root
    root_idx = tf.logical_and(tf.logical_and(tf.greater(tf.real(roots), tf.constant(0.0) ),
      tf.less(tf.real(roots), tf.constant(1.0) ) ), tf.less(tf.abs(tf.imag(roots) ), 1e-5) )
    # in case there are two duplicated roots satisfying the above condition
    root = tf.reshape(tf.gather(tf.gather(roots, tf.where(root_idx) ), tf.constant(0) ), shape=[] )
    tf.assert_equal(tf.size(root), tf.constant(1) )

    dr = self._h_max / self._h_min
    mu = tf.maximum(tf.real(root)**2, ( (tf.sqrt(dr) - 1)/(tf.sqrt(dr) + 1) )**2)    
    return mu
Developer ID: tigercut, Project: MobileNet, Lines of code: 16, Source file: yellowfin.py
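
To see what the tensor ops above compute numerically, here is a hedged NumPy sketch of the same root selection: solve the cubic -x**3 + 3*x**2 - (3 + c)*x + 1 = 0 and keep the real root in (0, 1). Here c stands in for const_fact and its value is made up.

import numpy as np

c = 0.5  # hypothetical const_fact
roots = np.roots([-1.0, 3.0, -(3.0 + c), 1.0])
valid = roots[(roots.real > 0.0) & (roots.real < 1.0) & (np.abs(roots.imag) < 1e-5)]
mu_candidate = valid.real[0] ** 2  # analogous to tf.real(root)**2 above
print(mu_candidate)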


Example 11: __init__

  def __init__(self, **kwargs):
    """
    """
    def _interleaveVectors(vec1, vec2):
        vec1 = tf.expand_dims(vec1, 3)
        vec2 = tf.expand_dims(vec2, 3)
        interleaved = tf.concat([vec1, vec2], 3)
        interleaved = tf.reshape(interleaved, (tf.shape(vec1)[0], tf.shape(vec1)[1], tf.shape(vec1)[2] * 2))
        return interleaved
    super(ComplexToAlternatingRealLayer, self).__init__(**kwargs)

    input_placeholder = self.input_data.get_placeholder_as_batch_major()

    real_value = tf.real(input_placeholder)
    imag_value = tf.imag(input_placeholder)
    self.output.placeholder = _interleaveVectors(real_value, imag_value)
    self.output.size_placeholder = {0: self.input_data.size_placeholder[self.input_data.time_dim_axis_excluding_batch]}
Developer ID: rwth-i6, Project: returnn, Lines of code: 17, Source file: TFNetworkSigProcLayer.py


Example 12: stack_real_imag

def stack_real_imag(x):

    stack_axis = len(x.get_shape().as_list())
    return tf.stack((tf.real(x), tf.imag(x)), axis=stack_axis)
Developer ID: MiG-Kharkov, Project: DeepLearningImplementations, Lines of code: 4, Source file: scattering.py


Example 13: complex_mul_real

def complex_mul_real( z, r ):
    return tf.complex(tf.real(z)*r, tf.imag(z)*r)
Developer ID: Liubinggunzu, Project: tensorflow_with_latest_papers, Lines of code: 2, Source file: complex_util.py


Example 14: abs2_c

def abs2_c(z):
    return tf.real(z)*tf.real(z)+tf.imag(z)*tf.imag(z)
Developer ID: Liubinggunzu, Project: tensorflow_with_latest_papers, Lines of code: 2, Source file: complex_util.py
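
A quick NumPy check of the two complex_util helpers above (complex_mul_real and abs2_c) on a toy value:

import numpy as np

z, r = 3.0 + 4.0j, 2.0
assert np.isclose((z * r).real, z.real * r)              # complex_mul_real, real part
assert np.isclose((z * r).imag, z.imag * r)              # complex_mul_real, imaginary part
assert np.isclose(abs(z) ** 2, z.real**2 + z.imag**2)    # abs2_c equals |z|**2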


Example 15: test_Imag

 def test_Imag(self):
     t = tf.imag(self.random(3, 4, complex=True))
     self.check(t)
Developer ID: kestrelm, Project: tfdeploy, Lines of code: 3, Source file: ops.py


Example 16: build_func

def build_func():
    slices_tensor = tf.placeholder(dtype=tf.complex64, shape=[None, None])
    S_tensor = tf.placeholder(dtype=tf.complex64, shape=[None, None])
    envelope_tensor = tf.placeholder(dtype=tf.float32, shape=[None])
    ctf_tensor = tf.placeholder(dtype=tf.float32, shape=[None, None])
    d_tensor = tf.placeholder(dtype=tf.complex64, shape=[None, None])
    logW_S_tensor = tf.placeholder(dtype=tf.float32, shape=[None])
    logW_I_tensor = tf.placeholder(dtype=tf.float32, shape=[None])
    logW_R_tensor = tf.placeholder(dtype=tf.float32, shape=[None])
    div_in_tensor = tf.placeholder(dtype=tf.float32, shape=[])
    sigma2_coloured_tensor = tf.placeholder(dtype=tf.float32, shape=[None])

    cproj = tf.expand_dims(slices_tensor, 1) * tf.complex(ctf_tensor, tf.zeros_like(ctf_tensor)) # r * i * t
    cim = tf.expand_dims(S_tensor, 1) * d_tensor  # s * i * t
    correlation_I = tf.real(tf.expand_dims(cproj, 1)) * tf.real(cim) \
                    + tf.imag(tf.expand_dims(cproj, 1)) * tf.imag(cim)  # r * s * i * t
    power_I = tf.real(cproj) ** 2 + tf.imag(cproj) ** 2  # r * i * t

    g_I =tf.complex(envelope_tensor, tf.zeros_like(envelope_tensor)) * tf.expand_dims(cproj, 1) - cim  # r * s * i * t

    sigma2_I = tf.real(g_I) ** 2 + tf.imag(g_I) ** 2  # r * s * i * t

    tmp = tf.reduce_sum(sigma2_I / sigma2_coloured_tensor, reduction_indices=3)  # r * s * i

    e_I = div_in_tensor * tmp + logW_I_tensor  # r * s * i

    g_I *= tf.complex(ctf_tensor, tf.zeros_like(ctf_tensor))  # r * s * i * t


    etmp = my_logsumexp_tensorflow(e_I)  # r * s
    e_S = etmp + logW_S_tensor  # r * s

    tmp = logW_S_tensor + tf.expand_dims(logW_R_tensor, 1)  # r * s
    phitmp = tf.exp(e_I - tf.expand_dims(etmp, 2))  # r * s * i
    I_tmp = tf.expand_dims(tmp, 2) + e_I

    correlation_S = tf.reduce_sum(tf.expand_dims(phitmp, 3) * correlation_I, reduction_indices=2)  # r * s * t
    power_S = tf.reduce_sum(tf.expand_dims(phitmp, 3) * tf.expand_dims(power_I, 1), reduction_indices=2)  # r * s * t
    sigma2_S = tf.reduce_sum(tf.expand_dims(phitmp, 3) * sigma2_I, reduction_indices=2)  # r * s * t
    g_S = tf.reduce_sum(tf.complex(tf.expand_dims(phitmp, 3), tf.zeros_like(tf.expand_dims(phitmp, 3)))
        * g_I, reduction_indices=2)  # r * s * t

    etmp = my_logsumexp_tensorflow(e_S)  # r
    e_R = etmp + logW_R_tensor  # r

    tmp = logW_R_tensor  # r
    phitmp = tf.exp(e_S - tf.expand_dims(etmp, 1))  # r * s
    S_tmp = tf.expand_dims(tmp, 1) + e_S
    correlation_R = tf.reduce_sum(tf.expand_dims(phitmp, 2) * correlation_S, reduction_indices=1)  # r * t
    power_R = tf.reduce_sum(tf.expand_dims(phitmp, 2) * power_S, reduction_indices=1)  # r * t
    sigma2_R = tf.reduce_sum(tf.expand_dims(phitmp, 2) * sigma2_S, reduction_indices=1)  # r * t

    g = tf.reduce_sum(tf.complex(tf.expand_dims(phitmp, 2), tf.zeros_like(tf.expand_dims(phitmp, 2)))
        * g_S, reduction_indices=1)  # r * t

    tmp = -2.0 * div_in_tensor
    nttmp = tmp * envelope_tensor / sigma2_coloured_tensor

    e = my_logsumexp_tensorflow(e_R)
    lse_in = -e

    # Noise estimate
    phitmp = e_R - e  # r
    R_tmp = phitmp
    phitmp = tf.exp(phitmp)

    sigma2_est = tf.squeeze(tf.matmul(tf.expand_dims(phitmp, 0), sigma2_R), squeeze_dims=[0])
    correlation = tf.squeeze(tf.matmul(tf.expand_dims(phitmp, 0), correlation_R), squeeze_dims=[0])
    power = tf.squeeze(tf.matmul(tf.expand_dims(phitmp, 0), power_R), squeeze_dims=[0])

    global func
    global inputs
    global objectives
    func = tf.Session()
    inputs = [slices_tensor,
              S_tensor,
              envelope_tensor,
              ctf_tensor,
              d_tensor,
              logW_S_tensor,
              logW_I_tensor,
              logW_R_tensor,
              div_in_tensor,
              sigma2_coloured_tensor]
    objectives = [g, I_tmp, S_tmp, R_tmp, sigma2_est, correlation, power, nttmp, lse_in, phitmp]
Developer ID: hqythu, Project: Cryo-EM-3D-Reconstruction, Lines of code: 85, Source file: objective_tensorflow_kernels.py


Example 17: _optimize_2s_local

    def _optimize_2s_local(self,
                           thresh=1E-10,
                           D=None,
                           ncv=40,
                           Ndiag=10,
                           landelta=1E-5,
                           landeltaEta=1E-5,
                           verbose=0):
        raise NotImplementedError()
        mpol = self.mpo[self.mpo.pos - 1]
        mpor = self.mpo[self.mpo.pos]
        Ml, Mc, dl, dlp = mpol.shape
        Mc, Mr, dr, drp = mpor.shape
        mpo = tf.reshape(
            ncon.ncon([mpol, mpor], [[-1, 1, -3, -5], [1, -2, -4, -6]]),
            [Ml, Mr, dl * dr, dlp * drp])
        initial = ncon.ncon([
            self.mps[self.mps.pos-1],
            self.mps.mat,
            self.mps[self.mps.pos]],
                            [[-1,-2,1],[1,2],[2,-3,-4]]
        )
        Dl,dl,dr,Dr=initial.shape
        tf.reshape(initial,[Dl,dl*dr,Dr])
        if self.walltime_log:
            t1=time.time()
            
        nit, vecs, alpha, beta = LZ.do_lanczos(
            L=self.left_envs[self.mps.pos - 1],
            mpo=mpo,
            R=self.right_envs[self.mps.pos],
            initial_state=initial,
            ncv=ncv,
            delta=landelta
        )
        if self.walltime_log:
            self.walltime_log(lan=[(time.time()-t1)/float(nit)]*int(nit),QR=[],add_layer=[],num_lan=[int(nit)])                        
        
        temp = tf.reshape(
            tf.reshape(opt, [
                self.mps.D[self.mps.pos - 1], dlp, drp,
                self.mps.D[self.mps.pos + 1]
            ]), [])
        opt.split(mps_merge_data).transpose(0, 2, 3, 1).merge([[0, 1], [2, 3]])

        U, S, V = temp.svd(truncation_threshold=thresh, D=D)
        Dnew = S.shape[0]
        if verbose > 0:
            stdout.write(
                "\rTS-DMRG it=%i/%i, sites=(%i,%i)/%i: optimized E=%.16f+%.16f at D=%i"
                % (self._it, self.Nsweeps, self.mps.pos - 1, self.mps.pos,
                   len(self.mps), tf.real(e), tf.imag(e), Dnew))
            stdout.flush()
        if verbose > 1:
            print("")
        
        Z = np.sqrt(ncon.ncon([S, S], [[1], [1]]))
        self.mps.mat = S.diag() / Z

        self.mps[self.mps.pos - 1] = U.split([merge_data[0],
                                              [U.shape[1]]]).transpose(0, 2, 1)
        self.mps[self.mps.pos] = V.split([[V.shape[0]],
                                          merge_data[1]]).transpose(0, 2, 1)
        self.left_envs[self.mps.pos] = self.add_layer(
            B=self.left_envs[self.mps.pos - 1],
            mps_tensor=self.mps[self.mps.pos - 1],
            mpo_tensor=self.mpo[self.mps.pos - 1],
            conj_mps_tensor=self.mps[self.mps.pos - 1],
            direction=1
        )
        
        self.right_envs[self.mps.pos - 1] = self.add_layer(
            B=self.right_envs[self.mps.pos],
            mps_tensor=self.mps[self.mps.pos],
            mpo_tensor=self.mpo[self.mps.pos],
            conj_mps_tensor=self.mps[self.mps.pos],
            direction=-1
        )
        return e
Developer ID: zoltanegyed, Project: TensorNetwork, Lines of code: 79, Source file: DMRG.py


Example 18: compose

def compose(input, rank=3):
    return input
    real = tf.real(input)
    imag = tf.imag(input)
    return tf.concat(rank, [real, imag])
Developer ID: 255BITS, Project: DCGAN-tensorflow, Lines of code: 5, Source file: tensorflow_wav.py



Note: The tensorflow.imag examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.

