
Python tensorflow.complex Function Code Examples


This article collects typical usage examples of the Python function tensorflow.complex. If you have been wondering what exactly tf.complex does, how to call it, or what real-world usage looks like, the curated code examples below should help.



Twenty code examples of the complex function are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
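Before the examples, here is a minimal sketch of the basic tf.complex contract (not taken from the projects below; written against the TensorFlow 1.x API that the examples use, with illustrative values): two real tensors with the same shape and float dtype are combined into one complex tensor, which tf.abs, tf.real, and tf.imag can then decompose again.

import tensorflow as tf

# Two real tensors with matching shape and dtype (float32 -> complex64).
real = tf.constant([2.25, 3.25])
imag = tf.constant([4.75, 5.75])
z = tf.complex(real, imag)       # [2.25+4.75j, 3.25+5.75j]

# Companions that appear throughout the examples below:
magnitude = tf.abs(z)            # |z| = sqrt(real^2 + imag^2)
re, im = tf.real(z), tf.imag(z)  # recover the real and imaginary parts

with tf.Session() as sess:
    print(sess.run([z, magnitude]))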

Example 1: _operator_and_mat_and_feed_dict

  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    shape = list(shape)
    diag_shape = shape[:-1]

    # Upper triangle will be ignored.
    # Use a diagonal that ensures this matrix is well conditioned.
    tril = tf.random_normal(shape=shape, dtype=dtype.real_dtype)
    diag = tf.random_uniform(
        shape=diag_shape, dtype=dtype.real_dtype, minval=2., maxval=3.)
    if dtype.is_complex:
      tril = tf.complex(
          tril, tf.random_normal(shape, dtype=dtype.real_dtype))
      diag = tf.complex(
          diag, tf.random_uniform(
              shape=diag_shape, dtype=dtype.real_dtype, minval=2., maxval=3.))

    tril = tf.matrix_set_diag(tril, diag)

    tril_ph = tf.placeholder(dtype=dtype)

    if use_placeholder:
      # Evaluate the tril here because (i) you cannot feed a tensor, and (ii)
      # tril is random and we want the same value used for both mat and
      # feed_dict.
      tril = tril.eval()
      operator = linalg.LinearOperatorTriL(tril_ph)
      feed_dict = {tril_ph: tril}
    else:
      operator = linalg.LinearOperatorTriL(tril)
      feed_dict = None

    mat = tf.matrix_band_part(tril, -1, 0)

    return operator, mat, feed_dict
Author: Hwhitetooth | Project: tensorflow | Lines: 34 | Source: linear_operator_tril_test.py


Example 2: __loss__

    def __loss__(self):
        """
        Calculate loss
        :return:
        """

        # Context loss L2
        predict_image = tf.abs(tf.complex(real=self.predict_g2['real'], imag=self.predict_g2['imag']))
        label_image = tf.abs(tf.complex(real=self.labels['real'], imag=self.labels['imag']))
        self.context_loss = tf.reduce_mean(tf.square(tf.contrib.layers.flatten(predict_image - label_image)))

        # self.context_loss = tf.reduce_mean(tf.square(real_diff) + tf.square(imag_diff), name='Context_loss_mean')
        print("You are using L2 loss")

        tf.summary.scalar('g_loss_context_only', self.context_loss, collections='G2')

        self.g_loss = self.FLAGS.gen_loss_context * self.context_loss
        # self.g_loss = self.FLAGS.gen_loss_adversarial * g_loss + self.FLAGS.gen_loss_context * context_loss
        tf.summary.scalar('g_loss_plus_context', self.g_loss, collections='G2')

        # if len(self.regularization_values) > 0:
        # reg_loss_g = self.reg_w * tf.reduce_sum(self.regularization_values)
        self.reg_loss_g = self.get_weights_regularization(dump=self.FLAGS.dump_debug, collection='G2')
        self.g_loss_no_reg = self.g_loss
        self.g_loss += self.reg_loss_g
        if self.FLAGS.dump_debug:
            tf.summary.scalar('g_loss_plus_context_plus_reg', self.g_loss, collections='G2')
            tf.summary.scalar('g_loss_reg_only', self.reg_loss_g, collections='D')
Author: shohad25 | Project: thesis | Lines: 28 | Source: k_space_gl_g2_unet_DabsGloss.py


Example 3: _inference

    def _inference(self, x, dropout):
        with tf.name_scope('conv1'):
            # Transform to Fourier domain
            x_2d = tf.reshape(x, [-1, 28, 28])
            x_2d = tf.complex(x_2d, 0)
            xf_2d = tf.fft2d(x_2d)
            xf = tf.reshape(xf_2d, [-1, NFEATURES])
            xf = tf.expand_dims(xf, 1)  # NSAMPLES x 1 x NFEATURES
            xf = tf.transpose(xf)  # NFEATURES x 1 x NSAMPLES
            # Filter
            Wreal = self._weight_variable([int(NFEATURES/2), self.F, 1])
            Wimg = self._weight_variable([int(NFEATURES/2), self.F, 1])
            W = tf.complex(Wreal, Wimg)
            xf = xf[:int(NFEATURES/2), :, :]
            yf = tf.matmul(W, xf)  # for each feature
            yf = tf.concat([yf, tf.conj(yf)], axis=0)
            yf = tf.transpose(yf)  # NSAMPLES x NFILTERS x NFEATURES
            yf_2d = tf.reshape(yf, [-1, 28, 28])
            # Transform back to spatial domain
            y_2d = tf.ifft2d(yf_2d)
            y_2d = tf.real(y_2d)
            y = tf.reshape(y_2d, [-1, self.F, NFEATURES])
            # Bias and non-linearity
            b = self._bias_variable([1, self.F, 1])
#            b = self._bias_variable([1, self.F, NFEATURES])
            y += b  # NSAMPLES x NFILTERS x NFEATURES
            y = tf.nn.relu(y)
        with tf.name_scope('fc1'):
            W = self._weight_variable([self.F*NFEATURES, NCLASSES])
            b = self._bias_variable([NCLASSES])
            y = tf.reshape(y, [-1, self.F*NFEATURES])
            y = tf.matmul(y, W) + b
        return y
Author: hyzcn | Project: cnn_graph | Lines: 33 | Source: models.py


Example 4: __D__

    def __D__(self, input_d):
        """
        Define the discriminator
        """
        # Input d holds real & imaginary values; the discriminative decision is based on the reconstructed image.
        input_to_discriminator = input_d
        org = input_to_discriminator[0]
        fake = input_to_discriminator[1]

        rec_org = tf.abs(tf.expand_dims(input=tf.complex(real=org[:, 0, :, :], imag=org[:, 1, :, :]), dim=1))
        rec_fake = tf.abs(tf.expand_dims(input=tf.complex(real=fake[:, 0, :, :], imag=fake[:, 1, :, :]), dim=1))
        tf.summary.image('D_x_input_reconstructed' + 'Original', tf.transpose(rec_org, (0,2,3,1)), collections='D', max_outputs=4)
        tf.summary.image('D_x_input_reconstructed' + 'Fake', tf.transpose(rec_fake, (0,2,3,1)), collections='G2', max_outputs=4)
        input_to_discriminator = tf.concat(input_to_discriminator, axis=0)

        # Model convolutions
        out_dim = 8  # 128x128
        self.conv_1_d = ops.conv2d(input_to_discriminator, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1, name="D_conv_1")
        self.pool_1_d = tf.layers.max_pooling2d(self.conv_1_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_1")
        self.conv_1_bn_d = ops.batch_norm(self.pool_1_d, self.train_phase, decay=0.98, name="D_bn1")
        # self.relu_1_d = tf.nn.relu(self.conv_1_bn_d)
        self.relu_1_d = ops.lrelu(self.conv_1_bn_d, name="D_relu1")

        out_dim = 16  # 64x64
        self.conv_2_d = ops.conv2d(self.relu_1_d, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1,
                                            name="D_conv_2")
        self.pool_2_d = tf.layers.max_pooling2d(self.conv_2_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_2")
        self.conv_2_bn_d = ops.batch_norm(self.pool_2_d, self.train_phase, decay=0.98, name="D_bn2")
        # self.relu_2_d = tf.nn.relu(self.conv_2_bn_d)
        self.relu_2_d = ops.lrelu(self.conv_2_bn_d, name="D_relu2")

        # out_dim = 32  # 32x32
        out_dim = 8  # 32x32
        self.conv_3_d = ops.conv2d(self.relu_2_d, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1,
                                            name="D_conv_3")
        self.pool_3_d = tf.layers.max_pooling2d(self.conv_3_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_3")
        self.conv_3_bn_d = ops.batch_norm(self.pool_3_d, self.train_phase, decay=0.98, name="D_bn3")
        # self.relu_3_d = tf.nn.relu(self.conv_3_bn_d)
        self.relu_3_d = ops.lrelu(self.conv_3_bn_d, name="D_relu3")

        # out_dim = 16  # 16x16
        # self.conv_4_d = ops.conv2d(self.relu_3_d, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1,
        #                                     name="D_conv_4")
        #self.pool_4_d = tf.layers.max_pooling2d(self.conv_4_d, pool_size=[2, 2], strides=2, padding='same',
        #                                      data_format='channels_first',name="D_pool_4")
        # self.conv_4_bn_d = ops.batch_norm(self.pool_4_d, self.train_phase, decay=0.98, name="D_bn4")
        # # self.relu_4_d = tf.nn.relu(self.conv_4_bn_d)
        # self.relu_4_d = ops.lrelu(self.conv_4_bn_d)

        out_dim = 1
        self.affine_1_d = ops.linear(tf.contrib.layers.flatten(self.relu_3_d), output_size=out_dim, scope="D_affine_1")
        predict_d = self.affine_1_d
        # Dump prediction out

        return tf.nn.sigmoid(predict_d), predict_d
Author: shohad25 | Project: thesis | Lines: 58 | Source: k_space_wgan_gl_g2_unet_Gloss.py


Example 5: bilinear_pool

    def bilinear_pool(self, x1, x2):

        p1 = tf.matmul(x1, self.C[0])
        p2 = tf.matmul(x2, self.C[1])
        pc1 = tf.complex(p1, tf.zeros_like(p1))
        pc2 = tf.complex(p2, tf.zeros_like(p2))

        conved = tf.batch_ifft(tf.batch_fft(pc1) * tf.batch_fft(pc2))
        return tf.real(conved)
Author: shmsw25 | Project: mcb-model-for-vqa | Lines: 9 | Source: CBP.py


Example 6: __loss__

    def __loss__(self):
        """
        Calculate loss
        :return:
        """
        with tf.variable_scope("discriminator") as scope:
            self.d_loss_real = tf.reduce_mean(self.predict_d_logits)
            tf.summary.scalar('d_loss_real', self.d_loss_real, collections='D')
            # scope.reuse_variables()
            self.d_loss_fake = tf.reduce_mean(self.predict_d_logits_for_g)
            tf.summary.scalar('d_loss_fake', self.d_loss_fake, collections='D')

            if self.FLAGS.dump_debug:
                tf.summary.image('D_predict_real', tf.transpose(tf.reshape(self.predict_d_logits,(-1,1,1,1)), (0, 2, 3, 1)), collections='D')
                tf.summary.image('D_predict_fake', tf.transpose(tf.reshape(self.predict_d_logits_for_g, (-1,1,1,1)), (0, 2, 3, 1)), collections='D')

        self.d_loss = self.d_loss_fake - self.d_loss_real
        tf.summary.scalar('d_loss', self.d_loss, collections='D')

        self.reg_loss_d = self.get_weights_regularization(dump=self.FLAGS.dump_debug, collection='D')
        self.d_loss_no_reg = self.d_loss
        self.d_loss += self.reg_loss_d
        if self.FLAGS.dump_debug:
            tf.summary.scalar('d_loss_plus_reg', self.d_loss, collections='D')
            tf.summary.scalar('d_loss_reg_only', self.reg_loss_d, collections='D')

        # Generative loss
        # g_loss = tf.reduce_mean(ops.binary_cross_entropy(preds=self.predict_d_for_g, targets=tf.ones_like(self.predict_d_for_g)))
        g_loss = -tf.reduce_mean(self.predict_d_logits_for_g)

        tf.summary.scalar('g_loss', g_loss, collections='G2')

        # Context loss L2
        predict_image = tf.abs(tf.complex(real=self.predict_g2['real'], imag=self.predict_g2['imag']))
        label_image = tf.abs(tf.complex(real=self.labels['real'], imag=self.labels['imag']))
        self.context_loss = tf.reduce_mean(tf.square(tf.contrib.layers.flatten(predict_image - label_image)))

        # self.context_loss = tf.reduce_mean(tf.square(real_diff) + tf.square(imag_diff), name='Context_loss_mean')
        print("You are using L2 loss")

        tf.summary.scalar('g_loss_context_only', self.context_loss, collections='G2')

        self.g_loss = self.adv_loss_w * g_loss + self.FLAGS.gen_loss_context * self.context_loss
        # self.g_loss = self.FLAGS.gen_loss_adversarial * g_loss + self.FLAGS.gen_loss_context * context_loss
        tf.summary.scalar('g_loss_plus_context', self.g_loss, collections='G2')

        # if len(self.regularization_values) > 0:
        # reg_loss_g = self.reg_w * tf.reduce_sum(self.regularization_values)
        self.reg_loss_g = self.get_weights_regularization(dump=self.FLAGS.dump_debug, collection='G2')
        self.g_loss_no_reg = self.g_loss
        self.g_loss += self.reg_loss_g
        if self.FLAGS.dump_debug:
            tf.summary.scalar('g_loss_plus_context_plus_reg', self.g_loss, collections='G2')
            tf.summary.scalar('g_loss_reg_only', self.reg_loss_g, collections='D')

        tf.summary.scalar('diff-loss', tf.abs(self.d_loss - self.g_loss), collections='G2')
Author: shohad25 | Project: thesis | Lines: 56 | Source: k_space_wgan_gl_g2_unet_Gloss.py


Example 7: __D__

    def __D__(self, input_d, input_type):
        """
        Define the discriminator
        """
        # Dump input image out
        input_real = tf.concat(axis=0, values=[input_d[0]['real'], input_d[1]['real']])
        input_imag = tf.concat(axis=0, values=[input_d[0]['imag'], input_d[1]['imag']])

        # Input d holds real & imaginary values; the discriminative decision is based on the reconstructed image.
        input_to_discriminator = self.get_reconstructed_image(real=input_real, imag=input_imag, name='Both')

        org, fake = tf.split(input_to_discriminator, num_or_size_splits=2, axis=0)

        org = tf.reshape(tf.abs(tf.complex(real=tf.squeeze(org[:, 0, :, :]), imag=tf.squeeze(org[:, 1, :, :]))),
                         shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        fake = tf.reshape(tf.abs(tf.complex(real=tf.squeeze(fake[:, 0, :, :]), imag=tf.squeeze(fake[:, 1, :, :]))),
                          shape=[-1, 1, self.dims_out[1], self.dims_out[2]])

        tf.summary.image('D_x_input_reconstructed' + 'Original', tf.transpose(org, (0, 2, 3, 1)), collections='D',
                         max_outputs=4)
        tf.summary.image('D_x_input_reconstructed' + 'Fake', tf.transpose(fake, (0, 2, 3, 1)), collections='G',
                         max_outputs=4)

        # Model convolutions
        out_dim = 8    # 256x256 => 128x128
        conv1, pool1 = ops.conv_conv_pool(input_to_discriminator, n_filters=[out_dim, out_dim], activation=tf.nn.relu,
                                          training=self.train_phase, name='D_block_1')

        out_dim = 16   # 128x128 => 64x64
        conv2, pool2 = ops.conv_conv_pool(pool1, n_filters=[out_dim, out_dim], activation=tf.nn.relu,
                                          training=self.train_phase, name='D_block_2')

        out_dim = 32   # 64x64 => 32x32
        conv3, pool3 = ops.conv_conv_pool(pool2, n_filters=[out_dim, out_dim], activation=tf.nn.relu,
                                          training=self.train_phase, name='D_block_3')

        out_dim = 64   # 32x32 => 16x16
        conv4, pool4 = ops.conv_conv_pool(pool3, n_filters=[out_dim, out_dim], activation=tf.nn.relu,
                                          training=self.train_phase, name='D_block_4')

        out_dim = 128  # 16x16
        conv5 = ops.conv_conv_pool(pool4, n_filters=[out_dim, out_dim], activation=tf.nn.relu,
                                   training=self.train_phase, name='D_block_5', pool=False)

        out_dim = 1
        self.affine_1_d = ops.linear(tf.contrib.layers.flatten(conv5), output_size=out_dim, scope="D_affine_1")
        predict_d = self.affine_1_d
        # Dump prediction out

        return tf.nn.sigmoid(predict_d), predict_d
Author: shohad25 | Project: thesis | Lines: 50 | Source: k_space_gan_unet2.py


Example 8: __D__

    def __D__(self, input_d, input_type):
        """
        Define the discriminator
        """
        # Dump input image out
        input_real = tf.concat(axis=0, values=[input_d[0]['real'], input_d[1]['real']])
        input_imag = tf.concat(axis=0, values=[input_d[0]['imag'], input_d[1]['imag']])
        input_to_discriminator = tf.concat([input_real, input_imag], axis=1)

        org, fake = tf.split(input_to_discriminator, num_or_size_splits=2, axis=0)
        #
        org = tf.reshape(tf.abs(tf.complex(real=tf.squeeze(org[:,0,:,:]), imag=tf.squeeze(org[:,1,:,:]))), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        fake = tf.reshape(tf.abs(tf.complex(real=tf.squeeze(fake[:,0,:,:]), imag=tf.squeeze(fake[:,1,:,:]))), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        #
        tf.summary.image('D_x_input_reconstructed' + 'Original', tf.transpose(org, (0,2,3,1)), collections='D', max_outputs=4)
        tf.summary.image('D_x_input_reconstructed' + 'Fake', tf.transpose(fake, (0,2,3,1)), collections='G', max_outputs=4)

        # Model convolutions
        out_dim = 8  # 128x128
        self.conv_1_d = ops.conv2d(input_to_discriminator, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1, name="D_conv_1")
        self.pool_1_d = tf.layers.max_pooling2d(self.conv_1_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_1")
        self.conv_1_bn_d = ops.batch_norm(self.pool_1_d, self.train_phase, decay=0.98, name="D_bn1")
        # self.relu_1_d = tf.nn.relu(self.conv_1_bn_d)
        self.relu_1_d = ops.lrelu(self.conv_1_bn_d)

        out_dim = 16  # 64x64
        self.conv_2_d = ops.conv2d(self.relu_1_d, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1,
                                            name="D_conv_2")
        self.pool_2_d = tf.layers.max_pooling2d(self.conv_2_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_2")
        self.conv_2_bn_d = ops.batch_norm(self.pool_2_d, self.train_phase, decay=0.98, name="D_bn2")
        # self.relu_2_d = tf.nn.relu(self.conv_2_bn_d)
        self.relu_2_d = ops.lrelu(self.conv_2_bn_d)

        # out_dim = 32  # 32x32
        out_dim = 8  # 32x32
        self.conv_3_d = ops.conv2d(self.relu_2_d, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1,
                                            name="D_conv_3")
        self.pool_3_d = tf.layers.max_pooling2d(self.conv_3_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_3")
        self.conv_3_bn_d = ops.batch_norm(self.pool_3_d, self.train_phase, decay=0.98, name="D_bn3")
        self.relu_3_d = ops.lrelu(self.conv_3_bn_d)

        out_dim = 1
        self.affine_1_d = ops.linear(tf.contrib.layers.flatten(self.relu_3_d), output_size=out_dim, scope="D_affine_1")
        predict_d = self.affine_1_d
        # Dump prediction out

        return tf.nn.sigmoid(predict_d), predict_d
Author: shohad25 | Project: thesis | Lines: 50 | Source: k_space_wgan.py


Example 9: test_complex_tensor_with_imag_zero_doesnt_raise

 def test_complex_tensor_with_imag_zero_doesnt_raise(self):
   x = tf.convert_to_tensor([1., 0, 3])
   y = tf.convert_to_tensor([0., 0, 0])
   z = tf.complex(x, y)
   with self.test_session():
     # Should not raise.
     linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
Author: Hwhitetooth | Project: tensorflow | Lines: 7 | Source: linear_operator_util_test.py


Example 10: get_reconstructed_image

    def get_reconstructed_image(self, real, imag, name=None):
        """
        :param real:
        :param imag:
        :param name:
        :return:
        """
        complex_k_space_label = tf.complex(real=tf.squeeze(real), imag=tf.squeeze(imag), name=name+"_complex_k_space")
        rec_image_complex = tf.expand_dims(tf.ifft2d(complex_k_space_label), axis=1)
        
        rec_image_real = tf.reshape(tf.real(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        rec_image_imag = tf.reshape(tf.imag(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])

        # Shifting
        top, bottom = tf.split(rec_image_real, num_or_size_splits=2, axis=2)
        top_left, top_right = tf.split(top, num_or_size_splits=2, axis=3)
        bottom_left, bottom_right = tf.split(bottom, num_or_size_splits=2, axis=3)

        top_shift = tf.concat(axis=3, values=[bottom_right, bottom_left])
        bottom_shift = tf.concat(axis=3, values=[top_right, top_left])
        shifted_image = tf.concat(axis=2, values=[top_shift, bottom_shift])


        # Shifting
        top_imag, bottom_imag = tf.split(rec_image_imag, num_or_size_splits=2, axis=2)
        top_left_imag, top_right_imag = tf.split(top_imag, num_or_size_splits=2, axis=3)
        bottom_left_imag, bottom_right_imag = tf.split(bottom_imag, num_or_size_splits=2, axis=3)

        top_shift_imag = tf.concat(axis=3, values=[bottom_right_imag, bottom_left_imag])
        bottom_shift_imag = tf.concat(axis=3, values=[top_right_imag, top_left_imag])
        shifted_image_imag = tf.concat(axis=2, values=[top_shift_imag, bottom_shift_imag])

        shifted_image_two_channels = tf.stack([shifted_image[:,0,:,:], shifted_image_imag[:,0,:,:]], axis=1)
        return shifted_image_two_channels
Author: shohad25 | Project: thesis | Lines: 34 | Source: k_space_wgan_gl_g2_unet_Gloss.py


Example 11: test_assert_positive_definite_does_not_raise_if_pd_and_complex

 def test_assert_positive_definite_does_not_raise_if_pd_and_complex(self):
   with self.test_session():
     x = [1., 2.]
     y = [1., 0.]
     diag = tf.complex(x, y)  # Re[diag] > 0.
     # Should not fail
     linalg.LinearOperatorDiag(diag).assert_positive_definite().run()
Author: RapidApplicationDevelopment | Project: tensorflow | Lines: 7 | Source: linear_operator_diag_test.py


Example 12: test_complex_tensor_with_nonzero_imag_raises

 def test_complex_tensor_with_nonzero_imag_raises(self):
   x = tf.convert_to_tensor([1., 2, 0])
   y = tf.convert_to_tensor([1., 2, 0])
   z = tf.complex(x, y)
   with self.test_session():
     with self.assertRaisesOpError("ABC123"):
       linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
Author: Hwhitetooth | Project: tensorflow | Lines: 7 | Source: linear_operator_util_test.py


Example 13: _operator_and_mat_and_feed_dict

  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    shape = list(shape)
    diag_shape = shape[:-1]

    diag = tf.random_normal(diag_shape, dtype=dtype.real_dtype)
    if dtype.is_complex:
      diag = tf.complex(
          diag, tf.random_normal(diag_shape, dtype=dtype.real_dtype))

    diag_ph = tf.placeholder(dtype=dtype)

    if use_placeholder:
      # Evaluate the diag here because (i) you cannot feed a tensor, and (ii)
      # diag is random and we want the same value used for both mat and
      # feed_dict.
      diag = diag.eval()
      operator = linalg.LinearOperatorDiag(diag_ph)
      feed_dict = {diag_ph: diag}
    else:
      operator = linalg.LinearOperatorDiag(diag)
      feed_dict = None

    mat = tf.matrix_diag(diag)

    return operator, mat, feed_dict
Author: Hwhitetooth | Project: tensorflow | Lines: 25 | Source: linear_operator_diag_test.py


Example 14: __D__

    def __D__(self, input_d):
        """
        Define the discriminator
        """
        # Input d holds real & imaginary values; the discriminative decision is based on the reconstructed image.
        input_to_discriminator = input_d
        org = input_to_discriminator[0]
        fake = input_to_discriminator[1]

        rec_org = tf.abs(tf.expand_dims(input=tf.complex(real=org[:, 0, :, :], imag=org[:, 1, :, :]), dim=1))
        rec_fake = tf.abs(tf.expand_dims(input=tf.complex(real=fake[:, 0, :, :], imag=fake[:, 1, :, :]), dim=1))
        tf.summary.image('D_x_input_reconstructed' + 'Original', tf.transpose(rec_org, (0,2,3,1)), collections='D', max_outputs=4)
        tf.summary.image('D_x_input_reconstructed' + 'Fake', tf.transpose(rec_fake, (0,2,3,1)), collections='G2', max_outputs=4)
        input_to_discriminator = tf.concat(input_to_discriminator, axis=0)

        return None
Author: shohad25 | Project: thesis | Lines: 16 | Source: k_space_gl_g2_unet_DabsGloss.py


Example 15: random_normal

def random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None):
  """Tensor with (possibly complex) Gaussian entries.

  Samples are distributed like

  ```
  N(mean, stddev^2), if dtype is real,
  X + iY,  where X, Y ~ N(mean, stddev^2) if dtype is complex.
  ```

  Args:
    shape:  `TensorShape` or Python list.  Shape of the returned tensor.
    mean:  `Tensor` giving mean of normal to sample from.
    stddev:  `Tensor` giving stdev of normal to sample from.
    dtype:  `TensorFlow` `dtype` or numpy dtype
    seed:  Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = tf.as_dtype(dtype)

  with tf.name_scope("random_normal"):
    samples = tf.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 1234
      more_samples = tf.random_normal(
          shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
      samples = tf.complex(samples, more_samples)
    return samples
Author: curtiszimmerman | Project: tensorflow | Lines: 32 | Source: linear_operator_test_util.py


Example 16: test_assert_non_singular_does_not_raise_for_complex_nonsingular

 def test_assert_non_singular_does_not_raise_for_complex_nonsingular(self):
   with self.test_session():
     x = [1., 0.]
     y = [0., 1.]
     diag = tf.complex(x, y)
     # Should not raise.
     linalg.LinearOperatorDiag(diag).assert_non_singular().run()
开发者ID:RapidApplicationDevelopment,项目名称:tensorflow,代码行数:7,代码来源:linear_operator_diag_test.py


Example 17: __call__

    def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__):
            unitary_hidden_state, secondary_cell_hidden_state = tf.split(1, 2, state)

            mat_in = tf.get_variable('mat_in', [self.input_size, self.state_size*2])
            mat_out = tf.get_variable('mat_out', [self.state_size*2, self.output_size])
            in_proj = tf.matmul(inputs, mat_in)
            # tf.complex takes two real tensors, so unpack the two halves of the split
            in_proj_c = tf.complex(*tf.split(1, 2, in_proj))
            out_state = modReLU(in_proj_c +
                ulinear(unitary_hidden_state, self.state_size),
                # bias needs a static shape for tf.get_variable; the unitary
                # hidden state has self.state_size units
                tf.get_variable(name='bias', dtype=tf.float32, shape=[self.state_size],
                                initializer=tf.constant_initializer(0.)),
                scope=scope)

        with tf.variable_scope('unitary_output'):
            # computes data linear, unitary linear and summation -- TODO: should be complex output
            unitary_linear_output_real = linear.linear([tf.real(out_state), tf.imag(out_state), inputs], True, 0.0)

        with tf.variable_scope('scale_nonlinearity'):
            modulus = tf.complex_abs(unitary_linear_output_real)
            rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-7)

        # Transition to data shortcut connection (hidden_bias and out_bias are
        # expected to be defined elsewhere in the original module).
        out_ = tf.matmul(tf.concat(1, [tf.real(out_state), tf.imag(out_state)]), mat_out) + out_bias

        # Hidden state is complex, but the output is completely real.
        return out_, out_state
Author: Liubinggunzu | Project: tensorflow_with_latest_papers | Lines: 31 | Source: unitary_rnn_cell_modern.py


Example 18: random_spatial_to_spectral

 def random_spatial_to_spectral(self, channels, filters, height, width):
     # Create a truncated random image, then compute the FFT of that image and return its values
     # used to initialize spectrally parameterized filters
     # an alternative to this is to initialize directly in the spectral domain
     w = tf.truncated_normal([channels, filters, height, width], mean=0, stddev=0.01)
     fft = tf.batch_fft2d(tf.complex(w, 0.0 * w), name='spectral_initializer')
     return fft.eval(session=self.sess)
Author: el3ment | Project: spectral_representations_for_convolutional_neural_networks | Lines: 7 | Source: main.py


Example 19: random_herm

def random_herm(D, dtype):
    if dtype.is_complex:
        h = tf.complex(
            tf.random_normal((D, D), dtype=dtype.real_dtype),
            tf.random_normal((D, D), dtype=dtype.real_dtype))
    else:
        h = tf.random_normal((D, D), dtype=dtype)
    return 0.5 * (h + tf.linalg.adjoint(h))
Author: zoltanegyed | Project: TensorNetwork | Lines: 8 | Source: ttn_1d_uniform.py


Example 20: test_nonzero_complex_tensor_doesnt_raise

 def test_nonzero_complex_tensor_doesnt_raise(self):
   x = tf.convert_to_tensor([1., 0, 3])
   y = tf.convert_to_tensor([1., 2, 0])
   z = tf.complex(x, y)
   with self.test_session():
     # Should not raise.
     linear_operator_util.assert_no_entries_with_modulus_zero(
         z, message="ABC123").run()
Author: Hwhitetooth | Project: tensorflow | Lines: 8 | Source: linear_operator_util_test.py



Note: The tensorflow.complex examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Consult the corresponding project's License before distributing or using the code; do not reproduce without permission.

