
C++ PodZero Function Code Examples


This article collects typical usage examples of the C++ PodZero function. If you have been wondering what PodZero does, how it is called, and what real call sites look like, the curated examples below should help.



Twenty code examples of the PodZero function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ samples.
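
Before diving in, it helps to know what PodZero actually is: a small helper from Mozilla's MFBT header mozilla/PodOperations.h that zero-fills plain-old-data (POD) objects with memset. The following is a simplified sketch of its two overloads, written for illustration rather than copied from the upstream header (the real one adds inline annotations and compile-time guards against misuse):

#include <stddef.h>  // size_t
#include <string.h>  // memset

namespace mozilla {

// Zero a single POD object, e.g. PodZero(&mVPX).
template <typename T>
static void PodZero(T* aT)
{
  memset(aT, 0, sizeof(T));
}

// Zero aNElem consecutive POD elements, e.g. PodZero(buf.Elements(), len).
template <typename T>
static void PodZero(T* aT, size_t aNElem)
{
  memset(aT, 0, aNElem * sizeof(T));
}

} // namespace mozilla

Both call patterns recur throughout the examples below: the single-object form clears codec state structs before use, and the array form clears freshly sized audio buffers.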

Example 1: CopyChunkToBlock

template <typename T>
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData, aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}
Developer: Wafflespeanut, Project: gecko-dev, Lines: 29, Source: AudioNodeExternalInputStream.cpp


Example 2: mImageContainer

VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
  : mImageContainer(aParams.mImageContainer)
  , mTaskQueue(aParams.mTaskQueue)
  , mInfo(aParams.VideoConfig())
  , mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType))
{
  MOZ_COUNT_CTOR(VPXDecoder);
  PodZero(&mVPX);
  PodZero(&mVPXAlpha);
}
Developer: Wafflespeanut, Project: gecko-dev, Lines: 10, Source: VPXDecoder.cpp


Example 3: m_frame

FFTConvolver::FFTConvolver(size_t fftSize)
    : m_frame(fftSize)
    , m_readWriteIndex(0)
{
  m_inputBuffer.SetLength(fftSize);
  PodZero(m_inputBuffer.Elements(), fftSize);
  m_outputBuffer.SetLength(fftSize);
  PodZero(m_outputBuffer.Elements(), fftSize);
  m_lastOverlapBuffer.SetLength(fftSize / 2);
  PodZero(m_lastOverlapBuffer.Elements(), fftSize / 2);
}
Developer: AOSC-Dev, Project: Pale-Moon, Lines: 11, Source: FFTConvolver.cpp


Example 4: ProcessBlock

  void ProcessBlock(AudioNodeStream* aStream,
                    GraphTime aFrom,
                    const AudioBlock& aInput,
                    AudioBlock* aOutput,
                    bool* aFinished) override
  {
    // This node is not connected to anything. Per spec, we don't fire the
    // onaudioprocess event. We also want to clear out the input and output
    // buffer queue, and output a null buffer.
    if (!mIsConnected) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      mSharedBuffers->Reset();
      mInputWriteIndex = 0;
      return;
    }

    // The input buffer is allocated lazily when non-null input is received.
    if (!aInput.IsNull() && !mInputBuffer) {
      mInputBuffer = ThreadSharedFloatArrayBufferList::
        Create(mInputChannelCount, mBufferSize, fallible);
      if (mInputBuffer && mInputWriteIndex) {
        // Zero leading for null chunks that were skipped.
        for (uint32_t i = 0; i < mInputChannelCount; ++i) {
          float* channelData = mInputBuffer->GetDataForWrite(i);
          PodZero(channelData, mInputWriteIndex);
        }
      }
    }

    // First, record our input buffer, if its allocation succeeded.
    uint32_t inputChannelCount = mInputBuffer ? mInputBuffer->GetChannels() : 0;
    for (uint32_t i = 0; i < inputChannelCount; ++i) {
      float* writeData = mInputBuffer->GetDataForWrite(i) + mInputWriteIndex;
      if (aInput.IsNull()) {
        PodZero(writeData, aInput.GetDuration());
      } else {
        MOZ_ASSERT(aInput.GetDuration() == WEBAUDIO_BLOCK_SIZE, "sanity check");
        MOZ_ASSERT(aInput.ChannelCount() == inputChannelCount);
        AudioBlockCopyChannelWithScale(static_cast<const float*>(aInput.mChannelData[i]),
                                       aInput.mVolume, writeData);
      }
    }
    mInputWriteIndex += aInput.GetDuration();

    // Now, see if we have data to output
    // Note that we need to do this before sending the buffer to the main
    // thread so that our delay time is updated.
    *aOutput = mSharedBuffers->GetOutputBuffer();

    if (mInputWriteIndex >= mBufferSize) {
      SendBuffersToMainThread(aStream, aFrom);
      mInputWriteIndex -= mBufferSize;
    }
  }
Developer: heiher, Project: gecko-dev, Lines: 54, Source: ScriptProcessorNode.cpp


Example 5: mImageContainer

VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
    : mImageContainer(aParams.mImageContainer),
      mImageAllocator(aParams.mKnowsCompositor),
      mTaskQueue(aParams.mTaskQueue),
      mInfo(aParams.VideoConfig()),
      mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType)),
      mLowLatency(
          aParams.mOptions.contains(CreateDecoderParams::Option::LowLatency)) {
  MOZ_COUNT_CTOR(VPXDecoder);
  PodZero(&mVPX);
  PodZero(&mVPXAlpha);
}
Developer: jld, Project: gecko-dev, Lines: 12, Source: VPXDecoder.cpp


Example 6: m_frame

FFTConvolver::FFTConvolver(size_t fftSize, size_t renderPhase)
    : m_frame(fftSize)
    , m_readWriteIndex(renderPhase % (fftSize / 2))
{
  MOZ_ASSERT(fftSize >= 2 * WEBAUDIO_BLOCK_SIZE);
  m_inputBuffer.SetLength(fftSize);
  PodZero(m_inputBuffer.Elements(), fftSize);
  m_outputBuffer.SetLength(fftSize);
  PodZero(m_outputBuffer.Elements(), fftSize);
  m_lastOverlapBuffer.SetLength(fftSize / 2);
  PodZero(m_lastOverlapBuffer.Elements(), fftSize / 2);
}
Developer: LaiPhil, Project: gecko-dev, Lines: 12, Source: FFTConvolver.cpp


Example 7: LOG

nsresult
AppleATDecoder::SetupDecoder(mp4_demuxer::MP4Sample* aSample)
{
  if (mFormatID == kAudioFormatMPEG4AAC &&
      mConfig.extended_profile == 2) {
    // Check for implicit SBR signalling if stream is AAC-LC
    // This will provide us with an updated magic cookie for use with
    // GetInputAudioDescription.
    if (NS_SUCCEEDED(GetImplicitAACMagicCookie(aSample)) &&
        !mMagicCookie.Length()) {
      // nothing found yet, will try again later
      return NS_ERROR_NOT_INITIALIZED;
    }
    // An error occurred, fallback to using default stream description
  }

  LOG("Initializing Apple AudioToolbox decoder");

  AudioStreamBasicDescription inputFormat;
  PodZero(&inputFormat);
  nsresult rv =
    GetInputAudioDescription(inputFormat,
                             mMagicCookie.Length() ?
                                 mMagicCookie : *mConfig.extra_data);
  if (NS_FAILED(rv)) {
    return rv;
  }
  // Fill in the output format manually.
  PodZero(&mOutputFormat);
  mOutputFormat.mFormatID = kAudioFormatLinearPCM;
  mOutputFormat.mSampleRate = inputFormat.mSampleRate;
  mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  mOutputFormat.mBitsPerChannel = 32;
  mOutputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#else
# error Unknown audio sample type
#endif
  // Set up the decoder so it gives us one sample per frame
  mOutputFormat.mFramesPerPacket = 1;
  mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame
        = mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;

  OSStatus status = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
  if (status) {
    LOG("Error %d constructing AudioConverter", status);
    mConverter = nullptr;
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
Developer: RobertJGabriel, Project: Waterfox, Lines: 53, Source: AppleATDecoder.cpp


Example 8: mInfo

VorbisDataDecoder::VorbisDataDecoder(const CreateDecoderParams& aParams)
  : mInfo(aParams.AudioConfig())
  , mTaskQueue(aParams.mTaskQueue)
  , mPacketCount(0)
  , mFrames(0)
{
  // Zero these member vars to avoid crashes in Vorbis clear functions when
  // destructor is called before |Init|.
  PodZero(&mVorbisBlock);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisInfo);
  PodZero(&mVorbisComment);
}
Developer: luke-chang, Project: gecko-1, Lines: 13, Source: VorbisDecoder.cpp


Example 9: vorbis_info_init

RefPtr<MediaDataDecoder::InitPromise>
VorbisDataDecoder::Init()
{
  vorbis_info_init(&mVorbisInfo);
  vorbis_comment_init(&mVorbisComment);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisBlock);

  AutoTArray<unsigned char*,4> headers;
  AutoTArray<size_t,4> headerLens;
  if (!XiphExtradataToHeaders(headers, headerLens,
                              mInfo.mCodecSpecificConfig->Elements(),
                              mInfo.mCodecSpecificConfig->Length())) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }
  for (size_t i = 0; i < headers.Length(); i++) {
    if (NS_FAILED(DecodeHeader(headers[i], headerLens[i]))) {
      return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                          __func__);
    }
  }

  MOZ_ASSERT(mPacketCount == 3);

  int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
  if (r) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }

  r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
  if (r) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }

  if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec rate do not match!"));
  }
  if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec channels do not match!"));
  }

  AudioConfig::ChannelLayout layout(mVorbisDsp.vi->channels);
  if (!layout.IsValid()) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }

  return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
}
Developer: Wafflespeanut, Project: gecko-dev, Lines: 50, Source: VorbisDecoder.cpp


Example 10: vorbis_info_init

nsresult
VorbisDataDecoder::Init()
{
  vorbis_info_init(&mVorbisInfo);
  vorbis_comment_init(&mVorbisComment);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisBlock);

  size_t available = mInfo.mCodecSpecificConfig->Length();
  uint8_t *p = mInfo.mCodecSpecificConfig->Elements();
  for(int i = 0; i < 3; i++) {
    if (available < 2) {
      return NS_ERROR_FAILURE;
    }
    available -= 2;
    size_t length = BigEndian::readUint16(p);
    p += 2;
    if (available < length) {
      return NS_ERROR_FAILURE;
    }
    available -= length;
    if (NS_FAILED(DecodeHeader((const unsigned char*)p, length))) {
        return NS_ERROR_FAILURE;
    }
    p += length;
  }

  MOZ_ASSERT(mPacketCount == 3);

  int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
  if (r) {
    return NS_ERROR_FAILURE;
  }

  r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
  if (r) {
    return NS_ERROR_FAILURE;
  }

  if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec rate do not match!"));
  }
  if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec channels do not match!"));
  }

  return NS_OK;
}
Developer: rtrsparq, Project: gecko-dev, Lines: 50, Source: VorbisDecoder.cpp


Example 11: m_accumulationBuffer

ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
                                           size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer* accumulationBuffer, bool directMode)
    : m_accumulationBuffer(accumulationBuffer)
    , m_accumulationReadIndex(0)
    , m_inputReadIndex(0)
    , m_directMode(directMode)
{
    MOZ_ASSERT(impulseResponse);
    MOZ_ASSERT(accumulationBuffer);

    if (!m_directMode) {
        m_fftKernel = new FFTBlock(fftSize);
        m_fftKernel->PadAndMakeScaledDFT(impulseResponse + stageOffset, stageLength);
        m_fftConvolver = new FFTConvolver(fftSize);
    } else {
        m_directKernel.SetLength(fftSize / 2);
        PodCopy(m_directKernel.Elements(), impulseResponse + stageOffset, fftSize / 2);
        m_directConvolver = new DirectConvolver(renderSliceSize);
    }
    m_temporaryBuffer.SetLength(renderSliceSize);
    PodZero(m_temporaryBuffer.Elements(), m_temporaryBuffer.Length());

    // The convolution stage at offset stageOffset needs to have a corresponding delay to cancel out the offset.
    size_t totalDelay = stageOffset + reverbTotalLatency;

    // But, the FFT convolution itself incurs fftSize / 2 latency, so subtract this out...
    size_t halfSize = fftSize / 2;
    if (!m_directMode) {
        MOZ_ASSERT(totalDelay >= halfSize);
        if (totalDelay >= halfSize)
            totalDelay -= halfSize;
    }

    // We divide up the total delay, into pre and post delay sections so that we can schedule at exactly the moment when the FFT will happen.
    // This is coordinated with the other stages, so they don't all do their FFTs at the same time...
    int maxPreDelayLength = std::min(halfSize, totalDelay);
    m_preDelayLength = totalDelay > 0 ? renderPhase % maxPreDelayLength : 0;
    if (m_preDelayLength > totalDelay)
        m_preDelayLength = 0;

    m_postDelayLength = totalDelay - m_preDelayLength;
    m_preReadWriteIndex = 0;
    m_framesProcessed = 0; // total frames processed so far

    size_t delayBufferSize = m_preDelayLength < fftSize ? fftSize : m_preDelayLength;
    delayBufferSize = delayBufferSize < renderSliceSize ? renderSliceSize : delayBufferSize;
    m_preDelayBuffer.SetLength(delayBufferSize);
    PodZero(m_preDelayBuffer.Elements(), m_preDelayBuffer.Length());
}
Developer: 70599, Project: Waterfox, Lines: 49, Source: ReverbConvolverStage.cpp


Example 12: PodZero

void
AppleATDecoder::SetupDecoder()
{
  AudioStreamBasicDescription inputFormat, outputFormat;
  // Fill in the input format description from the stream.
  AppleUtils::GetProperty(mStream,
      kAudioFileStreamProperty_DataFormat, &inputFormat);

  // Fill in the output format manually.
  PodZero(&outputFormat);
  outputFormat.mFormatID = kAudioFormatLinearPCM;
  outputFormat.mSampleRate = inputFormat.mSampleRate;
  outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  outputFormat.mBitsPerChannel = 32;
  outputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#else
# error Unknown audio sample type
#endif
  // Set up the decoder so it gives us one sample per frame
  outputFormat.mFramesPerPacket = 1;
  outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame
        = outputFormat.mChannelsPerFrame * outputFormat.mBitsPerChannel / 8;

  OSStatus rv = AudioConverterNew(&inputFormat, &outputFormat, &mConverter);
  if (rv) {
    LOG("Error %d constructing AudioConverter", rv);
    mConverter = nullptr;
    mCallback->Error();
  }
  mHaveOutput = false;
}
Developer: andrenatal, Project: gecko-dev, Lines: 34, Source: AppleATDecoder.cpp


Example 13: mInfo

VorbisDataDecoder::VorbisDataDecoder(const AudioInfo& aConfig,
                                     FlushableTaskQueue* aTaskQueue,
                                     MediaDataDecoderCallback* aCallback)
  : mInfo(aConfig)
  , mTaskQueue(aTaskQueue)
  , mCallback(aCallback)
  , mPacketCount(0)
  , mFrames(0)
{
  // Zero these member vars to avoid crashes in Vorbis clear functions when
  // destructor is called before |Init|.
  PodZero(&mVorbisBlock);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisInfo);
  PodZero(&mVorbisComment);
}
Developer: Shaif95, Project: gecko-dev, Lines: 16, Source: VorbisDecoder.cpp


Example 14: UpMixDownMixChunk

void
AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex,
                                      const AudioBlock& aChunk,
                                      AudioBlock* aBlock,
                                      nsTArray<float>* aDownmixBuffer)
{
  nsAutoTArray<const float*,GUESS_AUDIO_CHANNELS> channels;
  UpMixDownMixChunk(&aChunk, aBlock->ChannelCount(), channels, *aDownmixBuffer);

  for (uint32_t c = 0; c < channels.Length(); ++c) {
    const float* inputData = static_cast<const float*>(channels[c]);
    float* outputData = aBlock->ChannelFloatsForWrite(c);
    if (inputData) {
      if (aInputIndex == 0) {
        AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
      } else {
        AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
      }
    } else {
      if (aInputIndex == 0) {
        PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
      }
    }
  }
}
Developer: Nazi-Nigger, Project: gecko-dev, Lines: 25, Source: AudioNodeStream.cpp


Example 15: MOZ_ASSERT

mozilla::ipc::IPCResult CompositorManagerParent::RecvReportMemory(
    ReportMemoryResolver&& aResolver) {
  MOZ_ASSERT(CompositorThreadHolder::IsInCompositorThread());
  MemoryReport aggregate;
  PodZero(&aggregate);

  // Accumulate RenderBackend usage.
  nsTArray<PCompositorBridgeParent*> compositorBridges;
  ManagedPCompositorBridgeParent(compositorBridges);
  for (auto bridge : compositorBridges) {
    static_cast<CompositorBridgeParentBase*>(bridge)->AccumulateMemoryReport(
        &aggregate);
  }

  // Accumulate Renderer usage asynchronously, and resolve.
  //
  // Note that the IPDL machinery requires aResolver to be called on this
  // thread, so we can't just pass it over to the renderer thread. We use
  // an intermediate MozPromise instead.
  wr::RenderThread::AccumulateMemoryReport(aggregate)->Then(
      CompositorThreadHolder::Loop()->SerialEventTarget(), __func__,
      [resolver = std::move(aResolver)](MemoryReport aReport) {
        resolver(aReport);
      },
      [](bool) {
        MOZ_ASSERT_UNREACHABLE("MemoryReport promises are never rejected");
      });

  return IPC_OK();
}
Developer: jasonLaster, Project: gecko-dev, Lines: 30, Source: CompositorManagerParent.cpp


Example 16: mImageContainer

AOMDecoder::AOMDecoder(const CreateDecoderParams& aParams)
  : mImageContainer(aParams.mImageContainer)
  , mTaskQueue(aParams.mTaskQueue)
  , mInfo(aParams.VideoConfig())
{
  PodZero(&mCodec);
}
Developer: bgrins, Project: gecko-dev, Lines: 7, Source: AOMDecoder.cpp


Example 17: MOZ_ASSERT

void
DelayBuffer::ReadChannels(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
                          const AudioChunk* aOutputChunk,
                          uint32_t aFirstChannel, uint32_t aNumChannelsToRead,
                          ChannelInterpretation aChannelInterpretation)
{
  uint32_t totalChannelCount = aOutputChunk->mChannelData.Length();
  uint32_t readChannelsEnd = aFirstChannel + aNumChannelsToRead;
  MOZ_ASSERT(readChannelsEnd <= totalChannelCount);

  if (mUpmixChannels.Length() != totalChannelCount) {
    mLastReadChunk = -1; // invalidate cache
  }

  float* const* outputChannels = reinterpret_cast<float* const*>
    (const_cast<void* const*>(aOutputChunk->mChannelData.Elements()));
  for (uint32_t channel = aFirstChannel;
       channel < readChannelsEnd; ++channel) {
    PodZero(outputChannels[channel], WEBAUDIO_BLOCK_SIZE);
  }

  for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    double currentDelay = aPerFrameDelays[i];
    MOZ_ASSERT(currentDelay >= 0.0);
    MOZ_ASSERT(currentDelay <= (mChunks.Length() - 1) * WEBAUDIO_BLOCK_SIZE);

    // Interpolate two input frames in case the read position does not match
    // an integer index.
    // Use the larger delay, for the older frame, first, as this is more
    // likely to use the cached upmixed channel arrays.
    int floorDelay = int(currentDelay);
    double interpolationFactor = currentDelay - floorDelay;
    int positions[2];
    positions[1] = PositionForDelay(floorDelay) + i;
    positions[0] = positions[1] - 1;

    for (unsigned tick = 0; tick < ArrayLength(positions); ++tick) {
      int readChunk = ChunkForPosition(positions[tick]);
      // mVolume is not set on default initialized chunks so handle null
      // chunks specially.
      if (!mChunks[readChunk].IsNull()) {
        int readOffset = OffsetForPosition(positions[tick]);
        UpdateUpmixChannels(readChunk, totalChannelCount,
                            aChannelInterpretation);
        double multiplier = interpolationFactor * mChunks[readChunk].mVolume;
        for (uint32_t channel = aFirstChannel;
             channel < readChannelsEnd; ++channel) {
          outputChannels[channel][i] += multiplier *
            static_cast<const float*>(mUpmixChannels[channel])[readOffset];
        }
      }

      interpolationFactor = 1.0 - interpolationFactor;
    }
  }
}
Developer: Andrel322, Project: gecko-dev, Lines: 56, Source: DelayBuffer.cpp


Example 18: CopyChunkToBlock

/**
 * Copies the data in aInput to aOffsetInBlock within aBlock.
 * aBlock must have been allocated with AllocateInputBlock and have a channel
 * count that's a superset of the channels in aInput.
 */
static void
CopyChunkToBlock(const AudioChunk& aInput, AudioChunk *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  nsAutoTArray<const void*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    channels.SetLength(aInput.ChannelCount());
    PodCopy(channels.Elements(), aInput.mChannelData.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, nullptr);
    }
  }

  uint32_t duration = aInput.GetDuration();
  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData =
      static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c])) + aOffsetInBlock;
    if (channels[c]) {
      switch (aInput.mBufferFormat) {
      case AUDIO_FORMAT_FLOAT32:
        ConvertAudioSamplesWithScale(
            static_cast<const float*>(channels[c]), outputData, duration,
            aInput.mVolume);
        break;
      case AUDIO_FORMAT_S16:
        ConvertAudioSamplesWithScale(
            static_cast<const int16_t*>(channels[c]), outputData, duration,
            aInput.mVolume);
        break;
      default:
        NS_ERROR("Unhandled format");
      }
    } else {
      PodZero(outputData, duration);
    }
  }
}
Developer: AtulKumar2, Project: gecko-dev, Lines: 48, Source: AudioNodeExternalInputStream.cpp


Example 19: PodZero

SPSData::SPSData()
{
  PodZero(this);
  // Default values when they aren't defined as per ITU-T H.264 (2014/02).
  chroma_format_idc = 1;
  video_format = 5;
  colour_primaries = 2;
  transfer_characteristics = 2;
  sample_ratio = 1.0;
}
Developer: AtulKumar2, Project: gecko-dev, Lines: 10, Source: H264.cpp


Example 20: PodZero

void ReverbConvolverStage::reset()
{
    if (!m_directMode)
        m_fftConvolver->reset();
    else
        m_directConvolver->reset();
    PodZero(m_preDelayBuffer.Elements(), m_preDelayBuffer.Length());
    m_accumulationReadIndex = 0;
    m_inputReadIndex = 0;
    m_framesProcessed = 0;
}
Developer: 70599, Project: Waterfox, Lines: 11, Source: ReverbConvolverStage.cpp
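
To tie the twenty examples together, here is a minimal, self-contained illustration of the two recurring call patterns. CodecContext and the buffer size are hypothetical, invented for this sketch rather than taken from any of the projects above:

#include <stddef.h>
#include <string.h>

// Stand-in for mozilla::PodZero (see the sketch near the top of this article).
template <typename T>
static void PodZero(T* aT) { memset(aT, 0, sizeof(T)); }

template <typename T>
static void PodZero(T* aT, size_t aNElem) { memset(aT, 0, aNElem * sizeof(T)); }

struct CodecContext  // hypothetical POD state struct
{
  int width;
  int height;
  unsigned flags;
};

int main()
{
  CodecContext ctx;
  PodZero(&ctx);          // single-object form, as in Examples 2, 8, and 19

  float samples[256];
  PodZero(samples, 256);  // array form, as in Examples 3, 11, and 14
  return 0;
}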



Note: The PodZero function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use must comply with the corresponding project's license. Do not repost without permission.

