
C++ VideoChunk Class Code Examples


This article collects typical usage examples of the C++ VideoChunk class. If you are wondering what the VideoChunk class is for, how to use it, or what real-world VideoChunk code looks like, the curated class examples below should help.



Ten VideoChunk class code examples are shown below, ordered by popularity by default.
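Before the individual examples, here is a minimal sketch distilling the access pattern they all share: walk a VideoSegment chunk by chunk, skip null or forced-black chunks, and hand each remaining frame to a consumer. The iterator and accessors are taken from the examples below; ForEachValidFrame and the aHandler callback are hypothetical names used only for illustration.

// A minimal sketch, assuming Gecko's VideoSegment/VideoChunk API.
// ForEachValidFrame and aHandler are hypothetical illustration names.
#include "VideoSegment.h"

template <typename Handler>
void ForEachValidFrame(const VideoSegment& aSegment, Handler&& aHandler)
{
  VideoSegment::ConstChunkIterator iter(aSegment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;
    // Null chunks carry duration but no image (e.g. during input blocking).
    if (!chunk.IsNull() && !chunk.mFrame.GetForceBlack()) {
      aHandler(chunk.mFrame.GetImage(), chunk.GetDuration());
    }
    iter.Next();
  }
}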

Example 1: TRACK_LOG

void
VideoTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
        TrackID aID,
        StreamTime aTrackOffset,
        uint32_t aTrackEvents,
        const MediaSegment& aQueuedMedia)
{
    if (mCanceled) {
        return;
    }

    const VideoSegment& video = static_cast<const VideoSegment&>(aQueuedMedia);

    // Check and initialize parameters for the codec encoder.
    if (!mInitialized) {
        mInitCounter++;
        TRACK_LOG(LogLevel::Debug, ("Init the video encoder %d times", mInitCounter));
        VideoSegment::ChunkIterator iter(const_cast<VideoSegment&>(video));
        while (!iter.IsEnded()) {
            VideoChunk chunk = *iter;
            if (!chunk.IsNull()) {
                gfx::IntSize imgsize = chunk.mFrame.GetImage()->GetSize();
                gfx::IntSize intrinsicSize = chunk.mFrame.GetIntrinsicSize();
                nsresult rv = Init(imgsize.width, imgsize.height,
                                   intrinsicSize.width, intrinsicSize.height,
                                   aGraph->GraphRate());
                if (NS_FAILED(rv)) {
                    LOG("[VideoTrackEncoder]: Fail to initialize the encoder!");
                    NotifyCancel();
                }
                break;
            }

            iter.Next();
        }

        mNotInitDuration += aQueuedMedia.GetDuration();
        if (!mInitialized &&
                (mNotInitDuration / aGraph->GraphRate() > INIT_FAILED_DURATION) &&
                mInitCounter > 1) {
            LOG("[VideoTrackEncoder]: Initialize failed for 30s.");
            NotifyEndOfStream();
            return;
        }
    }

    AppendVideoSegment(video);

    // The stream has stopped and reached the end of the track.
    if (aTrackEvents == MediaStreamListener::TRACK_EVENT_ENDED) {
        LOG("[VideoTrackEncoder]: Received TRACK_EVENT_ENDED.");
        NotifyEndOfStream();
    }

}
Developer: leplatrem | Project: gecko-dev | Lines: 55 | Source: TrackEncoder.cpp


Example 2: TRACK_LOG

void
VideoTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                            TrackID aID,
                                            TrackRate aTrackRate,
                                            TrackTicks aTrackOffset,
                                            uint32_t aTrackEvents,
                                            const MediaSegment& aQueuedMedia)
{
  if (mCanceled) {
    return;
  }

  const VideoSegment& video = static_cast<const VideoSegment&>(aQueuedMedia);

  // Check and initialize parameters for the codec encoder.
  if (!mInitialized) {
#ifdef PR_LOGGING
    mVideoInitCounter++;
    TRACK_LOG(PR_LOG_DEBUG, ("Init the video encoder %d times", mVideoInitCounter));
#endif
    VideoSegment::ChunkIterator iter(const_cast<VideoSegment&>(video));
    while (!iter.IsEnded()) {
      VideoChunk chunk = *iter;
      if (!chunk.IsNull()) {
        gfx::IntSize imgsize = chunk.mFrame.GetImage()->GetSize();
        gfxIntSize intrinsicSize = chunk.mFrame.GetIntrinsicSize();
        nsresult rv = Init(imgsize.width, imgsize.height,
                           intrinsicSize.width, intrinsicSize.height,
                           aTrackRate);
        if (NS_FAILED(rv)) {
          LOG("[VideoTrackEncoder]: Fail to initialize the encoder!");
          NotifyCancel();
        }
        break;
      }

      iter.Next();
    }
  }

  AppendVideoSegment(video);

  // The stream has stopped and reached the end of the track.
  if (aTrackEvents == MediaStreamListener::TRACK_EVENT_ENDED) {
    LOG("[VideoTrackEncoder]: Received TRACK_EVENT_ENDED.");
    NotifyEndOfStream();
  }

}
Developer: Andrel322 | Project: gecko-dev | Lines: 49 | Source: TrackEncoder.cpp


Example 3: PrepareMutedFrame

nsresult VP8TrackEncoder::PrepareRawFrame(VideoChunk &aChunk)
{
  if (aChunk.mFrame.GetForceBlack() || aChunk.IsNull()) {
    PrepareMutedFrame();
  } else {
    layers::Image* img = aChunk.mFrame.GetImage();
    ImageFormat format = img->GetFormat();
    if (format != ImageFormat::PLANAR_YCBCR) {
      VP8LOG("Unsupported video format\n");
      return NS_ERROR_FAILURE;
    }

    // Cast away constness b/c some of the accessors are non-const.
    layers::PlanarYCbCrImage* yuv = const_cast<layers::PlanarYCbCrImage*>(
        static_cast<const layers::PlanarYCbCrImage*>(img));
    // Big-time assumption here that this is all contiguous data coming
    // from getUserMedia or other sources.
    MOZ_ASSERT(yuv);
    const layers::PlanarYCbCrImage::Data *data = yuv->GetData();

    mVPXImageWrapper->planes[VPX_PLANE_Y] = data->mYChannel;
    mVPXImageWrapper->planes[VPX_PLANE_U] = data->mCbChannel;
    mVPXImageWrapper->planes[VPX_PLANE_V] = data->mCrChannel;
    mVPXImageWrapper->stride[VPX_PLANE_Y] = data->mYStride;
    mVPXImageWrapper->stride[VPX_PLANE_U] = data->mCbCrStride;
    mVPXImageWrapper->stride[VPX_PLANE_V] = data->mCbCrStride;
  }
  return NS_OK;
}
Developer: JCROM-FxOS | Project: b2jc_gecko | Lines: 29 | Source: VP8TrackEncoder.cpp


Example 4: mon

nsresult
VideoTrackEncoder::AppendVideoSegment(const VideoSegment& aSegment)
{
  ReentrantMonitorAutoEnter mon(mReentrantMonitor);

  // Append all video segments from MediaStreamGraph, including null and
  // non-null frames.
  VideoSegment::ChunkIterator iter(const_cast<VideoSegment&>(aSegment));
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;
    mTotalFrameDuration += chunk.GetDuration();
    mLastFrameDuration += chunk.GetDuration();
    // Send only unique video frames for encoding, but if we have seen the
    // same video chunk for more than one second, force it into the encoder
    // anyway.
    if ((mLastFrame != chunk.mFrame) ||
        (mLastFrameDuration >= mTrackRate)) {
      RefPtr<layers::Image> image = chunk.mFrame.GetImage();
      // Because we may get chunks with a null image (due to input blocking),
      // accumulate duration and give it to the next frame that arrives.
      // Canonically incorrect - the duration should go to the previous frame
      // - but that would require delaying until the next frame arrives.
      // Best would be to do like OMXEncoder and pass an effective timestamp
      // in with each frame.
      if (image) {
        mRawSegment.AppendFrame(image.forget(),
                                mLastFrameDuration,
                                chunk.mFrame.GetIntrinsicSize(),
                                PRINCIPAL_HANDLE_NONE,
                                chunk.mFrame.GetForceBlack());
        mLastFrameDuration = 0;
      }
    }
    mLastFrame.TakeFrom(&chunk.mFrame);
    iter.Next();
  }

  if (mRawSegment.GetDuration() > 0) {
    mReentrantMonitor.NotifyAll();
  }

  return NS_OK;
}
Developer: MekliCZ | Project: positron | Lines: 43 | Source: TrackEncoder.cpp


Example 5: mon

nsresult
VideoTrackEncoder::AppendVideoSegment(const VideoSegment& aSegment)
{
  ReentrantMonitorAutoEnter mon(mReentrantMonitor);

  // Append all video segments from MediaStreamGraph, including null and
  // non-null frames.
  VideoSegment::ChunkIterator iter(const_cast<VideoSegment&>(aSegment));
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;
    nsRefPtr<layers::Image> image = chunk.mFrame.GetImage();
    mRawSegment.AppendFrame(image.forget(), chunk.GetDuration(),
                            chunk.mFrame.GetIntrinsicSize().ToIntSize());
    iter.Next();
  }

  if (mRawSegment.GetDuration() > 0) {
    mReentrantMonitor.NotifyAll();
  }

  return NS_OK;
}
Developer: blue119 | Project: gecko-dev | Lines: 22 | Source: TrackEncoder.cpp
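
Note that Example 5 is an earlier revision of the same AppendVideoSegment method shown in Example 4: it forwards every chunk to mRawSegment unconditionally, without the frame-deduplication and duration-accumulation logic that the later revision adds.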


Example 6: mon

nsresult
OmxVideoTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  VideoSegment segment;
  {
    // Move all the samples from mRawSegment to segment. We only hold the
    // monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Wait while the encoder is uninitialized or has no data to consume,
    // unless encoding has been canceled.
    while (!mCanceled && (!mInitialized ||
          (mRawSegment.GetDuration() == 0 && !mEndOfStream))) {
      mReentrantMonitor.Wait();
    }

    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }

    segment.AppendFrom(&mRawSegment);
  }

  nsresult rv;
  // Start queuing raw frames to the input buffers of OMXCodecWrapper.
  VideoSegment::ChunkIterator iter(segment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;

    // Send only the unique video frames to OMXCodecWrapper.
    if (mLastFrame != chunk.mFrame) {
      uint64_t totalDurationUs = mTotalFrameDuration * USECS_PER_S / mTrackRate;
      layers::Image* img = (chunk.IsNull() || chunk.mFrame.GetForceBlack()) ?
                           nullptr : chunk.mFrame.GetImage();
      rv = mEncoder->Encode(img, mFrameWidth, mFrameHeight, totalDurationUs);
      NS_ENSURE_SUCCESS(rv, rv);
    }

    mLastFrame.TakeFrom(&chunk.mFrame);
    mTotalFrameDuration += chunk.GetDuration();

    iter.Next();
  }

  // Send the EOS signal to OMXCodecWrapper.
  if (mEndOfStream && iter.IsEnded() && !mEosSetInEncoder) {
    uint64_t totalDurationUs = mTotalFrameDuration * USECS_PER_S / mTrackRate;
    layers::Image* img = (!mLastFrame.GetImage() || mLastFrame.GetForceBlack())
                         ? nullptr : mLastFrame.GetImage();
    rv = mEncoder->Encode(img, mFrameWidth, mFrameHeight, totalDurationUs,
                          OMXCodecWrapper::BUFFER_EOS);
    NS_ENSURE_SUCCESS(rv, rv);

    // Keep sending EOS signal until OMXVideoEncoder gets it.
    mEosSetInEncoder = true;
  }

  // Dequeue an encoded frame from the output buffers of OMXCodecWrapper.
  nsTArray<uint8_t> buffer;
  int outFlags = 0;
  int64_t outTimeStampUs = 0;
  rv = mEncoder->GetNextEncodedFrame(&buffer, &outTimeStampUs, &outFlags,
                                     GET_ENCODED_VIDEO_FRAME_TIMEOUT);
  NS_ENSURE_SUCCESS(rv, rv);
  if (!buffer.IsEmpty()) {
    nsRefPtr<EncodedFrame> videoData = new EncodedFrame();
    if (outFlags & OMXCodecWrapper::BUFFER_CODEC_CONFIG) {
      videoData->SetFrameType(EncodedFrame::AVC_CSD);
    } else {
      videoData->SetFrameType((outFlags & OMXCodecWrapper::BUFFER_SYNC_FRAME) ?
                              EncodedFrame::AVC_I_FRAME : EncodedFrame::AVC_P_FRAME);
    }
    videoData->SwapInFrameData(buffer);
    videoData->SetTimeStamp(outTimeStampUs);
    aData.AppendEncodedFrame(videoData);
  }

  if (outFlags & OMXCodecWrapper::BUFFER_EOS) {
    mEncodingComplete = true;
    OMX_LOG("Done encoding video.");
  }

  return NS_OK;
}
Developer: L2-D2 | Project: gecko-dev | Lines: 83 | Source: OmxTrackEncoder.cpp


Example 7: EncodeComplete

void
CaptureTask::SetCurrentFrames(const VideoSegment& aSegment)
{
  if (mImageGrabbedOrTrackEnd) {
    return;
  }

  // Callback for encoding completion; it is invoked on the main thread.
  class EncodeComplete : public dom::EncodeCompleteCallback
  {
  public:
    explicit EncodeComplete(CaptureTask* aTask) : mTask(aTask) {}

    nsresult ReceiveBlob(already_AddRefed<dom::Blob> aBlob) override
    {
      RefPtr<dom::Blob> blob(aBlob);
      mTask->TaskComplete(blob.forget(), NS_OK);
      mTask = nullptr;
      return NS_OK;
    }

  protected:
    RefPtr<CaptureTask> mTask;
  };

  VideoSegment::ConstChunkIterator iter(aSegment);

  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;

    // Extract the first valid video frame.
    VideoFrame frame;
    if (!chunk.IsNull()) {
      RefPtr<layers::Image> image;
      if (chunk.mFrame.GetForceBlack()) {
        // Create a black image.
        image = VideoFrame::CreateBlackImage(chunk.mFrame.GetIntrinsicSize());
      } else {
        image = chunk.mFrame.GetImage();
      }
      MOZ_ASSERT(image);
      mImageGrabbedOrTrackEnd = true;

      // Encode image.
      nsresult rv;
      nsAutoString type(NS_LITERAL_STRING("image/jpeg"));
      nsAutoString options;
      rv = dom::ImageEncoder::ExtractDataFromLayersImageAsync(
                                type,
                                options,
                                false,
                                image,
                                new EncodeComplete(this));
      if (NS_FAILED(rv)) {
        PostTrackEndEvent();
      }
      return;
    }
    iter.Next();
  }
}
Developer: MichaelKohler | Project: gecko-dev | Lines: 63 | Source: CaptureTask.cpp


Example 8: if

nsresult VP8TrackEncoder::PrepareRawFrame(VideoChunk &aChunk)
{
  nsRefPtr<Image> img;
  if (aChunk.mFrame.GetForceBlack() || aChunk.IsNull()) {
    if (!mMuteFrame) {
      mMuteFrame = VideoFrame::CreateBlackImage(gfxIntSize(mFrameWidth, mFrameHeight));
      MOZ_ASSERT(mMuteFrame);
    }
    img = mMuteFrame;
  } else {
    img = aChunk.mFrame.GetImage();
  }

  ImageFormat format = img->GetFormat();
  if (format != ImageFormat::PLANAR_YCBCR) {
    VP8LOG("Unsupported video format\n");
    return NS_ERROR_FAILURE;
  }

  // Cast away constness b/c some of the accessors are non-const.
  PlanarYCbCrImage* yuv = const_cast<PlanarYCbCrImage*>(
      static_cast<const PlanarYCbCrImage*>(img.get()));
  // Big-time assumption here that this is all contiguous data coming
  // from getUserMedia or other sources.
  MOZ_ASSERT(yuv);
  if (!yuv->IsValid()) {
    NS_WARNING("PlanarYCbCrImage is not valid");
    return NS_ERROR_FAILURE;
  }
  const PlanarYCbCrImage::Data *data = yuv->GetData();

  if (isYUV420(data) && !data->mCbSkip) { // 420 planar
    mVPXImageWrapper->planes[VPX_PLANE_Y] = data->mYChannel;
    mVPXImageWrapper->planes[VPX_PLANE_U] = data->mCbChannel;
    mVPXImageWrapper->planes[VPX_PLANE_V] = data->mCrChannel;
    mVPXImageWrapper->stride[VPX_PLANE_Y] = data->mYStride;
    mVPXImageWrapper->stride[VPX_PLANE_U] = data->mCbCrStride;
    mVPXImageWrapper->stride[VPX_PLANE_V] = data->mCbCrStride;
  } else {
    uint32_t yPlaneSize = mFrameWidth * mFrameHeight;
    uint32_t halfWidth = (mFrameWidth + 1) / 2;
    uint32_t halfHeight = (mFrameHeight + 1) / 2;
    uint32_t uvPlaneSize = halfWidth * halfHeight;
    if (mI420Frame.IsEmpty()) {
      mI420Frame.SetLength(yPlaneSize + uvPlaneSize * 2);
    }

    MOZ_ASSERT(mI420Frame.Length() >= (yPlaneSize + uvPlaneSize * 2));
    uint8_t *y = mI420Frame.Elements();
    uint8_t *cb = mI420Frame.Elements() + yPlaneSize;
    uint8_t *cr = mI420Frame.Elements() + yPlaneSize + uvPlaneSize;

    if (isYUV420(data) && data->mCbSkip) {
      // If mCbSkip is set, we assume it's nv12 or nv21.
      if (data->mCbChannel < data->mCrChannel) { // nv12
        libyuv::NV12ToI420(data->mYChannel, data->mYStride,
                           data->mCbChannel, data->mCbCrStride,
                           y, mFrameWidth,
                           cb, halfWidth,
                           cr, halfWidth,
                           mFrameWidth, mFrameHeight);
      } else { // nv21
        libyuv::NV21ToI420(data->mYChannel, data->mYStride,
                           data->mCrChannel, data->mCbCrStride,
                           y, mFrameWidth,
                           cb, halfWidth,
                           cr, halfWidth,
                           mFrameWidth, mFrameHeight);
      }
    } else if (isYUV444(data) && !data->mCbSkip) {
      libyuv::I444ToI420(data->mYChannel, data->mYStride,
                         data->mCbChannel, data->mCbCrStride,
                         data->mCrChannel, data->mCbCrStride,
                         y, mFrameWidth,
                         cb, halfWidth,
                         cr, halfWidth,
                         mFrameWidth, mFrameHeight);
    } else if (isYUV422(data) && !data->mCbSkip) {
      libyuv::I422ToI420(data->mYChannel, data->mYStride,
                         data->mCbChannel, data->mCbCrStride,
                         data->mCrChannel, data->mCbCrStride,
                         y, mFrameWidth,
                         cb, halfWidth,
                         cr, halfWidth,
                         mFrameWidth, mFrameHeight);
    } else {
      VP8LOG("Unsupported planar format\n");
      return NS_ERROR_NOT_IMPLEMENTED;
    }

    mVPXImageWrapper->planes[VPX_PLANE_Y] = y;
    mVPXImageWrapper->planes[VPX_PLANE_U] = cb;
    mVPXImageWrapper->planes[VPX_PLANE_V] = cr;
    mVPXImageWrapper->stride[VPX_PLANE_Y] = mFrameWidth;
    mVPXImageWrapper->stride[VPX_PLANE_U] = halfWidth;
    mVPXImageWrapper->stride[VPX_PLANE_V] = halfWidth;
  }

  return NS_OK;
}
Developer: Jinwoo-Song | Project: gecko-dev | Lines: 100 | Source: VP8TrackEncoder.cpp


Example 9: lock

void VideoFrameContainer::SetCurrentFrames(const VideoSegment& aSegment)
{
  if (aSegment.IsEmpty()) {
    return;
  }

  MutexAutoLock lock(mMutex);

  // Collect any new frames produced in this iteration.
  AutoTArray<ImageContainer::NonOwningImage,4> newImages;
  PrincipalHandle lastPrincipalHandle = PRINCIPAL_HANDLE_NONE;

  VideoSegment::ConstChunkIterator iter(aSegment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;

    const VideoFrame* frame = &chunk.mFrame;
    if (*frame == mLastPlayedVideoFrame) {
      iter.Next();
      continue;
    }

    Image* image = frame->GetImage();
    CONTAINER_LOG(LogLevel::Verbose,
                  ("VideoFrameContainer %p writing video frame %p (%d x %d)",
                  this, image, frame->GetIntrinsicSize().width,
                  frame->GetIntrinsicSize().height));

    if (frame->GetForceBlack()) {
      if (!mBlackImage) {
        mBlackImage = GetImageContainer()->CreatePlanarYCbCrImage();
        if (mBlackImage) {
          // Sets the image to a single black pixel, which will be scaled to
          // fill the rendered size.
          SetImageToBlackPixel(mBlackImage->AsPlanarYCbCrImage());
        }
      }
      if (mBlackImage) {
        image = mBlackImage;
      }
    }
    // Don't append null image to the newImages.
    if (!image) {
      iter.Next();
      continue;
    }
    newImages.AppendElement(ImageContainer::NonOwningImage(image, chunk.mTimeStamp));

    lastPrincipalHandle = chunk.GetPrincipalHandle();

    mLastPlayedVideoFrame = *frame;
    iter.Next();
  }

  // Don't update if there are no changes.
  if (newImages.IsEmpty()) {
    return;
  }

  AutoTArray<ImageContainer::NonOwningImage,4> images;

  bool principalHandleChanged =
     lastPrincipalHandle != PRINCIPAL_HANDLE_NONE &&
     lastPrincipalHandle != GetLastPrincipalHandleLocked();

  // Add the frames from this iteration.
  for (auto& image : newImages) {
    image.mFrameID = NewFrameID();
    images.AppendElement(image);
  }

  if (principalHandleChanged) {
    UpdatePrincipalHandleForFrameIDLocked(lastPrincipalHandle,
                                          newImages.LastElement().mFrameID);
  }

  SetCurrentFramesLocked(mLastPlayedVideoFrame.GetIntrinsicSize(), images);
  nsCOMPtr<nsIRunnable> event =
    new VideoFrameContainerInvalidateRunnable(this);
  mMainThread->Dispatch(event.forget());

  images.ClearAndRetainStorage();
}
Developer: Wafflespeanut | Project: gecko-dev | Lines: 83 | Source: VideoFrameContainer.cpp


Example 10: map

nsresult VP8TrackEncoder::PrepareRawFrame(VideoChunk &aChunk)
{
  RefPtr<Image> img;
  if (aChunk.mFrame.GetForceBlack() || aChunk.IsNull()) {
    if (!mMuteFrame) {
      mMuteFrame = VideoFrame::CreateBlackImage(gfx::IntSize(mFrameWidth, mFrameHeight));
      MOZ_ASSERT(mMuteFrame);
    }
    img = mMuteFrame;
  } else {
    img = aChunk.mFrame.GetImage();
  }

  if (img->GetSize() != IntSize(mFrameWidth, mFrameHeight)) {
    VP8LOG("Dynamic resolution changes (was %dx%d, now %dx%d) are unsupported\n",
           mFrameWidth, mFrameHeight, img->GetSize().width, img->GetSize().height);
    return NS_ERROR_FAILURE;
  }

  ImageFormat format = img->GetFormat();
  if (format == ImageFormat::PLANAR_YCBCR) {
    PlanarYCbCrImage* yuv = static_cast<PlanarYCbCrImage *>(img.get());

    MOZ_RELEASE_ASSERT(yuv);
    if (!yuv->IsValid()) {
      NS_WARNING("PlanarYCbCrImage is not valid");
      return NS_ERROR_FAILURE;
    }
    const PlanarYCbCrImage::Data *data = yuv->GetData();

    if (isYUV420(data) && !data->mCbSkip) {
      // 420 planar, no need for conversions
      mVPXImageWrapper->planes[VPX_PLANE_Y] = data->mYChannel;
      mVPXImageWrapper->planes[VPX_PLANE_U] = data->mCbChannel;
      mVPXImageWrapper->planes[VPX_PLANE_V] = data->mCrChannel;
      mVPXImageWrapper->stride[VPX_PLANE_Y] = data->mYStride;
      mVPXImageWrapper->stride[VPX_PLANE_U] = data->mCbCrStride;
      mVPXImageWrapper->stride[VPX_PLANE_V] = data->mCbCrStride;

      return NS_OK;
    }
  }

  // Not 420 planar, have to convert
  uint32_t yPlaneSize = mFrameWidth * mFrameHeight;
  uint32_t halfWidth = (mFrameWidth + 1) / 2;
  uint32_t halfHeight = (mFrameHeight + 1) / 2;
  uint32_t uvPlaneSize = halfWidth * halfHeight;

  if (mI420Frame.IsEmpty()) {
    mI420Frame.SetLength(yPlaneSize + uvPlaneSize * 2);
  }

  uint8_t *y = mI420Frame.Elements();
  uint8_t *cb = mI420Frame.Elements() + yPlaneSize;
  uint8_t *cr = mI420Frame.Elements() + yPlaneSize + uvPlaneSize;

  if (format == ImageFormat::PLANAR_YCBCR) {
    PlanarYCbCrImage* yuv = static_cast<PlanarYCbCrImage *>(img.get());

    MOZ_RELEASE_ASSERT(yuv);
    if (!yuv->IsValid()) {
      NS_WARNING("PlanarYCbCrImage is not valid");
      return NS_ERROR_FAILURE;
    }
    const PlanarYCbCrImage::Data *data = yuv->GetData();

    int rv;
    std::string yuvFormat;
    if (isYUV420(data) && data->mCbSkip) {
      // If mCbSkip is set, we assume it's nv12 or nv21.
      if (data->mCbChannel < data->mCrChannel) { // nv12
        rv = libyuv::NV12ToI420(data->mYChannel, data->mYStride,
                                data->mCbChannel, data->mCbCrStride,
                                y, mFrameWidth,
                                cb, halfWidth,
                                cr, halfWidth,
                                mFrameWidth, mFrameHeight);
        yuvFormat = "NV12";
      } else { // nv21
        rv = libyuv::NV21ToI420(data->mYChannel, data->mYStride,
                                data->mCrChannel, data->mCbCrStride,
                                y, mFrameWidth,
                                cb, halfWidth,
                                cr, halfWidth,
                                mFrameWidth, mFrameHeight);
        yuvFormat = "NV21";
      }
    } else if (isYUV444(data) && !data->mCbSkip) {
      rv = libyuv::I444ToI420(data->mYChannel, data->mYStride,
                              data->mCbChannel, data->mCbCrStride,
                              data->mCrChannel, data->mCbCrStride,
                              y, mFrameWidth,
                              cb, halfWidth,
                              cr, halfWidth,
                              mFrameWidth, mFrameHeight);
      yuvFormat = "I444";
    } else if (isYUV422(data) && !data->mCbSkip) {
      rv = libyuv::I422ToI420(data->mYChannel, data->mYStride,
                              data->mCbChannel, data->mCbCrStride,
//......... (part of the code omitted here) .........
Developer: Danielzac | Project: gecko-dev | Lines: 101 | Source: VP8TrackEncoder.cpp



Note: The VideoChunk class examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by various developers; copyright remains with the original authors. Consult each project's license before reusing or redistributing the code; do not repost without permission.

