
C++ VideoSegment Class Code Examples


This article collects typical usage examples of the C++ VideoSegment class. If you are wondering what the VideoSegment class does, how to use it, or where to find working examples, the hand-picked snippets below may help.



Four code examples of the VideoSegment class are shown below, ordered by popularity by default. Before the examples, a short sketch of the API surface they share is given first.
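All four examples revolve around the same small VideoSegment API: construct a segment, append frames, and iterate chunks. The following is a minimal sketch of that lifecycle, assuming Gecko-era media headers; the AppendFrame signature follows Example 3 and changed in later Gecko versions, and ProcessChunk is a hypothetical consumer, not a real API.

// A minimal usage sketch (not taken from the examples): build a segment,
// append one frame, then walk the resulting chunks. ProcessChunk is a
// hypothetical stand-in for whatever consumes the frames.
void SketchVideoSegmentUsage(layers::Image* aRawImage)
{
  VideoSegment segment;

  // AppendFrame takes ownership of the image reference and pairs it with
  // a duration in track time (signature as in Example 3; later Gecko
  // versions pass the size and principal differently).
  RefPtr<layers::Image> image = aRawImage;
  segment.AppendFrame(image.forget(), USECS_PER_S, gfxIntSize(640, 480));

  // Each chunk pairs one frame with the duration it should be displayed.
  VideoSegment::ChunkIterator iter(segment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;
    ProcessChunk(chunk.mFrame.GetImage(), chunk.GetDuration());
    iter.Next();
  }
}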

Example 1: AssertOwnerThread

void
DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle)
{
  AssertOwnerThread();

  if (!mInfo.HasVideo()) {
    return;
  }

  VideoSegment output;
  TrackID videoTrackId = mInfo.mVideo.mTrackId;
  AutoTArray<RefPtr<MediaData>, 10> video;
  SourceMediaStream* sourceStream = mData->mStream;

  // It's OK to hold references to the VideoData because VideoData
  // is ref-counted.
  mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);

  // tracksStartTimeStamp might be null if the SourceMediaStream has not
  // yet been added to the MediaStreamGraph.
  TimeStamp tracksStartTimeStamp = sourceStream->GetStreamTracksStrartTimeStamp();
  if (tracksStartTimeStamp.IsNull()) {
    tracksStartTimeStamp = TimeStamp::Now();
  }

  for (uint32_t i = 0; i < video.Length(); ++i) {
    VideoData* v = video[i]->As<VideoData>();

    if (mData->mNextVideoTime < v->mTime) {
      // Write the last video frame to catch up. mLastVideoImage can be null
      // here, which is fine; it just means there's no video.

      // TODO: |mLastVideoImage| should come from the last image rendered
      // by the state machine. This will avoid the black frame when capture
      // happens in the middle of playback (especially in the middle of a
      // video frame). E.g. if we have a video frame that is 30 sec long
      // and capture happens at 15 sec, we'll have to append a black frame
      // that is 15 sec long.
      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
          mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
          tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->mTime),
          &output, aPrincipalHandle);
      mData->mNextVideoTime = v->mTime;
    }

    if (mData->mNextVideoTime < v->GetEndTime()) {
      WriteVideoToMediaStream(sourceStream, v->mImage, v->GetEndTime(),
          mData->mNextVideoTime, v->mDisplay,
          tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->GetEndTime()),
          &output, aPrincipalHandle);
      mData->mNextVideoTime = v->GetEndTime();
      mData->mLastVideoImage = v->mImage;
      mData->mLastVideoImageDisplaySize = v->mDisplay;
    }
  }

  // Check that the output is not empty.
  if (output.GetLastFrame()) {
    mData->mEOSVideoCompensation = ZeroDurationAtLastChunk(output);
  }

  if (!aIsSameOrigin) {
    output.ReplaceWithDisabled();
  }

  if (output.GetDuration() > 0) {
    sourceStream->AppendToTrack(videoTrackId, &output);
  }

  if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
    if (mData->mEOSVideoCompensation) {
      VideoSegment endSegment;
      // Compute the microsecond duration of one stream-time tick; the
      // compensation frame below pads the track by exactly that much.
      int64_t deviation_usec = sourceStream->StreamTimeToMicroseconds(1);
      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
          mData->mNextVideoTime + deviation_usec, mData->mNextVideoTime,
          mData->mLastVideoImageDisplaySize,
          tracksStartTimeStamp + TimeDuration::FromMicroseconds(mData->mNextVideoTime + deviation_usec),
          &endSegment, aPrincipalHandle);
      mData->mNextVideoTime += deviation_usec;
      MOZ_ASSERT(endSegment.GetDuration() > 0);
      if (!aIsSameOrigin) {
        endSegment.ReplaceWithDisabled();
      }
      sourceStream->AppendToTrack(videoTrackId, &endSegment);
    }
    sourceStream->EndTrack(videoTrackId);
    mData->mHaveSentFinishVideo = true;
  }
}
Author: n/a | Project: n/a | Lines: 90 | Source: n/a
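The heart of Example 1 is the two-branch catch-up logic inside the loop. The sketch below restates just that control flow, with hypothetical helpers (AppendPadding and AppendSpan stand in for the two WriteVideoToMediaStream calls) to make the invariant explicit: mNextVideoTime always advances to the end of whatever was appended last, so the output track never has gaps.

// Restatement of Example 1's loop body; aV is the next decoded VideoData.
// AppendPadding/AppendSpan are hypothetical, not a real API.
if (mNextVideoTime < aV->mTime) {
  // The stream is behind the decoder: pad the gap with the last image
  // we sent (a null image is fine and simply means "no video yet").
  AppendPadding(mLastVideoImage, mNextVideoTime, aV->mTime);
  mNextVideoTime = aV->mTime;
}
if (mNextVideoTime < aV->GetEndTime()) {
  // Forward the decoded frame itself, then remember it so it can serve
  // as padding for the next gap.
  AppendSpan(aV->mImage, mNextVideoTime, aV->GetEndTime());
  mNextVideoTime = aV->GetEndTime();
  mLastVideoImage = aV->mImage;
}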


Example 2: mon

nsresult
OmxVideoTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  VideoSegment segment;
  {
    // Move all the samples from mRawSegment to segment. We only hold the
    // monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Block until the encoder is initialized and has raw data to consume
    // (or the stream has ended), unless encoding has been canceled.
    while (!mCanceled && (!mInitialized ||
          (mRawSegment.GetDuration() == 0 && !mEndOfStream))) {
      mReentrantMonitor.Wait();
    }

    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }

    segment.AppendFrom(&mRawSegment);
  }

  nsresult rv;
  // Start queuing raw frames to the input buffers of OMXCodecWrapper.
  VideoSegment::ChunkIterator iter(segment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;

    // Send only the unique video frames to OMXCodecWrapper.
    if (mLastFrame != chunk.mFrame) {
      uint64_t totalDurationUs = mTotalFrameDuration * USECS_PER_S / mTrackRate;
      layers::Image* img = (chunk.IsNull() || chunk.mFrame.GetForceBlack()) ?
                           nullptr : chunk.mFrame.GetImage();
      rv = mEncoder->Encode(img, mFrameWidth, mFrameHeight, totalDurationUs);
      NS_ENSURE_SUCCESS(rv, rv);
    }

    mLastFrame.TakeFrom(&chunk.mFrame);
    mTotalFrameDuration += chunk.GetDuration();

    iter.Next();
  }

  // Send the EOS signal to OMXCodecWrapper.
  if (mEndOfStream && iter.IsEnded() && !mEosSetInEncoder) {
    uint64_t totalDurationUs = mTotalFrameDuration * USECS_PER_S / mTrackRate;
    layers::Image* img = (!mLastFrame.GetImage() || mLastFrame.GetForceBlack())
                         ? nullptr : mLastFrame.GetImage();
    rv = mEncoder->Encode(img, mFrameWidth, mFrameHeight, totalDurationUs,
                          OMXCodecWrapper::BUFFER_EOS);
    NS_ENSURE_SUCCESS(rv, rv);

    // Keep sending EOS signal until OMXVideoEncoder gets it.
    mEosSetInEncoder = true;
  }

  // Dequeue an encoded frame from the output buffers of OMXCodecWrapper.
  nsTArray<uint8_t> buffer;
  int outFlags = 0;
  int64_t outTimeStampUs = 0;
  rv = mEncoder->GetNextEncodedFrame(&buffer, &outTimeStampUs, &outFlags,
                                     GET_ENCODED_VIDEO_FRAME_TIMEOUT);
  NS_ENSURE_SUCCESS(rv, rv);
  if (!buffer.IsEmpty()) {
    nsRefPtr<EncodedFrame> videoData = new EncodedFrame();
    if (outFlags & OMXCodecWrapper::BUFFER_CODEC_CONFIG) {
      videoData->SetFrameType(EncodedFrame::AVC_CSD);
    } else {
      videoData->SetFrameType((outFlags & OMXCodecWrapper::BUFFER_SYNC_FRAME) ?
                              EncodedFrame::AVC_I_FRAME : EncodedFrame::AVC_P_FRAME);
    }
    videoData->SwapInFrameData(buffer);
    videoData->SetTimeStamp(outTimeStampUs);
    aData.AppendEncodedFrame(videoData);
  }

  if (outFlags & OMXCodecWrapper::BUFFER_EOS) {
    mEncodingComplete = true;
    OMX_LOG("Done encoding video.");
  }

  return NS_OK;
}
Author: L2-D2 | Project: gecko-dev | Lines: 83 | Source: OmxTrackEncoder.cpp
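Example 2 converts accumulated track time to microseconds twice with the same expression, mTotalFrameDuration * USECS_PER_S / mTrackRate. The arithmetic is worth spelling out: track time is counted in ticks of 1/mTrackRate seconds, and multiplying before dividing keeps integer truncation to a minimum. A self-contained restatement:

#include <cstdint>

// Convert track-time ticks to microseconds, as in Example 2's
// totalDurationUs computation. Multiplying first avoids truncating
// sub-second durations to zero.
uint64_t TrackTicksToMicroseconds(uint64_t aTicks, uint32_t aTrackRate)
{
  const uint64_t kUsecsPerSecond = 1000000; // USECS_PER_S in Gecko
  return aTicks * kUsecsPerSecond / aTrackRate;
}

// Example: 90 ticks at a 90 kHz track rate is one millisecond:
// TrackTicksToMicroseconds(90, 90000) == 1000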


Example 3: do_CreateInstance

nsresult
MediaEngineDefaultVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
    if (mState != kAllocated) {
        return NS_ERROR_FAILURE;
    }

    mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
    if (!mTimer) {
        return NS_ERROR_FAILURE;
    }

    mSource = aStream;

    // Allocate a single blank Image
    layers::Image::Format format = layers::Image::PLANAR_YCBCR;
    mImageContainer = layers::LayerManager::CreateImageContainer();

    nsRefPtr<layers::Image> image = mImageContainer->CreateImage(&format, 1);

    int len = ((WIDTH * HEIGHT) * 3 / 2);
    mImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
    PRUint8* frame = (PRUint8*) PR_Malloc(len);
    memset(frame, 0x80, len); // Gray

    const PRUint8 lumaBpp = 8;
    const PRUint8 chromaBpp = 4;

    layers::PlanarYCbCrImage::Data data;
    data.mYChannel = frame;
    data.mYSize = gfxIntSize(WIDTH, HEIGHT);
    data.mYStride = WIDTH * lumaBpp / 8.0;
    data.mCbCrStride = WIDTH * chromaBpp / 8.0;
    data.mCbChannel = frame + HEIGHT * data.mYStride;
    data.mCrChannel = data.mCbChannel + HEIGHT * data.mCbCrStride / 2;
    data.mCbCrSize = gfxIntSize(WIDTH / 2, HEIGHT / 2);
    data.mPicX = 0;
    data.mPicY = 0;
    data.mPicSize = gfxIntSize(WIDTH, HEIGHT);
    data.mStereoMode = layers::STEREO_MODE_MONO;

    // SetData copies data, so we can free the frame
    mImage->SetData(data);
    PR_Free(frame);


    // AddTrack takes ownership of segment
    VideoSegment *segment = new VideoSegment();
    segment->AppendFrame(image.forget(), USECS_PER_S / FPS, gfxIntSize(WIDTH, HEIGHT));
    mSource->AddTrack(aID, RATE, 0, segment);

    // We aren't going to add any more tracks
    mSource->AdvanceKnownTracksTime(STREAM_TIME_MAX);

    // Remember TrackID so we can end it later
    mTrackID = aID;

    // Start timer for subsequent frames
    mTimer->InitWithCallback(this, 1000 / FPS, nsITimer::TYPE_REPEATING_SLACK);
    mState = kStarted;

    return NS_OK;
}
Author: vdt | Project: mozilla-central | Lines: 63 | Source: MediaEngineDefault.cpp
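Two details of Example 3 deserve a note. The buffer length (WIDTH * HEIGHT) * 3 / 2 is the standard size of an 8-bit 4:2:0 (I420) planar frame, and memset(frame, 0x80, len) produces mid-gray because 0x80 is both a mid-level luma value and the neutral chroma value. A standalone restatement of the layout arithmetic:

#include <cstddef>

// Size of an 8-bit 4:2:0 (I420) frame, matching the layout Example 3
// builds by hand: a full-resolution Y plane followed by Cb and Cr
// planes at half resolution in both dimensions.
size_t I420BufferSize(size_t aWidth, size_t aHeight)
{
  size_t ySize = aWidth * aHeight;                  // one luma byte per pixel
  size_t chromaSize = (aWidth / 2) * (aHeight / 2); // one byte per 2x2 block
  return ySize + 2 * chromaSize;                    // == width * height * 3 / 2
}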


Example 4: lock

void VideoFrameContainer::SetCurrentFrames(const VideoSegment& aSegment)
{
  if (aSegment.IsEmpty()) {
    return;
  }

  MutexAutoLock lock(mMutex);

  // Collect any new frames produced in this iteration.
  AutoTArray<ImageContainer::NonOwningImage,4> newImages;
  PrincipalHandle lastPrincipalHandle = PRINCIPAL_HANDLE_NONE;

  VideoSegment::ConstChunkIterator iter(aSegment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;

    const VideoFrame* frame = &chunk.mFrame;
    if (*frame == mLastPlayedVideoFrame) {
      iter.Next();
      continue;
    }

    Image* image = frame->GetImage();
    CONTAINER_LOG(LogLevel::Verbose,
                  ("VideoFrameContainer %p writing video frame %p (%d x %d)",
                  this, image, frame->GetIntrinsicSize().width,
                  frame->GetIntrinsicSize().height));

    if (frame->GetForceBlack()) {
      if (!mBlackImage) {
        mBlackImage = GetImageContainer()->CreatePlanarYCbCrImage();
        if (mBlackImage) {
          // Sets the image to a single black pixel, which will be scaled to
          // fill the rendered size.
          SetImageToBlackPixel(mBlackImage->AsPlanarYCbCrImage());
        }
      }
      if (mBlackImage) {
        image = mBlackImage;
      }
    }
    // Don't append a null image to newImages.
    if (!image) {
      iter.Next();
      continue;
    }
    newImages.AppendElement(ImageContainer::NonOwningImage(image, chunk.mTimeStamp));

    lastPrincipalHandle = chunk.GetPrincipalHandle();

    mLastPlayedVideoFrame = *frame;
    iter.Next();
  }

  // Don't update if there are no changes.
  if (newImages.IsEmpty()) {
    return;
  }

  AutoTArray<ImageContainer::NonOwningImage,4> images;

  bool principalHandleChanged =
     lastPrincipalHandle != PRINCIPAL_HANDLE_NONE &&
     lastPrincipalHandle != GetLastPrincipalHandleLocked();

  // Add the frames from this iteration.
  for (auto& image : newImages) {
    image.mFrameID = NewFrameID();
    images.AppendElement(image);
  }

  if (principalHandleChanged) {
    UpdatePrincipalHandleForFrameIDLocked(lastPrincipalHandle,
                                          newImages.LastElement().mFrameID);
  }

  SetCurrentFramesLocked(mLastPlayedVideoFrame.GetIntrinsicSize(), images);
  nsCOMPtr<nsIRunnable> event =
    new VideoFrameContainerInvalidateRunnable(this);
  mMainThread->Dispatch(event.forget());

  images.ClearAndRetainStorage();
}
Author: Wafflespeanut | Project: gecko-dev | Lines: 83 | Source: VideoFrameContainer.cpp
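Example 4's main trick is deduplication: chunks whose frame equals the last played frame are skipped, so the compositor only sees images that actually changed. The pattern generalizes beyond Gecko; here is a reduced sketch with a hypothetical Frame type (anything with operator==), not Gecko's API:

#include <vector>

// Forward only the frames that differ from the previously displayed one,
// as Example 4 does against mLastPlayedVideoFrame.
template <typename Frame>
std::vector<Frame> CollectChangedFrames(const std::vector<Frame>& aChunks,
                                        Frame& aLastPlayed)
{
  std::vector<Frame> changed;
  for (const Frame& frame : aChunks) {
    if (frame == aLastPlayed) {
      continue; // unchanged frame: nothing new to composite
    }
    changed.push_back(frame);
    aLastPlayed = frame;
  }
  return changed;
}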



Note: The VideoSegment class examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Consult each project's license before using or redistributing the code, and do not republish without permission.

