本文整理汇总了C++中packet_queue_get函数的典型用法代码示例。如果您正苦于以下问题:C++ packet_queue_get函数的具体用法?C++ packet_queue_get怎么用?C++ packet_queue_get使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了packet_queue_get函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: audio_decode_frame
/*
 * Decode audio packets from the global audioq into audio_buf.
 * Returns the number of bytes written to audio_buf, or -1 when the
 * packet queue shuts down.  Static state carries a partially consumed
 * packet across calls (so this function is not reentrant).
 *
 * FIX: the original fetched a new packet at the top of every call/loop
 * iteration, discarding any bytes left over from the previous call and
 * leaking the previously fetched packet.  Drain the remaining bytes
 * first and free the old packet before fetching the next one.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
    static AVPacket pkt;
    static uint8_t *audio_pkt_data = NULL;
    static int audio_pkt_size = 0;
    int len1, data_size;

    for(;;)
    {
        /* First consume whatever is left of the current packet. */
        while(audio_pkt_size > 0)
        {
            data_size = buf_size;
            len1 = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf, &data_size,
                                         audio_pkt_data, audio_pkt_size);
            if(len1 < 0)
            {
                /* decode error: skip the rest of this packet */
                audio_pkt_size = 0;
                break;
            }
            audio_pkt_data += len1;
            audio_pkt_size -= len1;
            if(data_size <= 0)
            {
                /* no output yet, feed the decoder more bytes */
                continue;
            }
            return data_size;
        }
        /* Release the exhausted packet before fetching the next one. */
        if(pkt.data)
            av_free_packet(&pkt);
        if(packet_queue_get(audioq, &pkt, 1) < 0)
        {
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
    }
}
开发者ID:kestiny,项目名称:Demos,代码行数:35,代码来源:videoplayer.cpp
示例2: video_thread
/*
 * Video decoding thread: pulls packets from is->videoq, decodes them,
 * and forwards each completed frame to queue_picture().  Returns 0 once
 * the packet queue signals shutdown.
 */
int video_thread(void *arg) {
    VideoState *is = (VideoState *) arg;
    AVPacket packet_storage;
    AVPacket *pkt = &packet_storage;
    AVFrame *frame = av_frame_alloc();
    int got_frame = 0;

    while (1) {
        if (packet_queue_get(&is->videoq, pkt, 1) < 0) {
            /* queue was shut down -- stop decoding */
            break;
        }
        avcodec_decode_video2(is->video_st->codec, frame, &got_frame, pkt);
        /* Hand off any completed frame; a failed hand-off means quit. */
        if (got_frame && queue_picture(is, frame) < 0) {
            break;
        }
        av_free_packet(pkt);
    }
    av_free(frame);
    return 0;
}
开发者ID:GNUDimarik,项目名称:dranger-ffmpeg-tuto,代码行数:30,代码来源:tutorial04.c
示例3: audio_decode_frame
/*
 * Pull packets from is->audioq and decode until one frame's worth of PCM
 * data has been copied into is->audio_buf.
 *
 * Returns the number of bytes placed in is->audio_buf, or -1 on quit /
 * queue shutdown.  *pts_ptr receives the audio clock (seconds) for the
 * returned data.  Decoder state (current packet, remaining bytes, clock)
 * lives in *is, so partially consumed packets resume on the next call.
 */
int audio_decode_frame(VideoState *is, double *pts_ptr) {
int len1, data_size = 0, n;
AVPacket *pkt = &is->audio_pkt;
double pts;
for (; ;) {
/* First drain any bytes remaining in the packet from the previous call. */
while (is->audio_pkt_size > 0) {
int got_frame = 0;
len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);
if (len1 < 0) {
/* if error, skip frame */
is->audio_pkt_size = 0;
break;
}
if (got_frame) {
data_size =
av_samples_get_buffer_size
(
NULL,
is->audio_st->codec->channels,
is->audio_frame.nb_samples,
is->audio_st->codec->sample_fmt,
1
);
/* NOTE(review): for planar sample formats data[0] holds only the first
 * channel while data_size counts all channels -- confirm the decoder
 * output is an interleaved format (e.g. S16). */
memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
}
is->audio_pkt_data += len1;
is->audio_pkt_size -= len1;
if (data_size <= 0) {
/* No data yet, get more frames */
continue;
}
pts = is->audio_clock;
*pts_ptr = pts;
/* Byte rate: assumes 2 bytes/sample (S16) per channel -- TODO confirm. */
n = 2 * is->audio_st->codec->channels;
is->audio_clock += (double) data_size /
(double) (n * is->audio_st->codec->sample_rate);
/* We have data, return it and come back for more later */
return data_size;
}
if (pkt->data)
av_free_packet(pkt);
if (is->quit) {
return -1;
}
/* next packet */
if (packet_queue_get(&is->audioq, pkt, 1) < 0) {
return -1;
}
is->audio_pkt_data = pkt->data;
is->audio_pkt_size = pkt->size;
/* if update, update the audio clock w/pts */
if (pkt->pts != AV_NOPTS_VALUE) {
is->audio_clock = av_q2d(is->audio_st->time_base) * pkt->pts;
}
}
}
开发者ID:Akagi201,项目名称:learning-ffmpeg,代码行数:60,代码来源:main.c
示例4: fprintf
/*
 * Forward any pending serial-data packet, then decode the next video
 * packet and schedule the converted frame on the DeckLink output.
 *
 * FIX: the original never checked CreateVideoFrame(); on failure
 * videoFrame was left uninitialized and then dereferenced via
 * GetBytes()/Release() (undefined behavior), and the packet leaked.
 */
void Player::ScheduleNextFrame(bool prerolling)
{
    AVPacket pkt;
    AVPicture picture;

    /* Non-blocking: forward one pending serial-data packet, if any. */
    if (serial_fd > 0 && packet_queue_get(&dataqueue, &pkt, 0)) {
        /* NOTE(review): assumes pkt.size > 0; a leading space appears to
         * mark packets that must not be written -- confirm with producer. */
        if (pkt.data[0] != ' '){
            fprintf(stderr,"written %.*s \n", pkt.size, pkt.data);
            write(serial_fd, pkt.data, pkt.size);
        }
        av_free_packet(&pkt);
    }

    /* Blocking: next video packet; < 0 means the queue was told to quit. */
    if (packet_queue_get(&videoqueue, &pkt, 1) < 0)
        return;

    IDeckLinkMutableVideoFrame *videoFrame = NULL;
    if (m_deckLinkOutput->CreateVideoFrame(m_frameWidth,
                                           m_frameHeight,
                                           m_frameWidth * 2,
                                           pix,
                                           bmdFrameFlagDefault,
                                           &videoFrame) != S_OK || !videoFrame) {
        fprintf(stderr, "Error creating video frame\n");
        av_free_packet(&pkt);
        return;
    }

    void *frame;
    int got_picture;
    videoFrame->GetBytes(&frame);
    avcodec_decode_video2(video_st->codec, avframe, &got_picture, &pkt);
    if (got_picture) {
        /* Convert the decoded frame directly into the DeckLink buffer. */
        avpicture_fill(&picture, (uint8_t *)frame, pix_fmt,
                       m_frameWidth, m_frameHeight);
        sws_scale(sws, avframe->data, avframe->linesize, 0, avframe->height,
                  picture.data, picture.linesize);
        if (m_deckLinkOutput->ScheduleVideoFrame(videoFrame,
                                                 pkt.pts *
                                                 video_st->time_base.num,
                                                 pkt.duration *
                                                 video_st->time_base.num,
                                                 video_st->time_base.den) !=
            S_OK)
            fprintf(stderr, "Error scheduling frame\n");
    }
    videoFrame->Release();
    av_free_packet(&pkt);
}
开发者ID:djlancelot,项目名称:bmdtools,代码行数:47,代码来源:bmdplay.cpp
示例5: LOGE
/*
 * Android/JNI video decoding thread.  Attaches to the JVM, decodes
 * packets from is->videoq, recovers a usable pts per frame, and hands
 * finished frames to queue_picture().  Detaches and exits via
 * pthread_exit() when the queue quits or is->quit is set.
 *
 * FIXES: the original used a bare `return;` in a function returning
 * void* (invalid); the final debug log was placed after pthread_exit()
 * and therefore unreachable; unused locals removed.
 */
void *video_thread(void *arg) {
    JNIEnv *env;
    if((*g_jvm)->AttachCurrentThread(g_jvm, &env, NULL) != JNI_OK) {
        LOGE(1, "### start video thead error");
        return NULL;
    }
    VideoState *is = (VideoState*)arg;
    AVPacket pkt1, *packet = &pkt1;
    int frameFinished;
    AVFrame *pFrame;
    double pts;
    pFrame = avcodec_alloc_frame();
    for(;;) {
        if(is->quit == 1 || is->quit == 2) {
            break;
        }
        if(packet_queue_get(&is->videoq, packet, 1) < 0) {
            if(debug) LOGI(10,"video_thread get packet exit");
            break;
        }
        pts = 0;
        global_video_pkt_pts = packet->pts;
        avcodec_decode_video2(is->video_st->codec,
                              pFrame,
                              &frameFinished,
                              packet);
        /* Recover a pts: prefer the opaque pts stashed by the get_buffer
         * callback, else the packet dts, else 0. */
        if(packet->dts == AV_NOPTS_VALUE
                && pFrame->opaque
                && *(uint64_t*)pFrame->opaque
                != AV_NOPTS_VALUE) {
            pts = *(uint64_t*) pFrame->opaque;
        } else if (packet->dts != AV_NOPTS_VALUE) {
            pts = packet->dts;
        } else {
            pts = 0;
        }
        pts *= av_q2d(is->video_st->time_base);
        if (frameFinished) {
            pts = synchronize_video(is, pFrame, pts);
            if (queue_picture(is, pFrame, pts) < 0) {
                break;
            }
        }
        av_free_packet(packet);
    }
    av_free(pFrame);
    if((*g_jvm)->DetachCurrentThread(g_jvm) != JNI_OK) {
        LOGE(1,"### detach video thread error");
    }
    if(debug) {
        LOGI(1,"### video_thread exit");
    }
    pthread_exit(0);
    return NULL;
}
开发者ID:dalvik,项目名称:Drovik,代码行数:58,代码来源:ffmpeg-jni.c
示例6: audio_decode_frame
/*
 * Decode audio packets from is->audioq into audio_buf (legacy
 * avcodec_decode_audio3 path).  Returns the number of bytes written to
 * audio_buf, or -1 on quit/queue shutdown.  *pts_ptr receives the audio
 * clock (seconds) for the returned data.  Packet state persists in *is,
 * so one packet may span several calls.
 */
int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr) {
  int len1, data_size, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;
  for(;;) {
    /* Drain bytes remaining from the packet of the previous call first. */
    while(is->audio_pkt_size > 0) {
      data_size = buf_size;
      len1 = avcodec_decode_audio3(is->audio_ctx,
				   (int16_t *)audio_buf, &data_size, pkt);
      //is->audio_pkt_data, is->audio_pkt_size);
      //len1 = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf, &data_size,
      //			   audio_pkt_data, audio_pkt_size);
      if(len1 < 0) {
	// if error, skip frame
	is->audio_pkt_size = 0;
	break;
      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
	// No data yet, get more frames
	continue;
      }
      pts = is->audio_clock;
      *pts_ptr = pts;
      /* NOTE(review): byte rate uses n = 2 rather than the commented
       * 2 * channels -- the audio clock will drift for stereo streams;
       * verify this is intentional. */
      //n = 2 * is->audio_ctx->channels;
      n = 2;
      is->audio_clock += (double)data_size /
	(double)(n * is->audio_ctx->sample_rate);
      // We have data, return it and come back for more later
      return data_size;
    }
    if(pkt->data)
      av_free_packet(pkt);
    if(is->quit) {
      return -1;
    }
    // next packet
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
    // if update, update the audio clock w/pts
    if(pkt->pts != AV_NOPTS_VALUE) {
      is->audio_clock = av_q2d(is->audio_ctx->time_base)*pkt->pts;
    }
  }
}
开发者ID:26597925,项目名称:SmileTime,代码行数:58,代码来源:player.c
示例7: audio_decode_frame
/*
 * Decode audio packets from the global m_audio_q into audio_buf using
 * avcodec_decode_audio4.  Returns the number of bytes written (as
 * reported by av_samples_get_buffer_size), or -1 on quit/queue shutdown.
 * Static state holds a partially consumed packet across calls, so this
 * function is not reentrant.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf,
	int buf_size) {
	static AVPacket pkt;
	static uint8_t *audio_pkt_data = NULL;
	static int audio_pkt_size = 0;
	int len1, data_size = 0;
	for(;;) {
		/* Consume whatever remains of the current packet first. */
		while(audio_pkt_size > 0) {
			int got_frame = 0;
			len1 = avcodec_decode_audio4
				(aCodecCtx, m_audio_frame, &got_frame, &pkt);
			if(len1 < 0)
			{
				/* decode error: discard the rest of this packet */
				audio_pkt_size = 0;
				break;
			}
			audio_pkt_data += len1;
			audio_pkt_size -= len1;
			data_size = 0;
			if(got_frame)
			{
				data_size =
					av_samples_get_buffer_size(NULL, aCodecCtx->channels,
						m_audio_frame->nb_samples,
						aCodecCtx->sample_fmt, 1);
				/* NOTE(review): no bounds check against buf_size (assert is
				 * commented out) -- a large frame could overrun audio_buf. */
				//assert(data_size <= buf_size);
				memcpy(audio_buf, m_audio_frame->data[0], data_size);
			}
			if(data_size <= 0)
			{
				/* No data yet, get more frames */
				continue;
			}
			/* We have data, return it and come back for more later */
			return data_size;
		}
		if(pkt.data)
			av_packet_unref(&pkt);
		if(quit) {
			return -1;
		}
		if(packet_queue_get(&m_audio_q, &pkt, 1) < 0) {
			return -1;
		}
		audio_pkt_data = pkt.data;
		audio_pkt_size = pkt.size;
	}
}
开发者ID:hoseogame,项目名称:20101898_SDL,代码行数:58,代码来源:DWVideo.cpp
示例8: audio_decode_frame
int audio_decode_frame( void )
{
//printf( "In Audio decode frame : Thread \n" );
SDL_Event quit_audio_event;
quit_audio_event.type = QUIT_AUDIO_EVENT;
PacketQueue *pAQueue = &( gMedia->audioQueue ) ;
static AVPacket packet;
AVFrame *pFrame = avcodec_alloc_frame() ;
int pkt_bytes_decd = 0;
int audio_data_size = 0;
int frame_fin = 0;
if ( packet.size == 0 ) {
if ( !packet_queue_get( pAQueue, &packet ) ) {
SDL_PushEvent( &quit_audio_event );
av_free( pFrame );
return -1;
}
}
while ( packet.size > 0 ) {
// printf("Size of packet is %d\n",packet.size);
pkt_bytes_decd = avcodec_decode_audio4( gMedia->aCodecContext,
pFrame,
&frame_fin,
&packet );
printf( "%d bytes from packet decoded\n", pkt_bytes_decd );
// printf("Format of Decoded frame is %d\n",pFrame->format);
// printf("Format of audio is %d\n",pFrame->nb_samples);
// aud_frame_pts = pFrame->pkt_pts ;
//printf( " audio frame : pts is %" PRId64 "\n", aud_frame_pts );
if ( pkt_bytes_decd < 0 ) {
/* if error, skip packet */
break;
}
if ( frame_fin ) {
audio_data_size = create_channel_data( pFrame );
packet.size -= pkt_bytes_decd;
av_free( pFrame );
return audio_data_size ;
}
}
/* if ( pkt->pts != AV_NOPTS_VALUE ) {
gMedia->audio_clock = av_q2d( gMedia->pFormatContext->
streams[aud_stream_index] )
* pkt->pts;
}
*/
return 1; //Never comes here
}
开发者ID:super11,项目名称:JSPlayer,代码行数:57,代码来源:audiofuncs.c
示例9: audio_decode_frame
/*
 * Decode audio packets from the global audioq into audio_buf (legacy
 * avcodec_decode_audio3 path with verbose logging).  Returns the number
 * of bytes written to audio_buf, or -1 on quit/queue shutdown.  Static
 * state carries a partially consumed packet across calls, so this
 * function is not reentrant.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
    //ffplay_info("Start.\n");
    static AVPacket pkt;
    static uint8_t *audio_pkt_data = NULL;
    static int audio_pkt_size = 0;
    int len1, data_size;
    for(;;)
    {
        /* Consume whatever remains of the current packet first. */
        while(audio_pkt_size > 0)
        {
            data_size = buf_size;
            len1 = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_buf, &data_size,
                                         &pkt);
            /* NOTE(review): "0x%8x" with pointer arguments is a format
             * mismatch on 64-bit targets -- %p would be correct. */
            ffplay_info("audio_buf = 0x%8x, data_size = %d, pkt = 0x%8x\n",audio_buf,data_size,&pkt);
            if(len1 < 0)
            {
                /* if error, skip frame */
                audio_pkt_size = 0;
                break;
            }
            audio_pkt_data += len1;
            audio_pkt_size -= len1;
            if(data_size <= 0)
            {
                /* No data yet, get more frames */
                continue;
            }
            /* We have data, return it and come back for more later */
            return data_size;
        }
        if(pkt.data)
        {
            ffplay_info("Here.\n");
            av_free_packet(&pkt);
        }
        if(quit)
        {
            ffplay_info("Here.\n");
            return -1;
        }
        if(packet_queue_get(&audioq, &pkt, 1) < 0)
        {
            ffplay_info("Here.\n");
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
    }
    //ffplay_info("end.\n");
}
开发者ID:beizhong2501,项目名称:ffmpeg_tutorial_modify,代码行数:56,代码来源:ffmpeg_tutorial_modify_03.c
示例10: video_thread
/*
 * Video decoding thread (seek-aware): pulls packets from is->videoq,
 * flushes the decoder on the special flush packet, recovers a pts for
 * each frame, and hands finished frames to queue_picture().  Returns 0
 * when the packet queue signals shutdown.
 */
int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int frameFinished;
  AVFrame *pFrame;
  double pts;
  pFrame = avcodec_alloc_frame();
  for(;;) {
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
      // means we quit getting packets
      break;
    }
    /* Flush marker inserted after a seek: reset decoder state. */
    if(packet->data == flush_pkt.data) {
      avcodec_flush_buffers(is->video_st->codec);
      continue;
    }
    pts = 0;
    // Save global pts to be stored in pFrame in first call
    global_video_pkt_pts = packet->pts;
    // Decode video frame
    avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
			  packet);
    /* Recover a pts: prefer the opaque pts stashed by the get_buffer
     * callback, else the packet dts, else 0. */
    if(packet->dts == AV_NOPTS_VALUE
       && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
      pts = *(uint64_t *)pFrame->opaque;
    } else if(packet->dts != AV_NOPTS_VALUE) {
      pts = packet->dts;
    } else {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);
    // Did we get a video frame?
    if(frameFinished) {
      pts = synchronize_video(is, pFrame, pts);
      if(queue_picture(is, pFrame, pts) < 0) {
	break;
      }
    }
    av_free_packet(packet);
  }
  av_free(pFrame);
  return 0;
}
开发者ID:elad-perets,项目名称:dranger_ffmpeg_ubuntu_trusty,代码行数:56,代码来源:tutorial07.c
示例11: audio_decode_frame
/*
 * Decode audio packets from the global audioq into audio_buf.
 * pkt holds the owned packet (for freeing); pkt_temp is a consuming view
 * of it that is advanced as the decoder eats bytes.  Returns the decoded
 * data size (per av_samples_get_buffer_size) or -1 on quit/shutdown.
 *
 * FIX: the original copied frame->linesize[0] bytes into audio_buf while
 * returning data_size.  linesize[0] includes alignment padding and can
 * exceed the valid sample data, overrunning audio_buf / feeding garbage
 * to the caller.  Copy exactly data_size bytes instead.
 * NOTE(review): for planar sample formats frame->data[0] holds only the
 * first channel; proper playback needs resampling to an interleaved
 * format -- confirm the codec's sample_fmt.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, AVPacket *pkt, AVPacket *pkt_temp, AVFrame *frame, uint8_t *audio_buf)
{
    int len1, data_size;
    int got_frame = 0;
    int new_packet = 0;
    while(1)
    {
        while(pkt_temp->size > 0 || (!pkt_temp->data && new_packet))
        {
            if(!frame)
            {
                if(!(frame = avcodec_alloc_frame()))
                    return AVERROR(ENOMEM);
            }
            else
            {
                avcodec_get_frame_defaults(frame);
            }
            new_packet = 0;
            got_frame = 0;
            len1 = avcodec_decode_audio4(aCodecCtx, frame, &got_frame, pkt_temp);
            if(len1 < 0)
            {
                /*if error, skip frame*/
                pkt_temp->size = 0;
                av_free_packet(pkt_temp);
                continue;
            }
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if(!got_frame)
            {
                /*stop sending empty packets if the decoder is finished*/
                continue;
            }
            data_size = av_samples_get_buffer_size(NULL, aCodecCtx->channels, frame->nb_samples, aCodecCtx->sample_fmt, 1);
            /* FIX: copy data_size bytes (was frame->linesize[0]). */
            memcpy(audio_buf, frame->data[0], data_size);
            return data_size;
        }
        /* Current packet exhausted: free it and fetch the next one. */
        if(pkt->data)
            av_free_packet(pkt);
        memset(pkt_temp, 0, sizeof(*pkt_temp));
        if(quit)
        {
            return -1;
        }
        if((new_packet = packet_queue_get(&audioq, pkt, 1)) < 0)
        {
            return -1;
        }
        *pkt_temp = *pkt;
    }
}
开发者ID:elmagroud00,项目名称:Experiments,代码行数:55,代码来源:main.c
示例12: video_thread
/*
 * Video decoding thread: decodes packets from is->videoq into pFrame and
 * hands finished frames (with a recovered pts) to queue_picture().  Also
 * allocates is->rgbaFrame, an RGBA picture used elsewhere for rendering,
 * and frees it on exit.  Returns 0 when the queue signals shutdown.
 */
int video_thread(void *arg) {
    VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;
    AVFrame *pFrame;
    double pts;
    pFrame = avcodec_alloc_frame();
    /* RGBA destination frame for the renderer; freed at thread exit. */
    is->rgbaFrame = avcodec_alloc_frame();
    avpicture_alloc ((AVPicture *)is->rgbaFrame, PIX_FMT_RGBA, is->video_st->codec->width, is->video_st->codec->height);
    for(;;) {
        if(packet_queue_get(&is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        pts = 0;
        // Save global pts to be stored in pFrame
        global_video_pkt_pts = packet->pts;
        // Decode video frame
        len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
                                     packet);
        /* Recover a pts: prefer the opaque pts stashed by the get_buffer
         * callback, else the packet dts, else 0. */
        if(packet->dts == AV_NOPTS_VALUE
                && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
            pts = (double)(*(uint64_t *)pFrame->opaque);
        } else if(packet->dts != AV_NOPTS_VALUE) {
            pts = (double)packet->dts;
        } else {
            pts = 0;
        }
        pts *= av_q2d(is->video_st->time_base);
        // Did we get a video frame?
        if(frameFinished) {
            pts = synchronize_video(is, pFrame, pts);
            if(queue_picture(is, pFrame, pts) < 0) {
                break;
            }
        }
        av_free_packet(packet);
    }
    /* NOTE(review): closing SDL audio from the video thread is unusual --
     * presumably video-queue shutdown implies whole-player shutdown here;
     * confirm against the demux/quit logic. */
    SDL_CloseAudio();
    av_free(pFrame);
    avpicture_free((AVPicture *)is->rgbaFrame);
    av_free(is->rgbaFrame);
    return 0;
}
开发者ID:pdpdds,项目名称:Win32OpenSourceSample,代码行数:55,代码来源:VideoPlayer.cpp
示例13: decode_audio_frame
/*
 * Decode one audio frame from ctx->audioq.  On success, *buf is pointed
 * at the decoded samples and the buffer size (per
 * av_samples_get_buffer_size) is returned; returns -1 on quit/shutdown.
 *
 * Static state: cur_pkt owns the packet buffer (kept for av_free_packet);
 * pkt is a consuming copy whose data/size are advanced as the decoder
 * eats bytes.  NOTE(review): *buf aliases frame->data[0] of a static
 * frame -- presumably only valid until the next call; confirm the caller
 * copies the data before calling again.
 */
int decode_audio_frame (PlayerContext *ctx, uint8_t **buf)
{
    static AVPacket pkt, cur_pkt;
    static AVFrame *frame;
    int got_frame, decoded_bytes;
    /* Lazily allocate the reusable decode frame on first call. */
    if (!frame)
    {
        frame = avcodec_alloc_frame ();
        if (!frame)
            return AVERROR (ENOMEM);
    }
    for (;;)
    {
        /* Consume whatever remains of the current packet view. */
        while (pkt.size > 0)
        {
            avcodec_get_frame_defaults (frame);
            decoded_bytes = avcodec_decode_audio4 (ctx->audio_codec,
                                                   frame, &got_frame,
                                                   &pkt);
            if (decoded_bytes < 0)
            {
                // error, skip the frame
                pkt.size = 0;
                break;
            }
            pkt.data += decoded_bytes;
            pkt.size -= decoded_bytes;
            /* NOTE(review): got_frame is not checked before using frame --
             * when no frame was produced this returns stale/empty data;
             * verify against the decoder's behavior. */
            *buf = frame->data[0];
            return av_samples_get_buffer_size(NULL,
                                              frame->channels,
                                              frame->nb_samples,
                                              frame->format, 1);
        }
        // free the current packet
        if (cur_pkt.data)
            av_free_packet (&cur_pkt);
        memset (&pkt, 0, sizeof (pkt));
        if (quit)
            return -1;
        // read next packet
        if (packet_queue_get (&ctx->audioq, &cur_pkt, 1) < 0)
            return -1;
        /* pkt becomes a consuming view into cur_pkt's buffer. */
        pkt = cur_pkt;
    }
}
开发者ID:bazurbat,项目名称:ffmpeg_tutorial,代码行数:55,代码来源:tutorial04.c
示例14: audio_decode_frame
/*
 * Decode audio packets from the global audioq, resample them with
 * swr_ctx, and write the converted samples into audio_buf.  Returns the
 * resampled byte count, or -1 on quit/queue shutdown.  Static state
 * carries a partially consumed packet across calls (not reentrant).
 * NOTE(review): the returned size assumes stereo S16 output (the "* 2"
 * channel factor is hard-coded) -- confirm swr_ctx is configured for
 * 2-channel AV_SAMPLE_FMT_S16.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
	static AVPacket pkt;
	static uint8_t *audio_pkt_data = NULL;
	static int audio_pkt_size = 0;
	static AVFrame frame;
	int len1, resampled_data_size=0;
	for (;;) {
		/* Consume whatever remains of the current packet first. */
		while (audio_pkt_size > 0) {
			int got_frame = 0;
			len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
			if (len1 < 0) {
				/* if error, skip frame */
				audio_pkt_size = 0;
				break;
			}
			audio_pkt_data += len1;
			audio_pkt_size -= len1;
			if (got_frame) {
				// ---------------
				/* Prepare the remaining required swr_convert parameters:
				 * out, out_samples_per_ch, in, in_samples_per_ch. */
				uint8_t **out = &audio_buf;
				const uint8_t **in = (const uint8_t **)frame.extended_data;
				//int out_samples_per_ch = buf_size/ (av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)*2);
				/* Perform the conversion with swr_convert. */
				int len2 = 0;
				len2 = swr_convert(swr_ctx, out, frame.nb_samples, in, frame.nb_samples);
				/* bytes = samples * 2 channels * bytes-per-S16-sample */
				resampled_data_size = len2 * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
				//memcpy(audio_buf, frame.data[0], data_size);
				// ----------------
			}
			/* We have data, return it and come back for more later */
			return resampled_data_size;
		}
		if (pkt.data)
			av_free_packet(&pkt);
		if (quit) {
			return -1;
		}
		if (packet_queue_get(&audioq, &pkt, 1) < 0) {
			return -1;
		}
		audio_pkt_data = pkt.data;
		audio_pkt_size = pkt.size;
	}
}
开发者ID:shileiz,项目名称:notes,代码行数:55,代码来源:tutorial03.01_ConvertRawAudio.cpp
示例15: video_thread
/*
 * Video decoding thread: pulls packets from is->videoq, decodes them,
 * recovers a best-effort pts, and hands finished frames to
 * queue_picture().  Returns 0 when the packet queue signals shutdown.
 *
 * FIXES: the original called packet_queue_get() twice in a row, silently
 * discarding (and leaking) every other packet; and its pts condition was
 * inverted -- it zeroed pts exactly when a valid best-effort timestamp
 * existed, and kept AV_NOPTS_VALUE when none did.
 */
int video_thread(void *arg) {
    VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int frameFinished;
    AVFrame *pFrame;
    double pts;
    pFrame = av_frame_alloc();
    for(;;) {
        if(packet_queue_get(&is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        // Decode video frame
        avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet);
        /* Use the best-effort timestamp when available, else 0. */
        if((pts = av_frame_get_best_effort_timestamp(pFrame)) == AV_NOPTS_VALUE) {
            pts = 0;
        }
        pts *= av_q2d(is->video_st->time_base);
        // Did we get a video frame?
        if(frameFinished) {
            pts = synchronize_video(is, pFrame, pts);
            if(queue_picture(is, pFrame, pts) < 0) {
                break;
            }
        }
        av_free_packet(packet);
    }
    av_frame_free(&pFrame);
    return 0;
}
开发者ID:shileiz,项目名称:notes,代码行数:42,代码来源:tutorial07.c
示例16: video_decode_audio_frame
/*
 * Decode audio packets from the global audio_queue into buffer (legacy
 * avcodec_decode_audio3 path).  Returns the number of bytes decoded into
 * audio_buffer, or -1 on stop/queue shutdown.  Global statics
 * (audio_packet_data/size) carry a partially consumed packet across
 * calls, and audio_clock is advanced by the duration of the returned
 * samples (assumes 2 bytes per sample per channel, i.e. S16).
 */
int video_decode_audio_frame( AVCodecContext *context, uint8_t *buffer, int buffer_size ) {
	static AVPacket packet;
	int used, data_size;
	for(;;) {
		/* Consume whatever remains of the current packet first. */
		while( audio_packet_size > 0 ) {
			data_size = buffer_size;
			/* Build a temporary packet view over the remaining bytes. */
			AVPacket avp;
			av_init_packet( &avp );
			avp.data = audio_packet_data;
			avp.size = audio_packet_size;
			used = avcodec_decode_audio3( context, (int16_t *)audio_buffer, &data_size,
				&avp );
			if( used < 0 ) {
				/* if error, skip frame */
				audio_packet_size = 0;
				break;
			}
			audio_packet_data += used;
			audio_packet_size -= used;
			if( data_size <= 0 ) {
				/* No data yet, get more frames */
				continue;
			}
			/* Advance the audio clock by the playback time of this chunk. */
			audio_clock += (double)data_size /
				(double)(format_context->streams[audio_stream]->codec->sample_rate *
					(2 * format_context->streams[audio_stream]->codec->channels));
			/* We have data, return it and come back for more later */
			return data_size;
		}
		if( packet.data )
			av_free_packet( &packet );
		if( stop ) {
			audio_running = 0;
			return -1;
		}
		if( packet_queue_get( &audio_queue, &packet, 1 ) < 0 )
			return -1;
		audio_packet_data = packet.data;
		audio_packet_size = packet.size;
		/* Resynchronize the audio clock from the packet pts when present. */
		if( packet.pts != AV_NOPTS_VALUE ) {
			audio_clock = packet.pts * av_q2d( format_context->streams[audio_stream]->time_base );
		}
	}
}
开发者ID:pyroticinsanity,项目名称:Cabrio-Remix,代码行数:54,代码来源:video.c
示例17: video_thread
/*
 * Video decoding thread (seek-aware): pulls packets from is->videoq,
 * resets the decoder on the flush marker, and hands completed frames to
 * queue_picture().  Returns 0 once the packet queue signals shutdown.
 */
int video_thread(void *arg) {
    VideoState *is = (VideoState*)arg;
    AVPacket pkt_storage;
    AVPacket *pkt = &pkt_storage;
    AVFrame *frame = avcodec_alloc_frame();
    int got_frame = 0;
    double pts = 0;

    for (;;) {
        if (packet_queue_get(&is->videoq, pkt, 1) < 0) {
            fprintf(stderr, "%d: packet_queue_get errror\n", __LINE__);
            break;
        }
        if (pkt->data == flush_pkt.data) {
            /* seek occurred: drop buffered decoder state and skip marker */
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }
        pts = 0;
        global_video_pkt_pts = pkt->pts;
        avcodec_decode_video2(is->video_st->codec, frame, &got_frame, pkt);
        /* NOTE(review): pts recovery from packet dts/opaque was disabled
         * in the original source; pts therefore remains 0 here. */
        if (got_frame) {
            pts = synchronize_video(is, frame, pts);
            if (queue_picture(is, frame, pts) < 0) {
                break;
            }
        }
        av_free_packet(pkt);
    }
    av_free(frame);
    return 0;
}
开发者ID:weimingtom,项目名称:ffmpeg_win32_ndk,代码行数:54,代码来源:main.c
示例18: audio_decode_frame
/*
 * Decode audio packets from is->audioq, resample each decoded frame to
 * S16 at 44100 Hz via AudioResampling(), and store the result in
 * audio_buf.  Returns the resampled byte count, or -1 on quit/queue
 * shutdown.  *pts_ptr receives the audio clock (seconds).  Static state
 * carries a partially consumed packet across calls (not reentrant).
 */
int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size,double *pts_ptr)
{
    AVCodecContext *aCodecCtx = is->audio_st->codec;
    static AVPacket pkt;
    static uint8_t *audio_pkt_data = NULL;
    static int audio_pkt_size = 0;
    static AVFrame frame;
    int len1, data_size = 0;
    double pts;
    for(;;) {
        /* Consume whatever remains of the current packet first. */
        while(audio_pkt_size > 0) {
            int got_frame = 0;
            len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
            if(len1 < 0) {
                /* if error, skip frame */
                audio_pkt_size = 0;
                break;
            }
            /* Resample to S16 @ 44100 Hz.  NOTE(review): the clock math
             * below uses the codec's sample_rate while the data is
             * resampled to a hard-coded 44100 -- the clock will drift when
             * the rates differ; verify intent. */
            data_size = AudioResampling(aCodecCtx,&frame, AV_SAMPLE_FMT_S16,aCodecCtx->channels, 44100, audio_buf);
            audio_pkt_data += len1;
            audio_pkt_size -= len1;
            if(data_size <= 0) {
                continue;
            }
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* 2 bytes per S16 sample times channel count. */
            int n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * is->audio_st->codec->sample_rate);
            /* We have data, return it and come back for more later */
            return data_size;
        }
        if(pkt.data)
            av_free_packet(&pkt);
        if(is->quit) {
            return -1;
        }
        if(packet_queue_get(&is->audioq, &pkt, 1) < 0) {
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
        /* Resynchronize the audio clock from the packet pts when present. */
        if(pkt.pts != AV_NOPTS_VALUE){
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt.pts;
        }
    }
}
示例19: audio_decode_frame
/*
 * Decode audio packets from the global audioq into audio_buf via the
 * legacy avcodec_decode_audio3 path (an avcodec_decode_audio4 variant is
 * present but compiled out with #if 0).  Returns the decoded byte count,
 * or -1 on quit/queue shutdown.  Static state carries a partially
 * consumed packet across calls (not reentrant).
 * NOTE(review): pFrame and got_frame are only used by the disabled
 * branch; pFrame is allocated and freed every call regardless.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
	static AVPacket pkt;
	static uint8_t *audio_pkt_data = NULL;
	static int audio_pkt_size = 0;
	AVFrame *pFrame = av_frame_alloc();
	int got_frame;
	int len1, data_size;
	for(;;) {
		/* Consume whatever remains of the current packet first. */
		while(audio_pkt_size > 0) {
			data_size = buf_size;
#if 0
			len1 = avcodec_decode_audio4(aCodecCtx, pFrame, &got_frame, &pkt);
#else
			len1 = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_buf, &data_size, &pkt);
#endif
			if(len1 < 0) {
				/* if error, skip frame */
				audio_pkt_size = 0;
				break;
			}
			audio_pkt_data += len1;
			audio_pkt_size -= len1;
			if(data_size <= 0) {
				/* No data yet, get more frames */
				continue;
			}
			/* We have data, return it and come back for more later */
			av_frame_free(&pFrame);
			return data_size;
		}
		if(pkt.data)
			av_free_packet(&pkt);
		if(quit) {
			av_frame_free(&pFrame);
			return -1;
		}
		if(packet_queue_get(&audioq, &pkt, 1) < 0) {
			av_frame_free(&pFrame);
			return -1;
		}
		audio_pkt_data = pkt.data;
		audio_pkt_size = pkt.size;
		printf("\r\n audio_pkt_size:%d!!\r\n",audio_pkt_size);
	}
}
开发者ID:zsirGitHub,项目名称:helloffmpeg,代码行数:50,代码来源:myplay.c
示例20: video_thread
int video_thread(void* arg) {
VideoState* is = (VideoState*) arg;
AVPacket pkt1, *packet = &pkt1;
int frameFinished;
AVFrame* pFrame, *pFrameYUV;
uint8_t *out_buffer;
double pts;
pFrame = av_frame_alloc();
pFrameYUV = av_frame_alloc();
out_buffer = (uint8_t*) av_malloc(
avpicture_get_size(AV_PIX_FMT_YUV420P, is->video_ctx->width,
is->video_ctx->height));
avpicture_fill((AVPicture*) pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P,
is->video_ctx->width, is->video_ctx->height);
for (;;) {
if (packet_queue_get(&is->videoq, packet, 1, 1) < 0) {
break;
}
if (packet->data == flush_pkt.data) {
avcodec_flush_buffers(is->video_ctx);
continue;
}
pts = 0;
avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet);
if ((pts = av_frame_get_best_effort_timestamp(pFrame)) == AV_NOPTS_VALUE) {
pts = 0;
}
pts *= av_q2d(is->video_st->time_base);
printf("queue_picture frameFinished=%d packet->size=%d pts=%lf\n",
frameFinished, packet->size, pts);
if (frameFinished) {
pts = synchronize_video(is, pFrame, pts);
if (queue_picture(is, pFrame, pFrameYUV, pts) < 0) {
break;
}
}
av_free_packet(packet);
}
av_frame_free(&pFrame);
av_frame_free(&pFrameYUV);
return 0;
}
开发者ID:soffio,项目名称:FFmpegTutorial,代码行数:50,代码来源:Test7.cpp
注:本文中的packet_queue_get函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论