本文整理汇总了C++中bytestream2_skip函数的典型用法代码示例。如果您正苦于以下问题:C++ bytestream2_skip函数的具体用法?C++ bytestream2_skip怎么用?C++ bytestream2_skip使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了bytestream2_skip函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: tgq_decode_mb
/**
 * Decode a single 16x16 macroblock.
 *
 * The first byte selects the coding mode: values > 12 signal a fully
 * DCT-coded macroblock (the mode byte doubling as the payload length in
 * bytes), while modes 3/6/12 are DC-only modes that differ only in how
 * the six DC values are packed in the bytestream.
 *
 * @param s     decoder context
 * @param frame frame to render into
 * @param mb_y  macroblock row
 * @param mb_x  macroblock column
 * @return 0 on success, a negative value on error
 */
static int tgq_decode_mb(TgqContext *s, AVFrame *frame, int mb_y, int mb_x)
{
    int mode;
    int i, ret;
    int8_t dc[6];

    mode = bytestream2_get_byte(&s->gb);
    if (mode > 12) {
        GetBitContext gb;
        /* Fix: do not ignore init_get_bits8() failure — it can reject the
         * buffer (e.g. oversized), and decoding from an uninitialized
         * GetBitContext would be undefined. */
        ret = init_get_bits8(&gb, s->gb.buffer,
                             FFMIN(bytestream2_get_bytes_left(&s->gb), mode));
        if (ret < 0)
            return ret;
        for (i = 0; i < 6; i++)
            tgq_decode_block(s, s->block[i], &gb);
        tgq_idct_put_mb(s, s->block, frame, mb_x, mb_y);
        bytestream2_skip(&s->gb, mode);
    } else {
        if (mode == 3) {
            /* one DC value shared by the four luma blocks, then two chroma DCs */
            memset(dc, bytestream2_get_byte(&s->gb), 4);
            dc[4] = bytestream2_get_byte(&s->gb);
            dc[5] = bytestream2_get_byte(&s->gb);
        } else if (mode == 6) {
            /* six DC values packed back to back */
            bytestream2_get_buffer(&s->gb, dc, 6);
        } else if (mode == 12) {
            /* six DC values, each followed by one skipped byte */
            for (i = 0; i < 6; i++) {
                dc[i] = bytestream2_get_byte(&s->gb);
                bytestream2_skip(&s->gb, 1);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode);
            return -1;
        }
        tgq_idct_put_mb_dconly(s, frame, mb_x, mb_y, dc);
    }
    return 0;
}
开发者ID:Armada651,项目名称:FFmpeg,代码行数:34,代码来源:eatgq.c
示例2: apng_probe
/*
 * To be a valid APNG file, we mandate, in this order:
 *   PNGSIG
 *   IHDR
 *   ...
 *   acTL
 *   ...
 *   IDAT
 * Walk the chunk list and advance a small state machine; any violation
 * of the required ordering or sizes rejects the file immediately.
 */
static int apng_probe(AVProbeData *p)
{
    GetByteContext gb;
    uint32_t length, tag;
    int stage = 0;   /* 0: expect IHDR, 1: expect acTL, 2: expect IDAT */

    bytestream2_init(&gb, p->buf, p->buf_size);

    if (bytestream2_get_be64(&gb) != PNGSIG)
        return 0;

    while (1) {
        length = bytestream2_get_be32(&gb);
        if (length > 0x7fffffff)
            return 0;

        tag = bytestream2_get_le32(&gb);
        /* we don't check IDAT size, as this is the last tag
         * we check, and it may be larger than the probe buffer */
        if (tag != MKTAG('I', 'D', 'A', 'T') &&
            length > bytestream2_get_bytes_left(&gb))
            return 0;

        if (tag == MKTAG('I', 'H', 'D', 'R')) {
            if (length != 13)
                return 0;
            if (av_image_check_size(bytestream2_get_be32(&gb),
                                    bytestream2_get_be32(&gb), 0, NULL))
                return 0;
            bytestream2_skip(&gb, 9);   /* remaining IHDR fields + CRC */
            stage++;
        } else if (tag == MKTAG('a', 'c', 'T', 'L')) {
            if (stage != 1 ||
                length != 8 ||
                bytestream2_get_be32(&gb) == 0) /* 0 is not a valid value for number of frames */
                return 0;
            bytestream2_skip(&gb, 8);
            stage++;
        } else if (tag == MKTAG('I', 'D', 'A', 'T')) {
            if (stage != 2)
                return 0;
            return AVPROBE_SCORE_MAX;
        } else {
            /* uninteresting chunk: skip its payload and CRC */
            bytestream2_skip(&gb, length + 4);
        }
    }
}
开发者ID:alikuro,项目名称:FFmpeg,代码行数:63,代码来源:apngdec.c
示例3: read_uncompressed_sgi
/**
 * Read an uncompressed SGI image.
 *
 * Planes are stored sequentially in the file, so one byte reader per
 * plane walks them in parallel, letting the output be written in
 * interleaved (pixel-major) order.  Rows are consumed in file order but
 * written starting from the last output row, i.e. the file stores the
 * image bottom-up relative to the output buffer.
 *
 * @param out_buf output buffer
 * @param s the current image state
 * @return 0 on success, AVERROR_INVALIDDATA if the input buffer is too small.
 */
static int read_uncompressed_sgi(unsigned char *out_buf, SgiState *s)
{
    int x, y, z;
    /* byte size of one full plane */
    unsigned int offset = s->height * s->width * s->bytes_per_channel;
    /* NOTE(review): assumes s->depth <= 4 — presumably validated by the
     * caller before this is reached; confirm */
    GetByteContext gp[4];
    uint8_t *out_end;

    /* Test buffer size. */
    if (offset * s->depth > bytestream2_get_bytes_left(&s->g))
        return AVERROR_INVALIDDATA;

    /* Create a reader for each plane, offset to that plane's start */
    for (z = 0; z < s->depth; z++) {
        gp[z] = s->g;
        bytestream2_skip(&gp[z], z * offset);
    }

    for (y = s->height - 1; y >= 0; y--) {
        out_end = out_buf + (y * s->linesize);
        if (s->bytes_per_channel == 1) {
            /* 8-bit samples: interleave one byte from each plane */
            for (x = s->width; x > 0; x--)
                for (z = 0; z < s->depth; z++)
                    *out_end++ = bytestream2_get_byteu(&gp[z]);
        } else {
            /* 16-bit samples, native-endian reads */
            uint16_t *out16 = (uint16_t *)out_end;
            for (x = s->width; x > 0; x--)
                for (z = 0; z < s->depth; z++)
                    *out16++ = bytestream2_get_ne16u(&gp[z]);
        }
    }
    return 0;
}
开发者ID:Acidburn0zzz,项目名称:libav,代码行数:38,代码来源:sgidec.c
示例4: pix_decode_header
/* Parse a PIX image header: a 32-bit length followed by format, two
 * unused bytes, and 16-bit width/height.  Consumes the entire header
 * (header_len bytes) from the reader.
 * Returns 0 on success, AVERROR_INVALIDDATA if the header is too short. */
static int pix_decode_header(PixHeader *out, GetByteContext *pgb)
{
    const unsigned int fixed_part = 7;  /* format(1) + skip(2) + width(2) + height(2) */
    unsigned int header_len;

    header_len  = bytestream2_get_be32(pgb);
    out->format = bytestream2_get_byte(pgb);
    bytestream2_skip(pgb, 2);
    out->width  = bytestream2_get_be16(pgb);
    out->height = bytestream2_get_be16(pgb);

    /* the header is at least 11 bytes long; fixed_part bytes were read above */
    if (header_len < 11)
        return AVERROR_INVALIDDATA;

    /* advance past whatever remains of the header */
    bytestream2_skip(pgb, header_len - fixed_part);

    return 0;
}
开发者ID:0Soul,项目名称:FFmpeg,代码行数:18,代码来源:brenderpix.c
示例5: hqa_decode_frame
/**
 * Decode one HQA frame.
 *
 * The HQA payload carries its own dimensions, a quantiser index, three
 * skipped bytes, and a table of 9 big-endian slice offsets (8 slices
 * plus an end marker) relative to the 'HQA1' tag position.
 *
 * @param ctx       decoder context; its byte reader is positioned just
 *                  past the tag
 * @param pic       frame to decode into
 * @param data_size number of bytes belonging to this frame
 * @return 0 on success, a negative AVERROR code on failure
 */
static int hqa_decode_frame(HQContext *ctx, AVFrame *pic, size_t data_size)
{
    GetBitContext gb;
    const int num_slices = 8;   /* HQA always uses a fixed slice count */
    uint32_t slice_off[9];      /* num_slices starts + one end offset */
    int i, slice, ret;
    int width, height, quant;
    const uint8_t *src = ctx->gbc.buffer;

    width  = bytestream2_get_be16(&ctx->gbc);
    height = bytestream2_get_be16(&ctx->gbc);

    /* macroblock-aligned coded size, exact display size */
    ctx->avctx->coded_width  = FFALIGN(width, 16);
    ctx->avctx->coded_height = FFALIGN(height, 16);
    ctx->avctx->width        = width;
    ctx->avctx->height       = height;
    ctx->avctx->bits_per_raw_sample = 8;
    ctx->avctx->pix_fmt = AV_PIX_FMT_YUVA422P;

    av_log(ctx->avctx, AV_LOG_VERBOSE, "HQA Profile\n");

    quant = bytestream2_get_byte(&ctx->gbc);
    bytestream2_skip(&ctx->gbc, 3);   /* 3 bytes skipped — unused here */
    if (quant >= NUM_HQ_QUANTS) {
        av_log(ctx->avctx, AV_LOG_ERROR,
               "Invalid quantization matrix %d.\n", quant);
        return AVERROR_INVALIDDATA;
    }

    ret = ff_get_buffer(ctx->avctx, pic, 0);
    if (ret < 0) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
        return ret;
    }

    /* Offsets are stored from HQA1 position, so adjust them accordingly. */
    for (i = 0; i < num_slices + 1; i++)
        slice_off[i] = bytestream2_get_be32(&ctx->gbc) - 4;

    for (slice = 0; slice < num_slices; slice++) {
        /* basic sanity checks: each slice must start after the header
         * area, precede the next slice, and end within the payload */
        if (slice_off[slice] < (num_slices + 1) * 3 ||
            slice_off[slice] >= slice_off[slice + 1] ||
            slice_off[slice + 1] > data_size) {
            av_log(ctx->avctx, AV_LOG_ERROR,
                   "Invalid slice size %zu.\n", data_size);
            break;   /* skip remaining slices rather than failing hard */
        }
        init_get_bits(&gb, src + slice_off[slice],
                      (slice_off[slice + 1] - slice_off[slice]) * 8);

        ret = hqa_decode_slice(ctx, pic, &gb, quant, slice, width, height);
        if (ret < 0)
            return ret;
    }

    return 0;
}
开发者ID:Brainiarc7,项目名称:libav,代码行数:57,代码来源:hq_hqa.c
示例6: hq_hqa_decode_frame
static int hq_hqa_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
HQContext *ctx = avctx->priv_data;
AVFrame *pic = data;
uint32_t info_tag;
unsigned int data_size;
int ret;
unsigned tag;
bytestream2_init(&ctx->gbc, avpkt->data, avpkt->size);
if (bytestream2_get_bytes_left(&ctx->gbc) < 4 + 4) {
av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n", avpkt->size);
return AVERROR_INVALIDDATA;
}
info_tag = bytestream2_peek_le32(&ctx->gbc);
if (info_tag == MKTAG('I', 'N', 'F', 'O')) {
int info_size;
bytestream2_skip(&ctx->gbc, 4);
info_size = bytestream2_get_le32(&ctx->gbc);
if (bytestream2_get_bytes_left(&ctx->gbc) < info_size) {
av_log(avctx, AV_LOG_ERROR, "Invalid INFO size (%d).\n", info_size);
return AVERROR_INVALIDDATA;
}
ff_canopus_parse_info_tag(avctx, ctx->gbc.buffer, info_size);
bytestream2_skip(&ctx->gbc, info_size);
}
data_size = bytestream2_get_bytes_left(&ctx->gbc);
if (data_size < 4) {
av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n", data_size);
return AVERROR_INVALIDDATA;
}
/* HQ defines dimensions and number of slices, and thus slice traversal
* order. HQA has no size constraint and a fixed number of slices, so it
* needs a separate scheme for it. */
tag = bytestream2_get_le32(&ctx->gbc);
if ((tag & 0x00FFFFFF) == (MKTAG('U', 'V', 'C', ' ') & 0x00FFFFFF)) {
ret = hq_decode_frame(ctx, pic, tag >> 24, data_size);
} else if (tag == MKTAG('H', 'Q', 'A', '1')) {
开发者ID:Diagonactic,项目名称:plex-new-transcoder,代码行数:43,代码来源:hq_hqa.c
示例7: mm_decode_pal
/* Decode a palette chunk: 4 header bytes are skipped, then 128 big-endian
 * 24-bit RGB triplets are stored with full alpha (0xFF); entries 128..255
 * hold the same values shifted left by two bits. */
static void mm_decode_pal(MmContext *s)
{
    int idx;

    bytestream2_skip(&s->gb, 4);
    for (idx = 0; idx != 128; idx++) {
        uint32_t rgb = bytestream2_get_be24(&s->gb);

        s->palette[idx]       = (0xFFU << 24) | rgb;
        s->palette[idx + 128] = s->palette[idx] << 2;
    }
}
开发者ID:markjreed,项目名称:vice-emu,代码行数:10,代码来源:mmvideo.c
示例8: mm_decode_pal
/* Decode a palette chunk: skip 4 header bytes, then read 128 big-endian
 * 24-bit colors; entries 128..255 hold the same values shifted left by
 * two bits.  Always returns 0. */
static int mm_decode_pal(MmContext *s)
{
    int n;

    bytestream2_skip(&s->gb, 4);
    for (n = 0; n != 128; n++) {
        uint32_t color = bytestream2_get_be24(&s->gb);

        s->palette[n]       = color;
        s->palette[n + 128] = color << 2;
    }

    return 0;
}
开发者ID:smarter,项目名称:libav,代码行数:12,代码来源:mmvideo.c
示例9: tgq_decode_frame
/**
 * Decode an EA TGQ frame (legacy context-owned-AVFrame API).
 *
 * The packet begins with a 16-byte header.  Bytes 4..7 read as little
 * endian act as an endianness probe: implausibly large values mean the
 * stream actually stores its fields big-endian.
 *
 * @return packet size consumed on success, a negative value on error
 */
static int tgq_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame,
                            AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    TgqContext *s = avctx->priv_data;
    int x,y;
    int big_endian;

    if (buf_size < 16) {
        av_log(avctx, AV_LOG_WARNING, "truncated header\n");
        return -1;
    }
    /* endianness heuristic on the header field at offset 4 */
    big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
    bytestream2_init(&s->gb, buf + 8, buf_size - 8);
    if (big_endian) {
        s->width  = bytestream2_get_be16u(&s->gb);
        s->height = bytestream2_get_be16u(&s->gb);
    } else {
        s->width  = bytestream2_get_le16u(&s->gb);
        s->height = bytestream2_get_le16u(&s->gb);
    }

    /* dimensions changed: update the context and drop the cached frame */
    if (s->avctx->width!=s->width || s->avctx->height!=s->height) {
        avcodec_set_dimensions(s->avctx, s->width, s->height);
        if (s->frame.data[0])
            avctx->release_buffer(avctx, &s->frame);
    }

    /* next header byte selects the quantiser; 3 following bytes unused */
    tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
    bytestream2_skip(&s->gb, 3);

    /* (re)acquire the frame buffer only when we do not hold one */
    if (!s->frame.data[0]) {
        s->frame.key_frame    = 1;
        s->frame.pict_type    = AV_PICTURE_TYPE_I;
        s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
        if (ff_get_buffer(avctx, &s->frame)) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;
        }
    }

    /* iterate 16x16 macroblocks over the 16-aligned frame area */
    for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
        for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
            if (tgq_decode_mb(s, y, x) < 0)
                return AVERROR_INVALIDDATA;

    *got_frame = 1;
    *(AVFrame*)data = s->frame;

    return avpkt->size;
}
开发者ID:bwahn,项目名称:FFmpeg-1,代码行数:51,代码来源:eatgq.c
示例10: parse_palette
/* Fill pal[] from a color table in the bytestream.
 * Each entry is 8 bytes: a 16-bit color index followed by three 16-bit
 * components, of which only the high byte of each is kept.  The loop runs
 * colors + 1 times — presumably the count field stores "count - 1";
 * confirm against the caller, which sizes its read check the same way.
 * Out-of-range indices are skipped with a warning.  Always returns 0. */
static int parse_palette(AVCodecContext *avctx, GetByteContext *gbc,
                         uint32_t *pal, int colors)
{
    int i;

    for (i = 0; i <= colors; i++) {
        unsigned int idx = bytestream2_get_be16(gbc); /* color index */
        uint8_t comp[3];
        int c;

        if (idx > 255) {
            av_log(avctx, AV_LOG_WARNING,
                   "Palette index out of range: %u\n", idx);
            bytestream2_skip(gbc, 6);   /* skip the three 16-bit components */
            continue;
        }

        for (c = 0; c < 3; c++) {
            comp[c] = bytestream2_get_byte(gbc); /* high byte of component */
            bytestream2_skip(gbc, 1);            /* low byte is dropped */
        }
        pal[idx] = (0xFFU << 24) | (comp[0] << 16) | (comp[1] << 8) | comp[2];
    }

    return 0;
}
开发者ID:309746069,项目名称:FFmpeg,代码行数:24,代码来源:qdrw.c
示例11: tgq_decode_frame
/**
 * Decode an EA TGQ frame.
 *
 * The packet begins with a 16-byte header.  Bytes 4..7 read as little
 * endian act as an endianness probe: implausibly large values mean the
 * stream actually stores its fields big-endian.
 *
 * @return packet size consumed on success, a negative AVERROR code on error
 */
static int tgq_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame,
                            AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    TgqContext *s = avctx->priv_data;
    AVFrame *frame = data;
    int x, y, ret;
    int big_endian;

    if (buf_size < 16) {
        av_log(avctx, AV_LOG_WARNING, "truncated header\n");
        return AVERROR_INVALIDDATA;
    }
    /* endianness heuristic on the header field at offset 4 */
    big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
    bytestream2_init(&s->gb, buf + 8, buf_size - 8);
    if (big_endian) {
        s->width  = bytestream2_get_be16u(&s->gb);
        s->height = bytestream2_get_be16u(&s->gb);
    } else {
        s->width  = bytestream2_get_le16u(&s->gb);
        s->height = bytestream2_get_le16u(&s->gb);
    }

    ret = ff_set_dimensions(s->avctx, s->width, s->height);
    if (ret < 0)
        return ret;

    /* next header byte selects the quantiser; 3 following bytes unused */
    tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
    bytestream2_skip(&s->gb, 3);

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    /* every TGQ frame is an intra keyframe */
    frame->key_frame = 1;
    frame->pict_type = AV_PICTURE_TYPE_I;

    /* iterate 16x16 macroblocks over the 16-aligned frame area */
    for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
        for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
            if (tgq_decode_mb(s, frame, y, x) < 0)
                return AVERROR_INVALIDDATA;

    *got_frame = 1;

    return avpkt->size;
}
开发者ID:Bjelijah,项目名称:EcamTurnH265,代码行数:46,代码来源:eatgq.c
示例12: decode_extradata_ps_mp4
/* There are (invalid) samples in the wild with mp4-style extradata, where the
 * parameter sets are stored unescaped (i.e. as RBSP).
 * This function catches the parameter set decoding failure and tries again
 * after escaping it — i.e. after inserting the 0x03 emulation-prevention
 * byte wherever the raw payload contains a 24-bit value <= 3. */
static int decode_extradata_ps_mp4(const uint8_t *buf, int buf_size, H264ParamSets *ps,
                                   int err_recognition, void *logctx)
{
    int ret;

    /* first attempt: decode the extradata as-is */
    ret = decode_extradata_ps(buf, buf_size, ps, 1, logctx);
    if (ret < 0 && !(err_recognition & AV_EF_EXPLODE)) {
        GetByteContext gbc;
        PutByteContext pbc;
        uint8_t *escaped_buf;
        int escaped_buf_size;

        av_log(logctx, AV_LOG_WARNING,
               "SPS decoding failure, trying again after escaping the NAL\n");

        /* escaping can grow the payload to at most 3/2 of its size; this
         * guard also keeps the final size representable in the 16-bit
         * length prefix written below */
        if (buf_size / 2 >= (INT16_MAX - AV_INPUT_BUFFER_PADDING_SIZE) / 3)
            return AVERROR(ERANGE);
        escaped_buf_size = buf_size * 3 / 2 + AV_INPUT_BUFFER_PADDING_SIZE;
        escaped_buf = av_mallocz(escaped_buf_size);
        if (!escaped_buf)
            return AVERROR(ENOMEM);

        bytestream2_init(&gbc, buf, buf_size);
        bytestream2_init_writer(&pbc, escaped_buf, escaped_buf_size);

        while (bytestream2_get_bytes_left(&gbc)) {
            /* a big-endian 24-bit value <= 3 means two zero bytes followed
             * by a byte <= 3: emit 00 00 03, consume the two zeros, and
             * let the third byte be copied on the next iteration */
            if (bytestream2_get_bytes_left(&gbc) >= 3 &&
                bytestream2_peek_be24(&gbc) <= 3) {
                bytestream2_put_be24(&pbc, 3);
                bytestream2_skip(&gbc, 2);
            } else
                bytestream2_put_byte(&pbc, bytestream2_get_byte(&gbc));
        }

        escaped_buf_size = bytestream2_tell_p(&pbc);
        /* rewrite the leading 2-byte size field with the escaped length;
         * NOTE(review): assumes the payload starts with such a field —
         * consistent with the mp4-style extradata this path handles */
        AV_WB16(escaped_buf, escaped_buf_size - 2);

        ret = decode_extradata_ps(escaped_buf, escaped_buf_size, ps, 1, logctx);
        av_freep(&escaped_buf);
        if (ret < 0)
            return ret;
    }
    return 0;
}
开发者ID:411697643,项目名称:FFmpeg,代码行数:49,代码来源:h264_parse.c
示例13: decode_frame
/**
 * Decode one ANM frame.
 *
 * The packet starts with a record-type byte (only 0x42 is supported), a
 * padding-flag byte, and two skipped bytes, followed by a stream of
 * opcodes.  Each opcode is expanded by op() — defined elsewhere in this
 * file — which renders runs/copies into the frame and signals end of
 * output via a nonzero return.
 *
 * @return packet size on success, a negative AVERROR code on failure
 */
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    AnmContext *s = avctx->priv_data;
    const int buf_size = avpkt->size;
    uint8_t *dst, *dst_end;
    int count, ret;

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;
    dst     = s->frame->data[0];
    dst_end = s->frame->data[0] + s->frame->linesize[0]*avctx->height;

    bytestream2_init(&s->gb, avpkt->data, buf_size);

    /* only record type 0x42 is known */
    if (bytestream2_get_byte(&s->gb) != 0x42) {
        avpriv_request_sample(avctx, "Unknown record type");
        return AVERROR_INVALIDDATA;
    }
    if (bytestream2_get_byte(&s->gb)) {
        avpriv_request_sample(avctx, "Padding bytes");
        return AVERROR_PATCHWELCOME;
    }
    bytestream2_skip(&s->gb, 2);

    s->x = 0;
    do {
        /* if statements are ordered by probability */
#define OP(gb, pixel, count) \
    op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame->linesize[0])

        int type = bytestream2_get_byte(&s->gb);
        count = type & 0x7F;   /* low 7 bits: run length */
        type >>= 7;            /* high bit: op variant */
        if (count) {
            /* short form: 7-bit count, high bit selects the op() mode */
            if (OP(type ? NULL : &s->gb, -1, count)) break;
        } else if (!type) {
            /* escape 0x00: explicit count byte plus a literal pixel */
            int pixel;
            count = bytestream2_get_byte(&s->gb);  /* count==0 gives nop */
            pixel = bytestream2_get_byte(&s->gb);
            if (OP(NULL, pixel, count)) break;
        } else {
            /* escape 0x80: 16-bit word = 14-bit count + 2-bit type */
            int pixel;
            type = bytestream2_get_le16(&s->gb);
            count = type & 0x3FFF;
            type >>= 14;
            if (!count) {
                if (type == 0)
                    break; // stop
                if (type == 2) {
                    avpriv_request_sample(avctx, "Unknown opcode");
                    return AVERROR_PATCHWELCOME;
                }
                continue;   /* type 1/3 with zero count: no-op */
            }
            pixel = type == 3 ? bytestream2_get_byte(&s->gb) : -1;
            if (type == 1) count += 0x4000;   /* extended run length */
            if (OP(type == 2 ? &s->gb : NULL, pixel, count)) break;
        }
    } while (bytestream2_get_bytes_left(&s->gb) > 0);

    /* the palette lives in the context and is attached to every frame */
    memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);

    *got_frame = 1;
    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    return buf_size;
}
示例14: decode_frame
/**
 * Decode one ANM frame (legacy context-owned-AVFrame API).
 *
 * The packet starts with a record-type byte (only 0x42 is supported), a
 * padding-flag byte, and two skipped bytes, followed by a stream of
 * opcodes.  Each opcode is expanded by op() — defined elsewhere in this
 * file — which renders runs/copies into the frame and signals end of
 * output via a nonzero return.  Unknown record types and padding are
 * tolerated: the packet is consumed without producing a frame.
 *
 * @return packet size on success, a negative value on failure
 */
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        AVPacket *avpkt)
{
    AnmContext *s = avctx->priv_data;
    const int buf_size = avpkt->size;
    uint8_t *dst, *dst_end;
    int count;

    if(avctx->reget_buffer(avctx, &s->frame) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    dst     = s->frame.data[0];
    dst_end = s->frame.data[0] + s->frame.linesize[0]*avctx->height;

    bytestream2_init(&s->gb, avpkt->data, buf_size);

    /* only record type 0x42 is known; other types are skipped silently */
    if (bytestream2_get_byte(&s->gb) != 0x42) {
        av_log_ask_for_sample(avctx, "unknown record type\n");
        return buf_size;
    }
    if (bytestream2_get_byte(&s->gb)) {
        av_log_ask_for_sample(avctx, "padding bytes not supported\n");
        return buf_size;
    }
    bytestream2_skip(&s->gb, 2);

    s->x = 0;
    do {
        /* if statements are ordered by probability */
#define OP(gb, pixel, count) \
    op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame.linesize[0])

        int type = bytestream2_get_byte(&s->gb);
        count = type & 0x7F;   /* low 7 bits: run length */
        type >>= 7;            /* high bit: op variant */
        if (count) {
            /* short form: 7-bit count, high bit selects the op() mode */
            if (OP(type ? NULL : &s->gb, -1, count)) break;
        } else if (!type) {
            /* escape 0x00: explicit count byte plus a literal pixel */
            int pixel;
            count = bytestream2_get_byte(&s->gb);  /* count==0 gives nop */
            pixel = bytestream2_get_byte(&s->gb);
            if (OP(NULL, pixel, count)) break;
        } else {
            /* escape 0x80: 16-bit word = 14-bit count + 2-bit type */
            int pixel;
            type = bytestream2_get_le16(&s->gb);
            count = type & 0x3FFF;
            type >>= 14;
            if (!count) {
                if (type == 0)
                    break; // stop
                if (type == 2) {
                    av_log_ask_for_sample(avctx, "unknown opcode");
                    return AVERROR_INVALIDDATA;
                }
                continue;   /* type 1/3 with zero count: no-op */
            }
            pixel = type == 3 ? bytestream2_get_byte(&s->gb) : -1;
            if (type == 1) count += 0x4000;   /* extended run length */
            if (OP(type == 2 ? &s->gb : NULL, pixel, count)) break;
        }
    } while (bytestream2_get_bytes_left(&s->gb) > 0);

    /* the palette lives in the context and is attached to every frame */
    memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    return buf_size;
}
示例15: flic_decode_frame_8BPP
static int flic_decode_frame_8BPP(AVCodecContext *avctx,
void *data, int *data_size,
const uint8_t *buf, int buf_size)
{
FlicDecodeContext *s = avctx->priv_data;
GetByteContext g2;
int pixel_ptr;
int palette_ptr;
unsigned char palette_idx1;
unsigned char palette_idx2;
unsigned int frame_size;
int num_chunks;
unsigned int chunk_size;
int chunk_type;
int i, j;
int color_packets;
int color_changes;
int color_shift;
unsigned char r, g, b;
int lines;
int compressed_lines;
int starting_line;
signed short line_packets;
int y_ptr;
int byte_run;
int pixel_skip;
int pixel_countdown;
unsigned char *pixels;
unsigned int pixel_limit;
bytestream2_init(&g2, buf, buf_size);
s->frame.reference = 3;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
if (avctx->reget_buffer(avctx, &s->frame) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return -1;
}
pixels = s->frame.data[0];
pixel_limit = s->avctx->height * s->frame.linesize[0];
if (buf_size < 16 || buf_size > INT_MAX - (3 * 256 + FF_INPUT_BUFFER_PADDING_SIZE))
return AVERROR_INVALIDDATA;
frame_size = bytestream2_get_le32(&g2);
if (frame_size > buf_size)
frame_size = buf_size;
bytestream2_skip(&g2, 2); /* skip the magic number */
num_chunks = bytestream2_get_le16(&g2);
bytestream2_skip(&g2, 8); /* skip padding */
frame_size -= 16;
/* iterate through the chunks */
while ((frame_size >= 6) && (num_chunks > 0)) {
int stream_ptr_after_chunk;
chunk_size = bytestream2_get_le32(&g2);
if (chunk_size > frame_size) {
av_log(avctx, AV_LOG_WARNING,
"Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size);
chunk_size = frame_size;
}
stream_ptr_after_chunk = bytestream2_tell(&g2) - 4 + chunk_size;
chunk_type = bytestream2_get_le16(&g2);
switch (chunk_type) {
case FLI_256_COLOR:
case FLI_COLOR:
/* check special case: If this file is from the Magic Carpet
* game and uses 6-bit colors even though it reports 256-color
* chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during
* initialization) */
if ((chunk_type == FLI_256_COLOR) && (s->fli_type != FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE))
color_shift = 0;
else
color_shift = 2;
/* set up the palette */
color_packets = bytestream2_get_le16(&g2);
palette_ptr = 0;
for (i = 0; i < color_packets; i++) {
/* first byte is how many colors to skip */
palette_ptr += bytestream2_get_byte(&g2);
/* next byte indicates how many entries to change */
color_changes = bytestream2_get_byte(&g2);
/* if there are 0 color changes, there are actually 256 */
if (color_changes == 0)
color_changes = 256;
if (bytestream2_tell(&g2) + color_changes * 3 > stream_ptr_after_chunk)
break;
for (j = 0; j < color_changes; j++) {
//.........这里部分代码省略.........
开发者ID:0x0B501E7E,项目名称:ffmpeg,代码行数:101,代码来源:flicvideo.c
示例16: parse_pixel_format
static int parse_pixel_format(AVCodecContext *avctx)
{
DDSContext *ctx = avctx->priv_data;
GetByteContext *gbc = &ctx->gbc;
char buf[32];
uint32_t flags, fourcc, gimp_tag;
enum DDSDXGIFormat dxgi;
int size, bpp, r, g, b, a;
int alpha_exponent, ycocg_classic, ycocg_scaled, normal_map, array;
/* Alternative DDS implementations use reserved1 as custom header. */
bytestream2_skip(gbc, 4 * 3);
gimp_tag = bytestream2_get_le32(gbc);
alpha_exponent = gimp_tag == MKTAG('A', 'E', 'X', 'P');
ycocg_classic = gimp_tag == MKTAG('Y', 'C', 'G', '1');
ycocg_scaled = gimp_tag == MKTAG('Y', 'C', 'G', '2');
bytestream2_skip(gbc, 4 * 7);
/* Now the real DDPF starts. */
size = bytestream2_get_le32(gbc);
if (size != 32) {
av_log(avctx, AV_LOG_ERROR, "Invalid pixel format header %d.\n", size);
return AVERROR_INVALIDDATA;
}
flags = bytestream2_get_le32(gbc);
ctx->compressed = flags & DDPF_FOURCC;
ctx->paletted = flags & DDPF_PALETTE;
normal_map = flags & DDPF_NORMALMAP;
fourcc = bytestream2_get_le32(gbc);
if (ctx->compressed && ctx->paletted) {
av_log(avctx, AV_LOG_WARNING,
"Disabling invalid palette flag for compressed dds.\n");
ctx->paletted = 0;
}
bpp = bytestream2_get_le32(gbc); // rgbbitcount
r = bytestream2_get_le32(gbc); // rbitmask
g = bytestream2_get_le32(gbc); // gbitmask
b = bytestream2_get_le32(gbc); // bbitmask
a = bytestream2_get_le32(gbc); // abitmask
bytestream2_skip(gbc, 4); // caps
bytestream2_skip(gbc, 4); // caps2
bytestream2_skip(gbc, 4); // caps3
bytestream2_skip(gbc, 4); // caps4
bytestream2_skip(gbc, 4); // reserved2
av_get_codec_tag_string(buf, sizeof(buf), fourcc);
av_log(avctx, AV_LOG_VERBOSE, "fourcc %s bpp %d "
"r 0x%x g 0x%x b 0x%x a 0x%x\n", buf, bpp, r, g, b, a);
if (gimp_tag) {
av_get_codec_tag_string(buf, sizeof(buf), gimp_tag);
av_log(avctx, AV_LOG_VERBOSE, "and GIMP-DDS tag %s\n", buf);
}
if (ctx->compressed)
avctx->pix_fmt = AV_PIX_FMT_RGBA;
if (ctx->compressed) {
switch (fourcc) {
case MKTAG('D', 'X', 'T', '1'):
ctx->tex_ratio = 8;
ctx->tex_funct = ctx->texdsp.dxt1a_block;
break;
case MKTAG('D', 'X', 'T', '2'):
ctx->tex_ratio = 16;
ctx->tex_funct = ctx->texdsp.dxt2_block;
break;
case MKTAG('D', 'X', 'T', '3'):
ctx->tex_ratio = 16;
ctx->tex_funct = ctx->texdsp.dxt3_block;
break;
case MKTAG('D', 'X', 'T', '4'):
ctx->tex_ratio = 16;
ctx->tex_funct = ctx->texdsp.dxt4_block;
break;
case MKTAG('D', 'X', 'T', '5'):
ctx->tex_ratio = 16;
if (ycocg_scaled)
ctx->tex_funct = ctx->texdsp.dxt5ys_block;
else if (ycocg_classic)
ctx->tex_funct = ctx->texdsp.dxt5y_block;
else
ctx->tex_funct = ctx->texdsp.dxt5_block;
break;
case MKTAG('R', 'X', 'G', 'B'):
ctx->tex_ratio = 16;
ctx->tex_funct = ctx->texdsp.dxt5_block;
/* This format may be considered as a normal map,
* but it is handled differently in a separate postproc. */
ctx->postproc = DDS_SWIZZLE_RXGB;
normal_map = 0;
break;
case MKTAG('A', 'T', 'I', '1'):
case MKTAG('B', 'C', '4', 'U'):
ctx->tex_ratio = 8;
ctx->tex_funct = ctx->texdsp.rgtc1u_block;
break;
case MKTAG('B', 'C', '4', 'S'):
//.........这里部分代码省略.........
开发者ID:MAXsundai,项目名称:FFmpeg,代码行数:101,代码来源:dds.c
示例17: flic_decode_frame_8BPP
static int flic_decode_frame_8BPP(AVCodecContext *avctx,
void *data, int *got_frame,
const uint8_t *buf, int buf_size)
{
FlicDecodeContext *s = avctx->priv_data;
GetByteContext g2;
int stream_ptr_after_color_chunk;
int pixel_ptr;
int palette_ptr;
unsigned char palette_idx1;
unsigned char palette_idx2;
unsigned int frame_size;
int num_chunks;
unsigned int chunk_size;
int chunk_type;
int i, j, ret;
int color_packets;
int color_changes;
int color_shift;
unsigned char r, g, b;
int lines;
int compressed_lines;
int starting_line;
signed short line_packets;
int y_ptr;
int byte_run;
int pixel_skip;
int pixel_countdown;
unsigned char *pixels;
unsigned int pixel_limit;
bytestream2_init(&g2, buf, buf_size);
if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
pixels = s->frame->data[0];
pixel_limit = s->avctx->height * s->frame->linesize[0];
frame_size = bytestream2_get_le32(&g2);
bytestream2_skip(&g2, 2); /* skip the magic number */
num_chunks = bytestream2_get_le16(&g2);
bytestream2_skip(&g2, 8); /* skip padding */
frame_size -= 16;
/* iterate through the chunks */
while ((frame_size > 0) && (num_chunks > 0)) {
chunk_size = bytestream2_get_le32(&g2);
chunk_type = bytestream2_get_le16(&g2);
switch (chunk_type) {
case FLI_256_COLOR:
case FLI_COLOR:
stream_ptr_after_color_chunk = bytestream2_tell(&g2) + chunk_size - 6;
/* check special case: If this file is from the Magic Carpet
* game and uses 6-bit colors even though it reports 256-color
* chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during
* initialization) */
if ((chunk_type == FLI_256_COLOR) && (s->fli_type != FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE))
color_shift = 0;
else
color_shift = 2;
/* set up the palette */
color_packets = bytestream2_get_le16(&g2);
palette_ptr = 0;
for (i = 0; i < color_packets; i++) {
/* first byte is how many colors to skip */
palette_ptr += bytestream2_get_byte(&g2);
/* next byte indicates how many entries to change */
color_changes = bytestream2_get_byte(&g2);
/* if there are 0 color changes, there are actually 256 */
if (color_changes == 0)
color_changes = 256;
for (j = 0; j < color_changes; j++) {
unsigned int entry;
/* wrap around, for good measure */
if ((unsigned)palette_ptr >= 256)
palette_ptr = 0;
r = bytestream2_get_byte(&g2) << color_shift;
g = bytestream2_get_byte(&g2) << color_shift;
b = bytestream2_get_byte(&g2) << color_shift;
entry = (r << 16) | (g << 8) | b;
if (s->palette[palette_ptr] != entry)
s->new_palette = 1;
s->palette[palette_ptr++] = entry;
}
//.........这里部分代码省略.........
开发者ID:AVLeo,项目名称:libav,代码行数:101,代码来源:flicvideo.c
示例18: decode_frame
static int decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
AVFrame * const p = data;
GetByteContext gbc;
int colors;
int w, h, ret;
int ver;
bytestream2_init(&gbc, avpkt->data, avpkt->size);
if ( bytestream2_get_bytes_left(&gbc) >= 552
&& check_header(gbc.buffer + 512, bytestream2_get_bytes_left(&gbc) - 512)
)
bytestream2_skip(&gbc, 512);
ver = check_header(gbc.buffer, bytestream2_get_bytes_left(&gbc));
/* smallest PICT header */
if (bytestream2_get_bytes_left(&gbc) < 40) {
av_log(avctx, AV_LOG_ERROR, "Frame is too small %d\n",
bytestream2_get_bytes_left(&gbc));
return AVERROR_INVALIDDATA;
}
bytestream2_skip(&gbc, 6);
h = bytestream2_get_be16(&gbc);
w = bytestream2_get_be16(&gbc);
ret = ff_set_dimensions(avctx, w, h);
if (ret < 0)
return ret;
/* version 1 is identified by 0x1101
* it uses byte-aligned opcodes rather than word-aligned */
if (ver == 1) {
avpriv_request_sample(avctx, "QuickDraw version 1");
return AVERROR_PATCHWELCOME;
} else if (ver != 2) {
avpriv_request_sample(avctx, "QuickDraw version unknown (%X)", bytestream2_get_be32(&gbc));
return AVERROR_PATCHWELCOME;
}
bytestream2_skip(&gbc, 4+26);
while (bytestream2_get_bytes_left(&gbc) >= 4) {
int bppcnt, bpp;
int rowbytes, pack_type;
int opcode = bytestream2_get_be16(&gbc);
switch(opcode) {
case PACKBITSRECT:
case PACKBITSRGN:
av_log(avctx, AV_LOG_DEBUG, "Parsing Packbit opcode\n");
bytestream2_skip(&gbc, 30);
bppcnt = bytestream2_get_be16(&gbc); /* cmpCount */
bpp = bytestream2_get_be16(&gbc); /* cmpSize */
av_log(avctx, AV_LOG_DEBUG, "bppcount %d bpp %d\n", bppcnt, bpp);
if (bppcnt == 1 && bpp == 8) {
avctx->pix_fmt = AV_PIX_FMT_PAL8;
} else {
av_log(avctx, AV_LOG_ERROR,
"Invalid pixel format (bppcnt %d bpp %d) in Packbit\n",
bppcnt, bpp);
return AVERROR_INVALIDDATA;
}
/* jump to palette */
bytestream2_skip(&gbc, 18);
colors = bytestream2_get_be16(&gbc);
if (colors < 0 || colors > 256) {
av_log(avctx, AV_LOG_ERROR,
"Error color count - %i(0x%X)\n", colors, colors);
return AVERROR_INVALIDDATA;
}
if (bytestream2_get_bytes_left(&gbc) < (colors + 1) * 8) {
av_log(avctx, AV_LOG_ERROR, "Palette is too small %d\n",
bytestream2_get_bytes_left(&gbc));
return AVERROR_INVALIDDATA;
}
if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
return ret;
parse_palette(avctx, &gbc, (uint32_t *)p->data[1], colors);
p->palette_has_changed = 1;
/* jump to image data */
bytestream2_skip(&gbc, 18);
if (opcode == PACKBITSRGN) {
bytestream2_skip(&gbc, 2 + 8); /* size + rect */
avpriv_report_missing_feature(avctx, "Packbit mask region");
}
ret = decode_rle(avctx, p, &gbc, bppcnt);
if (ret < 0)
return ret;
//.........这里部分代码省略.........
开发者ID:309746069,项目名称:FFmpeg,代码行数:101,代码来源:qdrw.c
示例19: decode_frame
static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
PicContext *s = avctx->priv_data;
uint32_t *palette;
int bits_per_plane, bpp, etype, esize, npal, pos_after_pal;
int i, x, y, plane, tmp;
bytestream2_init(&s->g, avpkt->data, avpkt->size);
if (bytestream2_get_bytes_left(&s->g) < 11)
return AVERROR_INVALIDDATA;
if (bytestream2_get_le16u(&s->g) != 0x1234)
return AVERROR_INVALIDDATA;
s->width = bytestream2_get_le16u(&s->g);
s->height = bytestream2_get_le16u(&s->g);
bytestream2_skip(&s->g, 4);
tmp = bytestream2_get_byteu(&s->g);
bits_per_plane = tmp & 0xF;
s->nb_planes = (tmp >> 4) + 1;
bpp = bits_per_plane * s->nb_planes;
if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
av_log_ask_for_sample(s, "unsupported bit depth\n");
return AVERROR_INVALIDDATA;
}
if (bytestream2_peek_byte(&s->g) == 0xFF) {
bytestream2_skip(&s->g, 2);
etype = bytestream2_get_le16(&s->g);
esize = bytestream2_get_le16(&s->g);
if (bytestream2_get_bytes_left(&s->g) < esize)
return AVERROR_INVALIDDATA;
} else {
etype = -1;
esize = 0;
}
avctx->pix_fmt = PIX_FMT_PAL8;
if (s->width != avctx->width && s->height != avctx->height) {
if (av_image_check_size(s->width, s->height, 0, avctx) < 0)
return -1;
avcodec_set_dimensions(avctx, s->width, s->height);
if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame);
}
if (avctx->get_buffer(avctx, &s->frame) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
memset(s->frame.data[0], 0, s->height * s->frame.linesize[0]);
s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = 1;
pos_after_pal = bytestream2_tell(&s->g) + esize;
palette = (uint32_t*)s->frame.data[1];
if (etype == 1 && esize > 1 && bytestream2_peek_byte(&s->g) < 6) {
int idx = bytestream2_get_byte(&s->g);
npal = 4;
for (i = 0; i < npal; i++)
palette[i] = ff_cga_palette[ cga_mode45_index[idx][i] ];
} else if (etype == 2) {
npal = FFMIN(esize, 16);
for (i = 0; i < npal; i++) {
int pal_idx = bytestream2_get_byte(&s->g);
palette[i] = ff_cga_palette[FFMIN(pal_idx, 16)];
}
} else if (etype == 3) {
npal = FFMIN(esize, 16);
for (i = 0; i < npal; i++) {
int pal_idx = bytestream2_get_byte(&s->g);
palette[i] = ff_ega_palette[FFMIN(pal_idx, 63)];
}
} else if (etype == 4 || etype == 5) {
npal = FFMIN(esize / 3, 256);
for (i = 0; i < npal;
|
请发表评论