C++ bytestream_put_le16 Function Code Examples


This article collects typical usage examples of the C++ function bytestream_put_le16. If you are wondering what exactly bytestream_put_le16 does, how to call it, or what it looks like in real code, the hand-picked examples below should help.



Below are 20 code examples of the bytestream_put_le16 function, sorted by popularity by default.
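
Before diving into the examples, here is a minimal, self-contained sketch of the behavior they all rely on: bytestream_put_le16 takes the address of a write cursor (uint8_t **), stores a 16-bit value in little-endian byte order at the cursor, and advances the cursor by two bytes. The helper my_put_le16 below is an illustrative stand-in written for this article, not FFmpeg's actual implementation; the assumption is that the real function, declared in libavcodec/bytestream.h, behaves the same way.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for bytestream_put_le16 (assumption: mirrors the
 * behavior of the real helper in libavcodec/bytestream.h): write a 16-bit
 * value in little-endian order and advance the cursor by two bytes. */
static void my_put_le16(uint8_t **b, unsigned int value)
{
    (*b)[0] =  value       & 0xff;   /* low byte first */
    (*b)[1] = (value >> 8) & 0xff;   /* then high byte */
    *b += 2;
}

int main(void)
{
    uint8_t buf[4], *p = buf;

    my_put_le16(&p, 0x1234);   /* buf now holds 34 12 */
    my_put_le16(&p, 640);      /* e.g. an image width field: 80 02 */

    printf("%02x %02x %02x %02x (%d bytes written)\n",
           buf[0], buf[1], buf[2], buf[3], (int)(p - buf));
    return 0;
}

This cursor-advancing pattern is why every example below passes the address of a local pointer (for example &buf or &mms->write_out_ptr) rather than the buffer itself.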

Example 1: gif_image_write_image

static int gif_image_write_image(uint8_t **bytestream,
                                 int x1, int y1, int width, int height,
                                 const uint8_t *buf, int linesize, int pix_fmt)
{
    PutBitContext p;
    uint8_t buffer[200]; /* 100 * 9 / 8 = 113 */
    int i, left, w;
    const uint8_t *ptr;
    /* image block */

    bytestream_put_byte(bytestream, 0x2c);
    bytestream_put_le16(bytestream, x1);
    bytestream_put_le16(bytestream, y1);
    bytestream_put_le16(bytestream, width);
    bytestream_put_le16(bytestream, height);
    bytestream_put_byte(bytestream, 0x00); /* flags */
    /* no local clut */

    bytestream_put_byte(bytestream, 0x08);

    left= width * height;

    init_put_bits(&p, buffer, 130);

/*
 * the thing here is the bitstream is written as little packets, with a size byte before
 * but it's still the same bitstream between packets (no flush !)
 */
    ptr = buf;
    w = width;
    while(left>0) {

        put_bits(&p, 9, 0x0100); /* clear code */

        for(i=(left<GIF_CHUNKS)?left:GIF_CHUNKS;i;i--) {
            put_bits(&p, 9, *ptr++);
            if (--w == 0) {
                w = width;
                buf += linesize;
                ptr = buf;
            }
        }

        if(left<=GIF_CHUNKS) {
            put_bits(&p, 9, 0x101); /* end of stream */
            flush_put_bits(&p);
        }
        if(pbBufPtr(&p) - p.buf > 0) {
            bytestream_put_byte(bytestream, pbBufPtr(&p) - p.buf); /* byte count of the packet */
            bytestream_put_buffer(bytestream, p.buf, pbBufPtr(&p) - p.buf); /* the actual buffer */
            p.buf_ptr = p.buf; /* dequeue the bytes off the bitstream */
        }
        left-=GIF_CHUNKS;
    }
    bytestream_put_byte(bytestream, 0x00); /* end of image block */
    bytestream_put_byte(bytestream, 0x3b);
    return 0;
}
Developer: BlackMael, Project: DirectEncode, Lines: 58, Source file: gif.c


Example 2: read_packet

static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    IcoDemuxContext *ico = s->priv_data;
    IcoImage *image;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    int ret;

    if (ico->current_image >= ico->nb_images)
        return AVERROR(EIO);

    image = &ico->images[ico->current_image];

    if ((ret = avio_seek(pb, image->offset, SEEK_SET)) < 0)
        return ret;

    if (s->streams[ico->current_image]->codec->codec_id == AV_CODEC_ID_PNG) {
        if ((ret = av_get_packet(pb, pkt, image->size)) < 0)
            return ret;
    } else {
        uint8_t *buf;
        if ((ret = av_new_packet(pkt, 14 + image->size)) < 0)
            return ret;
        buf = pkt->data;

        /* add BMP header */
        bytestream_put_byte(&buf, 'B');
        bytestream_put_byte(&buf, 'M');
        bytestream_put_le32(&buf, pkt->size);
        bytestream_put_le16(&buf, 0);
        bytestream_put_le16(&buf, 0);
        bytestream_put_le32(&buf, 0);

        if ((ret = avio_read(pb, buf, image->size)) < 0)
            return ret;

        st->codec->bits_per_coded_sample = AV_RL16(buf + 14);

        if (AV_RL32(buf + 32))
            image->nb_pal = AV_RL32(buf + 32);

        if (st->codec->bits_per_coded_sample <= 8 && !image->nb_pal) {
            image->nb_pal = 1 << st->codec->bits_per_coded_sample;
            AV_WL32(buf + 32, image->nb_pal);
        }

        AV_WL32(buf - 4, 14 + 40 + image->nb_pal * 4);
        AV_WL32(buf + 8, AV_RL32(buf + 8) / 2);
    }

    pkt->stream_index = ico->current_image++;
    pkt->flags |= AV_PKT_FLAG_KEY;

    return 0;
}
Developer: 26mansi, Project: FFmpeg, Lines: 55, Source file: icodec.c


Example 3: bmp_encode_frame

static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    BMPContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize;
    uint8_t *ptr;
    unsigned char* buf0 = buf;
    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;
    n_bytes_per_row = (avctx->width*3 + 3) & ~3;
    n_bytes_image = avctx->height*n_bytes_per_row;

    // STRUCTURE.field refer to the MSVC documentation for BITMAPFILEHEADER
    // and related pages.
#define SIZE_BITMAPFILEHEADER 14
#define SIZE_BITMAPINFOHEADER 40
    hsize = SIZE_BITMAPFILEHEADER + SIZE_BITMAPINFOHEADER;
    n_bytes = n_bytes_image + hsize;
    if(n_bytes>buf_size) {
        av_log(avctx, AV_LOG_ERROR, "buf size too small (need %d, got %d)\n", n_bytes, buf_size);
        return -1;
    }
    bytestream_put_byte(&buf, 'B');                   // BITMAPFILEHEADER.bfType
    bytestream_put_byte(&buf, 'M');                   // do.
    bytestream_put_le32(&buf, n_bytes);               // BITMAPFILEHEADER.bfSize
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved1
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved2
    bytestream_put_le32(&buf, hsize);                 // BITMAPFILEHEADER.bfOffBits
    bytestream_put_le32(&buf, SIZE_BITMAPINFOHEADER); // BITMAPINFOHEADER.biSize
    bytestream_put_le32(&buf, avctx->width);          // BITMAPINFOHEADER.biWidth
    bytestream_put_le32(&buf, avctx->height);         // BITMAPINFOHEADER.biHeight
    bytestream_put_le16(&buf, 1);                     // BITMAPINFOHEADER.biPlanes
    bytestream_put_le16(&buf, 24);                    // BITMAPINFOHEADER.biBitCount
    bytestream_put_le32(&buf, BMP_RGB);               // BITMAPINFOHEADER.biCompression
    bytestream_put_le32(&buf, n_bytes_image);         // BITMAPINFOHEADER.biSizeImage
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biXPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biYPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrUsed
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrImportant
    // BMP files are bottom-to-top so we start from the end...
    ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
    buf = buf0 + hsize;
    for(i = 0; i < avctx->height; i++) {
        n = 3*avctx->width;
        memcpy(buf, ptr, n);
        buf += n;
        memset(buf, 0, n_bytes_per_row-n);
        buf += n_bytes_per_row-n;
        ptr -= p->linesize[0]; // ... and go back
    }
    return n_bytes;
}
Developer: DanielGit, Project: Intrisit201202, Lines: 53, Source file: bmpenc.c


Example 4: send_stream_selection_request

/** Send MMST stream selection command based on the AVStream->discard values. */
static int send_stream_selection_request(MMSTContext *mmst)
{
    int i;
    MMSContext *mms = &mmst->mms;
    //  send the streams we want back...
    start_command_packet(mmst, CS_PKT_STREAM_ID_REQUEST);
    bytestream_put_le32(&mms->write_out_ptr, mms->stream_num);         // stream nums
    for(i= 0; i<mms->stream_num; i++) {
        bytestream_put_le16(&mms->write_out_ptr, 0xffff);              // flags
        bytestream_put_le16(&mms->write_out_ptr, mms->streams[i].id);  // stream id
        bytestream_put_le16(&mms->write_out_ptr, 0);                   // selection
    }
    return send_command_packet(mmst);
}
Developer: AirStash, Project: AirStashPlayer, Lines: 15, Source file: mmst.c


Example 5: start_command_packet

/** Create MMST command packet header */
static void start_command_packet(MMSContext *mms, MMSCSPacketType packet_type)
{
    mms->write_out_ptr = mms->out_buffer;

    bytestream_put_le32(&mms->write_out_ptr, 1); // start sequence
    bytestream_put_le32(&mms->write_out_ptr, 0xb00bface);
    bytestream_put_le32(&mms->write_out_ptr, 0); // Length starts from after the protocol type bytes
    bytestream_put_le32(&mms->write_out_ptr, MKTAG('M','M','S',' '));
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, mms->outgoing_packet_seq++);
    bytestream_put_le64(&mms->write_out_ptr, 0); // timestamp
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le16(&mms->write_out_ptr, packet_type);
    bytestream_put_le16(&mms->write_out_ptr, 3); // direction to server
}
Developer: Akuaksh, Project: FFmpeg-alsenc, Lines: 16, Source file: mmst.c


Example 6: gif_image_write_image

static int gif_image_write_image(AVCodecContext *avctx,
                                 uint8_t **bytestream, uint8_t *end,
                                 const uint8_t *buf, int linesize)
{
    GIFContext *s = avctx->priv_data;
    int len = 0, height;
    const uint8_t *ptr;
    /* image block */

    bytestream_put_byte(bytestream, 0x2c);
    bytestream_put_le16(bytestream, 0);
    bytestream_put_le16(bytestream, 0);
    bytestream_put_le16(bytestream, avctx->width);
    bytestream_put_le16(bytestream, avctx->height);
    bytestream_put_byte(bytestream, 0x00); /* flags */
    /* no local clut */

    bytestream_put_byte(bytestream, 0x08);

    ff_lzw_encode_init(s->lzw, s->buf, avctx->width*avctx->height,
                       12, FF_LZW_GIF, put_bits);

    ptr = buf;
    for (height = avctx->height; height--;) {
        len += ff_lzw_encode(s->lzw, ptr, avctx->width);
        ptr += linesize;
    }
    len += ff_lzw_encode_flush(s->lzw, flush_put_bits);

    ptr = s->buf;
    while (len > 0) {
        int size = FFMIN(255, len);
        bytestream_put_byte(bytestream, size);
        if (end - *bytestream < size)
            return -1;
        bytestream_put_buffer(bytestream, ptr, size);
        ptr += size;
        len -= size;
    }
    bytestream_put_byte(bytestream, 0x00); /* end of image block */
    bytestream_put_byte(bytestream, 0x3b);
    return 0;
}
Developer: Flameeyes, Project: libav, Lines: 43, Source file: gif.c


Example 7: write_typecode

/* NOTE: Typecodes must be spooled AFTER arguments!! */
static void write_typecode(CodingSpool *s, uint8_t type)
{
    s->typeSpool |= (type & 3) << (14 - s->typeSpoolLength);
    s->typeSpoolLength += 2;
    if (s->typeSpoolLength == 16) {
        bytestream_put_le16(s->pout, s->typeSpool);
        bytestream_put_buffer(s->pout, s->argumentSpool,
                              s->args - s->argumentSpool);
        s->typeSpoolLength = 0;
        s->typeSpool = 0;
        s->args = s->argumentSpool;
    }
}
Developer: 119, Project: dropcam_for_iphone, Lines: 14, Source file: roqvideoenc.c


Example 8: gif_image_write_header

/* GIF header */
static int gif_image_write_header(AVCodecContext *avctx,
                                  uint8_t **bytestream, uint32_t *palette)
{
    int i;
    unsigned int v;

    bytestream_put_buffer(bytestream, "GIF", 3);
    bytestream_put_buffer(bytestream, "89a", 3);
    bytestream_put_le16(bytestream, avctx->width);
    bytestream_put_le16(bytestream, avctx->height);

    bytestream_put_byte(bytestream, 0xf7); /* flags: global clut, 256 entries */
    bytestream_put_byte(bytestream, 0x1f); /* background color index */
    bytestream_put_byte(bytestream, 0); /* aspect ratio */

    /* the global palette */
    for(i=0;i<256;i++) {
        v = palette[i];
        bytestream_put_be24(bytestream, v);
    }

    return 0;
}
Developer: Flameeyes, Project: libav, Lines: 24, Source file: gif.c


Example 9: roq_write_video_info_chunk

static void roq_write_video_info_chunk(RoqContext *enc)
{
    /* ROQ info chunk */
    bytestream_put_le16(&enc->out_buf, RoQ_INFO);

    /* Size: 8 bytes */
    bytestream_put_le32(&enc->out_buf, 8);

    /* Unused argument */
    bytestream_put_byte(&enc->out_buf, 0x00);
    bytestream_put_byte(&enc->out_buf, 0x00);

    /* Width */
    bytestream_put_le16(&enc->out_buf, enc->width);

    /* Height */
    bytestream_put_le16(&enc->out_buf, enc->height);

    /* Unused in Quake 3, mimics the output of the real encoder */
    bytestream_put_byte(&enc->out_buf, 0x08);
    bytestream_put_byte(&enc->out_buf, 0x00);
    bytestream_put_byte(&enc->out_buf, 0x04);
    bytestream_put_byte(&enc->out_buf, 0x00);
}
Developer: 119, Project: dropcam_for_iphone, Lines: 24, Source file: roqvideoenc.c


Example 10: write_codebooks

/**
 * Write codebook chunk
 */
static void write_codebooks(RoqContext *enc, RoqTempdata *tempData)
{
    int i, j;
    uint8_t **outp= &enc->out_buf;

    if (tempData->numCB2) {
        bytestream_put_le16(outp, RoQ_QUAD_CODEBOOK);
        bytestream_put_le32(outp, tempData->numCB2*6 + tempData->numCB4*4);
        bytestream_put_byte(outp, tempData->numCB4);
        bytestream_put_byte(outp, tempData->numCB2);

        for (i=0; i<tempData->numCB2; i++) {
            bytestream_put_buffer(outp, enc->cb2x2[tempData->f2i2[i]].y, 4);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].u);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].v);
        }

        for (i=0; i<tempData->numCB4; i++)
            for (j=0; j<4; j++)
                bytestream_put_byte(outp, tempData->i2f2[enc->cb4x4[tempData->f2i4[i]].idx[j]]);

    }
}
Developer: 119, Project: dropcam_for_iphone, Lines: 26, Source file: roqvideoenc.c


Example 11: reconstruct_and_encode_image

static void reconstruct_and_encode_image(RoqContext *enc, RoqTempdata *tempData, int w, int h, int numBlocks)
{
    int i, j, k;
    int x, y;
    int subX, subY;
    int dist=0;

    roq_qcell *qcell;
    CelEvaluation *eval;

    CodingSpool spool;

    spool.typeSpool=0;
    spool.typeSpoolLength=0;
    spool.args = spool.argumentSpool;
    spool.pout = &enc->out_buf;

    if (tempData->used_option[RoQ_ID_CCC]%2)
        tempData->mainChunkSize+=8; //FIXME

    /* Write the video chunk header */
    bytestream_put_le16(&enc->out_buf, RoQ_QUAD_VQ);
    bytestream_put_le32(&enc->out_buf, tempData->mainChunkSize/8);
    bytestream_put_byte(&enc->out_buf, 0x0);
    bytestream_put_byte(&enc->out_buf, 0x0);

    for (i=0; i<numBlocks; i++) {
        eval = tempData->cel_evals + i;

        x = eval->sourceX;
        y = eval->sourceY;
        dist += eval->eval_dist[eval->best_coding];

        switch (eval->best_coding) {
        case RoQ_ID_MOT:
            write_typecode(&spool, RoQ_ID_MOT);
            break;

        case RoQ_ID_FCC:
            bytestream_put_byte(&spool.args, motion_arg(eval->motion));

            write_typecode(&spool, RoQ_ID_FCC);
            ff_apply_motion_8x8(enc, x, y,
                                eval->motion.d[0], eval->motion.d[1]);
            break;

        case RoQ_ID_SLD:
            bytestream_put_byte(&spool.args, tempData->i2f4[eval->cbEntry]);
            write_typecode(&spool, RoQ_ID_SLD);

            qcell = enc->cb4x4 + eval->cbEntry;
            ff_apply_vector_4x4(enc, x  , y  , enc->cb2x2 + qcell->idx[0]);
            ff_apply_vector_4x4(enc, x+4, y  , enc->cb2x2 + qcell->idx[1]);
            ff_apply_vector_4x4(enc, x  , y+4, enc->cb2x2 + qcell->idx[2]);
            ff_apply_vector_4x4(enc, x+4, y+4, enc->cb2x2 + qcell->idx[3]);
            break;

        case RoQ_ID_CCC:
            write_typecode(&spool, RoQ_ID_CCC);

            for (j=0; j<4; j++) {
                subX = x + 4*(j&1);
                subY = y + 2*(j&2);

                switch(eval->subCels[j].best_coding) {
                case RoQ_ID_MOT:
                    break;

                case RoQ_ID_FCC:
                    bytestream_put_byte(&spool.args,
                                        motion_arg(eval->subCels[j].motion));

                    ff_apply_motion_4x4(enc, subX, subY,
                                        eval->subCels[j].motion.d[0],
                                        eval->subCels[j].motion.d[1]);
                    break;

                case RoQ_ID_SLD:
                    bytestream_put_byte(&spool.args,
                                        tempData->i2f4[eval->subCels[j].cbEntry]);

                    qcell = enc->cb4x4 + eval->subCels[j].cbEntry;

                    ff_apply_vector_2x2(enc, subX  , subY  ,
                                        enc->cb2x2 + qcell->idx[0]);
                    ff_apply_vector_2x2(enc, subX+2, subY  ,
                                        enc->cb2x2 + qcell->idx[1]);
                    ff_apply_vector_2x2(enc, subX  , subY+2,
                                        enc->cb2x2 + qcell->idx[2]);
                    ff_apply_vector_2x2(enc, subX+2, subY+2,
                                        enc->cb2x2 + qcell->idx[3]);
                    break;

                case RoQ_ID_CCC:
                    for (k=0; k<4; k++) {
                        int cb_idx = eval->subCels[j].subCels[k];
                        bytestream_put_byte(&spool.args,
                                            tempData->i2f2[cb_idx]);

                        ff_apply_vector_2x2(enc, subX + 2*(k&1), subY + (k&2),
//......... part of the code omitted here .........
Developer: 119, Project: dropcam_for_iphone, Lines: 101, Source file: roqvideoenc.c


Example 12: gif_image_write_header

/* GIF header */
static int gif_image_write_header(uint8_t **bytestream,
                                  int width, int height, int loop_count,
                                  uint32_t *palette)
{
    int i;
    unsigned int v;

    bytestream_put_buffer(bytestream, "GIF", 3);
    bytestream_put_buffer(bytestream, "89a", 3);
    bytestream_put_le16(bytestream, width);
    bytestream_put_le16(bytestream, height);

    bytestream_put_byte(bytestream, 0xf7); /* flags: global clut, 256 entries */
    bytestream_put_byte(bytestream, 0x1f); /* background color index */
    bytestream_put_byte(bytestream, 0); /* aspect ratio */

    /* the global palette */
    if (!palette) {
        bytestream_put_buffer(bytestream, (const unsigned char *)gif_clut, 216*3);
        for(i=0;i<((256-216)*3);i++)
            bytestream_put_byte(bytestream, 0);
    } else {
        for(i=0;i<256;i++) {
            v = palette[i];
            bytestream_put_be24(bytestream, v);
        }
    }

        /*        update: this is the 'NETSCAPE EXTENSION' that allows for looped animated gif
                see http://members.aol.com/royalef/gifabout.htm#net-extension

                byte   1       : 33 (hex 0x21) GIF Extension code
                byte   2       : 255 (hex 0xFF) Application Extension Label
                byte   3       : 11 (hex (0x0B) Length of Application Block
                                         (eleven bytes of data to follow)
                bytes  4 to 11 : "NETSCAPE"
                bytes 12 to 14 : "2.0"
                byte  15       : 3 (hex 0x03) Length of Data Sub-Block
                                         (three bytes of data to follow)
                byte  16       : 1 (hex 0x01)
                bytes 17 to 18 : 0 to 65535, an unsigned integer in
                                         lo-hi byte format. This indicate the
                                         number of iterations the loop should
                                         be executed.
                bytes 19       : 0 (hex 0x00) a Data Sub-block Terminator
        */

    /* application extension header */
#ifdef GIF_ADD_APP_HEADER
    if (loop_count >= 0 && loop_count <= 65535) {
        bytestream_put_byte(bytestream, 0x21);
        bytestream_put_byte(bytestream, 0xff);
        bytestream_put_byte(bytestream, 0x0b);
        bytestream_put_buffer(bytestream, "NETSCAPE2.0", 11);  // bytes 4 to 14
        bytestream_put_byte(bytestream, 0x03); // byte 15
        bytestream_put_byte(bytestream, 0x01); // byte 16
        bytestream_put_le16(bytestream, (uint16_t)loop_count);
        bytestream_put_byte(bytestream, 0x00); // byte 19
    }
#endif
    return 0;
}
Developer: DanielGit, Project: Intrisit201202, Lines: 63, Source file: gif.c


Example 13: ff_rtmp_packet_write

int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
                         int chunk_size, RTMPPacket **prev_pkt_ptr,
                         int *nb_prev_pkt)
{
    uint8_t pkt_hdr[16], *p = pkt_hdr;
    int mode = RTMP_PS_TWELVEBYTES;
    int off = 0;
    int written = 0;
    int ret;
    RTMPPacket *prev_pkt;
    int use_delta; // flag if using timestamp delta, not RTMP_PS_TWELVEBYTES
    uint32_t timestamp; // full 32-bit timestamp or delta value

    if ((ret = ff_rtmp_check_alloc_array(prev_pkt_ptr, nb_prev_pkt,
                                         pkt->channel_id)) < 0)
        return ret;
    prev_pkt = *prev_pkt_ptr;

    //if channel_id = 0, this is first presentation of prev_pkt, send full hdr.
    use_delta = prev_pkt[pkt->channel_id].channel_id &&
        pkt->extra == prev_pkt[pkt->channel_id].extra &&
        pkt->timestamp >= prev_pkt[pkt->channel_id].timestamp;

    timestamp = pkt->timestamp;
    if (use_delta) {
        timestamp -= prev_pkt[pkt->channel_id].timestamp;
    }
    if (timestamp >= 0xFFFFFF) {
        pkt->ts_field = 0xFFFFFF;
    } else {
        pkt->ts_field = timestamp;
    }

    if (use_delta) {
        if (pkt->type == prev_pkt[pkt->channel_id].type &&
            pkt->size == prev_pkt[pkt->channel_id].size) {
            mode = RTMP_PS_FOURBYTES;
            if (pkt->ts_field == prev_pkt[pkt->channel_id].ts_field)
                mode = RTMP_PS_ONEBYTE;
        } else {
            mode = RTMP_PS_EIGHTBYTES;
        }
    }

    if (pkt->channel_id < 64) {
        bytestream_put_byte(&p, pkt->channel_id | (mode << 6));
    } else if (pkt->channel_id < 64 + 256) {
        bytestream_put_byte(&p, 0               | (mode << 6));
        bytestream_put_byte(&p, pkt->channel_id - 64);
    } else {
        bytestream_put_byte(&p, 1               | (mode << 6));
        bytestream_put_le16(&p, pkt->channel_id - 64);
    }
    if (mode != RTMP_PS_ONEBYTE) {
        bytestream_put_be24(&p, pkt->ts_field);
        if (mode != RTMP_PS_FOURBYTES) {
            bytestream_put_be24(&p, pkt->size);
            bytestream_put_byte(&p, pkt->type);
            if (mode == RTMP_PS_TWELVEBYTES)
                bytestream_put_le32(&p, pkt->extra);
        }
    }
    if (pkt->ts_field == 0xFFFFFF)
        bytestream_put_be32(&p, timestamp);
    // save history
    prev_pkt[pkt->channel_id].channel_id = pkt->channel_id;
    prev_pkt[pkt->channel_id].type       = pkt->type;
    prev_pkt[pkt->channel_id].size       = pkt->size;
    prev_pkt[pkt->channel_id].timestamp  = pkt->timestamp;
    prev_pkt[pkt->channel_id].ts_field   = pkt->ts_field;
    prev_pkt[pkt->channel_id].extra      = pkt->extra;

    if ((ret = ffurl_write(h, pkt_hdr, p - pkt_hdr)) < 0)
        return ret;
    written = p - pkt_hdr + pkt->size;
    while (off < pkt->size) {
        int towrite = FFMIN(chunk_size, pkt->size - off);
        if ((ret = ffurl_write(h, pkt->data + off, towrite)) < 0)
            return ret;
        off += towrite;
        if (off < pkt->size) {
            uint8_t marker = 0xC0 | pkt->channel_id;
            if ((ret = ffurl_write(h, &marker, 1)) < 0)
                return ret;
            written++;
        }
    }
    return written;
}
Developer: Acidburn0zzz, Project: libav, Lines: 89, Source file: rtmppkt.c


Example 14: ff_rtmp_packet_write

int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
                         int chunk_size, RTMPPacket *prev_pkt)
{
	uint8_t pkt_hdr[16], *p = pkt_hdr;
	int mode = RTMP_PS_TWELVEBYTES;
	int off = 0;
	int size = 0;

	pkt->ts_delta = pkt->timestamp - prev_pkt[pkt->channel_id].timestamp;

	//if channel_id = 0, this is first presentation of prev_pkt, send full hdr.
	if (prev_pkt[pkt->channel_id].channel_id &&
	        pkt->extra == prev_pkt[pkt->channel_id].extra)
	{
		if (pkt->type == prev_pkt[pkt->channel_id].type &&
		        pkt->data_size == prev_pkt[pkt->channel_id].data_size)
		{
			mode = RTMP_PS_FOURBYTES;
			if (pkt->ts_delta == prev_pkt[pkt->channel_id].ts_delta)
				mode = RTMP_PS_ONEBYTE;
		}
		else
		{
			mode = RTMP_PS_EIGHTBYTES;
		}
	}

	if (pkt->channel_id < 64)
	{
		bytestream_put_byte(&p, pkt->channel_id | (mode << 6));
	}
	else if (pkt->channel_id < 64 + 256)
	{
		bytestream_put_byte(&p, 0               | (mode << 6));
		bytestream_put_byte(&p, pkt->channel_id - 64);
	}
	else
	{
		bytestream_put_byte(&p, 1               | (mode << 6));
		bytestream_put_le16(&p, pkt->channel_id - 64);
	}
	if (mode != RTMP_PS_ONEBYTE)
	{
		uint32_t timestamp = pkt->timestamp;
		if (mode != RTMP_PS_TWELVEBYTES)
			timestamp = pkt->ts_delta;
		bytestream_put_be24(&p, timestamp >= 0xFFFFFF ? 0xFFFFFF : timestamp);
		if (mode != RTMP_PS_FOURBYTES)
		{
			bytestream_put_be24(&p, pkt->data_size);
			bytestream_put_byte(&p, pkt->type);
			if (mode == RTMP_PS_TWELVEBYTES)
				bytestream_put_le32(&p, pkt->extra);
		}
		if (timestamp >= 0xFFFFFF)
			bytestream_put_be32(&p, timestamp);
	}
	// save history
	prev_pkt[pkt->channel_id].channel_id = pkt->channel_id;
	prev_pkt[pkt->channel_id].type       = pkt->type;
	prev_pkt[pkt->channel_id].data_size  = pkt->data_size;
	prev_pkt[pkt->channel_id].timestamp  = pkt->timestamp;
	if (mode != RTMP_PS_TWELVEBYTES)
	{
		prev_pkt[pkt->channel_id].ts_delta   = pkt->ts_delta;
	}
	else
	{
		prev_pkt[pkt->channel_id].ts_delta   = pkt->timestamp;
	}
	prev_pkt[pkt->channel_id].extra      = pkt->extra;

	ffurl_write(h, pkt_hdr, p-pkt_hdr);
	size = p - pkt_hdr + pkt->data_size;
	while (off < pkt->data_size)
	{
		int towrite = FFMIN(chunk_size, pkt->data_size - off);
		ffurl_write(h, pkt->data + off, towrite);
		off += towrite;
		if (off < pkt->data_size)
		{
			uint8_t marker = 0xC0 | pkt->channel_id;
			ffurl_write(h, &marker, 1);
			size++;
		}
	}
	return size;
}
Developer: hicks0074, Project: freescale_omx_framework, Lines: 88, Source file: rtmppkt.c


Example 15: xkcd_encode_frame

static int xkcd_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    const AVFrame * const picture = pict;	/* Actual image data */

	/* header_size = header size */
    int bytes_in_image, bytes_per_row, total_bytes, i, header_size, ret;

	/* pad_bytes_per_row = bytes of null to fill in at the end of a row of image data */
    int pad_bytes_per_row = 0;

	/* Number of bits per pixel */
    int bit_count = avctx->bits_per_coded_sample;

	/* buffer_data = data to be buffered, buf = buffer to write to */
    uint8_t *buffer_data, *buffer;

	/* Cite: BMP encoder */
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;

	/* Number of bytes of image data in a row */
	/* (width in pixels * bits per pixel) / 8 to put it in bytes.
	Add 7 bits to the width in bits to make sure to have enough
	bytes of storage when we divide (making sure when it truncates
	in division, it doesn't get rid of what we need) */
	/* Cite: BMP encoder */
    bytes_per_row = ((int64_t)avctx->width * (int64_t)bit_count + 7LL) >> 3LL;
	/* End cite */

	/* Bytes at the end of a row that are 'crossed out' */
	/* Take the remainder from the above bytes and fill in with
	padding by looking at the last two bits after 4 - bytes_per_row.*/
    pad_bytes_per_row = (4 - bytes_per_row) & 3;

	/* Total bytes in image */
    bytes_in_image = avctx->height * (bytes_per_row + pad_bytes_per_row);

    header_size = 14;

	/* Number of bytes in the entire file */
    total_bytes = bytes_in_image + header_size;

	/* Cite: BMP encoder */
    if ((ret = ff_alloc_packet2(avctx, pkt, total_bytes)) < 0)
        return ret;
    buffer = pkt->data;
	/* End cite */

	/* Start building the header */
    bytestream_put_byte(&buffer, 'X');                   // Filetype
    bytestream_put_byte(&buffer, 'K');                   // Filetype
    bytestream_put_byte(&buffer, 'C');                   // Filetype
    bytestream_put_byte(&buffer, 'D');                   // Filetype
    bytestream_put_le32(&buffer, total_bytes);           // Size of entire file
    bytestream_put_le16(&buffer, avctx->width);          // Width of image in pixels
    bytestream_put_le16(&buffer, avctx->height);         // Height of image in pixels
    bytestream_put_le16(&buffer, bit_count);             // Bits per pixel


    // Start the buffer
    buffer_data = picture->data[0];

	/* Write the image */
	/* Cite: BMP encoder */
    for(i = 0; i < avctx->height; i++) {
		/* Write line to buffer */
		memcpy(buffer, buffer_data, bytes_per_row);

		/* Point buffer to the end of the data and start of the padding */
        buffer += bytes_per_row;

		/* Null out the array which creates padding */
        memset(buffer, 0, pad_bytes_per_row);

		/* Point buffer to the end of the padding and start of the new data */
        buffer += pad_bytes_per_row;

		/* Now point to next row */
        buffer_data += picture->linesize[0];
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
	/* End cite */

    return 0;
}
Developer: greganderson, Project: ffmpeg, Lines: 88, Source file: xkcdenc.c


Example 16: pcx_encode_frame

static int pcx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    PCXContext *s = avctx->priv_data;
    AVFrame *const pict = &s->picture;
    const uint8_t *buf_end;
    uint8_t *buf;

    int bpp, nplanes, i, y, line_bytes, written, ret, max_pkt_size;
    const uint32_t *pal = NULL;
    uint32_t palette256[256];
    const uint8_t *src;

    *pict = *frame;
    pict->pict_type = AV_PICTURE_TYPE_I;
    pict->key_frame = 1;

    if (avctx->width > 65535 || avctx->height > 65535) {
        av_log(avctx, AV_LOG_ERROR, "image dimensions do not fit in 16 bits\n");
        return -1;
    }

    switch (avctx->pix_fmt) {
    case PIX_FMT_RGB24:
        bpp = 8;
        nplanes = 3;
        break;
    case PIX_FMT_RGB8:
    case PIX_FMT_BGR8:
    case PIX_FMT_RGB4_BYTE:
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_GRAY8:
        bpp = 8;
        nplanes = 1;
        ff_set_systematic_pal2(palette256, avctx->pix_fmt);
        pal = palette256;
        break;
    case PIX_FMT_PAL8:
        bpp = 8;
        nplanes = 1;
        pal = (uint32_t *)pict->data[1];
        break;
    case PIX_FMT_MONOBLACK:
        bpp = 1;
        nplanes = 1;
        pal = monoblack_pal;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "unsupported pixfmt\n");
        return -1;
    }

    line_bytes = (avctx->width * bpp + 7) >> 3;
    line_bytes = (line_bytes + 1) & ~1;

    max_pkt_size = 128 + avctx->height * 2 * line_bytes * nplanes + (pal ? 256*3 + 1 : 0);
    if ((ret = ff_alloc_packet2(avctx, pkt, max_pkt_size)) < 0)
        return ret;
    buf     = pkt->data;
    buf_end = pkt->data + pkt->size;

    bytestream_put_byte(&buf, 10);                  // manufacturer
    bytestream_put_byte(&buf, 5);                   // version
    bytestream_put_byte(&buf, 1);                   // encoding
    bytestream_put_byte(&buf, bpp);                 // bits per pixel per plane
    bytestream_put_le16(&buf, 0);                   // x min
    bytestream_put_le16(&buf, 0);                   // y min
    bytestream_put_le16(&buf, avctx->width - 1);    // x max
    bytestream_put_le16(&buf, avctx->height - 1);   // y max
    bytestream_put_le16(&buf, 0);                   // horizontal DPI
    bytestream_put_le16(&buf, 0);                   // vertical DPI
    for (i = 0; i < 16; i++)
        bytestream_put_be24(&buf, pal ? pal[i] : 0);// palette (<= 16 color only)
    bytestream_put_byte(&buf, 0);                   // reserved
    bytestream_put_byte(&buf, nplanes);             // number of planes
    bytestream_put_le16(&buf, line_bytes);          // scanline plane size in bytes

    while (buf - pkt->data < 128)
        *buf++= 0;

    src = pict->data[0];

    for (y = 0; y < avctx->height; y++) {
        if ((written = pcx_rle_encode(buf, buf_end - buf,
                                      src, line_bytes, nplanes)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "buffer too small\n");
            return -1;
        }
        buf += written;
        src += pict->linesize[0];
    }

    if (nplanes == 1 && bpp == 8) {
        if (buf_end - buf < 257) {
            av_log(avctx, AV_LOG_ERROR, "buffer too small\n");
            return -1;
        }
        bytestream_put_byte(&buf, 12);
        for (i = 0; i < 256; i++) {
            bytestream_put_be24(&buf, pal[i]);
//......... part of the code omitted here .........
Developer: Brhett, Project: FFmpeg, Lines: 101, Source file: pcxenc.c


Example 17: ff_put_wav_header

/* returns the size or -1 on error */
int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc, int flags)
{
    int bps, blkalign, bytespersec, frame_size;
    int hdrsize;
    int64_t hdrstart = avio_tell(pb);
    int waveformatextensible;
    uint8_t temp[256];
    uint8_t *riff_extradata       = temp;
    uint8_t *riff_extradata_start = temp;

    if (!enc->codec_tag || enc->codec_tag > 0xffff)
        return -1;

    /* We use the known constant frame size for the codec if known, otherwise
     * fall back on using AVCodecContext.frame_size, which is not as reliable
     * for indicating packet duration. */
    frame_size = av_get_audio_frame_duration(enc, enc->block_align);

    waveformatextensible = (enc->channels > 2 && enc->channel_layout) ||
                           enc->sample_rate > 48000 ||
                           enc->codec_id == AV_CODEC_ID_EAC3 ||
                           av_get_bits_per_sample(enc->codec_id) > 16;

    if (waveformatextensible)
        avio_wl16(pb, 0xfffe);
    else
        avio_wl16(pb, enc->codec_tag);

    avio_wl16(pb, enc->channels);
    avio_wl32(pb, enc->sample_rate);
    if (enc->codec_id == AV_CODEC_ID_ATRAC3 ||
        enc->codec_id == AV_CODEC_ID_G723_1 ||
        enc->codec_id == AV_CODEC_ID_MP2    ||
        enc->codec_id == AV_CODEC_ID_MP3    ||
        enc->codec_id == AV_CODEC_ID_GSM_MS) {
        bps = 0;
    } else {
        if (!(bps = av_get_bits_per_sample(enc->codec_id))) {
            if (enc->bits_per_coded_sample)
                bps = enc->bits_per_coded_sample;
            else
                bps = 16;  // default to 16
        }
    }
    if (bps != enc->bits_per_coded_sample && enc->bits_per_coded_sample) {
        av_log(enc, AV_LOG_WARNING,
               "requested bits_per_coded_sample (%d) "
               "and actually stored (%d) differ\n",
               enc->bits_per_coded_sample, bps);
    }

    if (enc->codec_id == AV_CODEC_ID_MP2) {
        blkalign = (144 * enc->bit_rate - 1)/enc->sample_rate + 1;
    } else if (enc->codec_id == AV_CODEC_ID_MP3) {
        blkalign = 576 * (enc->sample_rate <= (24000 + 32000)/2 ? 1 : 2);
    } else if (enc->codec_id == AV_CODEC_ID_AC3) {
        blkalign = 3840;                /* maximum bytes per frame */
    } else if (enc->codec_id == AV_CODEC_ID_AAC) {
        blkalign = 768 * enc->channels; /* maximum bytes per frame */
    } else if (enc->codec_id == AV_CODEC_ID_G723_1) {
        blkalign = 24;
    } else if (enc->block_align != 0) { /* specified by the codec */
        blkalign = enc->block_align;
    } else
        blkalign = bps * enc->channels / av_gcd(8, bps);
    if (enc->codec_id == AV_CODEC_ID_PCM_U8 ||
        enc->codec_id == AV_CODEC_ID_PCM_S24LE ||
        enc->codec_id == AV_CODEC_ID_PCM_S32LE ||
        enc->codec_id == AV_CODEC_ID_PCM_F32LE ||
        enc->codec_id == AV_CODEC_ID_PCM_F64LE ||
        enc->codec_id == AV_CODEC_ID_PCM_S16LE) {
        bytespersec = enc->sample_rate * blkalign;
    } else if (enc->codec_id == AV_CODEC_ID_G723_1) {
        bytespersec = 800;
    } else {
        bytespersec = enc->bit_rate / 8;
    }
    avio_wl32(pb, bytespersec); /* bytes per second */
    avio_wl16(pb, blkalign);    /* block align */
    avio_wl16(pb, bps);         /* bits per sample */
    if (enc->codec_id == AV_CODEC_ID_MP3) {
        bytestream_put_le16(&riff_extradata, 1);    /* wID */
        bytestream_put_le32(&riff_extradata, 2);    /* fdwFlags */
        bytestream_put_le16(&riff_extradata, 1152); /* nBlockSize */
        bytestream_put_le16(&riff_extradata, 1);    /* nFramesPerBlock */
        bytestream_put_le16(&riff_extradata, 1393); /* nCodecDelay */
    } else if (enc->codec_id == AV_CODEC_ID_MP2) {
        /* fwHeadLayer */
        bytestream_put_le16(&riff_extradata, 2);
        /* dwHeadBitrate */
        bytestream_put_le32(&riff_extradata, enc->bit_rate);
        /* fwHeadMode */
        bytestream_put_le16(&riff_extradata, enc->channels == 2 ? 1 : 8);
        /* fwHeadModeExt */
        bytestream_put_le16(&riff_extradata, 0);
        /* wHeadEmphasis */
        bytestream_put_le16(&riff_extradata, 1);
        /* fwHeadFlags */
        bytestream_put_le16(&riff_extradata, 16);
//......... part of the code omitted here .........
Developer: 309746069, Project: FFmpeg, Lines: 101, Source file: riffenc.c


Example 18: ff_put_wav_header

/* returns the size or -1 on error */
int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
{
    int bps, blkalign, bytespersec, frame_size;
    int hdrsize = 18;
    int waveformatextensible;
    uint8_t temp[256];
    uint8_t *riff_extradata       = temp;
    uint8_t *riff_extradata_start = temp;

    if (!enc->codec_tag || enc->codec_tag > 0xffff)
        return -1;

    /* We use the known constant frame size for the codec if known, otherwise
     * fall back on using AVCodecContext.frame_size, which is not as reliable
     * for indicating packet duration. */
    frame_size = av_get_audio_frame_duration(enc, enc->block_align);

    waveformatextensible = (enc->channels > 2 && enc->channel_layout) ||
                           enc->sample_rate > 48000 ||
                           av_get_bits_per_sample(enc->codec_id) > 16;

    if (waveformatextensible)
        avio_wl16(pb, 0xfffe);
    else
        avio_wl16(pb, enc->codec_tag);

    avio_wl16(pb, enc->channels);
    avio_wl32(pb, enc->sample_rate);
    if (enc->codec_id == AV_CODEC_ID_MP2 ||
        enc->codec_id == AV_CODEC_ID_MP3 ||
        enc->codec_id == AV_CODEC_ID_GSM_MS) {
        bps = 0;
    } else {
        if (!(bps = av_get_bits_per_sample(enc->codec_id))) {
            if (enc->bits_per_coded_sample)
                bps = enc->bits_per_coded_sample;
            else
                bps = 16;  // default to 16
        }
    }
    if (bps != enc->bits_per_coded_sample && enc->bits_per_coded_sample) {
        av_log(enc, AV_LOG_WARNING,
               "requested bits_per_coded_sample (%d) "
               "and actually stored (%d) differ\n",
               enc->bits_per_coded_sample, bps);
    }

    if (enc->codec_id == AV_CODEC_ID_MP2) {
        blkalign = frame_size;
    } else if (enc->codec_id == AV_CODEC_ID_MP3) {
        blkalign = 576 * (enc->sample_rate <= 24000 ? 1 : 2);
    } else if (enc->codec_id == AV_CODEC_ID_AC3) {
        blkalign = 3840;                /* maximum bytes per frame */
    } else if (enc->block_align != 0) { /* specified by the codec */
        blkalign = enc->block_align;
    } else
        blkalign = bps * enc->channels / av_gcd(8, bps);
    if (enc->codec_id == AV_CODEC_ID_PCM_U8 ||
        enc->codec_id == AV_CODEC_ID_PCM_S24LE ||
        enc->codec_id == AV_CODEC_ID_PCM_S32LE ||
        enc->codec_id == AV_CODEC_ID_PCM_F32LE ||
        enc->codec_id == AV_CODEC_ID_PCM_F64LE ||
        enc->codec_id == AV_CODEC_ID_PCM_S16LE) {
//......... part of the code omitted here .........
