
📄 roqvideoenc.c.svn-base

mediastreamer2 is an open-source library for transporting media streams over the network.
                case RoQ_ID_CCC:
                    for (k=0; k<4; k++) {
                        int cb_idx = eval->subCels[j].subCels[k];
                        bytestream_put_byte(&spool.args,
                                            tempData->i2f2[cb_idx]);
                        ff_apply_vector_2x2(enc, subX + 2*(k&1), subY + (k&2),
                                            enc->cb2x2 + cb_idx);
                    }
                    break;
                }
                write_typecode(&spool, eval->subCels[j].best_coding);
            }
            break;
        }
    }

    /* Flush the remainder of the argument/type spool */
    while (spool.typeSpoolLength)
        write_typecode(&spool, 0x0);

#if 0
    uint8_t *fdata[3] = {enc->frame_to_enc->data[0],
                         enc->frame_to_enc->data[1],
                         enc->frame_to_enc->data[2]};
    uint8_t *cdata[3] = {enc->current_frame->data[0],
                         enc->current_frame->data[1],
                         enc->current_frame->data[2]};
    av_log(enc->avctx, AV_LOG_ERROR, "Expected distortion: %i Actual: %i\n",
           dist,
           block_sse(fdata, cdata, 0, 0, 0, 0,
                     enc->frame_to_enc->linesize,
                     enc->current_frame->linesize,
                     enc->width));  //WARNING: Square dimensions implied...
#endif
}

/**
 * Create a single YUV cell from a 2x2 section of the image
 */
static inline void frame_block_to_cell(uint8_t *block, uint8_t **data,
                                       int top, int left, int *stride)
{
    int i, j, u=0, v=0;

    for (i=0; i<2; i++)
        for (j=0; j<2; j++) {
            int x = (top+i)*stride[0] + left + j;
            *block++ = data[0][x];
            x = (top+i)*stride[1] + left + j;
            u       += data[1][x];
            v       += data[2][x];
        }

    *block++ = (u+2)/4;
    *block++ = (v+2)/4;
}

/**
 * Creates YUV clusters for the entire image
 */
static void create_clusters(AVFrame *frame, int w, int h, uint8_t *yuvClusters)
{
    int i, j, k, l;

    for (i=0; i<h; i+=4)
        for (j=0; j<w; j+=4) {
            for (k=0; k < 2; k++)
                for (l=0; l < 2; l++)
                    frame_block_to_cell(yuvClusters + (l + 2*k)*6, frame->data,
                                        i+2*k, j+2*l, frame->linesize);
            yuvClusters += 24;
        }
}

static void generate_codebook(RoqContext *enc, roq_tempdata_t *tempdata,
                              int *points, int inputCount, roq_cell *results,
                              int size, int cbsize)
{
    int i, j, k;
    int c_size = size*size/4;
    int *buf = points;
    int *codebook = av_malloc(6*c_size*cbsize*sizeof(int));
    int *closest_cb;

    if (size == 4)
        closest_cb = av_malloc(6*c_size*inputCount*sizeof(int));
    else
        closest_cb = tempdata->closest_cb2;

    ff_init_elbg(points, 6*c_size, inputCount, codebook, cbsize, 1,
                 closest_cb, &enc->randctx);
    ff_do_elbg  (points, 6*c_size, inputCount, codebook, cbsize, 1,
                 closest_cb, &enc->randctx);

    if (size == 4)
        av_free(closest_cb);

    buf = codebook;
    for (i=0; i<cbsize; i++)
        for (k=0; k<c_size; k++) {
            for(j=0; j<4; j++)
                results->y[j] = *buf++;

            results->u = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;
            results->v = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;

            results++;
        }

    av_free(codebook);
}

static void generate_new_codebooks(RoqContext *enc, roq_tempdata_t *tempData)
{
    int i, j;
    roq_codebooks_t *codebooks = &tempData->codebooks;
    int max = enc->width*enc->height/16;
    uint8_t mb2[3*4];
    roq_cell *results4 = av_malloc(sizeof(roq_cell)*MAX_CBS_4x4*4);
    uint8_t *yuvClusters = av_malloc(sizeof(int)*max*6*4);
    int *points = av_malloc(max*6*4*sizeof(int));
    int bias;

    /* Subsample YUV data */
    create_clusters(enc->frame_to_enc, enc->width, enc->height, yuvClusters);

    /* Cast to integer and apply chroma bias */
    for (i=0; i<max*24; i++) {
        bias = ((i%6)<4) ? 1 : CHROMA_BIAS;
        points[i] = bias*yuvClusters[i];
    }

    /* Create 4x4 codebooks */
    generate_codebook(enc, tempData, points, max, results4, 4, MAX_CBS_4x4);

    codebooks->numCB4 = MAX_CBS_4x4;

    tempData->closest_cb2 = av_malloc(max*4*sizeof(int));

    /* Create 2x2 codebooks */
    generate_codebook(enc, tempData, points, max*4, enc->cb2x2, 2, MAX_CBS_2x2);

    codebooks->numCB2 = MAX_CBS_2x2;

    /* Unpack 2x2 codebook clusters */
    for (i=0; i<codebooks->numCB2; i++)
        unpack_roq_cell(enc->cb2x2 + i, codebooks->unpacked_cb2 + i*2*2*3);

    /* Index all 4x4 entries to the 2x2 entries, unpack, and enlarge */
    for (i=0; i<codebooks->numCB4; i++) {
        for (j=0; j<4; j++) {
            unpack_roq_cell(&results4[4*i + j], mb2);
            index_mb(mb2, codebooks->unpacked_cb2, codebooks->numCB2,
                     &enc->cb4x4[i].idx[j], 2);
        }

        unpack_roq_qcell(codebooks->unpacked_cb2, enc->cb4x4 + i,
                         codebooks->unpacked_cb4 + i*4*4*3);

        enlarge_roq_mb4(codebooks->unpacked_cb4 + i*4*4*3,
                        codebooks->unpacked_cb4_enlarged + i*8*8*3);
    }

    av_free(yuvClusters);
    av_free(points);
    av_free(results4);
}

static void roq_encode_video(RoqContext *enc)
{
    roq_tempdata_t tempData;
    int i;

    memset(&tempData, 0, sizeof(tempData));

    create_cel_evals(enc, &tempData);

    generate_new_codebooks(enc, &tempData);

    if (enc->framesSinceKeyframe >= 1) {
        motion_search(enc, 8);
        motion_search(enc, 4);
    }

 retry_encode:
    for (i=0; i<enc->width*enc->height/64; i++)
        gather_data_for_cel(tempData.cel_evals + i, enc, &tempData);

    /* Quake 3 can't handle chunks bigger than 65536 bytes */
    if (tempData.mainChunkSize/8 > 65536) {
        enc->lambda *= .8;
        goto retry_encode;
    }

    remap_codebooks(enc, &tempData);

    write_codebooks(enc, &tempData);

    reconstruct_and_encode_image(enc, &tempData, enc->width, enc->height,
                                 enc->width*enc->height/64);

    /* Rotate frame history */
    FFSWAP(AVFrame *,     enc->current_frame, enc->last_frame);
    FFSWAP(motion_vect *, enc->last_motion4,  enc->this_motion4);
    FFSWAP(motion_vect *, enc->last_motion8,  enc->this_motion8);

    av_free(tempData.cel_evals);
    av_free(tempData.closest_cb2);

    enc->framesSinceKeyframe++;
}

static int roq_encode_init(AVCodecContext *avctx)
{
    RoqContext *enc = avctx->priv_data;

    av_init_random(1, &enc->randctx);

    enc->framesSinceKeyframe = 0;
    if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
        av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
        return -1;
    }

    if (((avctx->width)&(avctx->width-1))||((avctx->height)&(avctx->height-1)))
        av_log(avctx, AV_LOG_ERROR, "Warning: dimensions not power of two\n");

    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n",
               avctx->width, avctx->height);
        return -1;
    }

    enc->width  = avctx->width;
    enc->height = avctx->height;

    enc->framesSinceKeyframe = 0;
    enc->first_frame   = 1;
    enc->last_frame    = &enc->frames[0];
    enc->current_frame = &enc->frames[1];

    enc->this_motion4 =
        av_mallocz((enc->width*enc->height/16)*sizeof(motion_vect));

    enc->last_motion4 =
        av_malloc ((enc->width*enc->height/16)*sizeof(motion_vect));

    enc->this_motion8 =
        av_mallocz((enc->width*enc->height/64)*sizeof(motion_vect));

    enc->last_motion8 =
        av_malloc ((enc->width*enc->height/64)*sizeof(motion_vect));

    return 0;
}

static void roq_write_video_info_chunk(RoqContext *enc)
{
    /* ROQ info chunk */
    bytestream_put_le16(&enc->out_buf, RoQ_INFO);

    /* Size: 8 bytes */
    bytestream_put_le32(&enc->out_buf, 8);

    /* Unused argument */
    bytestream_put_byte(&enc->out_buf, 0x00);
    bytestream_put_byte(&enc->out_buf, 0x00);

    /* Width */
    bytestream_put_le16(&enc->out_buf, enc->width);

    /* Height */
    bytestream_put_le16(&enc->out_buf, enc->height);

    /* Unused in Quake 3, mimics the output of the real encoder */
    bytestream_put_byte(&enc->out_buf, 0x08);
    bytestream_put_byte(&enc->out_buf, 0x00);
    bytestream_put_byte(&enc->out_buf, 0x04);
    bytestream_put_byte(&enc->out_buf, 0x00);
}

static int roq_encode_frame(AVCodecContext *avctx, unsigned char *buf,
                            int buf_size, void *data)
{
    RoqContext *enc = avctx->priv_data;
    AVFrame *frame = data;
    uint8_t *buf_start = buf;

    enc->out_buf = buf;
    enc->avctx = avctx;

    enc->frame_to_enc = frame;

    if (frame->quality)
        enc->lambda = frame->quality - 1;
    else
        enc->lambda = 2*ROQ_LAMBDA_SCALE;

    /* 138 bits max per 8x8 block +
     * 256 codebooks*(6 bytes 2x2 + 4 bytes 4x4) + 8 bytes frame header */
    if (((enc->width*enc->height/64)*138+7)/8 + 256*(6+4) + 8 > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "  RoQ: Output buffer too small!\n");
        return -1;
    }

    /* Check for I frame */
    if (enc->framesSinceKeyframe == avctx->gop_size)
        enc->framesSinceKeyframe = 0;

    if (enc->first_frame) {
        /* Alloc memory for the reconstruction data (we must know the stride
           for that) */
        if (avctx->get_buffer(avctx, enc->current_frame) ||
            avctx->get_buffer(avctx, enc->last_frame)) {
            av_log(avctx, AV_LOG_ERROR, "  RoQ: get_buffer() failed\n");
            return -1;
        }

        /* Before the first video frame, write a "video info" chunk */
        roq_write_video_info_chunk(enc);

        enc->first_frame = 0;
    }

    /* Encode the actual frame */
    roq_encode_video(enc);

    return enc->out_buf - buf_start;
}

static int roq_encode_end(AVCodecContext *avctx)
{
    RoqContext *enc = avctx->priv_data;

    avctx->release_buffer(avctx, enc->last_frame);
    avctx->release_buffer(avctx, enc->current_frame);

    av_free(enc->this_motion4);
    av_free(enc->last_motion4);
    av_free(enc->this_motion8);
    av_free(enc->last_motion8);

    return 0;
}

AVCodec roq_encoder =
{
    "roqvideo",
    CODEC_TYPE_VIDEO,
    CODEC_ID_ROQ,
    sizeof(RoqContext),
    roq_encode_init,
    roq_encode_frame,
    roq_encode_end,
    .supported_framerates = (AVRational[]){{30,1}, {0,0}},
    .pix_fmts = (enum PixelFormat[]){PIX_FMT_YUV444P, -1},
};
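The listing above is only the encoder half; for orientation, here is a minimal sketch of how a caller might drive it through the libavcodec API of this era (avcodec_alloc_context(), avcodec_open(), avcodec_encode_video()). The function name encode_one_roq_frame, the caller-supplied input_picture frame, and the outbuf buffer are illustrative placeholders, not part of the source file, and the exact avcodec.h include path depends on how this FFmpeg tree is installed.

/* Minimal sketch, assuming the old pre-AVPacket libavcodec API that this
 * file is written against. encode_one_roq_frame(), input_picture and outbuf
 * are hypothetical names chosen for illustration. */
#include "avcodec.h"   /* path depends on the FFmpeg installation */

int encode_one_roq_frame(AVFrame *input_picture, int width, int height,
                         uint8_t *outbuf, int outbuf_size)
{
    AVCodec *codec;
    AVCodecContext *ctx;
    int out_size;

    avcodec_register_all();

    codec = avcodec_find_encoder(CODEC_ID_ROQ);
    if (!codec)
        return -1;

    ctx = avcodec_alloc_context();
    ctx->width     = width;                /* must be a multiple of 16, see roq_encode_init() */
    ctx->height    = height;
    ctx->pix_fmt   = PIX_FMT_YUV444P;      /* the only pix_fmt the encoder advertises */
    ctx->time_base = (AVRational){1, 30};  /* matches supported_framerates = {30,1} */
    ctx->gop_size  = 30;                   /* keyframe interval checked in roq_encode_frame() */

    if (avcodec_open(ctx, codec) < 0) {
        av_free(ctx);
        return -1;
    }

    /* roq_encode_frame() derives lambda from frame->quality;
     * 0 selects the default 2*ROQ_LAMBDA_SCALE. */
    input_picture->quality = 0;

    /* outbuf_size must cover the worst case checked in roq_encode_frame():
     * ((width*height/64)*138 + 7)/8 + 256*(6+4) + 8 bytes. */
    out_size = avcodec_encode_video(ctx, outbuf, outbuf_size, input_picture);

    avcodec_close(ctx);
    av_free(ctx);
    return out_size;   /* bytes written to outbuf, or a negative value on error */
}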
