
apedec.c

From the package: complete source code of FFmpeg ported to Symbian
Language: C
    }

    /* Keep a count of the blocks decoded in this frame */
    ctx->blocksdecoded = 0;

    /* Initialize the rice structs */
    ctx->riceX.k = 10;
    ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
    ctx->riceY.k = 10;
    ctx->riceY.ksum = (1 << ctx->riceY.k) * 16;

    /* The first 8 bits of input are ignored. */
    ctx->ptr++;

    range_start_decoding(ctx);
}

static const int32_t initial_coeffs[4] = {
    360, 317, -109, 98
};

static void init_predictor_decoder(APEContext * ctx)
{
    APEPredictor *p = &ctx->predictor;

    /* Zero the history buffers */
    memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(int32_t));
    p->buf = p->historybuffer;

    /* Initialize and zero the coefficients */
    memcpy(p->coeffsA[0], initial_coeffs, sizeof(initial_coeffs));
    memcpy(p->coeffsA[1], initial_coeffs, sizeof(initial_coeffs));
    memset(p->coeffsB, 0, sizeof(p->coeffsB));

    p->filterA[0] = p->filterA[1] = 0;
    p->filterB[0] = p->filterB[1] = 0;
    p->lastA[0]   = p->lastA[1]   = 0;
}

/** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */
static inline int APESIGN(int32_t x) {
    return (x < 0) - (x > 0);
}

static int predictor_update_filter(APEPredictor *p, const int decoded, const int filter,
                                   const int delayA, const int delayB,
                                   const int adaptA, const int adaptB)
{
    int32_t predictionA, predictionB;

    p->buf[delayA]     = p->lastA[filter];
    p->buf[adaptA]     = APESIGN(p->buf[delayA]);
    p->buf[delayA - 1] = p->buf[delayA] - p->buf[delayA - 1];
    p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]);

    predictionA = p->buf[delayA    ] * p->coeffsA[filter][0] +
                  p->buf[delayA - 1] * p->coeffsA[filter][1] +
                  p->buf[delayA - 2] * p->coeffsA[filter][2] +
                  p->buf[delayA - 3] * p->coeffsA[filter][3];

    /*  Apply a scaled first-order filter compression */
    p->buf[delayB]     = p->filterA[filter ^ 1] - ((p->filterB[filter] * 31) >> 5);
    p->buf[adaptB]     = APESIGN(p->buf[delayB]);
    p->buf[delayB - 1] = p->buf[delayB] - p->buf[delayB - 1];
    p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]);
    p->filterB[filter] = p->filterA[filter ^ 1];

    predictionB = p->buf[delayB    ] * p->coeffsB[filter][0] +
                  p->buf[delayB - 1] * p->coeffsB[filter][1] +
                  p->buf[delayB - 2] * p->coeffsB[filter][2] +
                  p->buf[delayB - 3] * p->coeffsB[filter][3] +
                  p->buf[delayB - 4] * p->coeffsB[filter][4];

    p->lastA[filter] = decoded + ((predictionA + (predictionB >> 1)) >> 10);
    p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5);

    if (!decoded) // no need updating filter coefficients
        return p->filterA[filter];

    if (decoded > 0) {
        p->coeffsA[filter][0] -= p->buf[adaptA    ];
        p->coeffsA[filter][1] -= p->buf[adaptA - 1];
        p->coeffsA[filter][2] -= p->buf[adaptA - 2];
        p->coeffsA[filter][3] -= p->buf[adaptA - 3];

        p->coeffsB[filter][0] -= p->buf[adaptB    ];
        p->coeffsB[filter][1] -= p->buf[adaptB - 1];
        p->coeffsB[filter][2] -= p->buf[adaptB - 2];
        p->coeffsB[filter][3] -= p->buf[adaptB - 3];
        p->coeffsB[filter][4] -= p->buf[adaptB - 4];
    } else {
        p->coeffsA[filter][0] += p->buf[adaptA    ];
        p->coeffsA[filter][1] += p->buf[adaptA - 1];
        p->coeffsA[filter][2] += p->buf[adaptA - 2];
        p->coeffsA[filter][3] += p->buf[adaptA - 3];

        p->coeffsB[filter][0] += p->buf[adaptB    ];
        p->coeffsB[filter][1] += p->buf[adaptB - 1];
        p->coeffsB[filter][2] += p->buf[adaptB - 2];
        p->coeffsB[filter][3] += p->buf[adaptB - 3];
        p->coeffsB[filter][4] += p->buf[adaptB - 4];
    }

    return p->filterA[filter];
}

static void predictor_decode_stereo(APEContext * ctx, int count)
{
    int32_t predictionA, predictionB;
    APEPredictor *p = &ctx->predictor;
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    while (count--) {
        /* Predictor Y */
        predictionA = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB, YADAPTCOEFFSA, YADAPTCOEFFSB);
        predictionB = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB, XADAPTCOEFFSA, XADAPTCOEFFSB);
        *(decoded0++) = predictionA;
        *(decoded1++) = predictionB;

        /* Combined */
        p->buf++;

        /* Have we filled the history buffer? */
        if (p->buf == p->historybuffer + HISTORY_SIZE) {
            memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t));
            p->buf = p->historybuffer;
        }
    }
}

static void predictor_decode_mono(APEContext * ctx, int count)
{
    APEPredictor *p = &ctx->predictor;
    int32_t *decoded0 = ctx->decoded0;
    int32_t predictionA, currentA, A;

    currentA = p->lastA[0];

    while (count--) {
        A = *decoded0;

        p->buf[YDELAYA] = currentA;
        p->buf[YDELAYA - 1] = p->buf[YDELAYA] - p->buf[YDELAYA - 1];

        predictionA = p->buf[YDELAYA    ] * p->coeffsA[0][0] +
                      p->buf[YDELAYA - 1] * p->coeffsA[0][1] +
                      p->buf[YDELAYA - 2] * p->coeffsA[0][2] +
                      p->buf[YDELAYA - 3] * p->coeffsA[0][3];

        currentA = A + (predictionA >> 10);

        p->buf[YADAPTCOEFFSA]     = APESIGN(p->buf[YDELAYA    ]);
        p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]);

        if (A > 0) {
            p->coeffsA[0][0] -= p->buf[YADAPTCOEFFSA    ];
            p->coeffsA[0][1] -= p->buf[YADAPTCOEFFSA - 1];
            p->coeffsA[0][2] -= p->buf[YADAPTCOEFFSA - 2];
            p->coeffsA[0][3] -= p->buf[YADAPTCOEFFSA - 3];
        } else if (A < 0) {
            p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA    ];
            p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1];
            p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2];
            p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3];
        }

        p->buf++;

        /* Have we filled the history buffer? */
        if (p->buf == p->historybuffer + HISTORY_SIZE) {
            memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t));
            p->buf = p->historybuffer;
        }

        p->filterA[0] = currentA + ((p->filterA[0] * 31) >> 5);
        *(decoded0++) = p->filterA[0];
    }

    p->lastA[0] = currentA;
}

static void do_init_filter(APEFilter *f, int16_t * buf, int order)
{
    f->coeffs = buf;
    f->historybuffer = buf + order;
    f->delay       = f->historybuffer + order * 2;
    f->adaptcoeffs = f->historybuffer + order;

    memset(f->historybuffer, 0, (order * 2) * sizeof(int16_t));
    memset(f->coeffs, 0, order * sizeof(int16_t));
    f->avg = 0;
}

static void init_filter(APEContext * ctx, APEFilter *f, int16_t * buf, int order)
{
    do_init_filter(&f[0], buf, order);
    do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
}

static inline void do_apply_filter(APEContext * ctx, int version, APEFilter *f,
                                   int32_t *data, int count, int order, int fracbits)
{
    int res;
    int absres;

    while (count--) {
        /* round fixedpoint scalar product */
        res = (ctx->dsp.scalarproduct_int16(f->delay - order, f->coeffs, order, 0) + (1 << (fracbits - 1))) >> fracbits;

        if (*data < 0)
            ctx->dsp.add_int16(f->coeffs, f->adaptcoeffs - order, order);
        else if (*data > 0)
            ctx->dsp.sub_int16(f->coeffs, f->adaptcoeffs - order, order);

        res += *data;

        *data++ = res;

        /* Update the output history */
        *f->delay++ = av_clip_int16(res);

        if (version < 3980) {
            /* Version ??? to < 3.98 files (untested) */
            f->adaptcoeffs[0]  = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
            f->adaptcoeffs[-4] >>= 1;
            f->adaptcoeffs[-8] >>= 1;
        } else {
            /* Version 3.98 and later files */

            /* Update the adaption coefficients */
            absres = (res < 0 ? -res : res);

            if (absres > (f->avg * 3))
                *f->adaptcoeffs = ((res >> 25) & 64) - 32;
            else if (absres > (f->avg * 4) / 3)
                *f->adaptcoeffs = ((res >> 26) & 32) - 16;
            else if (absres > 0)
                *f->adaptcoeffs = ((res >> 27) & 16) - 8;
            else
                *f->adaptcoeffs = 0;

            f->avg += (absres - f->avg) / 16;

            f->adaptcoeffs[-1] >>= 1;
            f->adaptcoeffs[-2] >>= 1;
            f->adaptcoeffs[-8] >>= 1;
        }

        f->adaptcoeffs++;

        /* Have we filled the history buffer? */
        if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
            memmove(f->historybuffer, f->delay - (order * 2),
                    (order * 2) * sizeof(int16_t));
            f->delay = f->historybuffer + order * 2;
            f->adaptcoeffs = f->historybuffer + order;
        }
    }
}

static void apply_filter(APEContext * ctx, APEFilter *f,
                         int32_t * data0, int32_t * data1,
                         int count, int order, int fracbits)
{
    do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits);
    if (data1)
        do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits);
}

static void ape_apply_filters(APEContext * ctx, int32_t * decoded0,
                              int32_t * decoded1, int count)
{
    int i;

    for (i = 0; i < APE_FILTER_LEVELS; i++) {
        if (!ape_filter_orders[ctx->fset][i])
            break;
        apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count,
                     ape_filter_orders[ctx->fset][i], ape_filter_fracbits[ctx->fset][i]);
    }
}

static void init_frame_decoder(APEContext * ctx)
{
    int i;
    init_entropy_decoder(ctx);
    init_predictor_decoder(ctx);

    for (i = 0; i < APE_FILTER_LEVELS; i++) {
        if (!ape_filter_orders[ctx->fset][i])
            break;
        init_filter(ctx, ctx->filters[i], ctx->filterbuf[i], ape_filter_orders[ctx->fset][i]);
    }
}

static void ape_unpack_mono(APEContext * ctx, int count)
{
    int32_t left;
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
        entropy_decode(ctx, count, 0);
        /* We are pure silence, so we're done. */
        av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n");
        return;
    }

    entropy_decode(ctx, count, 0);
    ape_apply_filters(ctx, decoded0, NULL, count);

    /* Now apply the predictor decoding */
    predictor_decode_mono(ctx, count);

    /* Pseudo-stereo - just copy left channel to right channel */
    if (ctx->channels == 2) {
        while (count--) {
            left = *decoded0;
            *(decoded1++) = *(decoded0++) = left;
        }
    }
}

static void ape_unpack_stereo(APEContext * ctx, int count)
{
    int32_t left, right;
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
        /* We are pure silence, so we're done. */
        av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n");
        return;
    }

    entropy_decode(ctx, count, 1);
    ape_apply_filters(ctx, decoded0, decoded1, count);

    /* Now apply the predictor decoding */
    predictor_decode_stereo(ctx, count);

    /* Decorrelate and scale to output depth */
    while (count--) {
        left = *decoded1 - (*decoded0 / 2);
        right = left + *decoded0;

        *(decoded0++) = left;
        *(decoded1++) = right;
    }
}

static int ape_decode_frame(AVCodecContext * avctx,
                            void *data, int *data_size,
                            const uint8_t * buf, int buf_size)
{
    APEContext *s = avctx->priv_data;
    int16_t *samples = data;
    int nblocks;
    int i, n;
    int blockstodecode;
    int bytes_used;

    if (buf_size == 0 && !s->samples) {
        *data_size = 0;
        return 0;
    }

    /* should not happen but who knows */
    if (BLOCKS_PER_LOOP * 2 * avctx->channels > *data_size) {
        av_log (avctx, AV_LOG_ERROR, "Packet size is too big to be handled in lavc! (max is %d where you have %d)\n", *data_size, s->samples * 2 * avctx->channels);
        return -1;
    }

    if(!s->samples){
        s->data = av_realloc(s->data, (buf_size + 3) & ~3);
        s->dsp.bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2);
        s->ptr = s->last_ptr = s->data;
        s->data_end = s->data + buf_size;

        nblocks = s->samples = bytestream_get_be32(&s->ptr);
        n = bytestream_get_be32(&s->ptr);
        if(n < 0 || n > 3){
            av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
            s->data = NULL;
            return -1;
        }
        s->ptr += n;

        s->currentframeblocks = nblocks;
        buf += 4;
        if (s->samples <= 0) {
            *data_size = 0;
            return buf_size;
        }

        memset(s->decoded0,  0, sizeof(s->decoded0));
        memset(s->decoded1,  0, sizeof(s->decoded1));

        /* Initialize the frame decoder */
        init_frame_decoder(s);
    }

    if (!s->data) {
        *data_size = 0;
        return buf_size;
    }

    nblocks = s->samples;
    blockstodecode = FFMIN(BLOCKS_PER_LOOP, nblocks);

    s->error=0;

    if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO))
        ape_unpack_mono(s, blockstodecode);
    else
        ape_unpack_stereo(s, blockstodecode);

    if(s->error || s->ptr > s->data_end){
        s->samples=0;
        av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n");
        return -1;
    }

    for (i = 0; i < blockstodecode; i++) {
        *samples++ = s->decoded0[i];
        if(s->channels == 2)
            *samples++ = s->decoded1[i];
    }

    s->samples -= blockstodecode;

    *data_size = blockstodecode * 2 * s->channels;
    bytes_used = s->samples ? s->ptr - s->last_ptr : buf_size;
    s->last_ptr = s->ptr;
    return bytes_used;
}

AVCodec ape_decoder = {
    "ape",
    CODEC_TYPE_AUDIO,
    CODEC_ID_APE,
    sizeof(APEContext),
    ape_decode_init,
    NULL,
    ape_decode_close,
    ape_decode_frame,
#ifdef __CW32__
    0,
    0,
    0,
    0,
    0,
    NULL_IF_CONFIG_SMALL("Monkey's Audio"),
#else
    .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
#endif
};
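
Editor's note: the final loop of ape_unpack_stereo() above undoes a mid/side-style decorrelation, recovering left and right from a difference signal and an approximate mid signal. The standalone sketch below (not part of apedec.c) illustrates why the reconstruction is exact despite integer truncation; the encoder-side formulas (diff, mid) are an assumption for illustration, since only the decode direction appears in this file.

/* Standalone sketch: round-trip of the decorrelation used in ape_unpack_stereo(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t left = -12345, right = 6789;

    /* Assumed encoder side (not shown in this file): store the channel
     * difference and an approximate mid value. */
    int32_t diff = right - left;
    int32_t mid  = left + diff / 2;

    /* Decoder side, mirroring ape_unpack_stereo(): decoded0 plays the role of
     * diff, decoded1 the role of mid. The same truncated diff/2 is subtracted
     * here that was added above, so the original samples come back exactly. */
    int32_t out_left  = mid - diff / 2;
    int32_t out_right = out_left + diff;

    printf("left: %d -> %d, right: %d -> %d\n", left, out_left, right, out_right);
    return 0;
}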
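Editor's note: ape_decode_frame() decodes at most BLOCKS_PER_LOOP blocks per call and keeps the rest of the packet in its internal state, so a caller is expected to keep feeding the same packet until it is consumed. The sketch below (not part of apedec.c) shows one way to drive the decoder through the legacy libavcodec API of the same era (avcodec_init/avcodec_register_all/avcodec_decode_audio2); it assumes this Symbian port exposes those standard entry points, and get_next_ape_packet() is a hypothetical stand-in for whatever demuxer supplies whole APE frames.

/* Usage sketch: decoding APE packets with the legacy avcodec_decode_audio2() API. */
#include <stdint.h>
#include "libavcodec/avcodec.h"   /* include path depends on the build setup */

extern int get_next_ape_packet(uint8_t **buf, int *size);   /* hypothetical packet source */

int decode_ape_stream(void)
{
    /* avcodec_decode_audio2() requires the output buffer to be at least
     * AVCODEC_MAX_AUDIO_FRAME_SIZE bytes; keep it static to spare the stack. */
    static int16_t outbuf[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
    AVCodec *codec;
    AVCodecContext *ctx;
    uint8_t *pkt;
    int pkt_size, out_size, used;

    avcodec_init();
    avcodec_register_all();

    codec = avcodec_find_decoder(CODEC_ID_APE);
    ctx   = avcodec_alloc_context();
    if (!codec || !ctx || avcodec_open(ctx, codec) < 0)
        return -1;

    while (get_next_ape_packet(&pkt, &pkt_size) > 0) {
        /* Resubmit the packet until ape_decode_frame() reports it fully used. */
        while (pkt_size > 0) {
            out_size = sizeof(outbuf);   /* capacity in, decoded bytes out */
            used = avcodec_decode_audio2(ctx, outbuf, &out_size, pkt, pkt_size);
            if (used < 0)
                break;                   /* decode error on this packet */
            if (out_size > 0) {
                /* out_size bytes of interleaved 16-bit PCM are now in outbuf[] */
            }
            pkt      += used;
            pkt_size -= used;
        }
    }

    avcodec_close(ctx);
    av_free(ctx);
    return 0;
}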
