📄 mpegaudiodec.h
字号:
/* <LIC_AMD_STD>
 * Copyright (c) 2005 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING
 * </LIC_AMD_STD> */
/* <CTL_AMD_STD> * </CTL_AMD_STD> */
/* <DOC_AMD_STD> * </DOC_AMD_STD> */

/*
 * mpegaudiodec.h -- support header for a fixed-point MPEG audio decoder:
 * fixed-width integer typedefs, bitstream-reader state, frame-size
 * constants, and the FRAC_BITS fixed-point multiply macros, with MIPS
 * HI/LO-register inline-asm fast paths.
 *
 * NOTE(review): the source this was recovered from had lost all of its
 * line breaks (web scrape); line structure was reconstructed from the
 * preserved '\' continuations and preprocessor directives.
 */
#ifndef MPEGAUDIODEC_H
#define MPEGAUDIODEC_H

#define GENERATE_STATIC_TABLES 0

/* Debug trace: swap the two definitions below to enable dprintf(). */
//#define dprintf printf
#define dprintf

#if defined(UNDER_CE)
#define USE_CE_ASM_FUNCTION
#define USE_MAI
#endif

/*
 * Fixed-width integer fallbacks for toolchains without <stdint.h>.
 * Guarded on INT64_MAX so this header also coexists with a real
 * <stdint.h>: the original unguarded typedefs (e.g. "long long" int64_t)
 * clash with a platform stdint.h that spells the same width differently.
 */
#ifndef INT64_MAX
#if defined(WIN32) || defined(UNDER_CE)
typedef unsigned short uint16_t;
typedef signed short int16_t;
typedef unsigned char uint8_t;
typedef signed char int8_t;
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
typedef unsigned int uint32_t;
typedef signed int int32_t;
#else
typedef unsigned short uint16_t;
typedef signed short int16_t;
typedef unsigned char uint8_t;
typedef signed char int8_t;
typedef unsigned int uint32_t;
typedef signed int int32_t;
typedef signed long long int64_t;
typedef unsigned long long uint64_t;
#endif
#endif /* INT64_MAX */

/* Portable "inline" spelling (MSVC / WinCE compilers use __inline). */
#ifndef inline_t
#if defined(WIN32) || defined(UNDER_CE)
#define inline_t __inline
#else
#define inline_t inline
#endif
#endif

#define VLC_TYPE int

/* Variable-length-code lookup table. */
typedef struct VLC {
    int bits;                 /* index width of the primary table */
    VLC_TYPE (*table)[2];     ///< code, bits
    int table_size;
    int table_allocated;
} VLC;

/* Bitstream reader state. */
typedef struct GetBitContext {
    const uint8_t *buffer;
    const uint8_t *buffer_end;
    int index;                /* current bit position within buffer */
    int size_in_bits;
} GetBitContext;

/*
 * Required number of additionally allocated bytes at the end of the input
 * bitstream for decoding.  This is mainly needed because some optimized
 * bitstream readers read 32 or 64 bit at once and could read over the end.
 * Note: if the first 23 bits of the additional bytes are not 0 then damaged
 * MPEG bitstreams could cause overread and segfault.
 */
#define FF_INPUT_BUFFER_PADDING_SIZE 8

/* in bytes */
#define AVCODEC_MAX_AUDIO_FRAME_SIZE 131072

#define CODEC_CAP_PARSE_ONLY 0x0004

/* max frame size, in samples */
#define MPA_FRAME_SIZE 1152
/* max compressed frame size */
#define MPA_MAX_CODED_FRAME_SIZE 1792
#define MPA_MAX_CHANNELS 2
#define SBLIMIT 32 /* number of subbands */

extern int get_bits_count(GetBitContext *s);

/* Channel modes from the MPEG audio frame header. */
#define MPA_STEREO  0
#define MPA_JSTEREO 1
#define MPA_DUAL    2
#define MPA_MONO    3

#define av_abort() exit(1)

/* define USE_HIGHPRECISION to have a bit exact (but slower) mpeg audio
 * decoder */
#define USE_HIGHPRECISION /* If you turn this off, you have to remake the tables in mpegaudiogentab.c */

#ifdef USE_HIGHPRECISION
#define FRAC_BITS  23 /* fractional bits for sb_samples and dct */
#define WFRAC_BITS 16 /* fractional bits for window */
#else
#define FRAC_BITS  15 /* fractional bits for sb_samples and dct */
#define WFRAC_BITS 14 /* fractional bits for window */
#endif

#define FRAC_ONE (1 << FRAC_BITS)

/*
 * MULL(a,b): FRAC_BITS fixed-point multiply, i.e. (a*b) >> FRAC_BITS.
 * On MIPS with 23 fractional bits the result is stitched together from
 * the HI/LO multiply registers; elsewhere it is a plain 64-bit multiply
 * and shift.
 */
#if defined(WIN32) || defined(UNDER_CE)
#define MULL(a,b) ((int)(((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS))
#else
#if defined(MIPS) && (23 == FRAC_BITS)
#define USE_INT_MULTC
/* We have to use the 9 bits from the lo register else we get background
 * fizzle.
 * NOTE(review): K&R parameters (implicit int) -- accepted only by the
 * pre-C99 MIPS toolchain this branch targets. */
static inline int MULL(A, B)
{
    int tmp, tmp2;
    asm volatile ("mult %0, %1"::"r"(A), "r"(B));
    asm volatile ("mfhi %0":"=r"(tmp));
    asm volatile ("sll %0, %1, 9":"=r"(tmp):"r"(tmp));
    asm volatile ("mflo %0":"=r"(tmp2));
    asm volatile ("srl %0, %1, 23":"=r"(tmp2):"r"(tmp2));
    asm volatile ("or %0, %1, %2":"=r"(tmp):"r"(tmp), "r"(tmp2));
    return tmp;
}

/* Statement form of MULL: OO = (AA*BB) >> 23.  We have to use the 9 bits
 * from the lo register else we get background fizzle. */
#define MULT(OO, AA, BB) \
{ \
    int tmp2; \
    asm volatile ("mult %0, %1"::"r"(AA), "r"(BB)); \
    asm volatile ("mfhi %0":"=r"(OO)); \
    asm volatile ("sll %0, %1, 9":"=r"(OO):"r"(OO)); \
    asm volatile ("mflo %0":"=r"(tmp2)); \
    asm volatile ("srl %0, %1, 23":"=r"(tmp2):"r"(tmp2)); \
    asm volatile ("or %0, %1, %2":"=r"(OO):"r"(OO), "r"(tmp2)); \
}

/* Fixed-point complex multiply: r = (a*c - b*d) >> 23,
 * i = (a*d + b*c) >> 23, using mult/msub and mult/madd MAC sequences. */
#define INT_CMULT(r,i,a,b,c,d) \
{ \
    int tmp; \
    asm volatile ("mult %0, %1"::"r"(a), "r"(c)); \
    asm volatile ("msub %0, %1"::"r"(b), "r"(d)); \
    asm volatile ("mfhi %0":"=r"(r)); \
    asm volatile ("sll %0, %1, 9":"=r"(r):"r"(r)); \
    asm volatile ("mflo %0":"=r"(tmp)); \
    asm volatile ("srl %0, %1, 23":"=r"(tmp):"r"(tmp)); \
    asm volatile ("or %0, %1, %2":"=r"(r):"r"(r), "r"(tmp)); \
    asm volatile ("mult %0, %1"::"r"(a), "r"(d)); \
    asm volatile ("madd %0, %1"::"r"(b), "r"(c)); \
    asm volatile ("mfhi %0":"=r"(i)); \
    asm volatile ("sll %0, %1, 9":"=r"(i):"r"(i)); \
    asm volatile ("mflo %0":"=r"(tmp)); \
    asm volatile ("srl %0, %1, 23":"=r"(tmp):"r"(tmp)); \
    asm volatile ("or %0, %1, %2":"=r"(i):"r"(i), "r"(tmp)); \
}
#else
#define MULL(a,b) ((int)(((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS))
#endif
#endif

#ifdef USE_INT_MULTC
#define BLOCKSCALE_DCT
#define xxMULDCT(a,b) ((int)(((int64_t)(a) * (int64_t)((b))) >> 23))
/* DCT multiply: top bits only, (HI << 9) -- LO register is discarded. */
#define MULDCT(OO,AA,BB) \
{ \
    asm volatile ("mult %0, %1"::"r"(AA), "r"(BB)); \
    asm volatile ("mfhi %0":"=r"(OO)); \
    asm volatile ("sll %0, %1, 9":"=r"(OO):"r"(OO)); \
}
/* (AA*BB) high word via pre-shifting BB by 6 and post-shifting HI by 3. */
#define MULT6(OO,AA,BB) \
{ \
    int tmp6; \
    asm volatile ("sll %0, %1, 6":"=r"(tmp6):"r"(BB)); \
    asm volatile ("mult %0, %1"::"r"(AA), "r"(tmp6)); \
    asm volatile ("mfhi %0":"=r"(OO)); \
    asm volatile ("sll %0, %1, 3":"=r"(OO):"r"(OO)); \
}
/* As MULT6 but pre-shift 7 / post-shift 2. */
#define MULT7(OO,AA,BB) \
{ \
    int tmp6; \
    asm volatile ("sll %0, %1, 7":"=r"(tmp6):"r"(BB)); \
    asm volatile ("mult %0, %1"::"r"(AA), "r"(tmp6)); \
    asm volatile ("mfhi %0":"=r"(OO)); \
    asm volatile ("sll %0, %1, 2":"=r"(OO):"r"(OO)); \
}
/* High word of (AA<<4)*(BB<<4), i.e. (AA*BB) >> 24.
 * NOTE(review): K&R parameters, as with MULL above. */
static inline int MULS24(AA, BB)
{
    int tmp, tmp2;
    asm volatile ("sll %0, %1, 4":"=r"(tmp):"r"(AA));
    asm volatile ("sll %0, %1, 4":"=r"(tmp2):"r"(BB));
    asm volatile ("mult %0, %1"::"r"(tmp), "r"(tmp2));
    asm volatile ("mfhi %0":"=r"(tmp));
    return tmp;
}
#else
/* C fallbacks; OUT_SHIFT must be defined by the including source file. */
#define MULS24(a,b) ((int)(((int64_t)(a) * (int64_t)(b)) >> (OUT_SHIFT)))
#define MULDCT(OO,AA,BB) OO=MULL(AA,BB)
#define MULT6(OO,AA,BB)  OO=MULL(AA,BB)
#define MULT7(OO,AA,BB)  OO=MULL(AA,BB)
#define MULT(OO,AA,BB)   OO=MULL(AA,BB)
#endif

#define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))

/* WARNING: only correct for positive numbers */
#define FIXR(a)     ((int)((a) * FRAC_ONE + 0.5))
#define FRAC_RND(a) (int)(((a) + (FRAC_ONE>>1)) >> FRAC_BITS)

/*
 * 16-term multiply-accumulate dot products (presumably the synthesis
 * window filter -- confirm against the decoder source).  Both macros
 * reference synth_buf, and SUM16A also ww/pp/j/sum, from the expansion
 * site's scope; 8 madd steps minus 8 msub steps accumulate in HI/LO and
 * the 32-bit result is stitched from (HI << 8) | (LO >> 24).
 */
#ifdef USE_INT_MULTC
#define SUM16(sum, ww, w32, pp) \
{ \
    int tmp; \
    pp = synth_buf + 16; \
    asm volatile ("mult %0, %1"::"r"((ww)[0 * 64]), "r"(pp[0 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[1 * 64]), "r"(pp[1 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[2 * 64]), "r"(pp[2 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[3 * 64]), "r"(pp[3 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[4 * 64]), "r"(pp[4 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[5 * 64]), "r"(pp[5 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[6 * 64]), "r"(pp[6 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[7 * 64]), "r"(pp[7 * 64])); \
    pp = synth_buf + 48; \
    asm volatile ("msub %0, %1"::"r"((w32)[0 * 64]), "r"(pp[0 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[1 * 64]), "r"(pp[1 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[2 * 64]), "r"(pp[2 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[3 * 64]), "r"(pp[3 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[4 * 64]), "r"(pp[4 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[5 * 64]), "r"(pp[5 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[6 * 64]), "r"(pp[6 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[7 * 64]), "r"(pp[7 * 64])); \
    asm volatile ("mfhi %0":"=r"(sum)); \
    asm volatile ("sll %0, %1, 8":"=r"(sum):"r"(sum)); \
    asm volatile ("mflo %0":"=r"(tmp)); \
    asm volatile ("srl %0, %1, 24":"=r"(tmp):"r"(tmp)); \
    asm volatile ("or %0, %1, %2":"=r"(sum):"r"(sum), "r"(tmp)); \
}
#define SUM16A(w32) \
{ \
    int tmp; \
    pp = synth_buf + 16 + j; \
    asm volatile ("mult %0, %1"::"r"((ww)[0 * 64]), "r"(pp[0 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[1 * 64]), "r"(pp[1 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[2 * 64]), "r"(pp[2 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[3 * 64]), "r"(pp[3 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[4 * 64]), "r"(pp[4 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[5 * 64]), "r"(pp[5 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[6 * 64]), "r"(pp[6 * 64])); \
    asm volatile ("madd %0, %1"::"r"((ww)[7 * 64]), "r"(pp[7 * 64])); \
    pp = synth_buf + 48 - j; \
    asm volatile ("msub %0, %1"::"r"((w32)[0 * 64]), "r"(pp[0 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[1 * 64]), "r"(pp[1 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[2 * 64]), "r"(pp[2 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[3 * 64]), "r"(pp[3 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[4 * 64]), "r"(pp[4 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[5 * 64]), "r"(pp[5 * 64])); \
    /* NOTE(review): recovered source was truncated after the [5*64] msub; the steps below were reconstructed from SUM16's pattern -- verify against the original file. */ \
    asm volatile ("msub %0, %1"::"r"((w32)[6 * 64]), "r"(pp[6 * 64])); \
    asm volatile ("msub %0, %1"::"r"((w32)[7 * 64]), "r"(pp[7 * 64])); \
    asm volatile ("mfhi %0":"=r"(sum)); \
    asm volatile ("sll %0, %1, 8":"=r"(sum):"r"(sum)); \
    asm volatile ("mflo %0":"=r"(tmp)); \
    asm volatile ("srl %0, %1, 24":"=r"(tmp):"r"(tmp)); \
    asm volatile ("or %0, %1, %2":"=r"(sum):"r"(sum), "r"(tmp)); \
}
#endif /* USE_INT_MULTC -- NOTE(review): closing #endif reconstructed; any non-asm SUM16/SUM16A fallbacks were lost to the truncated source */

#endif /* MPEGAUDIODEC_H */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -