
📄 simple_idct_arm.s

📁 FLV plugin for the TCPMP player
💻 Assembly (.S)
📖 Page 1 of 2
/*
 * simple_idct_arm.S
 * Copyright (C) 2002 Frederic 'dilb' Boulay.
 * All Rights Reserved.
 *
 * Author: Frederic Boulay <dilb@handhelds.org>
 *
 * You can redistribute this file and/or modify
 * it under the terms of the GNU General Public License (version 2)
 * as published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * The function defined in this file is derived from the simple_idct function
 * from the libavcodec library, part of the ffmpeg project.
 */

/* Useful constants for the algorithm; they are saved in __constant_ptr__ at */
/* the end of the source code. */
#define W1  22725
#define W2  21407
#define W3  19266
#define W4  16383
#define W5  12873
#define W6  8867
#define W7  4520
#define MASK_MSHW 0xFFFF0000

/* offsets of the constants in the vector */
#define offW1  0
#define offW2  4
#define offW3  8
#define offW4  12
#define offW5  16
#define offW6  20
#define offW7  24
#define offMASK_MSHW 28

#define ROW_SHIFT 11
#define ROW_SHIFT2MSHW (16-11)
#define COL_SHIFT 20
#define ROW_SHIFTED_1 1024   /* 1 << (ROW_SHIFT-1) */
#define COL_SHIFTED_1 524288 /* 1 << (COL_SHIFT-1) */


        .text
        .align
        .global simple_idct_ARM

simple_idct_ARM:
        @@ void simple_idct_ARM(int16_t *block)
        @@ save stack for reg needed (take all of them),
        @@ R0-R3 are scratch regs, so no need to save them, but R0 contains the pointer to block
        @@ so it must not be overwritten, if it is not saved!!
        @@ R12 is another scratch register, so it does not need to be saved either
        @@ save all registers
        stmfd sp!, {r4-r11, r14} @ R14 is also called LR
        @@ at this point, R0=block, other registers are free.
        add r14, r0, #112        @ R14=&block[8*7], better start from the last row, and decrease the value until row=0, i.e. R14=block.
        add r12, pc, #(__constant_ptr__-.-8) @ R12=__constant_ptr__, the vector containing the constants, probably not necessary to reserve a register for it
        @@ add 2 temporary variables in the stack: R0 and R14
        sub sp, sp, #8           @ allow 2 local variables
        str r0, [sp, #0]         @ save block in sp[0]
        @@ stack status
        @@ sp+4   free
        @@ sp+0   R0  (block)

        @@ at this point, R0=block, R14=&block[56], R12=__const_ptr_, R1-R11 free

__row_loop:
        @@ read the row and check if it is null, almost null, or not; according to strongarm specs, it is not necessary to optimise ldr accesses (i.e. split 32 bits in 2 16-bit words), at least it gives more usable registers :)
        ldr r1, [r14, #0]        @ R1=(int32)(R12)[0]=ROWr32[0] (relative row cast to a 32b pointer)
        ldr r2, [r14, #4]        @ R2=(int32)(R12)[1]=ROWr32[1]
        ldr r3, [r14, #8]        @ R3=ROWr32[2]
        ldr r4, [r14, #12]       @ R4=ROWr32[3]
        @@ check if the words are null; if all of them are null, then proceed with the next row (branch __end_row_loop),
        @@ if ROWr16[0] is the only one not null, then proceed with this special case (branch __almost_empty_row),
        @@ else follow the complete algorithm.
        @@ at this point, R0=block, R14=&block[n], R12=__const_ptr_, R1=ROWr32[0], R2=ROWr32[1],
        @@                R3=ROWr32[2], R4=ROWr32[3], R5-R11 free
        orr r5, r4, r3           @ R5=R4 | R3
        orr r5, r5, r2           @ R5=R4 | R3 | R2
        orrs r6, r5, r1          @ Test R5 | R1 (the aim is to check if everything is null)
        beq __end_row_loop
        mov r7, r1, asr #16      @ R7=R1>>16=ROWr16[1] (evaluate it now, as it could be useful later)
        ldrsh r6, [r14, #0]      @ R6=ROWr16[0]
        orrs r5, r5, r7          @ R5=R4 | R3 | R2 | R7
        beq __almost_empty_row

__b_evaluation:
        @@ at this point, R0=block (temp),  R1(free), R2=ROWr32[1], R3=ROWr32[2], R4=ROWr32[3],
        @@     R5=(temp), R6=ROWr16[0], R7=ROWr16[1], R8-R11 free,
        @@     R12=__const_ptr_, R14=&block[n]
        @@ to save some registers/calls, proceed with b0-b3 first, followed by a0-a3

        @@ MUL16(b0, W1, row[1]);
        @@ MUL16(b1, W3, row[1]);
        @@ MUL16(b2, W5, row[1]);
        @@ MUL16(b3, W7, row[1]);
        @@ MAC16(b0, W3, row[3]);
        @@ MAC16(b1, -W7, row[3]);
        @@ MAC16(b2, -W1, row[3]);
        @@ MAC16(b3, -W5, row[3]);
        ldr r8, [r12, #offW1]    @ R8=W1
        mov r2, r2, asr #16      @ R2=ROWr16[3]
        mul r0, r8, r7           @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
        ldr r9, [r12, #offW3]    @ R9=W3
        ldr r10, [r12, #offW5]   @ R10=W5
        mul r1, r9, r7           @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
        ldr r11, [r12, #offW7]   @ R11=W7
        mul r5, r10, r7          @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
        mul r7, r11, r7          @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
        teq r2, #0               @ if null avoid muls
        mlane r0, r9, r2, r0     @ R0+=W3*ROWr16[3]=b0 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)
        rsbne r2, r2, #0         @ R2=-ROWr16[3]
        mlane r1, r11, r2, r1    @ R1-=W7*ROWr16[3]=b1 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)
        mlane r5, r8, r2, r5     @ R5-=W1*ROWr16[3]=b2 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)
        mlane r7, r10, r2, r7    @ R7-=W5*ROWr16[3]=b3 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle)

        @@ at this point, R0=b0,  R1=b1, R2 (free), R3=ROWr32[2], R4=ROWr32[3],
        @@     R5=b2, R6=ROWr16[0], R7=b3, R8=W1, R9=W3, R10=W5, R11=W7,
        @@     R12=__const_ptr_, R14=&block[n]
        @@ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3];
        @@ if (temp != 0) {}
        orrs r2, r3, r4          @ R2=ROWr32[2] | ROWr32[3]
        beq __end_b_evaluation

        @@ at this point, R0=b0,  R1=b1, R2 (free), R3=ROWr32[2], R4=ROWr32[3],
        @@     R5=b2, R6=ROWr16[0], R7=b3, R8=W1, R9=W3, R10=W5, R11=W7,
        @@     R12=__const_ptr_, R14=&block[n]
        @@ MAC16(b0, W5, row[5]);
        @@ MAC16(b2, W7, row[5]);
        @@ MAC16(b3, W3, row[5]);
        @@ MAC16(b1, -W1, row[5]);
        @@ MAC16(b0, W7, row[7]);
        @@ MAC16(b2, W3, row[7]);
        @@ MAC16(b3, -W1, row[7]);
        @@ MAC16(b1, -W5, row[7]);
        mov r3, r3, asr #16      @ R3=ROWr16[5]
        teq r3, #0               @ if null avoid muls
        mlane r0, r10, r3, r0    @ R0+=W5*ROWr16[5]=b0
        mov r4, r4, asr #16      @ R4=ROWr16[7]
        mlane r5, r11, r3, r5    @ R5+=W7*ROWr16[5]=b2
        mlane r7, r9, r3, r7     @ R7+=W3*ROWr16[5]=b3
        rsbne r3, r3, #0         @ R3=-ROWr16[5]
        mlane r1, r8, r3, r1     @ R1-=W1*ROWr16[5]=b1
        @@ R3 is free now
        teq r4, #0               @ if null avoid muls
        mlane r0, r11, r4, r0    @ R0+=W7*ROWr16[7]=b0
        mlane r5, r9, r4, r5     @ R5+=W3*ROWr16[7]=b2
        rsbne r4, r4, #0         @ R4=-ROWr16[7]
        mlane r7, r8, r4, r7     @ R7-=W1*ROWr16[7]=b3
        mlane r1, r10, r4, r1    @ R1-=W5*ROWr16[7]=b1
        @@ R4 is free now

__end_b_evaluation:
        @@ at this point, R0=b0,  R1=b1, R2=ROWr32[2] | ROWr32[3] (tmp), R3 (free), R4 (free),
        @@     R5=b2, R6=ROWr16[0], R7=b3, R8 (free), R9 (free), R10 (free), R11 (free),
        @@     R12=__const_ptr_, R14=&block[n]

__a_evaluation:
        @@ a0 = (W4 * row[0]) + (1 << (ROW_SHIFT - 1));
        @@ a1 = a0 + W6 * row[2];
        @@ a2 = a0 - W6 * row[2];
        @@ a3 = a0 - W2 * row[2];
        @@ a0 = a0 + W2 * row[2];
        ldr r9, [r12, #offW4]    @ R9=W4
        mul r6, r9, r6           @ R6=W4*ROWr16[0]
        ldr r10, [r12, #offW6]   @ R10=W6
        ldrsh r4, [r14, #4]      @ R4=ROWr16[2] (a3 not defined yet)
        add r6, r6, #ROW_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(ROW_SHIFT-1) (a0)
        mul r11, r10, r4         @ R11=W6*ROWr16[2]
        ldr r8, [r12, #offW2]    @ R8=W2
        sub r3, r6, r11          @ R3=a0-W6*ROWr16[2] (a2)
        @@ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3];
        @@ if (temp != 0) {}
        teq r2, #0
        beq __end_bef_a_evaluation

        add r2, r6, r11          @ R2=a0+W6*ROWr16[2] (a1)
        mul r11, r8, r4          @ R11=W2*ROWr16[2]
        sub r4, r6, r11          @ R4=a0-W2*ROWr16[2] (a3)
        add r6, r6, r11          @ R6=a0+W2*ROWr16[2] (a0)

        @@ at this point, R0=b0,  R1=b1, R2=a1, R3=a2, R4=a3,
        @@     R5=b2, R6=a0, R7=b3, R8=W2, R9=W4, R10=W6, R11 (free),
        @@     R12=__const_ptr_, R14=&block[n]
        @@ a0 += W4*row[4]
        @@ a1 -= W4*row[4]
        @@ a2 -= W4*row[4]
        @@ a3 += W4*row[4]
        ldrsh r11, [r14, #8]     @ R11=ROWr16[4]
        teq r11, #0              @ if null avoid muls
        mulne r11, r9, r11       @ R11=W4*ROWr16[4]
        @@ R9 is free now
        ldrsh r9, [r14, #12]     @ R9=ROWr16[6]
        addne r6, r6, r11        @ R6+=W4*ROWr16[4] (a0)
        subne r2, r2, r11        @ R2-=W4*ROWr16[4] (a1)
        subne r3, r3, r11        @ R3-=W4*ROWr16[4] (a2)
        addne r4, r4, r11        @ R4+=W4*ROWr16[4] (a3)
        @@ W6 alone is no more useful, save W2*ROWr16[6] in it instead
        teq r9, #0               @ if null avoid muls
        mulne r11, r10, r9       @ R11=W6*ROWr16[6]
        addne r6, r6, r11        @ R6+=W6*ROWr16[6] (a0)
        mulne r10, r8, r9        @ R10=W2*ROWr16[6]
        @@ a0 += W6*row[6];
        @@ a3 -= W6*row[6];
        @@ a1 -= W2*row[6];
        @@ a2 += W2*row[6];
        subne r4, r4, r11        @ R4-=W6*ROWr16[6] (a3)
        subne r2, r2, r10        @ R2-=W2*ROWr16[6] (a1)
        addne r3, r3, r10        @ R3+=W2*ROWr16[6] (a2)

__end_a_evaluation:
        @@ at this point, R0=b0,  R1=b1, R2=a1, R3=a2, R4=a3,
        @@     R5=b2, R6=a0, R7=b3, R8 (free), R9 (free), R10 (free), R11 (free),
        @@     R12=__const_ptr_, R14=&block[n]
        @@ row[0] = (a0 + b0) >> ROW_SHIFT;
        @@ row[1] = (a1 + b1) >> ROW_SHIFT;
        @@ row[2] = (a2 + b2) >> ROW_SHIFT;
        @@ row[3] = (a3 + b3) >> ROW_SHIFT;
        @@ row[4] = (a3 - b3) >> ROW_SHIFT;
        @@ row[5] = (a2 - b2) >> ROW_SHIFT;
        @@ row[6] = (a1 - b1) >> ROW_SHIFT;
        @@ row[7] = (a0 - b0) >> ROW_SHIFT;
        add r8, r6, r0           @ R8=a0+b0
        add r9, r2, r1           @ R9=a1+b1
        @@ put 2 16-bit half-words in a 32-bit word
        @@ ROWr32[0]=ROWr16[0] | (ROWr16[1]<<16) (only Little Endian compliant then!!!)
        ldr r10, [r12, #offMASK_MSHW] @ R10=0xFFFF0000
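For reference, the per-row arithmetic carried out on this first page of the listing (the b0-b3 odd part, the a0-a3 even part, and the final butterfly) matches the C expressions quoted in the @@ comments. The sketch below collects those expressions into one plain C routine. It is illustrative only and not part of the original file: the name idct_row_sketch is made up here, the constants simply repeat the #define values above, and the empty-row/almost-empty-row shortcuts and conditional multiplications that the assembly performs are omitted.

#include <stdint.h>

/* Constants repeated from the #defines in the listing above. */
enum { W1 = 22725, W2 = 21407, W3 = 19266, W4 = 16383,
       W5 = 12873, W6 = 8867,  W7 = 4520,  ROW_SHIFT = 11 };

/* One row pass of the simple IDCT, written out without the
 * zero-skipping optimisations used by the assembly (sketch only). */
static void idct_row_sketch(int16_t *row)
{
    int a0, a1, a2, a3, b0, b1, b2, b3;

    /* even part: row[0], row[2], row[4], row[6] */
    a0 = W4 * row[0] + (1 << (ROW_SHIFT - 1));
    a1 = a0 + W6 * row[2];
    a2 = a0 - W6 * row[2];
    a3 = a0 - W2 * row[2];
    a0 = a0 + W2 * row[2];

    a0 += W4 * row[4];  a1 -= W4 * row[4];
    a2 -= W4 * row[4];  a3 += W4 * row[4];

    a0 += W6 * row[6];  a1 -= W2 * row[6];
    a2 += W2 * row[6];  a3 -= W6 * row[6];

    /* odd part: row[1], row[3], row[5], row[7] */
    b0 = W1 * row[1] + W3 * row[3] + W5 * row[5] + W7 * row[7];
    b1 = W3 * row[1] - W7 * row[3] - W1 * row[5] - W5 * row[7];
    b2 = W5 * row[1] - W1 * row[3] + W7 * row[5] + W3 * row[7];
    b3 = W7 * row[1] - W5 * row[3] + W3 * row[5] - W1 * row[7];

    /* butterfly and descale, as in the row[0..7] comments above */
    row[0] = (a0 + b0) >> ROW_SHIFT;
    row[1] = (a1 + b1) >> ROW_SHIFT;
    row[2] = (a2 + b2) >> ROW_SHIFT;
    row[3] = (a3 + b3) >> ROW_SHIFT;
    row[4] = (a3 - b3) >> ROW_SHIFT;
    row[5] = (a2 - b2) >> ROW_SHIFT;
    row[6] = (a1 - b1) >> ROW_SHIFT;
    row[7] = (a0 - b0) >> ROW_SHIFT;
}

The column pass follows the same structure with the larger COL_SHIFT/COL_SHIFTED_1 descaling defined above; it would fall on the second page of the listing, which is not shown here.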
