h264dsputil.s
#NO_APP
	lw $3,20($sp)
#APP
	S32ALN xr4,xr2,xr1,$3
	S32ALN xr5,xr3,xr2,$3
#NO_APP
	li $2,3 # 0x3
#APP
	S32ALN xr6,xr5,xr4,$2
	D32SLR xr1,xr5,xr0,xr0,8
	S32SFL xr0,xr1,xr5,xr7,ptn3
	Q8MUL xr2,xr4,xr14,xr1
	Q8MUL xr8,xr7,xr15,xr7
	Q8MUL xr4,xr6,xr14,xr3
	Q16ADD xr2,xr2,xr2,xr0,AA,XW
	Q16ADD xr0,xr1,xr1,xr1,SS,XW
	Q16ADD xr0,xr7,xr7,xr7,SS,XW
	S32SFL xr1,xr0,xr1,xr0,ptn3
	Q16ACC xr0,xr2,xr7,xr1,AA
	Q16ADD xr4,xr4,xr4,xr0,AA,XW
	Q16ADD xr3,xr3,xr3,xr0,SS,XW
	Q16ADD xr0,xr8,xr8,xr8,SS,XW
	S32SFL xr3,xr0,xr3,xr0,ptn3
	Q16ACC xr0,xr4,xr8,xr3,AA
	S32SFL xr0,xr3,xr1,xr1,ptn3
	S32SDI xr1,$4,4
#NO_APP
	lw $2,8($sp)
#APP
	S32LDD xr1,$2,0
	S32LDD xr2,$2,4
	S32LDD xr3,$2,8
#NO_APP
	addu $2,$11,$2
	sw $2,8($sp)
#APP
	pref 0,0($2)
#NO_APP
	lw $3,24($sp)
#APP
	S32ALN xr4,xr2,xr1,$3
	S32ALN xr5,xr3,xr2,$3
#NO_APP
	li $2,3 # 0x3
#APP
	S32ALN xr6,xr5,xr4,$2
	D32SLR xr1,xr5,xr0,xr0,8
	S32SFL xr0,xr1,xr5,xr7,ptn3
	Q8MUL xr2,xr4,xr14,xr1
	Q8MUL xr8,xr7,xr15,xr7
	Q8MUL xr4,xr6,xr14,xr3
	Q16ADD xr2,xr2,xr2,xr0,AA,XW
	Q16ADD xr0,xr1,xr1,xr1,SS,XW
	Q16ADD xr0,xr7,xr7,xr7,SS,XW
	S32SFL xr1,xr0,xr1,xr0,ptn3
	Q16ACC xr0,xr2,xr7,xr1,AA
	Q16ADD xr4,xr4,xr4,xr0,AA,XW
	Q16ADD xr3,xr3,xr3,xr0,SS,XW
	Q16ADD xr0,xr8,xr8,xr8,SS,XW
	S32SFL xr3,xr0,xr3,xr0,ptn3
	Q16ACC xr0,xr4,xr8,xr3,AA
	S32SFL xr0,xr3,xr1,xr1,ptn3
	S32SDI xr1,$4,4
#NO_APP
	lw $2,12($sp)
#APP
	S32LDD xr1,$2,0
	S32LDD xr2,$2,4
	S32LDD xr3,$2,8
#NO_APP
	addu $2,$11,$2
	sw $2,12($sp)
#APP
	pref 0,0($2)
	S32ALN xr4,xr2,xr1,$10
	S32ALN xr5,xr3,xr2,$10
#NO_APP
	li $2,3 # 0x3
#APP
	S32ALN xr6,xr5,xr4,$2
	D32SLR xr1,xr5,xr0,xr0,8
	S32SFL xr0,xr1,xr5,xr7,ptn3
	Q8MUL xr2,xr4,xr14,xr1
	Q8MUL xr8,xr7,xr15,xr7
	Q8MUL xr4,xr6,xr14,xr3
	Q16ADD xr2,xr2,xr2,xr0,AA,XW
	Q16ADD xr0,xr1,xr1,xr1,SS,XW
	Q16ADD xr0,xr7,xr7,xr7,SS,XW
	S32SFL xr1,xr0,xr1,xr0,ptn3
	Q16ACC xr0,xr2,xr7,xr1,AA
	Q16ADD xr4,xr4,xr4,xr0,AA,XW
	Q16ADD xr3,xr3,xr3,xr0,SS,XW
	Q16ADD xr0,xr8,xr8,xr8,SS,XW
	S32SFL xr3,xr0,xr3,xr0,ptn3
	Q16ACC xr0,xr4,xr8,xr3,AA
	S32SFL xr0,xr3,xr1,xr1,ptn3
	S32SDI xr1,$4,4
#NO_APP
	li $2,13 # 0xd
	addiu $5,$5,1
	.set noreorder
	.set nomacro
	bne $5,$2,$L57
	addu $2,$6,$4
	.set macro
	.set reorder
# second (vertical) pass of put_h264_qpel8_hv_lowpass_mxu: tap constants 20 and 5, +512 rounding, arithmetic shift right by 10
	li $2,1310720 # 0x140000
	ori $2,$2,0x14
#APP
	S32I2M xr15,$2
#NO_APP
	li $3,327680 # 0x50000
	ori $3,$3,0x5
#APP
	S32I2M xr14,$3
#NO_APP
	li $4,512 # 0x200
#APP
	S32I2M xr13,$4
#NO_APP
	li $2,65536 # 0x10000
	ori $2,$2,0x1
#APP
	S32I2M xr11,$2
#NO_APP
	move $8,$0
	.set noreorder
	.set nomacro
	j $L54
	subu $5,$13,$6
	.set macro
	.set reorder
$L60:
	addu $12,$12,$7
$L54:
	move $2,$5
#APP
	S32LDIV xr1,$2,$6,0
	S32LDIV xr2,$2,$6,0
	S32LDIV xr3,$2,$6,0
	D16MUL xr10,xr2,xr14,xr9,WW
	D16MUL xr8,xr3,xr15,xr7,WW
	S32LDIV xr4,$2,$6,0
	S32LDIV xr5,$2,$6,0
	S32LDDV xr12,$2,$6,0
	D16MUL xr3,xr5,xr14,xr2,WW
	D16MUL xr5,xr4,xr15,xr4,WW
	Q16ADD xr12,xr1,xr12,xr0,AA,WW
	D16MUL xr12,xr12,xr11,xr1,WW
	D32ACC xr12,xr8,xr10,xr0,SS
	D32ACC xr1,xr7,xr9,xr0,SS
	D32ACC xr12,xr5,xr3,xr0,SS
	D32ACC xr1,xr4,xr2,xr0,SS
	D32ADD xr12,xr12,xr13,xr0,AA
	D32ADD xr1,xr1,xr13,xr0,AA
	D32SAR xr2,xr12,xr1,xr1,10
	S32SFL xr0,xr2,xr1,xr1,ptn3
	Q16SAT xr1,xr0,xr1
	S32M2I xr1, $2
#NO_APP
	sh $2,0($12)
	addiu $2,$5,4
#APP
	S32LDIV xr1,$2,$6,0
	S32LDIV xr2,$2,$6,0
	S32LDIV xr3,$2,$6,0
	D16MUL xr10,xr2,xr14,xr9,WW
	D16MUL xr8,xr3,xr15,xr7,WW
	S32LDIV xr4,$2,$6,0
	S32LDIV xr5,$2,$6,0
	S32LDDV xr12,$2,$6,0
	D16MUL xr3,xr5,xr14,xr2,WW
	D16MUL xr5,xr4,xr15,xr4,WW
	Q16ADD xr12,xr1,xr12,xr0,AA,WW
	D16MUL xr12,xr12,xr11,xr1,WW
	D32ACC xr12,xr8,xr10,xr0,SS
	D32ACC xr1,xr7,xr9,xr0,SS
	D32ACC xr12,xr5,xr3,xr0,SS
	D32ACC xr1,xr4,xr2,xr0,SS
	D32ADD xr12,xr12,xr13,xr0,AA
	D32ADD xr1,xr1,xr13,xr0,AA
	D32SAR xr2,xr12,xr1,xr1,10
	S32SFL xr0,xr2,xr1,xr1,ptn3
	Q16SAT xr1,xr0,xr1
	S32M2I xr1, $3
#NO_APP
	sh $3,2($12)
	addiu $2,$5,8
#APP
	S32LDIV xr1,$2,$6,0
	S32LDIV xr2,$2,$6,0
	S32LDIV xr3,$2,$6,0
	D16MUL xr10,xr2,xr14,xr9,WW
	D16MUL xr8,xr3,xr15,xr7,WW
	S32LDIV xr4,$2,$6,0
	S32LDIV xr5,$2,$6,0
	S32LDDV xr12,$2,$6,0
	D16MUL xr3,xr5,xr14,xr2,WW
	D16MUL xr5,xr4,xr15,xr4,WW
	Q16ADD xr12,xr1,xr12,xr0,AA,WW
	D16MUL xr12,xr12,xr11,xr1,WW
	D32ACC xr12,xr8,xr10,xr0,SS
	D32ACC xr1,xr7,xr9,xr0,SS
	D32ACC xr12,xr5,xr3,xr0,SS
	D32ACC xr1,xr4,xr2,xr0,SS
	D32ADD xr12,xr12,xr13,xr0,AA
	D32ADD xr1,xr1,xr13,xr0,AA
	D32SAR xr2,xr12,xr1,xr1,10
	S32SFL xr0,xr2,xr1,xr1,ptn3
	Q16SAT xr1,xr0,xr1
	S32M2I xr1, $3
#NO_APP
	sh $3,4($12)
	addiu $2,$5,12
#APP
	S32LDIV xr1,$2,$6,0
	S32LDIV xr2,$2,$6,0
	S32LDIV xr3,$2,$6,0
	D16MUL xr10,xr2,xr14,xr9,WW
	D16MUL xr8,xr3,xr15,xr7,WW
	S32LDIV xr4,$2,$6,0
	S32LDIV xr5,$2,$6,0
	S32LDDV xr12,$2,$6,0
	D16MUL xr3,xr5,xr14,xr2,WW
	D16MUL xr5,xr4,xr15,xr4,WW
	Q16ADD xr12,xr1,xr12,xr0,AA,WW
	D16MUL xr12,xr12,xr11,xr1,WW
	D32ACC xr12,xr8,xr10,xr0,SS
	D32ACC xr1,xr7,xr9,xr0,SS
	D32ACC xr12,xr5,xr3,xr0,SS
	D32ACC xr1,xr4,xr2,xr0,SS
	D32ADD xr12,xr12,xr13,xr0,AA
	D32ADD xr1,xr1,xr13,xr0,AA
	D32SAR xr2,xr12,xr1,xr1,10
	S32SFL xr0,xr2,xr1,xr1,ptn3
	Q16SAT xr1,xr0,xr1
	S32M2I xr1, $2
#NO_APP
	sh $2,6($12)
	addiu $8,$8,1
	li $2,8 # 0x8
	.set noreorder
	.set nomacro
	bne $8,$2,$L60
	addu $5,$5,$6
	.set macro
	.set reorder
	.set noreorder
	.set nomacro
	j $31
	addiu $sp,$sp,32
	.set macro
	.set reorder
	.end put_h264_qpel8_hv_lowpass_mxu
# 16x16 H+V lowpass: runs the 8x8 routine on the four 8x8 quadrants (the last call is a tail call)
	.section .text.put_h264_qpel16_hv_lowpass_mxu,"ax",@progbits
	.align 2
	.align 5
	.ent put_h264_qpel16_hv_lowpass_mxu
	.type put_h264_qpel16_hv_lowpass_mxu, @function
put_h264_qpel16_hv_lowpass_mxu:
	.frame $sp,56,$31 # vars= 0, regs= 8/0, args= 24, gp= 0
	.mask 0x807f0000,-4
	.fmask 0x00000000,0
	.set noreorder
	.set nomacro
	addiu $sp,$sp,-56
	sw $20,40($sp)
	sw $18,32($sp)
	lw $20,72($sp)
	lw $18,76($sp)
	sw $22,48($sp)
	sw $19,36($sp)
	addiu $22,$5,16
	move $19,$7
	sw $17,28($sp)
	sw $16,24($sp)
	move $17,$6
	move $16,$4
	sw $31,52($sp)
	sw $21,44($sp)
	sw $20,16($sp)
	move $21,$5
	jal put_h264_qpel8_hv_lowpass_mxu
	sw $18,20($sp)
	addiu $4,$16,8
	move $5,$22
	addiu $6,$17,8
	move $7,$19
	sw $20,16($sp)
	jal put_h264_qpel8_hv_lowpass_mxu
	sw $18,20($sp)
	sll $2,$18,3
	sll $3,$19,3
	addu $17,$17,$2
	addu $16,$16,$3
	move $5,$21
	move $4,$16
	move $6,$17
	move $7,$19
	sw $20,16($sp)
	jal put_h264_qpel8_hv_lowpass_mxu
	sw $18,20($sp)
	sw $20,72($sp)
	sw $18,76($sp)
	addiu $4,$16,8
	move $5,$22
	addiu $6,$17,8
	move $7,$19
	lw $31,52($sp)
	lw $22,48($sp)
	lw $21,44($sp)
	lw $20,40($sp)
	lw $19,36($sp)
	lw $18,32($sp)
	lw $17,28($sp)
	lw $16,24($sp)
	j put_h264_qpel8_hv_lowpass_mxu
	addiu $sp,$sp,56
	.set macro
	.set reorder
	.end put_h264_qpel16_hv_lowpass_mxu
# 8-row horizontal 6-tap lowpass with (v + 16) >> 5 rounding, then a rounded byte average (Q8AVGR) against a second input row, 8 bytes per row
	.section .text.put_h264_qpel8_h_lowpass_avg_mxu,"ax",@progbits
	.align 2
	.align 5
	.ent put_h264_qpel8_h_lowpass_avg_mxu
	.type put_h264_qpel8_h_lowpass_avg_mxu, @function
put_h264_qpel8_h_lowpass_avg_mxu:
	.frame $sp,8,$31 # vars= 0, regs= 1/0, args= 0, gp= 0
	.mask 0x00010000,-8
	.fmask 0x00000000,0
	addiu $sp,$sp,-8
	lw $3,24($sp)
	li $9,-4 # 0xfffffffffffffffc
	addu $3,$5,$3
	addiu $10,$5,-2
	li $2,4 # 0x4
	andi $11,$10,0x3
	andi $8,$3,0x3
	andi $12,$5,0x3
	and $14,$3,$9
	li $3,17104896 # 0x1050000
	sw $16,0($sp)
	subu $25,$2,$8
	subu $13,$2,$11
	move $16,$6
	subu $11,$2,$12
	ori $3,$3,0x105
	and $10,$10,$9
	and $8,$5,$9
#APP
	S32I2M xr15,$3
#NO_APP
	li $2,336855040 # 0x14140000
	ori $2,$2,0x501
#APP
	S32I2M xr14,$2
#NO_APP
	li $3,1048576 # 0x100000
	ori $3,$3,0x10
#APP
	S32I2M xr13,$3
#NO_APP
	move $6,$0
	addiu $9,$4,-4
	addu $24,$10,$7
	addu $15,$8,$7
	.set noreorder
	.set nomacro
	j $L64
	li $12,3 # 0x3
	.set macro
	.set reorder
$L66:
	addu $2,$16,$9
	addu $14,$14,$7
	addu $10,$10,$7
	addu $8,$8,$7
	addiu $9,$2,-8
$L64:
#APP
	S32LDD xr1,$10,0
	S32LDD xr2,$10,4
	S32LDD xr3,$10,8
	pref 0,0($24)
	S32ALN xr4,xr2,xr1,$13
	S32ALN xr5,xr3,xr2,$13
	S32ALN xr6,xr5,xr4,$12
	D32SLR xr1,xr5,xr0,xr0,8
	S32SFL xr0,xr1,xr5,xr7,ptn3
	Q8MUL xr2,xr4,xr14,xr1
	Q8MUL xr8,xr7,xr15,xr7
	Q8MUL xr4,xr6,xr14,xr3
	Q16ADD xr2,xr2,xr2,xr0,AA,XW
	Q16ADD xr0,xr1,xr1,xr1,SS,XW
	Q16ADD xr0,xr7,xr7,xr7,SS,XW
	S32SFL xr1,xr0,xr1,xr0,ptn3
	Q16ACC xr0,xr2,xr7,xr1,AA
	Q16ADD xr4,xr4,xr4,xr0,AA,XW
	Q16ADD xr3,xr3,xr3,xr0,SS,XW
	Q16ADD xr0,xr8,xr8,xr8,SS,XW
	S32SFL xr3,xr0,xr3,xr0,ptn3
	Q16ACC xr0,xr4,xr8,xr3,AA
	S32SFL xr0,xr3,xr1,xr1,ptn3
	Q16ADD xr0,xr1,xr13,xr11,AA,WW
	S32LDD xr1,$8,0
	S32LDD xr2,$8,4
	S32LDD xr3,$8,8
	pref 0,0($15)
	S32ALN xr4,xr2,xr1,$11
	S32ALN xr5,xr3,xr2,$11
	S32LDD xr1,$14,0
	S32LDD xr2,$14,4
	S32ALN xr6,xr5,xr4,$12
	S32ALN xr9,xr2,xr1,$25
	D32SLR xr1,xr5,xr0,xr0,8
	S32SFL xr0,xr1,xr5,xr7,ptn3
	Q8MUL xr2,xr4,xr14,xr1
	Q8MUL xr8,xr7,xr15,xr7
	Q8MUL xr4,xr6,xr14,xr3
	Q16ADD xr2,xr2,xr2,xr0,AA,XW
	Q16ADD xr0,xr1,xr1,xr1,SS,XW
	Q16ADD xr0,xr7,xr7,xr7,SS,XW
	S32SFL xr1,xr0,xr1,xr0,ptn3
	Q16ACC xr0,xr2,xr7,xr1,AA
	Q16ADD xr4,xr4,xr4,xr0,AA,XW
	Q16ADD xr3,xr3,xr3,xr0,SS,XW
	Q16ADD xr0,xr8,xr8,xr8,SS,XW
	S32SFL xr3,xr0,xr3,xr0,ptn3
	Q16ACC xr0,xr4,xr8,xr3,AA
	S32SFL xr0,xr3,xr1,xr1,ptn3
	Q16ADD xr0,xr1,xr13,xr12,AA,WW
	Q16SAR xr12,xr12,xr11,xr11,5
	Q16SAT xr1,xr12,xr11
	Q8AVGR xr1,xr1,xr9
	S32SDI xr1,$9,4
#NO_APP
	addiu $5,$14,4
	addiu $4,$8,4
	addiu $2,$10,4
#APP
	S32LDD xr1,$2,0
	S32LDD xr2,$2,4
	S32LDD xr3,$2,8
#NO_APP
	addiu $3,$24,4
#APP
	pref 0,0($3)
	S32ALN xr4,xr2,xr1,$13
	S32ALN xr5,xr3,xr2,$13
	S32ALN xr6,xr5,xr4,$12
	D32SLR xr1,xr5,xr0,xr0,8
	S32SFL xr0,xr1,xr5,xr7,ptn3
	Q8MUL xr2,xr4,xr14,xr1
	Q8MUL xr8,xr7,xr15,xr7
	Q8MUL xr4,xr6,xr14,xr3
	Q16ADD xr2,xr2,xr2,xr0,AA,XW
	Q16ADD xr0,xr1,xr1,xr1,SS,XW
	Q16ADD xr0,xr7,xr7,xr7,SS,XW
	S32SFL xr1,xr0,xr1,xr0,ptn3
	Q16ACC xr0,xr2,xr7,xr1,AA
	Q16ADD xr4,xr4,xr4,xr0,AA,XW
	Q16ADD xr3,xr3,xr3,xr0,SS,XW
	Q16ADD xr0,xr8,xr8,xr8,SS,XW
	S32SFL xr3,xr0,xr3,xr0,ptn3
	Q16ACC xr0,xr4,xr8,xr3,AA
	S32SFL xr0,xr3,xr1,xr1,ptn3
	Q16ADD xr0,xr1,xr13,xr11,AA,WW
	S32LDD xr1,$4,0
	S32LDD xr2,$4,4
	S32LDD xr3,$4,8
#NO_APP
	addiu $2,$15,4
#APP
	pref 0,0($2)
	S32ALN xr4,xr2,xr1,$11
	S32ALN xr5,xr3,xr2,$11
	S32LDD xr1,$5,0
	S32LDD xr2,$5,4
	S32ALN xr6,xr5,xr4,$12
	S32ALN xr9,xr2,xr1,$25
	D32SLR xr1,xr5,xr0,xr0,8
	S32SFL xr0,xr1,xr5,xr7,ptn3
	Q8MUL xr2,xr4,xr14,xr1
	Q8MUL xr8,xr7,xr15,xr7
	Q8MUL xr4,xr6,xr14,xr3
	Q16ADD xr2,xr2,xr2,xr0,AA,XW
	Q16ADD xr0,xr1,xr1,xr1,SS,XW
	Q16ADD xr0,xr7,xr7,xr7,SS,XW
	S32SFL xr1,xr0,xr1,xr0,ptn3
	Q16ACC xr0,xr2,xr7,xr1,AA
	Q16ADD xr4,xr4,xr4,xr0,AA,XW
	Q16ADD xr3,xr3,xr3,xr0,SS,XW
	Q16ADD xr0,xr8,xr8,xr8,SS,XW
	S32SFL xr3,xr0,xr3,xr0,ptn3
	Q16ACC xr0,xr4,xr8,xr3,AA
	S32SFL xr0,xr3,xr1,xr1,ptn3
	Q16ADD xr0,xr1,xr13,xr12,AA,WW
	Q16SAR xr12,xr12,xr11,xr11,5
	Q16SAT xr1,xr12,xr11
	Q8AVGR xr1,xr1,xr9
	S32SDI xr1,$9,4
#NO_APP
	li $2,8 # 0x8
	addiu $6,$6,1
	addu $24,$24,$7
	.set noreorder
	.set nomacro
	bne $6,$2,$L66
	addu $15,$15,$7
	.set macro
	.set reorder
	lw $16,0($sp)
	.set noreorder
	.set nomacro
	j $31
	addiu $sp,$sp,8
	.set macro
	.set reorder
	.end put_h264_qpel8_h_lowpass_avg_mxu
# 16x16 version: four calls to the 8x8 h_lowpass_avg routine (the last call is a tail call)
	.section .text.put_h264_qpel16_h_lowpass_avg_mxu,"ax",@progbits
	.align 2
	.align 5
	.ent put_h264_qpel16_h_lowpass_avg_mxu
	.type put_h264_qpel16_h_lowpass_avg_mxu, @function
put_h264_qpel16_h_lowpass_avg_mxu:
	.frame $sp,48,$31 # vars= 0, regs= 6/0, args= 24, gp= 0
	.mask 0x801f0000,-4
	.fmask 0x00000000,0
	.set noreorder
	.set nomacro
	addiu $sp,$sp,-48
	sw $20,40($sp)
	lw $20,64($sp)
	sw $19,36($sp)
	sw $18,32($sp)
	move $19,$7
	move $18,$6
	sw $17,28($sp)
	sw $16,24($sp)
	move $17,$5
	move $16,$4
	sw $31,44($sp)
	jal put_h264_qpel8_h_lowpass_avg_mxu
	sw $20,16($sp)
	addiu $4,$16,8
	addiu $5,$17,8
	move $6,$18
	move $7,$19
	jal put_h264_qpel8_h_lowpass_avg_mxu
	sw $20,16($sp)
	sll $2,$19,3
	sll $3,$18,3
	addu $17,$17,$2
	addu $16,$16,$3
	move $4,$16
	move $5,$17
	move $6,$18
	move $7,$19
	jal put_h264_qpel8_h_lowpass_avg_mxu
	sw $20,16($sp)
	sw $20,64($sp)
	addiu $4,$16,8
	addiu $5,$17,8
	move $6,$18
	move $7,$19
	lw $31,44($sp)
	lw $20,40($sp)
	lw $19,36($sp)
	lw $18,32($sp)
	lw $17,28($sp)
	lw $16,24($sp)
	j put_h264_qpel8_h_lowpass_avg_mxu
	addiu $sp,$sp,48
	.set macro
	.set reorder
	.end put_h264_qpel16_h_lowpass_avg_mxu
# full-pel (mc00) 4x4 copy: unaligned 4-byte loads (S32LDD + S32ALN), stores advance dst by the stride
	.section .text.put_h264_qpel4_mc00_c,"ax",@progbits
	.align 2
	.align 5
	.ent put_h264_qpel4_mc00_c
	.type put_h264_qpel4_mc00_c, @function
put_h264_qpel4_mc00_c:
	.frame $sp,0,$31 # vars= 0, regs= 0/0, args= 0, gp= 0
	.mask 0x00000000,0
	.fmask 0x00000000,0
	andi $7,$5,0x3
	li $2,-4 # 0xfffffffffffffffc
	li $3,4 # 0x4
	subu $3,$3,$7
	and $5,$5,$2
	subu $4,$4,$6
#APP
	S32LDD xr1,$5,0
	S32LDD xr2,$5,4
#NO_APP
	addu $5,$5,$6
#APP
	S32ALN xr1,xr2,xr1,$3
	S32SDIV xr1,$4,$6,0
	S32LDD xr1,$5,0
	S32LDD xr2,$5,4
#NO_APP
	addu $5,$6,$5
#APP
	S32ALN xr1,xr2,xr1,$3
	S32SDIV xr1,$4,$6,0
	S32LDD xr1,$5,0
	S32LDD xr2,$5,4
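
For reference, here is a minimal C sketch of the scalar computation that the hv_lowpass routines above vectorize with MXU: a horizontal 6-tap (1, -5, 20, 20, -5, 1) pass into 16-bit intermediates over 13 rows, then the same 6-tap filter vertically with (v + 512) >> 10 rounding, matching the 20/5/512 constants and the shift by 10 visible in the assembly. Function and variable names are illustrative, not taken from this file. The *_h_lowpass_avg_* routines instead do a single horizontal pass with (v + 16) >> 5 rounding and then a rounded byte average (Q8AVGR) with a second input row before storing.

#include <stdint.h>

/* Clamp an int to the 0..255 pixel range. */
static uint8_t clip_u8(int x) { return x < 0 ? 0 : x > 255 ? 255 : (uint8_t)x; }

/* Illustrative scalar equivalent of an 8x8 H+V quarter-pel lowpass (hypothetical
 * name).  src must be readable from 2 pixels/rows before the block to 3 pixels/rows
 * after it, as the 6-tap filter requires. */
static void qpel8_hv_lowpass_ref(uint8_t *dst, const uint8_t *src,
                                 int dst_stride, int src_stride)
{
    int16_t tmp[13][8];                       /* 8 output rows + 5 extra for the vertical taps */
    const uint8_t *s = src - 2 * src_stride;  /* start two rows above the block */

    /* Horizontal pass: 6-tap (1,-5,20,20,-5,1) into 16-bit intermediates. */
    for (int y = 0; y < 13; y++, s += src_stride)
        for (int x = 0; x < 8; x++)
            tmp[y][x] = (int16_t)((s[x - 2] + s[x + 3])
                                - 5 * (s[x - 1] + s[x + 2])
                                + 20 * (s[x] + s[x + 1]));

    /* Vertical pass: same taps, +512 rounding, >>10, clamp to 8 bits. */
    for (int y = 0; y < 8; y++, dst += dst_stride)
        for (int x = 0; x < 8; x++) {
            int v = (tmp[y][x] + tmp[y + 5][x])
                  - 5 * (tmp[y + 1][x] + tmp[y + 4][x])
                  + 20 * (tmp[y + 2][x] + tmp[y + 3][x]);
            dst[x] = clip_u8((v + 512) >> 10);
        }
}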