; rxinit.asm
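;
; Compiler-generated listing (TMS320C6x code generator). Judging from the
; symbols referenced below (gEdmaConfigXmt, hEdmaReloadXmtPing/Pong,
; gBufferXmtPong), this fragment appears to program the EDMA/QDMA transmit
; configuration for a ping/pong buffering scheme. The interleaved ";**"
; lines are the compiler's reconstruction of the original C statements, and
; the |nnn| annotations give the corresponding C source line numbers.
;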
STW .D2T2 B7,*+B4(8) ; |661|
STW .D2T2 B6,*+B4(12) ; |662|
STW .D2T2 B5,*+B4(16) ; |663|
STW .D2T2 B9,*+B4(20) ; |664|
EXTU .S1 A0,16,16,A4 ; |665|
|| STW .D2T1 A3,*B4 ; |665|
MVC .S2 CSR,B4 ; |279|
AND .S2 1,B0,B5 ; |279|
|| AND .L2 -2,B4,B4 ; |279|
OR .S2 B5,B4,B4 ; |279|
MVC .S2 B4,CSR ; |279|
MVK .S2 0xfffffffe,B10 ; |274|
B .S1 L6 ; |666|
NOP 5
; BRANCH OCCURS ; |666|
;** --------------------------------------------------------------------------*
L5:
;** -----------------------g2:
;** 273 ----------------------- gie = CSR&1u; // [6]
;** 274 ----------------------- CSR = CSR&0xfffffffeu; // [6]
;** 726 ----------------------- x0 = gEdmaConfigXmt.opt; // [19]
;** 728 ----------------------- x2 = gEdmaConfigXmt.cnt; // [19]
;** 729 ----------------------- x3 = gEdmaConfigXmt.dst; // [19]
;** 730 ----------------------- x4 = gEdmaConfigXmt.idx; // [19]
;** 733 ----------------------- *(volatile unsigned *)0x2000004u = gEdmaConfigXmt.src; // [19]
;** 734 ----------------------- *(base = (volatile unsigned *)0x2000008) = x2; // [19]
;** 735 ----------------------- base[1] = x3; // [19]
;** 736 ----------------------- base[2] = x4; // [19]
;** 737 ----------------------- base[6] = x0; // [19]
;** 279 ----------------------- CSR = CSR&0xfffffffeu|gie&1u; // [7]
;** ----------------------- K$39 = 0u;
;** ----------------------- U$40 = (unsigned short)U$2;
;** 274 ----------------------- K$28 = 0xfffffffeu; // [6]
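;
; The block below follows the usual C6000 critical-section idiom: save the
; GIE bit (CSR bit 0), clear it while the DMA registers are written, then
; restore it. The absolute addresses 0x02000004 and 0x02000008 are assumed
; here to be the memory-mapped QDMA SRC and CNT registers of the
; C621x/C671x family, so base[1], base[2] and base[6] land on DST, IDX and
; the pseudo OPT register whose write submits the transfer (assumption, not
; confirmed by this listing). A hedged C sketch of the equivalent sequence:
;
;   unsigned gie = CSR & 1u;              /* remember GIE               */
;   CSR &= ~1u;                           /* disable interrupts         */
;   *(volatile unsigned *)0x02000004u = gEdmaConfigXmt.src;
;   volatile unsigned *base = (volatile unsigned *)0x02000008u;
;   base[0] = gEdmaConfigXmt.cnt;
;   base[1] = gEdmaConfigXmt.dst;
;   base[2] = gEdmaConfigXmt.idx;
;   base[6] = gEdmaConfigXmt.opt;         /* OPT written last           */
;   CSR = (CSR & ~1u) | (gie & 1u);       /* restore GIE                */
;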
MVC .S2 CSR,B4 ; |273|
AND .S2 1,B4,B8 ; |273|
MVC .S2 CSR,B4 ; |274|
AND .S2 -2,B4,B4 ; |274|
MVC .S2 B4,CSR ; |274|
MVKL .S1 _gEdmaConfigXmt,A3 ; |726|
MVKH .S1 _gEdmaConfigXmt,A3 ; |726|
LDW .D1T1 *A3,A3 ; |726|
MVKL .S2 _gEdmaConfigXmt+8,B4 ; |728|
MVKH .S2 _gEdmaConfigXmt+8,B4 ; |728|
LDW .D2T2 *B4,B5 ; |728|
MVKL .S2 _gEdmaConfigXmt+12,B4 ; |729|
MVKH .S2 _gEdmaConfigXmt+12,B4 ; |729|
LDW .D2T2 *B4,B6 ; |729|
MVKL .S2 _gEdmaConfigXmt+16,B4 ; |730|
MVKH .S2 _gEdmaConfigXmt+16,B4 ; |730|
LDW .D2T2 *B4,B7 ; |730|
MVKL .S2 _gEdmaConfigXmt+4,B4 ; |733|
MVKH .S2 _gEdmaConfigXmt+4,B4 ; |733|
LDW .D2T2 *B4,B4 ; |733|
MVKL .S1 0x2000004,A4 ; |733|
MVKH .S1 0x2000004,A4 ; |733|
NOP 2
STW .D1T2 B4,*A4 ; |733|
MVKL .S2 0x2000008,B4 ; |734|
MVKH .S2 0x2000008,B4 ; |734|
STW .D2T2 B5,*B4 ; |734|
STW .D2T2 B6,*+B4(4) ; |735|
STW .D2T2 B7,*+B4(8) ; |736|
STW .D2T1 A3,*+B4(24) ; |737|
MVC .S2 CSR,B4 ; |279|
AND .S2 1,B8,B5 ; |279|
|| AND .L2 -2,B4,B4 ; |279|
OR .S2 B5,B4,B4 ; |279|
MVC .S2 B4,CSR ; |279|
ZERO .D1 A11
|| EXTU .S1 A0,16,16,A4
MVK .S2 0xfffffffe,B10 ; |274|
;** --------------------------------------------------------------------------*
L6:
;** -----------------------g3:
;** 645 ----------------------- U$4 = hEdmaReloadXmtPing; // [17]
;** 645 ----------------------- if ( (U$4 == (K$19 = 0x20000000u))|(U$4 == (K$21 = 0x10000000u)) ) goto g5; // [17]
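;
; hEdmaReloadXmtPing is compared against 0x20000000 and 0x10000000. These
; look like the sentinel handle values used by the TI CSL EDMA module for
; non-PaRAM handles (an assumption; this listing does not confirm it). If
; the handle matches either value, control branches to L7 and the QDMA
; registers are reprogrammed directly; otherwise the handle's low 16 bits
; are treated as an offset into the EDMA PaRAM table and a reload entry is
; written by the fall-through block below.
;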
MVKL .S1 _hEdmaReloadXmtPing,A0 ; |645|
MVKH .S1 _hEdmaReloadXmtPing,A0 ; |645|
LDW .D1T2 *A0,B8 ; |645|
ZERO .D1 A10 ; |645|
MVKH .S1 0x20000000,A10 ; |645|
ZERO .D2 B11 ; |645|
MVKH .S2 0x10000000,B11 ; |645|
CMPEQ .L2X B8,A10,B4 ; |645|
CMPEQ .L2 B8,B11,B5 ; |645|
OR .S2 B5,B4,B0 ; |645|
[ B0] B .S1 L7 ; |645|
NOP 5
; BRANCH OCCURS ; |645|
;** --------------------------------------------------------------------------*
;** 273 ----------------------- gie = CSR&1u; // [6]
;** 274 ----------------------- CSR = CSR&K$28; // [6]
;** 651 ----------------------- x0 = gEdmaConfigXmt.opt; // [17]
;** 652 ----------------------- x1 = gEdmaConfigXmt.src; // [17]
;** 653 ----------------------- x2 = gEdmaConfigXmt.cnt; // [17]
;** 654 ----------------------- x3 = gEdmaConfigXmt.dst; // [17]
;** 655 ----------------------- x4 = gEdmaConfigXmt.idx; // [17]
;** 656 ----------------------- x5 = gEdmaConfigXmt.rld; // [17]
;** 659 ----------------------- C$15 = (unsigned)(unsigned short)U$4+0x1a00000u; // [17]
;** 659 ----------------------- *(volatile unsigned *)C$15 = K$39; // [17]
;** 660 ----------------------- *((volatile unsigned *)C$15+4) = x1; // [17]
;** 661 ----------------------- *((volatile unsigned *)C$15+8) = x2; // [17]
;** 662 ----------------------- *((volatile unsigned *)C$15+12) = x3; // [17]
;** 663 ----------------------- *((volatile unsigned *)C$15+16) = x4; // [17]
;** 664 ----------------------- *((volatile unsigned *)C$15+20) = x5; // [17]
;** 665 ----------------------- U$95 = (unsigned short)U$4; // [17]
;** 665 ----------------------- *(volatile unsigned *)C$15 = x0; // [17]
;** 279 ----------------------- CSR = CSR&K$28|gie&1u; // [7]
;** 666 ----------------------- goto g6; // [17]
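;
; Reload-entry write for the Ping handle. The low 16 bits of the handle
; ((unsigned short)U$4) are added to 0x01A00000, assumed here to be the
; EDMA PaRAM base address on C621x/C671x devices, so C$15 points at a
; six-word parameter entry (opt/src/cnt/dst/idx/rld). OPT is first cleared
; and only written with its real value after the other five words, so the
; entry never appears valid while it is half written.
;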
MVC .S2 CSR,B4 ; |273|
AND .S2 1,B4,B0 ; |273|
MVC .S2 CSR,B4 ; |274|
AND .S2 B10,B4,B4 ; |274|
MVC .S2 B4,CSR ; |274|
MVKL .S1 _gEdmaConfigXmt,A0 ; |651|
MVKH .S1 _gEdmaConfigXmt,A0 ; |651|
LDW .D1T1 *A0,A0 ; |651|
MVKL .S2 _gEdmaConfigXmt+4,B4 ; |652|
MVKH .S2 _gEdmaConfigXmt+4,B4 ; |652|
LDW .D2T2 *B4,B9 ; |652|
MVKL .S2 _gEdmaConfigXmt+8,B4 ; |653|
MVKH .S2 _gEdmaConfigXmt+8,B4 ; |653|
LDW .D2T2 *B4,B1 ; |653|
MVKL .S2 _gEdmaConfigXmt+12,B4 ; |654|
MVKH .S2 _gEdmaConfigXmt+12,B4 ; |654|
LDW .D2T2 *B4,B6 ; |654|
MVKL .S2 _gEdmaConfigXmt+16,B4 ; |655|
MVKH .S2 _gEdmaConfigXmt+16,B4 ; |655|
LDW .D2T2 *B4,B5 ; |655|
MVKL .S2 _gEdmaConfigXmt+20,B4 ; |656|
MVKH .S2 _gEdmaConfigXmt+20,B4 ; |656|
LDW .D2T2 *B4,B7 ; |656|
EXTU .S2 B8,16,16,B2 ; |659|
|| ZERO .D2 B4 ; |659|
MVKH .S2 0x1a00000,B4 ; |659|
ADD .D2 B4,B2,B4 ; |659|
STW .D2T1 A11,*B4 ; |659|
STW .D2T2 B9,*+B4(4) ; |660|
STW .D2T2 B1,*+B4(8) ; |661|
STW .D2T2 B6,*+B4(12) ; |662|
STW .D2T2 B5,*+B4(16) ; |663|
STW .D2T2 B7,*+B4(20) ; |664|
EXTU .S2 B8,16,16,B9 ; |665|
|| STW .D2T1 A0,*B4 ; |665|
MVC .S2 CSR,B4 ; |279|
AND .S2 1,B0,B5 ; |279|
|| AND .L2 B10,B4,B4 ; |279|
OR .S2 B5,B4,B4 ; |279|
MVC .S2 B4,CSR ; |279|
B .S1 L8 ; |666|
NOP 5
; BRANCH OCCURS ; |666|
;** --------------------------------------------------------------------------*
L7:
;** -----------------------g5:
;** 273 ----------------------- gie = CSR&1u; // [6]
;** 274 ----------------------- CSR = CSR&K$28; // [6]
;** 726 ----------------------- x0 = gEdmaConfigXmt.opt; // [19]
;** 728 ----------------------- x2 = gEdmaConfigXmt.cnt; // [19]
;** 729 ----------------------- x3 = gEdmaConfigXmt.dst; // [19]
;** 730 ----------------------- x4 = gEdmaConfigXmt.idx; // [19]
;** 733 ----------------------- *(volatile unsigned *)0x2000004u = gEdmaConfigXmt.src; // [19]
;** 734 ----------------------- *(base = (volatile unsigned *)0x2000008) = x2; // [19]
;** 735 ----------------------- base[1] = x3; // [19]
;** 736 ----------------------- base[2] = x4; // [19]
;** 737 ----------------------- base[6] = x0; // [19]
;** 279 ----------------------- CSR = CSR&K$28|gie&1u; // [7]
;** ----------------------- U$95 = (unsigned short)U$4;
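;
; L7: same QDMA reprogramming sequence as the L5 path above, reached when
; the Ping handle matched one of the sentinel values tested at |645|.
;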
MVC .S2 CSR,B4 ; |273|
AND .S2 1,B4,B9 ; |273|
MVC .S2 CSR,B4 ; |274|
AND .S2 B10,B4,B4 ; |274|
MVC .S2 B4,CSR ; |274|
MVKL .S1 _gEdmaConfigXmt,A0 ; |726|
MVKH .S1 _gEdmaConfigXmt,A0 ; |726|
LDW .D1T1 *A0,A0 ; |726|
MVKL .S2 _gEdmaConfigXmt+8,B4 ; |728|
MVKH .S2 _gEdmaConfigXmt+8,B4 ; |728|
LDW .D2T2 *B4,B5 ; |728|
MVKL .S2 _gEdmaConfigXmt+12,B4 ; |729|
MVKH .S2 _gEdmaConfigXmt+12,B4 ; |729|
LDW .D2T2 *B4,B4 ; |729|
MVKL .S2 _gEdmaConfigXmt+16,B6 ; |730|
MVKH .S2 _gEdmaConfigXmt+16,B6 ; |730|
LDW .D2T2 *B6,B6 ; |730|
MVKL .S2 _gEdmaConfigXmt+4,B7 ; |733|
MVKH .S2 _gEdmaConfigXmt+4,B7 ; |733|
LDW .D2T2 *B7,B7 ; |733|
MVKL .S1 0x2000004,A3 ; |733|
MVKH .S1 0x2000004,A3 ; |733|
NOP 2
STW .D1T2 B7,*A3 ; |733|
MVKL .S2 0x2000008,B7 ; |734|
MVKH .S2 0x2000008,B7 ; |734|
STW .D2T2 B5,*B7 ; |734|
STW .D2T2 B4,*+B7(4) ; |735|
STW .D2T2 B6,*+B7(8) ; |736|
STW .D2T1 A0,*+B7(24) ; |737|
MVC .S2 CSR,B4 ; |279|
AND .S2 B10,B4,B4 ; |279|
|| AND .L2 1,B9,B5 ; |279|
OR .S2 B5,B4,B4 ; |279|
MVC .S2 B4,CSR ; |279|
EXTU .S2 B8,16,16,B9
;** --------------------------------------------------------------------------*
L8:
;** -----------------------g6:
;** 324 ----------------------- gEdmaConfigXmt.src = &gBufferXmtPong;
;** 645 ----------------------- U$5 = hEdmaReloadXmtPong; // [17]
;** 645 ----------------------- if ( (U$5 == K$19)|(U$5 == K$21) ) goto g8; // [17]
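;
; L8: gEdmaConfigXmt.src is repointed at gBufferXmtPong and the Ping
; sequence above is repeated for hEdmaReloadXmtPong: a reload-PaRAM write
; if it is a table handle, otherwise a branch to L9 (by analogy with L7,
; presumably the QDMA path; L9 is not shown in this fragment).
;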
.line 16
MVKL .S1 _gBufferXmtPong,A0 ; |324|
|| MVKL .S2 _gEdmaConfigXmt+4,B4 ; |324|
MVKH .S1 _gBufferXmtPong,A0 ; |324|
|| MVKH .S2 _gEdmaConfigXmt+4,B4 ; |324|
STW .D2T1 A0,*B4 ; |324|
MVKL .S1 _hEdmaReloadXmtPong,A0 ; |645|
MVKH .S1 _hEdmaReloadXmtPong,A0 ; |645|
LDW .D1T2 *A0,B8 ; |645|
NOP 4
CMPEQ .L2 B8,B11,B5 ; |645|
CMPEQ .L2X B8,A10,B4 ; |645|
OR .S2 B5,B4,B0 ; |645|
[ B0] B .S1 L9 ; |645|
NOP 5
; BRANCH OCCURS ; |645|
;** --------------------------------------------------------------------------*
;** 273 ----------------------- gie = CSR&1u; // [6]
;** 274 ----------------------- CSR = CSR&K$28; // [6]
;** 651 ----------------------- x0 = gEdmaConfigXmt.opt; // [17]
;** 652 ----------------------- x1 = gEdmaConfigXmt.src; // [17]
;** 653 ----------------------- x2 = gEdmaConfigXmt.cnt; // [17]
;** 654 ----------------------- x3 = gEdmaConfigXmt.dst; // [17]
;** 655 ----------------------- x4 = gEdmaConfigXmt.idx; // [17]
;** 656 ----------------------- x5 = gEdmaConfigXmt.rld; // [17]
;** 659 ----------------------- C$14 = (unsigned)(unsigned short)U$5+0x1a00000u; // [17]
;** 659 ----------------------- *(volatile unsigned *)C$14 = K$39; // [17]
;** 660 ----------------------- *((volatile unsigned *)C$14+4) = x1; // [17]
;** 661 ----------------------- *((volatile unsigned *)C$14+8) = x2; // [17]
;** 662 ----------------------- *((volatile unsigned *)C$14+12) = x3; // [17]
;** 663 ----------------------- *((volatile unsigned *)C$14+16) = x4; // [17]
;** 664 ----------------------- *((volatile unsigned *)C$14+20) = x5; // [17]
;** 665 ----------------------- U$141 = (unsigned short)U$5; // [17]
;** 665 ----------------------- *(volatile unsigned *)C$14 = x0; // [17]
;** 279 ----------------------- CSR = CSR&K$28|gie&1u; // [7]
;** 666 ----------------------- goto g9; // [17]
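;
; Reload-entry write for the Pong handle, identical in structure to the
; Ping write above.
;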
MVC .S2 CSR,B4 ; |273|
AND .S2 1,B4,B7 ; |273|
MVC .S2 CSR,B4 ; |274|
AND .S2 B10,B4,B4 ; |274|
MVC .S2 B4,CSR ; |274|
MVKL .S1 _gEdmaConfigXmt,A0 ; |651|
MVKH .S1 _gEdmaConfigXmt,A0 ; |651|