| lb1sf68.asm
#ifndef __mcoldfire__
	moveml	sp@+,a2-a3
#else
	movel	sp@+,a4
	movel	sp@+,a3
	movel	sp@+,a2
#endif
| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit, so we check for this:
	btst	IMM (DBL_MANT_DIG+1),d0
	beq	1f
#ifndef __mcoldfire__
	lsrl	IMM (1),d0
	roxrl	IMM (1),d1
	roxrl	IMM (1),d2
	roxrl	IMM (1),d3
	addw	IMM (1),d4
#else
	lsrl	IMM (1),d3
	btst	IMM (0),d2
	beq	10f
	bset	IMM (31),d3
10:	lsrl	IMM (1),d2
	btst	IMM (0),d1
	beq	11f
	bset	IMM (31),d2
11:	lsrl	IMM (1),d1
	btst	IMM (0),d0
	beq	12f
	bset	IMM (31),d1
12:	lsrl	IMM (1),d0
	addl	IMM (1),d4
#endif
1:	lea	pc@(Ladddf$5),a0	| to return from rounding routine
	PICLEA	SYM (_fpCCR),a1		| check the rounding mode
#ifdef __mcoldfire__
	clrl	d6
#endif
	movew	a1@(6),d6		| rounding mode in d6
	beq	Lround$to$nearest
#ifndef __mcoldfire__
	cmpw	IMM (ROUND_TO_PLUS),d6
#else
	cmpl	IMM (ROUND_TO_PLUS),d6
#endif
	bhi	Lround$to$minus
	blt	Lround$to$zero
	bra	Lround$to$plus

Ladddf$5:
| Put back the exponent and check for overflow
#ifndef __mcoldfire__
	cmpw	IMM (0x7ff),d4		| is the exponent big?
#else
	cmpl	IMM (0x7ff),d4		| is the exponent big?
#endif
	bge	1f
	bclr	IMM (DBL_MANT_DIG-1),d0
#ifndef __mcoldfire__
	lslw	IMM (4),d4		| put exponent back into position
#else
	lsll	IMM (4),d4		| put exponent back into position
#endif
	swap	d0			|
#ifndef __mcoldfire__
	orw	d4,d0			|
#else
	orl	d4,d0			|
#endif
	swap	d0			|
	bra	Ladddf$ret
1:	movew	IMM (ADD),d5
	bra	Ld$overflow

Lsubdf$0:
| Here we do the subtraction.
#ifndef __mcoldfire__
	exg	d7,a0			| put sign back in a0
	exg	d6,a3			|
#else
	movel	d7,a4
	movel	a0,d7
	movel	a4,a0
	movel	d6,a4
	movel	a3,d6
	movel	a4,a3
#endif
	subl	d7,d3			|
	subxl	d6,d2			|
	subxl	d5,d1			|
	subxl	d4,d0			|
	beq	Ladddf$ret$1		| if zero just exit
	bpl	1f			| if positive skip the following
	movel	a0,d7			|
	bchg	IMM (31),d7		| change sign bit in d7
	movel	d7,a0			|
	negl	d3			|
	negxl	d2			|
	negxl	d1			| and negate result
	negxl	d0			|
1:	movel	a2,d4			| return exponent to d4
	movel	a0,d7
	andl	IMM (0x80000000),d7	| isolate sign bit
#ifndef __mcoldfire__
	moveml	sp@+,a2-a3		|
#else
	movel	sp@+,a4
	movel	sp@+,a3
	movel	sp@+,a2
#endif
| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit, so we check for this:
	btst	IMM (DBL_MANT_DIG+1),d0
	beq	1f
#ifndef __mcoldfire__
	lsrl	IMM (1),d0
	roxrl	IMM (1),d1
	roxrl	IMM (1),d2
	roxrl	IMM (1),d3
	addw	IMM (1),d4
#else
	lsrl	IMM (1),d3
	btst	IMM (0),d2
	beq	10f
	bset	IMM (31),d3
10:	lsrl	IMM (1),d2
	btst	IMM (0),d1
	beq	11f
	bset	IMM (31),d2
11:	lsrl	IMM (1),d1
	btst	IMM (0),d0
	beq	12f
	bset	IMM (31),d1
12:	lsrl	IMM (1),d0
	addl	IMM (1),d4
#endif
1:	lea	pc@(Lsubdf$1),a0	| to return from rounding routine
	PICLEA	SYM (_fpCCR),a1		| check the rounding mode
#ifdef __mcoldfire__
	clrl	d6
#endif
	movew	a1@(6),d6		| rounding mode in d6
	beq	Lround$to$nearest
#ifndef __mcoldfire__
	cmpw	IMM (ROUND_TO_PLUS),d6
#else
	cmpl	IMM (ROUND_TO_PLUS),d6
#endif
	bhi	Lround$to$minus
	blt	Lround$to$zero
	bra	Lround$to$plus

Lsubdf$1:
| Put back the exponent and sign (we don't have overflow). '
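| A minimal C-style sketch of the repacking done by the next few
| instructions (illustrative only; the names hi and exp are hypothetical,
| not taken from this file):
|
|	hi &= ~(1UL << 20);		 /* clear the hidden mantissa bit   */
|	hi |= (unsigned long) exp << 20; /* biased exponent into bits 20-30 */
|
| where hi stands for the high long of the result (d0) and exp for the
| biased exponent (d4); the shift by 20 is carried out as a swap plus a
| 4-bit shift, and the sign bit is OR-ed back in later, at Ladddf$ret.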
	bclr	IMM (DBL_MANT_DIG-1),d0
#ifndef __mcoldfire__
	lslw	IMM (4),d4		| put exponent back into position
#else
	lsll	IMM (4),d4		| put exponent back into position
#endif
	swap	d0			|
#ifndef __mcoldfire__
	orw	d4,d0			|
#else
	orl	d4,d0			|
#endif
	swap	d0			|
	bra	Ladddf$ret

| If one of the numbers was too small (difference of exponents >=
| DBL_MANT_DIG+1) we return the other (and now we don't have to '
| check for finiteness or zero).
Ladddf$a$small:
#ifndef __mcoldfire__
	moveml	sp@+,a2-a3
#else
	movel	sp@+,a4
	movel	sp@+,a3
	movel	sp@+,a2
#endif
	movel	a6@(16),d0
	movel	a6@(20),d1
	PICLEA	SYM (_fpCCR),a0
	movew	IMM (0),a0@
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7		| restore data registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6			| and return
	rts

Ladddf$b$small:
#ifndef __mcoldfire__
	moveml	sp@+,a2-a3
#else
	movel	sp@+,a4
	movel	sp@+,a3
	movel	sp@+,a2
#endif
	movel	a6@(8),d0
	movel	a6@(12),d1
	PICLEA	SYM (_fpCCR),a0
	movew	IMM (0),a0@
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7		| restore data registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6			| and return
	rts

Ladddf$a$den:
	movel	d7,d4			| d7 contains 0x00200000
	bra	Ladddf$1

Ladddf$b$den:
	movel	d7,d5			| d7 contains 0x00200000
	notl	d6
	bra	Ladddf$2

Ladddf$b:
| Return b (if a is zero)
	movel	d2,d0
	movel	d3,d1
	bra	1f
Ladddf$a:
	movel	a6@(8),d0
	movel	a6@(12),d1
1:	movew	IMM (ADD),d5
| Check for NaN and +/-INFINITY.
	movel	d0,d7			|
	andl	IMM (0x80000000),d7	|
	bclr	IMM (31),d0		|
	cmpl	IMM (0x7ff00000),d0	|
	bge	2f			|
	movel	d0,d0			| check for zero, since we don't '
	bne	Ladddf$ret		| want to return -0 by mistake
	bclr	IMM (31),d7		|
	bra	Ladddf$ret		|
2:	andl	IMM (0x000fffff),d0	| check for NaN (nonzero fraction)
	orl	d1,d0			|
	bne	Ld$inop			|
	bra	Ld$infty		|

Ladddf$ret$1:
#ifndef __mcoldfire__
	moveml	sp@+,a2-a3		| restore regs and exit
#else
	movel	sp@+,a4
	movel	sp@+,a3
	movel	sp@+,a2
#endif

Ladddf$ret:
| Normal exit.
	PICLEA	SYM (_fpCCR),a0
	movew	IMM (0),a0@
	orl	d7,d0			| put sign bit back
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6
	rts

Ladddf$ret$den:
| Return a denormalized number.
#ifndef __mcoldfire__
	lsrl	IMM (1),d0		| shift right once more
	roxrl	IMM (1),d1		|
#else
	lsrl	IMM (1),d1
	btst	IMM (0),d0
	beq	10f
	bset	IMM (31),d1
10:	lsrl	IMM (1),d0
#endif
	bra	Ladddf$ret

Ladddf$nf:
	movew	IMM (ADD),d5
| This could be faster but it is not worth the effort, since it is not
| executed very often.  We sacrifice speed for clarity here.
	movel	a6@(8),d0		| get the numbers back (remember that we
	movel	a6@(12),d1		| did some processing already)
	movel	a6@(16),d2		|
	movel	a6@(20),d3		|
	movel	IMM (0x7ff00000),d4	| useful constant (INFINITY)
	movel	d0,d7			| save sign bits
	movel	d2,d6			|
	bclr	IMM (31),d0		| clear sign bits
	bclr	IMM (31),d2		|
| We know that one of them is either NaN or +/-INFINITY.
| Check for NaN (if either one is NaN return NaN)
	cmpl	d4,d0			| check first a (d0)
	bhi	Ld$inop			| if d0 > 0x7ff00000 or equal and
	bne	2f
	tstl	d1			| d1 > 0, a is NaN
	bne	Ld$inop			|
2:	cmpl	d4,d2			| check now b (d2)
	bhi	Ld$inop			|
	bne	3f
	tstl	d3			|
	bne	Ld$inop			|
3:
| Now comes the check for +/-INFINITY.  We know that both are (maybe not
| finite) numbers, but we have to check whether both are infinite and
| whether they are effectively being added or subtracted (i.e. whether
| their signs agree).
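| A minimal sketch of the rule applied below (an illustrative pseudo-C
| comment; the operands are taken with the signs they carry at this point):
|
|	if (sign(a) == sign(b))
|		return infinity with the common sign;
|	else if (a and b are both infinite)
|		return NaN;			/* INF - INF is invalid */
|	else
|		return the infinite operand with its own sign;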
	eorl	d7,d6			| to check sign bits
	bmi	1f
	andl	IMM (0x80000000),d7	| get (common) sign bit
	bra	Ld$infty
1:
| We know one (or both) are infinite, so we test for equality between the
| two numbers (if they are equal they both have to be infinite, so we
| return NaN).
	cmpl	d2,d0			| are both infinite?
	bne	1f			| if d0 <> d2 they are not equal
	cmpl	d3,d1			| if d0 == d2 test d3 and d1
	beq	Ld$inop			| if equal return NaN
1:	andl	IMM (0x80000000),d7	| get a's sign bit '
	cmpl	d4,d0			| test now for infinity
	beq	Ld$infty		| if a is INFINITY return with this sign
	bchg	IMM (31),d7		| else we know b is INFINITY and has
	bra	Ld$infty		| the opposite sign

|=============================================================================
| __muldf3
|=============================================================================

| double __muldf3(double, double);
SYM (__muldf3):
#ifndef __mcoldfire__
	link	a6,IMM (0)
	moveml	d2-d7,sp@-
#else
	link	a6,IMM (-24)
	moveml	d2-d7,sp@
#endif
	movel	a6@(8),d0		| get a into d0-d1
	movel	a6@(12),d1		|
	movel	a6@(16),d2		| and b into d2-d3
	movel	a6@(20),d3		|
	movel	d0,d7			| d7 will hold the sign of the product
	eorl	d2,d7			|
	andl	IMM (0x80000000),d7	|
	movel	d7,a0			| save sign bit into a0
	movel	IMM (0x7ff00000),d7	| useful constant (+INFINITY)
	movel	d7,d6			| another (mask for fraction)
	notl	d6			|
	bclr	IMM (31),d0		| get rid of a's sign bit '
	movel	d0,d4			|
	orl	d1,d4			|
	beq	Lmuldf$a$0		| branch if a is zero
	movel	d0,d4			|
	bclr	IMM (31),d2		| get rid of b's sign bit '
	movel	d2,d5			|
	orl	d3,d5			|
	beq	Lmuldf$b$0		| branch if b is zero
	movel	d2,d5			|
	cmpl	d7,d0			| is a big?
	bhi	Lmuldf$inop		| if a is NaN return NaN
	beq	Lmuldf$a$nf		| we still have to check d1 and b ...
	cmpl	d7,d2			| now compare b with INFINITY
	bhi	Lmuldf$inop		| is b NaN?
	beq	Lmuldf$b$nf		| we still have to check d3 ...
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d4 and d5.
	andl	d7,d4			| isolate exponent in d4
	beq	Lmuldf$a$den		| if exponent zero, have denormalized
	andl	d6,d0			| isolate fraction
	orl	IMM (0x00100000),d0	| and put hidden bit back
	swap	d4			| I like exponents in the first byte
#ifndef __mcoldfire__
	lsrw	IMM (4),d4		|
#else
	lsrl	IMM (4),d4		|
#endif
Lmuldf$1:
	andl	d7,d5			|
	beq	Lmuldf$b$den		|
	andl	d6,d2			|
	orl	IMM (0x00100000),d2	| and put hidden bit back
	swap	d5			|
#ifndef __mcoldfire__
	lsrw	IMM (4),d5		|
#else
	lsrl	IMM (4),d5		|
#endif
Lmuldf$2:				|
#ifndef __mcoldfire__
	addw	d5,d4			| add exponents
	subw	IMM (D_BIAS+1),d4	| and subtract bias (plus one)
#else
	addl	d5,d4			| add exponents
	subl	IMM (D_BIAS+1),d4	| and subtract bias (plus one)
#endif
| We are now ready to do the multiplication.  The situation is as follows:
| both a and b have bit 52 (bit 20 of d0 and d2) set (even if they were
| denormalized to start with!), which means that in the product bit 104
| (which will correspond to bit 8 of the fourth long) is set.
| Here we have to do the product.
| To do it we have to juggle the registers back and forth, as there are not
| enough to keep everything in them.  So we use the address registers to
| keep some intermediate data.
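| A minimal C-style sketch of the shift-and-add loop below (illustrative
| only; the names prod, a and b are hypothetical, not from this file):
|
|	prod = 0;			/* 128-bit partial sum in d0-d3   */
|	for (i = 0; i < DBL_MANT_DIG; i++) {
|		prod <<= 1;		/* shift the sum left once        */
|		bit = b >> 63;		/* top bit of the multiplier      */
|		b <<= 1;
|		if (bit)
|			prod += a;	/* add the 53-bit multiplicand    */
|	}
|
| Below, the multiplier b is first left-aligned in d6-d7 (its leading bit
| is moved up to bit 31 of d2 before the copy), the multiplicand a sits in
| d4-d5, a2 holds a zero used to propagate carries into d1 and d0, and a1
| holds the loop counter.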
#ifndef __mcoldfire__
	moveml	a2-a3,sp@-		| save a2 and a3 for temporary use
#else
	movel	a2,sp@-
	movel	a3,sp@-
	movel	a4,sp@-
#endif
	movel	IMM (0),a2		| a2 is a null register
	movel	d4,a3			| and a3 will preserve the exponent
| First, shift d2-d3 so bit 20 becomes bit 31:
#ifndef __mcoldfire__
	rorl	IMM (5),d2		| rotate d2 5 places right
	swap	d2			| and swap it
	rorl	IMM (5),d3		| do the same thing with d3
	swap	d3			|
	movew	d3,d6			| get the rightmost 11 bits of d3
	andw	IMM (0x07ff),d6		|
	orw	d6,d2			| and put them into d2
	andw	IMM (0xf800),d3		| clear those bits in d3
#else
	moveq	IMM (11),d7		| left shift d2 11 bits
	lsll	d7,d2
	movel	d3,d6			| get a copy of d3
	lsll	d7,d3			| left shift d3 11 bits
	andl	IMM (0xffe00000),d6	| get the top 11 bits of d3
	moveq	IMM (21),d7		| right shift them 21 bits
	lsrl	d7,d6
	orl	d6,d2			| stick them at the end of d2
#endif
	movel	d2,d6			| move b into d6-d7
	movel	d3,d7			| move a into d4-d5
	movel	d0,d4			| and clear d0-d1-d2-d3 (to put result)
	movel	d1,d5			|
	movel	IMM (0),d3		|
	movel	d3,d2			|
	movel	d3,d1			|
	movel	d3,d0			|
| We use a1 as counter:
	movel	IMM (DBL_MANT_DIG-1),a1
#ifndef __mcoldfire__
	exg	d7,a1
#else
	movel	d7,a4
	movel	a1,d7
	movel	a4,a1
#endif
1:
#ifndef __mcoldfire__
	exg	d7,a1			| put counter back in a1
#else
	movel	d7,a4
	movel	a1,d7
	movel	a4,a1
#endif
	addl	d3,d3			| shift sum once left
	addxl	d2,d2			|
	addxl	d1,d1			|
	addxl	d0,d0			|
	addl	d7,d7			|
	addxl	d6,d6			|
	bcc	2f			| if bit clear skip the following
#ifndef __mcoldfire__
	exg	d7,a2			|
#else
	movel	d7,a4
	movel	a2,d7
	movel	a4,a2
#endif
	addl	d5,d3			| else add a to the sum
	addxl	d4,d2			|
	addxl	d7,d1			|
	addxl	d7,d0			|
#ifndef __mcoldfire__
	exg	d7,a2			|
#else
	movel	d7,a4
	movel	a2,d7
	movel	a4,a2
#endif
2:
#ifndef __mcoldfire__
	exg	d7,a1			| put counter in d7
	dbf	d7,1b			| decrement and branch
#else
	movel	d7,a4
	movel	a1,d7
	movel	a4,a1
	subql	IMM (1),d7
	bpl	1b
#endif
	movel	a3,d4			| restore exponent
#ifndef __mcoldfire__
	moveml	sp@+,a2-a3