checksum.s
	andcc	%g1, 0xf, %o3
	andcc	%o0, 0x1, %g0
	bne	ccslow
	 andcc	%o0, 0x2, %g0
	be	1f
	 andcc	%o0, 0x4, %g0
	EX(lduh	[%o0 + 0x00], %g4, add %g1, 0,#)
	sub	%g1, 2, %g1
	EX2(sth	%g4, [%o1 + 0x00],#)
	add	%o0, 2, %o0
	sll	%g4, 16, %g4
	addcc	%g4, %g7, %g7
	add	%o1, 2, %o1
	srl	%g7, 16, %g3
	addx	%g0, %g3, %g4
	sll	%g7, 16, %g7
	sll	%g4, 16, %g3
	srl	%g7, 16, %g7
	andcc	%o0, 0x4, %g0
	or	%g3, %g7, %g7
1:	be	3f
	 andcc	%g1, 0xffffff80, %g0
	EX(ld	[%o0 + 0x00], %g4, add %g1, 0,#)
	sub	%g1, 4, %g1
	EX2(st	%g4, [%o1 + 0x00],#)
	add	%o0, 4, %o0
	addcc	%g4, %g7, %g7
	add	%o1, 4, %o1
	addx	%g0, %g7, %g7
	b	3f
	 andcc	%g1, 0xffffff80, %g0

	/* Sun, you just can't beat me, you just can't.  Stop trying,
	 * give up.  I'm serious, I am going to kick the living shit
	 * out of you, game over, lights out.
	 */

	.align	8
	.globl	C_LABEL(__csum_partial_copy_sparc_generic)
C_LABEL(__csum_partial_copy_sparc_generic):
					/* %o0=src, %o1=dest, %g1=len, %g7=sum */
	xor	%o0, %o1, %o4		! get changing bits
	andcc	%o4, 3, %g0		! check for mismatched alignment
	bne	ccslow			! better this than unaligned/fixups
	 andcc	%o0, 7, %g0		! need to align things?
	bne	cc_dword_align		! yes, we check for short lengths there
	 andcc	%g1, 0xffffff80, %g0	! can we use unrolled loop?
3:	be	3f			! nope, less than one loop remains
	 andcc	%o1, 4, %g0		! dest aligned on 4 or 8 byte boundary?
	be	ccdbl + 4		! 8 byte aligned, kick ass
5:	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
10:	EXT(5b, 10b, 20f,#)		! note for exception handling
	sub	%g1, 128, %g1		! detract from length
	addx	%g0, %g7, %g7		! add in last carry bit
	andcc	%g1, 0xffffff80, %g0	! more to csum?
	add	%o0, 128, %o0		! advance src ptr
	bne	5b			! we did not go negative, continue looping
	 add	%o1, 128, %o1		! advance dest ptr
3:	andcc	%g1, 0x70, %o2		! can use table?
ccmerge:be	ccte			! nope, go and check for end cruft
	 andcc	%g1, 0xf, %o3		! get low bits of length (clears carry btw)
	srl	%o2, 1, %o4		! begin negative offset computation
	sethi	%hi(12f), %o5		! set up table ptr end
	add	%o0, %o2, %o0		! advance src ptr
	sub	%o5, %o4, %o5		! continue table calculation
	sll	%o2, 1, %g2		! constant multiplies are fun...
	sub	%o5, %g2, %o5		! some more adjustments
	jmp	%o5 + %lo(12f)		! jump into it, duff style, wheee...
	 add	%o1, %o2, %o1		! advance dest ptr (carry is clear btw)
cctbl:	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
12:	EXT(cctbl, 12b, 22f,#)		! note for exception table handling
	addx	%g0, %g7, %g7
	andcc	%o3, 0xf, %g0		! check for low bits set
ccte:	bne	cc_end_cruft		! something left, handle it out of band
	 andcc	%o3, 8, %g0		! begin checks for that code
	retl				! return
	 mov	%g7, %o0		! give em the computed checksum
ccdbl:	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
11:	EXT(ccdbl, 11b, 21f,#)		! note for exception table handling
	sub	%g1, 128, %g1		! detract from length
	addx	%g0, %g7, %g7		! add in last carry bit
	andcc	%g1, 0xffffff80, %g0	! more to csum?
	add	%o0, 128, %o0		! advance src ptr
	bne	ccdbl			! we did not go negative, continue looping
	 add	%o1, 128, %o1		! advance dest ptr
	b	ccmerge			! finish it off, above
	 andcc	%g1, 0x70, %o2		! can use table? (clears carry btw)

ccslow:	cmp	%g1, 0
	mov	0, %g5
	bleu	4f
	 andcc	%o0, 1, %o5
	be,a	1f
	 srl	%g1, 1, %g4
	sub	%g1, 1, %g1
	EX(ldub	[%o0], %g5, add %g1, 1,#)
	add	%o0, 1, %o0
	EX2(stb	%g5, [%o1],#)
	srl	%g1, 1, %g4
	add	%o1, 1, %o1
1:	cmp	%g4, 0
	be,a	3f
	 andcc	%g1, 1, %g0
	andcc	%o0, 2, %g0
	be,a	1f
	 srl	%g4, 1, %g4
	EX(lduh	[%o0], %o4, add %g1, 0,#)
	sub	%g1, 2, %g1
	srl	%o4, 8, %g2
	sub	%g4, 1, %g4
	EX2(stb	%g2, [%o1],#)
	add	%o4, %g5, %g5
	EX2(stb	%o4, [%o1 + 1],#)
	add	%o0, 2, %o0
	srl	%g4, 1, %g4
	add	%o1, 2, %o1
1:	cmp	%g4, 0
	be,a	2f
	 andcc	%g1, 2, %g0
	EX3(ld	[%o0], %o4,#)
5:	srl	%o4, 24, %g2
	srl	%o4, 16, %g3
	EX2(stb	%g2, [%o1],#)
	srl	%o4, 8, %g2
	EX2(stb	%g3, [%o1 + 1],#)
	add	%o0, 4, %o0
	EX2(stb	%g2, [%o1 + 2],#)
	addcc	%o4, %g5, %g5
	EX2(stb	%o4, [%o1 + 3],#)
	addx	%g5, %g0, %g5	! I am now too lazy to optimize this (question if it
	add	%o1, 4, %o1	! is worthy). Maybe some day - with the sll/srl
	subcc	%g4, 1, %g4	! tricks
	bne,a	5b
	 EX3(ld	[%o0], %o4,#)
	sll	%g5, 16, %g2
	srl	%g5, 16, %g5
	srl	%g2, 16, %g2
	andcc	%g1, 2, %g0
	add	%g2, %g5, %g5
2:	be,a	3f
	 andcc	%g1, 1, %g0
	EX(lduh	[%o0], %o4, and %g1, 3,#)
	andcc	%g1, 1, %g0
	srl	%o4, 8, %g2
	add	%o0, 2, %o0
	EX2(stb	%g2, [%o1],#)
	add	%g5, %o4, %g5
	EX2(stb	%o4, [%o1 + 1],#)
	add	%o1, 2, %o1
3:	be,a	1f
	 sll	%g5, 16, %o4
	EX(ldub	[%o0], %g2, add %g0, 1,#)
	sll	%g2, 8, %o4
	EX2(stb	%g2, [%o1],#)
	add	%g5, %o4, %g5
	sll	%g5, 16, %o4
1:	addcc	%o4, %g5, %g5
	srl	%g5, 16, %o4
	addx	%g0, %o4, %g5
	orcc	%o5, %g0, %g0
	be	4f
	 srl	%g5, 8, %o4
	and	%g5, 0xff, %g2
	and	%o4, 0xff, %o4
	sll	%g2, 8, %g2
	or	%g2, %o4, %g5
4:	addcc	%g7, %g5, %g7
	retl
	 addx	%g0, %g7, %o0
C_LABEL(__csum_partial_copy_end):

/* We do these strange calculations for the csum_*_from_user case only, ie.
 * we only bother with faults on loads... */

/* o2 = ((g2%20)&3)*8
 * o3 = g1 - (g2/20)*32 - o2 */
20:
	cmp	%g2, 20
	blu,a	1f
	 and	%g2, 3, %o2
	sub	%g1, 32, %g1
	b	20b
	 sub	%g2, 20, %g2
1:
	sll	%o2, 3, %o2
	b	31f
	 sub	%g1, %o2, %o3

/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
 * o3 = g1 - (g2/16)*32 - o2 */
21:
	andcc	%g2, 15, %o3
	srl	%g2, 4, %g2
	be,a	1f
	 clr	%o2
	add	%o3, 1, %o3
	and	%o3, 14, %o3
	sll	%o3, 3, %o2
1:
	sll	%g2, 5, %g2
	sub	%g1, %g2, %o3
	b	31f
	 sub	%o3, %o2, %o3

/* o0 += (g2/10)*16 - 0x70
 * o1 += (g2/10)*16 - 0x70
 * o2 = (g2 % 10) ? 8 : 0
 * o3 += 0x70 - (g2/10)*16 - o2 */
22:
	cmp	%g2, 10
	blu,a	1f
	 sub	%o0, 0x70, %o0
	add	%o0, 16, %o0
	add	%o1, 16, %o1
	sub	%o3, 16, %o3
	b	22b
	 sub	%g2, 10, %g2
1:
	sub	%o1, 0x70, %o1
	add	%o3, 0x70, %o3
	clr	%o2
	tst	%g2
	bne,a	1f
	 mov	8, %o2
1:
	b	31f
	 sub	%o3, %o2, %o3
96:
	and	%g1, 3, %g1
	sll	%g4, 2, %g4
	add	%g1, %g4, %o3
30:
/* %o1 is dst
 * %o3 is # bytes to zero out
 * %o4 is faulting address
 * %o5 is %pc where fault occurred */
	clr	%o2
31:
/* %o0 is src
 * %o1 is dst
 * %o2 is # of bytes to copy from src to dst
 * %o3 is # bytes to zero out
 * %o4 is faulting address
 * %o5 is %pc where fault occurred */
	save	%sp, -104, %sp
	mov	%i5, %o0
	mov	%i7, %o1
	mov	%i4, %o2
	call	C_LABEL(lookup_fault)
	 mov	%g7, %i4
	cmp	%o0, 2
	bne	1f
	 add	%g0, -EFAULT, %i5
	tst	%i2
	be	2f
	 mov	%i0, %o1
	mov	%i1, %o0
5:
	call	C_LABEL(__memcpy)
	 mov	%i2, %o2
	tst	%o0
	bne,a	2f
	 add	%i3, %i2, %i3
	add	%i1, %i2, %i1
2:
	mov	%i1, %o0
6:
	call	C_LABEL(__bzero)
	 mov	%i3, %o1
1:
	ld	[%sp + 168], %o2	! struct_ptr of parent
	st	%i5, [%o2]
	ret
	 restore

	.section __ex_table,#alloc
	.align	4
	.word	5b,2
	.word	6b,2
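/* A note for readers who don't read SPARC assembly: the routine above
 * interleaves the copy with the 32-bit accumulation used for the Internet
 * (ones' complement) checksum. Each addcc/addx pair adds a word and folds
 * the carry bit back into the running sum, and the sll/srl-by-16 sequences
 * fold the high half into the low half. The C below is a minimal sketch of
 * that idea only; the function name, the simplified byte-order and tail
 * handling, and the final fold are illustrative, not part of the kernel
 * interface. The kernel routine additionally has to recover from faulting
 * user-space loads, which is what the 20/21/22/96 fixup blocks above
 * compute remaining lengths for; the sketch ignores that entirely. */

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper, for illustration only: copy len bytes from src to
 * dst while accumulating a checksum, starting from an initial sum. */
static uint32_t csum_copy_sketch(const uint8_t *src, uint8_t *dst,
                                 size_t len, uint32_t sum)
{
	while (len >= 2) {
		uint16_t w;
		memcpy(&w, src, 2);	/* load one 16-bit word */
		memcpy(dst, src, 2);	/* copy as we go, like the EX()/EX2() pairs */
		sum += w;		/* accumulate into the 32-bit running sum */
		src += 2;
		dst += 2;
		len -= 2;
	}
	if (len) {			/* odd trailing byte, cf. the cc_end_cruft/ccslow paths */
		*dst = *src;
		sum += *src;		/* byte-order placement simplified here */
	}
	/* Fold carries accumulated above bit 15 back into the low 16 bits --
	 * the same job the sll/srl 16 + addx sequences do above. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}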