📄 syscall.S
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	bl	syscall_trace, %r2
	STREG	%r28,TASK_PT_GR28(%r1)		/* save return value now */
	ldo	-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get task ptr */
	LDREG	TI_TASK(%r1), %r1
	LDREG	TASK_PT_GR28(%r1), %r28		/* Restore return val. */

	ldil	L%syscall_exit,%r1
	be,n	R%syscall_exit(%sr7,%r1)

.Ltrace_rt_sigreturn:
	comib,<>	0,%r25,.Ltrace_in_syscall
	ldil	L%tracesys_sigexit,%r2
	be	0(%sr7,%r19)
	ldo	R%tracesys_sigexit(%r2),%r2

tracesys_sigexit:
	ldo	-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get task ptr */
	LDREG	0(%r1), %r1
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	bl	syscall_trace, %r2
	nop

	ldil	L%syscall_exit_rfi,%r1
	be,n	R%syscall_exit_rfi(%sr7,%r1)


	/*********************************************************
		Light-weight-syscall code

		r20 - lws number
		r26,r25,r24,r23,r22 - Input registers
		r28 - Function return register
		r21 - Error code.

		Scratch: any of the above that aren't currently
		being used, including r1.

		Return pointer: r31 (Not usable)

		Error codes returned by entry path:

		ENOSYS - r20 was an invalid LWS number.

	*********************************************************/
lws_start:
	/* Gate and ensure we return to userspace */
	gate	.+8, %r0
	depi	3, 31, 2, %r31			/* Ensure we return to userspace */

#ifdef CONFIG_64BIT
	/* FIXME: If we are a 64-bit kernel just
	 * turn this on unconditionally.
	 */
	ssm	PSW_SM_W, %r1
	extrd,u	%r1,PSW_W_BIT,1,%r1
	/* sp must be aligned on 4, so deposit the W bit setting into
	 * the bottom of sp temporarily */
	or,ev	%r1,%r30,%r30

	/* Clip LWS number to a 32-bit value always */
	depdi	0, 31, 32, %r20
#endif

	/* Is the lws entry number valid? */
	comiclr,>>=	__NR_lws_entries, %r20, %r0
	b,n	lws_exit_nosys

	/* WARNING: Trashing sr2 and sr3 */
	mfsp	%sr7,%r1			/* get userspace into sr3 */
	mtsp	%r1,%sr3
	mtsp	%r0,%sr2			/* get kernel space into sr2 */

	/* Load table start */
	ldil	L%lws_table, %r1
	ldo	R%lws_table(%r1), %r28		/* Scratch use of r28 */
	LDREGX	%r20(%sr2,r28), %r21		/* Scratch use of r21 */

	/* Jump to lws, lws table pointers already relocated */
	be,n	0(%sr2,%r21)

lws_exit_nosys:
	ldo	-ENOSYS(%r0),%r21		/* set errno */
	/* Fall through: Return to userspace */

lws_exit:
#ifdef CONFIG_64BIT
	/* Decide whether to reset the wide mode bit.
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero. */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr3, %r31)
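
	/* Aside: the W-bit save/restore above works because sp (%r30) is
	   always at least 4-byte aligned, so its lowest bit is free to
	   carry the caller's wide-mode flag across the call.  A minimal C
	   sketch of the trick; the function and helper names are
	   illustrative, not part of this file:

		unsigned long stash_wide_bit(unsigned long sp, unsigned long old_w)
		{
			return sp | (old_w & 1);	// deposit old W into bit 0 of sp
		}

		unsigned long restore_wide_bit(unsigned long sp)
		{
			unsigned long w = sp & 1;	// recover the stashed W bit
			if (!w)
				clear_psw_w();		// hypothetical stand-in for rsm PSW_SM_W
			return sp ^ w;			// give sp back its real value
		}
	*/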
	/***************************************************
		Implementing CAS as an atomic operation:

		%r26 - Address to examine
		%r25 - Old value to check (old)
		%r24 - New value to set (new)
		%r28 - Return prev through this register.
		%r21 - Kernel error code

		If debugging is DISabled:

		%r21 has the following meanings:

		EAGAIN - CAS is busy, ldcw failed, try again.
		EFAULT - Read or write failed.

		If debugging is enabled:

		EDEADLOCK - CAS called recursively.
		EAGAIN && r28 == 1 - CAS is busy. Lock contended.
		EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
		EFAULT - Read or write failed.

		Scratch: r20, r28, r1

	****************************************************/

	/* Do not enable LWS debugging */
#define ENABLE_LWS_DEBUG 0

	/* ELF64 Process entry path */
lws_compare_and_swap64:
#ifdef CONFIG_64BIT
	b,n	lws_compare_and_swap
#else
	/* If we are not a 64-bit kernel, then we don't
	 * support 64-bit input registers, so this entry
	 * returns ENOSYS. */
	b,n	lws_exit_nosys
#endif

	/* ELF32 Process entry path */
lws_compare_and_swap32:
#ifdef CONFIG_64BIT
	/* Clip all the input registers */
	depdi	0, 31, 32, %r26
	depdi	0, 31, 32, %r25
	depdi	0, 31, 32, %r24
#endif

lws_compare_and_swap:
#ifdef CONFIG_SMP
	/* Load start of lock table */
	ldil	L%lws_lock_start, %r20
	ldo	R%lws_lock_start(%r20), %r28

	/* Extract four bits from r26 and hash lock (Bits 4-7) */
	extru	%r26, 27, 4, %r20

	/* Find lock to use: the hash is one of 0 to 15,
	   multiplied by 16 (keep it 16-byte aligned)
	   and added to the lock table offset. */
	shlw	%r20, 4, %r20
	add	%r20, %r28, %r20

# if ENABLE_LWS_DEBUG
	/*
		DEBUG: check for deadlock!
		If the thread register values are the same
		then we were the one that locked it last and
		this is a recursive call that will deadlock.
		We *must* give up this call and fail.
	*/
	ldw	4(%sr2,%r20), %r28		/* Load thread register */
	/* WARNING: If cr27 cycles to the same value we have problems */
	mfctl	%cr27, %r21			/* Get current thread register */
	cmpb,<>,n	%r21, %r28, cas_lock	/* Called recursively? */
	b	lws_exit			/* Return error! */
	ldo	-EDEADLOCK(%r0), %r21
cas_lock:
	cmpb,=,n	%r0, %r28, cas_nocontend	/* Is nobody using it? */
	ldo	1(%r0), %r28			/* 1st case */
	b	lws_exit			/* Contended... */
	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
cas_nocontend:
# endif
/* ENABLE_LWS_DEBUG */

	ldcw	0(%sr2,%r20), %r28		/* Try to acquire the lock */
	cmpb,<>,n	%r0, %r28, cas_action	/* Did we get it? */
cas_wouldblock:
	ldo	2(%r0), %r28			/* 2nd case */
	b	lws_exit			/* Contended... */
	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
#endif
/* CONFIG_SMP */

	/*
		prev = *addr;
		if ( prev == old )
		  *addr = new;
		return prev;
	*/

	/* NOTES:
		This all works because intr_do_signal
		and schedule both check the return iasq
		and see that we are on the kernel page,
		so this process is never scheduled off
		nor sent any signal of any sort; thus it
		is wholly atomic from userspace's
		perspective.
	*/
cas_action:
#if defined(CONFIG_SMP) && ENABLE_LWS_DEBUG
	/* DEBUG */
	mfctl	%cr27, %r1
	stw	%r1, 4(%sr2,%r20)
#endif
	/* The load and store could fail */
1:	ldw	0(%sr3,%r26), %r28
	sub,<>	%r28, %r25, %r0
2:	stw	%r24, 0(%sr3,%r26)
#ifdef CONFIG_SMP
	/* Free lock */
	stw	%r20, 0(%sr2,%r20)
# if ENABLE_LWS_DEBUG
	/* Clear thread register indicator */
	stw	%r0, 4(%sr2,%r20)
# endif
#endif
	/* Return to userspace, set no error */
	b	lws_exit
	copy	%r0, %r21

3:
	/* Error occurred on load or store */
#ifdef CONFIG_SMP
	/* Free lock */
	stw	%r20, 0(%sr2,%r20)
# if ENABLE_LWS_DEBUG
	stw	%r0, 4(%sr2,%r20)
# endif
#endif
	b	lws_exit
	ldo	-EFAULT(%r0),%r21		/* set errno */
	nop
	nop
	nop
	nop

	/* Two exception table entries, one for the load,
	   the other for the store.  Either returns -EFAULT.
	   Each of the entries must be relocated. */
	.section __ex_table,"aw"
#ifdef CONFIG_64BIT
	/* Pad the address calculation */
	.word	0,(2b - linux_gateway_page)
	.word	0,(3b - linux_gateway_page)
#else
	.word	(2b - linux_gateway_page)
	.word	(3b - linux_gateway_page)
#endif
	.previous

	.section __ex_table,"aw"
#ifdef CONFIG_64BIT
	/* Pad the address calculation */
	.word	0,(1b - linux_gateway_page)
	.word	0,(3b - linux_gateway_page)
#else
	.word	(1b - linux_gateway_page)
	.word	(3b - linux_gateway_page)
#endif
	.previous
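
	/* Aside: read as C, the SMP path above (hash the address to a
	   lock, ldcw to acquire it, cas_action, release) behaves like the
	   sketch below.  This is a model only -- the identifiers are
	   illustrative -- and on an SMP kernel the atomicity comes from
	   the hashed ldcw lock together with the NOTES guarantee that
	   gateway-page code is neither preempted nor interrupted by
	   signal delivery:

		int lws_cas_model(unsigned int *addr, unsigned int old,
				  unsigned int new, unsigned int *prev)
		{
			// extru %r26,27,4: bits 4-7 of the address select
			// one of the 16 locks in lws_lock_start
			lock_t *l = &lws_locks[((unsigned long)addr >> 4) & 0xf];

			if (!try_acquire(l))		// ldcw read back 0
				return -EAGAIN;		// caller spins in userspace

			*prev = *addr;			// label 1: may fault -> -EFAULT
			if (*prev == old)
				*addr = new;		// label 2: may fault -> -EFAULT

			release(l);			// stw %r20, 0(%sr2,%r20)
			return 0;			// %r21 = 0, prev in %r28
		}
	*/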
end_compare_and_swap:

	/* Make sure nothing else is placed on this page */
	.align 4096
	.export end_linux_gateway_page
end_linux_gateway_page:

	/* Relocate symbols assuming linux_gateway_page is mapped
	   to virtual address 0x0 */
#ifdef CONFIG_64BIT
	/* FIXME: The code will always be on the gateway page
	 *        and thus in the first 4k; the assembler seems
	 *        to think that the final subtraction result is
	 *        only a word in length, so we pad the value.
	 */
#define LWS_ENTRY(_name_) .word 0,(lws_##_name_ - linux_gateway_page)
#else
#define LWS_ENTRY(_name_) .word (lws_##_name_ - linux_gateway_page)
#endif

	.align 4096
	/* Light-weight-syscall table */
	/* Start of lws table. */
	.export lws_table
.Llws_table:
lws_table:
	LWS_ENTRY(compare_and_swap32)	/* 0 - ELF32 Atomic compare and swap */
	LWS_ENTRY(compare_and_swap64)	/* 1 - ELF64 Atomic compare and swap */
	/* End of lws table */

	.align 4096
	.export sys_call_table
.Lsys_call_table:
sys_call_table:
#include "syscall_table.S"

#ifdef CONFIG_64BIT
	.align 4096
	.export sys_call_table64
.Lsys_call_table64:
sys_call_table64:
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"
#endif

#ifdef CONFIG_SMP
	/* All light-weight-syscall atomic operations
	   will use this set of locks */
	.section .data
	.align	4096
	.export lws_lock_start
.Llws_lock_start:
lws_lock_start:
	/* lws locks */
	.align	16
	.rept	16
	/* Keep locks aligned at 16-bytes */
	.word	1
	.word	0
	.word	0
	.word	0
	.endr
	.previous
#endif
/* CONFIG_SMP for lws_lock_start */

	.end
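
/* Aside: viewed from userspace, an LWS call branches into the gateway
   page's light-weight-syscall entry with the lws_table index in %r20
   (0 = the 32-bit CAS above); on return, %r21 holds the error code and
   %r28 the previous value.  Because -EAGAIN only means the hashed lock
   was busy, callers are expected to retry -- "spin in userspace", as
   the comments put it.  A hedged C sketch of that caller-side loop,
   where lws_cas_call() is a hypothetical stand-in for the actual
   branch sequence:

	unsigned int atomic_cas(unsigned int *addr, unsigned int old,
				unsigned int new)
	{
		unsigned int prev;
		long err;

		do {
			// hypothetical wrapper: LWS entry 0, i.e.
			// lws_compare_and_swap32, prev returned via %r28
			err = lws_cas_call(0, addr, old, new, &prev);
		} while (err == -EAGAIN);	// lock contended: try again

		return prev;			// err == -EFAULT: bad address
	}
*/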