⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 fsys.s

📁 优龙2410linux2.6.8内核源代码
💻 S
📖 第 1 页 / 共 2 页
字号:
/*
 * This file contains the light-weight system call handlers (fsyscall-handlers).
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 * 	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 25-Sep-03 davidm	Implement fsys_rt_sigprocmask().
 * 18-Feb-03 louisk	Implement fsys_gettimeofday().
 * 28-Feb-03 davidm	Fixed several bugs in fsys_gettimeofday().  Tuned it some more,
 *			probably broke it along the way... ;-)
 */

#include <asm/asmmacro.h>
#include <asm/errno.h>
#include <asm/offsets.h>
#include <asm/percpu.h>
#include <asm/thread_info.h>
#include <asm/sal.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/unistd.h>

#include "entry.h"

/*
 * See Documentation/ia64/fsys.txt for details on fsyscalls.
 *
 * On entry to an fsyscall handler:
 *   r10	= 0 (i.e., defaults to "successful syscall return")
 *   r11	= saved ar.pfs (a user-level value)
 *   r15	= system call number
 *   r16	= "current" task pointer (in normal kernel-mode, this is in r13)
 *   r32-r39	= system call arguments
 *   b6		= return address (a user-level value)
 *   ar.pfs	= previous frame-state (a user-level value)
 *   PSR.be	= cleared to zero (i.e., little-endian byte order is in effect)
 *   all other registers may contain values passed in from user-mode
 *
 * On return from an fsyscall handler:
 *   r11	= saved ar.pfs (as passed into the fsyscall handler)
 *   r15	= system call number (as passed into the fsyscall handler)
 *   r32-r39	= system call arguments (as passed into the fsyscall handler)
 *   b6		= return address (as passed into the fsyscall handler)
 *   ar.pfs	= previous frame-state (as passed into the fsyscall handler)
 */

/*
 * fsys_ni_syscall: handler for fsyscalls that are not implemented.
 * Returns failure: r8 = ENOSYS, r10 = -1.
 */
ENTRY(fsys_ni_syscall)
	.prologue
	.altrp b6
	.body
	mov r8=ENOSYS
	mov r10=-1
	FSYS_RETURN
END(fsys_ni_syscall)

/*
 * fsys_getpid: return current->tgid in r8.  If any TIF_ALLWORK_MASK
 * work-flags are set for the current thread, punt to the heavy-weight
 * path via fsys_fallback_syscall instead.
 */
ENTRY(fsys_getpid)
	.prologue
	.altrp b6
	.body
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16	// r9 = &current_thread_info()->flags
	;;
	ld4 r9=[r9]
	add r8=IA64_TASK_TGID_OFFSET,r16
	;;
	and r9=TIF_ALLWORK_MASK,r9
	ld4 r8=[r8]				// r8 = current->tgid
	;;
	cmp.ne p8,p0=0,r9			// pending work?  then fall back
(p8)	br.spnt.many fsys_fallback_syscall
FSYS_RETURN
END(fsys_getpid)

/*
 * fsys_getppid: return current->group_leader->real_parent->tgid in r8.
 * Lock-free: on SMP, real_parent is re-read after fetching the tgid and
 * the whole read is retried (label 1:) if it changed in between.  Scratch
 * registers are zeroed before returning so no kernel pointers leak to
 * user-mode.  Falls back to the slow path if work-flags are pending.
 */
ENTRY(fsys_getppid)
	.prologue
	.altrp b6
	.body
	add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
	;;
	ld8 r17=[r17]				// r17 = current->group_leader
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	;;
	ld4 r9=[r9]
	add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
	;;
	and r9=TIF_ALLWORK_MASK,r9
1:	ld8 r18=[r17]				// r18 = current->group_leader->real_parent
	;;
	cmp.ne p8,p0=0,r9
	add r8=IA64_TASK_TGID_OFFSET,r18	// r8 = &current->group_leader->real_parent->tgid
	;;
	/*
	 * The .acq is needed to ensure that the read of tgid has returned its data before
	 * we re-check "real_parent".
	 */
	ld4.acq r8=[r8]				// r8 = current->group_leader->real_parent->tgid
#ifdef CONFIG_SMP
	/*
	 * Re-read current->group_leader->real_parent.
	 */
	ld8 r19=[r17]				// r19 = current->group_leader->real_parent
(p8)	br.spnt.many fsys_fallback_syscall
	;;
	cmp.ne p6,p0=r18,r19			// did real_parent change?
	mov r19=0			// i must not leak kernel bits...
(p6)	br.cond.spnt.few 1b			// yes -> redo the read of tgid and the check
	;;
	mov r17=0			// i must not leak kernel bits...
	mov r18=0			// i must not leak kernel bits...
#else
	mov r17=0			// i must not leak kernel bits...
	mov r18=0			// i must not leak kernel bits...
	mov r19=0			// i must not leak kernel bits...
#endif
	FSYS_RETURN
END(fsys_getppid)

/*
 * fsys_set_tid_address: store the user-supplied address (argument in r32)
 * into current->clear_child_tid, or -1 if the argument register is NaT,
 * and return current->pid in r8.  Falls back to the slow path if
 * work-flags are pending.  Scratch registers are zeroed before returning.
 */
ENTRY(fsys_set_tid_address)
	.prologue
	.altrp b6
	.body
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	;;
	ld4 r9=[r9]
	tnat.z p6,p7=r32		// check argument register for being NaT
	;;
	and r9=TIF_ALLWORK_MASK,r9
	add r8=IA64_TASK_PID_OFFSET,r16
	add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16
	;;
	ld4 r8=[r8]			// r8 = current->pid (the return value)
	cmp.ne p8,p0=0,r9
	mov r17=-1
	;;
(p6)	st8 [r18]=r32			// arg valid: clear_child_tid = arg
(p7)	st8 [r18]=r17			// arg is NaT: clear_child_tid = -1
(p8)	br.spnt.many fsys_fallback_syscall
	;;
	mov r17=0			// i must not leak kernel bits...
	mov r18=0			// i must not leak kernel bits...
	FSYS_RETURN
END(fsys_set_tid_address)

/*
 * Note 1: This routine uses floating-point registers, but only with registers that
 *	   operate on integers.  
Because of that, we don't need to set ar.fpsr to the
 *	   kernel default value.
 *
 * Note 2: For now, we will assume that all CPUs run at the same clock-frequency.
 *	   If that wasn't the case, we would have to disable preemption (e.g.,
 *	   by disabling interrupts) between reading the ITC and reading
 *	   local_cpu_data->nsec_per_cyc.
 *
 * Note 3: On platforms where the ITC-drift bit is set in the SAL feature vector,
 *	   we ought to either skip the ITC-based interpolation or run an ntp-like
 *	   daemon to keep the ITCs from drifting too far apart.
 */

/*
 * fsys_gettimeofday: light-weight gettimeofday(tv, tz).
 * Reads xtime under a seqlock-style retry loop (.retry) and interpolates
 * within the current tick using the ITC and nsec_per_cyc, racing other
 * readers via cmpxchg8 on last_nsec_offset so time never goes backwards.
 * Stores sec/usec into the user timeval at r32 and returns r8 = 0,
 * r10 = 0 on success; errors exit through .fail_einval / .fail_efault.
 * Falls back to the heavy-weight path on pending work-flags or (on SMP)
 * when the SAL ITC-drift feature bit is set.
 */
ENTRY(fsys_gettimeofday)
	.prologue
	.altrp b6
	.body
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	addl r3=THIS_CPU(cpu_info),r0
#ifdef CONFIG_SMP
	movl r10=__per_cpu_offset
	movl r2=sal_platform_features
	;;
	ld8 r2=[r2]
	movl r19=xtime			// xtime is a timespec struct
	ld8 r10=[r10]			// r10 <- __per_cpu_offset[0]
	addl r21=THIS_CPU(cpu_info),r0
	;;
	add r10=r21, r10		// r10 <- &cpu_data(time_keeper_id)
	tbit.nz p8,p0 = r2, IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT
(p8)	br.spnt.many fsys_fallback_syscall
#else
	;;
	mov r10=r3
	movl r19=xtime			// xtime is a timespec struct
#endif
	ld4 r9=[r9]
	movl r17=xtime_lock
	;;
	// r32, r33 should contain the 2 args of gettimeofday
	adds r21=IA64_CPUINFO_ITM_NEXT_OFFSET, r10
	mov r2=-1
	tnat.nz p6,p7=r32		// guard against NaT args
	;;
	adds r10=IA64_CPUINFO_ITM_DELTA_OFFSET, r10
(p7)	tnat.nz p6,p0=r33
(p6)	br.cond.spnt.few .fail_einval
	adds r8=IA64_CPUINFO_NSEC_PER_CYC_OFFSET, r3
	movl r24=2361183241434822607	// for division hack (only for / 1000)
	;;
	ldf8 f7=[r10]			// f7 now contains itm_delta
	setf.sig f11=r2
	adds r10=8, r32
	adds r20=IA64_TIMESPEC_TV_NSEC_OFFSET, r19	// r20 = &xtime->tv_nsec
	movl r26=jiffies
	setf.sig f9=r24			// f9 is used for division hack
	movl r27=wall_jiffies
	and r9=TIF_ALLWORK_MASK,r9
	movl r25=last_nsec_offset
	;;
	/*
	 * Verify that we have permission to write to struct timeval.  Note:
	 * Another thread might unmap the mapping before we actually get
	 * to store the result.  That's OK as long as the stores are also
	 * protected by EX().
	 */
EX(.fail_efault, probe.w.fault r32, 3)		// this must come _after_ NaT-check
EX(.fail_efault, probe.w.fault r10, 3)		// this must come _after_ NaT-check
	nop 0
	ldf8 f10=[r8]			// f10 <- local_cpu_data->nsec_per_cyc value
	cmp.ne p8, p0=0, r9
(p8)	br.spnt.many fsys_fallback_syscall
	;;
.retry:	// *** seq = read_seqbegin(&xtime_lock); ***
	ld4.acq r23=[r17]		// since &xtime_lock == &xtime_lock->sequence
	ld8 r14=[r25]			// r14 (old) = last_nsec_offset
	ld8 r28=[r26]			// r28 = jiffies
	ld8 r29=[r27]			// r29 = wall_jiffies
	;;
	ldf8 f8=[r21]			// f8 now contains itm_next
	mov.m r31=ar.itc		// put time stamp into r31 (ITC) == now
	sub r28=r29, r28, 1		// r28 now contains "-(lost + 1)"
	;;
	ld8 r2=[r19]			// r2 = sec = xtime.tv_sec
	ld8 r29=[r20]			// r29 = nsec = xtime.tv_nsec
	tbit.nz p9, p10=r23, 0		// p9 <- is_odd(r23), p10 <- is_even(r23)
	setf.sig f6=r28			// f6 <- -(lost + 1)				(6 cyc)
	;;
	mf
	xma.l f8=f6, f7, f8	// f8 (last_tick) <- -(lost + 1)*itm_delta + itm_next	(5 cyc)
	nop 0
	setf.sig f12=r31		// f12 <- ITC					(6 cyc)
	// *** if (unlikely(read_seqretry(&xtime_lock, seq))) continue; ***
	ld4 r24=[r17]			// r24 = xtime_lock->sequence (re-read)
	nop 0
	;;
	xma.l f8=f11, f8, f12	// f8 (elapsed_cycles) <- (-1*last_tick + now) = (now - last_tick)
	nop 0
	;;
	getf.sig r18=f8			// r18 <- (now - last_tick)
	xmpy.l f8=f8, f10		// f8 <- elapsed_cycles*nsec_per_cyc (5 cyc)
	add r3=r29, r14			// r3 = (nsec + old)
	;;
	cmp.lt p7, p8=r18, r0		// if now < last_tick, set p7 = 1, p8 = 0
	getf.sig r18=f8			// r18 = elapsed_cycles*nsec_per_cyc		(6 cyc)
	nop 0
	;;
(p10)	cmp.ne p9, p0=r23, r24		// if xtime_lock->sequence != seq, set p9
	shr.u r18=r18, IA64_NSEC_PER_CYC_SHIFT	// r18 <- offset
(p9)	br.spnt.many .retry
	;;
	// Monotonicity: only advance last_nsec_offset if offset > old (p8);
	// cmpxchg makes the update atomic against concurrent readers.
	mov ar.ccv=r14			// ar.ccv = old					(1 cyc)
	cmp.leu p7, p8=r18, r14		// if (offset <= old), set p7 = 1, p8 = 0
	;;
(p8)	cmpxchg8.rel r24=[r25], r18, ar.ccv	// compare-and-exchange (atomic!)
(p8)	add r3=r29, r18			// r3 = (nsec + offset)
	;;
	// Divide nsec by 1000 via reciprocal multiplication:
	// (r3 >> 3) * 2361183241434822607, take high 64 bits, >> 4  ==  r3 / 1000.
	shr.u r3=r3, 3			// initiate dividing r3 by 1000
	;;
	setf.sig f8=r3			//						(6 cyc)
	mov r10=1000000			// r10 = 1000000
	;;
(p8)	cmp.ne.unc p9, p0=r24, r14	// cmpxchg lost the race -> retry
	xmpy.hu f6=f8, f9		//						(5 cyc)
(p9)	br.spnt.many .retry
	;;
	getf.sig r3=f6			//						(6 cyc)
	;;
	shr.u r3=r3, 4			// end of division, r3 is divided by 1000 (=usec)
	;;
1:	cmp.geu p7, p0=r3, r10		// while (usec >= 1000000)
	;;
(p7)	sub r3=r3, r10			// usec -= 1000000
(p7)	adds r2=1, r2			// ++sec
(p7)	br.spnt.many 1b
	// finally: r2 = sec, r3 = usec
EX(.fail_efault, st8 [r32]=r2)
	adds r9=8, r32
	mov r8=r0			// success
	;;
EX(.fail_efault, st8 [r9]=r3)		// store them in the timeval struct
	mov r10=0
	FSYS_RETURN
	/*
	 * Note: We are NOT clearing the scratch registers here.  Since the only things
	 *	 in those registers are time-related variables and some addresses (which
	 *	 can be obtained from System.map), none of this should be security-sensitive
	 *	 and we should be fine.
	 */
.fail_einval:
	mov r8=EINVAL			// r8 = EINVAL
	mov r10=-1			// r10 = -1
	FSYS_RETURN
.fail_efault:
	mov r8=EFAULT			// r8 = EFAULT
	mov r10=-1			// r10 = -1
	FSYS_RETURN
END(fsys_gettimeofday)

/*
 * long fsys_rt_sigprocmask (int how, sigset_t *set, sigset_t *oset, size_t sigsetsize).
 */
#if _NSIG_WORDS != 1
# error Sorry, fsys_rt_sigprocmask() needs to be updated for _NSIG_WORDS != 1.
#endif
ENTRY(fsys_rt_sigprocmask)
	.prologue
	.altrp b6
	.body
	add r2=IA64_TASK_BLOCKED_OFFSET,r16
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	cmp4.ltu p6,p0=SIG_SETMASK,r32		// p6 <- how > SIG_SETMASK (invalid)
	cmp.ne p15,p0=r0,r34			// oset != NULL?
	tnat.nz p8,p0=r34
	add r31=IA64_TASK_SIGHAND_OFFSET,r16
	;;
	ld8 r3=[r2]				// read/prefetch current->blocked
	ld4 r9=[r9]
	tnat.nz.or p6,p0=r35
	cmp.ne.or p6,p0=_NSIG_WORDS*8,r35	// sigsetsize must be _NSIG_WORDS*8
	tnat.nz.or p6,p0=r32
(p6)	br.spnt.few .fail_einval		// fail with EINVAL
	;;
#ifdef CONFIG_SMP
	ld8 r31=[r31]				// r31 <- current->sighand
#endif
	and r9=TIF_ALLWORK_MASK,r9
	tnat.nz.or p8,p0=r33
	;;
	cmp.ne p7,p0=0,r9
	cmp.eq p6,p0=r0,r33			// set == NULL?
add r31=IA64_SIGHAND_SIGLOCK_OFFSET,r31	// r31 <- current->sighand->siglock(p8)	br.spnt.few .fail_efault		// fail with EFAULT(p7)	br.spnt.many fsys_fallback_syscall	// got pending kernel work...(p6)	br.dpnt.many .store_mask		// -> short-circuit to just reading the signal mask	/* Argh, we actually have to do some work and _update_ the signal mask: */EX(.fail_efault, probe.r.fault r33, 3)		// verify user has read-access to *setEX(.fail_efault, ld8 r14=[r33])			// r14 <- *set	mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1))	;;	rsm psr.i				// mask interrupt delivery	mov ar.ccv=0	andcm r14=r14,r17			// filter out SIGKILL & SIGSTOP#ifdef CONFIG_SMP	mov r17=1	;;	cmpxchg4.acq r18=[r31],r17,ar.ccv	// try to acquire the lock	mov r8=EINVAL			// default to EINVAL	;;	ld8 r3=[r2]			// re-read current->blocked now that we hold the lock	cmp4.ne p6,p0=r18,r0(p6)	br.cond.spnt.many .lock_contention	;;#else	ld8 r3=[r2]			// re-read current->blocked now that we hold the lock	mov r8=EINVAL			// default to EINVAL#endif	add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16	add r19=IA64_TASK_SIGNAL_OFFSET,r16	cmp4.eq p6,p0=SIG_BLOCK,r32	;;	ld8 r19=[r19]			// r19 <- current->signal	cmp4.eq p7,p0=SIG_UNBLOCK,r32	cmp4.eq p8,p0=SIG_SETMASK,r32	;;	ld8 r18=[r18]			// r18 <- current->pending.signal	.pred.rel.mutex p6,p7,p8(p6)	or r14=r3,r14			// SIG_BLOCK(p7)	andcm r14=r3,r14		// SIG_UNBLOCK(p8)	mov r14=r14			// SIG_SETMASK(p6)	mov r8=0			// clear error code	// recalc_sigpending()	add r17=IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,r19	add r19=IA64_SIGNAL_SHARED_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r19	;;	ld4 r17=[r17]		// r17 <- current->signal->group_stop_count(p7)	mov r8=0		// clear error code	ld8 r19=[r19]		// r19 <- current->signal->shared_pending	;;	cmp4.gt p6,p7=r17,r0	// p6/p7 <- (current->signal->group_stop_count > 0)?(p8)	mov r8=0		// clear error code	or r18=r18,r19		// r18 <- current->pending | current->signal->shared_pending	;;	// r18 <- (current->pending | 
current->signal->shared_pending) & ~current->blocked:	andcm r18=r18,r14	add r9=TI_FLAGS+IA64_TASK_SIZE,r16	;;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -