fsys.s

/*
 * This file contains the light-weight system call handlers (fsyscall-handlers).
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 * 	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 25-Sep-03 davidm	Implement fsys_rt_sigprocmask().
 * 18-Feb-03 louisk	Implement fsys_gettimeofday().
 * 28-Feb-03 davidm	Fixed several bugs in fsys_gettimeofday().  Tuned it some more,
 *			probably broke it along the way... ;-)
 * 13-Jul-04 clameter   Implement fsys_clock_gettime and revise fsys_gettimeofday to make
 *                      it capable of using memory based clocks without falling back to C code.
 */

#include <asm/asmmacro.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/thread_info.h>
#include <asm/sal.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/unistd.h>

#include "entry.h"

/*
 * See Documentation/ia64/fsys.txt for details on fsyscalls.
 *
 * On entry to an fsyscall handler:
 *   r10	= 0 (i.e., defaults to "successful syscall return")
 *   r11	= saved ar.pfs (a user-level value)
 *   r15	= system call number
 *   r16	= "current" task pointer (in normal kernel-mode, this is in r13)
 *   r32-r39	= system call arguments
 *   b6		= return address (a user-level value)
 *   ar.pfs	= previous frame-state (a user-level value)
 *   PSR.be	= cleared to zero (i.e., little-endian byte order is in effect)
 *   all other registers may contain values passed in from user-mode
 *
 * On return from an fsyscall handler:
 *   r11	= saved ar.pfs (as passed into the fsyscall handler)
 *   r15	= system call number (as passed into the fsyscall handler)
 *   r32-r39	= system call arguments (as passed into the fsyscall handler)
 *   b6		= return address (as passed into the fsyscall handler)
 *   ar.pfs	= previous frame-state (as passed into the fsyscall handler)
 */

ENTRY(fsys_ni_syscall)
	.prologue
	.altrp b6
	.body
	mov r8=ENOSYS
	mov r10=-1
	FSYS_RETURN
END(fsys_ni_syscall)

ENTRY(fsys_getpid)
	.prologue
	.altrp b6
	.body
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	;;
	ld4 r9=[r9]
	add r8=IA64_TASK_TGID_OFFSET,r16
	;;
	and r9=TIF_ALLWORK_MASK,r9
	ld4 r8=[r8]				// r8 = current->tgid
	;;
	cmp.ne p8,p0=0,r9
(p8)	br.spnt.many fsys_fallback_syscall
	FSYS_RETURN
END(fsys_getpid)

ENTRY(fsys_getppid)
	.prologue
	.altrp b6
	.body
	add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
	;;
	ld8 r17=[r17]				// r17 = current->group_leader
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	;;
	ld4 r9=[r9]
	add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
	;;
	and r9=TIF_ALLWORK_MASK,r9
1:	ld8 r18=[r17]				// r18 = current->group_leader->real_parent
	;;
	cmp.ne p8,p0=0,r9
	add r8=IA64_TASK_TGID_OFFSET,r18	// r8 = &current->group_leader->real_parent->tgid
	;;
	/*
	 * The .acq is needed to ensure that the read of tgid has returned its data before
	 * we re-check "real_parent".
	 */
	ld4.acq r8=[r8]				// r8 = current->group_leader->real_parent->tgid
#ifdef CONFIG_SMP
	/*
	 * Re-read current->group_leader->real_parent.
	 */
	ld8 r19=[r17]				// r19 = current->group_leader->real_parent
(p8)	br.spnt.many fsys_fallback_syscall
	;;
	cmp.ne p6,p0=r18,r19			// did real_parent change?
	mov r19=0			// i must not leak kernel bits...
(p6)	br.cond.spnt.few 1b			// yes -> redo the read of tgid and the check
	;;
	mov r17=0			// i must not leak kernel bits...
	mov r18=0			// i must not leak kernel bits...
#else
	mov r17=0			// i must not leak kernel bits...
	mov r18=0			// i must not leak kernel bits...
	mov r19=0			// i must not leak kernel bits...
#endif
	FSYS_RETURN
END(fsys_getppid)

ENTRY(fsys_set_tid_address)
	.prologue
	.altrp b6
	.body
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	;;
	ld4 r9=[r9]
	tnat.z p6,p7=r32		// check argument register for being NaT
	;;
	and r9=TIF_ALLWORK_MASK,r9
	add r8=IA64_TASK_PID_OFFSET,r16
	add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16
	;;
	ld4 r8=[r8]
	cmp.ne p8,p0=0,r9
	mov r17=-1
	;;
(p6)	st8 [r18]=r32
(p7)	st8 [r18]=r17
(p8)	br.spnt.many fsys_fallback_syscall
	;;
	mov r17=0			// i must not leak kernel bits...
	mov r18=0			// i must not leak kernel bits...
	FSYS_RETURN
END(fsys_set_tid_address)

/*
 * Ensure that the time interpolator structure is compatible with the asm code
 */
#if IA64_TIME_INTERPOLATOR_SOURCE_OFFSET != 0 || IA64_TIME_INTERPOLATOR_SHIFT_OFFSET != 2 \
	|| IA64_TIME_INTERPOLATOR_JITTER_OFFSET != 3 || IA64_TIME_INTERPOLATOR_NSEC_OFFSET != 4
#error fsys_gettimeofday incompatible with changes to struct time_interpolator
#endif

#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 1
#define CLOCK_DIVIDE_BY_1000 0x4000
#define CLOCK_ADD_MONOTONIC 0x8000

ENTRY(fsys_gettimeofday)
	.prologue
	.altrp b6
	.body
	mov r31 = r32
	tnat.nz p6,p0 = r33		// guard against NaT argument
(p6)    br.cond.spnt.few .fail_einval
	mov r30 = CLOCK_DIVIDE_BY_1000
	;;
.gettime:
	// Register map
	// Incoming r31 = pointer to address where to place result
	//          r30 = flags determining how time is processed
	// r2,r3 = temp; r4-r7 preserved
	// r8 = result nanoseconds
	// r9 = result seconds
	// r10 = temporary storage for clock difference
	// r11 = preserved: saved ar.pfs
	// r12 = preserved: memory stack
	// r13 = preserved: thread pointer
	// r14 = address of mask / mask
	// r15 = preserved: system call number
	// r16 = preserved: current task pointer
	// r17 = wall_to_monotonic field in use (tv_sec, then tv_nsec)
	// r18 = time_interpolator->offset
	// r19 = address of wall_to_monotonic
	// r20 = pointer to struct time_interpolator / pointer to time_interpolator->address
	// r21 = shift factor
	// r22 = address of time_interpolator->last_counter
	// r23 = address of time_interpolator->last_cycle
	// r24 = address of time_interpolator->offset
	// r25 = last_cycle value
	// r26 = last_counter value
	// r27 = pointer to xtime
	// r28 = sequence number at the beginning of critical section
	// r29 = address of seqlock
	// r30 = time processing flags / memory address
	// r31 = pointer to result
	// Predicates
	// p6,p7 short term use
	// p8 = timesource ar.itc
	// p9 = timesource mmio64
	// p10 = timesource mmio32
	// p11 = timesource not to be handled by asm code
	// p12 = memory time source ( = p9 | p10)
	// p13 = do cmpxchg with time_interpolator_last_cycle
	// p14 = Divide by 1000
	// p15 = Add monotonic
	//
	// Note that instructions are optimized for McKinley. McKinley can process two
	// bundles simultaneously and therefore we continuously try to feed the CPU
	// two bundles and then a stop.
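	//
	// For orientation, a rough C equivalent of the fast path that follows
	// (an editor's sketch, not the kernel's C code; read_clock() is a
	// hypothetical helper standing in for the p8/p9/p10 counter reads):
	//
	//	do {
	//		seq   = read_seqbegin(&xtime_lock);
	//		delta = (read_clock() - time_interpolator->last_counter)
	//			& time_interpolator->mask;
	//		nsec  = xtime.tv_nsec + time_interpolator->offset
	//			+ ((delta * time_interpolator->nsec_per_cyc)
	//			   >> time_interpolator->shift);
	//		secs  = xtime.tv_sec;
	//		if (flags & CLOCK_ADD_MONOTONIC) {	/* p15 set */
	//			secs += wall_to_monotonic.tv_sec;
	//			nsec += wall_to_monotonic.tv_nsec;
	//		}
	//	} while (read_seqretry(&xtime_lock, seq));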
	tnat.nz p6,p0 = r31	// branch deferred since it does not fit into bundle structure
	mov pr = r30,0xc000	// Set predicates according to function
	add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
	movl r20 = time_interpolator
	;;
	ld8 r20 = [r20]		// get pointer to time_interpolator structure
	movl r29 = xtime_lock
	ld4 r2 = [r2]		// process work pending flags
	movl r27 = xtime
	;;	// only one bundle here
	ld8 r21 = [r20]		// first quad with control information
	and r2 = TIF_ALLWORK_MASK,r2
(p6)    br.cond.spnt.few .fail_einval	// deferred branch
	;;
	add r10 = IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET,r20
	extr r3 = r21,32,32	// time_interpolator->nsec_per_cyc
	extr r8 = r21,0,16	// time_interpolator->source
	cmp.ne p6, p0 = 0, r2	// Fallback if work is scheduled
(p6)    br.cond.spnt.many fsys_fallback_syscall
	;;
	cmp.eq p8,p12 = 0,r8	// Check for cpu timer
	cmp.eq p9,p0 = 1,r8	// MMIO64 ?
	extr r2 = r21,24,8	// time_interpolator->jitter
	cmp.eq p10,p0 = 2,r8	// MMIO32 ?
	cmp.ltu p11,p0 = 2,r8	// function or other clock
(p11)	br.cond.spnt.many fsys_fallback_syscall
	;;
	setf.sig f7 = r3	// Setup for scaling of counter
(p15)	movl r19 = wall_to_monotonic
(p12)	ld8 r30 = [r10]
	cmp.ne p13,p0 = r2,r0	// need jitter compensation?
	extr r21 = r21,16,8	// shift factor
	;;
.time_redo:
	.pred.rel.mutex p8,p9,p10
	ld4.acq r28 = [r29]	// xtime_lock.sequence. Must come first for locking purposes
(p8)	mov r2 = ar.itc		// CPU_TIMER. 36 clocks latency!!!
	add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20
(p9)	ld8 r2 = [r30]		// readq(ti->address). Could also have latency issues..
(p10)	ld4 r2 = [r30]		// readl(ti->address)
(p13)	add r23 = IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET,r20
	;;			// could be removed by moving the last add upward
	ld8 r26 = [r22]		// time_interpolator->last_counter
(p13)	ld8 r25 = [r23]		// time_interpolator->last_cycle
	add r24 = IA64_TIME_INTERPOLATOR_OFFSET_OFFSET,r20
(p15)	ld8 r17 = [r19],IA64_TIMESPEC_TV_NSEC_OFFSET
	ld8 r9 = [r27],IA64_TIMESPEC_TV_NSEC_OFFSET
	add r14 = IA64_TIME_INTERPOLATOR_MASK_OFFSET, r20
	;;
	ld8 r18 = [r24]		// time_interpolator->offset
	ld8 r8 = [r27],-IA64_TIMESPEC_TV_NSEC_OFFSET	// xtime.tv_nsec
(p13)	sub r3 = r25,r2		// Diff needed before comparison (thanks davidm)
	;;
	ld8 r14 = [r14]		// time_interpolator->mask
(p13)	cmp.gt.unc p6,p7 = r3,r0	// check if it is less than last. p6,p7 cleared
	sub r10 = r2,r26	// current_counter - last_counter
	;;
(p6)	sub r10 = r25,r26	// time we got was less than last_cycle
(p7)	mov ar.ccv = r25	// more than last_cycle. Prep for cmpxchg
	;;
	and r10 = r10,r14	// Apply mask
	;;
	setf.sig f8 = r10
	nop.i 123
	;;
(p7)	cmpxchg8.rel r3 = [r23],r2,ar.ccv
EX(.fail_efault, probe.w.fault r31, 3)	// This takes 5 cycles and we have spare time
	xmpy.l f8 = f8,f7	// nsec_per_cyc*(counter-last_counter)
(p15)	add r9 = r9,r17		// Add wall to monotonic.secs to result secs
	;;
(p15)	ld8 r17 = [r19],-IA64_TIMESPEC_TV_NSEC_OFFSET
(p7)	cmp.ne p7,p0 = r25,r3	// if cmpxchg not successful redo
	// simulate tbit.nz.or p7,p0 = r28,0
	and r28 = ~1,r28	// Make sequence even to force retry if odd
	getf.sig r2 = f8
	mf
	add r8 = r8,r18		// Add time interpolator offset
	;;
	ld4 r10 = [r29]		// xtime_lock.sequence
(p15)	add r8 = r8, r17	// Add monotonic.nsecs to nsecs
	shr.u r2 = r2,r21
	;;		// overloaded 3 bundles!
	// End critical section.
	add r8 = r8,r2		// Add xtime.nsecs
	cmp4.ne.or p7,p0 = r28,r10
(p7)	br.cond.dpnt.few .time_redo	// sequence number changed ?
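	//
	// The (p14) "/ 1000 hack" below avoids a divide instruction: for the
	// nanosecond range involved,
	//	nsec / 1000 == ((nsec >> 3) * 2361183241434822607) >> 68
	// since /1000 == /8 /125 and 2361183241434822607 == ceil(2^68 / 125).
	// xmpy.hu already discards the low 64 bits of the product, so only a
	// further shift by 4 is needed.  An editor's C sketch of the same
	// trick (not the kernel's code):
	//
	//	static unsigned long div1000(unsigned long nsec)
	//	{
	//		/* high half of the 64x64 multiply, then >> 4 */
	//		return (unsigned long)(((__uint128_t)(nsec >> 3)
	//					* 2361183241434822607ULL) >> 64) >> 4;
	//	}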
	// Now r8=tv->tv_nsec and r9=tv->tv_sec
	mov r10 = r0
	movl r2 = 1000000000
	add r23 = IA64_TIMESPEC_TV_NSEC_OFFSET, r31
(p14)	movl r3 = 2361183241434822607	// Prep for / 1000 hack
	;;
.time_normalize:
	mov r21 = r8
	cmp.ge p6,p0 = r8,r2
(p14)	shr.u r20 = r8, 3		// We can repeat this if necessary just wasting some time
	;;
(p14)	setf.sig f8 = r20
(p6)	sub r8 = r8,r2
(p6)	add r9 = 1,r9			// two nops before the branch.
(p14)	setf.sig f7 = r3		// Chances for repeats are 1 in 10000 for gettod
(p6)	br.cond.dpnt.few .time_normalize
	;;
	// Divided by 8 through shift. Now divide by 125
	// The compiler was able to do that with a multiply
	// and a shift and we do the same
EX(.fail_efault, probe.w.fault r23, 3)		// This also costs 5 cycles
(p14)	xmpy.hu f8 = f8, f7			// xmpy has 5 cycles latency so use it...
	;;
	mov r8 = r0
(p14)	getf.sig r2 = f8
	;;
(p14)	shr.u r21 = r2, 4
	;;
EX(.fail_efault, st8 [r31] = r9)
EX(.fail_efault, st8 [r23] = r21)
	FSYS_RETURN
.fail_einval:
	mov r8 = EINVAL
	mov r10 = -1
	FSYS_RETURN
.fail_efault:
	mov r8 = EFAULT
	mov r10 = -1
	FSYS_RETURN
END(fsys_gettimeofday)

ENTRY(fsys_clock_gettime)
	.prologue
	.altrp b6
	.body
	cmp4.ltu p6, p0 = CLOCK_MONOTONIC, r32
	// Fallback if this is not CLOCK_REALTIME or CLOCK_MONOTONIC
(p6)	br.spnt.few fsys_fallback_syscall
	mov r31 = r33
	shl r30 = r32,15	// CLOCK_MONOTONIC (1) << 15 == CLOCK_ADD_MONOTONIC
	br.many .gettime
END(fsys_clock_gettime)

/*
 * long fsys_rt_sigprocmask (int how, sigset_t *set, sigset_t *oset, size_t sigsetsize).
 */

#if _NSIG_WORDS != 1
# error Sorry, fsys_rt_sigprocmask() needs to be updated for _NSIG_WORDS != 1.
#endif

ENTRY(fsys_rt_sigprocmask)
	.prologue
	.altrp b6
	.body

	add r2=IA64_TASK_BLOCKED_OFFSET,r16
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	cmp4.ltu p6,p0=SIG_SETMASK,r32
	cmp.ne p15,p0=r0,r34			// oset != NULL?
	tnat.nz p8,p0=r34
	add r31=IA64_TASK_SIGHAND_OFFSET,r16
	;;
	ld8 r3=[r2]				// read/prefetch current->blocked
	ld4 r9=[r9]
	tnat.nz.or p6,p0=r35
	cmp.ne.or p6,p0=_NSIG_WORDS*8,r35
	tnat.nz.or p6,p0=r32
(p6)	br.spnt.few .fail_einval		// fail with EINVAL
	;;
#ifdef CONFIG_SMP
	ld8 r31=[r31]				// r31 <- current->sighand
#endif
	and r9=TIF_ALLWORK_MASK,r9
	tnat.nz.or p8,p0=r33
	;;
	cmp.ne p7,p0=0,r9
	cmp.eq p6,p0=r0,r33			// set == NULL?
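	//
	// The remainder of this handler mirrors sys_rt_sigprocmask().  An
	// editor's C sketch of the update performed below (not the kernel's
	// literal code; with _NSIG_WORDS == 1 the mask fits a single word):
	//
	//	spin_lock_irq(&current->sighand->siglock);
	//	new = *set & ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
	//	switch (how) {
	//	case SIG_BLOCK:		current->blocked |= new;  break;
	//	case SIG_UNBLOCK:	current->blocked &= ~new; break;
	//	case SIG_SETMASK:	current->blocked  = new;  break;
	//	default:		error = EINVAL;
	//	}
	//	recalc_sigpending();
	//	spin_unlock_irq(&current->sighand->siglock);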
	add r31=IA64_SIGHAND_SIGLOCK_OFFSET,r31	// r31 <- current->sighand->siglock
(p8)	br.spnt.few .fail_efault		// fail with EFAULT
(p7)	br.spnt.many fsys_fallback_syscall	// got pending kernel work...
(p6)	br.dpnt.many .store_mask		// -> short-circuit to just reading the signal mask

	/* Argh, we actually have to do some work and _update_ the signal mask: */

EX(.fail_efault, probe.r.fault r33, 3)		// verify user has read-access to *set
EX(.fail_efault, ld8 r14=[r33])			// r14 <- *set
	mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1))
	;;

	rsm psr.i				// mask interrupt delivery
	mov ar.ccv=0
	andcm r14=r14,r17			// filter out SIGKILL & SIGSTOP

#ifdef CONFIG_SMP
	mov r17=1
	;;
	cmpxchg4.acq r18=[r31],r17,ar.ccv	// try to acquire the lock
	mov r8=EINVAL			// default to EINVAL
	;;
	ld8 r3=[r2]			// re-read current->blocked now that we hold the lock
	cmp4.ne p6,p0=r18,r0
(p6)	br.cond.spnt.many .lock_contention
	;;
#else
	ld8 r3=[r2]			// re-read current->blocked now that we hold the lock
	mov r8=EINVAL			// default to EINVAL
#endif
	add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
	add r19=IA64_TASK_SIGNAL_OFFSET,r16
	cmp4.eq p6,p0=SIG_BLOCK,r32
	;;
	ld8 r19=[r19]			// r19 <- current->signal
	cmp4.eq p7,p0=SIG_UNBLOCK,r32
	cmp4.eq p8,p0=SIG_SETMASK,r32
	;;
	ld8 r18=[r18]			// r18 <- current->pending.signal
	.pred.rel.mutex p6,p7,p8
(p6)	or r14=r3,r14			// SIG_BLOCK
(p7)	andcm r14=r3,r14		// SIG_UNBLOCK
(p8)	mov r14=r14			// SIG_SETMASK
(p6)	mov r8=0			// clear error code
	// recalc_sigpending()
	add r17=IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,r19
	add r19=IA64_SIGNAL_SHARED_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r19
	;;
	ld4 r17=[r17]		// r17 <- current->signal->group_stop_count
(p7)	mov r8=0		// clear error code
	ld8 r19=[r19]		// r19 <- current->signal->shared_pending
	;;
	cmp4.gt p6,p7=r17,r0	// p6/p7 <- (current->signal->group_stop_count > 0)?
(p8)	mov r8=0		// clear error code
	or r18=r18,r19		// r18 <- current->pending | current->signal->shared_pending
	;;
	// r18 <- (current->pending | current->signal->shared_pending) & ~current->blocked:
	andcm r18=r18,r14
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	;;
(p7)	cmp.ne.or.andcm p6,p7=r18,r0		// p6/p7 <- signal pending
	mov r19=0					// i must not leak kernel bits...
(p6)	br.cond.dpnt.many .sig_pending
	;;
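	//
	// For reference, the recalc_sigpending() logic inlined above, as an
	// editor's C sketch (again assuming _NSIG_WORDS == 1; "new_blocked"
	// is the updated mask computed in r14; not the kernel's literal code):
	//
	//	pending = current->pending.signal
	//		  | current->signal->shared_pending.signal;
	//	if (current->signal->group_stop_count > 0
	//	    || (pending & ~new_blocked))
	//		goto sig_pending;	/* slow path: .sig_pending */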