kern_lock.c

来自「基于组件方式开发操作系统的OSKIT源代码」· C语言 代码 · 共 1,249 行 · 第 1/3 页

C
1,249
字号
/*	$NetBSD: kern_lock.c,v 1.50 2000/11/22 06:31:23 thorpej Exp $	*//*- * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, * NASA Ames Research Center. * * This code is derived from software contributed to The NetBSD Foundation * by Ross Harvey. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright *    notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright *    notice, this list of conditions and the following disclaimer in the *    documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software *    must display the following acknowledgement: *	This product includes software developed by the NetBSD *	Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its *    contributors may be used to endorse or promote products derived *    from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED.  
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. *//*  * Copyright (c) 1995 *	The Regents of the University of California.  All rights reserved. * * This code contains ideas from software contributed to Berkeley by * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating * System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright *    notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright *    notice, this list of conditions and the following disclaimer in the *    documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software *    must display the following acknowledgement: *	This product includes software developed by the University of *	California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors *    may be used to endorse or promote products derived from this software *    without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.  
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95 */#include "opt_multiprocessor.h"#include "opt_lockdebug.h"#include "opt_ddb.h"#include <sys/param.h>#include <sys/proc.h>#include <sys/lock.h>#include <sys/systm.h>#include <machine/cpu.h>#if defined(LOCKDEBUG)#include <sys/syslog.h>/* * note that stdarg.h and the ansi style va_start macro is used for both * ansi and traditional c compiles. * XXX: this requires that stdarg.h define: va_alist and va_dcl */#include <machine/stdarg.h>void	lock_printf(const char *fmt, ...)    __attribute__((__format__(__printf__,1,2)));int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */#endif/* * Locking primitives implementation. * Locks provide shared/exclusive sychronization. 
*/

/*
 * Held-lock bookkeeping (LOCKDEBUG/DIAGNOSTIC only): COUNT() charges a
 * spin lock to the current CPU and a sleep lock to the owning process,
 * so totals can be sanity-checked (e.g. by spinlock_switchcheck()).
 */
#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)						\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;		/* single global counter on uniprocessor */
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, p, cpu_id, x)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		COUNT_CPU((cpu_id), (x));				\
	else								\
		(p)->p_locks += (x);					\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_INTERLOCK_RELEASE_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_INTERLOCK_RELEASE_HOOK		/* nothing */
#endif

/*
 * The interlock (a simple lock) protects the lock structure's fields.
 * For spin locks it is taken at splsched(); `s' (a variable in the
 * caller's scope) saves the previous spl so INTERLOCK_RELEASE can
 * restore it after dropping the interlock.
 */
#define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
do {									\
	if ((flags) & LK_SPIN)						\
		s = splsched();						\
	simple_lock(&(lkp)->lk_interlock);				\
} while (0)

#define	INTERLOCK_RELEASE(lkp, flags, s)				\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
	if ((flags) & LK_SPIN) {					\
		splx(s);						\
		SPINLOCK_INTERLOCK_RELEASE_HOOK;			\
	}								\
} while (0)

/*
 * Spinout detection (LOCKDEBUG): a 32-bit per-acquire counter that, on
 * wrap-around, reports the apparently stuck lock's state and, with DDB
 * configured, drops into the kernel debugger.
 */
#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL						\
	/* 32-bits of count -- wrap constitutes a "spinout" */		\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK						\
do {									\
	if (++__spinc == 0) {						\
		printf("LK_SPIN spinout, excl %d, share %d\n",		\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)				\
			printf("held by CPU %lu\n",			\
			    (u_long) lkp->lk_cpu);			\
		if (lkp->lk_lock_file)					\
			printf("last locked at %s:%d\n",		\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)				\
			printf("last unlocked at %s:%d\n",		\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SPINLOCK_SPINCHECK_DEBUGGER;				\
	}								\
} while (0)
#else
#define	SPINLOCK_SPINCHECK_DECL			/* nothing */
#define	SPINLOCK_SPINCHECK			/* nothing */
#endif /* 
LOCKDEBUG && DDB */

/*
 * Acquire a resource.
 *
 * Spins (LK_SPIN) or sleeps (ltsleep) until `wanted' evaluates false,
 * leaving `error' 0 on success or the ltsleep()/ENOLCK failure code.
 * The interlock must be held on entry and is held again on exit; the
 * spin branch drops it while polling (via the caller's `s' spl
 * variable) and re-takes it before rechecking.  When `drain' is set
 * the LK_DRAIN protocol is used: the waiter is noted with LK_WAITDRAIN
 * rather than lk_waitcount and sleeps on &lk_flags, not the lock
 * address.  LK_SLEEPFAIL turns a successful wakeup into ENOLCK so the
 * caller re-evaluates from scratch.
 */
#define ACQUIRE(lkp, error, extflags, drain, wanted)			\
	if ((extflags) & LK_SPIN) {					\
		int interlocked;					\
		SPINLOCK_SPINCHECK_DECL;				\
									\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount++;				\
		for (interlocked = 1;;) {				\
			SPINLOCK_SPINCHECK;				\
			if (wanted) {					\
				if (interlocked) {			\
					INTERLOCK_RELEASE((lkp),	\
					    LK_SPIN, s);		\
					interlocked = 0;		\
				}					\
			} else if (interlocked) {			\
				break;					\
			} else {					\
				INTERLOCK_ACQUIRE((lkp), LK_SPIN, s);	\
				interlocked = 1;			\
			}						\
		}							\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		KASSERT((wanted) == 0);					\
		error = 0;	/* sanity */				\
	} else {							\
		for (error = 0; wanted; ) {				\
			if ((drain))					\
				(lkp)->lk_flags |= LK_WAITDRAIN;	\
			else						\
				(lkp)->lk_waitcount++;			\
			/* XXX Cast away volatile. */			\
			error = ltsleep((drain) ? &(lkp)->lk_flags :	\
			    (void *)(lkp), (lkp)->lk_prio,		\
			    (lkp)->lk_wmesg, (lkp)->lk_timo,		\
			    &(lkp)->lk_interlock);			\
			if ((drain) == 0)				\
				(lkp)->lk_waitcount--;			\
			if (error)					\
				break;					\
			if ((extflags) & LK_SLEEPFAIL) {		\
				error = ENOLCK;				\
				break;					\
			}						\
		}							\
	}

/*
 * Lock ownership is recorded as a CPU number for spin locks and as a
 * process id for sleep locks; SETHOLDER/WEHOLDIT hide that distinction.
 */
#define	SETHOLDER(lkp, pid, cpu_id)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else								\
		(lkp)->lk_lockholder = pid;				\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	 ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))

/* Wake one sleeper; spin-lock waiters poll, so nothing to do for them. */
#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) {	\
		/* XXX Cast away volatile. */				\
		wakeup_one((void *)(lkp));				\
	}								\
} while (/*CONSTCOND*/0)

/*
 * LOCKDEBUG: a global list of currently-held spin locks, itself guarded
 * by a raw __cpu_simple_lock (the debug hooks cannot use the very
 * primitives they instrument).  HAVEIT/DONTHAVEIT enqueue and dequeue
 * a spin lock at spllock().
 */
#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */
#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */
#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog (selected at run time by patching the
 * lock_debug_syslog variable).
 */
void
lock_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else
		vprintf(fmt, ap);
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{
	/* Zero everything, then fill in the type-specific fields. */
	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		/* Sleep locks also record sleep priority and timeout. */
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}

/*
 * Determine the status of a lock: LK_EXCLUSIVE, LK_SHARED, or 0 if
 * unlocked.  Takes the interlock so the counts are read consistently.
 */
int
lockstatus(struct lock *lkp)
{
	int s, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.  Panics if the per-CPU (or, on UP, the global)
 * spin-lock count is non-zero; reads it at spllock() to avoid racing
 * with interrupt-level lock activity.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);
	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
* * A simple_lock (which is a __cpu_simple_lock wrapped with some * debugging hooks) may be used at or below spllock(), which is * typically at or just below splhigh() (i.e. blocks everything * but certain machine-dependent extremely high priority interrupts). * * spinlockmgr spinlocks should be used at or below splsched(). * * Some platforms may have interrupts of higher priority than splsched(), * including hard serial interrupts, inter-processor interrupts, and * kernel debugger traps.

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?