⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 lock.c

📁 pebble
💻 C
字号:
/* 
 * Copyright 1999, 2000, 2001, 2002 Lucent Technologies Inc.
 * All Rights Reserved.
 * Information Sciences Research Center, Bell Labs.
 *
 * LUCENT TECHNOLOGIES DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE 
 * OR THE SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE. The
 * software is provided "as is" without expressed or implied warranty 
 * of any kind.
 *
 * These notices must be retained in any copies of any part of this
 * software.
 *
 */

/*
 * User-level lock manager.
 *
 * The lock manager is called when a mutual exclusion lock is busy,
 * and the calling thread has to sleep until the lock is released.
 * The lock manager is also called when a lock is released and there
 * is a thread waiting on it.
 *
 * The lock manager may have multiple threads active inside it at
 * the same time. This is why we use a spin lock to protect its
 * internal data structures.
 *
 * The lock manager usually runs in user mode with interrupts ENABLED.
 * This is the current mode when all interrupt handling code run in
 * separate protection domains.
 * However, when an interrupt handler is called directly by the interrupt
 * dispatcher, it may preempt a thread that is currently holding a spin
 * lock inside the lock manager. If the interrupt handler calls any
 * lock operation, this call may block on the same lock. Since the
 * interrupt handler runs in the same thread as the previously preempted
 * lock operation, this is a deadlock. The yield() will do nothing.
 * This is the reason why when we have a driver inside the nucleus,
 * the lock manager MUST run with interrupts disabled.
 * We use the constant IS_ANY_INSIDE from inside.h for this purpose.
 *
 * *** important ***
 * The lock manager must be initialized before any use of mutual exclusion
 * locks.
 */

#include <assert.h>
#include <stdio.h>		/* for printf */
#include <stdlib.h>		/* for malloc */
#include <string.h>
#include <unistd.h>		/* for write */
#include "diag.h"
#include "pebble.h"
#include "nucleus.h"
#include "lock_free.h"
#include "inside.h"

enum	{
	BUCKETS = 512,
	MAX_SPIN_ITERATIONS = 1000,
};

typedef	struct elem_t	{
	int	sem;		/* semaphore for waiting until lock released */
	int	asid;		/* lock's asid */
	void	*lock;		/* lock's address */
	int	nwait;		/* number of threads waiting on this lock */
	struct elem_t *next;
	struct elem_t *last;
} Elem;

typedef	struct {
	int	lock;		/* spin lock */
	Elem	*head;
} List;

static List table[BUCKETS];
static Elem *free_list = NULL;

const char INVALID[] = "invalid lock address"; 

/*
 * Dump the lock manager state to file descriptor 2: one line per
 * lock that currently has a wait-list element, showing the lock
 * address, its asid, the waiter count and the backing semaphore.
 * Walks every bucket without taking the bucket spin locks, so the
 * snapshot may be momentarily inconsistent -- acceptable for a
 * diagnostic dump.
 */
void
sys_dump_lock(void)
{
	int bucket;
	int total = 0;
	Elem *e;

	fdprintf(2, "lock manager dump:\n");

	for (bucket = 0; bucket < BUCKETS; bucket++) {
		for (e = table[bucket].head; e != NULL; e = e->next) {
			total++;
			fdprintf(2,
		"lock %p asid=%d has %d waiting threads on semaphore=%d\n",
				e->lock, e->asid, e->nwait, e->sem);
		}
	}

	if (total == 0)
		fdprintf(2, "no threads waiting on locks\n");
}

/*
 * We do not check for infinite loops in the spin locks,
 * since this routine is used only inside the lock manager,
 * which should never hold the spin lock for too long.
 */

/*
 * Acquire the spin lock *l.  Attempts an atomic 0 -> 1 transition
 * with csw(); on failure, yields the processor when it is safe to
 * do so (no driver inside the nucleus, see IS_ANY_INSIDE) and
 * retries.  After MAX_SPIN_ITERATIONS failed attempts the lock is
 * presumed wedged: dump the lock manager state and panic.
 */
void
spin_lock(int *l)
{
	int attempts = MAX_SPIN_ITERATIONS;

	while (attempts-- > 0) {
		if (csw(l, 0, 1) >= 0)
			return;		/* got it */
		if (!IS_ANY_INSIDE)
			yield();	/* let the holder run */
	}

	fdprintf(2, "lock manager: spinning too long on lock %p\n", l);
	sys_dump_lock();
	panic("lock manager ended");
}


/*
 * Release a spin lock previously acquired by spin_lock().
 * A plain store of zero makes the lock available again to the
 * csw() loop in spin_lock().
 * NOTE(review): there is no explicit memory barrier here; presumably
 * the target architecture / csw() provides the needed ordering --
 * confirm before porting.
 */
void
spin_unlock(int *l)
{
	*l = 0;
}


int
hash(int asid, void *l)
{
	return (asid + ((int)l)/sizeof(Lock)) % BUCKETS;
}


/*
 * Find the wait-list element for lock l of address space asid in
 * bucket h.  Returns NULL when no element exists (i.e. no thread
 * is currently tracked as waiting on that lock).  Both callers
 * take table[h].lock before calling.
 */
Elem *
lookup(int h, int asid, void *l)
{
	Elem *e = table[h].head;

	while (e != NULL) {
		if (e->asid == asid && e->lock == l)
			break;
		e = e->next;
	}

	return e;
}


/* allocate a new element */
Elem *
new_elem(int asid, void *l)
{
	Elem *p, *new;

	do {
		p = free_list;
		if (p != NULL)
			new = p->next;
		else
			new = NULL;
	} while (csw((int *)&free_list, (int)p, (int)new) < 0);

	if (p == NULL) {
		p = (Elem *)malloc(sizeof(Elem));
		if (p == NULL)
			panic("lock manager: malloc:");
		if ((p->sem = sem_create(0)) < 0)
			panic("lock manager: sem_create:");
	}

	p->asid = asid;
	p->lock = l;
	p->nwait = 0;
	p->next = p->last = NULL;

	return p;
}


/*
 * Return element p to the global free list for reuse by
 * new_elem().  Lock-free push: link p in front of the current
 * head and retry the compare-and-swap until no other thread has
 * changed free_list in between.  Note that p's semaphore is kept
 * alive for the next user of the element.
 */
void
free_elem(Elem *p)
{
	Elem *old;

	do {
		old = free_list;
		p->next = old;
	} while (csw((int *)&free_list, (int)old, (int)p) < 0);
}


/*
 * Link element p at the head of the doubly-linked bucket list
 * rooted at table[h].head.  Callers hold table[h].lock.
 */
void
enqueue(int h, Elem *p)
{
	Elem *first = table[h].head;

	p->last = NULL;
	p->next = first;
	if (first != NULL)
		first->last = p;
	table[h].head = p;
}


/*
 * Unlink element p from the doubly-linked bucket list rooted at
 * table[h].head.  When p has no predecessor it must be the list
 * head (asserted).  Callers hold table[h].lock.
 */
void
dequeue(int h, Elem *p)
{
	Elem *prev = p->last;
	Elem *succ = p->next;

	if (succ != NULL)
		succ->last = prev;

	if (prev != NULL)
		prev->next = succ;
	else {
		assert(table[h].head == p);
		table[h].head = succ;
	}
}


/*
 * Portal entry: block the calling thread until the mutual
 * exclusion lock at address l is released by sys_lock_wakeup().
 *
 * Finds (or creates) the wait-list element for (caller_asid, l),
 * registers as a waiter, and sleeps on the element's semaphore.
 * On wakeup the waiter count is decremented under the bucket spin
 * lock, and the last waiter recycles the element.
 * Returns 0; raises error(INVALID) for a NULL lock address.
 */
int
sys_lock_sleep(Thread *t, void *l, int caller_asid)
{
	int h;
	Elem *p;

	DIAG(LOCK_DIAG, ("sleep:%d %p\n", caller_asid, l));

	if (l == NULL)
		error(INVALID);

	/* locks on the heap are tracked under asid 0 so that all
	 * address spaces sharing the lock hash to the same element */
	if (on_heap(l))
		caller_asid = 0;

	h = hash(caller_asid, l);
	DIAG(LOCK_DIAG, ("asid=%d h=%d\n", caller_asid, h));

	spin_lock(&table[h].lock);
	if ((p = lookup(h, caller_asid, l)) == NULL) {
		p = new_elem(caller_asid, l);
		enqueue(h, p);
	}

	/* register as a waiter BEFORE dropping the bucket lock, so a
	 * concurrent wakeup cannot recycle the element under us */
	p->nwait++;
	spin_unlock(&table[h].lock);

	DIAG(LOCK_DIAG, ("thread=%p p=%p sem=%d before sem_wait_caller_pri\n",
		t, p, p->sem));

	if (sem_wait_caller_pri(p->sem) < 0)
		panic("lock manager: sem_wait_caller_pri:");

	DIAG(LOCK_DIAG, ("thread=%p after sem_wait_caller_pri\n", t));

	spin_lock(&table[h].lock);
	p->nwait--;

	/* last waiter out recycles the element */
	if (p->nwait == 0) {
		dequeue(h, p);
		free_elem(p);
	}
	spin_unlock(&table[h].lock);

	DIAG(LOCK_DIAG, ("sys_lock_sleep thread=%p ended\n", t));

	return 0;
}


/*
 * Portal entry: wake one thread waiting on the mutual exclusion
 * lock at address l.
 *
 * Finds (or creates) the wait-list element for (caller_asid, l)
 * and posts its semaphore.  Creating the element when no sleeper
 * has arrived yet records the post in the semaphore, so a sleeper
 * that shows up later returns immediately (this handles the
 * wakeup-before-sleep race); the sleeper then recycles the element.
 * Returns 0; raises error(INVALID) for a NULL lock address.
 */
int
sys_lock_wakeup(Thread *t, Lock *l, int caller_asid)
{
	int h;
	Elem *p;

	DIAG(LOCK_DIAG, ("sys_lock_wakeup: thread=%p asid=%d l=%p\n",
		t, caller_asid, l));

	if (l == NULL)
		error(INVALID);

	/* locks on the heap are tracked under asid 0; must match
	 * the normalization done in sys_lock_sleep() */
	if (on_heap(l))
		caller_asid = 0;

	h = hash(caller_asid, l);
	DIAG(LOCK_DIAG, ("thread=%p asid=%d h=%d\n", t, caller_asid, h));

	spin_lock(&table[h].lock);
	if ((p = lookup(h, caller_asid, l)) == NULL) {
		p = new_elem(caller_asid, l);
		enqueue(h, p);
	}

	/* message prefix fixed: was "lock_manager:", inconsistent with
	 * every other panic in this file */
	if (sem_post(p->sem) < 0)
		panic("lock manager: sem_post:");

	spin_unlock(&table[h].lock);

	DIAG(LOCK_DIAG, ("sys_lock_wakeup thread=%p ended\n", t));

	return 0;
}


/*
 * Lock manager initialization.
 * Verifies the processor status word, registers the three portals
 * (lock sleep, lock wakeup, lock dump), then transfers control
 * back to the system initialization code via SYS_RTN_RPC.
 * Exits the task on an invalid processor status.
 */
int
main(void)
{
	printf("lock manager is active\n");

	/*
	 * Verify that we are running in user mode with interrupts
	 * enabled (no driver in the nucleus) or with interrupts
	 * disabled (there are drivers inside the nucleus).
	 */
	if (!check_psw(1, !IS_ANY_INSIDE)) {
		DIAG(LOCK_DIAG,
			("lock: invalid processor status: %08lx\n", get_psw()));
		task_exit(1);
	}

	if (portal_create(SYS_LOCK_SLEEP, "smtiai", 0,
	    (Func) sys_lock_sleep, 0) < 0)
		panic("portal_create for lock_sleep():");
	if (portal_create(SYS_LOCK_WAKEUP, "smtiai", 0,
	    (Func) sys_lock_wakeup, 0) < 0)
		panic("portal_create for lock_wakeup():");
	if (portal_create(SYS_DUMP_LOCK, "smtiii", 0,
	    (Func) sys_dump_lock, 0) < 0)
		panic("portal_create for dump_lock():");

	DIAG(LOCK_DIAG, ("before returning to initialization code\n"));

	/*
	 * Return to initialization code.
	 * Cannot just "return", since the startup code (crt0.S) calls
	 * exit when the main routine terminates.
	 */
	call_portal(SYS_RTN_RPC);
	return 1;
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -