prot_lock.c
来自「Digital的Unix操作系统VAX 4.2源码」· C语言 代码 · 共 1,176 行 · 第 1/2 页
C
1,176 行
# ifndef lintstatic char *sccsid = "@(#)prot_lock.c 4.2 (ULTRIX) 10/8/90";# endif not lint/**************************************************************** * * * Licensed to Digital Equipment Corporation, Maynard, MA * * Copyright 1985 Sun Microsystems, Inc. * * All rights reserved. * * * ****************************************************************//**//* * Modification history: * ~~~~~~~~~~~~~~~~~~~~ * * revision comments * -------- ----------------------------------------------- * * 08-Sep-90 Fred Glover * Include remote_blocked () routine to deal with * comparison of 32 byte opaque fhandles from client * perspective; continue to use blocked () for server * side comparisons where ULTRIX fhandles are comparable * only for the first 12 bytes (inode, dev, gen #). * * 01-Jun-89 Fred Glover * Update for nfssrc 4.0 * * 16-Feb-88 fglover * Add would_deadlock () routine. * * 18-Jan-88 fries * Added Header and Copyright notice. * * *//* * prot_lock.c consists of low level routines that * manipulates lock entries; * place where real locking codes reside; * it is (in most cases) independent of network code */#include <stdio.h>#include <sys/file.h>#include "prot_lock.h"#include "priv_prot.h"#include <rpcsvc/sm_inter.h>#include "sm_res.h"#define same_proc(x, y) (obj_cmp(&x->lck.oh, &y->lck.oh))static struct priv_struct priv;#define lb l_offsetreclock *wait_q; /* ptr to wait queue*/reclock *call_q; /* ptr to call back queue */struct fs_rlck *rel_fe; /* delayed fe release */struct fs_rlck *rel_me; /* delayed me release */extern int pid; /* used by status monitor*/extern char hostname[20]; /* used by remote_data() */extern int debug;extern char *xmalloc();extern int local_state;extern int used_me;extern msg_entry *retransmitted();extern struct stat_res *stat_mon();int blocked();int remote_blocked();int add_reclock();int delete_reclock();int cancel();int obj_alloc();int obj_copy();int contact_monitor();void add_wait();void remove_wait();void wakeup();void find_insert();void 
adj_len();void insert_fe();void insert_me();void insert_le();void delete_le();void insert_mp();void delete_mp();struct fs_rlck *find_fe();struct fs_rlck *remote_find_fe();struct fs_rlck *find_me();struct fs_rlck *get_fe();struct fs_rlck *copy_fe();struct fs_rlck *get_me();reclock *get_le();reclock *copy_le();reclock *search_lock();reclock *search_block_lock();bool_t inside();bool_t overlap();bool_t same_op();bool_t same_bound();bool_t same_lock();bool_t obj_cmp();bool_t remote_data();bool_t remote_clnt();/* blocked checks whether a new lock (a) will be blocked by * any previously granted lock (owned by another process). * * fp is set to point to struct fs_rlckp that points to the list * of granted reclock on the same file system. * all reclocks are in [lb, ub) in increasing lb order. * Blocked returns NULL if "a" is not blocked; * insrtp ptr to the lock where new lock should be added; * * Blocked returns 1; if "a" is blocked; rlckp ptr to the first * blocking lock; * */blocked(fp, insrtp, a) struct fs_rlck **fp; reclock **insrtp; reclock *a;{ reclock *nl; if ((*fp = find_fe(a)) == NULL) { /*cannot find fe*/ *insrtp = NULL; return(NULL); } else { /* fp is found*/ *insrtp = NULL; nl = (*fp) -> rlckp; /*set up initial insrtp value*/ while (nl != NULL && nl->lck.lb <= a->lck.lb) { *insrtp = nl; if (same_proc(nl, a) && a->lck.lb <= nl->lck.ub) /* identify an overlapped lock owned by same process */ break; if (!same_proc(nl,a) && a->lck.lb < nl->lck.ub && (nl->lck.op & LOCK_EX || a->lck.op & LOCK_EX)) /* blocked */ return(1); nl = nl->nxt; } while (nl != NULL && nl->lck.lb < a->lck.ub) { if (!same_proc(nl,a) && a->lck.lb < nl->lck.ub && (nl->lck.op & LOCK_EX || a->lck.op & LOCK_EX)) { /*blocked*/ *insrtp = nl; return(1); } nl = nl->nxt; } return(NULL); }}/* * * remote_blocked is a clone of blocked. it is called only from the * remote_lock function. 
A modification is required as a result of the V4.0 * ULTRIX fhandle redefinition (on behalf of the modified export support): * it is now the case that ULTRIX fhandles may compare unequal * and yet still point to the same file on the server. * the server lockd can "be aware" of the actual composition of the server * fhandle format, and can act accordingly, but the client lockd must treat * the entire fhandle as an opaque object. The difference introduced is in * the fhandle compare routine: * the ULTRIX V4.0 server lockd must compare only the first 12 * bytes (inode #, fsys id, gen #) to determine if two fhandles match. * The client lockd, which will end up calling this routine via remote_lock, * must compare the entire fhandle (or at least the length as defined by the * NFSSRC reference code), since a non_ULTRIX server may use any * fhandle format. - Fred G. * * remote_blocked checks whether a new lock (a) will be blocked by * any previously granted lock (owned by another process on this node). * * fp is set to point to struct fs_rlckp that points to the list * of granted reclock on the same file system. * all reclocks are in [lb, ub) in increasing lb order. 
* Blocked returns NULL if "a" is not blocked; * insrtp ptr to the lock where new lock should be added; * * Blocked returns 1; if "a" is blocked; rlckp ptr to the first * blocking lock; * */remote_blocked(fp, insrtp, a) struct fs_rlck **fp; reclock **insrtp; reclock *a;{ reclock *nl; if ((*fp = remote_find_fe(a)) == NULL) { /*cannot find fe*/ *insrtp = NULL; return(NULL); } else { /* fp is found*/ *insrtp = NULL; nl = (*fp) -> rlckp; /*set up initial insrtp value*/ while (nl != NULL && nl->lck.lb <= a->lck.lb) { *insrtp = nl; if (same_proc(nl, a) && a->lck.lb <= nl->lck.ub) /* identify an overlapped lock owned by same process */ break; if (!same_proc(nl,a) && a->lck.lb < nl->lck.ub && (nl->lck.op & LOCK_EX || a->lck.op & LOCK_EX)) /* blocked */ return(1); nl = nl->nxt; } while (nl != NULL && nl->lck.lb < a->lck.ub) { if (!same_proc(nl,a) && a->lck.lb < nl->lck.ub && (nl->lck.op & LOCK_EX || a->lck.op & LOCK_EX)) { /*blocked*/ *insrtp = nl; return(1); } nl = nl->nxt; } return(NULL); }}/* add_reclock modifies existing reclock list ptr to by fp->rlckp and * add new lock requets "a" starting from position ptr to by * insrtp; * add_reclock returns -1; if no more fe or le entry is available */add_reclock(fp, insrtp, a) struct fs_rlck *fp; reclock *insrtp; reclock *a;{ reclock *nl; int ind; if (fp == NULL) { /*create new fe entry*/ if ((fp = (struct fs_rlck *) a->pre_fe) == NULL) { fprintf(stderr, "(%x)pre_fe == NULL\n", a); abort(); } a->pre_fe = NULL; insert_fe(fp); insert_le(fp, insrtp, a); /*simplest case*/ if (a->pre_le != NULL) { free_le(a->pre_le); a->pre_le = NULL; } return(0); } else { if (a->pre_fe != NULL) { free_fe(a->pre_fe); a->pre_fe = NULL; } if ( insrtp != NULL && same_proc(insrtp, a) && same_op(insrtp, a) && inside(a, insrtp)) { /* lock exists */ if (insrtp == a) { /* extra protection */ fprintf(stderr, "add_reclock: insrtp = a = %x should not happen!!!\n", a); } else { a->rel = 1; } if (a->pre_le != NULL) { free_le(a->pre_le); a->pre_le = NULL; } 
return(0); } /* delete all reclock owned by the same process from *insrtp */ ind = delete_reclock(fp, &insrtp, a); if (a->pre_le != NULL) { free_le(a->pre_le); a->pre_le = NULL; } /* check to see if a'lower bound is connected with *insrtp */ if (insrtp != NULL) { nl = insrtp; if (same_proc(nl, a) && same_op(nl, a) && nl->lck.ub == a->lck.lb){ a->lck.lb = nl->lck.lb; adj_len(a); insrtp = insrtp->prev; delete_le(fp, nl); nl->rel = 1; release_le(nl); } } /* check to see if a's upper bound is connected to another lock */ nl = fp->rlckp; while (nl != NULL && nl->lck.lb <= a->lck.ub) { if (same_proc(nl, a) && same_op(nl, a) && nl->lck.lb == a->lck.ub) { /*entend*/ a->lck.ub = nl->lck.ub; adj_len(a); delete_le(fp, nl); nl->rel = 1; release_le(nl); break; } else nl = nl->nxt; } insert_le(fp, insrtp, a); if (!remote_data(a) && a->lck.op & LOCK_SH && ind == 2) /*lock_ex => lock_sh*/ wakeup(a); return(0); }}/* * delete_reclock delete locks in the reclock list ptr to by fp->rlckp * (starting from insrtp position) * that are owned by the same process as "a" and in the * ranges specified by "a". * if the lock ptr to by *insrtp has been deleted; insrtp is modified * to ptr to the lock before; this parameter is used for add_reclock only * * delete_reclock returns -1 if deletion requires new le and no more le * is available; * delete_reclock returns 0 if no lock is deleted; * delete_reclock returns 1 if only shared locks have been deleted; * delele_reclock returns 2 if some exclusive locks have been deleted; * this return value is used to determine if add_reclock cause downgrade * an exclusive and should call wakeup() or if remote lock manager * needs to be contacted for deletion. 
*/delete_reclock(fp, insrtp, a) struct fs_rlck *fp; reclock **insrtp; reclock *a;{ reclock *nl; reclock *next; int lock_ex, lock_sh; reclock *new; lock_ex = 0; lock_sh = 0; if ( *insrtp == NULL) next = fp->rlckp; else next = *insrtp; while ((nl = next) != NULL && nl->lck.lb < a->lck.ub) { /*nl->nxt may change; has to assign next value here*/ next = nl ->nxt; if (same_proc(nl, a) && nl->lck.ub > a->lck.lb) { /*overlap*/ if (nl->lck.op & LOCK_EX) lock_ex ++; else lock_sh ++; if (inside(nl, a)) { /*delete complete*/ if ( nl == *insrtp) *insrtp = (*insrtp)->prev; delete_le(fp, nl); nl->rel = 1; release_le(nl); /*return to free list*/ } else if ( nl->lck.ub > a->lck.ub && nl->lck.lb < a->lck.lb) { /*break into half*/ if ((new = a->pre_le) == NULL) { /* no more lock entry */ fprintf(stderr, "(%x) pre_le is NULL\n", a); abort(); } else { a->pre_le = NULL; /* no need to test return value here, since add_mon returns -1 only with "new" mp addition */ if (add_mon(new, 1) == -1) fprintf(stderr, "add_mon = -1 in delete, should not happen\n"); new->lck.ub = nl->lck.ub; nl->lck.ub = a->lck.lb; adj_len(nl); new->lck.lb = a->lck.ub; new->lck.op = nl->lck.op; new->block = nl->block; new->exclusive = nl->exclusive; adj_len(new); find_insert(fp, new); } } else if (a->lck.lb > nl->lck.lb) { /* first half remains */ nl->lck.ub = a->lck.lb; adj_len(nl); } else if (a->lck.ub < nl->lck.ub) { /*second half remains */ nl->lck.lb = a->lck.ub; adj_len(nl); if (nl == *insrtp) *insrtp = (*insrtp)->prev; delete_le(fp, nl); find_insert(fp, nl); } else printf("impossible!\n"); } } if (lock_ex > 0) /* some exclusive lock has been deleted */ return(2); else if (lock_sh > 0 ) /* some shared lock has been deleted */ return(1); else /* no deletion*/ return(0);}/* * cancel returns 0, if lock is cancelled: * either not found or remove from wait_q; * cancel return -1; if lock is already granted; */intcancel(a) reclock *a;{ reclock *nl; if ((nl = search_lock(a)) == NULL) /* lock not found */ return(0); else 
{ if (nl->w_flag == 0) /* lock already granted */ return(-1); else { remove_wait(nl); nl->rel = 1; release_le(nl); return(0); } }}/* * search_lock locates an identical lock as a in either grant_q or wait_q * search_lock returns NULL if not found; */reclock *search_lock(a) reclock *a;{ struct fs_rlck *fp; reclock *nl; if ( blocked(&fp, &nl, a) == NULL) { /* not blocked */ if (nl != NULL && same_proc(nl, a) && same_bound(nl, a) && same_op(nl, a)) return(nl); /* found in grant_q */ } /* search in wait_q */ return(search_block_lock(a));}/* * return nl if nl in wait queue matches a; * return NULL if not found */reclock *search_block_lock(a) reclock *a;{ reclock *nl; nl = wait_q; while (nl != NULL) { if (same_lock(nl, a)) return(nl); else nl = nl->wait_nxt; } return(NULL);}/* * add wait adds a to the end of wait queue wait_q; */voidadd_wait(a) reclock *a;{ reclock *nl, *next; a->w_flag = 1; /*set wait_flag */ if (a->pre_le != NULL) { free_le(a->pre_le); a->pre_le = NULL; } if ((nl = wait_q) == NULL) { wait_q = a; return; } else { while (nl != NULL) { if (same_lock(nl, a)) { if (debug) printf("same blocking lock already exists\n"); a->rel = 1; return; } next = nl; nl = nl->wait_nxt; } next->wait_nxt = a; a->wait_prev = next; }}voidremove_wait(a) reclock *a;{ a->w_flag = 0; /* remove wait flag */ if (a->wait_prev == NULL) wait_q = a->wait_nxt; else a->wait_prev->wait_nxt = a->wait_nxt; if (a->wait_nxt != NULL) a->wait_nxt->wait_prev =a->wait_prev;}/* * wakeup searches wait_queue to wake up lock that is * waiting for area [a->lb, a->ub) * wakeup is called when a delete is successful or when an * exclusive lock is downgraded to a shared lock. 
*/voidwakeup(a) reclock *a;{ reclock *nl; struct fs_rlck *fp; reclock *insrtp; msg_entry *msgp; if ((nl = wait_q) == NULL) return; else while (nl != NULL) { if (overlap(nl, a) && blocked(&fp, &insrtp, nl) == 0) { if (remote_clnt(nl)) { if (add_call(nl) != -1){ if (add_reclock(fp, insrtp, nl) == -1) fprintf(stderr, "no lock available\n"); remove_wait(nl); } else { fprintf(stderr, "wakeup(%x) cannot take place due to add_call malloc error\n", nl); } } else {/* local clnt, check msg queue */ if (add_reclock(fp, insrtp, nl) == -1) fprintf(stderr, "no lock available\n"); remove_wait(nl);
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?