lock.c (C source) -- from the collection "Understanding the Server's Internal Structure" (了解服务器的内部结构)

 * Functional description
 *	Downgrade an existing lock returning
 *	its new state.
 *
 **************************************/
LBL	lock;
LRQ	request, pending;
SRQ	que;
UCHAR	state, pending_state;
OWN	owner;
PTR	owner_offset;

LOCK_TRACE (("LOCK_downgrade (%d)\n", request_offset));

request = get_request (request_offset);
owner_offset = request->lrq_owner;
owner = (OWN) ABS_PTR (owner_offset);
if (!owner->own_count)
    return FALSE;

acquire (owner_offset);
owner = NULL;				/* acquire() may remap the region, invalidating this pointer */
++LOCK_header->lhb_downgrades;

request = (LRQ) ABS_PTR (request_offset);/* Re-init after a potential remap */
lock = (LBL) ABS_PTR (request->lrq_lock);
pending_state = LCK_none;

/* Loop through the requests looking for pending conversions,
   and find the highest requested state */

QUE_LOOP (lock->lbl_requests, que)
    {
    pending = (LRQ) ((UCHAR*) que - OFFSET (LRQ, lrq_lbl_requests));
    if (pending->lrq_flags & LRQ_pending && pending != request)
	{
	pending_state = MAX (pending->lrq_requested, pending_state);
	if (pending_state == LCK_EX)
	    break;
	}
    }
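
/* LCK_EX is the strongest requestable level, so once a pending exclusive
   request is seen no higher state is possible and the scan stops early. */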

for (state = request->lrq_state;
     state > LCK_none && !COMPATIBLE (pending_state, state);
     --state);
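
/* Here state holds the strongest level, at or below the request's current
   state, that is compatible with the strongest pending conversion; if no
   level is compatible the search bottoms out at LCK_none and the request
   is dequeued below. */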

if (state == LCK_none || state == LCK_null)
    {
    dequeue (request_offset);
    release (owner_offset);
    state = LCK_none;
    }
else
    convert (request_offset, state, FALSE,
	     request->lrq_ast_routine, request->lrq_ast_argument, status_vector);

return state;
}
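
/* Illustrative sketch, not part of the original source: the downward search
   above relies on a pairwise compatibility test (the COMPATIBLE macro,
   defined elsewhere in this file).  The matrix below is an assumed example
   of such a test for the classic six-level lock hierarchy; the real table
   lives in the lock manager's headers.  Guarded out so the listing compiles
   unchanged. */
#ifdef LOCK_COMPAT_EXAMPLE
/* Non-zero if a lock granted at level `granted' can coexist with a request
   at level `requested'.  Indices: 0 none, 1 null, 2 shared read,
   3 protected read, 4 shared write, 5 protected write, 6 exclusive. */
static int example_compatible (
    int		granted,
    int		requested)
{
static const int	compat [7][7] = {
	{1, 1, 1, 1, 1, 1, 1},		/* none */
	{1, 1, 1, 1, 1, 1, 1},		/* null */
	{1, 1, 1, 1, 1, 1, 0},		/* shared read */
	{1, 1, 1, 1, 0, 0, 0},		/* protected read */
	{1, 1, 1, 0, 1, 0, 0},		/* shared write */
	{1, 1, 1, 0, 0, 0, 0},		/* protected write */
	{1, 1, 0, 0, 0, 0, 0}};		/* exclusive */

return compat [granted][requested];
}

/* Mirrors the for-loop in LOCK_downgrade: step down from the current level
   until the strongest pending request is satisfied, or hit level none. */
static int example_downgrade_level (
    int		current,
    int		pending)
{
int	state;

for (state = current; state > 0 && !example_compatible (pending, state); --state)
    ;

return state;	/* 0 (none) if no compatible level exists */
}
#endif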

SLONG LOCK_enq (
    PTR		prior_request,
    PTR		parent_request,
    USHORT	series,
    UCHAR	*value,
    USHORT	length,
    UCHAR	type,
    int		(*ast_routine)(void *),
    void	*ast_argument,
    SLONG	data,
    SSHORT	lck_wait,
    STATUS	*status_vector,
    PTR		owner_offset)
{
/**************************************
 *
 *	L O C K _ e n q
 *
 **************************************
 *
 * Functional description
 *	Enqueue on a lock.  If the lock can't be granted immediately,
 *	return an event count on which to wait.  If the lock can't
 *	be granted because of deadlock, return NULL.
 *
 **************************************/
LBL	lock;
LRQ	request;
OWN	owner;
PTR	parent, request_offset;
UCHAR	*p;
SLONG	lock_id;
USHORT	hash_slot, *ps;
SSHORT	l;


#ifdef  WINDOWS_ONLY
lck_wait = 0;		/* LIBS is always no-wait */
#endif

LOCK_TRACE (("LOCK_enq (%d)\n", parent_request));

owner = (OWN) ABS_PTR (owner_offset);
if (!owner_offset || !owner->own_count)
    return 0;

acquire (owner_offset);
owner = NULL;				/* acquire() may remap the region, invalidating this pointer */

ASSERT_ACQUIRED;
++LOCK_header->lhb_enqs;

#ifdef VALIDATE_LOCK_TABLE
if ((LOCK_header->lhb_enqs % 50) == 0)
    validate_lhb (LOCK_header);
#endif

if (prior_request)
    dequeue (prior_request);

if (parent_request)
    {
    request = get_request (parent_request);
    parent = request->lrq_lock;
    }
else
    parent = 0;

/* Allocate or reuse a lock request block */

ASSERT_ACQUIRED;
if (QUE_EMPTY (LOCK_header->lhb_free_requests))
    {
    if (!(request = (LRQ) alloc (sizeof (struct lrq), status_vector)))
	{
	release (owner_offset);
	return 0;
	}
    }
else
    {
    ASSERT_ACQUIRED;
    request = (LRQ) ((UCHAR*) QUE_NEXT (LOCK_header->lhb_free_requests) - 
	OFFSET (LRQ, lrq_lbl_requests));
    remove_que (&request->lrq_lbl_requests);
    }

owner = (OWN) ABS_PTR (owner_offset);/* Re-init after a potential remap */
post_history (his_enq, owner_offset, NULL, REL_PTR (request), TRUE);

request->lrq_type = type_lrq;
request->lrq_flags = 0;
request->lrq_requested = type;
request->lrq_state = LCK_none;
request->lrq_data = 0;
request->lrq_owner = owner_offset;
request->lrq_ast_routine = ast_routine;
request->lrq_ast_argument = ast_argument;
insert_tail (&owner->own_requests, &request->lrq_own_requests);
QUE_INIT (request->lrq_own_blocks);

/* See if the lock already exists */

/* #ifdef WINDOWS_ONLY */
/* while the access is denied, wait around. If LOITER_loiter signals
   to break out of the lock, return with an error indicator. */
/*
while (lock = find_lock (parent, series, value, length, &hash_slot))
    if (!LOITER_loiter ())
	{
	ASSERT_RELEASED;
	return 0;
	}

#else
*/
if (lock = find_lock (parent, series, value, length, &hash_slot))
    {
    if (series < LCK_MAX_SERIES)
        ++LOCK_header->lhb_operations [series];
    else
        ++LOCK_header->lhb_operations [0];
    
    insert_tail (&lock->lbl_requests, &request->lrq_lbl_requests);
    request->lrq_data = data;
    if (!(lock_id = grant_or_que (request, lock, lck_wait)))
	{
	*status_vector++ = gds_arg_gds;
	*status_vector++ = (lck_wait > 0) ? gds__deadlock :
			((lck_wait < 0) ? gds__lock_timeout : gds__lock_conflict);
	*status_vector++ = gds_arg_end;
	}
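
    /* The vector written above follows the tagged status-vector convention:
       gds_arg_gds introduces an error code and gds_arg_end terminates the
       cluster.  The code reflects the wait mode: an indefinite wait
       (lck_wait > 0) reports gds__deadlock, a timed wait (lck_wait < 0)
       gds__lock_timeout, and a no-wait request gds__lock_conflict. */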
    ASSERT_RELEASED;
    return lock_id;
    }
/* #endif */

/* Lock doesn't exist.  Allocate lock block and set it up. */

request_offset = REL_PTR (request);

if (!(lock = alloc_lock (length, status_vector)))
    {
    /* lock table is exhausted */
    /* release request gracefully */
    remove_que (&request->lrq_own_requests);
    request->lrq_type = type_null;
    insert_tail (&LOCK_header->lhb_free_requests, &request->lrq_lbl_requests);
    release (owner_offset); 
    return 0;
    }
lock->lbl_state = type;
lock->lbl_parent = parent;
lock->lbl_series = series;

/* Maintain lock series data queue */

QUE_INIT (lock->lbl_lhb_data);
if (lock->lbl_data = data)
   insert_data_que (lock);

if (series < LCK_MAX_SERIES)
    ++LOCK_header->lhb_operations [series];
else
    ++LOCK_header->lhb_operations [0];

#ifdef NeXT
lock->lbl_eventcount = 0;
#endif
lock->lbl_flags = 0;
lock->lbl_pending_lrq_count = 0;

#ifndef mpexl
for (l = LCK_max, ps = lock->lbl_counts; l--;)
    *ps++ = 0;
#else
for (l = LCK_max, p = lock->lbl_counts; l--;)
    *p++ = 0;
#endif
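
/* The loops above zero the per-level grant counts (LCK_max slots in
   lbl_counts).  The mpexl branch walks the array with a byte pointer,
   which suggests the counters are byte-sized on that platform; elsewhere
   they are USHORTs. */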

if (lock->lbl_length = length)
    {
    p = lock->lbl_key;
    do *p++ = *value++; while (--length);
    }

request = (LRQ) ABS_PTR (request_offset);

QUE_INIT (lock->lbl_requests);
ASSERT_ACQUIRED;
insert_tail (&LOCK_header->lhb_hash [hash_slot], &lock->lbl_lhb_hash);
insert_tail (&lock->lbl_requests, &request->lrq_lbl_requests);
request->lrq_lock = REL_PTR (lock);
grant (request, lock);
lock_id = REL_PTR (request);
release (request->lrq_owner);

return lock_id;
}
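
/* Illustrative usage sketch, not part of the original source: a minimal
   call to LOCK_enq as defined above.  The key bytes and series number are
   invented for the example; LCK_EX is the exclusive level referenced in
   LOCK_downgrade.  Guarded out so the listing compiles unchanged. */
#ifdef LOCK_ENQ_EXAMPLE
static SLONG example_enqueue (
    PTR		owner_offset,
    STATUS	*status_vector)
{
UCHAR	key [4] = {'D', 'E', 'M', 'O'};	/* lock name -- invented */

/* Request an exclusive lock on the key in series 0, waiting indefinitely
   (lck_wait > 0).  A zero return means the request failed; status_vector
   then carries gds__deadlock, gds__lock_timeout or gds__lock_conflict as
   filled in by LOCK_enq. */

return LOCK_enq (0,			/* no prior request to convert */
		 0,			/* no parent lock */
		 0,			/* series -- invented */
		 key, sizeof (key),
		 LCK_EX,		/* requested level */
		 NULL,			/* no blocking AST routine */
		 NULL,			/* no AST argument */
		 0,			/* no lock data */
		 1,			/* positive lck_wait: wait */
		 status_vector,
		 owner_offset);
}
#endif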

void LOCK_fini (
    STATUS	*status_vector,
    PTR		*owner_offset)
{
/**************************************
 *
 *	L O C K _ f i n i
 *
 **************************************
 *
 * Functional description
 *	Release the owner block and any outstanding locks.
 *	The exit handler will unmap the shared memory.
 *
 **************************************/
OWN		owner;
PTR		offset;

LOCK_TRACE (("LOCK_fini(%ld)\n", *owner_offset));

offset = *owner_offset;
owner = (OWN) ABS_PTR (offset);
if (!offset || !owner->own_count)
    return;

if (--owner->own_count > 0 || !LOCK_header)
    return;

#ifndef SUPERSERVER
#if (defined WIN_NT || defined OS2_ONLY || defined NEXT || defined SOLARIS_MT)
shutdown_blocking_thread (status_vector);
#else
#ifdef MMAP_SUPPORTED
if (LOCK_owner)
    { 
    ISC_unmap_object (status_vector, &LOCK_data, &LOCK_owner, sizeof (struct own));
    LOCK_owner_offset = 0;
    }
#endif
#endif
LOCK_owner = NULL_PTR;
#endif

#ifndef NeXT
if (LOCK_header->lhb_active_owner != offset)
#endif
    {
    acquire (offset);
    owner = (OWN) ABS_PTR (offset);	/* Re-init after a potential remap */
    }

if (LOCK_pid == owner->own_process_id)
    purge_owner (offset, owner);

release_mutex();

#if !(defined NETWARE_386 || defined WIN_NT || defined OS2_ONLY || defined SOLARIS_MT || POSIX_THREADS)
ISC_signal_cancel (LOCK_block_signal, blocking_action, (void*) offset);
#endif

*owner_offset = NULL;
}
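
/* Illustrative lifecycle sketch, not part of the original source: LOCK_init
   (below) and LOCK_fini bracket an owner's use of the lock manager.
   LOCK_init creates or reuses an owner block and bumps its use count;
   LOCK_fini decrements it and, only on the final release, purges the owner
   and its outstanding locks.  Roughly -- with the status vector size and
   the owner id/type assumed for illustration:

	STATUS	status [20];
	PTR	owner = 0;

	if (LOCK_init (status, TRUE, my_owner_id, my_owner_type,
		       &owner) != SUCCESS)
	    return;			-- status holds the error
	... LOCK_enq / LOCK_downgrade calls naming owner ...
	LOCK_fini (status, &owner);	-- owner is zeroed on final release
*/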

int LOCK_init (
    STATUS	*status_vector,
    SSHORT	owner_flag,
    SLONG	owner_id,
    UCHAR	owner_type,
    SLONG	*owner_handle)
{
/**************************************
 *
 *	L O C K _ i n i t
 *
 **************************************
 *
 * Functional description
 *	Initialize lock manager for the given owner, if not already done.
 *
 *	Initialize an owner block in the lock manager, if not already
 *	initialized.
 *
 *	Return the offset of the owner block through owner_handle.
 *
 *	Return SUCCESS or FAILURE.
 *
 **************************************/
OWN		owner;
#ifdef NeXT
cthread_t	port_waker_c_thread;
#endif
#if (defined OS2_ONLY || defined SOLARIS_MT)
ULONG		status;
#endif

LOCK_TRACE (("LOCK_init (ownerid=%ld)\n", owner_id));

/* If everything is already initialized, just bump the use count. */

if (*owner_handle)
    {
    owner = (OWN) ABS_PTR (*owner_handle);
    owner->own_count++;
    return SUCCESS;
    }

if (!LOCK_header)
    {
    /* We haven't yet mapped the shared region.  Do so now. */

    start_manager = FALSE;
    if (init_lock_table (status_vector) != SUCCESS)
	return FAILURE;
    } 
if (owner_flag && 
    create_owner (status_vector, owner_id, owner_type, owner_handle) != SUCCESS)
    return FAILURE;

#ifndef  SUPERSERVER
#if defined(SCO_EV) || defined(LINUX)
/* 5.5 SCO port: might also help other classic ports, but not sure.  This
   and a few subsequent pieces of code solve the problem of gds_drop
   giving the following message:

   Fatal lock manager error: semop failed (acquire), errno: 22
   --Invalid argument

   If this happens on another classic platform, add that platform too. - Shailesh
*/
if ( LOCK_owner_offset = *owner_handle )  
#else
    LOCK_owner_offset = *owner_handle;
#endif
    LOCK_owner = (OWN) ABS_PTR (*owner_handle);
#endif

#if !(defined NETWARE_386 || defined WIN_NT || defined OS2_ONLY || defined SOLARIS_MT || POSIX_THREADS)
#if defined(SCO_EV) || defined(LINUX)
if (LOCK_owner_offset )   /* 5.5 SCO port: gds_drop */
#endif
    ISC_signal (LOCK_block_signal, blocking_action, (void*) LOCK_owner_offset);
#endif
    
/* Initialize process-level stuff for the different platforms.
   This should be done after the call to create_owner() that
   initializes owner_handle. */

#if (defined WIN_NT && !defined SUPERSERVER)
owner = (OWN) ABS_PTR (*owner_handle);
wakeup_event [0] = owner->own_wakeup_hndl;
blocking_event [0] = ISC_make_signal (TRUE, FALSE, LOCK_pid, LOCK_block_signal);
owner->own_blocking_hndl = blocking_event [0];
AST_ALLOC;
if (gds__thread_start ((FPTR_INT) blocking_action_thread,
    &LOCK_owner_offset, THREAD_critical, 0, NULL_PTR))
    {
    *status_vector++ = gds_arg_gds;
    *status_vector++ = gds__lockmanerr;
    *status_vector++ = gds_arg_gds;
    *status_vector++ = gds__sys_request;
    *status_vector++ = gds_arg_string;
    *status_vector++ = (STATUS) "CreateThread";
    *status_vector++ = gds_arg_win32;
    *status_vector++ = GetLastError();
    *status_vector++ = gds_arg_end;
    CloseHandle (blocking_event [0]);
    CloseHandle (wakeup_event [0]);
    return FAILURE;
    }
#endif

#ifdef OS2_ONLY
DosCreateEventSem (NULL, blocking_event, DC_SEM_SHARED, 0);
owner = (OWN) ABS_PTR (*owner_handle);
owner->own_blocking_hndl = blocking_event [0];
if (status = gds__thread_start ((FPTR_INT) blocking_action_thread,
    &LOCK_owner_offset, THREAD_medium_high, 0, NULL_PTR))
    {
    *status_vector++ = gds_arg_gds;
    *status_vector++ = gds__lockmanerr;
    *status_vector++ = gds_arg_gds;
    *status_vector++ = gds__sys_request;
    *status_vector++ = gds_arg_string;
    *status_vector++ = (STATUS) "DosCreateThread";
    *status_vector++ = gds_arg_dos;
    *status_vector++ = status;
    *status_vector++ = gds_arg_end;
    DosCloseEventSem (blocking_event [0]);
    return FAILURE;
    }
#endif

#ifdef NeXT
port_allocate (task_self(), &alarm_port);
null_msg = (msg_header_t *) message_buf;
null_msg->msg_remote_port = alarm_port;
null_msg->msg_size = sizeof (msg_header_t);
null_msg1 = (msg_header_t *) message_buf1;
null_msg1->msg_local_port = alarm_port;
null_msg1->msg_size = sizeof (msg_header_t);
port_waker_c_thread = cthread_fork ((cthread_fn_t) port_waker, (any_t) &LOCK_owner_offset);
cthread_detach (port_waker_c_thread);
cthread_thread (port_waker_c_thread);

ISC_signal (LOCK_wakeup_signal, wakeup_action, (void*) &LOCK_owner_offset);

condition_set = 0;
owner = (OWN) ABS_PTR (*owner_handle);
condition_init (&owner->own_NeXT_semaphore);
#endif

#ifndef SUPERSERVER
#ifdef MMAP_SUPPORTED
#ifdef SOLARIS_MT
/* Map the owner block separately so that threads waiting
   on synchronization variables embedded in the owner block
   don't have to coordinate during lock table unmapping. */

if (!(LOCK_owner = (OWN) ISC_map_object (status_vector, &LOCK_data,
	    LOCK_owner_offset, sizeof (struct own))))
    return FAILURE;
AST_ALLOC;
if (status = gds__thread_start ((FPTR_INT) blocking_action_thread,
    &LOCK_owner_offset, THREAD_high, 0, NULL_PTR))
    {
    *status_vector++ = gds_arg_gds;
    *status_vector++ = gds__lockmanerr;
    *status_vector++ = gds_arg_gds;
    *status_vector++ = gds__sys_request;
    *status_vector++ = gds_arg_string;
    *status_vector++ = (STATUS) "thr_create";
    *status_vector++ = gds_arg_unix;
    *status_vector++ = status;
    *status_vector++ = gds_arg_end;
    return FAILURE;
    }
