
📄 lock.c

📁 Understanding the server's internal structure
💻 C
📖 Page 1 of 5
 * Functional description
 *      Call exit_handler for WEP.
 *
 **************************************/

exit_handler( NULL);
}
#endif

SLONG LOCK_write_data (
    PTR		request_offset,
    SLONG	data)
{
/**************************************
 *
 *	L O C K _ w r i t e _ d a t a
 *
 **************************************
 *
 * Functional description
 *	Write a longword into the lock block.
 *
 **************************************/
LBL	lock;
LRQ	request;

LOCK_TRACE (("LOCK_write_data (%ld)\n", request_offset));

request = get_request (request_offset);
acquire (request->lrq_owner);
++LOCK_header->lhb_write_data;
request = (LRQ) ABS_PTR (request_offset);/* Re-init after a potential remap */
lock = (LBL) ABS_PTR (request->lrq_lock);
remove_que (&lock->lbl_lhb_data);
if ((lock->lbl_data = data))	/* assignment intended: store the new data value */
   insert_data_que (lock);	/* re-queue the lock only when the data is non-zero */

if (lock->lbl_series < LCK_MAX_SERIES)
    ++LOCK_header->lhb_operations [lock->lbl_series];
else
    ++LOCK_header->lhb_operations [0];

release (request->lrq_owner);

return data;
}
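
/* Illustrative sketch -- not part of the original lock.c.  LOCK_write_data()
   converts request_offset back into a pointer with ABS_PTR *after* acquire(),
   because acquire() may remap (grow) the shared lock table and move its base
   address.  The sketch below shows that offset-based addressing pattern in
   isolation; every name in it (table_base, remap_table, ...) is hypothetical. */
#if 0
#include <stdio.h>
#include <stdlib.h>

static char	*table_base;			/* current mapping of the "table" */

#define ABS(offset)	((void*) (table_base + (offset)))	/* cf. ABS_PTR */

static void remap_table (size_t new_size)
{
/* realloc stands in for ISC_remap_file: the block may move, so any pointer
   computed before this call goes stale, but offsets into it stay valid. */
table_base = (char*) realloc (table_base, new_size);
}

int main (void)
{
size_t	slot = 16;				/* offset of a data slot */

remap_table (1024);
*(long*) ABS (slot) = 42;			/* pointer derived after remap */

remap_table (65536);				/* the table may move again... */
printf ("%ld\n", *(long*) ABS (slot));		/* ...but the offset still works */
return 0;
}
#endif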

static void acquire (
    PTR		owner_offset)
{
/**************************************
 *
 *	a c q u i r e
 *
 **************************************
 *
 * Functional description
 *	Acquire the lock file.  If it's busy, wait for it.
 *
 **************************************/
PTR		prior_active;
SLONG		length, spins, status;
LHB		header;
STATUS		status_vector [20];
OWN		owner;

#if (defined SOLARIS_MT && !defined SUPERSERVER)
acquire_retry:
#endif

/* Check that we aren't trying to acquire when we already own it! */
/* ASSERT_RELEASED; This will not work when the current active owner
   of the lock table falls in a remapped portion of the map
   file that we have not yet expanded (remapped) to */

#ifndef ANY_THREADING   
++LOCK_asts;
DEBUG_DELAY;
#endif

/* Measure the impact of the lock table resource as an overall
   system bottleneck.  This will be a useful metric for lock
   improvements and as a limiting factor for SMP.  A conditional
   mutex would probably be more accurate but isn't worth the
   effort. */

prior_active = LOCK_header->lhb_active_owner;

#ifndef SUPERSERVER
#ifdef MMAP_SUPPORTED
if (LOCK_owner)
    {
    /* Record a "timestamp" of when this owner requested the lock table */
    LOCK_owner->own_acquire_time = LOCK_header->lhb_acquires;
#ifdef DEV_BUILD
    /* Due to the system overhead of acquiring the actual time-of-day,
       we only get the realtime for DEV_BUILD */
    LOCK_owner->own_acquire_realtime = GET_TIME;
#endif
    LOCK_owner->own_ast_hung_flags |= OWN_hung;
    }
#endif
#endif

/* Perform a spin wait on the lock table mutex. This should only
   be used on SMP machines; it doesn't make much sense otherwise. */

status = FAILURE;
#if (defined UNIX || defined WIN_NT)
for (spins = 0; spins < LOCK_acquire_spins; ++spins)
    if ((status = ISC_mutex_lock_cond (MUTEX)) == SUCCESS)
       	break;
#endif

/* If the spin wait didn't succeed then wait forever. */

if (status != SUCCESS)
    if (ISC_mutex_lock (MUTEX))
    	bug (NULL, "semop failed (acquire)");

++LOCK_header->lhb_acquires;
if (prior_active)
    ++LOCK_header->lhb_acquire_blocks;

#if (defined UNIX || defined WIN_NT)
if (spins)
    {
    ++LOCK_header->lhb_acquire_retries;
    if (spins < LOCK_acquire_spins)
       	++LOCK_header->lhb_retry_success;
    }
#endif

prior_active = LOCK_header->lhb_active_owner;
LOCK_header->lhb_active_owner = owner_offset;

#ifdef MANAGER_PROCESS
LOCK_post_manager = FALSE;
#endif

#ifndef SUPERSERVER
#ifndef ANY_THREADING
if (LOCK_owner)
    {
    LOCK_owner->own_ast_hung_flags &= ~OWN_hung;  /* Can't be hung by OS if we got here */
    }
#endif
#endif

if (LOCK_header->lhb_length > LOCK_data.sh_mem_length_mapped
#ifdef LOCK_DEBUG_ACQUIRE
/* If we're debugging remaps occurring during acquire, force
   a remap every so often. */
    || ((debug_acquire_count++ % DEBUG_ACQUIRE_INTERVAL) == 0) 
#endif
    )
    {

    length = LOCK_header->lhb_length;
/* Do not do Lock table remapping for SUPERSERVER. Specify required
   lock table size in the configuration file */
#if !((defined SUPERSERVER) && (defined MMAP_SUPPORTED))
    header = (LHB) ISC_remap_file (status_vector, &LOCK_data, length, FALSE);
    if (!header)
#endif
	{
	bug (NULL, "remap failed");
	return;
	}
    LOCK_header = header;
    }

/* If we were able to acquire the MUTEX, but there is a prior owner marked
 * in the lock table, it means that someone died while owning
 * the lock mutex.  In that event, let's see if there is any unfinished work
 * left around that we need to finish up.
 */
if (prior_active)
    {
    SHB	recover;
    SRQ	que;

    post_history (his_active, owner_offset, prior_active, (PTR) 0, FALSE);
    recover = (SHB) ABS_PTR (LOCK_header->lhb_secondary);
    if (recover->shb_remove_node)
	{
	/* There was a remove_que operation in progress when the prior_owner died */
	DEBUG_MSG (0,("Got to the funky shb_remove_node code\n"));
	remove_que ((SRQ)ABS_PTR (recover->shb_remove_node));
	}
    else if (recover->shb_insert_que && recover->shb_insert_prior)
	{
	/* There was an insert_que operation in progress when the prior_owner died */
	DEBUG_MSG (0,("Got to the funky shb_insert_que code\n"));

	que = (SRQ) ABS_PTR (recover->shb_insert_que);
	que->srq_backward = recover->shb_insert_prior;
	que = (SRQ) ABS_PTR (recover->shb_insert_prior);
	que->srq_forward = recover->shb_insert_que;
	recover->shb_insert_que = 0;
	recover->shb_insert_prior = 0;
	}
    }
#if (defined SOLARIS_MT && !defined SUPERSERVER)
if (LOCK_solaris_stall)
    {
    if (owner_offset > 0)
        {
        OWN         first_owner;
        EVENT       event_ptr;
        SLONG       value;

        owner = (OWN) ABS_PTR (owner_offset);    /* Can't be hung by OS if we got here */
        owner->own_ast_hung_flags &= ~OWN_hung;
        first_owner = (OWN) ((UCHAR*) QUE_NEXT (LOCK_header->lhb_owners) -
                OFFSET (OWN, own_lhb_owners));
        if (first_owner->own_ast_hung_flags & OWN_hung &&
	    ((LOCK_header->lhb_acquires - first_owner->own_acquire_time)
		> STARVATION_THRESHHOLD))
            {
            first_owner->own_flags |= OWN_starved;
            if (owner->own_flags & OWN_blocking)
                {
                probe_owners (owner_offset);
                owner->own_flags &= ~OWN_blocking;
                release_mutex();
                }
            else
                {
		SLONG	ret;
                owner->own_flags |= OWN_blocking;
                owner->own_flags &= ~OWN_wakeup;
                owner->own_semaphore = 1;
                event_ptr = owner->own_stall;
                value = ISC_event_clear (event_ptr);
                release_mutex();
                ret = ISC_event_wait (1, &event_ptr, &value,
                            LOCK_solaris_stall * 1000000,
                            lock_alarm_handler, event_ptr);
#ifdef DEV_BUILD
		if (ret != SUCCESS)
		    gds__log ("LOCK: owner %d timed out while stalling for benefit of owner %d", 
		        owner_offset, REL_PTR (first_owner));
#endif
                }

            goto acquire_retry;
            }

        owner->own_flags &= ~OWN_blocking;
        }
    }
#endif

}
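
/* Illustrative sketch -- not part of the original lock.c.  acquire() spins
   on ISC_mutex_lock_cond() up to LOCK_acquire_spins times and only then
   falls back to a blocking ISC_mutex_lock(), recording the same retry
   statistics the lock header keeps.  The sketch shows that spin-then-block
   pattern with POSIX threads; spin_limit and the two counters are
   hypothetical stand-ins for LOCK_acquire_spins, lhb_acquire_retries and
   lhb_retry_success. */
#if 0
#include <pthread.h>

static pthread_mutex_t	table_mutex = PTHREAD_MUTEX_INITIALIZER;
static long		acquire_retries;	/* cf. lhb_acquire_retries */
static long		retry_success;		/* cf. lhb_retry_success   */

static void acquire_table (long spin_limit)
{
long	spins;

/* Spin: cheap non-blocking attempts.  Only worthwhile on SMP machines,
   where the current holder can release the mutex on another CPU. */
for (spins = 0; spins < spin_limit; ++spins)
    if (pthread_mutex_trylock (&table_mutex) == 0)
	break;

/* If spinning never got the mutex, wait forever on a blocking lock. */
if (spins == spin_limit)
    pthread_mutex_lock (&table_mutex);

if (spins)
    {
    ++acquire_retries;
    if (spins < spin_limit)
	++retry_success;
    }
}
#endif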

static UCHAR *alloc (
    SSHORT	size,
    STATUS	*status_vector)
{
/**************************************
 *
 *	a l l o c
 *
 **************************************
 *
 * Functional description
 *	Allocate a block of given size.
 *
 **************************************/
ULONG		length, block;
LHB		header;
OWN		owner;

size = ALIGN (size, sizeof (IPTR));
ASSERT_ACQUIRED;
block = LOCK_header->lhb_used;
LOCK_header->lhb_used += size;

/* Make sure we haven't overflowed the lock table.  If so, bump the size of
   the table */

if (LOCK_header->lhb_used > LOCK_header->lhb_length)
    {
    LOCK_header->lhb_used -= size;
/* Do not do Lock table remapping for SUPERSERVER. Specify required
   lock table size in the configuration file */
#if (!(defined SUPERSERVER) && (defined MMAP_SUPPORTED)) || defined (WIN_NT)
    length = LOCK_data.sh_mem_length_mapped + EXTEND_SIZE;
    header = (LHB) ISC_remap_file (status_vector, &LOCK_data, length, TRUE);
    if (header)
	{
	LOCK_header = header;
	ASSERT_ACQUIRED;
	LOCK_header->lhb_length = LOCK_data.sh_mem_length_mapped;
	LOCK_header->lhb_used += size;
	}
    else
#endif
	{
	/* Do not abort if there is not enough room -- just
	   return an error */

	if (status_vector)
	    {
	    *status_vector++ = gds_arg_gds;
	    *status_vector++ = gds__random;
	    *status_vector++ = gds_arg_string;
	    *status_vector++ = (STATUS) "lock manager out of room";
	    *status_vector++ = gds_arg_end;
	    }

	return (UCHAR*) NULL;
	}
    }

#ifdef DEV_BUILD
/* This version of alloc() doesn't initialize memory.  To shake out
   any bugs, in DEV_BUILD we initialize it to a "funny" pattern */
memset (ABS_PTR (block), 0xFD, size);
#endif

return ABS_PTR (block);
}
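
/* Illustrative sketch -- not part of the original lock.c.  alloc() is a
   bump allocator over the shared region: align the size, advance lhb_used,
   and when the region overflows remap it EXTEND_SIZE bytes larger.  The
   arena below is a hypothetical stand-in that grows with realloc instead
   of ISC_remap_file and never has to fix up remote pointers. */
#if 0
#include <stdlib.h>
#include <string.h>

#define EXTEND_BYTES	(64 * 1024)
#define ROUND_UP(n,a)	(((n) + (a) - 1) & ~((size_t) (a) - 1))

static char	*arena;
static size_t	arena_used;
static size_t	arena_length;

static void *arena_alloc (size_t size)
{
size_t	block;
char	*grown;

size = ROUND_UP (size, sizeof (void*));
block = arena_used;
arena_used += size;

/* Ran past the end of the arena: grow it, or back the request out. */
while (arena_used > arena_length)
    {
    grown = (char*) realloc (arena, arena_length + EXTEND_BYTES);
    if (!grown)
	{
	arena_used -= size;
	return NULL;			/* "lock manager out of room" */
	}
    arena = grown;
    arena_length += EXTEND_BYTES;
    }

memset (arena + block, 0xFD, size);	/* cf. the DEV_BUILD fill pattern */
return arena + block;
}
#endif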

static LBL alloc_lock (
    USHORT	length,
    STATUS	*status_vector)
{
/**************************************
 *
 *	a l l o c _ l o c k
 *
 **************************************
 *
 * Functional description
 *	Allocate a lock for a key of a given length.  Look first to see
 *	if a spare of the right size is sitting around.  If not, allocate
 *	one.
 *
 **************************************/
LBL	lock;
SRQ	que;

length = (length + 3) & ~3;	/* round the key length up to a multiple of 4 */

ASSERT_ACQUIRED;
QUE_LOOP (LOCK_header->lhb_free_locks, que)
    {
    lock = (LBL) ((UCHAR*) que - OFFSET (LBL, lbl_lhb_hash));
    if (lock->lbl_size == length)
	{
	remove_que (&lock->lbl_lhb_hash);
	lock->lbl_type = type_lbl;
	return lock;
	}
    }

if (lock = (LBL) alloc (sizeof (struct lbl) + length, status_vector))
    {
    lock->lbl_size = length;
    lock->lbl_type = type_lbl;
    }

/* NOTE: if the above alloc() fails do not release mutex here but rather
         release it in LOCK_enq() (as of now it is the only function that
         calls alloc_lock()). We need to hold mutex to be able
         to release a lock request block */
   

return lock;
}
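
/* Illustrative sketch -- not part of the original lock.c.  alloc_lock()
   rounds the key length up to a multiple of four, then reuses a spare lock
   block of exactly that size from lhb_free_locks before allocating a new
   one.  The sketch uses a plain singly linked free list in place of the
   intrusive srq queues; every name in it is hypothetical. */
#if 0
#include <stdlib.h>

typedef struct free_blk {
    struct free_blk	*fb_next;
    unsigned		fb_size;
} *FREE_BLK;

static FREE_BLK	free_list;

static FREE_BLK get_block (unsigned length)
{
FREE_BLK	*prev, blk;

length = (length + 3) & ~3;		/* round up to a multiple of 4 */

/* Look for a spare block of exactly the right size first. */
for (prev = &free_list; (blk = *prev); prev = &blk->fb_next)
    if (blk->fb_size == length)
	{
	*prev = blk->fb_next;		/* unlink, cf. remove_que() */
	return blk;
	}

/* Nothing suitable on the free list -- allocate a fresh block. */
if ((blk = (FREE_BLK) malloc (sizeof (struct free_blk) + length)))
    blk->fb_size = length;

return blk;
}
#endif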

#ifdef STATIC_SEMAPHORES
static USHORT alloc_semaphore (
    OWN		owner,
    STATUS	*status_vector)
{
/**************************************
 *
 *	a l l o c _ s e m a p h o r e
 *
 **************************************
 *
 * Functional description
 *	Allocate an unused semaphore.
 *
 **************************************/
USHORT	h, i;
SRQ	que;
OWN	other;
SMB	semaphores;

for (h = 0; h < 2; h++)	/* second pass retries after reclaiming semaphores from other owners */
    {
    ASSERT_ACQUIRED;
    semaphores = (SMB) ABS_PTR (LOCK_header->lhb_mask);
    for (i = 1; i < (USHORT) LOCK_sem_count; i++)
	if (semaphores->smb_mask [i / BITS_PER_LONG] & (1L << (i % BITS_PER_LONG)))
	    {
	    semaphores->smb_mask [i / BITS_PER_LONG] &= ~(1L << (i % BITS_PER_LONG));
	    owner->own_semaphore = i;
	    owner->own_wakeup [0].event_semnum = i;
	    return i;
	    }

    /* Loop thru owners to see if a semaphore is available */

    ASSERT_ACQUIRED;
    QUE_LOOP (LOCK_header->lhb_owners, que)
	{
	other = (OWN) ((UCHAR*) que - OFFSET (OWN, own_lhb_owners));
	if (other->own_semaphore & OWN_semavail)
	    release_semaphore (other);
	}
    }

bug (status_vector, "semaphores are exhausted");

return 0;
}
#endif
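
/* Illustrative sketch -- not part of the original lock.c.  alloc_semaphore()
   treats smb_mask as a bit set of free semaphores: a set bit means
   "available", and claiming semaphore i clears bit i.  The sketch shows only
   that bit-mask scan; MAX_SEMS and sem_mask are hypothetical stand-ins for
   LOCK_sem_count and smb_mask, and the mask is assumed to have been
   initialized elsewhere with the free semaphores' bits set. */
#if 0
#include <limits.h>

#define MAX_SEMS	64
#define BITS_PER_WORD	(sizeof (unsigned long) * CHAR_BIT)

static unsigned long	sem_mask [(MAX_SEMS + BITS_PER_WORD - 1) / BITS_PER_WORD];

/* Returns the claimed semaphore number, or 0 if none is free
   (semaphore 0 is never handed out, as in the original code). */
static unsigned claim_semaphore (void)
{
unsigned	i;

for (i = 1; i < MAX_SEMS; i++)
    if (sem_mask [i / BITS_PER_WORD] & (1UL << (i % BITS_PER_WORD)))
	{
	sem_mask [i / BITS_PER_WORD] &= ~(1UL << (i % BITS_PER_WORD));
	return i;
	}

return 0;				/* "semaphores are exhausted" */
}
#endif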

static void blocking_action (
    PTR		owner_offset)
{
/**************************************
 *
 *	b l o c k i n g _ a c t i o n
 *
 **************************************
 *
 * Functional description
 *	Fault handler for a blocking signal.  A blocking signal
 *	is an indication (albeit a strong one) that a blocking
 *	AST is pending for the owner.  Check in with the data
 *	structure for details.
 *	The re-post code in this routine assumes that no more
 *	than one thread of execution can be running in this
 *	routine at any time.
 *
 *      NOTE: This is a wrapper for calling blocking_action2() where 
 *		   the real action is.  This routine would be called 
 *		   from a signal_handler or blocking_action_thread()
 *		   or LOCK_re_post() where acquire() would not have 
 *		   been done.
 *
 **************************************/

/* Ignore signals that occur when executing in lock manager
   or when there is no owner block set up */

#ifndef ANY_THREADING   
if (++LOCK_asts > 1 || !owner_offset)
    {
    DEBUG_DELAY;
    if (owner_offset)
	{
	OWN	owner;
	/* This operation is done WITHOUT acquiring the lock table.
	 * As this is done at the signal level, we might be resignalling
	 * ourselves and already have the lock table acquired at this
	 * point.  If we already have it we'll hang when trying to acquire
	 * it.  The use of LOCK_asts indicates, somewhat imprecisely,
	 * that we are resignalling ourselves while the table is
	 * already acquired.
	 */
	owner = (OWN) ABS_PTR (owner_offset);
	owner->own_ast_flags &= ~OWN_signaled;
	}
    DEBUG_DELAY;
    --LOCK_asts;
    DEBUG_DELAY;
    return;
    }
#else
if (!owner_offset)
    return;
#endif

DEBUG_DELAY;

acquire (owner_offset);
blocking_action2 (owner_offset, (PTR)NULL);
#ifdef NeXT
/* make sure we're not shut down */

if (!getout)
#endif
release (owner_offset);

#ifndef ANY_THREADING
