/* lock.c */
#else
#if defined(SCO_EV) || defined(LINUX)
if ( LOCK_owner_offset ) /* 5.5 SCO Port: gds_drop */
#endif
if (!(LOCK_owner = (OWN) ISC_map_object (status_vector, &LOCK_data,
LOCK_owner_offset, sizeof (struct own))))
return FAILURE;
#endif
#endif
#endif
#ifdef MANAGER_PROCESS
if (start_manager)
{
start_manager = FALSE;
if (!fork_lock_manager (status_vector))
return FAILURE;
}
#endif
return SUCCESS;
}
#ifdef MANAGER_PROCESS
void LOCK_manager (
PTR manager_owner_offset)
{
/**************************************
*
* L O C K _ m a n a g e r
*
**************************************
*
* Functional description
* LOCK_manager is used only in the privileged lock manager
* process. The routine simply waits on its semaphore. If
* the semaphore is poked, it wakes up, looks for blocking
* signals to be delivered, delivers them, and goes back to
* sleep.
*
**************************************/
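/* Rough flow of the code below: register this process as the lock manager,
then loop delivering blocking signals to every owner flagged OWN_signal,
purging any owner that can no longer be signalled, and sleep on our wakeup
event until poked or until the alarm expires. The loop ends when
LHB_shut_manager is set (or, with TERMINATE_IDLE_LOCK_MANAGER defined, when
this is the last process mapping the lock table). */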
OWN manager_owner, owner;
SRQ que;
int ret = FAILURE;
STATUS local_status [20];
SLONG value;
USHORT semaphore;
EVENT event_ptr;
#ifdef DEBUG
ULONG signal_counter = 0;
#endif
#ifdef VALIDATE_LOCK_TABLE
ULONG manager_counter = 0;
#endif
LOCK_TRACE (("LOCK_manager\n"));
acquire (manager_owner_offset);
#ifdef VALIDATE_LOCK_TABLE
validate_lhb (LOCK_header);
#endif
/* If there already is a lock manager running, quietly return */
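/* Note: a non-zero return from signal_owner is taken to mean the signal
could not be delivered (presumably the registered manager process has died),
so that owner is purged and the check is retried; otherwise a live manager
already exists and this process simply returns. */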
while (owner = get_manager (FALSE))
if (signal_owner (owner, (PTR)NULL))
purge_owner (manager_owner_offset, owner);
else
{
DEBUG_MSG (0,("LOCK_manager, pid %ld quitting, pid %ld is already manager\n", LOCK_pid, owner->own_process_id));
release (manager_owner_offset);
return;
}
/* Declare ourselves to be lock manager process */
DEBUG_MSG (0,("LOCK_manager, pid %ld becoming manager\n", LOCK_pid));
manager_owner = (OWN) ABS_PTR (manager_owner_offset);
manager_owner->own_flags |= OWN_manager;
LOCK_process_owner.own_flags |= OWN_manager;
#ifdef STATIC_SEMAPHORES
semaphore = alloc_semaphore (manager_owner, NULL);
#else
manager_owner->own_semaphore = 1;
#endif
ASSERT_ACQUIRED;
LOCK_header->lhb_manager = manager_owner_offset;
LOCK_header->lhb_flags &= ~LHB_shut_manager;
release (manager_owner_offset);
/* Loop, waiting for something to happen */
chmod (LOCK_HEADER, 0444);
for (;;)
{
acquire (manager_owner_offset);
#ifdef VALIDATE_LOCK_TABLE
if ((manager_counter++ % 100) == 0)
validate_lhb (LOCK_header);
#endif
manager_owner = (OWN) ABS_PTR (manager_owner_offset);
if (LOCK_header->lhb_flags & LHB_shut_manager)
{
purge_owner (manager_owner_offset, manager_owner);
release_mutex();
break;
}
ASSERT_ACQUIRED;
QUE_LOOP (LOCK_header->lhb_owners, que)
{
owner = (OWN) ((UCHAR*) que - OFFSET (OWN, own_lhb_owners));
if (owner->own_flags & OWN_signal)
if (signal_owner (owner, (PTR)NULL))
{
que = (SRQ) ABS_PTR (que->srq_backward);
purge_owner (manager_owner_offset, owner);
}
else
{
owner->own_flags &= ~OWN_signal;
#ifdef DEBUG
if ((++signal_counter % 1000) == 0)
DEBUG_MSG (1,("LOCK_manager: delivered %ld signals\n", signal_counter));
#endif
}
}
event_ptr = manager_owner->own_wakeup;
value = ISC_event_clear (manager_owner->own_wakeup);
release (manager_owner_offset);
/* Prepare to wait for a timeout or a wakeup from somebody else. Start
by setting an alarm clock. */
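/* The timeout passed to ISC_event_wait appears to be in microseconds:
LOCKMANTIMEOUT (presumably a value in seconds) is scaled by 1000000. */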
ret = ISC_event_wait (1, &event_ptr, &value,
LOCKMANTIMEOUT*1000000, lock_alarm_handler, event_ptr);
#ifdef DEBUG
if (ret == FAILURE)
DEBUG_MSG (1,("LOCK_manager timer wakeup\n"));
#endif
#ifdef TERMINATE_IDLE_LOCK_MANAGER
/* The code to terminate an idle lock manager
* was disabled around Dec 1993 as part of the Solaris port.
* This was due to customer complaints about running InterBase and
* Oracle on the same machine. When Oracle starts up it grabs all
* the system resources, leaving none for InterBase. And if we
* release our resources we won't be able to restart.
* 1995-March-20 David Schnepper
*/
/* If we're the only process mapping the lock table when the alarm
expires, exit. */
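/* The non-blocking attempt to upgrade our flock to LOCK_EX can only succeed
when no other process holds a shared lock on the lock-table file, i.e. we
are the last mapper and may exit. If the upgrade fails, LOCK_SH is
re-asserted in case the failed attempt dropped our shared lock. */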
if (ret == FAILURE)
if (!flock ((int) LOCK_data.sh_mem_handle, LOCK_EX | LOCK_NB))
break;
else
flock ((int) LOCK_data.sh_mem_handle, LOCK_SH);
#endif
}
/* Release system resources for semaphores. */
LOCK_header = NULL;
#ifdef TERMINATE_IDLE_LOCK_MANAGER
ISC_unmap_file (local_status, &LOCK_data, ISC_SEM_REMOVE);
chmod (LOCK_HEADER, 0664);
#else
ISC_unmap_file (local_status, &LOCK_data, NULL);
#endif
}
#endif
SLONG LOCK_query_data (
PTR parent_request,
USHORT series,
USHORT aggregate)
{
/**************************************
*
* L O C K _ q u e r y _ d a t a
*
**************************************
*
* Functional description
* Query lock series data with respect to a rooted
* lock hierarchy calculating aggregates as we go.
*
**************************************/
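/* Illustrative call (the series constant is hypothetical, not from this
module): given a request on the parent lock, an aggregate over the
children's lbl_data values is fetched in one call, e.g.

SLONG sum = LOCK_query_data (parent_request, MY_SERIES, LCK_SUM);

LCK_MIN, LCK_MAX, LCK_CNT, LCK_AVG and LCK_ANY are handled the same way. */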
LBL lock;
LRQ parent;
SLONG data, count;
SRQ data_header, que;
/* Get root of lock hierarchy */
if (parent_request && series < LCK_MAX_SERIES)
parent = get_request (parent_request);
else
{
CHECK (FALSE);
return 0;
}
acquire (parent->lrq_owner);
parent = (LRQ) ABS_PTR (parent_request); /* remap */
++LOCK_header->lhb_query_data;
data_header = &LOCK_header->lhb_data [series];
data = count = 0;
/* Simply walk the lock series data queue forward for the minimum
and backward for the maximum -- it's maintained in sorted order. */
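/* Since the queue is kept sorted by lbl_data, a forward walk reaches the
smallest matching value first, so LCK_MIN (and LCK_ANY, which only needs one
match) can stop at the first lock with the right parent; the remaining
aggregates keep walking and accumulate. */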
switch (aggregate)
{
case LCK_MIN:
case LCK_CNT:
case LCK_AVG:
case LCK_SUM:
case LCK_ANY:
for (que = (SRQ) ABS_PTR (data_header->srq_forward);
que != data_header; que = (SRQ) ABS_PTR (que->srq_forward))
{
lock = (LBL) ((UCHAR*) que - OFFSET (LBL, lbl_lhb_data));
CHECK (lock->lbl_series == series);
if (lock->lbl_parent != parent->lrq_lock)
continue;
switch (aggregate)
{
case LCK_MIN:
data = lock->lbl_data;
break;
case LCK_ANY:
case LCK_CNT:
++count;
break;
case LCK_AVG:
++count;
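/* fall through: LCK_AVG also needs the running sum */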
case LCK_SUM:
data += lock->lbl_data;
break;
}
if (aggregate == LCK_MIN || aggregate == LCK_ANY)
break;
}
if (aggregate == LCK_CNT || aggregate == LCK_ANY)
data = count;
else if (aggregate == LCK_AVG)
data = (count) ? data/count : 0;
break;
case LCK_MAX:
for (que = (SRQ) ABS_PTR (data_header->srq_backward);
que != data_header; que = (SRQ) ABS_PTR (que->srq_backward))
{
lock = (LBL) ((UCHAR*) que - OFFSET (LBL, lbl_lhb_data));
CHECK (lock->lbl_series == series);
if (lock->lbl_parent != parent->lrq_lock)
continue;
data = lock->lbl_data;
break;
}
break;
default:
CHECK (FALSE);
}
release (parent->lrq_owner);
return data;
}
SLONG LOCK_read_data (
PTR request_offset)
{
/**************************************
*
* L O C K _ r e a d _ d a t a
*
**************************************
*
* Functional description
* Read data associated with a lock.
*
**************************************/
LBL lock;
LRQ request;
SLONG data;
LOCK_TRACE (("LOCK_read_data(%ld)\n", request_offset));
request = get_request (request_offset);
acquire (request->lrq_owner);
++LOCK_header->lhb_read_data;
request = (LRQ) ABS_PTR (request_offset);/* Re-init after a potential remap */
lock = (LBL) ABS_PTR (request->lrq_lock);
data = lock->lbl_data;
if (lock->lbl_series < LCK_MAX_SERIES)
++LOCK_header->lhb_operations [lock->lbl_series];
else
++LOCK_header->lhb_operations [0];
release (request->lrq_owner);
return data;
}
SLONG LOCK_read_data2 (
PTR parent_request,
USHORT series,
UCHAR *value,
USHORT length,
PTR owner_offset)
{
/**************************************
*
* L O C K _ r e a d _ d a t a 2
*
**************************************
*
* Functional description
* Read data associated with transient locks.
*
**************************************/
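/* Unlike LOCK_read_data, the caller needs no existing request on the lock
itself: the lock block is located by (parent, series, key value) through
find_lock, and 0 is returned when no such lock exists. */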
PTR parent;
LRQ request;
LBL lock;
SLONG data;
USHORT junk;
LOCK_TRACE (("LOCK_read_data2(%ld)\n", parent_request));
acquire (owner_offset);
++LOCK_header->lhb_read_data;
if (series < LCK_MAX_SERIES)
++LOCK_header->lhb_operations [series];
else
++LOCK_header->lhb_operations [0];
if (parent_request)
{
request = get_request (parent_request);
parent = request->lrq_lock;
}
else
parent = 0;
if (lock = find_lock (parent, series, value, length, &junk))
data = lock->lbl_data;
else
data = 0;
release (owner_offset);
return data;
}
void LOCK_re_post (
int (*ast)(void *),
void *arg,
PTR owner_offset)
{
/**************************************
*
* L O C K _ r e _ p o s t
*
**************************************
*
* Functional description
* Re-post an AST that was previously blocked.
* It is assumed that the routines that look
* at the re-post list only test the ast element.
*
**************************************/
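/* Sketch of the mechanism below: a request block is allocated (or taken from
the free list), flagged LRQ_repost with no lock attached, and queued on the
owner's own_blocks list; the owner is then signalled (or, in the
non-threaded build, blocking_action2 is run inline) so the saved AST is
delivered on the next pass over the blocking queue. */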
OWN owner;
LRQ request;
LOCK_TRACE (("LOCK_re_post(%ld)\n", owner_offset));
acquire (owner_offset);
/* Allocate or reuse a lock request block */
ASSERT_ACQUIRED;
if (QUE_EMPTY (LOCK_header->lhb_free_requests))
{
if (!(request = (LRQ) alloc (sizeof (struct lrq), NULL)))
{
release (owner_offset);
return;
}
}
else
{
ASSERT_ACQUIRED;
request = (LRQ) ((UCHAR*) QUE_NEXT (LOCK_header->lhb_free_requests) -
OFFSET (LRQ, lrq_lbl_requests));
remove_que (&request->lrq_lbl_requests);
}
owner = (OWN) ABS_PTR (owner_offset);
request->lrq_type = type_lrq;
request->lrq_flags = LRQ_repost;
request->lrq_ast_routine = ast;
request->lrq_ast_argument = arg;
request->lrq_requested = LCK_none;
request->lrq_state = LCK_none;
request->lrq_owner = owner_offset;
request->lrq_lock = (PTR) 0;
insert_tail (&owner->own_blocks, &request->lrq_own_blocks);
DEBUG_DELAY;
#ifdef ANY_THREADING
signal_owner ((OWN) ABS_PTR (owner_offset), (PTR)NULL);
#else
/* The deadlock detection looks at the OWN_signaled bit to decide
* whether processes have things to look at - as we're putting
* a repost item on the blocking queue, we DO have additional work
* to do, so set the flag to indicate so.
*/
owner->own_flags &= ~OWN_signal;
owner->own_ast_flags |= OWN_signaled;
DEBUG_DELAY;
ISC_inhibit();
DEBUG_DELAY;
blocking_action2 (owner_offset, (PTR)NULL);
DEBUG_DELAY;
ISC_enable();
DEBUG_DELAY;
#endif
release (owner_offset);
}
BOOLEAN LOCK_shut_manager (void)
{
/**************************************
*
* L O C K _ s h u t _ m a n a g e r
*
**************************************
*
* Functional description
* Set a shutdown flag and post the lock
* manager process to exit.
*
**************************************/
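/* Sketch of the handshake below: set LHB_shut_manager, wake the manager,
allow it roughly five seconds to purge itself from the lock table, then
clear the flag and report TRUE only if no manager owner remains. */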
#ifdef MANAGER_PROCESS
OWN manager;
acquire (DUMMY_OWNER_SHUTDOWN);
if (manager = get_manager (FALSE))
{
LOCK_header->lhb_flags |= LHB_shut_manager;
post_wakeup (manager);
release_mutex();
sleep (5);
acquire (DUMMY_OWNER_SHUTDOWN);
LOCK_header->lhb_flags &= ~LHB_shut_manager;
manager = get_manager (FALSE);
}
release_mutex();
return (manager ? FALSE : TRUE);
#else
return TRUE;
#endif
}
#ifdef WINDOWS_ONLY
void LOCK_wep (void)
{
/**************************************
*
* L O C K _ w e p
*
**************************************
*