📄 ntoskernel.c
字号:
if (object->allow_multiple_callbacks == FALSE && !IsListEmpty(&object->callback_funcs)) { nt_spin_unlock_irql(&object->lock, irql); EXIT2(return NULL); } nt_spin_unlock_irql(&ntoskernel_lock, irql); callback = kmalloc(sizeof(*callback), GFP_KERNEL); if (!callback) { ERROR("couldn't allocate memory"); return NULL; } callback->func = func; callback->context = context; callback->object = object; irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL); InsertTailList(&object->callback_funcs, &callback->list); nt_spin_unlock_irql(&object->lock, irql); EXIT2(return callback);}wstdcall void WIN_FUNC(ExUnregisterCallback,1) (struct callback_func *callback){ struct callback_object *object; KIRQL irql; ENTER3("%p", callback); if (!callback) return; object = callback->object; irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL); RemoveEntryList(&callback->list); nt_spin_unlock_irql(&object->lock, irql); kfree(callback); return;}wstdcall void WIN_FUNC(ExNotifyCallback,3) (struct callback_object *object, void *arg1, void *arg2){ struct callback_func *callback; KIRQL irql; ENTER3("%p", object); irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL); nt_list_for_each_entry(callback, &object->callback_funcs, list) { LIN2WIN3(callback->func, callback->context, arg1, arg2); } nt_spin_unlock_irql(&object->lock, irql); return;}/* check and set signaled state; should be called with dispatcher_lock held *//* @grab indicates if the event should be grabbed or checked * - note that a semaphore may stay in signaled state for multiple * 'grabs' if the count is > 1 */static int grab_object(struct dispatcher_header *dh, struct task_struct *thread, int grab){ EVENTTRACE("%p, %p, %d, %d", dh, thread, grab, dh->signal_state); if (is_mutex_object(dh)) { struct nt_mutex *nt_mutex; nt_mutex = container_of(dh, struct nt_mutex, dh); EVENTTRACE("%p, %p, %d, %p, %d", nt_mutex, nt_mutex->owner_thread, dh->signal_state, thread, grab); /* either no thread owns the mutex or this thread owns * it */ assert(dh->signal_state <= 1); assert(dh->signal_state < 1 || nt_mutex->owner_thread == NULL); if ((dh->signal_state > 0 && nt_mutex->owner_thread == NULL) || nt_mutex->owner_thread == thread) { if (grab) { dh->signal_state--; nt_mutex->owner_thread = thread; } EVENTEXIT(return 1); } } else if (dh->signal_state > 0) { /* if grab, decrement signal_state for * synchronization or semaphore objects */ if (grab && (is_synch_object(dh) || is_semaphore_object(dh))) dh->signal_state--; EVENTEXIT(return 1); } EVENTEXIT(return 0);}/* this function should be called holding dispatcher_lock */static void object_signalled(struct dispatcher_header *dh){ struct wait_block *wb; struct thread_event *thread_event; EVENTENTER("%p", dh); nt_list_for_each_entry(wb, &dh->wait_blocks, list) { EVENTTRACE("%p (%p): waking %p", dh, wb, wb->thread); assert(wb->thread != NULL); assert(wb->object == NULL); wb->object = dh; thread_event = wb->thread_event; thread_event->done = 1; wake_up_process(thread_event->task); } EVENTEXIT(return);}wstdcall NTSTATUS WIN_FUNC(KeWaitForMultipleObjects,8) (ULONG count, void *object[], enum wait_type wait_type, KWAIT_REASON wait_reason, KPROCESSOR_MODE wait_mode, BOOLEAN alertable, LARGE_INTEGER *timeout, struct wait_block *wait_block_array){ int i, res = 0, wait_count; typeof(jiffies) wait_hz = 0; struct wait_block *wb, wb_array[THREAD_WAIT_OBJECTS]; struct dispatcher_header *dh; struct thread_event thread_event; KIRQL irql; EVENTENTER("thread: %p count: %d, type: %d, reason: %u, " "waitmode: %u, alertable: %u, timeout: %p, irql: %d", current, count, wait_type, wait_reason, wait_mode, alertable, timeout, current_irql()); if (count > MAX_WAIT_OBJECTS) EVENTEXIT(return STATUS_INVALID_PARAMETER); if (count > THREAD_WAIT_OBJECTS && wait_block_array == NULL) EVENTEXIT(return STATUS_INVALID_PARAMETER); if (wait_block_array == NULL) wb = wb_array; else wb = wait_block_array; /* If *timeout == 0: In the case of WaitAny, if an object can * be grabbed (object is in signaled state), grab and * return. In the case of WaitAll, we have to first make sure * all objects can be grabbed. If any/some of them can't be * grabbed, either we return STATUS_TIMEOUT or wait for them, * depending on how to satisfy wait. If all of them can be * grabbed, we will grab them in the next loop below */ irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL); for (i = wait_count = 0; i < count; i++) { dh = object[i]; EVENTTRACE("%p: event %p (%d)", current, dh, dh->signal_state); /* wait_type == 1 for WaitAny, 0 for WaitAll */ if (grab_object(dh, current, wait_type)) { if (wait_type == WaitAny) { nt_spin_unlock_irql(&dispatcher_lock, irql); EVENTEXIT(return STATUS_WAIT_0 + i); } } else { EVENTTRACE("%p: wait for %p", current, dh); wait_count++; } } if (timeout && *timeout == 0 && wait_count) { nt_spin_unlock_irql(&dispatcher_lock, irql); EVENTEXIT(return STATUS_TIMEOUT); } /* get the list of objects the thread needs to wait on and add * the thread on the wait list for each such object */ /* if *timeout == 0, this step will grab all the objects */ thread_event.done = 0; thread_event.task = current; EVENTTRACE("%p, %p", &thread_event, current); for (i = 0; i < count; i++) { dh = object[i]; EVENTTRACE("%p: event %p (%d)", current, dh, dh->signal_state); wb[i].object = NULL; if (grab_object(dh, current, 1)) { EVENTTRACE("%p: no wait for %p (%d)", current, dh, dh->signal_state); /* mark that we are not waiting on this object */ wb[i].thread = NULL; } else { assert(timeout == NULL || *timeout != 0); wb[i].thread_event = &thread_event; wb[i].thread = current; EVENTTRACE("%p: wait for %p", current, dh); InsertTailList(&dh->wait_blocks, &wb[i].list); } } nt_spin_unlock_irql(&dispatcher_lock, irql); if (wait_count == 0) EVENTEXIT(return STATUS_SUCCESS); assert(timeout == NULL || *timeout != 0); if (timeout == NULL) wait_hz = 0; else wait_hz = SYSTEM_TIME_TO_HZ(*timeout) + 1; EVENTTRACE("%p: sleep for %ld on %p", current, wait_hz, &thread_event); /* we don't honor 'alertable' - according to decription for * this, even if waiting in non-alertable state, thread may be * alerted in some circumstances */ while (wait_count) { res = wrap_wait_event(thread_event.done, wait_hz, TASK_INTERRUPTIBLE); irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL); EVENTTRACE("%p woke up: %p, %d, %d", current, &thread_event, res, thread_event.done);#ifdef EVENT_DEBUG if (thread_event.task != current) ERROR("%p: argh, task %p should be %p", &thread_event, thread_event.task, current);#endif /* the event may have been set by the time * wrap_wait_event returned and spinlock obtained, so * don't rely on value of 'res' - check event status */ if (!thread_event.done) { assert(res <= 0); /* timed out or interrupted; remove from wait list */ for (i = 0; i < count; i++) { if (!wb[i].thread) continue; EVENTTRACE("%p: timedout, dequeue %p (%p)", current, object[i], wb[i].object); RemoveEntryList(&wb[i].list); } nt_spin_unlock_irql(&dispatcher_lock, irql); if (res < 0) EVENTEXIT(return STATUS_ALERTED); else EVENTEXIT(return STATUS_TIMEOUT); } assert(res > 0); /* woken because object(s) signalled */ for (i = 0; wait_count && i < count; i++) { if (!wb[i].thread || !wb[i].object) continue; DBG_BLOCK(1) { if (wb[i].object != object[i]) { EVENTTRACE("oops %p != %p", wb[i].object, object[i]); continue; } } if (!grab_object(wb[i].object, current, 1)) continue; EVENTTRACE("object: %p, %p", object[i], wb[i].object); RemoveEntryList(&wb[i].list); wait_count--; if (wait_type == WaitAny) { int j; /* done; remove from rest of wait list */ for (j = i + 1; j < count; j++) { if (wb[j].thread) RemoveEntryList(&wb[j].list); } nt_spin_unlock_irql(&dispatcher_lock, irql); EVENTEXIT(return STATUS_WAIT_0 + i); } } thread_event.done = 0; nt_spin_unlock_irql(&dispatcher_lock, irql); if (wait_count == 0) EVENTEXIT(return STATUS_SUCCESS); /* this thread is still waiting for more objects, so * let it wait for remaining time and those objects */ /* we already set res to 1 if timeout was NULL, so * reinitialize wait_hz accordingly */ if (timeout) wait_hz = res; else wait_hz = 0; } /* should never reach here, but compiler wants return value */ ERROR("%p: wait_hz: %ld", current, wait_hz); EVENTEXIT(return STATUS_SUCCESS);}wstdcall NTSTATUS WIN_FUNC(KeWaitForSingleObject,5) (void *object, KWAIT_REASON wait_reason, KPROCESSOR_MODE wait_mode, BOOLEAN alertable, LARGE_INTEGER *timeout){ return KeWaitForMultipleObjects(1, &object, WaitAny, wait_reason, wait_mode, alertable, timeout, NULL);}wstdcall void WIN_FUNC(KeInitializeEvent,3) (struct nt_event *nt_event, enum event_type type, BOOLEAN state){ EVENTENTER("event = %p, type = %d, state = %d", nt_event, type, state); initialize_object(&nt_event->dh, type, state); EVENTEXIT(return);}wstdcall LONG WIN_FUNC(KeSetEvent,3) (struct nt_event *nt_event, KPRIORITY incr, BOOLEAN wait){ LONG old_state; KIRQL irql; EVENTENTER("event = %p, type = %d, wait = %d", nt_event, nt_event->dh.type, wait); if (wait == TRUE) WARNING("wait = %d, not yet implemented", wait); irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL); old_state = nt_event->dh.signal_state; nt_event->dh.signal_state = 1; if (old_state == 0) object_signalled(&nt_event->dh); nt_spin_unlock_irql(&dispatcher_lock, irql); EVENTEXIT(return old_state);}wstdcall void WIN_FUNC(KeClearEvent,1) (struct nt_event *nt_event){ KIRQL irql; EVENTENTER("event = %p", nt_event); irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL); nt_event->dh.signal_state = 0; nt_spin_unlock_irql(&dispatcher_lock, irql); EVENTEXIT(return);}wstdcall LONG WIN_FUNC(KeResetEvent,1) (struct nt_event *nt_event){ LONG old_state; KIRQL irql; EVENTENTER("event = %p", nt_event); irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL); old_state = nt_event->dh.signal_state; nt_event->dh.signal_state = 0; nt_spin_unlock_irql(&dispatcher_lock, irql); EVENTTRACE("old state: %d", old_state); EVENTEXIT(return old_state);}wstdcall LONG WIN_FUNC(KeReadStateEvent,1) (struct nt_event *nt_event){ LONG state; KIRQL irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL); state = nt_event->dh.signal_state; nt_spin_unlock_irql(&dispatcher_lock, irql); EVENTTRACE("%d", state); return state;}wstdcall void WIN_FUNC(KeInitializeMutex,2) (struct nt_mutex *mutex, ULONG level){ KIRQL irql; EVENTENTER("%p", mutex); irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL); initialize_object(&mutex->dh, MutexObject, 1); mutex->dh.size = sizeof(*mutex); InitializeListHead(&mutex->list); mutex->abandoned = FALSE; mutex->apc_disable = 1; mutex->owner_thread = NULL; nt_spin_unlock_irql(&dispatcher_lock, irql); EVENTEXIT(return);}wstdcall LONG WIN_FUNC(KeReleaseMutex,2) (struct nt_mutex *mutex, BOOLEAN wait){ LONG ret; KIRQL irql; struct task_struct *thread; EVENTENTER("%p, %d, %p", mutex, wait, current); if (wait == TRUE) WARNING("wait: %d", wait); thread = current; irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL); EVENTTRACE("%p, %p, %p, %d", mutex, thread, mutex->owner_thread, mutex->dh.signal_state); if ((mutex->owner_thread == thread) && (mutex->dh.signal_state <= 0)) { ret = mutex->dh.signal_state++; if (ret == 0) { mutex->owner_thread = NULL; object_signalled(&mutex->dh); } } else { ret = STATUS_MUTANT_NOT_OWNED; WARNING("invalid mutex: %p, %p, %p", mutex, mutex->owner_thread, thread); } EVENTTRACE("%p, %p, %p, %d", mutex, thread, mutex->owner_thread, mutex->dh.signal_state); nt_spin_unlock_irql(&dispatcher_lock, irql); EVENTEXIT(return ret);}wstdcall void WIN_FUNC(KeInitializeSemaphore,3) (struct nt_semaphore *semaphore, LONG count, LONG limit){ EVENTENTER("%p: %d", semaphore, count); /* if limit > 1, we need to satisfy as many waits (until count * becomes 0); so we keep decrementing count everytime a wait * is satisified */ initialize_object(&semaphore->dh, SemaphoreObject, count); semaphore->dh.size = sizeof(*semaphore); semaphore->limit = limit; EVENTEXIT(return);}wstdcall LONG WIN_FUNC(KeReleaseSemaphore,4) (struct nt_semaphore *semaphore, KPRIORITY incr, LONG adjustment, BOOLEAN wait){ LONG ret; KIRQL irql; EVENTENTER("%p", semaphore); irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL); ret = semaphore->dh.signal_state; assert(ret >= 0); if (semaphore->dh.signal_state + adjustment <= semaphore->limit) semaphore->dh.signal_state += adjustment; else { WARNING("releasing %d over limit %d", adjustment, semaphore->limit); semaphore->dh.signal_state = semaphore->limit; } if (semaphore->dh.signal_state > 0) object_signalled(&semaphore->dh); nt_spin_unlock_irql(&dispatcher_lock, irql); EVENTEXIT(return ret);}wstdcall NTSTATUS WIN_FUNC(KeDelayExecutionThread,3) (KPROCESSOR_MODE wait_mode, BOOLEAN alertable, LARGE_INTEGER *interval){ int res; long timeout; if (wait_mode != 0) ERROR("invalid wait_mode %d", wait_mode); timeout = SYSTEM_TIME_TO_HZ(*interval) + 1; EVENTTRACE("thread: %p, interval: %Ld, timeout: %ld", current, *interval, timeout); if (timeout <= 0) EVENTEXIT(return STATUS_SUCCESS); if (alertable) set_current_state(TASK_INTERRUPTIBLE); else set_current_state(TASK_UNINTERRUPTIBLE); res = schedule_timeout(timeout); EVENTTRACE("thread: %p, res: %d", current, res); if (res == 0) EVENTEXIT(return STATUS_SUCCESS); else EVENTEXIT(return STATUS_ALERTED);}wstdcall ULONGLONG WIN_FUNC(KeQueryInterruptTime,0) (void){ EXIT5(return jiffies * TICKSPERJIFFY);}wstdcall ULONG WIN_FUNC(KeQueryTimeIncrement,0) (void){ EXIT5(return TICKSPERSEC / HZ);}wstdcall void WIN_FUNC(KeQuerySystemTime,1) (LARGE_INTEGER *time){ *time = ticks_1601(); TRACE5("%Lu, %lu", *time, jiffies);}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -