📄 ntoskernel.c
(struct nt_list *head, NT_SPIN_LOCK *lock)
{
    TRACEENTER5("%p", head);
    return ExfInterlockedRemoveTailList(head, lock);
}

wfastcall struct nt_slist *WIN_FUNC(ExInterlockedPushEntrySList,3)
    (nt_slist_header *head, struct nt_slist *entry, NT_SPIN_LOCK *lock)
{
    struct nt_slist *ret;

    ret = PushEntrySList(head, entry, lock);
    return ret;
}

wstdcall struct nt_slist *WIN_FUNC(ExpInterlockedPushEntrySList,2)
    (nt_slist_header *head, struct nt_slist *entry)
{
    struct nt_slist *ret;

    ret = PushEntrySList(head, entry, &ntoskernel_lock);
    return ret;
}

wfastcall struct nt_slist *WIN_FUNC(InterlockedPushEntrySList,2)
    (nt_slist_header *head, struct nt_slist *entry)
{
    struct nt_slist *ret;

    ret = PushEntrySList(head, entry, &ntoskernel_lock);
    return ret;
}

wfastcall struct nt_slist *WIN_FUNC(ExInterlockedPopEntrySList,2)
    (nt_slist_header *head, NT_SPIN_LOCK *lock)
{
    struct nt_slist *ret;

    ret = PopEntrySList(head, lock);
    return ret;
}

wstdcall struct nt_slist *WIN_FUNC(ExpInterlockedPopEntrySList,1)
    (nt_slist_header *head)
{
    struct nt_slist *ret;

    ret = PopEntrySList(head, &ntoskernel_lock);
    return ret;
}

wfastcall struct nt_slist *WIN_FUNC(InterlockedPopEntrySList,1)
    (nt_slist_header *head)
{
    struct nt_slist *ret;

    ret = PopEntrySList(head, &ntoskernel_lock);
    return ret;
}

wstdcall USHORT WIN_FUNC(ExQueryDepthSList,1)
    (nt_slist_header *head)
{
    USHORT depth;

    TRACEENTER5("%p", head);
    depth = head->depth;
    DBGTRACE5("%d, %p", depth, head->next);
    return depth;
}

wfastcall LONG WIN_FUNC(InterlockedIncrement,1)
    (LONG volatile *val)
{
    return post_atomic_add(*val, 1);
}

wfastcall LONG WIN_FUNC(InterlockedDecrement,1)
    (LONG volatile *val)
{
    return post_atomic_add(*val, -1);
}

wfastcall LONG WIN_FUNC(InterlockedExchange,2)
    (LONG volatile *target, LONG val)
{
    return xchg(target, val);
}

wfastcall LONG WIN_FUNC(InterlockedCompareExchange,3)
    (LONG volatile *dest, LONG new, LONG old)
{
    return cmpxchg(dest, old, new);
}

wfastcall void WIN_FUNC(ExInterlockedAddLargeStatistic,2)
    (LARGE_INTEGER volatile *plint, ULONG n)
{
    unsigned long flags;

    save_local_irq(flags);
#ifdef CONFIG_X86_64
    __asm__ __volatile__(
        "\n"
        LOCK_PREFIX "add %1, %0\n\t"
        : "+m" (*plint)
        : "r" (n));
#else
    __asm__ __volatile__(
        "\n"
        "1:\t"
        "   movl %1, %%ebx\n\t"
        "   movl %%edx, %%ecx\n\t"
        "   addl %%eax, %%ebx\n\t"
        "   adcl $0, %%ecx\n\t"
        LOCK_PREFIX "cmpxchg8b %0\n\t"
        "   jnz 1b\n\t"
        : "+m" (*plint)
        : "m" (n), "A" (*plint)
        : "ebx", "ecx");
#endif
    restore_local_irq(flags);
}

static void initialize_dh(struct dispatcher_header *dh, enum dh_type type,
                          int state)
{
    memset(dh, 0, sizeof(*dh));
    set_dh_type(dh, type);
    dh->signal_state = state;
    InitializeListHead(&dh->wait_blocks);
}

static void timer_proc(unsigned long data)
{
    struct wrap_timer *wrap_timer = (struct wrap_timer *)data;
    struct nt_timer *nt_timer;
    struct kdpc *kdpc;

    nt_timer = wrap_timer->nt_timer;
    TRACEENTER5("%p(%p), %lu", wrap_timer, nt_timer, jiffies);
#ifdef TIMER_DEBUG
    BUG_ON(wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
    BUG_ON(nt_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
#endif
    KeSetEvent((struct nt_event *)nt_timer, 0, FALSE);
    kdpc = nt_timer->kdpc;
    if (kdpc && kdpc->func) {
#if 1
        LIN2WIN4(kdpc->func, kdpc, kdpc->ctx, kdpc->arg1, kdpc->arg2);
#else
        queue_kdpc(kdpc);
#endif
    }
    nt_spin_lock(&timer_lock);
    if (wrap_timer->repeat)
        mod_timer(&wrap_timer->timer, jiffies + wrap_timer->repeat);
    nt_spin_unlock(&timer_lock);
    TRACEEXIT5(return);
}
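/*
 * Each Windows nt_timer is backed by a Linux kernel timer embedded in a
 * wrap_timer.  When the Linux timer fires, timer_proc above signals the
 * event that is part of the nt_timer, runs the associated DPC (if any)
 * and, for periodic timers, re-arms the Linux timer under timer_lock.
 * wrap_init_timer below sets up this pairing.
 */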
void wrap_init_timer(struct nt_timer *nt_timer, enum timer_type type,
                     struct kdpc *kdpc, struct ndis_miniport_block *nmb)
{
    struct wrap_timer *wrap_timer;
    KIRQL irql;

    /* TODO: if a timer is initialized more than once, we allocate
     * memory for wrap_timer more than once for the same nt_timer,
     * wasting memory. We can check if nt_timer->wrap_timer_magic is
     * set and not allocate, but it is not guaranteed always to be
     * safe */
    TRACEENTER5("%p", nt_timer);
    /* we allocate memory for wrap_timer behind driver's back and
     * there is no NDIS/DDK function where this memory can be
     * freed, so we use slack_kmalloc so it gets freed when driver
     * is unloaded */
    wrap_timer = slack_kmalloc(sizeof(*wrap_timer));
    if (!wrap_timer) {
        ERROR("couldn't allocate memory for timer");
        return;
    }

    memset(wrap_timer, 0, sizeof(*wrap_timer));
    init_timer(&wrap_timer->timer);
    wrap_timer->timer.data = (unsigned long)wrap_timer;
    wrap_timer->timer.function = timer_proc;
    wrap_timer->nt_timer = nt_timer;
#ifdef TIMER_DEBUG
    wrap_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
#endif
    nt_timer->wrap_timer = wrap_timer;
    nt_timer->kdpc = kdpc;
    initialize_dh(&nt_timer->dh, type, 0);
    nt_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
    irql = nt_spin_lock_irql(&timer_lock, DISPATCH_LEVEL);
    if (nmb)
        InsertTailList(&nmb->wnd->timer_list, &wrap_timer->list);
    else
        InsertTailList(&wrap_timer_list, &wrap_timer->list);
    nt_spin_unlock_irql(&timer_lock, irql);
    DBGTRACE5("timer %p (%p)", wrap_timer, nt_timer);
    TRACEEXIT5(return);
}

wstdcall void WIN_FUNC(KeInitializeTimerEx,2)
    (struct nt_timer *nt_timer, enum timer_type type)
{
    TRACEENTER5("%p", nt_timer);
    wrap_init_timer(nt_timer, type, NULL, NULL);
}

wstdcall void WIN_FUNC(KeInitializeTimer,1)
    (struct nt_timer *nt_timer)
{
    TRACEENTER5("%p", nt_timer);
    wrap_init_timer(nt_timer, NotificationTimer, NULL, NULL);
}

/* expires and repeat are in HZ */
BOOLEAN wrap_set_timer(struct nt_timer *nt_timer, unsigned long expires_hz,
                       unsigned long repeat_hz, struct kdpc *kdpc)
{
    BOOLEAN ret;
    struct wrap_timer *wrap_timer;
    KIRQL irql;

    TRACEENTER4("%p, %lu, %lu, %p, %lu",
                nt_timer, expires_hz, repeat_hz, kdpc, jiffies);
    KeClearEvent((struct nt_event *)nt_timer);
    wrap_timer = nt_timer->wrap_timer;
    DBGTRACE4("%p", wrap_timer);
#ifdef TIMER_DEBUG
    if (wrap_timer->nt_timer != nt_timer)
        WARNING("bad timers: %p, %p, %p", wrap_timer, nt_timer,
                wrap_timer->nt_timer);
    if (nt_timer->wrap_timer_magic != WRAP_TIMER_MAGIC) {
        WARNING("Buggy Windows timer didn't initialize timer %p",
                nt_timer);
        return FALSE;
    }
    if (wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC) {
        WARNING("timer %p is not initialized (%lx)?",
                wrap_timer, wrap_timer->wrap_timer_magic);
        wrap_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
    }
#endif
    irql = nt_spin_lock_irql(&timer_lock, DISPATCH_LEVEL);
    if (kdpc)
        nt_timer->kdpc = kdpc;
    wrap_timer->repeat = repeat_hz;
    if (mod_timer(&wrap_timer->timer, jiffies + expires_hz))
        ret = TRUE;
    else
        ret = FALSE;
    nt_spin_unlock_irql(&timer_lock, irql);
    TRACEEXIT5(return ret);
}

wstdcall BOOLEAN WIN_FUNC(KeSetTimerEx,4)
    (struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks, LONG period_ms,
     struct kdpc *kdpc)
{
    unsigned long expires_hz, repeat_hz;

    DBGTRACE5("%p, %Ld, %d", nt_timer, duetime_ticks, period_ms);
    expires_hz = SYSTEM_TIME_TO_HZ(duetime_ticks) + 1;
    repeat_hz = MSEC_TO_HZ(period_ms);
    return wrap_set_timer(nt_timer, expires_hz, repeat_hz, kdpc);
}

wstdcall BOOLEAN WIN_FUNC(KeSetTimer,3)
    (struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
     struct kdpc *kdpc)
{
    TRACEENTER5("%p, %Ld, %p", nt_timer, duetime_ticks, kdpc);
    return KeSetTimerEx(nt_timer, duetime_ticks, 0, kdpc);
}
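/*
 * A rough sketch of how a Windows driver would typically drive the timer
 * API above.  Names such as my_timer, my_dpc, my_dpc_func and ctx are only
 * placeholders, and the due time is assumed to follow the usual Windows
 * convention (100ns units, negative meaning relative to the current time);
 * LARGE_INTEGER is assumed to be the plain 64-bit integer the %Ld format
 * above suggests.
 *
 *    struct nt_timer my_timer;
 *    struct kdpc my_dpc;
 *    LARGE_INTEGER duetime;
 *
 *    KeInitializeDpc(&my_dpc, my_dpc_func, ctx);
 *    KeInitializeTimerEx(&my_timer, NotificationTimer);
 *    duetime = -1000000;                              // ~100ms from now
 *    KeSetTimerEx(&my_timer, duetime, 1000, &my_dpc); // repeat every 1000ms
 *    ...
 *    KeCancelTimer(&my_timer);                        // stop the timer
 */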
wstdcall BOOLEAN WIN_FUNC(KeCancelTimer,1)
    (struct nt_timer *nt_timer)
{
    BOOLEAN canceled;
    struct wrap_timer *wrap_timer;
    KIRQL irql;

    TRACEENTER5("%p", nt_timer);
    wrap_timer = nt_timer->wrap_timer;
    if (!wrap_timer) {
        ERROR("invalid wrap_timer");
        return TRUE;
    }
#ifdef TIMER_DEBUG
    DBGTRACE5("canceling timer %p", wrap_timer);
    BUG_ON(wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
#endif
    DBGTRACE5("deleting timer %p(%p)", wrap_timer, nt_timer);
    /* disable timer before deleting so if it is periodic timer, it
     * won't be re-armed after deleting */
    irql = nt_spin_lock_irql(&timer_lock, DISPATCH_LEVEL);
    wrap_timer->repeat = 0;
    if (del_timer(&wrap_timer->timer))
        canceled = TRUE;
    else
        canceled = FALSE;
    nt_spin_unlock_irql(&timer_lock, irql);
    DBGTRACE5("canceled (%p): %d", wrap_timer, canceled);
    TRACEEXIT5(return canceled);
}

wstdcall BOOLEAN WIN_FUNC(KeReadStateTimer,1)
    (struct nt_timer *nt_timer)
{
    return nt_timer->dh.signal_state;
}

wstdcall void WIN_FUNC(KeInitializeDpc,3)
    (struct kdpc *kdpc, void *func, void *ctx)
{
    TRACEENTER3("%p, %p, %p", kdpc, func, ctx);
    memset(kdpc, 0, sizeof(*kdpc));
    kdpc->func = func;
    kdpc->ctx = ctx;
    InitializeListHead(&kdpc->list);
}

#ifdef KDPC_TASKLET
static void kdpc_worker(unsigned long data)
#else
static void kdpc_worker(void *data)
#endif
{
    struct nt_list *entry;
    struct kdpc *kdpc;
    KIRQL irql;

    while (1) {
        irql = nt_spin_lock_irql(&kdpc_list_lock, DISPATCH_LEVEL);
        entry = RemoveHeadList(&kdpc_list);
        if (!entry) {
            nt_spin_unlock_irql(&kdpc_list_lock, irql);
            break;
        }
        kdpc = container_of(entry, struct kdpc, list);
        /* initialize kdpc's list so queue/dequeue know if it
         * is in the queue or not */
        InitializeListHead(&kdpc->list);
        /* irql will be lowered below */
        nt_spin_unlock(&kdpc_list_lock);
        DBGTRACE5("%p, %p, %p, %p, %p", kdpc, kdpc->func, kdpc->ctx,
                  kdpc->arg1, kdpc->arg2);
        LIN2WIN4(kdpc->func, kdpc, kdpc->ctx, kdpc->arg1, kdpc->arg2);
        lower_irql(irql);
    }
}

wstdcall void WIN_FUNC(KeFlushQueuedDpcs,0)
    (void)
{
#ifdef KDPC_TASKLET
    kdpc_worker(0);
#else
    kdpc_worker(NULL);
#endif
}

static BOOLEAN queue_kdpc(struct kdpc *kdpc)
{
    BOOLEAN ret;
    KIRQL irql;

    TRACEENTER5("%p", kdpc);
    irql = nt_spin_lock_irql(&kdpc_list_lock, DISPATCH_LEVEL);
    if (IsListEmpty(&kdpc->list)) {
        InsertTailList(&kdpc_list, &kdpc->list);
#ifdef KDPC_TASKLET
        tasklet_schedule(&kdpc_work);
#else
        schedule_ntos_work(&kdpc_work);
#endif
        ret = TRUE;
    } else
        ret = FALSE;
    nt_spin_unlock_irql(&kdpc_list_lock, irql);
    TRACEEXIT5(return ret);
}

static BOOLEAN dequeue_kdpc(struct kdpc *kdpc)
{
    BOOLEAN ret;
    KIRQL irql;

    TRACEENTER5("%p", kdpc);
    irql = nt_spin_lock_irql(&kdpc_list_lock, DISPATCH_LEVEL);
    if (IsListEmpty(&kdpc->list))
        ret = FALSE;
    else {
        RemoveEntryList(&kdpc->list);
        ret = TRUE;
    }
    nt_spin_unlock_irql(&kdpc_list_lock, irql);
    return ret;
}

wstdcall BOOLEAN WIN_FUNC(KeInsertQueueDpc,3)
    (struct kdpc *kdpc, void *arg1, void *arg2)
{
    BOOLEAN ret;

    TRACEENTER5("%p, %p, %p", kdpc, arg1, arg2);
    kdpc->arg1 = arg1;
    kdpc->arg2 = arg2;
    ret = queue_kdpc(kdpc);
    TRACEEXIT5(return ret);
}

wstdcall BOOLEAN WIN_FUNC(KeRemoveQueueDpc,1)
    (struct kdpc *kdpc)
{
    BOOLEAN ret;

    TRACEENTER3("%p", kdpc);
    ret = dequeue_kdpc(kdpc);
    TRACEEXIT3(return ret);
}

static void ntos_work_item_worker(void *data)
{
    struct ntos_work_item *ntos_work_item;
    struct nt_list *cur;
    KIRQL irql;

    while (1) {
        irql = nt_spin_lock_irql(&ntos_work_item_list_lock,
                                 DISPATCH_LEVEL);
        cur = RemoveHeadList(&ntos_work_item_list);
        nt_spin_unlock_irql(&ntos_work_item_list_lock, irql);
        if (!cur)
            break;
        ntos_work_item = container_of(cur, struct ntos_work_item, list);
        WORKTRACE("%p: executing %p, %p, %p", current,
                  ntos_work_item->func, ntos_work_item->arg1,
                  ntos_work_item->arg2);
        LIN2WIN2(ntos_work_item->func, ntos_work_item->arg1,
                 ntos_work_item->arg2);
        kfree(ntos_work_item);
    }
    return;
}
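/*
 * schedule_ntos_work_item below pairs with ntos_work_item_worker above:
 * it allocates an ntos_work_item, appends it to ntos_work_item_list under
 * ntos_work_item_list_lock and schedules ntos_work_item_work.  The worker
 * then drains the list, calls each queued Windows function through
 * LIN2WIN2 and frees the item.
 */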
int schedule_ntos_work_item(NTOS_WORK_FUNC func, void *arg1, void *arg2)
{
    struct ntos_work_item *ntos_work_item;
    KIRQL irql;

    WORKENTER("adding work: %p, %p, %p", func, arg1, arg2);
    ntos_work_item = kmalloc(sizeof(*ntos_work_item), gfp_irql());
    if (!ntos_work_item) {
        ERROR("couldn't allocate memory");
        return -ENOMEM;
    }
    ntos_work_item->func = func;
    ntos_work_item->arg1 = arg1;
    ntos_work_item->arg2 = arg2;
    irql = nt_spin_lock_irql(&ntos_work_item_list_lock, DISPATCH_LEVEL);
    InsertTailList(&ntos_work_item_list, &ntos_work_item->list);
    nt_spin_unlock_irql(&ntos_work_item_list_lock, irql);
    schedule_ntos_work(&ntos_work_item_work);
    WORKEXIT(return 0);
}

wstdcall void WIN_FUNC(KeInitializeSpinLock,1)
    (NT_SPIN_LOCK *lock)
{
    TRACEENTER6("%p", lock);
    nt_spin_lock_init(lock);
}

wstdcall void WIN_FUNC(KeAcquireSpinLock,2)
    (NT_SPIN_LOCK *lock, KIRQL *irql)
{