
📄 ntoskernel.c

📁 Program for installing and bringing up a wireless network card under Linux
💻 C
📖 Page 1 of 5
/* expires and repeat are in HZ */
BOOLEAN wrap_set_timer(struct nt_timer *nt_timer, unsigned long expires_hz,
		       unsigned long repeat_hz, struct kdpc *kdpc)
{
	struct wrap_timer *wrap_timer;

	TIMERENTER("%p, %lu, %lu, %p, %lu",
		   nt_timer, expires_hz, repeat_hz, kdpc, jiffies);
	wrap_timer = nt_timer->wrap_timer;
	TIMERTRACE("%p", wrap_timer);
#ifdef TIMER_DEBUG
	if (wrap_timer->nt_timer != nt_timer)
		WARNING("bad timers: %p, %p, %p", wrap_timer, nt_timer,
			wrap_timer->nt_timer);
	if (nt_timer->wrap_timer_magic != WRAP_TIMER_MAGIC) {
		WARNING("buggy Windows driver didn't initialize timer %p",
			nt_timer);
		return FALSE;
	}
	if (wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC) {
		WARNING("timer %p is not initialized (%lx)?",
			wrap_timer, wrap_timer->wrap_timer_magic);
		wrap_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
	}
#endif
	KeClearEvent((struct nt_event *)nt_timer);
	nt_timer->kdpc = kdpc;
	wrap_timer->repeat = repeat_hz;
	if (mod_timer(&wrap_timer->timer, jiffies + expires_hz))
		TIMEREXIT(return TRUE);
	else
		TIMEREXIT(return FALSE);
}

wstdcall BOOLEAN WIN_FUNC(KeSetTimerEx,4)
	(struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
	 LONG period_ms, struct kdpc *kdpc)
{
	unsigned long expires_hz, repeat_hz;

	TIMERENTER("%p, %Ld, %d", nt_timer, duetime_ticks, period_ms);
	expires_hz = SYSTEM_TIME_TO_HZ(duetime_ticks);
	repeat_hz = MSEC_TO_HZ(period_ms);
	return wrap_set_timer(nt_timer, expires_hz, repeat_hz, kdpc);
}

wstdcall BOOLEAN WIN_FUNC(KeSetTimer,3)
	(struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
	 struct kdpc *kdpc)
{
	TIMERENTER("%p, %Ld, %p", nt_timer, duetime_ticks, kdpc);
	return KeSetTimerEx(nt_timer, duetime_ticks, 0, kdpc);
}

wstdcall BOOLEAN WIN_FUNC(KeCancelTimer,1)
	(struct nt_timer *nt_timer)
{
	struct wrap_timer *wrap_timer;
	int ret;

	TIMERENTER("%p", nt_timer);
	wrap_timer = nt_timer->wrap_timer;
	if (!wrap_timer) {
		ERROR("invalid wrap_timer");
		return TRUE;
	}
#ifdef TIMER_DEBUG
	BUG_ON(wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
#endif
	TIMERTRACE("canceling timer %p(%p)", wrap_timer, nt_timer);
	/* disable the timer before deleting it so that, if it is periodic,
	 * it won't be re-armed after deletion */
	wrap_timer->repeat = 0;
	ret = del_timer(&wrap_timer->timer);
	/* the documentation for KeCancelTimer suggests the DPC is
	 * dequeued, but in fact the DPC is left to run */
	if (ret)
		TIMEREXIT(return TRUE);
	else
		TIMEREXIT(return FALSE);
}

wstdcall BOOLEAN WIN_FUNC(KeReadStateTimer,1)
	(struct nt_timer *nt_timer)
{
	if (nt_timer->dh.signal_state)
		return TRUE;
	else
		return FALSE;
}

wstdcall void WIN_FUNC(KeInitializeDpc,3)
	(struct kdpc *kdpc, void *func, void *ctx)
{
	ENTER3("%p, %p, %p", kdpc, func, ctx);
	memset(kdpc, 0, sizeof(*kdpc));
	kdpc->func = func;
	kdpc->ctx  = ctx;
	InitializeListHead(&kdpc->list);
}

static void kdpc_worker(worker_param_t dummy)
{
	struct nt_list *entry;
	struct kdpc *kdpc;
	unsigned long flags;
	KIRQL irql;

	WORKENTER("");
	irql = raise_irql(DISPATCH_LEVEL);
	while (1) {
		spin_lock_irqsave(&kdpc_list_lock, flags);
		entry = RemoveHeadList(&kdpc_list);
		if (entry) {
			kdpc = container_of(entry, struct kdpc, list);
			assert(kdpc->queued);
			kdpc->queued = 0;
		} else
			kdpc = NULL;
		spin_unlock_irqrestore(&kdpc_list_lock, flags);
		if (!kdpc)
			break;
		WORKTRACE("%p, %p, %p, %p, %p", kdpc, kdpc->func, kdpc->ctx,
			  kdpc->arg1, kdpc->arg2);
		assert_irql(_irql_ == DISPATCH_LEVEL);
		LIN2WIN4(kdpc->func, kdpc, kdpc->ctx, kdpc->arg1, kdpc->arg2);
		assert_irql(_irql_ == DISPATCH_LEVEL);
	}
	lower_irql(irql);
	WORKEXIT(return);
}

wstdcall void WIN_FUNC(KeFlushQueuedDpcs,0)
	(void)
{
	kdpc_worker(NULL);
}
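/*
 * Illustrative sketch (not part of ntoskernel.c): KeSetTimerEx above
 * converts a Windows due time (100 ns ticks; by Windows convention a
 * negative value means a relative delay) and a millisecond period into
 * jiffies before arming the Linux timer.  The standalone user-space
 * program below shows the same unit conversions with hypothetical
 * helpers standing in for SYSTEM_TIME_TO_HZ and MSEC_TO_HZ; it is
 * guarded by #if 0 so it has no effect on the kernel build.
 */
#if 0
#include <stdio.h>

#define HZ 250				/* assumed kernel tick rate */
#define TICKS_PER_SEC 10000000LL	/* 100 ns units per second */

/* hypothetical stand-in for SYSTEM_TIME_TO_HZ */
static long ticks_to_jiffies(long long duetime_ticks)
{
	if (duetime_ticks < 0)		/* relative due time */
		duetime_ticks = -duetime_ticks;
	return (long)(duetime_ticks * HZ / TICKS_PER_SEC);
}

/* hypothetical stand-in for MSEC_TO_HZ */
static long msec_to_jiffies(long ms)
{
	return (ms * HZ + 999) / 1000;	/* round up to at least one tick */
}

int main(void)
{
	/* 500 ms relative due time and 100 ms period -> "125 25" at HZ=250 */
	printf("%ld %ld\n", ticks_to_jiffies(-5000000LL), msec_to_jiffies(100));
	return 0;
}
#endif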
BOOLEAN queue_kdpc(struct kdpc *kdpc)
{
	BOOLEAN ret;
	unsigned long flags;

	WORKENTER("%p", kdpc);
	spin_lock_irqsave(&kdpc_list_lock, flags);
	if (kdpc->queued)
		ret = FALSE;
	else {
		if (unlikely(kdpc->importance == HighImportance))
			InsertHeadList(&kdpc_list, &kdpc->list);
		else
			InsertTailList(&kdpc_list, &kdpc->list);
		kdpc->queued = 1;
		ret = TRUE;
	}
	spin_unlock_irqrestore(&kdpc_list_lock, flags);
	if (ret == TRUE)
		schedule_ntos_work(&kdpc_work);
	WORKTRACE("%d", ret);
	return ret;
}

BOOLEAN dequeue_kdpc(struct kdpc *kdpc)
{
	BOOLEAN ret;
	unsigned long flags;

	WORKENTER("%p", kdpc);
	spin_lock_irqsave(&kdpc_list_lock, flags);
	if (kdpc->queued) {
		RemoveEntryList(&kdpc->list);
		kdpc->queued = 0;
		ret = TRUE;
	} else
		ret = FALSE;
	spin_unlock_irqrestore(&kdpc_list_lock, flags);
	WORKTRACE("%d", ret);
	return ret;
}

wstdcall BOOLEAN WIN_FUNC(KeInsertQueueDpc,3)
	(struct kdpc *kdpc, void *arg1, void *arg2)
{
	WORKENTER("%p, %p, %p", kdpc, arg1, arg2);
	kdpc->arg1 = arg1;
	kdpc->arg2 = arg2;
	return queue_kdpc(kdpc);
}

wstdcall BOOLEAN WIN_FUNC(KeRemoveQueueDpc,1)
	(struct kdpc *kdpc)
{
	return dequeue_kdpc(kdpc);
}

wstdcall void WIN_FUNC(KeSetImportanceDpc,2)
	(struct kdpc *kdpc, enum kdpc_importance importance)
{
	kdpc->importance = importance;
}

static void ntos_work_worker(worker_param_t dummy)
{
	struct ntos_work_item *ntos_work_item;
	struct nt_list *cur;

	while (1) {
		spin_lock_bh(&ntos_work_lock);
		cur = RemoveHeadList(&ntos_work_list);
		spin_unlock_bh(&ntos_work_lock);
		if (!cur)
			break;
		ntos_work_item = container_of(cur, struct ntos_work_item, list);
		WORKTRACE("%p: executing %p, %p, %p", current,
			  ntos_work_item->func, ntos_work_item->arg1,
			  ntos_work_item->arg2);
		LIN2WIN2(ntos_work_item->func, ntos_work_item->arg1,
			 ntos_work_item->arg2);
		kfree(ntos_work_item);
	}
	WORKEXIT(return);
}

int schedule_ntos_work_item(NTOS_WORK_FUNC func, void *arg1, void *arg2)
{
	struct ntos_work_item *ntos_work_item;

	WORKENTER("adding work: %p, %p, %p", func, arg1, arg2);
	ntos_work_item = kmalloc(sizeof(*ntos_work_item), irql_gfp());
	if (!ntos_work_item) {
		ERROR("couldn't allocate memory");
		return -ENOMEM;
	}
	ntos_work_item->func = func;
	ntos_work_item->arg1 = arg1;
	ntos_work_item->arg2 = arg2;
	spin_lock_bh(&ntos_work_lock);
	InsertTailList(&ntos_work_list, &ntos_work_item->list);
	spin_unlock_bh(&ntos_work_lock);
	schedule_ntos_work(&ntos_work);
	WORKEXIT(return 0);
}

wstdcall void WIN_FUNC(KeInitializeSpinLock,1)
	(NT_SPIN_LOCK *lock)
{
	ENTER6("%p", lock);
	nt_spin_lock_init(lock);
}

wstdcall void WIN_FUNC(KeAcquireSpinLock,2)
	(NT_SPIN_LOCK *lock, KIRQL *irql)
{
	ENTER6("%p", lock);
	*irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
}

wstdcall void WIN_FUNC(KeReleaseSpinLock,2)
	(NT_SPIN_LOCK *lock, KIRQL oldirql)
{
	ENTER6("%p", lock);
	nt_spin_unlock_irql(lock, oldirql);
}

wstdcall void WIN_FUNC(KeAcquireSpinLockAtDpcLevel,1)
	(NT_SPIN_LOCK *lock)
{
	ENTER6("%p", lock);
	nt_spin_lock(lock);
}

wstdcall void WIN_FUNC(KeReleaseSpinLockFromDpcLevel,1)
	(NT_SPIN_LOCK *lock)
{
	ENTER6("%p", lock);
	nt_spin_unlock(lock);
}

wstdcall void WIN_FUNC(KeRaiseIrql,2)
	(KIRQL newirql, KIRQL *oldirql)
{
	ENTER6("%d", newirql);
	*oldirql = raise_irql(newirql);
}

wstdcall KIRQL WIN_FUNC(KeRaiseIrqlToDpcLevel,0)
	(void)
{
	return raise_irql(DISPATCH_LEVEL);
}

wstdcall void WIN_FUNC(KeLowerIrql,1)
	(KIRQL irql)
{
	ENTER6("%d", irql);
	lower_irql(irql);
}

wstdcall KIRQL WIN_FUNC(KeAcquireSpinLockRaiseToDpc,1)
	(NT_SPIN_LOCK *lock)
{
	ENTER6("%p", lock);
	return nt_spin_lock_irql(lock, DISPATCH_LEVEL);
}

#undef ExAllocatePoolWithTag

wstdcall void *WIN_FUNC(ExAllocatePoolWithTag,3)
	(enum pool_type pool_type, SIZE_T size, ULONG tag)
{
	void *addr;

	ENTER4("pool_type: %d, size: %lu, tag: 0x%x", pool_type, size, tag);
	assert_irql(_irql_ <= DISPATCH_LEVEL);
	if (size < PAGE_SIZE)
		addr = kmalloc(size, irql_gfp());
	else {
		if (irql_gfp() & GFP_ATOMIC) {
			addr = __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM,
					 PAGE_KERNEL);
			TRACE1("%p, %lu", addr, size);
		} else {
			addr = vmalloc(size);
			TRACE1("%p, %lu", addr, size);
		}
	}
	DBG_BLOCK(1) {
		if (addr)
			TRACE4("addr: %p, %lu", addr, size);
		else
			TRACE1("failed: %lu", size);
	}
	return addr;
}
WIN_FUNC_DECL(ExAllocatePoolWithTag,3)
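/*
 * Illustrative sketch (not part of ntoskernel.c): queue_kdpc and
 * dequeue_kdpc above prevent double-queueing by checking and updating
 * the "queued" flag under the same lock that protects the list.  The
 * standalone user-space program below demonstrates that pattern with a
 * pthread mutex standing in for the kernel spinlock; it is guarded by
 * #if 0 so it has no effect on the kernel build.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

struct item {
	struct item *next;
	int queued;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *head;

static int queue_item(struct item *it)
{
	int ret = 0;

	pthread_mutex_lock(&list_lock);
	if (!it->queued) {		/* refuse an already-queued item */
		it->next = head;
		head = it;
		it->queued = 1;
		ret = 1;
	}
	pthread_mutex_unlock(&list_lock);
	return ret;
}

int main(void)
{
	struct item a = { 0 };

	/* second attempt is rejected: prints "1 0" */
	printf("%d %d\n", queue_item(&a), queue_item(&a));
	return 0;
}
#endif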
wstdcall void WIN_FUNC(ExFreePoolWithTag,2)
	(void *addr, ULONG tag)
{
	TRACE4("%p", addr);
	if ((unsigned long)addr < VMALLOC_START ||
	    (unsigned long)addr >= VMALLOC_END)
		kfree(addr);
	else
		vfree(addr);
	EXIT4(return);
}

wstdcall void WIN_FUNC(ExFreePool,1)
	(void *addr)
{
	ExFreePoolWithTag(addr, 0);
}
WIN_FUNC_DECL(ExFreePool,1)

wstdcall void WIN_FUNC(ExInitializeNPagedLookasideList,7)
	(struct npaged_lookaside_list *lookaside,
	 LOOKASIDE_ALLOC_FUNC *alloc_func, LOOKASIDE_FREE_FUNC *free_func,
	 ULONG flags, SIZE_T size, ULONG tag, USHORT depth)
{
	ENTER3("lookaside: %p, size: %lu, flags: %u, head: %p, "
	       "alloc: %p, free: %p", lookaside, size, flags,
	       lookaside, alloc_func, free_func);
	memset(lookaside, 0, sizeof(*lookaside));
	lookaside->size = size;
	lookaside->tag = tag;
	lookaside->depth = 4;
	lookaside->maxdepth = 256;
	lookaside->pool_type = NonPagedPool;
	if (alloc_func)
		lookaside->alloc_func = alloc_func;
	else
		lookaside->alloc_func = WIN_FUNC_PTR(ExAllocatePoolWithTag,3);
	if (free_func)
		lookaside->free_func = free_func;
	else
		lookaside->free_func = WIN_FUNC_PTR(ExFreePool,1);
#ifndef CONFIG_X86_64
	nt_spin_lock_init(&lookaside->obsolete);
#endif
	EXIT3(return);
}

wstdcall void WIN_FUNC(ExDeleteNPagedLookasideList,1)
	(struct npaged_lookaside_list *lookaside)
{
	struct nt_slist *entry;

	ENTER3("lookaside = %p", lookaside);
	while ((entry = ExpInterlockedPopEntrySList(&lookaside->head)))
		LIN2WIN1(lookaside->free_func, entry);
	EXIT3(return);
}

#if defined(ALLOC_DEBUG) && ALLOC_DEBUG > 1
#define ExAllocatePoolWithTag(pool_type, size, tag)			\
	wrap_ExAllocatePoolWithTag(pool_type, size, tag, __FILE__, __LINE__)
#endif

wstdcall NTSTATUS WIN_FUNC(ExCreateCallback,4)
	(struct callback_object **object, struct object_attributes *attributes,
	 BOOLEAN create, BOOLEAN allow_multiple_callbacks)
{
	struct callback_object *obj;

	ENTER2("");
	spin_lock_bh(&ntoskernel_lock);
	nt_list_for_each_entry(obj, &callback_objects, callback_funcs) {
		if (obj->attributes == attributes) {
			spin_unlock_bh(&ntoskernel_lock);
			*object = obj;
			return STATUS_SUCCESS;
		}
	}
	spin_unlock_bh(&ntoskernel_lock);
	obj = allocate_object(sizeof(struct callback_object),
			      OBJECT_TYPE_CALLBACK, NULL);
	if (!obj)
		EXIT2(return STATUS_INSUFFICIENT_RESOURCES);
	InitializeListHead(&obj->callback_funcs);
	nt_spin_lock_init(&obj->lock);
	obj->allow_multiple_callbacks = allow_multiple_callbacks;
	obj->attributes = attributes;
	*object = obj;
	EXIT2(return STATUS_SUCCESS);
}

wstdcall void *WIN_FUNC(ExRegisterCallback,3)
	(struct callback_object *object, PCALLBACK_FUNCTION func, void *context)
{
	struct callback_func *callback;
	KIRQL irql;

	ENTER2("");
	irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
	if (object->allow_multiple_callbacks == FALSE &&
	    !IsListEmpty(&object->callback_funcs)) {
		nt_spin_unlock_irql(&object->lock, irql);
		EXIT2(return NULL);
	}
	nt_spin_unlock_irql(&object->lock, irql);
	callback = kmalloc(sizeof(*callback), GFP_KERNEL);
	if (!callback) {
		ERROR("couldn't allocate memory");
		return NULL;
	}
	callback->func = func;
	callback->context = context;
	callback->object = object;
	irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
	InsertTailList(&object->callback_funcs, &callback->list);
	nt_spin_unlock_irql(&object->lock, irql);
	EXIT2(return callback);
}
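/*
 * Illustrative sketch (not part of ntoskernel.c): ExRegisterCallback
 * above keeps (function, context) pairs on a per-object list, and
 * ExNotifyCallback below walks that list invoking each callback with
 * two extra arguments.  The standalone user-space program below shows
 * the same registration/notification pattern in miniature; it is
 * guarded by #if 0 so it has no effect on the kernel build.
 */
#if 0
#include <stdio.h>

typedef void (*callback_fn)(void *context, void *arg1, void *arg2);

struct callback_entry {
	struct callback_entry *next;
	callback_fn func;
	void *context;
};

static struct callback_entry *callbacks;

static void register_callback(struct callback_entry *cb,
			      callback_fn fn, void *ctx)
{
	cb->func = fn;
	cb->context = ctx;
	cb->next = callbacks;
	callbacks = cb;
}

static void notify_callbacks(void *arg1, void *arg2)
{
	for (struct callback_entry *cb = callbacks; cb; cb = cb->next)
		cb->func(cb->context, arg1, arg2);
}

static void print_cb(void *context, void *arg1, void *arg2)
{
	printf("%s: %s %s\n", (char *)context, (char *)arg1, (char *)arg2);
}

int main(void)
{
	struct callback_entry e;

	register_callback(&e, print_cb, "ctx");
	notify_callbacks("hello", "world");	/* prints "ctx: hello world" */
	return 0;
}
#endif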
wstdcall void WIN_FUNC(ExUnregisterCallback,1)
	(struct callback_func *callback)
{
	struct callback_object *object;
	KIRQL irql;

	ENTER3("%p", callback);
	if (!callback)
		return;
	object = callback->object;
	irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
	RemoveEntryList(&callback->list);
	nt_spin_unlock_irql(&object->lock, irql);
	kfree(callback);
	return;
}

wstdcall void WIN_FUNC(ExNotifyCallback,3)
	(struct callback_object *object, void *arg1, void *arg2)
{
	struct callback_func *callback;
	KIRQL irql;

	ENTER3("%p", object);
	irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
	nt_list_for_each_entry(callback, &object->callback_funcs, list) {
		LIN2WIN3(callback->func, callback->context, arg1, arg2);
	}
	nt_spin_unlock_irql(&object->lock, irql);
	return;
}

/* check and set signaled state; should be called with dispatcher_lock held */
/* @grab indicates if the event should be grabbed or checked
 * - note that a semaphore may stay in signaled state for multiple
