
ntoskernel.c

NDIS wireless network card driver source code for Linux
C
Page 1 of 5
wstdcall void WIN_FUNC(KeInitializeTimerEx,2)
	(struct nt_timer *nt_timer, enum timer_type type)
{
	TIMERENTER("%p", nt_timer);
	wrap_init_timer(nt_timer, type, NULL, NULL);
}

wstdcall void WIN_FUNC(KeInitializeTimer,1)
	(struct nt_timer *nt_timer)
{
	TIMERENTER("%p", nt_timer);
	wrap_init_timer(nt_timer, NotificationTimer, NULL, NULL);
}

/* expires and repeat are in HZ */
BOOLEAN wrap_set_timer(struct nt_timer *nt_timer, unsigned long expires_hz,
		       unsigned long repeat_hz, struct kdpc *kdpc)
{
	struct wrap_timer *wrap_timer;

	TIMERENTER("%p, %lu, %lu, %p, %lu",
		   nt_timer, expires_hz, repeat_hz, kdpc, jiffies);

	KeClearEvent((struct nt_event *)nt_timer);
	wrap_timer = nt_timer->wrap_timer;
	TIMERTRACE("%p", wrap_timer);
#ifdef TIMER_DEBUG
	if (wrap_timer->nt_timer != nt_timer)
		WARNING("bad timers: %p, %p, %p", wrap_timer, nt_timer,
			wrap_timer->nt_timer);
	if (nt_timer->wrap_timer_magic != WRAP_TIMER_MAGIC) {
		WARNING("Buggy Windows timer didn't initialize timer %p",
			nt_timer);
		return FALSE;
	}
	if (wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC) {
		WARNING("timer %p is not initialized (%lx)?",
			wrap_timer, wrap_timer->wrap_timer_magic);
		wrap_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
	}
#endif
	if (kdpc)
		nt_timer->kdpc = kdpc;
	wrap_timer->repeat = repeat_hz;
	if (mod_timer(&wrap_timer->timer, jiffies + expires_hz))
		TIMEREXIT(return TRUE);
	else
		TIMEREXIT(return FALSE);
}

wstdcall BOOLEAN WIN_FUNC(KeSetTimerEx,4)
	(struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
	 LONG period_ms, struct kdpc *kdpc)
{
	unsigned long expires_hz, repeat_hz;

	TIMERENTER("%p, %Ld, %d", nt_timer, duetime_ticks, period_ms);
	expires_hz = SYSTEM_TIME_TO_HZ(duetime_ticks) + 1;
	repeat_hz = MSEC_TO_HZ(period_ms);
	return wrap_set_timer(nt_timer, expires_hz, repeat_hz, kdpc);
}

wstdcall BOOLEAN WIN_FUNC(KeSetTimer,3)
	(struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
	 struct kdpc *kdpc)
{
	TIMERENTER("%p, %Ld, %p", nt_timer, duetime_ticks, kdpc);
	return KeSetTimerEx(nt_timer, duetime_ticks, 0, kdpc);
}

wstdcall BOOLEAN WIN_FUNC(KeCancelTimer,1)
	(struct nt_timer *nt_timer)
{
	struct wrap_timer *wrap_timer;

	TIMERENTER("%p", nt_timer);
	wrap_timer = nt_timer->wrap_timer;
	if (!wrap_timer) {
		ERROR("invalid wrap_timer");
		return TRUE;
	}
#ifdef TIMER_DEBUG
	BUG_ON(wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
#endif
	TIMERTRACE("canceling timer %p(%p)", wrap_timer, nt_timer);
	/* disable timer before deleting so if it is periodic timer, it
	 * won't be re-armed after deleting */
	wrap_timer->repeat = 0;
	if (del_timer(&wrap_timer->timer))
		TIMEREXIT(return TRUE);
	else
		TIMEREXIT(return FALSE);
}

wstdcall BOOLEAN WIN_FUNC(KeReadStateTimer,1)
	(struct nt_timer *nt_timer)
{
	if (nt_timer->dh.signal_state)
		return TRUE;
	else
		return FALSE;
}

wstdcall void WIN_FUNC(KeInitializeDpc,3)
	(struct kdpc *kdpc, void *func, void *ctx)
{
	ENTER3("%p, %p, %p", kdpc, func, ctx);
	memset(kdpc, 0, sizeof(*kdpc));
	kdpc->func = func;
	kdpc->ctx  = ctx;
}
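A note on units in the timer shims above: Windows passes KeSetTimerEx a due time in 100-nanosecond ticks, where a negative value means "relative to now", while wrap_set_timer works in jiffies. The actual SYSTEM_TIME_TO_HZ and MSEC_TO_HZ macros live elsewhere in the wrapper's headers; the following is only a minimal, self-contained sketch of what such a due-time conversion has to do (the function name, TICKS_PER_SEC, and the now_ticks parameter are illustrative, not from this source):

#include <stdint.h>
#include <stdio.h>

#define TICKS_PER_SEC 10000000LL	/* 100-ns ticks per second */

/* Convert an NT due time to a jiffies delta. hz is the kernel timer
 * frequency; now_ticks is the current system time in 100-ns ticks. */
static unsigned long nt_duetime_to_jiffies_delta(int64_t duetime, long hz,
						 int64_t now_ticks)
{
	int64_t delta_ticks;

	if (duetime < 0)		/* negative: relative due time */
		delta_ticks = -duetime;
	else				/* positive: absolute system time */
		delta_ticks = duetime - now_ticks;
	if (delta_ticks < 0)
		delta_ticks = 0;
	/* round up so the timer never fires early; the "+ 1" added to
	 * SYSTEM_TIME_TO_HZ in KeSetTimerEx serves a similar purpose */
	return (unsigned long)((delta_ticks * hz + TICKS_PER_SEC - 1) /
			       TICKS_PER_SEC);
}

int main(void)
{
	/* a -3 second (relative) due time at HZ=250 gives 750 jiffies */
	printf("%lu\n",
	       nt_duetime_to_jiffies_delta(-3 * TICKS_PER_SEC, 250, 0));
	return 0;
}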
static void kdpc_worker(worker_param_t dummy)
{
	struct nt_list *entry;
	struct kdpc *kdpc;
	unsigned long flags;
	KIRQL irql;

	while (1) {
		nt_spin_lock_irqsave(&kdpc_list_lock, flags);
		entry = RemoveHeadList(&kdpc_list);
		if (entry) {
			kdpc = container_of(entry, struct kdpc, list);
			assert(kdpc->queued);
			kdpc->queued = 0;
		} else
			kdpc = NULL;
		nt_spin_unlock_irqrestore(&kdpc_list_lock, flags);
		if (!kdpc)
			break;
		irql = raise_irql(DISPATCH_LEVEL);
		TRACE5("%p, %p, %p, %p, %p", kdpc, kdpc->func, kdpc->ctx,
		       kdpc->arg1, kdpc->arg2);
		LIN2WIN4(kdpc->func, kdpc, kdpc->ctx, kdpc->arg1, kdpc->arg2);
		lower_irql(irql);
	}
}

wstdcall void WIN_FUNC(KeFlushQueuedDpcs,0)
	(void)
{
	kdpc_worker(NULL);
}

static BOOLEAN queue_kdpc(struct kdpc *kdpc)
{
	BOOLEAN ret;
	unsigned long flags;

	ENTER5("%p", kdpc);
	nt_spin_lock_irqsave(&kdpc_list_lock, flags);
	if (kdpc->queued)
		ret = FALSE;
	else {
		if (kdpc->importance == HighImportance)
			InsertHeadList(&kdpc_list, &kdpc->list);
		else
			InsertTailList(&kdpc_list, &kdpc->list);
		kdpc->queued = 1;
		schedule_ntos_work(&kdpc_work);
		ret = TRUE;
	}
	nt_spin_unlock_irqrestore(&kdpc_list_lock, flags);
	TRACE5("%d", ret);
	return ret;
}

static BOOLEAN dequeue_kdpc(struct kdpc *kdpc)
{
	BOOLEAN ret;
	unsigned long flags;

	ENTER5("%p", kdpc);
	nt_spin_lock_irqsave(&kdpc_list_lock, flags);
	if (kdpc->queued) {
		RemoveEntryList(&kdpc->list);
		kdpc->queued = 0;
		ret = TRUE;
	} else
		ret = FALSE;
	nt_spin_unlock_irqrestore(&kdpc_list_lock, flags);
	TRACE5("%d", ret);
	return ret;
}

wstdcall BOOLEAN WIN_FUNC(KeInsertQueueDpc,3)
	(struct kdpc *kdpc, void *arg1, void *arg2)
{
	ENTER5("%p, %p, %p", kdpc, arg1, arg2);
	kdpc->arg1 = arg1;
	kdpc->arg2 = arg2;
	return queue_kdpc(kdpc);
}

wstdcall BOOLEAN WIN_FUNC(KeRemoveQueueDpc,1)
	(struct kdpc *kdpc)
{
	return dequeue_kdpc(kdpc);
}

wstdcall void WIN_FUNC(KeSetImportanceDpc,2)
	(struct kdpc *kdpc, enum kdpc_importance importance)
{
	kdpc->importance = importance;
}

static void ntos_work_worker(worker_param_t dummy)
{
	struct ntos_work_item *ntos_work_item;
	struct nt_list *cur;
	KIRQL irql;

	while (1) {
		irql = nt_spin_lock_irql(&ntos_work_lock, DISPATCH_LEVEL);
		cur = RemoveHeadList(&ntos_work_list);
		nt_spin_unlock_irql(&ntos_work_lock, irql);
		if (!cur)
			break;
		ntos_work_item = container_of(cur, struct ntos_work_item, list);
		WORKTRACE("%p: executing %p, %p, %p", current,
			  ntos_work_item->func, ntos_work_item->arg1,
			  ntos_work_item->arg2);
		LIN2WIN2(ntos_work_item->func, ntos_work_item->arg1,
			 ntos_work_item->arg2);
		kfree(ntos_work_item);
	}
	return;
}

int schedule_ntos_work_item(NTOS_WORK_FUNC func, void *arg1, void *arg2)
{
	struct ntos_work_item *ntos_work_item;
	KIRQL irql;

	WORKENTER("adding work: %p, %p, %p", func, arg1, arg2);
	ntos_work_item = kmalloc(sizeof(*ntos_work_item), gfp_irql());
	if (!ntos_work_item) {
		ERROR("couldn't allocate memory");
		return -ENOMEM;
	}
	ntos_work_item->func = func;
	ntos_work_item->arg1 = arg1;
	ntos_work_item->arg2 = arg2;
	irql = nt_spin_lock_irql(&ntos_work_lock, DISPATCH_LEVEL);
	InsertTailList(&ntos_work_list, &ntos_work_item->list);
	nt_spin_unlock_irql(&ntos_work_lock, irql);
	schedule_ntos_work(&ntos_work);
	WORKEXIT(return 0);
}

wstdcall void WIN_FUNC(KeInitializeSpinLock,1)
	(NT_SPIN_LOCK *lock)
{
	ENTER6("%p", lock);
	nt_spin_lock_init(lock);
}

wstdcall void WIN_FUNC(KeAcquireSpinLock,2)
	(NT_SPIN_LOCK *lock, KIRQL *irql)
{
	ENTER6("%p", lock);
	*irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
}

wstdcall void WIN_FUNC(KeReleaseSpinLock,2)
	(NT_SPIN_LOCK *lock, KIRQL oldirql)
{
	ENTER6("%p", lock);
	nt_spin_unlock_irql(lock, oldirql);
}

wstdcall void WIN_FUNC(KeAcquireSpinLockAtDpcLevel,1)
	(NT_SPIN_LOCK *lock)
{
	ENTER6("%p", lock);
	nt_spin_lock(lock);
}

wstdcall void WIN_FUNC(KeReleaseSpinLockFromDpcLevel,1)
	(NT_SPIN_LOCK *lock)
{
	ENTER6("%p", lock);
	nt_spin_unlock(lock);
}

wstdcall void WIN_FUNC(KeRaiseIrql,2)
	(KIRQL newirql, KIRQL *oldirql)
{
	ENTER6("%d", newirql);
	*oldirql = raise_irql(newirql);
}

wstdcall KIRQL WIN_FUNC(KeRaiseIrqlToDpcLevel,0)
	(void)
{
	return raise_irql(DISPATCH_LEVEL);
}

wstdcall void WIN_FUNC(KeLowerIrql,1)
	(KIRQL irql)
{
	ENTER6("%d", irql);
	lower_irql(irql);
}

wstdcall KIRQL WIN_FUNC(KeAcquireSpinLockRaiseToDpc,1)
	(NT_SPIN_LOCK *lock)
{
	ENTER6("%p", lock);
	return nt_spin_lock_irql(lock, DISPATCH_LEVEL);
}
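The DPC emulation above hinges on the kdpc->queued flag: queue_kdpc is a no-op when the DPC is already queued, and dequeue_kdpc can race safely against kdpc_worker because both sides manipulate the flag under kdpc_list_lock. A self-contained user-space model of that pattern (names invented; a mutex stands in for the spinlock, and the LIN2WIN4 calling-convention trampoline is omitted):

#include <stdbool.h>
#include <pthread.h>

struct dpc {
	struct dpc *next;
	bool queued;			/* set only while on the list */
	void (*func)(void *arg);
	void *arg;
};

static struct dpc *dpc_head;
static pthread_mutex_t dpc_lock = PTHREAD_MUTEX_INITIALIZER;

/* cf. queue_kdpc; simplified to a LIFO push, whereas the real code
 * inserts at the tail unless the DPC has HighImportance */
static bool dpc_queue(struct dpc *d)
{
	bool ret = false;

	pthread_mutex_lock(&dpc_lock);
	if (!d->queued) {		/* already queued: do nothing */
		d->next = dpc_head;
		dpc_head = d;
		d->queued = true;
		ret = true;
	}
	pthread_mutex_unlock(&dpc_lock);
	return ret;
}

static void dpc_run_all(void)		/* cf. kdpc_worker */
{
	for (;;) {
		pthread_mutex_lock(&dpc_lock);
		struct dpc *d = dpc_head;
		if (d) {
			dpc_head = d->next;
			d->queued = false;
		}
		pthread_mutex_unlock(&dpc_lock);
		if (!d)
			break;
		d->func(d->arg);	/* callback runs outside the lock */
	}
}

static void hello(void *arg) { (void)arg; }

int main(void)
{
	struct dpc d = { .func = hello };
	dpc_queue(&d);
	dpc_queue(&d);	/* returns false: already queued */
	dpc_run_all();
	return 0;
}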
#undef ExAllocatePoolWithTag

wstdcall void *WIN_FUNC(ExAllocatePoolWithTag,3)
	(enum pool_type pool_type, SIZE_T size, ULONG tag)
{
	unsigned long *addr, alloc_type;

	ENTER4("pool_type: %d, size: %lu, tag: 0x%x", pool_type, size, tag);
	size += sizeof(unsigned long);
	if (size < PAGE_SIZE) {
		addr = kmalloc(size, gfp_irql());
		alloc_type = ALLOC_TYPE_KMALLOC;
	} else if (in_interrupt()) {
		/* Some drivers (at least Atheros) allocate large
		 * amount of memory during
		 * Miniport(Query/Set)Information, which runs at
		 * DISPATCH_LEVEL. This means vmalloc is to be called
		 * at interrupt context, which is not allowed in
		 * 2.6.19+ kernels. For now, we use __get_free_pages
		 * which is more likely to fail (since it needs to
		 * find contiguous block) than __vmalloc */
		TRACE1("Windows driver allocating %lu bytes in interrupt "
		       "context: 0x%x", size, preempt_count());
		DBG_BLOCK(4) {
			dump_stack();
		}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
		addr = wrap_get_free_pages(GFP_ATOMIC | __GFP_HIGHMEM, size);
		alloc_type = (get_order(size) << 8) | (ALLOC_TYPE_PAGES & 0xff);
#else
		addr = __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM, PAGE_KERNEL);
		alloc_type = ALLOC_TYPE_VMALLOC;
#endif
	} else {
		addr = vmalloc(size);
		alloc_type = ALLOC_TYPE_VMALLOC;
	}
	if (addr) {
		TRACE4("addr: %p, %p, %lu, %lu", addr, addr + 1, alloc_type,
		       size);
		*addr = alloc_type;
		return addr + 1;
	} else
		EXIT1(return NULL);
}
WIN_FUNC_DECL(ExAllocatePoolWithTag,3)

wstdcall void vfree_nonintr(void *addr, void *ctx)
{
	vfree(addr);
}
WIN_FUNC_DECL(vfree_nonintr,2)

wstdcall void WIN_FUNC(ExFreePoolWithTag,2)
	(unsigned long *addr, ULONG tag)
{
	unsigned long alloc_type = *(--addr);

	TRACE4("%p, 0x%lx", addr, alloc_type);
	if (alloc_type == ALLOC_TYPE_KMALLOC)
		kfree(addr);
	else if (alloc_type == ALLOC_TYPE_VMALLOC) {
		assert((unsigned long)addr >= VMALLOC_START &&
		       (unsigned long)addr < VMALLOC_END);
		if (in_interrupt())
			schedule_ntos_work_item(WIN_FUNC_PTR(vfree_nonintr,2),
						addr, NULL);
		else
			vfree(addr);
	} else if ((alloc_type & 0xff) == ALLOC_TYPE_PAGES)
		free_pages((unsigned long)addr, alloc_type >> 8);
	else {
		WARNING("invalid memory: %p, 0x%lx", addr, alloc_type);
		dump_stack();
	}
	EXIT4(return);
}

wstdcall void WIN_FUNC(ExFreePool,1)
	(void *addr)
{
	ExFreePoolWithTag(addr, 0);
}
WIN_FUNC_DECL(ExFreePool,1)

wstdcall void WIN_FUNC(ExInitializeNPagedLookasideList,7)
	(struct npaged_lookaside_list *lookaside,
	 LOOKASIDE_ALLOC_FUNC *alloc_func, LOOKASIDE_FREE_FUNC *free_func,
	 ULONG flags, SIZE_T size, ULONG tag, USHORT depth)
{
	ENTER3("lookaside: %p, size: %lu, flags: %u, head: %p, "
	       "alloc: %p, free: %p", lookaside, size, flags,
	       lookaside, alloc_func, free_func);

	memset(lookaside, 0, sizeof(*lookaside));

	lookaside->size = size;
	lookaside->tag = tag;
	lookaside->depth = 4;
	lookaside->maxdepth = 256;
	lookaside->pool_type = NonPagedPool;

	if (alloc_func)
		lookaside->alloc_func = alloc_func;
	else
		lookaside->alloc_func = WIN_FUNC_PTR(ExAllocatePoolWithTag,3);
	if (free_func)
		lookaside->free_func = free_func;
	else
		lookaside->free_func = WIN_FUNC_PTR(ExFreePool,1);

#ifndef CONFIG_X86_64
	nt_spin_lock_init(&lookaside->obsolete);
#endif
	EXIT3(return);
}

wstdcall void WIN_FUNC(ExDeleteNPagedLookasideList,1)
	(struct npaged_lookaside_list *lookaside)
{
	struct nt_slist *entry;

	ENTER3("lookaside = %p", lookaside);
	while ((entry = ExpInterlockedPopEntrySList(&lookaside->head)))
		LIN2WIN1(lookaside->free_func, entry);
	EXIT3(return);
}

#if defined(ALLOC_DEBUG) && ALLOC_DEBUG > 1
#define ExAllocatePoolWithTag(pool_type, size, tag)			\
	wrap_ExAllocatePoolWithTag(pool_type, size, tag, __FILE__, __LINE__)
#endif
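ExAllocatePoolWithTag above hides one extra unsigned long in front of every allocation and hands the caller addr + 1; ExFreePoolWithTag reads that word back to decide whether kfree, vfree, or free_pages must release the block (for the page case, the allocation order is packed into bits 8 and up). A self-contained user-space illustration of that header-word scheme (constants and names invented):

#include <stdio.h>
#include <stdlib.h>

enum { MY_ALLOC_HEAP = 1, MY_ALLOC_OTHER = 2 };	/* stand-ins for ALLOC_TYPE_* */

static void *tagged_alloc(size_t size, unsigned long type)
{
	/* allocate one hidden word ahead of the caller's memory */
	unsigned long *addr = malloc(size + sizeof(unsigned long));

	if (!addr)
		return NULL;
	*addr = type;		/* record how this block was obtained */
	return addr + 1;	/* caller never sees the header word */
}

static void tagged_free(void *p)
{
	unsigned long *addr = (unsigned long *)p - 1;

	/* the real code dispatches on *addr to kfree/vfree/free_pages */
	printf("freeing block of type 0x%lx\n", *addr);
	free(addr);
}

int main(void)
{
	void *p = tagged_alloc(64, MY_ALLOC_HEAP);
	tagged_free(p);
	return 0;
}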
wstdcall NTSTATUS WIN_FUNC(ExCreateCallback,4)
	(struct callback_object **object, struct object_attributes *attributes,
	 BOOLEAN create, BOOLEAN allow_multiple_callbacks)
{
	struct callback_object *obj;
	KIRQL irql;

	ENTER2("");
	irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
	nt_list_for_each_entry(obj, &callback_objects, callback_funcs) {
		if (obj->attributes == attributes) {
			nt_spin_unlock_irql(&ntoskernel_lock, irql);
			*object = obj;
			return STATUS_SUCCESS;
		}
	}
	nt_spin_unlock_irql(&ntoskernel_lock, irql);
	obj = allocate_object(sizeof(struct callback_object),
			      OBJECT_TYPE_CALLBACK, NULL);
	if (!obj)
		EXIT2(return STATUS_INSUFFICIENT_RESOURCES);
	InitializeListHead(&obj->callback_funcs);
	nt_spin_lock_init(&obj->lock);
	obj->allow_multiple_callbacks = allow_multiple_callbacks;
	obj->attributes = attributes;
	*object = obj;
	EXIT2(return STATUS_SUCCESS);
}

wstdcall void *WIN_FUNC(ExRegisterCallback,3)
	(struct callback_object *object, PCALLBACK_FUNCTION func, void *context)
{
	struct callback_func *callback;
	KIRQL irql;

	ENTER2("");
	irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
