
📄 ntoskernel.c

📁 This file re-implements Windows NT kernel (ntoskrnl) routines so that Windows drivers for wireless network cards can be used under Linux; usage instructions can be found elsewhere on this site.
💻 C
📖 Page 1 of 3
/*
 *  Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 */

#include "ntoskernel.h"
#include "ndis.h"
#include "usb.h"

/* MDLs describe a range of virtual address with an array of physical
 * pages right after the header. For different ranges of virtual
 * addresses, the number of entries of physical pages may be different
 * (depending on number of entries required). If we want to allocate
 * MDLs from a pool, the size has to be constant. So we assume that
 * maximum range used by a driver is CACHE_MDL_PAGES; if a driver
 * requests an MDL for a bigger region, we allocate it with kmalloc;
 * otherwise, we allocate from the pool */

#define CACHE_MDL_PAGES 2
#define CACHE_MDL_SIZE (sizeof(struct mdl) + (sizeof(ULONG) * CACHE_MDL_PAGES))

static wait_queue_head_t dispatch_event_wq;
KSPIN_LOCK dispatch_event_lock;
KSPIN_LOCK irp_cancel_lock;
KSPIN_LOCK ntoskernel_lock;
static kmem_cache_t *mdl_cache;

int ntoskernel_init(void)
{
        kspin_lock_init(&dispatch_event_lock);
        kspin_lock_init(&irp_cancel_lock);
        kspin_lock_init(&ntoskernel_lock);
        init_waitqueue_head(&dispatch_event_wq);
        mdl_cache = kmem_cache_create("ndis_mdl", CACHE_MDL_SIZE, 0, 0,
                                      NULL, NULL);
        if (!mdl_cache) {
                ERROR("couldn't allocate MDL cache");
                return -ENOMEM;
        }
        return 0;
}

void ntoskernel_exit(void)
{
        if (mdl_cache && kmem_cache_destroy(mdl_cache))
                ERROR("A Windows driver didn't free all MDL(s);"
                      "memory is leaking");
        return;
}

WRAP_EXPORT_MAP("KeTickCount", &jiffies);

STDCALL void WRAP_EXPORT(KeInitializeTimer)
        (struct ktimer *ktimer)
{
        TRACEENTER4("%p", ktimer);
        wrapper_init_timer(ktimer, NULL);
        ktimer->dispatch_header.signal_state = FALSE;
}

STDCALL void WRAP_EXPORT(KeInitializeDpc)
        (struct kdpc *kdpc, void *func, void *ctx)
{
        TRACEENTER4("%p, %p, %p", kdpc, func, ctx);
        init_dpc(kdpc, func, ctx);
}

STDCALL BOOLEAN WRAP_EXPORT(KeSetTimerEx)
        (struct ktimer *ktimer, LARGE_INTEGER due_time, LONG period,
         struct kdpc *kdpc)
{
        unsigned long expires;
        unsigned long repeat;

        TRACEENTER4("%p, %ld, %u, %p", ktimer, (long)due_time, period, kdpc);
        if (due_time < 0)
                expires = jiffies + HZ * (-due_time) / TICKSPERSEC;
        else
                expires = HZ * due_time / TICKSPERSEC;
        repeat = HZ * period / TICKSPERSEC;
        return wrapper_set_timer(ktimer->wrapper_timer, expires, repeat, kdpc);
}

STDCALL BOOLEAN WRAP_EXPORT(KeSetTimer)
        (struct ktimer *ktimer, LARGE_INTEGER due_time, struct kdpc *kdpc)
{
        TRACEENTER4("%p, %ld, %p", ktimer, (long)due_time, kdpc);
        return KeSetTimerEx(ktimer, due_time, 0, kdpc);
}

STDCALL BOOLEAN WRAP_EXPORT(KeCancelTimer)
        (struct ktimer *ktimer)
{
        char canceled;

        TRACEENTER4("%p", ktimer);
        wrapper_cancel_timer(ktimer->wrapper_timer, &canceled);
        return canceled;
}

STDCALL KIRQL WRAP_EXPORT(KeGetCurrentIrql)
        (void)
{
        return current_irql();
}

STDCALL void WRAP_EXPORT(KeInitializeSpinLock)
        (KSPIN_LOCK *lock)
{
        kspin_lock_init(lock);
}

STDCALL void WRAP_EXPORT(KeAcquireSpinLock)
        (KSPIN_LOCK *lock, KIRQL *irql)
{
        *irql = KfAcquireSpinLock(FASTCALL_ARGS_1(lock));
}

STDCALL void WRAP_EXPORT(KeReleaseSpinLock)
        (KSPIN_LOCK *lock, KIRQL oldirql)
{
        KfReleaseSpinLock(FASTCALL_ARGS_2(lock, oldirql));
}

STDCALL void WRAP_EXPORT(KeAcquireSpinLockAtDpcLevel)
        (KSPIN_LOCK *lock)
{
        KfAcquireSpinLock(FASTCALL_ARGS_1(lock));
}

STDCALL void WRAP_EXPORT(KeLowerIrql)
        (KIRQL irql)
{
        KfLowerIrql(FASTCALL_ARGS_1(irql));
}

STDCALL KIRQL WRAP_EXPORT(KeAcquireSpinLockRaiseToDpc)
        (KSPIN_LOCK *lock)
{
        return KfAcquireSpinLock(FASTCALL_ARGS_1(lock));
}

STDCALL void WRAP_EXPORT(KeReleaseSpinLockFromDpcLevel)
        (KSPIN_LOCK *lock)
{
        KefReleaseSpinLockFromDpcLevel(FASTCALL_ARGS_1(lock));
}

_FASTCALL struct slist_entry *WRAP_EXPORT(ExInterlockedPushEntrySList)
        (FASTCALL_DECL_3(union slist_head *head, struct slist_entry *entry,
                         KSPIN_LOCK *lock))
{
        struct slist_entry *oldhead;
        KIRQL irql;

        TRACEENTER3("head = %p, entry = %p", head, entry);
        KeAcquireSpinLock(lock, &irql);
        oldhead = head->list.next;
        entry->next = head->list.next;
        head->list.next = entry;
        head->list.depth++;
        KeReleaseSpinLock(lock, irql);
        DBGTRACE3("head = %p, oldhead = %p", head, oldhead);
        return(oldhead);
}

_FASTCALL struct slist_entry *WRAP_EXPORT(ExpInterlockedPushEntrySList)
        (FASTCALL_DECL_3(union slist_head *head, struct slist_entry *entry,
                         KSPIN_LOCK *lock))
{
        return ExInterlockedPushEntrySList(FASTCALL_ARGS_3(head, entry, lock));
}

_FASTCALL struct slist_entry *WRAP_EXPORT(InterlockedPushEntrySList)
        (FASTCALL_DECL_2(union slist_head *head, struct slist_entry *entry))
{
        return ExInterlockedPushEntrySList(FASTCALL_ARGS_3(head, entry,
                                                           &ntoskernel_lock));
}

_FASTCALL struct slist_entry *WRAP_EXPORT(ExInterlockedPopEntrySList)
        (FASTCALL_DECL_2(union slist_head *head, KSPIN_LOCK *lock))
{
        struct slist_entry *first;
        KIRQL irql;

        TRACEENTER3("head = %p", head);
        KeAcquireSpinLock(lock, &irql);
        first = NULL;
        if (head) {
                first = head->list.next;
                if (first) {
                        head->list.next = first->next;
                        head->list.depth--;
                }
        }
        KeReleaseSpinLock(lock, irql);
        DBGTRACE3("returning %p", first);
        return first;
}

_FASTCALL struct slist_entry *WRAP_EXPORT(ExpInterlockedPopEntrySList)
        (FASTCALL_DECL_2(union slist_head *head, KSPIN_LOCK *lock))
{
        return ExInterlockedPopEntrySList(FASTCALL_ARGS_2(head, lock));
}

_FASTCALL struct slist_entry *WRAP_EXPORT(InterlockedPopEntrySList)
        (FASTCALL_DECL_1(union slist_head *head))
{
        return ExInterlockedPopEntrySList(FASTCALL_ARGS_2(head,
                                                          &ntoskernel_lock));
}

_FASTCALL struct list_entry *WRAP_EXPORT(ExfInterlockedInsertTailList)
        (FASTCALL_DECL_3(struct list_entry *head, struct list_entry *entry,
                         KSPIN_LOCK *lock))
{
        struct list_entry *oldhead;
        KIRQL irql;

        TRACEENTER3("head = %p", head);
        KeAcquireSpinLock(lock, &irql);
        if (head == NULL)
                oldhead = NULL;
        else
                oldhead = head->bwd_link;
        entry->fwd_link = head;
        entry->bwd_link = head->bwd_link;
        head->bwd_link->fwd_link = entry;
        head->bwd_link = entry;
        KeReleaseSpinLock(lock, irql);
        DBGTRACE3("head = %p, oldhead = %p", head, oldhead);
        return(oldhead);
}

_FASTCALL struct list_entry *WRAP_EXPORT(ExfInterlockedRemoveHeadList)
        (FASTCALL_DECL_2(struct list_entry *head, KSPIN_LOCK *lock))
{
        struct list_entry *entry, *tmp;
        KIRQL irql;

        TRACEENTER3("head = %p", head);
        KeAcquireSpinLock(lock, &irql);
        if (head == NULL)
                TRACEEXIT3(return NULL);
        entry = head->fwd_link;
        if (entry == NULL || entry->bwd_link == NULL ||
            entry->fwd_link == NULL ||
            entry->bwd_link->fwd_link != entry ||
            entry->fwd_link->bwd_link != entry) {
                ERROR("illegal list_entry %p", entry);
                TRACEEXIT3(return NULL);
        }
        tmp = entry->bwd_link;
        entry->fwd_link->bwd_link = entry->bwd_link;
        tmp->fwd_link = entry->fwd_link;
        entry->fwd_link = NULL;
        entry->bwd_link = NULL;
        KeReleaseSpinLock(lock, irql);
        DBGTRACE3("head = %p", head);
        TRACEEXIT3(return entry);
}

_FASTCALL USHORT WRAP_EXPORT(ExQueryDepthSList)
        (union slist_head *head)
{
        return head->list.depth;
}

_FASTCALL LONG WRAP_EXPORT(InterlockedDecrement)
        (FASTCALL_DECL_1(LONG volatile *val))
{
        LONG x;

        TRACEENTER4("%s", "");
        kspin_lock(&ntoskernel_lock);
        (*val)--;
        x = *val;
        kspin_unlock(&ntoskernel_lock);
        TRACEEXIT4(return x);
}

_FASTCALL LONG WRAP_EXPORT(InterlockedIncrement)
        (FASTCALL_DECL_1(LONG volatile *val))
{
        LONG x;

        TRACEENTER4("%s", "");
        kspin_lock(&ntoskernel_lock);
        (*val)++;
        x = *val;
        kspin_unlock(&ntoskernel_lock);
        TRACEEXIT4(return x);
}

_FASTCALL LONG WRAP_EXPORT(InterlockedExchange)
        (FASTCALL_DECL_2(LONG volatile *target, LONG val))
{
        LONG x;

        TRACEENTER4("%s", "");
        kspin_lock(&ntoskernel_lock);
        x = *target;
        *target = val;
        kspin_unlock(&ntoskernel_lock);
        TRACEEXIT4(return x);
}

_FASTCALL LONG WRAP_EXPORT(InterlockedCompareExchange)
        (FASTCALL_DECL_3(LONG volatile *dest, LONG xchg, LONG comperand))
{
        LONG x;

        TRACEENTER4("%s", "");
        kspin_lock(&ntoskernel_lock);
        x = *dest;
        if (*dest == comperand)
                *dest = xchg;
        kspin_unlock(&ntoskernel_lock);
        TRACEEXIT4(return x);
}

_FASTCALL void WRAP_EXPORT(ExInterlockedAddLargeStatistic)
        (FASTCALL_DECL_2(LARGE_INTEGER *plint, ULONG n))
{
        unsigned long flags;

        TRACEENTER3("Stat %p = %llu, n = %u", plint, *plint, n);
        kspin_lock_irqsave(&ntoskernel_lock, flags);
        *plint += n;
        kspin_unlock_irqrestore(&ntoskernel_lock, flags);
}

STDCALL void *WRAP_EXPORT(ExAllocatePoolWithTag)
        (enum pool_type pool_type, SIZE_T size, ULONG tag)
{
        void *ret;

        TRACEENTER1("pool_type: %d, size: %lu, tag: %u", pool_type,
                    size, tag);

        if (current_irql() == DISPATCH_LEVEL)
                ret = kmalloc(size, GFP_ATOMIC);
        else
                ret = kmalloc(size, GFP_KERNEL);

        DBGTRACE2("return value = %p", ret);
        return ret;
}

STDCALL void WRAP_EXPORT(ExFreePool)
        (void *p)
{
        TRACEENTER2("%p", p);
        kfree(p);
        TRACEEXIT2(return);
}

STDCALL void WRAP_EXPORT(ExInitializeNPagedLookasideList)
        (struct npaged_lookaside_list *lookaside,
         LOOKASIDE_ALLOC_FUNC *alloc_func, LOOKASIDE_FREE_FUNC *free_func,
         ULONG flags, SIZE_T size, ULONG tag, USHORT depth)
{
        TRACEENTER3("lookaside: %p, size: %lu, flags: %u,"
                    " head: %p, size of lookaside: %lu",
                    lookaside, size, flags, lookaside->head.list.next,
                    (unsigned long)sizeof(struct npaged_lookaside_list));

        memset(lookaside, 0, sizeof(*lookaside));

        lookaside->size = size;
        lookaside->tag = tag;
        lookaside->depth = 4;
        lookaside->maxdepth = 256;

        if (alloc_func)
                lookaside->alloc_func = alloc_func;
        else
                lookaside->alloc_func = ExAllocatePoolWithTag;
        if (free_func)
                lookaside->free_func = free_func;
        else
                lookaside->free_func = ExFreePool;

        KeInitializeSpinLock(&lookaside->obsolete);
        TRACEEXIT3(return);
}

STDCALL void WRAP_EXPORT(ExDeleteNPagedLookasideList)
        (struct npaged_lookaside_list *lookaside)
{
        struct slist_entry *entry, *p;

        TRACEENTER3("lookaside = %p", lookaside);
        entry = lookaside->head.list.next;
        while (entry) {
                p = entry;
                entry = entry->next;
                lookaside->free_func(p);
        }
        TRACEEXIT4(return);
}

STDCALL void *WRAP_EXPORT(MmMapIoSpace)
        (PHYSICAL_ADDRESS phys_addr, SIZE_T size,
         enum memory_caching_type cache)
{
        void *virt;

        if (cache)
                virt = ioremap(phys_addr, size);
        else
                virt = ioremap_nocache(phys_addr, size);
        DBGTRACE3("%Lx, %lu, %d: %p", phys_addr, size, cache, virt);
        return virt;
}

STDCALL void WRAP_EXPORT(MmUnmapIoSpace)
        (void *addr, SIZE_T size)
{
        TRACEENTER3("%p, %lu", addr, size);
        iounmap(addr);
        return;
}

STDCALL int WRAP_EXPORT(IoIsWdmVersionAvailable)
        (UCHAR major, UCHAR minor)
{
        TRACEENTER3("%d, %x", major, minor);
        if (major == 1 &&
            (minor == 0x30 || // Windows 2003
             minor == 0x20 || // Windows XP
             minor == 0x10)) // Windows 2000
                return 1;
        return 0;
}

STDCALL void WRAP_EXPORT(KeInitializeEvent)
        (struct kevent *kevent, enum event_type type, BOOLEAN state)
{
        TRACEENTER3("event = %p, type = %d, state = %d",
                    kevent, type, state);
        kspin_lock(&dispatch_event_lock);
        kevent->header.type = type;
