
📄 ntoskernel.c

📁 A program for installing and bringing up a wireless network card under Linux
💻 C
📖 Page 1 of 5
}

wstdcall struct nt_thread *WIN_FUNC(KeGetCurrentThread,0)
	(void)
{
	struct nt_thread *thread = get_current_nt_thread();
	TRACE2("%p, %p", thread, current);
	return thread;
}

wstdcall KPRIORITY WIN_FUNC(KeQueryPriorityThread,1)
	(struct nt_thread *thread)
{
	KPRIORITY prio;
	struct task_struct *task;

	TRACE2("%p", thread);
#ifdef CONFIG_X86_64
	/* sis163u driver for amd64 passes 0x1f from thread created by
	 * PsCreateSystemThread - no idea what is 0x1f */
	if (thread == (void *)0x1f)
		thread = get_current_nt_thread();
#endif
	if (!thread) {
		TRACE2("invalid thread");
		EXIT2(return LOW_REALTIME_PRIORITY);
	}
	task = get_nt_thread_task(thread);
	if (!task) {
		TRACE2("couldn't find task for thread: %p", thread);
		EXIT2(return LOW_REALTIME_PRIORITY);
	}
	if (thread_priority(thread->task) <= 0)
		prio = LOW_PRIORITY;
	else if (thread_priority(thread->task) <= -5)
		prio = LOW_REALTIME_PRIORITY;
	else
		prio = MAXIMUM_PRIORITY;
	TRACE2("%d", prio);
	return prio;
}

wstdcall KPRIORITY WIN_FUNC(KeSetPriorityThread,2)
	(struct nt_thread *thread, KPRIORITY prio)
{
	KPRIORITY old_prio;
	struct task_struct *task;

	TRACE2("thread: %p, priority = %u", thread, prio);
#ifdef CONFIG_X86_64
	if (thread == (void *)0x1f)
		thread = get_current_nt_thread();
#endif
	if (!thread) {
		TRACE2("invalid thread");
		EXIT2(return LOW_REALTIME_PRIORITY);
	}
	task = get_nt_thread_task(thread);
	if (!task) {
		TRACE2("couldn't find task for thread: %p", thread);
		EXIT2(return LOW_REALTIME_PRIORITY);
	}
	if (thread_priority(task) <= 0)
		old_prio = LOW_PRIORITY;
	else if (thread_priority(task) <= -5)
		old_prio = LOW_REALTIME_PRIORITY;
	else
		old_prio = MAXIMUM_PRIORITY;
	if (prio == LOW_PRIORITY)
		set_thread_priority(task, 0);
	else if (prio == LOW_REALTIME_PRIORITY)
		set_thread_priority(task, -5);
	else if (prio == HIGH_PRIORITY)
		set_thread_priority(task, -10);
	TRACE2("%d, %d", old_prio, (int)thread_priority(task));
	return old_prio;
}

struct thread_trampoline {
	void (*func)(void *) wstdcall;
	void *ctx;
	struct nt_thread *thread;
	struct completion started;
};

static int ntdriver_thread(void *data)
{
	struct thread_trampoline *thread_tramp = data;
	/* yes, a tramp! */
	typeof(thread_tramp->func) func = thread_tramp->func;
	typeof(thread_tramp->ctx) ctx = thread_tramp->ctx;

	thread_tramp->thread->task = current;
	thread_tramp->thread->pid = current->pid;
	TRACE2("thread: %p, task: %p (%d)", thread_tramp->thread,
	       current, current->pid);
	complete(&thread_tramp->started);
#ifdef PF_NOFREEZE
	current->flags |= PF_NOFREEZE;
#endif
	strncpy(current->comm, "ntdriver", sizeof(current->comm));
	current->comm[sizeof(current->comm)-1] = 0;
	LIN2WIN1(func, ctx);
	ERROR("task: %p", current);
	return 0;
}

wstdcall NTSTATUS WIN_FUNC(PsCreateSystemThread,7)
	(void **handle, ULONG access, void *obj_attr, void *process,
	 void *client_id, void (*func)(void *) wstdcall, void *ctx)
{
	struct thread_trampoline thread_tramp;

	ENTER2("handle = %p, access = %u, obj_attr = %p, process = %p, "
	       "client_id = %p, func = %p, context = %p", handle, access,
	       obj_attr, process, client_id, func, ctx);

	thread_tramp.thread = create_nt_thread(NULL);
	if (!thread_tramp.thread) {
		ERROR("couldn't allocate thread object");
		EXIT2(return STATUS_RESOURCES);
	}
	TRACE2("thread: %p", thread_tramp.thread);
	thread_tramp.func = func;
	thread_tramp.ctx = ctx;
	init_completion(&thread_tramp.started);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)
	thread_tramp.thread->pid = kernel_thread(ntdriver_thread, &thread_tramp,
						 CLONE_SIGHAND);
	TRACE2("pid = %d", thread_tramp.thread->pid);
	if (thread_tramp.thread->pid < 0) {
		free_object(thread_tramp.thread);
		EXIT2(return STATUS_FAILURE);
	}
	TRACE2("created task: %d", thread_tramp.thread->pid);
#else
	thread_tramp.thread->task = kthread_run(ntdriver_thread,
						&thread_tramp, "ntdriver");
	if (IS_ERR(thread_tramp.thread->task)) {
		free_object(thread_tramp.thread);
		EXIT2(return STATUS_FAILURE);
	}
	TRACE2("created task: %p", thread_tramp.thread->task);
#endif
	wait_for_completion(&thread_tramp.started);
	*handle = OBJECT_TO_HEADER(thread_tramp.thread);
	TRACE2("created thread: %p, %p", thread_tramp.thread, *handle);
	EXIT2(return STATUS_SUCCESS);
}

wstdcall NTSTATUS WIN_FUNC(PsTerminateSystemThread,1)
	(NTSTATUS status)
{
	struct nt_thread *thread;

	TRACE2("%p, %08X", current, status);
	thread = get_current_nt_thread();
	TRACE2("%p", thread);
	if (thread) {
		KeSetEvent((struct nt_event *)&thread->dh, 0, FALSE);
		while (1) {
			struct nt_list *ent;
			struct irp *irp;
			KIRQL irql;
			irql = nt_spin_lock_irql(&thread->lock, DISPATCH_LEVEL);
			ent = RemoveHeadList(&thread->irps);
			nt_spin_unlock_irql(&thread->lock, irql);
			if (!ent)
				break;
			irp = container_of(ent, struct irp, thread_list);
			IOTRACE("%p", irp);
			IoCancelIrp(irp);
		}
		/* the driver may later query this status with
		 * ZwQueryInformationThread */
		thread->status = status;
	} else
		ERROR("couldn't find thread for task: %p", current);
	complete_and_exit(NULL, status);
	ERROR("oops: %p, %d", thread->task, thread->pid);
	return STATUS_FAILURE;
}

wstdcall BOOLEAN WIN_FUNC(KeRemoveEntryDeviceQueue,2)
	(struct kdevice_queue *dev_queue, struct kdevice_queue_entry *entry)
{
	struct kdevice_queue_entry *e;
	KIRQL irql;

	irql = nt_spin_lock_irql(&dev_queue->lock, DISPATCH_LEVEL);
	nt_list_for_each_entry(e, &dev_queue->list, list) {
		if (e == entry) {
			RemoveEntryList(&e->list);
			nt_spin_unlock_irql(&dev_queue->lock, irql);
			return TRUE;
		}
	}
	nt_spin_unlock_irql(&dev_queue->lock, irql);
	return FALSE;
}

wstdcall BOOLEAN WIN_FUNC(KeSynchronizeExecution,3)
	(struct kinterrupt *interrupt, PKSYNCHRONIZE_ROUTINE synch_routine,
	 void *ctx)
{
	BOOLEAN ret;
	unsigned long flags;

	nt_spin_lock_irqsave(interrupt->actual_lock, flags);
	ret = LIN2WIN1(synch_routine, ctx);
	nt_spin_unlock_irqrestore(interrupt->actual_lock, flags);
	TRACE6("%d", ret);
	return ret;
}

wstdcall void *WIN_FUNC(MmAllocateContiguousMemorySpecifyCache,5)
	(SIZE_T size, PHYSICAL_ADDRESS lowest, PHYSICAL_ADDRESS highest,
	 PHYSICAL_ADDRESS boundary, enum memory_caching_type cache_type)
{
	void *addr;
	unsigned int flags;

	ENTER2("%lu, 0x%lx, 0x%lx, 0x%lx, %d", size, (long)lowest,
	       (long)highest, (long)boundary, cache_type);
	flags = irql_gfp();
	addr = wrap_get_free_pages(flags, size);
	TRACE2("%p, %lu, 0x%x", addr, size, flags);
	if (addr && ((virt_to_phys(addr) + size) <= highest))
		EXIT2(return addr);
#ifdef CONFIG_X86_64
	/* GFP_DMA is really only 16MB even on x86-64, but there is no
	 * other zone available */
	if (highest <= DMA_31BIT_MASK)
		flags |= __GFP_DMA;
	else if (highest <= DMA_32BIT_MASK)
		flags |= __GFP_DMA32;
#else
	if (highest <= DMA_24BIT_MASK)
		flags |= __GFP_DMA;
	else if (highest > DMA_30BIT_MASK)
		flags |= __GFP_HIGHMEM;
#endif
	addr = wrap_get_free_pages(flags, size);
	TRACE2("%p, %lu, 0x%x", addr, size, flags);
	return addr;
}

wstdcall void WIN_FUNC(MmFreeContiguousMemorySpecifyCache,3)
	(void *base, SIZE_T size, enum memory_caching_type cache_type)
{
	TRACE2("%p, %lu", base, size);
	free_pages((unsigned long)base, get_order(size));
}

wstdcall PHYSICAL_ADDRESS WIN_FUNC(MmGetPhysicalAddress,1)
	(void *base)
{
	unsigned long phy = virt_to_phys(base);
	TRACE2("%p, %p", base, (void *)phy);
	return phy;
}

/* Atheros card with pciid 168C:0014 calls this function with 0xf0000
 * and 0xf6ef0 address, and then check for things that seem to be
 * related to ACPI: "_SM_" and "_DMI_". This may be the hack they do
 * to check if this card is installed in IBM thinkpads; we can
 * probably get this device to work if we create a buffer with the
 * strings as required by the driver and return virtual address for
 * that address instead */
wstdcall void *WIN_FUNC(MmMapIoSpace,3)
	(PHYSICAL_ADDRESS phys_addr, SIZE_T size,
	 enum memory_caching_type cache)
{
	void *virt;

	ENTER1("cache type: %d", cache);
	if (cache == MmCached)
		virt = ioremap(phys_addr, size);
	else
		virt = ioremap_nocache(phys_addr, size);
	TRACE1("%Lx, %lu, %p", phys_addr, size, virt);
	return virt;
}

wstdcall void WIN_FUNC(MmUnmapIoSpace,2)
	(void *addr, SIZE_T size)
{
	ENTER1("%p, %lu", addr, size);
	iounmap(addr);
	return;
}

wstdcall ULONG WIN_FUNC(MmSizeOfMdl,2)
	(void *base, ULONG length)
{
	return (sizeof(struct mdl) +
		(sizeof(PFN_NUMBER) * SPAN_PAGES(base, length)));
}

struct mdl *allocate_init_mdl(void *virt, ULONG length)
{
	struct wrap_mdl *wrap_mdl;
	struct mdl *mdl;
	int mdl_size = MmSizeOfMdl(virt, length);

	if (mdl_size <= MDL_CACHE_SIZE) {
		wrap_mdl = kmem_cache_alloc(mdl_cache, irql_gfp());
		if (!wrap_mdl)
			return NULL;
		spin_lock_bh(&dispatcher_lock);
		InsertHeadList(&wrap_mdl_list, &wrap_mdl->list);
		spin_unlock_bh(&dispatcher_lock);
		mdl = wrap_mdl->mdl;
		TRACE3("allocated mdl from cache: %p(%p), %p(%d)",
		       wrap_mdl, mdl, virt, length);
		memset(mdl, 0, MDL_CACHE_SIZE);
		MmInitializeMdl(mdl, virt, length);
		/* mark the MDL as allocated from cache pool so when
		 * it is freed, we free it back to the pool */
		mdl->flags = MDL_ALLOCATED_FIXED_SIZE | MDL_CACHE_ALLOCATED;
	} else {
		wrap_mdl =
			kmalloc(sizeof(*wrap_mdl) + mdl_size, irql_gfp());
		if (!wrap_mdl)
			return NULL;
		mdl = wrap_mdl->mdl;
		TRACE3("allocated mdl from memory: %p(%p), %p(%d)",
		       wrap_mdl, mdl, virt, length);
		spin_lock_bh(&dispatcher_lock);
		InsertHeadList(&wrap_mdl_list, &wrap_mdl->list);
		spin_unlock_bh(&dispatcher_lock);
		memset(mdl, 0, mdl_size);
		MmInitializeMdl(mdl, virt, length);
		mdl->flags = MDL_ALLOCATED_FIXED_SIZE;
	}
	return mdl;
}

void free_mdl(struct mdl *mdl)
{
	/* A driver may allocate Mdl with NdisAllocateBuffer and free
	 * with IoFreeMdl (e.g., 64-bit Broadcom). Since we need to
	 * treat buffers allocated with Ndis calls differently, we
	 * must call NdisFreeBuffer if it is allocated with Ndis
	 * function. We set 'pool' field in Ndis functions. */
	if (!mdl)
		return;
	if (mdl->pool)
		NdisFreeBuffer(mdl);
	else {
		struct wrap_mdl *wrap_mdl = (struct wrap_mdl *)
			((char *)mdl - offsetof(struct wrap_mdl, mdl));
		spin_lock_bh(&dispatcher_lock);
		RemoveEntryList(&wrap_mdl->list);
		spin_unlock_bh(&dispatcher_lock);

		if (mdl->flags & MDL_CACHE_ALLOCATED) {
			TRACE3("freeing mdl cache: %p, %p, %p",
			       wrap_mdl, mdl, mdl->mappedsystemva);
			kmem_cache_free(mdl_cache, wrap_mdl);
		} else {
			TRACE3("freeing mdl: %p, %p, %p",
			       wrap_mdl, mdl, mdl->mappedsystemva);
			kfree(wrap_mdl);
		}
	}
	return;
}

wstdcall void WIN_FUNC(IoBuildPartialMdl,4)
	(struct mdl *source, struct mdl *target, void *virt, ULONG length)
{
	MmInitializeMdl(target, virt, length);
	target->flags |= MDL_PARTIAL;
}

wstdcall void WIN_FUNC(MmBuildMdlForNonPagedPool,1)
	(struct mdl *mdl)
{
	PFN_NUMBER *mdl_pages;
	int i, n;

	ENTER4("%p", mdl);
	/* already mapped */
//	mdl->mappedsystemva = MmGetMdlVirtualAddress(mdl);
	mdl->flags |= MDL_SOURCE_IS_NONPAGED_POOL;
	TRACE4("%p, %p, %p, %d, %d", mdl, mdl->mappedsystemva, mdl->startva,
	       mdl->byteoffset, mdl->bytecount);
	n = SPAN_PAGES(MmGetSystemAddressForMdl(mdl), MmGetMdlByteCount(mdl));
	if (n > MDL_CACHE_PAGES)
		WARNING("%p, %d, %d", MmGetSystemAddressForMdl(mdl),
			MmGetMdlByteCount(mdl), n);
	mdl_pages = MmGetMdlPfnArray(mdl);
	for (i = 0; i < n; i++)
		mdl_pages[i] = (ULONG_PTR)mdl->startva + (i * PAGE_SIZE);
	EXIT4(return);
}

wstdcall void *WIN_FUNC(MmMapLockedPages,2)
	(struct mdl *mdl, KPROCESSOR_MODE access_mode)
{
	/* already mapped */
//	mdl->mappedsystemva = MmGetMdlVirtualAddress(mdl);
	mdl->flags |= MDL_MAPPED_TO_SYSTEM_VA;
	/* what is the need for MDL_PARTIAL_HAS_BEEN_MAPPED? */
	if (mdl->flags & MDL_PARTIAL)
		mdl->flags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
	return mdl->mappedsystemva;
}

wstdcall void *WIN_FUNC(MmMapLockedPagesSpecifyCache,6)
	(struct mdl *mdl, KPROCESSOR_MODE access_mode,
	 enum memory_caching_type cache_type, void *base_address,
	 ULONG bug_check, enum mm_page_priority priority)
{
	return MmMapLockedPages(mdl, access_mode);
}

wstdcall void WIN_FUNC(MmUnmapLockedPages,2)
	(void *base, struct mdl *mdl)
{
	mdl->flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
	return;
}

wstdcall void WIN_FUNC(MmProbeAndLockPages,3)
	(struct mdl *mdl, KPROCESSOR_MODE access_mode,
	 enum lock_operation operation)
{
	/* already locked */
	mdl->flags |= MDL_PAGES_LOCKED;
	return;
}

wstdcall void WIN_FUNC(MmUnlockPages,1)
	(struct mdl *mdl)
{
	mdl->flags &= ~MDL_PAGES_LOCKED;
	return;
}

wstdcall BOOLEAN WIN_FUNC(MmIsAddressValid,1)
	(void *virt_addr)
{
	if (virt_addr_valid(virt_addr))
		return TRUE;
	else
		return FALSE;
}

wstdcall void *WIN_FUNC(MmLockPagableDataSection,1)
	(void *address)
{
	return address;
}

wstdcall void WIN_FUNC(MmUnlockPagableImageSection,1)
	(void *handle)
{
	return;
}

wstdcall NTSTATUS WIN_FUNC(ObReferenceObjectByHandle,6)
	(void *handle, ACCESS_MASK desired_access, void *obj_type,
	 KPROCESSOR_MODE access_mode, void **object, void *handle_info)
{
	struct common_object_header *hdr;

	TRACE2("%p", handle);
	hdr = HANDLE_TO_HEADER(handle);
	atomic_inc_var(hdr->ref_count);
	*object = HEADER_TO_OBJECT(hdr);
	TRACE2("%p, %p, %d, %p", hdr, object, hdr->ref_count, *object);
	return STATUS_SUCCESS;
}

/* DDK doesn't say if return value should be before incrementing or
 * after incrementing reference count, but according to #reactos
 * devels, it should be return value after incrementing */
wfastcall LONG WIN_FUNC(ObfReferenceObject,1)
	(void *object)
{
	struct common_object_header *hdr;
	LONG ret;

	hdr = OBJECT_TO_HEADER(object);
	ret = post_atomic_add(hdr->ref_count, 1);
	TRACE2("%p, %d, %p", hdr, hdr->ref_count, object);
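(The listing above is cut off here; it continues on page 2 of 5.)

For context, the trampoline machinery near the top of the listing exists to service the standard NT kernel call that NDIS drivers use to spawn worker threads. Below is a minimal sketch, not part of ntoskernel.c, of what the Windows-driver side of such a call typically looks like; worker_main, worker_ctx and start_worker are hypothetical names used only for illustration.

#include <ntddk.h>

/* Hypothetical worker routine: the PsCreateSystemThread wrapper above ends
 * up running it on a Linux kernel thread via ntdriver_thread and
 * LIN2WIN1(func, ctx). */
static VOID worker_main(PVOID ctx)
{
	/* ... periodic driver work using ctx ... */
	PsTerminateSystemThread(STATUS_SUCCESS);	/* does not return */
}

static NTSTATUS start_worker(PVOID worker_ctx, PHANDLE thread_handle)
{
	/* The seven arguments match WIN_FUNC(PsCreateSystemThread,7) in the
	 * listing; that wrapper only uses the handle, func and ctx arguments
	 * and ignores the access, object-attribute, process and client-id
	 * parameters. */
	return PsCreateSystemThread(thread_handle, THREAD_ALL_ACCESS, NULL,
				    NULL, NULL, worker_main, worker_ctx);
}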
