
📄 ntoskernel.c

📁 ndiswrapper tools
💻 C
📖 Page 1 of 5
	TRACEENTER6("%p", lock);
	*irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
}

wstdcall void WIN_FUNC(KeReleaseSpinLock,2)
	(NT_SPIN_LOCK *lock, KIRQL oldirql)
{
	TRACEENTER6("%p", lock);
	nt_spin_unlock_irql(lock, oldirql);
}

wstdcall void WIN_FUNC(KeAcquireSpinLockAtDpcLevel,1)
	(NT_SPIN_LOCK *lock)
{
	TRACEENTER6("%p", lock);
	nt_spin_lock(lock);
}

wstdcall void WIN_FUNC(KeReleaseSpinLockFromDpcLevel,1)
	(NT_SPIN_LOCK *lock)
{
	TRACEENTER6("%p", lock);
	nt_spin_unlock(lock);
}

wstdcall void WIN_FUNC(KeRaiseIrql,2)
	(KIRQL newirql, KIRQL *oldirql)
{
	TRACEENTER6("%d", newirql);
	*oldirql = raise_irql(newirql);
}

wstdcall KIRQL WIN_FUNC(KeRaiseIrqlToDpcLevel,0)
	(void)
{
	return raise_irql(DISPATCH_LEVEL);
}

wstdcall void WIN_FUNC(KeLowerIrql,1)
	(KIRQL irql)
{
	TRACEENTER6("%d", irql);
	lower_irql(irql);
}

wstdcall KIRQL WIN_FUNC(KeAcquireSpinLockRaiseToDpc,1)
	(NT_SPIN_LOCK *lock)
{
	TRACEENTER6("%p", lock);
	return nt_spin_lock_irql(lock, DISPATCH_LEVEL);
}

#undef ExAllocatePoolWithTag

wstdcall void *WIN_FUNC(ExAllocatePoolWithTag,3)
	(enum pool_type pool_type, SIZE_T size, ULONG tag)
{
	void *addr;

	TRACEENTER4("pool_type: %d, size: %lu, tag: %u", pool_type,
		    size, tag);
	if (size <= KMALLOC_THRESHOLD)
		addr = kmalloc(size, gfp_irql());
	else {
		if (current_irql() < DISPATCH_LEVEL)
			addr = vmalloc(size);
		else
			addr = __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM,
					 PAGE_KERNEL);
	}
	DBGTRACE4("addr: %p, %lu", addr, size);
	TRACEEXIT4(return addr);
}
WIN_FUNC_DECL(ExAllocatePoolWithTag,3)

wstdcall void vfree_nonatomic(void *addr, void *ctx)
{
	vfree(addr);
}
WIN_FUNC_DECL(vfree_nonatomic,2)

wstdcall void WIN_FUNC(ExFreePoolWithTag,2)
	(void *addr, ULONG tag)
{
	DBGTRACE4("addr: %p", addr);
	if ((unsigned long)addr < VMALLOC_START ||
	    (unsigned long)addr >= VMALLOC_END)
		kfree(addr);
	else {
		if (in_interrupt())
			schedule_ntos_work_item(WIN_FUNC_PTR(vfree_nonatomic,2),
						addr, NULL);
		else
			vfree(addr);
	}
	TRACEEXIT4(return);
}

wstdcall void WIN_FUNC(ExFreePool,1)
	(void *addr)
{
	ExFreePoolWithTag(addr, 0);
}
WIN_FUNC_DECL(ExFreePool,1)

wstdcall void WIN_FUNC(ExInitializeNPagedLookasideList,7)
	(struct npaged_lookaside_list *lookaside,
	 LOOKASIDE_ALLOC_FUNC *alloc_func, LOOKASIDE_FREE_FUNC *free_func,
	 ULONG flags, SIZE_T size, ULONG tag, USHORT depth)
{
	TRACEENTER3("lookaside: %p, size: %lu, flags: %u, head: %p, "
		    "alloc: %p, free: %p", lookaside, size, flags,
		    lookaside, alloc_func, free_func);

	memset(lookaside, 0, sizeof(*lookaside));

	lookaside->size = size;
	lookaside->tag = tag;
	lookaside->depth = 4;
	lookaside->maxdepth = 256;
	lookaside->pool_type = NonPagedPool;

	if (alloc_func)
		lookaside->alloc_func = alloc_func;
	else
		lookaside->alloc_func = WIN_FUNC_PTR(ExAllocatePoolWithTag,3);
	if (free_func)
		lookaside->free_func = free_func;
	else
		lookaside->free_func = WIN_FUNC_PTR(ExFreePool,1);

#ifndef CONFIG_X86_64
	nt_spin_lock_init(&lookaside->obsolete);
#endif
	TRACEEXIT3(return);
}

wstdcall void WIN_FUNC(ExDeleteNPagedLookasideList,1)
	(struct npaged_lookaside_list *lookaside)
{
	struct nt_slist *entry;

	TRACEENTER3("lookaside = %p", lookaside);
	while ((entry = ExpInterlockedPopEntrySList(&lookaside->head)))
		LIN2WIN1(lookaside->free_func, entry);
	TRACEEXIT3(return);
}

#if defined(ALLOC_DEBUG) && ALLOC_DEBUG > 1
#define ExAllocatePoolWithTag(pool_type, size, tag)			\
	wrap_ExAllocatePoolWithTag(pool_type, size, tag, __FILE__, __LINE__)
#endif
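/*
 * Illustrative sketch, not part of the original ntoskernel.c: the
 * allocator above picks kmalloc() for small requests and vmalloc()
 * for large ones, falling back to the atomic __vmalloc() variant at
 * DISPATCH_LEVEL, where sleeping is not allowed.  The same decision,
 * isolated as a helper; only KMALLOC_THRESHOLD, gfp_irql() and
 * current_irql() come from this file, the helper itself is
 * hypothetical.
 */
static inline void *pool_alloc_sketch(SIZE_T size)
{
	if (size <= KMALLOC_THRESHOLD)
		return kmalloc(size, gfp_irql());	/* small: slab */
	if (current_irql() < DISPATCH_LEVEL)
		return vmalloc(size);			/* large: may sleep */
	/* large, but caller can't sleep */
	return __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM, PAGE_KERNEL);
}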
wstdcall NTSTATUS WIN_FUNC(ExCreateCallback,4)
	(struct callback_object **object, struct object_attributes *attributes,
	 BOOLEAN create, BOOLEAN allow_multiple_callbacks)
{
	struct callback_object *obj;
	KIRQL irql;

	TRACEENTER2("");
	irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
	nt_list_for_each_entry(obj, &callback_objects, callback_funcs) {
		if (obj->attributes == attributes) {
			nt_spin_unlock_irql(&ntoskernel_lock, irql);
			*object = obj;
			return STATUS_SUCCESS;
		}
	}
	nt_spin_unlock_irql(&ntoskernel_lock, irql);
	obj = allocate_object(sizeof(struct callback_object),
			      OBJECT_TYPE_CALLBACK, NULL);
	if (!obj)
		TRACEEXIT2(return STATUS_INSUFFICIENT_RESOURCES);
	InitializeListHead(&obj->callback_funcs);
	nt_spin_lock_init(&obj->lock);
	obj->allow_multiple_callbacks = allow_multiple_callbacks;
	obj->attributes = attributes;
	*object = obj;
	TRACEEXIT2(return STATUS_SUCCESS);
}

wstdcall void *WIN_FUNC(ExRegisterCallback,3)
	(struct callback_object *object, PCALLBACK_FUNCTION func, void *context)
{
	struct callback_func *callback;
	KIRQL irql;

	TRACEENTER2("");
	irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
	if (object->allow_multiple_callbacks == FALSE &&
	    !IsListEmpty(&object->callback_funcs)) {
		nt_spin_unlock_irql(&object->lock, irql);
		TRACEEXIT2(return NULL);
	}
	/* drop object->lock before the GFP_KERNEL allocation, which
	 * may sleep */
	nt_spin_unlock_irql(&object->lock, irql);
	callback = kmalloc(sizeof(*callback), GFP_KERNEL);
	if (!callback) {
		ERROR("couldn't allocate memory");
		return NULL;
	}
	callback->func = func;
	callback->context = context;
	callback->object = object;
	irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
	InsertTailList(&object->callback_funcs, &callback->list);
	nt_spin_unlock_irql(&object->lock, irql);
	TRACEEXIT2(return callback);
}

wstdcall void WIN_FUNC(ExUnregisterCallback,1)
	(struct callback_func *callback)
{
	struct callback_object *object;
	KIRQL irql;

	TRACEENTER3("%p", callback);
	if (!callback)
		return;
	object = callback->object;
	irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
	RemoveEntryList(&callback->list);
	nt_spin_unlock_irql(&object->lock, irql);
	kfree(callback);
	return;
}

wstdcall void WIN_FUNC(ExNotifyCallback,3)
	(struct callback_object *object, void *arg1, void *arg2)
{
	struct callback_func *callback;
	KIRQL irql;

	TRACEENTER3("%p", object);
	irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
	nt_list_for_each_entry(callback, &object->callback_funcs, list) {
		LIN2WIN3(callback->func, callback->context, arg1, arg2);
	}
	nt_spin_unlock_irql(&object->lock, irql);
	return;
}

/* check and set signaled state; should be called with dispatcher_lock held */
/* @grab indicates if the event should be put in not-signaled state
 * - note that a semaphore may stay in signaled state for multiple
 * 'grabs' if the count is > 1 */
static int check_grab_signaled_state(struct dispatcher_header *dh,
				     struct task_struct *thread, int grab)
{
	EVENTTRACE("%p, %p, %d, %d", dh, thread, grab, dh->signal_state);
	if (is_mutex_dh(dh)) {
		struct nt_mutex *nt_mutex;
		/* either no thread owns the mutex or this thread owns
		 * it */
		nt_mutex = container_of(dh, struct nt_mutex, dh);
		EVENTTRACE("%p, %p", nt_mutex, nt_mutex->owner_thread);
		assert(dh->signal_state <= 1);
		assert(nt_mutex->owner_thread == NULL &&
		       dh->signal_state == 1);
		if (dh->signal_state > 0 || nt_mutex->owner_thread == thread) {
			if (grab) {
				dh->signal_state--;
				nt_mutex->owner_thread = thread;
			}
			EVENTEXIT(return 1);
		}
	} else if (dh->signal_state > 0) {
		/* if grab, decrement signal_state for
		 * synchronization or semaphore objects */
		if (grab && (dh->type == SynchronizationObject ||
			     is_semaphore_dh(dh)))
			dh->signal_state--;
		EVENTEXIT(return 1);
	}
	EVENTEXIT(return 0);
}
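/*
 * Illustrative usage sketch, not part of the original ntoskernel.c:
 * the call sequence a Windows driver would go through against the
 * callback emulation above.  my_callback, attr, ctx, arg1 and arg2
 * are hypothetical; ExNotifyCallback() ends up invoking
 * my_callback(ctx, arg1, arg2) for every registered entry.
 *
 *	struct callback_object *obj;
 *	void *handle;
 *
 *	if (ExCreateCallback(&obj, attr, TRUE, TRUE) == STATUS_SUCCESS) {
 *		handle = ExRegisterCallback(obj, my_callback, ctx);
 *		if (handle) {
 *			ExNotifyCallback(obj, arg1, arg2);
 *			ExUnregisterCallback(handle);
 *		}
 *	}
 */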
/* this function should be called holding dispatcher_lock spinlock at
 * DISPATCH_LEVEL */
static void wakeup_threads(struct dispatcher_header *dh)
{
	struct nt_list *cur, *next;
	struct wait_block *wb = NULL;

	EVENTENTER("%p", dh);
	nt_list_for_each_safe(cur, next, &dh->wait_blocks) {
		wb = container_of(cur, struct wait_block, list);
		EVENTTRACE("%p: wait block: %p, thread: %p",
			   dh, wb, wb->thread);
		assert(wb->thread != NULL);
		assert(wb->object == NULL);
		if (wb->thread &&
		    check_grab_signaled_state(dh, wb->thread, 1)) {
			struct thread_event_waitq *thread_waitq =
				wb->thread_waitq;
			EVENTTRACE("%p: waking up task %p for %p", thread_waitq,
				   wb->thread, dh);
			RemoveEntryList(&wb->list);
			wb->object = dh;
			thread_waitq->done = 1;
			wake_up(&thread_waitq->head);
			if (dh->type == SynchronizationObject)
				break;
		} else
			EVENTTRACE("not waking up task: %p", wb->thread);
	}
	EVENTEXIT(return);
}

/* We need a wait queue to implement the KeWaitFor routines below.
 * get/put_thread_event_waitq give out / take back a wait queue. Both
 * are called holding the dispatcher spinlock, so no locking here */
static inline struct thread_event_waitq *get_thread_event_waitq(void)
{
	struct thread_event_waitq *thread_event_waitq;

	if (thread_event_waitq_pool) {
		thread_event_waitq = thread_event_waitq_pool;
		thread_event_waitq_pool = thread_event_waitq_pool->next;
	} else {
		thread_event_waitq = kmalloc(sizeof(*thread_event_waitq),
					     GFP_ATOMIC);
		if (!thread_event_waitq) {
			WARNING("couldn't allocate memory");
			return NULL;
		}
		EVENTTRACE("allocated wq: %p", thread_event_waitq);
		init_waitqueue_head(&thread_event_waitq->head);
	}
#ifdef EVENT_DEBUG
	thread_event_waitq->task = current;
#endif
	EVENTTRACE("%p, %p, %p", thread_event_waitq, current,
		   thread_event_waitq_pool);
	thread_event_waitq->done = 0;
	return thread_event_waitq;
}

static void put_thread_event_waitq(struct thread_event_waitq *thread_event_waitq)
{
	EVENTENTER("%p, %p", thread_event_waitq, current);
#ifdef EVENT_DEBUG
	if (thread_event_waitq->task != current)
		ERROR("argh, task %p should be %p",
		      current, thread_event_waitq->task);
	thread_event_waitq->task = NULL;
#endif
	thread_event_waitq->next = thread_event_waitq_pool;
	thread_event_waitq_pool = thread_event_waitq;
	thread_event_waitq->done = 0;
}
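/*
 * Illustrative sketch, not part of the original ntoskernel.c: the
 * get/put pair above is a single-pointer intrusive free list, so a
 * waitq released by one waiter can be reused by the next without
 * another kmalloc().  The generic shape of the same idea; struct
 * free_node and the helper names are hypothetical, and as above the
 * caller is assumed to hold a lock serializing access to the pool head.
 */
struct free_node {
	struct free_node *next;
};
static struct free_node *free_node_pool;

static inline struct free_node *free_node_get(void)
{
	struct free_node *n = free_node_pool;

	if (n)
		free_node_pool = n->next;		/* reuse pooled node */
	else
		n = kmalloc(sizeof(*n), GFP_ATOMIC);	/* pool empty */
	return n;
}

static inline void free_node_put(struct free_node *n)
{
	n->next = free_node_pool;	/* push back onto the pool */
	free_node_pool = n;
}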
wstdcall NTSTATUS WIN_FUNC(KeWaitForMultipleObjects,8)
	(ULONG count, void *object[], enum wait_type wait_type,
	 KWAIT_REASON wait_reason, KPROCESSOR_MODE wait_mode,
	 BOOLEAN alertable, LARGE_INTEGER *timeout,
	 struct wait_block *wait_block_array)
{
	int i, res = 0, wait_count;
	long wait_jiffies = 0;
	struct wait_block *wb, wb_array[THREAD_WAIT_OBJECTS];
	struct dispatcher_header *dh;
	struct task_struct *thread;
	struct thread_event_waitq *thread_waitq;
	KIRQL irql;

	thread = current;
	EVENTENTER("thread: %p count = %d, type: %d, reason = %u, "
		   "waitmode = %u, alertable = %u, timeout = %p", thread,
		   count, wait_type, wait_reason, wait_mode, alertable,
		   timeout);

	if (count > MAX_WAIT_OBJECTS)
		EVENTEXIT(return STATUS_INVALID_PARAMETER);
	if (count > THREAD_WAIT_OBJECTS && wait_block_array == NULL)
		EVENTEXIT(return STATUS_INVALID_PARAMETER);

	if (wait_block_array == NULL)
		wb = &wb_array[0];
	else
		wb = wait_block_array;

	/* TODO: should we allow threads to wait in non-alertable state? */
	alertable = TRUE;
	irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
	/* If *timeout == 0: In the case of WaitAny, if an object can
	 * be grabbed (object is in signaled state), grab and
	 * return. In the case of WaitAll, we have to first make sure
	 * all objects can be grabbed. If any/some of them can't be
	 * grabbed, either we return STATUS_TIMEOUT or wait for them,
	 * depending on how to satisfy wait. If all of them can be
	 * grabbed, we will grab them in the next loop below */
	for (i = wait_count = 0; i < count; i++) {
		dh = object[i];
		EVENTTRACE("%p: event %p state: %d",
			   thread, dh, dh->signal_state);
		/* wait_type == 1 for WaitAny, 0 for WaitAll */
		if (check_grab_signaled_state(dh, thread, wait_type)) {
			if (wait_type == WaitAny) {
				nt_spin_unlock_irql(&dispatcher_lock, irql);
				if (count > 1)
					EVENTEXIT(return STATUS_WAIT_0 + i);
				else
					EVENTEXIT(return STATUS_SUCCESS);
			}
		} else {
			EVENTTRACE("%p: wait for %p", thread, dh);
			wait_count++;
		}
	}

	if (wait_count) {
		if (timeout && *timeout == 0) {
			nt_spin_unlock_irql(&dispatcher_lock, irql);
			EVENTEXIT(return STATUS_TIMEOUT);
		}
		thread_waitq = get_thread_event_waitq();
		if (!thread_waitq) {
			nt_spin_unlock_irql(&dispatcher_lock, irql);
			EVENTEXIT(return STATUS_RESOURCES);
		}
	} else
		thread_waitq = NULL;

	/* get the list of objects the thread needs to wait on and add
	 * the thread on the wait list for each such object */
	/* if *timeout == 0, this step will grab all the objects */
	for (i = 0; i < count; i++) {
		dh = object[i];
		EVENTTRACE("%p: event %p state: %d",
			   thread, dh, dh->signal_state);
		wb[i].object = NULL;
		wb[i].thread_waitq = thread_waitq;
		if (check_grab_signaled_state(dh, thread, 1)) {
			EVENTTRACE("%p: event %p already signaled: %d",
				   thread, dh, dh->signal_state);
			/* mark that we are not waiting on this object */
			wb[i].thread = NULL;
		} else {
			assert(timeout == NULL || *timeout != 0);
			assert(thread_waitq != NULL);
			wb[i].thread = thread;
			EVENTTRACE("%p: need to wait on event %p", thread, dh);
			InsertTailList(&dh->wait_blocks, &wb[i].list);
		}
	}
	nt_spin_unlock_irql(&dispatcher_lock, irql);
	if (wait_count == 0) {
		assert(thread_waitq == NULL);
		EVENTEXIT(return STATUS_SUCCESS);
	}
	assert(thread_waitq);
	assert(timeout == NULL || *timeout != 0);
	if (timeout == NULL)
		wait_jiffies = 0;
	else
		wait_jiffies = SYSTEM_TIME_TO_HZ(*timeout) + 1;
	EVENTTRACE("%p: sleeping for %ld on %p",
		   thread, wait_jiffies, thread_waitq);

	while (wait_count) {
		if (wait_jiffies) {
			res = wait_event_interruptible_timeout(
				thread_waitq->head, (thread_waitq->done == 1),
				wait_jiffies);
		} else {
			wait_event_interruptible(
				thread_waitq->head, (thread_waitq->done == 1));
			/* mark that it didn't timeout */
			res = 1;
		}
		thread_waitq->done = 0;
		irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
		if (signal_pending(current))
			res = -ERESTARTSYS;
		EVENTTRACE("%p woke up on %p, res = %d, done: %d", thread,
			   thread_waitq, res, thread_waitq->done);
#ifdef EVENT_DEBUG
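The wait loop above turns the caller's NT timeout into jiffies with ndiswrapper's SYSTEM_TIME_TO_HZ macro, whose definition lives in another part of the source. As a rough model of the arithmetic involved, assuming the NT convention that LARGE_INTEGER timeouts are 64-bit counts of 100 ns units with negative values meaning "relative to now" (the comparisons against 0 above treat the value as a plain scalar); nt_timeout_to_jiffies() is hypothetical, not ndiswrapper's macro, and absolute (non-negative) timeouts are not modeled:

	/* sketch only: relative NT timeout (100 ns units, negative) to jiffies */
	static inline long nt_timeout_to_jiffies(s64 timeout)
	{
		if (timeout >= 0)
			return 0;	/* absolute timeouts not handled here */
		/* 10,000,000 units of 100 ns per second */
		return ((-timeout) * HZ) / 10000000;
	}

For example, a relative 500 ms timeout arrives as -5,000,000 and maps to HZ/2 jiffies; the "+ 1" used above rounds up so the kernel sleep never undershoots the requested interval.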
