
ntoskernel.c

Collection: a program for installing and bringing up a wireless network card under Linux
Language: C
Page 1 of 5
/* check (and optionally grab) the signaled state of the object;
 * should be called with dispatcher_lock held. @grab indicates if the
 * object should be grabbed or only checked - note that a semaphore
 * may stay in signaled state for multiple 'grabs' if the count
 * is > 1 */
static int grab_object(struct dispatcher_header *dh,
		       struct task_struct *thread, int grab)
{
	EVENTTRACE("%p, %p, %d, %d", dh, thread, grab, dh->signal_state);
	if (unlikely(is_mutex_object(dh))) {
		struct nt_mutex *nt_mutex;
		nt_mutex = container_of(dh, struct nt_mutex, dh);
		EVENTTRACE("%p, %p, %d, %p, %d", nt_mutex,
			   nt_mutex->owner_thread, dh->signal_state,
			   thread, grab);
		/* either no thread owns the mutex or this thread owns it */
		assert((dh->signal_state == 1 &&
			nt_mutex->owner_thread == NULL) ||
		       (dh->signal_state < 1 &&
			nt_mutex->owner_thread != NULL));
		if ((dh->signal_state == 1 && nt_mutex->owner_thread == NULL) ||
		    nt_mutex->owner_thread == thread) {
			if (grab) {
				dh->signal_state--;
				nt_mutex->owner_thread = thread;
			}
			EVENTEXIT(return 1);
		}
	} else if (dh->signal_state > 0) {
		/* to grab, decrement signal_state for synchronization
		 * or semaphore objects */
		if (grab && (is_synch_object(dh) || is_semaphore_object(dh)))
			dh->signal_state--;
		EVENTEXIT(return 1);
	}
	EVENTEXIT(return 0);
}

/* this function should be called holding dispatcher_lock */
static void object_signalled(struct dispatcher_header *dh)
{
	struct nt_list *cur, *next;
	struct wait_block *wb;

	EVENTENTER("%p", dh);
	nt_list_for_each_safe(cur, next, &dh->wait_blocks) {
		wb = container_of(cur, struct wait_block, list);
		assert(wb->thread != NULL);
		assert(wb->object == NULL);
		if (!grab_object(dh, wb->thread, 1))
			continue;
		EVENTTRACE("%p (%p): waking %p", dh, wb, wb->thread);
		RemoveEntryList(cur);
		wb->object = dh;
		*(wb->wait_done) = 1;
		wake_up_process(wb->thread);
	}
	EVENTEXIT(return);
}

wstdcall NTSTATUS WIN_FUNC(KeWaitForMultipleObjects,8)
	(ULONG count, void *object[], enum wait_type wait_type,
	 KWAIT_REASON wait_reason, KPROCESSOR_MODE wait_mode,
	 BOOLEAN alertable, LARGE_INTEGER *timeout,
	 struct wait_block *wait_block_array)
{
	int i, res = 0, wait_count, wait_done;
	typeof(jiffies) wait_hz = 0;
	struct wait_block *wb, wb_array[THREAD_WAIT_OBJECTS];
	struct dispatcher_header *dh;

	EVENTENTER("%p, %d, %u, %p", current, count, wait_type, timeout);

	if (count > MAX_WAIT_OBJECTS ||
	    (count > THREAD_WAIT_OBJECTS && wait_block_array == NULL))
		EVENTEXIT(return STATUS_INVALID_PARAMETER);
	if (wait_block_array == NULL)
		wb = wb_array;
	else
		wb = wait_block_array;

	/* If *timeout == 0: In the case of WaitAny, if an object can
	 * be grabbed (object is in signaled state), grab and
	 * return. In the case of WaitAll, we have to first make sure
	 * all objects can be grabbed. If any/some of them can't be
	 * grabbed, either we return STATUS_TIMEOUT or wait for them,
	 * depending on how to satisfy wait. If all of them can be
	 * grabbed, we will grab them in the next loop below */
	spin_lock_bh(&dispatcher_lock);
	for (i = wait_count = 0; i < count; i++) {
		dh = object[i];
		EVENTTRACE("%p: event %p (%d)", current, dh, dh->signal_state);
		/* wait_type == 1 for WaitAny, 0 for WaitAll */
		if (grab_object(dh, current, wait_type)) {
			if (wait_type == WaitAny) {
				spin_unlock_bh(&dispatcher_lock);
				EVENTEXIT(return STATUS_WAIT_0 + i);
			}
		} else {
			EVENTTRACE("%p: wait for %p", current, dh);
			wait_count++;
		}
	}

	if (timeout && *timeout == 0 && wait_count) {
		spin_unlock_bh(&dispatcher_lock);
		EVENTEXIT(return STATUS_TIMEOUT);
	}

	/* get the list of objects the thread needs to wait on and add
	 * the thread on the wait list for each such object */
	/* if *timeout == 0, this step will grab all the objects */
	wait_done = 0;
	for (i = 0; i < count; i++) {
		dh = object[i];
		EVENTTRACE("%p: event %p (%d)", current, dh, dh->signal_state);
		wb[i].object = NULL;
		if (grab_object(dh, current, 1)) {
			EVENTTRACE("%p: no wait for %p (%d)",
				   current, dh, dh->signal_state);
			/* mark that we are not waiting on this object */
			wb[i].thread = NULL;
		} else {
			wb[i].wait_done = &wait_done;
			wb[i].thread = current;
			EVENTTRACE("%p: wait for %p", current, dh);
			InsertTailList(&dh->wait_blocks, &wb[i].list);
		}
	}
	spin_unlock_bh(&dispatcher_lock);
	if (wait_count == 0)
		EVENTEXIT(return STATUS_SUCCESS);

	assert(timeout == NULL || *timeout != 0);
	if (timeout == NULL)
		wait_hz = 0;
	else
		wait_hz = SYSTEM_TIME_TO_HZ(*timeout);

	DBG_BLOCK(2) {
		KIRQL irql = current_irql();
		if (irql >= DISPATCH_LEVEL) {
			TRACE2("wait in atomic context: %Lu, %lu, %d, %ld",
			       *timeout, wait_hz, in_atomic(), in_interrupt());
		}
	}
	assert_irql(_irql_ < DISPATCH_LEVEL);
	EVENTTRACE("%p: sleep for %ld on %p", current, wait_hz, &wait_done);
	/* we don't honor 'alertable' - according to description for
	 * this, even if waiting in non-alertable state, thread may be
	 * alerted in some circumstances */
	while (wait_count) {
		res = wait_condition(wait_done, wait_hz, TASK_INTERRUPTIBLE);
		spin_lock_bh(&dispatcher_lock);
		EVENTTRACE("%p woke up: %d, %d", current, res, wait_done);
		/* the event may have been set by the time
		 * wrap_wait_event returned and spinlock obtained, so
		 * don't rely on value of 'res' - check event status */
		if (!wait_done) {
			assert(res <= 0);
			/* timed out or interrupted; remove from wait list */
			for (i = 0; i < count; i++) {
				if (!wb[i].thread)
					continue;
				EVENTTRACE("%p: timedout, dequeue %p (%p)",
					   current, object[i], wb[i].object);
				assert(wb[i].object == NULL);
				RemoveEntryList(&wb[i].list);
			}
			spin_unlock_bh(&dispatcher_lock);
			if (res < 0)
				EVENTEXIT(return STATUS_ALERTED);
			else
				EVENTEXIT(return STATUS_TIMEOUT);
		}
		assert(res > 0);
		/* woken because object(s) signalled */
		for (i = 0; wait_count && i < count; i++) {
			if (!wb[i].thread || !wb[i].object)
				continue;
			DBG_BLOCK(1) {
				if (wb[i].object != object[i]) {
					EVENTTRACE("oops %p != %p",
						   wb[i].object, object[i]);
					continue;
				}
			}
			wait_count--;
			if (wait_type == WaitAny) {
				int j;
				/* done; remove from rest of wait list */
				for (j = i + 1; j < count; j++) {
					if (wb[j].thread && !wb[j].object)
						RemoveEntryList(&wb[j].list);
				}
				spin_unlock_bh(&dispatcher_lock);
				EVENTEXIT(return STATUS_WAIT_0 + i);
			}
		}
		wait_done = 0;
		spin_unlock_bh(&dispatcher_lock);
		if (wait_count == 0)
			EVENTEXIT(return STATUS_SUCCESS);
		/* this thread is still waiting for more objects, so
		 * let it wait for remaining time and those objects */
		if (timeout)
			wait_hz = res;
		else
			wait_hz = 0;
	}
	/* should never reach here, but compiler wants return value */
	ERROR("%p: wait_hz: %ld", current, wait_hz);
	EVENTEXIT(return STATUS_SUCCESS);
}

wstdcall NTSTATUS WIN_FUNC(KeWaitForSingleObject,5)
	(void *object, KWAIT_REASON wait_reason, KPROCESSOR_MODE wait_mode,
	 BOOLEAN alertable, LARGE_INTEGER *timeout)
{
	return KeWaitForMultipleObjects(1, &object, WaitAny, wait_reason,
					wait_mode, alertable, timeout, NULL);
}

wstdcall void WIN_FUNC(KeInitializeEvent,3)
	(struct nt_event *nt_event, enum event_type type, BOOLEAN state)
{
	EVENTENTER("event = %p, type = %d, state = %d", nt_event, type, state);
	initialize_object(&nt_event->dh, type, state);
	EVENTEXIT(return);
}

wstdcall LONG WIN_FUNC(KeSetEvent,3)
	(struct nt_event *nt_event, KPRIORITY incr, BOOLEAN wait)
{
	LONG old_state;

	EVENTENTER("%p, %d", nt_event, nt_event->dh.type);
	if (wait == TRUE)
		WARNING("wait = %d, not yet implemented", wait);
	spin_lock_bh(&dispatcher_lock);
	old_state = nt_event->dh.signal_state;
	nt_event->dh.signal_state = 1;
	if (old_state == 0)
		object_signalled(&nt_event->dh);
	spin_unlock_bh(&dispatcher_lock);
	EVENTEXIT(return old_state);
}

/* reset to non-signaled; unlike KeResetEvent, the previous state is
 * not returned */
wstdcall void WIN_FUNC(KeClearEvent,1)
	(struct nt_event *nt_event)
{
	EVENTENTER("%p", nt_event);
	nt_event->dh.signal_state = 0;
	EVENTEXIT(return);
}

wstdcall LONG WIN_FUNC(KeResetEvent,1)
	(struct nt_event *nt_event)
{
	LONG old_state;

	EVENTENTER("%p", nt_event);
	old_state = xchg(&nt_event->dh.signal_state, 0);
	EVENTEXIT(return old_state);
}

wstdcall LONG WIN_FUNC(KeReadStateEvent,1)
	(struct nt_event *nt_event)
{
	LONG state;

	state = nt_event->dh.signal_state;
	EVENTTRACE("%d", state);
	return state;
}

wstdcall void WIN_FUNC(KeInitializeMutex,2)
	(struct nt_mutex *mutex, ULONG level)
{
	EVENTENTER("%p", mutex);
	initialize_object(&mutex->dh, MutexObject, 1);
	mutex->dh.size = sizeof(*mutex);
	InitializeListHead(&mutex->list);
	mutex->abandoned = FALSE;
	mutex->apc_disable = 1;
	mutex->owner_thread = NULL;
	EVENTEXIT(return);
}

wstdcall LONG WIN_FUNC(KeReleaseMutex,2)
	(struct nt_mutex *mutex, BOOLEAN wait)
{
	LONG ret;
	struct task_struct *thread;

	EVENTENTER("%p, %d, %p", mutex, wait, current);
	if (wait == TRUE)
		WARNING("wait: %d", wait);
	thread = current;
	spin_lock_bh(&dispatcher_lock);
	EVENTTRACE("%p, %p, %p, %d", mutex, thread, mutex->owner_thread,
		   mutex->dh.signal_state);
	if ((mutex->owner_thread == thread) && (mutex->dh.signal_state <= 0)) {
		ret = mutex->dh.signal_state++;
		if (ret == 0) {
			mutex->owner_thread = NULL;
			object_signalled(&mutex->dh);
		}
	} else {
		ret = STATUS_MUTANT_NOT_OWNED;
		WARNING("invalid mutex: %p, %p, %p", mutex, mutex->owner_thread,
			thread);
	}
	EVENTTRACE("%p, %p, %p, %d", mutex, thread, mutex->owner_thread,
		   mutex->dh.signal_state);
	spin_unlock_bh(&dispatcher_lock);
	EVENTEXIT(return ret);
}

wstdcall void WIN_FUNC(KeInitializeSemaphore,3)
	(struct nt_semaphore *semaphore, LONG count, LONG limit)
{
	EVENTENTER("%p: %d", semaphore, count);
	/* if limit > 1, we need to satisfy as many waits (until count
	 * becomes 0), so we keep decrementing count every time a wait
	 * is satisfied */
	initialize_object(&semaphore->dh, SemaphoreObject, count);
	semaphore->dh.size = sizeof(*semaphore);
	semaphore->limit = limit;
	EVENTEXIT(return);
}

wstdcall LONG WIN_FUNC(KeReleaseSemaphore,4)
	(struct nt_semaphore *semaphore, KPRIORITY incr, LONG adjustment,
	 BOOLEAN wait)
{
	LONG ret;

	EVENTENTER("%p", semaphore);
	spin_lock_bh(&dispatcher_lock);
	ret = semaphore->dh.signal_state;
	assert(ret >= 0);
	if (semaphore->dh.signal_state + adjustment <= semaphore->limit)
		semaphore->dh.signal_state += adjustment;
	else {
		WARNING("releasing %d over limit %d", adjustment,
			semaphore->limit);
		semaphore->dh.signal_state = semaphore->limit;
	}
	if (semaphore->dh.signal_state > 0)
		object_signalled(&semaphore->dh);
	spin_unlock_bh(&dispatcher_lock);
	EVENTEXIT(return ret);
}

wstdcall NTSTATUS WIN_FUNC(KeDelayExecutionThread,3)
	(KPROCESSOR_MODE wait_mode, BOOLEAN alertable, LARGE_INTEGER *interval)
{
	int res;
	long timeout;

	if (wait_mode != 0)
		ERROR("invalid wait_mode %d", wait_mode);

	timeout = SYSTEM_TIME_TO_HZ(*interval);
	EVENTTRACE("%p, %Ld, %ld", current, *interval, timeout);
	if (timeout <= 0)
		EVENTEXIT(return STATUS_SUCCESS);

	if (alertable)
		set_current_state(TASK_INTERRUPTIBLE);
	else
		set_current_state(TASK_UNINTERRUPTIBLE);

	res = schedule_timeout(timeout);
	EVENTTRACE("%p, %d", current, res);
	if (res == 0)
		EVENTEXIT(return STATUS_SUCCESS);
	else
		EVENTEXIT(return STATUS_ALERTED);
}

wstdcall ULONGLONG WIN_FUNC(KeQueryInterruptTime,0)
	(void)
{
	EXIT5(return jiffies * TICKSPERJIFFY);
}

wstdcall ULONG WIN_FUNC(KeQueryTimeIncrement,0)
	(void)
{
	EXIT5(return TICKSPERSEC / HZ);
}

wstdcall void WIN_FUNC(KeQuerySystemTime,1)
	(LARGE_INTEGER *time)
{
	*time = ticks_1601();
	TRACE5("%Lu, %lu", *time, jiffies);
}

wstdcall void WIN_FUNC(KeQueryTickCount,1)
	(LARGE_INTEGER *count)
{
	*count = jiffies;
}

wstdcall LARGE_INTEGER WIN_FUNC(KeQueryPerformanceCounter,1)
	(LARGE_INTEGER *counter)
{
	if (counter)
		*counter = HZ;
	return jiffies;
}

wstdcall KAFFINITY WIN_FUNC(KeQueryActiveProcessors,0)
	(void)
{
	int i, n;
	KAFFINITY bits = 0;

#ifdef num_online_cpus
	n = num_online_cpus();
#else
	n = NR_CPUS;
#endif
	/* return a mask with one bit set per processor */
	for (i = 0; i < n; i++)
		bits = (bits << 1) | 1;
	return bits;
}

struct nt_thread *get_current_nt_thread(void)
{
	struct task_struct *task = current;
	struct nt_thread *thread;
	struct common_object_header *header;

	TRACE6("task: %p", task);
	thread = NULL;
	spin_lock_bh(&ntoskernel_lock);
	nt_list_for_each_entry(header, &object_list, list) {
		TRACE6("%p, %d", header, header->type);
		if (header->type != OBJECT_TYPE_NT_THREAD)
			break;
		thread = HEADER_TO_OBJECT(header);
		TRACE6("%p, %p", thread, thread->task);
		if (thread->task == task)
			break;
		else
			thread = NULL;
	}
	spin_unlock_bh(&ntoskernel_lock);
	if (thread == NULL)
		TRACE4("couldn't find thread for task %p, %d", task, task->pid);
	TRACE6("%p", thread);
	return thread;
}

struct task_struct *get_nt_thread_task(struct nt_thread *thread)
{
	struct task_struct *task;
	struct common_object_header *header;

	TRACE6("%p", thread);
	task = NULL;
	spin_lock_bh(&ntoskernel_lock);
	nt_list_for_each_entry(header, &object_list, list) {
		TRACE6("%p, %d", header, header->type);
		if (header->type != OBJECT_TYPE_NT_THREAD)
			break;
		if (thread == HEADER_TO_OBJECT(header)) {
			task = thread->task;
			break;
		}
	}
	spin_unlock_bh(&ntoskernel_lock);
	if (task == NULL)
		TRACE2("%p: couldn't find task for %p", current, thread);
	return task;
}

static struct nt_thread *create_nt_thread(struct task_struct *task)
{
	struct nt_thread *thread;

	thread = allocate_object(sizeof(*thread), OBJECT_TYPE_NT_THREAD, NULL);
	if (!thread) {
		ERROR("couldn't allocate thread object");
		EXIT2(return NULL);
	}
	thread->task = task;
	if (task)
		thread->pid = task->pid;
	else
		thread->pid = 0;
	nt_spin_lock_init(&thread->lock);
	InitializeListHead(&thread->irps);
	initialize_object(&thread->dh, ThreadObject, 0);
	thread->dh.size = sizeof(*thread);

	return thread;
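
Usage note (not part of the listing, which continues on the next page): a minimal sketch of how driver-side code would typically exercise the event shims above, assuming this wrapper's convention that LARGE_INTEGER is a scalar 64-bit value and the standard NT rule that a negative timeout is a relative interval in 100 ns units. The names my_event, io_done_notify and wait_for_io are hypothetical.

/* hypothetical driver-side usage of the dispatcher-object shims */
static struct nt_event my_event;

/* completion path: signal the event, waking any waiter */
static void io_done_notify(void)
{
	KeSetEvent(&my_event, 0, FALSE);	/* returns previous state */
}

static NTSTATUS wait_for_io(void)
{
	LARGE_INTEGER timeout;

	/* auto-reset ("synchronization") event, initially non-signaled */
	KeInitializeEvent(&my_event, SynchronizationEvent, FALSE);
	/* ... start the I/O that will call io_done_notify() ... */
	/* relative timeout of one second: negative, in 100 ns units
	 * (assumes the scalar LARGE_INTEGER used by this wrapper) */
	timeout = -10000000;
	/* STATUS_SUCCESS (STATUS_WAIT_0) if signaled in time,
	 * STATUS_TIMEOUT otherwise */
	return KeWaitForSingleObject(&my_event, Executive, KernelMode,
				     FALSE, &timeout);
}

Because KeWaitForSingleObject is just a one-object KeWaitForMultipleObjects, the same pattern applies to mutexes and semaphores through their embedded dispatcher headers.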
