
ntoskernel.h

Source code of an NDIS wireless network card driver for Linux
Page 1 of 3
NTSTATUS IoPassIrpDown(struct device_object *dev_obj, struct irp *irp) wstdcall;
WIN_FUNC_DECL(IoPassIrpDown,2)
NTSTATUS IoSyncForwardIrp(struct device_object *dev_obj,
			  struct irp *irp) wstdcall;
NTSTATUS IoAsyncForwardIrp(struct device_object *dev_obj,
			   struct irp *irp) wstdcall;
NTSTATUS IoInvalidDeviceRequest(struct device_object *dev_obj,
				struct irp *irp) wstdcall;

KIRQL KeGetCurrentIrql(void) wstdcall;
void KeInitializeSpinLock(NT_SPIN_LOCK *lock) wstdcall;
void KeAcquireSpinLock(NT_SPIN_LOCK *lock, KIRQL *irql) wstdcall;
void KeReleaseSpinLock(NT_SPIN_LOCK *lock, KIRQL oldirql) wstdcall;
KIRQL KeAcquireSpinLockRaiseToDpc(NT_SPIN_LOCK *lock) wstdcall;
void IoAcquireCancelSpinLock(KIRQL *irql) wstdcall;
void IoReleaseCancelSpinLock(KIRQL irql) wstdcall;
void RtlCopyMemory(void *dst, const void *src, SIZE_T length) wstdcall;
NTSTATUS RtlUnicodeStringToAnsiString
	(struct ansi_string *dst, const struct unicode_string *src,
	 BOOLEAN dup) wstdcall;
NTSTATUS RtlAnsiStringToUnicodeString
	(struct unicode_string *dst, const struct ansi_string *src,
	 BOOLEAN dup) wstdcall;
void RtlInitAnsiString(struct ansi_string *dst, const char *src) wstdcall;
void RtlInitString(struct ansi_string *dst, const char *src) wstdcall;
void RtlInitUnicodeString(struct unicode_string *dest,
			  const wchar_t *src) wstdcall;
void RtlFreeUnicodeString(struct unicode_string *string) wstdcall;
void RtlFreeAnsiString(struct ansi_string *string) wstdcall;
LONG RtlCompareUnicodeString(const struct unicode_string *s1,
			     const struct unicode_string *s2,
			     BOOLEAN case_insensitive) wstdcall;
void RtlCopyUnicodeString(struct unicode_string *dst,
			  struct unicode_string *src) wstdcall;
void KeInitializeTimer(struct nt_timer *nt_timer) wstdcall;
void KeInitializeTimerEx(struct nt_timer *nt_timer,
			 enum timer_type type) wstdcall;
BOOLEAN KeSetTimerEx(struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
		     LONG period_ms, struct kdpc *kdpc) wstdcall;
BOOLEAN KeSetTimer(struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
		   struct kdpc *kdpc) wstdcall;
BOOLEAN KeCancelTimer(struct nt_timer *nt_timer) wstdcall;
void KeInitializeDpc(struct kdpc *kdpc, void *func, void *ctx) wstdcall;
struct task_struct *KeGetCurrentThread(void) wstdcall;
NTSTATUS ObReferenceObjectByHandle(void *handle, ACCESS_MASK desired_access,
				   void *obj_type, KPROCESSOR_MODE access_mode,
				   void **object, void *handle_info) wstdcall;

void adjust_user_shared_data_addr(char *driver, unsigned long length);

#define IoCompleteRequest(irp, prio) IofCompleteRequest(irp, prio)
#define IoCallDriver(dev, irp) IofCallDriver(dev, irp)

#if defined(IO_DEBUG)
#define DUMP_IRP(irp)							\
do {									\
	struct io_stack_location *irp_sl;				\
	irp_sl = IoGetCurrentIrpStackLocation(irp);			\
	IOTRACE("irp: %p, stack size: %d, cl: %d, sl: %p, dev_obj: %p, " \
		"mj_fn: %d, minor_fn: %d, nt_urb: %p, event: %p",	\
		irp, irp->stack_count, (irp)->current_location,		\
		irp_sl, irp_sl->dev_obj, irp_sl->major_fn,		\
		irp_sl->minor_fn, IRP_URB(irp),				\
		(irp)->user_event);					\
} while (0)
#else
#define DUMP_IRP(irp) do { } while (0)
#endif

static inline KIRQL current_irql(void)
{
	if (in_irq() || irqs_disabled())
		EXIT6(return DEVICE_LEVEL);
	if (
#ifdef CONFIG_PREEMPT_RT
		in_atomic() ||
#endif
		in_interrupt())
		EXIT6(return DISPATCH_LEVEL);
	EXIT6(return PASSIVE_LEVEL);
}

static inline KIRQL raise_irql(KIRQL newirql)
{
	KIRQL irql = current_irql();
//	assert (newirql == DISPATCH_LEVEL);
	if (irql < DISPATCH_LEVEL && newirql == DISPATCH_LEVEL) {
		local_bh_disable();
		preempt_disable();
	}
	TRACE6("%d, %d", irql, newirql);
	return irql;
}

static inline void lower_irql(KIRQL oldirql)
{
	KIRQL irql = current_irql();
	TRACE6("%d, %d", irql, oldirql);
	DBG_BLOCK(2) {
		if (irql < oldirql)
			ERROR("invalid irql: %d < %d", irql, oldirql);
	}
	if (oldirql < DISPATCH_LEVEL && irql == DISPATCH_LEVEL) {
		preempt_enable();
		local_bh_enable();
	}
}

#define gfp_irql() (current_irql() < DISPATCH_LEVEL ? GFP_KERNEL : GFP_ATOMIC)
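/*
 * Illustrative sketch, not part of the original ntoskernel.h: it shows
 * how the IRQL helpers above are meant to be paired.  The function and
 * its arguments are hypothetical names used only for the example.
 */
static inline void example_run_at_dispatch_level(void (*work)(void *), void *ctx)
{
	/* remember the level we started at; raising from PASSIVE_LEVEL
	 * to DISPATCH_LEVEL disables softirqs and preemption */
	KIRQL oldirql = raise_irql(DISPATCH_LEVEL);

	work(ctx);

	/* lower_irql() re-enables preemption/softirqs only if the
	 * level was actually raised above */
	lower_irql(oldirql);
}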
/* Windows spinlocks are of type ULONG_PTR which is not big enough to
 * store Linux spinlocks; so we implement Windows spinlocks using
 * ULONG_PTR space with our own functions/macros */

/* Windows seems to use 0 for unlocked state of spinlock - if Linux
 * convention of 1 for unlocked state is used, at least prism54 driver
 * crashes */
#define NT_SPIN_LOCK_UNLOCKED 0
#define NT_SPIN_LOCK_LOCKED 1

static inline void nt_spin_lock_init(NT_SPIN_LOCK *lock)
{
	*lock = NT_SPIN_LOCK_UNLOCKED;
}

#ifdef CONFIG_SMP

static inline void nt_spin_lock(NT_SPIN_LOCK *lock)
{
	__asm__ __volatile__(
		"\n"
		"1:\t"
		"  xchgl %1, %0\n\t"
		"  cmpl %2, %1\n\t"
		"  je 3f\n"
		"2:\t"
		"  rep; nop\n\t"
		"  cmpl %2, %0\n\t"
		"  jne 2b\n\t"
		"  jmp 1b\n"
		"3:\n\t"
		: "+m" (*lock)
		: "r" (NT_SPIN_LOCK_LOCKED), "i" (NT_SPIN_LOCK_UNLOCKED));
}

static inline void nt_spin_unlock(NT_SPIN_LOCK *lock)
{
	*lock = NT_SPIN_LOCK_UNLOCKED;
}

#else // CONFIG_SMP

#define nt_spin_lock(lock) do { } while (0)
#define nt_spin_unlock(lock) do { } while (0)

#endif // CONFIG_SMP

/* raise IRQL to given (higher) IRQL if necessary before locking */
static inline KIRQL nt_spin_lock_irql(NT_SPIN_LOCK *lock, KIRQL newirql)
{
	KIRQL oldirql = raise_irql(newirql);
	nt_spin_lock(lock);
	return oldirql;
}

/* lower IRQL to given (lower) IRQL if necessary after unlocking */
static inline void nt_spin_unlock_irql(NT_SPIN_LOCK *lock, KIRQL oldirql)
{
	nt_spin_unlock(lock);
	lower_irql(oldirql);
}

#ifdef CONFIG_PREEMPT_RT
#define save_local_irq(flags) raw_local_irq_save(flags)
#define restore_local_irq(flags) raw_local_irq_restore(flags)
#else
#define save_local_irq(flags) local_irq_save(flags)
#define restore_local_irq(flags) local_irq_restore(flags)
#endif

#define nt_spin_lock_irqsave(lock, flags)				\
do {									\
	save_local_irq(flags);						\
	preempt_disable();						\
	nt_spin_lock(lock);						\
} while (0)

#define nt_spin_unlock_irqrestore(lock, flags)				\
do {									\
	nt_spin_unlock(lock);						\
	restore_local_irq(flags);					\
	preempt_enable();						\
} while (0)

#define atomic_unary_op(var, size, oper)				\
do {									\
	if (size == 1)							\
		__asm__ __volatile__(					\
			LOCK_PREFIX oper "b %b0\n\t" : "+m" (var));	\
	else if (size == 2)						\
		__asm__ __volatile__(					\
			LOCK_PREFIX oper "w %w0\n\t" : "+m" (var));	\
	else if (size == 4)						\
		__asm__ __volatile__(					\
			LOCK_PREFIX oper "l %0\n\t" : "+m" (var));	\
	else if (size == 8)						\
		__asm__ __volatile__(					\
			LOCK_PREFIX oper "q %q0\n\t" : "+m" (var));	\
	else {								\
		extern void _invalid_op_size_(void);			\
		_invalid_op_size_();					\
	}								\
} while (0)

#define atomic_inc_var_size(var, size) atomic_unary_op(var, size, "inc")
#define atomic_inc_var(var) atomic_inc_var_size(var, sizeof(var))
#define atomic_dec_var_size(var, size) atomic_unary_op(var, size, "dec")
#define atomic_dec_var(var) atomic_dec_var_size(var, sizeof(var))

#define pre_atomic_add(var, i)					\
({								\
	typeof(var) pre;					\
	__asm__ __volatile__(					\
		LOCK_PREFIX "xadd %0, %1\n\t"			\
		: "=r"(pre), "+m"(var)				\
		: "0"(i));					\
	pre;							\
})

#define post_atomic_add(var, i) (pre_atomic_add(var, i) + i)
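/*
 * Illustrative sketch, not part of the original header: the atomic
 * helpers above act on plain integer lvalues and pick the operand size
 * with sizeof().  The counter below is a hypothetical example;
 * pre_atomic_add() yields the value before the locked xadd,
 * post_atomic_add() the value after it.
 */
static inline ULONG example_next_sequence_number(ULONG *counter)
{
	/* atomically bump *counter and return the new value */
	return post_atomic_add(*counter, 1);
}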
#define atomic_insert_list_head(oldhead, head, newhead)			\
	do {								\
		oldhead = (typeof(oldhead))head;			\
	} while (cmpxchg(&(head), oldhead, newhead) != (typeof(head))oldhead)

#define atomic_remove_list_head(head, newhead)				\
({									\
	typeof(head) oldhead;						\
	do {								\
		oldhead = head;						\
		if (!oldhead)						\
			break;						\
	} while (cmpxchg(&(head), oldhead, newhead) != oldhead);	\
	oldhead;							\
})

static inline ULONG SPAN_PAGES(void *ptr, SIZE_T length)
{
	return PAGE_ALIGN(((unsigned long)ptr & (PAGE_SIZE - 1)) + length)
		>> PAGE_SHIFT;
}

#ifdef CONFIG_X86_64
/* TODO: can these be implemented without using spinlock? */
static inline struct nt_slist *PushEntrySList(nt_slist_header *head,
					      struct nt_slist *entry,
					      NT_SPIN_LOCK *lock)
{
	KIRQL irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
	entry->next = head->next;
	head->next = entry;
	head->depth++;
	nt_spin_unlock_irql(lock, irql);
	TRACE4("%p, %p, %p", head, entry, entry->next);
	return entry->next;
}

static inline struct nt_slist *PopEntrySList(nt_slist_header *head,
					     NT_SPIN_LOCK *lock)
{
	struct nt_slist *entry;
	KIRQL irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
	entry = head->next;
	if (entry) {
		head->next = entry->next;
		head->depth--;
	}
	nt_spin_unlock_irql(lock, irql);
	TRACE4("%p, %p", head, entry);
	return entry;
}
#else

#define u64_low_32(x) ((u32)x)
#define u64_high_32(x) ((u32)(x >> 32))

static inline u64 cmpxchg8b(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	__asm__ __volatile__(
		"\n"
		LOCK_PREFIX "cmpxchg8b %0\n"
		: "+m" (*ptr), "=A" (prev)
		: "A" (old), "b" (u64_low_32(new)), "c" (u64_high_32(new)));
	return prev;
}

/* slist routines below update slist atomically - no need for
 * spinlocks */
static inline struct nt_slist *PushEntrySList(nt_slist_header *head,
					      struct nt_slist *entry,
					      NT_SPIN_LOCK *lock)
{
	nt_slist_header old, new;
	do {
		old.align = head->align;
		entry->next = old.next;
		new.next = entry;
		new.depth = old.depth + 1;
	} while (cmpxchg8b(&head->align, old.align, new.align) != old.align);
	TRACE4("%p, %p, %p", head, entry, old.next);
	return old.next;
}

static inline struct nt_slist *PopEntrySList(nt_slist_header *head,
					     NT_SPIN_LOCK *lock)
{
	struct nt_slist *entry;
	nt_slist_header old, new;
	do {
		old.align = head->align;
		entry = old.next;
		if (!entry)
			break;
		new.next = entry->next;
		new.depth = old.depth - 1;
	} while (cmpxchg8b(&head->align, old.align, new.align) != old.align);
	TRACE4("%p, %p", head, entry);
	return entry;
}
#endif

#define sleep_hz(n)					\
do {							\
	set_current_state(TASK_INTERRUPTIBLE);		\
	schedule_timeout(n);				\
} while (0)

#endif // _NTOSKERNEL_H_
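The PushEntrySList/PopEntrySList helpers above are typically driven as a lock-free free list. The sketch below is illustrative only: it assumes the struct nt_slist and nt_slist_header layouts declared elsewhere in ntoskernel.h, and struct packet_wrapper, free_list and free_list_lock are hypothetical names, not part of the original file.

	struct packet_wrapper {
		struct nt_slist slist;	/* link used by Push/PopEntrySList */
		void *payload;
	};

	static nt_slist_header free_list;
	static NT_SPIN_LOCK free_list_lock;	/* only taken by the x86-64 variants */

	static struct packet_wrapper *example_get_packet(void)
	{
		struct nt_slist *entry = PopEntrySList(&free_list, &free_list_lock);

		/* container_of() (linux/kernel.h) recovers the wrapper from the
		 * embedded list entry; NULL means the free list was empty */
		return entry ? container_of(entry, struct packet_wrapper, slist) : NULL;
	}

	static void example_put_packet(struct packet_wrapper *p)
	{
		PushEntrySList(&free_list, &p->slist, &free_list_lock);
	}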
