/*
 * COPYRIGHT:    See COPYRIGHT.TXT
 * PROJECT:      Ext2 File System Driver for WinNT/2K/XP
 * FILE:         module.h
 * PURPOSE:      Header file: nls structures & linux kernel ...
 * PROGRAMMER:   Matt Wu <mattwu@163.com>
 * HOMEPAGE:     http://ext2.yeah.net
 * UPDATE HISTORY:
 */
#ifndef _EXT2_MODULE_HEADER_
#define _EXT2_MODULE_HEADER_
/* INCLUDES *************************************************************/
#include <ntifs.h>
#include <ntdddisk.h>
#include <windef.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <wchar.h>
#include <linux/types.h>
#if _WIN32_WINNT <= 0x500
#define _WIN2K_TARGET_ 1
#endif
/* STRUCTS ******************************************************/
//
// Byte order swapping routines
//
/* use the runtime routine or compiler's implementation */
#if (defined(_M_IX86) && (_MSC_FULL_VER > 13009037)) || \
((defined(_M_AMD64) || defined(_M_IA64)) && \
(_MSC_FULL_VER > 13009175))
#ifdef __cplusplus
extern "C" {
#endif
unsigned short __cdecl _byteswap_ushort(unsigned short);
unsigned long __cdecl _byteswap_ulong (unsigned long);
unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
#ifdef __cplusplus
}
#endif
#pragma intrinsic(_byteswap_ushort)
#pragma intrinsic(_byteswap_ulong)
#pragma intrinsic(_byteswap_uint64)
#define RtlUshortByteSwap(_x) _byteswap_ushort((USHORT)(_x))
#define RtlUlongByteSwap(_x) _byteswap_ulong((_x))
#define RtlUlonglongByteSwap(_x) _byteswap_uint64((_x))
#else
USHORT
FASTCALL
RtlUshortByteSwap(
    IN USHORT Source
    );

ULONG
FASTCALL
RtlUlongByteSwap(
    IN ULONG Source
    );

ULONGLONG
FASTCALL
RtlUlonglongByteSwap(
    IN ULONGLONG Source
    );
#endif
#define __swab16(x) RtlUshortByteSwap(x)
#define __swab32(x) RtlUlongByteSwap(x)
#define __swab64(x) RtlUlonglongByteSwap(x)
#define __constant_swab16 __swab16
#define __constant_swab32 __swab32
#define __constant_swab64 __swab64
#define __constant_htonl(x) __constant_swab32((x))
#define __constant_ntohl(x) __constant_swab32((x))
#define __constant_htons(x) __constant_swab16((x))
#define __constant_ntohs(x) __constant_swab16((x))
#define __constant_cpu_to_le64(x) ((__u64)(x))
#define __constant_le64_to_cpu(x) ((__u64)(x))
#define __constant_cpu_to_le32(x) ((__u32)(x))
#define __constant_le32_to_cpu(x) ((__u32)(x))
#define __constant_cpu_to_le16(x) ((__u16)(x))
#define __constant_le16_to_cpu(x) ((__u16)(x))
#define __constant_cpu_to_be64(x) __constant_swab64((x))
#define __constant_be64_to_cpu(x) __constant_swab64((x))
#define __constant_cpu_to_be32(x) __constant_swab32((x))
#define __constant_be32_to_cpu(x) __constant_swab32((x))
#define __constant_cpu_to_be16(x) __constant_swab16((x))
#define __constant_be16_to_cpu(x) __constant_swab16((x))
#define __cpu_to_le64(x) ((__u64)(x))
#define __le64_to_cpu(x) ((__u64)(x))
#define __cpu_to_le32(x) ((__u32)(x))
#define __le32_to_cpu(x) ((__u32)(x))
#define __cpu_to_le16(x) ((__u16)(x))
#define __le16_to_cpu(x) ((__u16)(x))
#define __cpu_to_be64(x) __swab64((x))
#define __be64_to_cpu(x) __swab64((x))
#define __cpu_to_be32(x) __swab32((x))
#define __be32_to_cpu(x) __swab32((x))
#define __cpu_to_be16(x) __swab16((x))
#define __be16_to_cpu(x) __swab16((x))
#define __cpu_to_le64p(x) (*(__u64*)(x))
#define __le64_to_cpup(x) (*(__u64*)(x))
#define __cpu_to_le32p(x) (*(__u32*)(x))
#define __le32_to_cpup(x) (*(__u32*)(x))
#define __cpu_to_le16p(x) (*(__u16*)(x))
#define __le16_to_cpup(x) (*(__u16*)(x))
#define __cpu_to_be64p(x) __swab64p((x))
#define __be64_to_cpup(x) __swab64p((x))
#define __cpu_to_be32p(x) __swab32p((x))
#define __be32_to_cpup(x) __swab32p((x))
#define __cpu_to_be16p(x) __swab16p((x))
#define __be16_to_cpup(x) __swab16p((x))
#define __cpu_to_le64s(x) ((__s64)(x))
#define __le64_to_cpus(x) ((__s64)(x))
#define __cpu_to_le32s(x) ((__s32)(x))
#define __le32_to_cpus(x) ((__s32)(x))
#define __cpu_to_le16s(x) ((__s16)(x))
#define __le16_to_cpus(x) ((__s16)(x))
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))
#ifndef cpu_to_le64
#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu
#endif
#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus
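/*
 * Usage sketch (illustrative, not part of the original header): on the
 * little-endian hosts this driver targets, the le*_to_cpu macros are
 * plain casts while the be* variants byte-swap. With the standard ext2
 * superblock layout (sb->s_magic is a little-endian __u16 on disk):
 *
 *     if (le16_to_cpu(sb->s_magic) == 0xEF53) {
 *         // no-op cast on x86: the on-disk value is already host order
 *     }
 *     __u32 big = cpu_to_be32(0x0000EF53);   // swaps to 0x53EF0000
 */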
//
// Network to host byte swap functions
//
#define ntohl(x) ( ( ( ( x ) & 0x000000ff ) << 24 ) | \
                   ( ( ( x ) & 0x0000ff00 ) << 8  ) | \
                   ( ( ( x ) & 0x00ff0000 ) >> 8  ) | \
                   ( ( ( x ) & 0xff000000 ) >> 24 ) )
#define ntohs(x) ( ( ( ( x ) & 0xff00 ) >> 8 ) | \
                   ( ( ( x ) & 0x00ff ) << 8 ) )
#define htonl(x) ntohl(x)
#define htons(x) ntohs(x)
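/*
 * Note (not in the original): these ntohl/ntohs definitions swap bytes
 * unconditionally, which is correct only on little-endian hosts such as
 * the x86/AMD64/IA64 targets this driver supports. Quick sanity sketch:
 *
 *     ASSERT(ntohs(0x1234) == 0x3412);
 *     ASSERT(ntohl(0x000000FF) == 0xFF000000);
 */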
//
// kernel printk flags
//
#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
#define KERN_CRIT "<2>" /* critical conditions */
#define KERN_ERR "<3>" /* error conditions */
#define KERN_WARNING "<4>" /* warning conditions */
#define KERN_NOTICE "<5>" /* normal but significant condition */
#define KERN_INFO "<6>" /* informational */
#define KERN_DEBUG "<7>" /* debug-level messages */
#define printk DbgPrint
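/*
 * Usage sketch (illustrative): printk maps straight onto DbgPrint, so a
 * KERN_* prefix is emitted literally instead of being filtered by a log
 * level as under Linux. String-literal concatenation does the joining:
 *
 *     printk(KERN_ERR "ext2fsd: mount failed, status = %x\n", status);
 *     // expands to DbgPrint("<3>" "ext2fsd: mount failed, status = %x\n", status);
 */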
//
// Linux module definitions
//
#define likely
#define unlikely
#define __init
#define __exit
#define THIS_MODULE NULL
#define MODULE_LICENSE(x)
#define MODULE_ALIAS_NLS(x)
#define EXPORT_SYMBOL(x)
#define try_module_get(x) (TRUE)
#define module_put(x)
#define module_init(X) int __init module_##X() {return X();}
#define module_exit(X) void __exit module_##X() {X();}
#define DECLARE_INIT(X) int __init module_##X(void)
#define DECLARE_EXIT(X) void __exit module_##X(void)
#define LOAD_MODULE(X)   do { rc = module_##X(); } while(0)
#define UNLOAD_MODULE(X) do { module_##X(); } while(0)
#define LOAD_NLS LOAD_MODULE
#define UNLOAD_NLS UNLOAD_MODULE
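/*
 * Usage sketch (illustrative; init_nls_cp437 is a hypothetical entry
 * point): a ported module declares its entry points via module_init /
 * module_exit, and the driver invokes them through LOAD_MODULE, which
 * assigns to a local status variable named 'rc':
 *
 *     static int __init init_nls_cp437(void) { return 0; }
 *     module_init(init_nls_cp437)    // emits module_init_nls_cp437()
 *
 *     int rc;
 *     LOAD_MODULE(init_nls_cp437);   // rc = module_init_nls_cp437();
 */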
//
// spinlocks .....
//
typedef struct _spinlock_t {
    KSPIN_LOCK  lock;
    KIRQL       irql;
} spinlock_t;
#define spin_lock_init(sl) KeInitializeSpinLock(&((sl)->lock))
#define spin_lock(sl) KeAcquireSpinLock(&((sl)->lock), &((sl)->irql))
#define spin_unlock(sl) KeReleaseSpinLock(&((sl)->lock), (sl)->irql)
#define spin_lock_irqsave(sl, flags) do {spin_lock(sl); flags=(sl)->irql;} while(0)
#define spin_unlock_irqrestore(sl, flags) do {ASSERT((KIRQL)(flags)==(sl)->irql); spin_unlock(sl);} while(0)
#define assert_spin_locked(x) do {} while(0)
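/*
 * Usage sketch (illustrative): the port stores the acquiring IRQL inside
 * the lock itself, so the 'flags' argument of the irqsave variants only
 * feeds the ASSERT in spin_unlock_irqrestore:
 *
 *     spinlock_t lk;
 *     unsigned long flags;
 *
 *     spin_lock_init(&lk);
 *     spin_lock_irqsave(&lk, flags);       // raises IRQL to DISPATCH_LEVEL
 *     // ... critical section ...
 *     spin_unlock_irqrestore(&lk, flags);  // restores the saved IRQL
 */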
/*
* Does a critical section need to be broken due to another
* task waiting?: (technically does not depend on CONFIG_PREEMPT,
* but a general need for low latency)
*/
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
    return spin_is_contended(lock);
#else
    return 0;
#endif
}
//
// atomic
//
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
#define atomic_set(v,i) InterlockedExchange((PLONG)(&(v)->counter), (LONG)(i))
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v. Note that the guaranteed useful range
* of an atomic_t is only 24 bits.
*/
static inline void atomic_add(int volatile i, atomic_t volatile *v)
{
    InterlockedExchangeAdd((PLONG)(&v->counter), (LONG) i);
}
/**
* atomic_sub - subtract the atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
static inline void atomic_sub(int volatile i, atomic_t volatile *v)
{
    InterlockedExchangeAdd((PLONG)(&v->counter), (LONG)(-i));
}
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
static inline int atomic_sub_and_test(int volatile i, atomic_t volatile *v)
{
    int counter, result;

    /* Optimistic read-modify-write: snapshot the counter, compute the new
       value, and retry if another thread updated the counter before the
       compare-exchange could commit it. */
    do {
        counter = v->counter;
        result = counter - i;
    } while (InterlockedCompareExchange(
                 (PLONG)(&v->counter),
                 (LONG) result,
                 (LONG) counter) != counter);

    return (result == 0);
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
static inline void atomic_inc(atomic_t volatile *v)
{
    InterlockedIncrement((PLONG)(&v->counter));
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
static inline void atomic_dec(atomic_t volatile *v)
{
    InterlockedDecrement((PLONG)(&v->counter));
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
static inline int atomic_dec_and_test(atomic_t volatile *v)
{
    return (0 == InterlockedDecrement((PLONG)(&v->counter)));
}
/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
static inline int atomic_inc_and_test(atomic_t volatile *v)
{
    return (0 == InterlockedIncrement((PLONG)(&v->counter)));
}
/**
* atomic_add_negative - add and test if negative
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
static inline int atomic_add_negative(int volatile i, atomic_t volatile *v)
{
    /* InterlockedExchangeAdd returns the old value, so old + i is the new
       value; report whether it went negative. */
    return ((InterlockedExchangeAdd((PLONG)(&v->counter), (LONG) i) + i) < 0);
}
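/*
 * Usage sketch (illustrative): a simple reference-count pattern built on
 * the wrappers above. Note the "24 bits" caveat repeated in the comments
 * is inherited from older Linux documentation; the Interlocked-based
 * implementations here operate on the full 32-bit LONG.
 *
 *     atomic_t refcount = ATOMIC_INIT(1);
 *
 *     atomic_inc(&refcount);                 // take a reference
 *     if (atomic_dec_and_test(&refcount)) {
 *         // last reference released: safe to tear the object down
 *     }
 */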
//
// bit operations
//
/**
 * set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Note: unlike the Linux kernel's set_bit(), this implementation is
 * non-atomic and may be reordered. If it is called on the same region
 * of memory simultaneously, the effect may be that only one operation
 * succeeds.
 */
static inline void set_bit(int nr, volatile void *addr)
{
    volatile unsigned char *ADDR;
    unsigned char mask;

    ADDR = (volatile unsigned char *) addr;
    ADDR += nr >> 3;
    mask = 1 << (nr & 0x07);
    *ADDR |= mask;
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Note: unlike the Linux kernel's clear_bit(), this implementation is
 * non-atomic and contains no memory barrier, so callers that need
 * atomicity or cross-processor visibility must provide their own
 * locking.
 */
static inline void clear_bit(int nr, volatile void *addr)
{
    volatile unsigned char *ADDR;
    unsigned char mask;

    ADDR = (volatile unsigned char *) addr;
    ADDR += nr >> 3;
    mask = 1 << (nr & 0x07);
    *ADDR &= ~mask;
}
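/*
 * Usage sketch (illustrative): the helpers address memory as a byte
 * bitmap, bit 0 of byte 0 first, which matches ext2's on-disk block and
 * inode bitmaps:
 *
 *     unsigned char bitmap[8] = {0};
 *
 *     set_bit(9, bitmap);     // sets bit 1 of byte 1: bitmap[1] == 0x02
 *     clear_bit(9, bitmap);   // clears it again
 */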