📄 acmacros.h
                                        ((UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[5];\
                                        ((UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[4];\
                                        ((UINT8 *)(void *)(d))[4] = ((UINT8 *)(void *)(s))[3];\
                                        ((UINT8 *)(void *)(d))[5] = ((UINT8 *)(void *)(s))[2];\
                                        ((UINT8 *)(void *)(d))[6] = ((UINT8 *)(void *)(s))[1];\
                                        ((UINT8 *)(void *)(d))[7] = ((UINT8 *)(void *)(s))[0];}

#else
/*
 * Macros for little-endian machines
 */

/* This macro sets a buffer index, starting from the beginning of the buffer */

#define ACPI_BUFFER_INDEX(BufLen,BufOffset,ByteGran)    (BufOffset)

#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED

/* The hardware supports unaligned transfers, just do the little-endian move */

/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d,s)         *(UINT16 *)(void *)(d) = *(UINT16 *)(void *)(s)
#define ACPI_MOVE_16_TO_32(d,s)         *(UINT32 *)(void *)(d) = *(UINT16 *)(void *)(s)
#define ACPI_MOVE_16_TO_64(d,s)         *(UINT64 *)(void *)(d) = *(UINT16 *)(void *)(s)

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_32_TO_32(d,s)         *(UINT32 *)(void *)(d) = *(UINT32 *)(void *)(s)
#define ACPI_MOVE_32_TO_64(d,s)         *(UINT64 *)(void *)(d) = *(UINT32 *)(void *)(s)

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d,s)         ACPI_MOVE_32_TO_32(d,s)    /* Truncate to 32 */
#define ACPI_MOVE_64_TO_64(d,s)         *(UINT64 *)(void *)(d) = *(UINT64 *)(void *)(s)

#else
/*
 * The hardware does not support unaligned transfers. We must move the
 * data one byte at a time. These macros work whether the source or
 * the destination (or both) is/are unaligned. (Little-endian move)
 */

/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d,s)         {((UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[0];\
                                         ((UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[1];}

#define ACPI_MOVE_16_TO_32(d,s)         {(*(UINT32 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d,s);}
#define ACPI_MOVE_16_TO_64(d,s)         {(*(UINT64 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d,s);}

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */

#define ACPI_MOVE_32_TO_32(d,s)         {((UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[0];\
                                         ((UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[1];\
                                         ((UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[2];\
                                         ((UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[3];}

#define ACPI_MOVE_32_TO_64(d,s)         {(*(UINT64 *)(void *)(d)) = 0; ACPI_MOVE_32_TO_32(d,s);}

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d,s)         ACPI_MOVE_32_TO_32(d,s)    /* Truncate to 32 */

#define ACPI_MOVE_64_TO_64(d,s)         {((UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[0];\
                                         ((UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[1];\
                                         ((UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[2];\
                                         ((UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[3];\
                                         ((UINT8 *)(void *)(d))[4] = ((UINT8 *)(void *)(s))[4];\
                                         ((UINT8 *)(void *)(d))[5] = ((UINT8 *)(void *)(s))[5];\
                                         ((UINT8 *)(void *)(d))[6] = ((UINT8 *)(void *)(s))[6];\
                                         ((UINT8 *)(void *)(d))[7] = ((UINT8 *)(void *)(s))[7];}
#endif
#endif

/* Macros based on machine integer width */

#if ACPI_MACHINE_WIDTH == 32
#define ACPI_MOVE_SIZE_TO_16(d,s)       ACPI_MOVE_32_TO_16(d,s)

#elif ACPI_MACHINE_WIDTH == 64
#define ACPI_MOVE_SIZE_TO_16(d,s)       ACPI_MOVE_64_TO_16(d,s)

#else
#error unknown ACPI_MACHINE_WIDTH
#endif
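/*
 * Illustrative usage sketch for the move macros above (hypothetical helper,
 * not part of acmacros.h). ACPI_MOVE_32_TO_32 reads a 32-bit field out of a
 * raw, possibly unaligned byte buffer: it expands to a direct UINT32 store
 * when unaligned access is supported, or to a byte-by-byte little-endian
 * move when ACPI_MISALIGNMENT_NOT_SUPPORTED is defined. The function name
 * and the byte offset of the field are assumptions for the example.
 */
static UINT32
ExampleGetTableLength (
    UINT8                   *RawHeader)     /* May be unaligned */
{
    UINT32                  Length;

    /* Assume a little-endian 32-bit Length field at byte offset 4 */

    ACPI_MOVE_32_TO_32 (&Length, &RawHeader[4]);
    return (Length);
}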
/*
 * Fast power-of-two math macros for non-optimized compilers
 */
#define _ACPI_DIV(value,PowerOf2)       ((UINT32) ((value) >> (PowerOf2)))
#define _ACPI_MUL(value,PowerOf2)       ((UINT32) ((value) << (PowerOf2)))
#define _ACPI_MOD(value,Divisor)        ((UINT32) ((value) & ((Divisor) -1)))

#define ACPI_DIV_2(a)                   _ACPI_DIV(a,1)
#define ACPI_MUL_2(a)                   _ACPI_MUL(a,1)
#define ACPI_MOD_2(a)                   _ACPI_MOD(a,2)

#define ACPI_DIV_4(a)                   _ACPI_DIV(a,2)
#define ACPI_MUL_4(a)                   _ACPI_MUL(a,2)
#define ACPI_MOD_4(a)                   _ACPI_MOD(a,4)

#define ACPI_DIV_8(a)                   _ACPI_DIV(a,3)
#define ACPI_MUL_8(a)                   _ACPI_MUL(a,3)
#define ACPI_MOD_8(a)                   _ACPI_MOD(a,8)

#define ACPI_DIV_16(a)                  _ACPI_DIV(a,4)
#define ACPI_MUL_16(a)                  _ACPI_MUL(a,4)
#define ACPI_MOD_16(a)                  _ACPI_MOD(a,16)

#define ACPI_DIV_32(a)                  _ACPI_DIV(a,5)
#define ACPI_MUL_32(a)                  _ACPI_MUL(a,5)
#define ACPI_MOD_32(a)                  _ACPI_MOD(a,32)

/*
 * Rounding macros (Power of two boundaries only)
 */
#define ACPI_ROUND_DOWN(value,boundary)     (((ACPI_NATIVE_UINT)(value)) & \
                                                (~(((ACPI_NATIVE_UINT) boundary)-1)))

#define ACPI_ROUND_UP(value,boundary)       ((((ACPI_NATIVE_UINT)(value)) + \
                                                (((ACPI_NATIVE_UINT) boundary)-1)) & \
                                                (~(((ACPI_NATIVE_UINT) boundary)-1)))

/* Note: sizeof(ACPI_NATIVE_UINT) evaluates to either 2, 4, or 8 */

#define ACPI_ROUND_DOWN_TO_32BIT(a)         ACPI_ROUND_DOWN(a,4)
#define ACPI_ROUND_DOWN_TO_64BIT(a)         ACPI_ROUND_DOWN(a,8)
#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a)   ACPI_ROUND_DOWN(a,sizeof(ACPI_NATIVE_UINT))

#define ACPI_ROUND_UP_TO_32BIT(a)           ACPI_ROUND_UP(a,4)
#define ACPI_ROUND_UP_TO_64BIT(a)           ACPI_ROUND_UP(a,8)
#define ACPI_ROUND_UP_TO_NATIVE_WORD(a)     ACPI_ROUND_UP(a,sizeof(ACPI_NATIVE_UINT))

#define ACPI_ROUND_BITS_UP_TO_BYTES(a)      ACPI_DIV_8((a) + 7)
#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a)    ACPI_DIV_8((a))

#define ACPI_ROUND_UP_TO_1K(a)              (((a) + 1023) >> 10)

/* Generic (non-power-of-two) rounding */

#define ACPI_ROUND_UP_TO(value,boundary)    (((value) + ((boundary)-1)) / (boundary))

#define ACPI_IS_MISALIGNED(value)           (((ACPI_NATIVE_UINT)value) & (sizeof(ACPI_NATIVE_UINT)-1))

/*
 * Bitmask creation
 * Bit positions start at zero.
 * MASK_BITS_ABOVE creates a mask starting AT the position and above
 * MASK_BITS_BELOW creates a mask starting one bit BELOW the position
 */
#define ACPI_MASK_BITS_ABOVE(position)      (~((ACPI_INTEGER_MAX) << ((UINT32) (position))))
#define ACPI_MASK_BITS_BELOW(position)      ((ACPI_INTEGER_MAX) << ((UINT32) (position)))

#define ACPI_IS_OCTAL_DIGIT(d)              (((char)(d) >= '0') && ((char)(d) <= '7'))

/* Bitfields within ACPI registers */

#define ACPI_REGISTER_PREPARE_BITS(Val, Pos, Mask)      ((Val << Pos) & Mask)
#define ACPI_REGISTER_INSERT_VALUE(Reg, Pos, Mask, Val)     Reg = (Reg & (~(Mask))) | ACPI_REGISTER_PREPARE_BITS(Val, Pos, Mask)

#define ACPI_INSERT_BITS(Target, Mask, Source)          Target = ((Target & (~(Mask))) | (Source & Mask))

/* Generate a UUID */

#define ACPI_INIT_UUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
    (a) & 0xFF, ((a) >> 8) & 0xFF, ((a) >> 16) & 0xFF, ((a) >> 24) & 0xFF, \
    (b) & 0xFF, ((b) >> 8) & 0xFF, \
    (c) & 0xFF, ((c) >> 8) & 0xFF, \
    (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)
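/*
 * Illustrative usage sketch for the helpers above (hypothetical values and
 * register layout, not part of acmacros.h). Worked results, assuming
 * ACPI_NATIVE_UINT is the native unsigned integer type:
 *
 *   ACPI_ROUND_UP (0x1005, 4)        -> 0x1008  (next 32-bit boundary)
 *   ACPI_ROUND_DOWN (0x1005, 4)      -> 0x1004
 *   ACPI_ROUND_BITS_UP_TO_BYTES (17) -> 3       ((17 + 7) >> 3)
 *   ACPI_MASK_BITS_ABOVE (4)         -> 0x0F    (ANDing clears bit 4 and above)
 *
 * The register field below (3 bits at bit position 8, mask 0x700) is invented
 * purely to show ACPI_REGISTER_INSERT_VALUE.
 */
static void
ExampleInsertRegisterField (
    UINT32                  *Reg,
    UINT32                  FieldValue)
{
    /* Replaces bits 8-10 of *Reg with the low three bits of FieldValue */

    ACPI_REGISTER_INSERT_VALUE (*Reg, 8, 0x00000700, FieldValue);
}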
/*
 * An ACPI_NAMESPACE_NODE can appear in some contexts
 * where a pointer to an ACPI_OPERAND_OBJECT can also
 * appear. This macro is used to distinguish them.
 *
 * The "Descriptor" field is the first field in both structures.
 */
#define ACPI_GET_DESCRIPTOR_TYPE(d)     (((ACPI_DESCRIPTOR *)(void *)(d))->Common.DescriptorType)
#define ACPI_SET_DESCRIPTOR_TYPE(d,t)   (((ACPI_DESCRIPTOR *)(void *)(d))->Common.DescriptorType = t)

/* Macro to test the object type */

#define ACPI_GET_OBJECT_TYPE(d)         (((ACPI_OPERAND_OBJECT *)(void *)(d))->Common.Type)

/* Macro to check whether the table flags allow a SINGLE instance only or MULTIPLE instances of the table */

#define ACPI_IS_SINGLE_TABLE(x)         (((x) & 0x01) == ACPI_TABLE_SINGLE ? 1 : 0)

/*
 * Macros for the master AML opcode table
 */
#if defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
#define ACPI_OP(Name,PArgs,IArgs,ObjType,Class,Type,Flags)  {Name,(UINT32)(PArgs),(UINT32)(IArgs),(UINT32)(Flags),ObjType,Class,Type}
#else
#define ACPI_OP(Name,PArgs,IArgs,ObjType,Class,Type,Flags)  {(UINT32)(PArgs),(UINT32)(IArgs),(UINT32)(Flags),ObjType,Class,Type}
#endif

#ifdef ACPI_DISASSEMBLER
#define ACPI_DISASM_ONLY_MEMBERS(a)     a;
#else
#define ACPI_DISASM_ONLY_MEMBERS(a)
#endif

#define ARG_TYPE_WIDTH                  5
#define ARG_1(x)                        ((UINT32)(x))
#define ARG_2(x)                        ((UINT32)(x) << (1 * ARG_TYPE_WIDTH))
#define ARG_3(x)                        ((UINT32)(x) << (2 * ARG_TYPE_WIDTH))
#define ARG_4(x)                        ((UINT32)(x) << (3 * ARG_TYPE_WIDTH))
#define ARG_5(x)                        ((UINT32)(x) << (4 * ARG_TYPE_WIDTH))
#define ARG_6(x)                        ((UINT32)(x) << (5 * ARG_TYPE_WIDTH))

#define ARGI_LIST1(a)                   (ARG_1(a))
#define ARGI_LIST2(a,b)                 (ARG_1(b)|ARG_2(a))
#define ARGI_LIST3(a,b,c)               (ARG_1(c)|ARG_2(b)|ARG_3(a))
#define ARGI_LIST4(a,b,c,d)             (ARG_1(d)|ARG_2(c)|ARG_3(b)|ARG_4(a))
#define ARGI_LIST5(a,b,c,d,e)           (ARG_1(e)|ARG_2(d)|ARG_3(c)|ARG_4(b)|ARG_5(a))
#define ARGI_LIST6(a,b,c,d,e,f)         (ARG_1(f)|ARG_2(e)|ARG_3(d)|ARG_4(c)|ARG_5(b)|ARG_6(a))

#define ARGP_LIST1(a)                   (ARG_1(a))
#define ARGP_LIST2(a,b)                 (ARG_1(a)|ARG_2(b))
#define ARGP_LIST3(a,b,c)               (ARG_1(a)|ARG_2(b)|ARG_3(c))
#define ARGP_LIST4(a,b,c,d)             (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d))
#define ARGP_LIST5(a,b,c,d,e)           (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e))
#define ARGP_LIST6(a,b,c,d,e,f)         (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)|ARG_6(f))

#define GET_CURRENT_ARG_TYPE(List)      (List & ((UINT32) 0x1F))
#define INCREMENT_ARG_LIST(List)        (List >>= ((UINT32) ARG_TYPE_WIDTH))

#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
/*
 * The module name is included in both debug and non-debug versions, primarily
 * for error messages. The __FILE__ macro is not very useful for this, because
 * it often includes the entire pathname to the module.
 */
#define ACPI_MODULE_NAME(Name)          static char ACPI_UNUSED_VAR *_AcpiModuleName = Name;
#else
#define ACPI_MODULE_NAME(Name)
#endif

/*
 * Ascii error messages can be configured out
 */
#ifndef ACPI_NO_ERROR_MESSAGES

#define AE_INFO                         _AcpiModuleName, __LINE__

/*
 * Error reporting. The caller's module name and line number are inserted by
 * AE_INFO; the plist contains a set of parens to allow variable-length lists.
 */
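/*
 * Illustrative usage sketch for the packed argument-list macros above
 * (hypothetical walker, not part of acmacros.h). ARGP_LIST1..6 pack up to
 * six 5-bit argument type codes into a single UINT32 with the first argument
 * in the low-order bits; GET_CURRENT_ARG_TYPE and INCREMENT_ARG_LIST then
 * step through the list. A type code of zero is assumed to mark the end of
 * the list.
 */
static void
ExampleWalkPackedArgs (
    UINT32                  ArgList)        /* e.g. an ARGP_LIST3(...) value */
{
    UINT32                  ArgType;

    while ((ArgType = GET_CURRENT_ARG_TYPE (ArgList)) != 0)
    {
        /* ... handle one argument of type ArgType ... */

        INCREMENT_ARG_LIST (ArgList);
    }
}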