📄 acmacros.h
/******************************************************************************
 *
 * Name: acmacros.h - C macros for the entire subsystem.
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#ifndef __ACMACROS_H__
#define __ACMACROS_H__

/*
 * Data manipulation macros
 */
#define ACPI_LOWORD(l)                  ((u16)(u32)(l))
#define ACPI_HIWORD(l)                  ((u16)((((u32)(l)) >> 16) & 0xFFFF))
#define ACPI_LOBYTE(l)                  ((u8)(u16)(l))
#define ACPI_HIBYTE(l)                  ((u8)((((u16)(l)) >> 8) & 0xFF))

#define ACPI_SET_BIT(target,bit)        ((target) |= (bit))
#define ACPI_CLEAR_BIT(target,bit)      ((target) &= ~(bit))
#define ACPI_MIN(a,b)                   (((a)<(b))?(a):(b))
#define ACPI_MAX(a,b)                   (((a)>(b))?(a):(b))

/* Size calculation */

#define ACPI_ARRAY_LENGTH(x)            (sizeof(x) / sizeof((x)[0]))

#ifdef ACPI_NO_INTEGER64_SUPPORT
/*
 * acpi_integer is 32-bits, no 64-bit support on this platform
 */
#define ACPI_LODWORD(l)                 ((u32)(l))
#define ACPI_HIDWORD(l)                 ((u32)(0))

#else
/*
 * Full 64-bit address/integer on both 32-bit and 64-bit platforms
 */
#define ACPI_LODWORD(l)                 ((u32)(u64)(l))
#define ACPI_HIDWORD(l)                 ((u32)(((*(struct uint64_struct *)(void *)(&l))).hi))
#endif

/*
 * printf() format helpers
 */

/* Split 64-bit integer into two 32-bit values. Use with %8.8_x%8.8_x */

#define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i),ACPI_LODWORD(i)
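/*
 * Illustrative usage (editorial sketch, not part of the original header):
 * ACPI_FORMAT_UINT64 expands to the two 32-bit halves, high half first,
 * so a 64-bit value can be printed with a pair of 32-bit conversions,
 * e.g. printf ("%8.8X%8.8X", ACPI_FORMAT_UINT64 (value));
 * printf here is plain standard C used only for the example.
 */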
/*
 * Extract data using a pointer. Any more than a byte and we
 * get into potential alignment issues -- see the STORE macros below.
 * Use with care.
 */
#define ACPI_GET8(ptr)                  *ACPI_CAST_PTR (u8, ptr)
#define ACPI_GET16(ptr)                 *ACPI_CAST_PTR (u16, ptr)
#define ACPI_GET32(ptr)                 *ACPI_CAST_PTR (u32, ptr)
#define ACPI_GET64(ptr)                 *ACPI_CAST_PTR (u64, ptr)
#define ACPI_SET8(ptr)                  *ACPI_CAST_PTR (u8, ptr)
#define ACPI_SET16(ptr)                 *ACPI_CAST_PTR (u16, ptr)
#define ACPI_SET32(ptr)                 *ACPI_CAST_PTR (u32, ptr)
#define ACPI_SET64(ptr)                 *ACPI_CAST_PTR (u64, ptr)

/*
 * Pointer manipulation
 */
#define ACPI_CAST_PTR(t, p)             ((t *) (acpi_uintptr_t) (p))
#define ACPI_CAST_INDIRECT_PTR(t, p)    ((t **) (acpi_uintptr_t) (p))
#define ACPI_ADD_PTR(t,a,b)             ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8,(a)) + (acpi_native_uint)(b)))
#define ACPI_PTR_DIFF(a,b)              (acpi_native_uint) (ACPI_CAST_PTR (u8,(a)) - ACPI_CAST_PTR (u8,(b)))

/* Pointer/Integer type conversions */

#define ACPI_TO_POINTER(i)              ACPI_ADD_PTR (void,(void *) NULL,(acpi_native_uint) i)
#define ACPI_TO_INTEGER(p)              ACPI_PTR_DIFF (p,(void *) NULL)
#define ACPI_OFFSET(d,f)                (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f),(void *) NULL)
#define ACPI_PHYSADDR_TO_PTR(i)         ACPI_TO_POINTER(i)
#define ACPI_PTR_TO_PHYSADDR(i)         ACPI_TO_INTEGER(i)

#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
#define ACPI_COMPARE_NAME(a,b)          (*ACPI_CAST_PTR (u32,(a)) == *ACPI_CAST_PTR (u32,(b)))
#else
#define ACPI_COMPARE_NAME(a,b)          (!ACPI_STRNCMP (ACPI_CAST_PTR (char,(a)), ACPI_CAST_PTR (char,(b)), ACPI_NAME_SIZE))
#endif
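/*
 * Illustrative example (editorial sketch, not part of the original header):
 * for a hypothetical "struct foo { u32 a; u32 b; };",
 * ACPI_OFFSET (struct foo, b) evaluates to the byte offset of "b" (4 here),
 * and ACPI_ADD_PTR (u8, p, 4) yields the byte address four bytes past "p",
 * regardless of the pointer type of "p".
 */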
/*
 * Macros for moving data around to/from buffers that are possibly unaligned.
 * If the hardware supports the transfer of unaligned data, just do the store.
 * Otherwise, we have to move one byte at a time.
 */
#ifdef ACPI_BIG_ENDIAN
/*
 * Macros for big-endian machines
 */

/* This macro sets a buffer index, starting from the end of the buffer */

#define ACPI_BUFFER_INDEX(buf_len,buf_offset,byte_gran) ((buf_len) - (((buf_offset)+1) * (byte_gran)))

/* These macros reverse the bytes during the move, converting little-endian to big endian */

						/* Big Endian <== Little Endian */
						/*  Hi...Lo       Lo...Hi      */
/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d,s)         {((u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\
                                         ((u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];}

#define ACPI_MOVE_16_TO_32(d,s)         {(*(u32 *)(void *)(d))=0;\
                                         ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
                                         ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}

#define ACPI_MOVE_16_TO_64(d,s)         {(*(u64 *)(void *)(d))=0;\
                                         ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
                                         ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */

#define ACPI_MOVE_32_TO_32(d,s)         {((u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\
                                         ((u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\
                                         ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
                                         ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}

#define ACPI_MOVE_32_TO_64(d,s)         {(*(u64 *)(void *)(d))=0;\
                                         ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
                                         ((u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\
                                         ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
                                         ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */

#define ACPI_MOVE_64_TO_32(d,s)         ACPI_MOVE_32_TO_32(d,s)    /* Truncate to 32 */

#define ACPI_MOVE_64_TO_64(d,s)         {((u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[7];\
                                         ((u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[6];\
                                         ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[5];\
                                         ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[4];\
                                         ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
                                         ((u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\
                                         ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
                                         ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
#else
/*
 * Macros for little-endian machines
 */

/* This macro sets a buffer index, starting from the beginning of the buffer */

#define ACPI_BUFFER_INDEX(buf_len,buf_offset,byte_gran) (buf_offset)

#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED

/* The hardware supports unaligned transfers, just do the little-endian move */

/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d,s)         *(u16 *)(void *)(d) = *(u16 *)(void *)(s)
#define ACPI_MOVE_16_TO_32(d,s)         *(u32 *)(void *)(d) = *(u16 *)(void *)(s)
#define ACPI_MOVE_16_TO_64(d,s)         *(u64 *)(void *)(d) = *(u16 *)(void *)(s)

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_32_TO_32(d,s)         *(u32 *)(void *)(d) = *(u32 *)(void *)(s)
#define ACPI_MOVE_32_TO_64(d,s)         *(u64 *)(void *)(d) = *(u32 *)(void *)(s)

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d,s)         ACPI_MOVE_32_TO_32(d,s)    /* Truncate to 32 */
#define ACPI_MOVE_64_TO_64(d,s)         *(u64 *)(void *)(d) = *(u64 *)(void *)(s)

#else
/*
 * The hardware does not support unaligned transfers. We must move the
 * data one byte at a time. These macros work whether the source or
 * the destination (or both) is/are unaligned. (Little-endian move)
 */

/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d,s)         {((u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
                                         ((u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];}

#define ACPI_MOVE_16_TO_32(d,s)         {(*(u32 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d,s);}
#define ACPI_MOVE_16_TO_64(d,s)         {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d,s);}

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */

#define ACPI_MOVE_32_TO_32(d,s)         {((u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
                                         ((u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\
                                         ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\
                                         ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];}

#define ACPI_MOVE_32_TO_64(d,s)         {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_32_TO_32(d,s);}

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d,s)         ACPI_MOVE_32_TO_32(d,s)    /* Truncate to 32 */
#define ACPI_MOVE_64_TO_64(d,s)         {((u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
                                         ((u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\
                                         ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\
                                         ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];\
                                         ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[4];\
                                         ((u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[5];\
                                         ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[6];\
                                         ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[7];}
#endif
#endif

/* Macros based on machine integer width */

#if ACPI_MACHINE_WIDTH == 32
#define ACPI_MOVE_SIZE_TO_16(d,s)       ACPI_MOVE_32_TO_16(d,s)

#elif ACPI_MACHINE_WIDTH == 64
#define ACPI_MOVE_SIZE_TO_16(d,s)       ACPI_MOVE_64_TO_16(d,s)

#else
#error unknown ACPI_MACHINE_WIDTH
#endif
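/*
 * Illustrative note (editorial sketch, not part of the original header):
 * with ACPI_MISALIGNMENT_NOT_SUPPORTED defined on a little-endian machine,
 * ACPI_MOVE_32_TO_32 (dst, src) copies the value one byte at a time, so both
 * "dst" and "src" may be unaligned; the big-endian variants additionally
 * reverse the byte order so the destination receives the value in native form.
 */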
/*
 * Fast power-of-two math macros for non-optimized compilers
 */
#define _ACPI_DIV(value,power_of2)      ((u32) ((value) >> (power_of2)))
#define _ACPI_MUL(value,power_of2)      ((u32) ((value) << (power_of2)))
#define _ACPI_MOD(value,divisor)        ((u32) ((value) & ((divisor) -1)))

#define ACPI_DIV_2(a)                   _ACPI_DIV(a,1)
#define ACPI_MUL_2(a)                   _ACPI_MUL(a,1)
#define ACPI_MOD_2(a)                   _ACPI_MOD(a,2)

#define ACPI_DIV_4(a)                   _ACPI_DIV(a,2)
#define ACPI_MUL_4(a)                   _ACPI_MUL(a,2)
#define ACPI_MOD_4(a)                   _ACPI_MOD(a,4)

#define ACPI_DIV_8(a)                   _ACPI_DIV(a,3)
#define ACPI_MUL_8(a)                   _ACPI_MUL(a,3)
#define ACPI_MOD_8(a)                   _ACPI_MOD(a,8)

#define ACPI_DIV_16(a)                  _ACPI_DIV(a,4)
#define ACPI_MUL_16(a)                  _ACPI_MUL(a,4)
#define ACPI_MOD_16(a)                  _ACPI_MOD(a,16)

#define ACPI_DIV_32(a)                  _ACPI_DIV(a,5)
#define ACPI_MUL_32(a)                  _ACPI_MUL(a,5)
#define ACPI_MOD_32(a)                  _ACPI_MOD(a,32)

/*
 * Rounding macros (Power of two boundaries only)
 */
#define ACPI_ROUND_DOWN(value,boundary) (((acpi_native_uint)(value)) & \
                                         (~(((acpi_native_uint) boundary)-1)))

#define ACPI_ROUND_UP(value,boundary)   ((((acpi_native_uint)(value)) + \
                                          (((acpi_native_uint) boundary)-1)) & \
                                         (~(((acpi_native_uint) boundary)-1)))

/* Note: sizeof(acpi_native_uint) evaluates to either 2, 4, or 8 */

#define ACPI_ROUND_DOWN_TO_32BIT(a)         ACPI_ROUND_DOWN(a,4)
#define ACPI_ROUND_DOWN_TO_64BIT(a)         ACPI_ROUND_DOWN(a,8)
#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a)   ACPI_ROUND_DOWN(a,sizeof(acpi_native_uint))

#define ACPI_ROUND_UP_TO_32BIT(a)           ACPI_ROUND_UP(a,4)
#define ACPI_ROUND_UP_TO_64BIT(a)           ACPI_ROUND_UP(a,8)
#define ACPI_ROUND_UP_TO_NATIVE_WORD(a)     ACPI_ROUND_UP(a,sizeof(acpi_native_uint))

#define ACPI_ROUND_BITS_UP_TO_BYTES(a)      ACPI_DIV_8((a) + 7)
#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a)    ACPI_DIV_8((a))

#define ACPI_ROUND_UP_TO_1K(a)              (((a) + 1023) >> 10)

/* Generic (non-power-of-two) rounding */

#define ACPI_ROUND_UP_TO(value,boundary)    (((value) + ((boundary)-1)) / (boundary))

#define ACPI_IS_MISALIGNED(value)           (((acpi_native_uint)value) & (sizeof(acpi_native_uint)-1))

/*
 * Bitmask creation
 * Bit positions start at zero.
 * MASK_BITS_ABOVE creates a mask starting AT the position and above
 * MASK_BITS_BELOW creates a mask starting one bit BELOW the position
 */
#define ACPI_MASK_BITS_ABOVE(position)      (~((ACPI_INTEGER_MAX) << ((u32) (position))))
#define ACPI_MASK_BITS_BELOW(position)      ((ACPI_INTEGER_MAX) << ((u32) (position)))

#define ACPI_IS_OCTAL_DIGIT(d)              (((char)(d) >= '0') && ((char)(d) <= '7'))

/* Bitfields within ACPI registers */

#define ACPI_REGISTER_PREPARE_BITS(val, pos, mask)      ((val << pos) & mask)
#define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask)

#define ACPI_INSERT_BITS(target, mask, source)          target = ((target & (~(mask))) | (source & mask))

/* Generate a UUID */

#define ACPI_INIT_UUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
	(a) & 0xFF, ((a) >> 8) & 0xFF, ((a) >> 16) & 0xFF, ((a) >> 24) & 0xFF, \
	(b) & 0xFF, ((b) >> 8) & 0xFF, \
	(c) & 0xFF, ((c) >> 8) & 0xFF, \
	(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)

/*
 * A struct acpi_namespace_node * can appear in some contexts,
 * where a pointer to a union acpi_operand_object can also
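For illustration only, the following minimal standalone sketch (not part of acmacros.h) shows how a few of the simpler macros above behave. The u8/u16/u32 typedefs are local stand-ins for the ACPICA types, and ACPI_ROUND_UP and ACPI_ROUND_BITS_UP_TO_BYTES are reproduced here in simplified form without the acpi_native_uint casts; the expected results are noted in the comments.

#include <stdio.h>
#include <stdint.h>

typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;

/* Local, simplified copies of the macros above, for demonstration only */
#define ACPI_LOWORD(l)                  ((u16)(u32)(l))
#define ACPI_HIWORD(l)                  ((u16)((((u32)(l)) >> 16) & 0xFFFF))
#define ACPI_ROUND_UP(value,boundary)   (((value) + ((boundary)-1)) & (~((boundary)-1)))
#define ACPI_ROUND_BITS_UP_TO_BYTES(a)  (((a) + 7) >> 3)

int main(void)
{
	u32 value = 0x12345678;

	/* Split a 32-bit value into its 16-bit halves: prints 1234 and 5678 */
	printf("HIWORD=%04X LOWORD=%04X\n",
	       (unsigned)ACPI_HIWORD(value), (unsigned)ACPI_LOWORD(value));

	/* Round 13 up to the next 8-byte (power-of-two) boundary: prints 16 */
	printf("ROUND_UP(13,8)=%u\n", (unsigned)ACPI_ROUND_UP(13u, 8u));

	/* 10 bits occupy 2 whole bytes */
	printf("ROUND_BITS_UP_TO_BYTES(10)=%u\n",
	       (unsigned)ACPI_ROUND_BITS_UP_TO_BYTES(10u));
	return 0;
}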