acmacros.h
/******************************************************************************
 *
 * Name: acmacros.h - C macros for the entire subsystem.
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2005, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#ifndef __ACMACROS_H__
#define __ACMACROS_H__

/*
 * Data manipulation macros
 */
#define ACPI_LOWORD(l)                  ((u16)(u32)(l))
#define ACPI_HIWORD(l)                  ((u16)((((u32)(l)) >> 16) & 0xFFFF))
#define ACPI_LOBYTE(l)                  ((u8)(u16)(l))
#define ACPI_HIBYTE(l)                  ((u8)((((u16)(l)) >> 8) & 0xFF))

#define ACPI_SET_BIT(target,bit)        ((target) |= (bit))
#define ACPI_CLEAR_BIT(target,bit)      ((target) &= ~(bit))
#define ACPI_MIN(a,b)                   (((a)<(b))?(a):(b))

#if ACPI_MACHINE_WIDTH == 16

/*
 * For 16-bit addresses, we have to assume that the upper 32 bits
 * are zero.
 */
#define ACPI_LODWORD(l)                 ((u32)(l))
#define ACPI_HIDWORD(l)                 ((u32)(0))

#define ACPI_GET_ADDRESS(a)             ((a).lo)
#define ACPI_STORE_ADDRESS(a,b)         {(a).hi=0;(a).lo=(u32)(b);}
#define ACPI_VALID_ADDRESS(a)           ((a).hi | (a).lo)

#else
#ifdef ACPI_NO_INTEGER64_SUPPORT

/*
 * acpi_integer is 32-bits, no 64-bit support on this platform
 */
#define ACPI_LODWORD(l)                 ((u32)(l))
#define ACPI_HIDWORD(l)                 ((u32)(0))

#define ACPI_GET_ADDRESS(a)             (a)
#define ACPI_STORE_ADDRESS(a,b)         ((a)=(b))
#define ACPI_VALID_ADDRESS(a)           (a)

#else

/*
 * Full 64-bit address/integer on both 32-bit and 64-bit platforms
 */
#define ACPI_LODWORD(l)                 ((u32)(u64)(l))
#define ACPI_HIDWORD(l)                 ((u32)(((*(struct uint64_struct *)(void *)(&l))).hi))

#define ACPI_GET_ADDRESS(a)             (a)
#define ACPI_STORE_ADDRESS(a,b)         ((a)=(acpi_physical_address)(b))
#define ACPI_VALID_ADDRESS(a)           (a)
#endif
#endif
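/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source). The extraction macros above decompose an integer purely by
 * casting, shifting, and masking:
 *
 *     u32 value = 0x12345678;
 *     u16 lo = ACPI_LOWORD (value);    // lo == 0x5678
 *     u16 hi = ACPI_HIWORD (value);    // hi == 0x1234
 *     u8  lb = ACPI_LOBYTE (lo);       // lb == 0x78
 *     u8  hb = ACPI_HIBYTE (lo);       // hb == 0x56
 */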
/*
 * printf() format helpers
 */

/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */

#define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i),ACPI_LODWORD(i)

/*
 * Extract a byte of data using a pointer. Any more than a byte and we
 * get into potential alignment issues -- see the STORE macros below
 */
#define ACPI_GET8(addr)                 (*(u8*)(addr))

/* Pointer arithmetic */

#define ACPI_PTR_ADD(t,a,b)             (t *) (void *)((char *)(a) + (acpi_native_uint)(b))
#define ACPI_PTR_DIFF(a,b)              (acpi_native_uint) ((char *)(a) - (char *)(b))

/* Pointer/Integer type conversions */

#define ACPI_TO_POINTER(i)              ACPI_PTR_ADD (void, (void *) NULL,(acpi_native_uint)i)
#define ACPI_TO_INTEGER(p)              ACPI_PTR_DIFF (p,(void *) NULL)
#define ACPI_OFFSET(d,f)                (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f),(void *) NULL)
#define ACPI_FADT_OFFSET(f)             ACPI_OFFSET (FADT_DESCRIPTOR, f)

#define ACPI_CAST_PTR(t, p)             ((t *)(void *)(p))
#define ACPI_CAST_INDIRECT_PTR(t, p)    ((t **)(void *)(p))

#if ACPI_MACHINE_WIDTH == 16
#define ACPI_STORE_POINTER(d,s)         ACPI_MOVE_32_TO_32(d,s)
#define ACPI_PHYSADDR_TO_PTR(i)         (void *)(i)
#define ACPI_PTR_TO_PHYSADDR(i)         (u32) (char *)(i)
#else
#define ACPI_PHYSADDR_TO_PTR(i)         ACPI_TO_POINTER(i)
#define ACPI_PTR_TO_PHYSADDR(i)         ACPI_TO_INTEGER(i)
#endif

/*
 * Macros for moving data around to/from buffers that are possibly unaligned.
 * If the hardware supports the transfer of unaligned data, just do the store.
 * Otherwise, we have to move one byte at a time.
 */
#ifdef ACPI_BIG_ENDIAN
/*
 * Macros for big-endian machines
 */

/* This macro sets a buffer index, starting from the end of the buffer */

#define ACPI_BUFFER_INDEX(buf_len,buf_offset,byte_gran) ((buf_len) - (((buf_offset)+1) * (byte_gran)))

/* These macros reverse the bytes during the move, converting little-endian to big endian */

                                                  /* Big Endian  <==  Little Endian */
                                                  /*   Hi...Lo          Lo...Hi     */
/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d,s)         {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\
                                         ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];}

#define ACPI_MOVE_16_TO_32(d,s)         {(*(u32 *)(void *)(d))=0;\
                                         ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
                                         ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}

#define ACPI_MOVE_16_TO_64(d,s)         {(*(u64 *)(void *)(d))=0;\
                                         ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
                                         ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */

#define ACPI_MOVE_32_TO_32(d,s)         {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\
                                         ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\
                                         ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
                                         ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}

#define ACPI_MOVE_32_TO_64(d,s)         {(*(u64 *)(void *)(d))=0;\
                                         ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
                                         ((u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\
                                         ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
                                         ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d,s)         ACPI_MOVE_32_TO_32(d,s)    /* Truncate to 32 */

#define ACPI_MOVE_64_TO_64(d,s)         {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[7];\
                                         ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[6];\
                                         ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[5];\
                                         ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[4];\
                                         ((  u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
                                         ((  u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\
                                         ((  u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
                                         ((  u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
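/*
 * Illustrative sketch (editorial addition, not part of the original source).
 * On a big-endian host these macros swap byte order during the copy, so a
 * little-endian value read from an ACPI table lands in native order:
 *
 *     u8  src[4] = {0x78, 0x56, 0x34, 0x12};   // little-endian 0x12345678
 *     u32 dst;
 *     ACPI_MOVE_32_TO_32 (&dst, src);          // dst == 0x12345678 (native)
 */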
#else
/*
 * Macros for little-endian machines
 */

/* This macro sets a buffer index, starting from the beginning of the buffer */

#define ACPI_BUFFER_INDEX(buf_len,buf_offset,byte_gran) (buf_offset)

#ifdef ACPI_MISALIGNED_TRANSFERS

/* The hardware supports unaligned transfers, just do the little-endian move */

#if ACPI_MACHINE_WIDTH == 16

/* No 64-bit integers */
/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d,s)         *(u16 *)(void *)(d) = *(u16 *)(void *)(s)
#define ACPI_MOVE_16_TO_32(d,s)         *(u32 *)(void *)(d) = *(u16 *)(void *)(s)
#define ACPI_MOVE_16_TO_64(d,s)         ACPI_MOVE_16_TO_32(d,s)

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_32_TO_32(d,s)         *(u32 *)(void *)(d) = *(u32 *)(void *)(s)
#define ACPI_MOVE_32_TO_64(d,s)         ACPI_MOVE_32_TO_32(d,s)

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d,s)         ACPI_MOVE_32_TO_32(d,s)    /* Truncate to 32 */
#define ACPI_MOVE_64_TO_64(d,s)         ACPI_MOVE_32_TO_32(d,s)

#else

/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d,s)         *(u16 *)(void *)(d) = *(u16 *)(void *)(s)
#define ACPI_MOVE_16_TO_32(d,s)         *(u32 *)(void *)(d) = *(u16 *)(void *)(s)
#define ACPI_MOVE_16_TO_64(d,s)         *(u64 *)(void *)(d) = *(u16 *)(void *)(s)

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_32_TO_32(d,s)         *(u32 *)(void *)(d) = *(u32 *)(void *)(s)
#define ACPI_MOVE_32_TO_64(d,s)         *(u64 *)(void *)(d) = *(u32 *)(void *)(s)

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d,s)         ACPI_MOVE_32_TO_32(d,s)    /* Truncate to 32 */
#define ACPI_MOVE_64_TO_64(d,s)         *(u64 *)(void *)(d) = *(u64 *)(void *)(s)
#endif
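/*
 * Illustrative sketch (editorial addition, not part of the original source).
 * With ACPI_MISALIGNED_TRANSFERS defined, the move macros compile to plain
 * dereferencing assignments, so a field at a possibly odd offset inside a
 * raw table buffer can be fetched with a single access:
 *
 *     u8  *table;                               // hypothetical table image
 *     u32 length;
 *     ACPI_MOVE_32_TO_32 (&length, table + 4);  // one unaligned load/store
 */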
#else
/*
 * The hardware does not support unaligned transfers. We must move the
 * data one byte at a time. These macros work whether the source or
 * the destination (or both) is/are unaligned. (Little-endian move)
 */

/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d,s)         {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
                                         ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];}

#define ACPI_MOVE_16_TO_32(d,s)         {(*(u32 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d,s);}
#define ACPI_MOVE_16_TO_64(d,s)         {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d,s);}

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */

#define ACPI_MOVE_32_TO_32(d,s)         {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
                                         ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\
                                         ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\
                                         ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];}

#define ACPI_MOVE_32_TO_64(d,s)         {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_32_TO_32(d,s);}

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d,s)         ACPI_MOVE_32_TO_32(d,s)    /* Truncate to 32 */
#define ACPI_MOVE_64_TO_64(d,s)         {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
                                         ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\
                                         ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\
                                         ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];\
                                         ((  u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[4];\
                                         ((  u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[5];\
                                         ((  u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[6];\
                                         ((  u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[7];}
#endif
#endif

/* Macros based on machine integer width */

#if ACPI_MACHINE_WIDTH == 16
#define ACPI_MOVE_SIZE_TO_16(d,s)       ACPI_MOVE_16_TO_16(d,s)

#elif ACPI_MACHINE_WIDTH == 32
#define ACPI_MOVE_SIZE_TO_16(d,s)       ACPI_MOVE_32_TO_16(d,s)

#elif ACPI_MACHINE_WIDTH == 64
#define ACPI_MOVE_SIZE_TO_16(d,s)       ACPI_MOVE_64_TO_16(d,s)

#else
#error unknown ACPI_MACHINE_WIDTH
#endif

/*
 * Fast power-of-two math macros for non-optimized compilers
 */
#define _ACPI_DIV(value,power_of2)      ((u32) ((value) >> (power_of2)))
#define _ACPI_MUL(value,power_of2)      ((u32) ((value) << (power_of2)))
#define _ACPI_MOD(value,divisor)        ((u32) ((value) & ((divisor) -1)))

#define ACPI_DIV_2(a)                   _ACPI_DIV(a,1)
#define ACPI_MUL_2(a)                   _ACPI_MUL(a,1)
#define ACPI_MOD_2(a)                   _ACPI_MOD(a,2)

#define ACPI_DIV_4(a)                   _ACPI_DIV(a,2)
#define ACPI_MUL_4(a)                   _ACPI_MUL(a,2)
#define ACPI_MOD_4(a)                   _ACPI_MOD(a,4)

#define ACPI_DIV_8(a)                   _ACPI_DIV(a,3)
#define ACPI_MUL_8(a)                   _ACPI_MUL(a,3)
#define ACPI_MOD_8(a)                   _ACPI_MOD(a,8)

#define ACPI_DIV_16(a)                  _ACPI_DIV(a,4)
#define ACPI_MUL_16(a)                  _ACPI_MUL(a,4)
#define ACPI_MOD_16(a)                  _ACPI_MOD(a,16)

/*
 * Rounding macros (Power of two boundaries only)
 */
#define ACPI_ROUND_DOWN(value,boundary) (((acpi_native_uint)(value)) & (~(((acpi_native_uint) boundary)-1)))
#define ACPI_ROUND_UP(value,boundary)   ((((acpi_native_uint)(value)) + (((acpi_native_uint) boundary)-1)) & (~(((acpi_native_uint) boundary)-1)))

#define ACPI_ROUND_DOWN_TO_32_BITS(a)   ACPI_ROUND_DOWN(a,4)
#define ACPI_ROUND_DOWN_TO_64_BITS(a)   ACPI_ROUND_DOWN(a,8)
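/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source). These rounding macros require power-of-two boundaries because
 * (boundary - 1) must form a contiguous low-bit mask:
 *
 *     ACPI_ROUND_DOWN (0x1005, 8);   // == 0x1000
 *     ACPI_ROUND_UP   (0x1005, 8);   // == 0x1008
 *     ACPI_DIV_8 (64);               // == 8   (64 >> 3)
 *     ACPI_MOD_8 (67);               // == 3   (67 & 7)
 */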