/*
 * io.h — excerpt from the Linux kernel MIPS <asm/io.h> (C header,
 * 607 lines in total; this extract covers page 1 of 2).
 */
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ \static inline void pfx##write##bwlq(type val, \ volatile void __iomem *mem) \{ \ volatile type *__mem; \ type __val; \ \ __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ \ __val = pfx##ioswab##bwlq(__mem, val); \ \ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ *__mem = __val; \ else if (cpu_has_64bits) { \ unsigned long __flags; \ type __tmp; \ \ if (irq) \ local_irq_save(__flags); \ __asm__ __volatile__( \ ".set mips3" "\t\t# __writeq""\n\t" \ "dsll32 %L0, %L0, 0" "\n\t" \ "dsrl32 %L0, %L0, 0" "\n\t" \ "dsll32 %M0, %M0, 0" "\n\t" \ "or %L0, %L0, %M0" "\n\t" \ "sd %L0, %2" "\n\t" \ ".set mips0" "\n" \ : "=r" (__tmp) \ : "0" (__val), "m" (*__mem)); \ if (irq) \ local_irq_restore(__flags); \ } else \ BUG(); \} \ \static inline type pfx##read##bwlq(const volatile void __iomem *mem) \{ \ volatile type *__mem; \ type __val; \ \ __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ \ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ __val = *__mem; \ else if (cpu_has_64bits) { \ unsigned long __flags; \ \ if (irq) \ local_irq_save(__flags); \ __asm__ __volatile__( \ ".set mips3" "\t\t# __readq" "\n\t" \ "ld %L0, %1" "\n\t" \ "dsra32 %M0, %L0, 0" "\n\t" \ "sll %L0, %L0, 0" "\n\t" \ ".set mips0" "\n" \ : "=r" (__val) \ : "m" (*__mem)); \ if (irq) \ local_irq_restore(__flags); \ } else { \ __val = 0; \ BUG(); \ } \ \ return pfx##ioswab##bwlq(__mem, __val); \}#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \ \static inline void pfx##out##bwlq##p(type val, unsigned long port) \{ \ volatile type *__addr; \ type __val; \ \ __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \ \ __val = pfx##ioswab##bwlq(__addr, val); \ \ /* Really, we want this to be atomic */ \ BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ \ *__addr = __val; \ slow; \} \ \static inline type pfx##in##bwlq##p(unsigned long port) \{ \ volatile type *__addr; \ type __val; 
\ \ __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \ \ BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ \ __val = *__addr; \ slow; \ \ return pfx##ioswab##bwlq(__addr, __val); \}#define __BUILD_MEMORY_PFX(bus, bwlq, type) \ \__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)#define BUILDIO_MEM(bwlq, type) \ \__BUILD_MEMORY_PFX(__raw_, bwlq, type) \__BUILD_MEMORY_PFX(, bwlq, type) \__BUILD_MEMORY_PFX(__mem_, bwlq, type) \BUILDIO_MEM(b, u8)BUILDIO_MEM(w, u16)BUILDIO_MEM(l, u32)BUILDIO_MEM(q, u64)#define __BUILD_IOPORT_PFX(bus, bwlq, type) \ __BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \ __BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)#define BUILDIO_IOPORT(bwlq, type) \ __BUILD_IOPORT_PFX(, bwlq, type) \ __BUILD_IOPORT_PFX(__mem_, bwlq, type)BUILDIO_IOPORT(b, u8)BUILDIO_IOPORT(w, u16)BUILDIO_IOPORT(l, u32)#ifdef CONFIG_64BITBUILDIO_IOPORT(q, u64)#endif#define __BUILDIO(bwlq, type) \ \__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)__BUILDIO(q, u64)#define readb_relaxed readb#define readw_relaxed readw#define readl_relaxed readl#define readq_relaxed readq/* * Some code tests for these symbols */#define readq readq#define writeq writeq#define __BUILD_MEMORY_STRING(bwlq, type) \ \static inline void writes##bwlq(volatile void __iomem *mem, \ const void *addr, unsigned int count) \{ \ const volatile type *__addr = addr; \ \ while (count--) { \ __mem_write##bwlq(*__addr, mem); \ __addr++; \ } \} \ \static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \ unsigned int count) \{ \ volatile type *__addr = addr; \ \ while (count--) { \ *__addr = __mem_read##bwlq(mem); \ __addr++; \ } \}#define __BUILD_IOPORT_STRING(bwlq, type) \ \static inline void outs##bwlq(unsigned long port, const void *addr, \ unsigned int count) \{ \ const volatile type *__addr = addr; \ \ while (count--) { \ __mem_out##bwlq(*__addr, port); \ __addr++; \ } \} \ \static inline void ins##bwlq(unsigned long port, void *addr, \ unsigned int count) \{ \ volatile type *__addr 
= addr; \ \ while (count--) { \ *__addr = __mem_in##bwlq(port); \ __addr++; \ } \}#define BUILDSTRING(bwlq, type) \ \__BUILD_MEMORY_STRING(bwlq, type) \__BUILD_IOPORT_STRING(bwlq, type)BUILDSTRING(b, u8)BUILDSTRING(w, u16)BUILDSTRING(l, u32)#ifdef CONFIG_64BITBUILDSTRING(q, u64)#endif/* Depends on MIPS II instruction set */#define mmiowb() asm volatile ("sync" ::: "memory")static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count){ memset((void __force *) addr, val, count);}static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count){ memcpy(dst, (void __force *) src, count);}static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count){ memcpy((void __force *) dst, src, count);}/* * ISA space is 'always mapped' on currently supported MIPS systems, no need * to explicitly ioremap() it. The fact that the ISA IO space is mapped * to PAGE_OFFSET is pure coincidence - it does not mean ISA values * are physical addresses. The following constant pointer can be * used as the IO-area pointer (it can be iounmapped as well, so the * analogy with PCI is quite large): */#define __ISA_IO_base ((char *)(isa_slot_offset))/* * The caches on some architectures aren't dma-coherent and have need to * handle this in software. There are three types of operations that * can be applied to dma buffers. * * - dma_cache_wback_inv(start, size) makes caches and coherent by * writing the content of the caches back to memory, if necessary. * The function also invalidates the affected part of the caches as * necessary before DMA transfers from outside to memory. * - dma_cache_wback(start, size) makes caches and coherent by * writing the content of the caches back to memory, if necessary. * The function also invalidates the affected part of the caches as * necessary before DMA transfers from outside to memory. * - dma_cache_inv(start, size) invalidates the affected parts of the * caches. 
Dirty lines of the caches may be written back or simply * be discarded. This operation is necessary before dma operations * to the memory. * * This API used to be exported; it now is for arch code internal use only. */#ifdef CONFIG_DMA_NONCOHERENTextern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);#define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size)#define dma_cache_wback(start, size) _dma_cache_wback(start, size)#define dma_cache_inv(start, size) _dma_cache_inv(start, size)#else /* Sane hardware */#define dma_cache_wback_inv(start,size) \ do { (void) (start); (void) (size); } while (0)#define dma_cache_wback(start,size) \ do { (void) (start); (void) (size); } while (0)#define dma_cache_inv(start,size) \ do { (void) (start); (void) (size); } while (0)#endif /* CONFIG_DMA_NONCOHERENT *//* * Read a 32-bit register that requires a 64-bit read cycle on the bus. * Avoid interrupt mucking, just adjust the address for 4-byte access. * Assume the addresses are 8-byte aligned. */#ifdef __MIPSEB__#define __CSR_32_ADJUST 4#else#define __CSR_32_ADJUST 0#endif#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))/* * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access */#define xlate_dev_mem_ptr(p) __va(p)/* * Convert a virtual cached pointer to an uncached pointer */#define xlate_dev_kmem_ptr(p) p#endif /* _ASM_IO_H */