📄 bitops.h

📖 Page 1 of 2
#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#include <linux/config.h>

/*
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */

/*
 * Set ALIGN_CS to 1 if the SMP safe bit operations should
 * align the address to a 4 byte boundary. It seems to work
 * without the alignment.
 */
#ifdef __KERNEL__
#define ALIGN_CS 0
#else
#define ALIGN_CS 1
#ifndef CONFIG_SMP
#error "bitops won't work without CONFIG_SMP"
#endif
#endif

/* bitmap tables from arch/S390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static __inline__ void set_bit_cs(int nr, volatile void * addr)
{
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   1,3\n"          /* CS must be aligned on 4 byte b. */
             "   nr    1,%1\n"         /* isolate last 2 bits of address */
             "   xr    %1,1\n"         /* make addr % 4 == 0 */
             "   sll   1,3\n"
             "   ar    %0,1\n"         /* add alignment to bitnr */
#endif
             "   lhi   1,31\n"
             "   nr    1,%0\n"         /* make shift value */
             "   xr    %0,1\n"
             "   srl   %0,3\n"
             "   lhi   2,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sll   2,0(1)\n"       /* make OR mask */
             "   l     %0,0(%1)\n"
             "0: lr    1,%0\n"         /* CS loop starts here */
             "   or    1,2\n"          /* set bit */
             "   cs    %0,1,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr) :
             : "cc", "memory", "1", "2" );
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static __inline__ void clear_bit_cs(int nr, volatile void * addr)
{
        static const int mask = -1;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   1,3\n"          /* CS must be aligned on 4 byte b. */
             "   nr    1,%1\n"         /* isolate last 2 bits of address */
             "   xr    %1,1\n"         /* make addr % 4 == 0 */
             "   sll   1,3\n"
             "   ar    %0,1\n"         /* add alignment to bitnr */
#endif
             "   lhi   1,31\n"
             "   nr    1,%0\n"         /* make shift value */
             "   xr    %0,1\n"
             "   srl   %0,3\n"
             "   lhi   2,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sll   2,0(1)\n"
             "   x     2,%2\n"         /* make AND mask */
             "   l     %0,0(%1)\n"
             "0: lr    1,%0\n"         /* CS loop starts here */
             "   nr    1,2\n"          /* clear bit */
             "   cs    %0,1,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr) : "m" (mask)
             : "cc", "memory", "1", "2" );
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static __inline__ void change_bit_cs(int nr, volatile void * addr)
{
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   1,3\n"          /* CS must be aligned on 4 byte b. */
             "   nr    1,%1\n"         /* isolate last 2 bits of address */
             "   xr    %1,1\n"         /* make addr % 4 == 0 */
             "   sll   1,3\n"
             "   ar    %0,1\n"         /* add alignment to bitnr */
#endif
             "   lhi   1,31\n"
             "   nr    1,%0\n"         /* make shift value */
             "   xr    %0,1\n"
             "   srl   %0,3\n"
             "   lhi   2,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sll   2,0(1)\n"       /* make XR mask */
             "   l     %0,0(%1)\n"
             "0: lr    1,%0\n"         /* CS loop starts here */
             "   xr    1,2\n"          /* change bit */
             "   cs    %0,1,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr) :
             : "cc", "memory", "1", "2" );
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static __inline__ int test_and_set_bit_cs(int nr, volatile void * addr)
{
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   1,3\n"          /* CS must be aligned on 4 byte b. */
             "   nr    1,%1\n"         /* isolate last 2 bits of address */
             "   xr    %1,1\n"         /* make addr % 4 == 0 */
             "   sll   1,3\n"
             "   ar    %0,1\n"         /* add alignment to bitnr */
#endif
             "   lhi   1,31\n"
             "   nr    1,%0\n"         /* make shift value */
             "   xr    %0,1\n"
             "   srl   %0,3\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   lhi   2,1\n"
             "   sll   2,0(1)\n"       /* make OR mask */
             "   l     %0,0(%1)\n"
             "0: lr    1,%0\n"         /* CS loop starts here */
             "   or    1,2\n"          /* set bit */
             "   cs    %0,1,0(%1)\n"
             "   jl    0b\n"
             "   nr    %0,2\n"         /* isolate old bit */
             : "+a" (nr), "+a" (addr) :
             : "cc", "memory", "1", "2" );
        return nr;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static __inline__ int test_and_clear_bit_cs(int nr, volatile void * addr)
{
        static const int mask = -1;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   1,3\n"          /* CS must be aligned on 4 byte b. */
             "   nr    1,%1\n"         /* isolate last 2 bits of address */
             "   xr    %1,1\n"         /* make addr % 4 == 0 */
             "   sll   1,3\n"
             "   ar    %0,1\n"         /* add alignment to bitnr */
#endif
             "   lhi   1,31\n"
             "   nr    1,%0\n"         /* make shift value */
             "   xr    %0,1\n"
             "   srl   %0,3\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   lhi   2,1\n"
             "   sll   2,0(1)\n"
             "   x     2,%2\n"         /* make AND mask */
             "   l     %0,0(%1)\n"
             "0: lr    1,%0\n"         /* CS loop starts here */
             "   nr    1,2\n"          /* clear bit */
             "   cs    %0,1,0(%1)\n"
             "   jl    0b\n"
             "   x     2,%2\n"
             "   nr    %0,2\n"         /* isolate old bit */
             : "+a" (nr), "+a" (addr) : "m" (mask)
             : "cc", "memory", "1", "2" );
        return nr;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static __inline__ int test_and_change_bit_cs(int nr, volatile void * addr)
{
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lhi   1,3\n"          /* CS must be aligned on 4 byte b. */
             "   nr    1,%1\n"         /* isolate last 2 bits of address */
             "   xr    %1,1\n"         /* make addr % 4 == 0 */
             "   sll   1,3\n"
             "   ar    %0,1\n"         /* add alignment to bitnr */
#endif
             "   lhi   1,31\n"
             "   nr    1,%0\n"         /* make shift value */
             "   xr    %0,1\n"
             "   srl   %0,3\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   lhi   2,1\n"
             "   sll   2,0(1)\n"       /* make OR mask */
             "   l     %0,0(%1)\n"
             "0: lr    1,%0\n"         /* CS loop starts here */
             "   xr    1,2\n"          /* change bit */
             "   cs    %0,1,0(%1)\n"
             "   jl    0b\n"
             "   nr    %0,2\n"         /* isolate old bit */
             : "+a" (nr), "+a" (addr) :
             : "cc", "memory", "1", "2" );
        return nr;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__(
             "   lhi   2,24\n"
             "   lhi   1,7\n"
             "   xr    2,%0\n"
             "   nr    1,%0\n"
             "   srl   2,3\n"
             "   la    2,0(2,%1)\n"
             "   la    1,0(1,%2)\n"
             "   oc    0(1,2),0(1)"
             : : "r" (nr), "a" (addr), "a" (&_oi_bitmap)
             : "cc", "memory", "1", "2" );
}

static __inline__ void __constant_set_bit(const int nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x01"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x02"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x04"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x08"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 4:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x10"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 5:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x20"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 6:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x40"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 7:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x80"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  }
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )

/*
 * fast, non-SMP clear_bit routine
 */
static __inline__ void __clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__(
             "   lhi   2,24\n"
             "   lhi   1,7\n"
             "   xr    2,%0\n"
             "   nr    1,%0\n"
             "   srl   2,3\n"
             "   la    2,0(2,%1)\n"
             "   la    1,0(1,%2)\n"
             "   nc    0(1,2),0(1)"
             : : "r" (nr), "a" (addr), "a" (&_ni_bitmap)
             : "cc", "memory", "1", "2" );
}

static __inline__ void __constant_clear_bit(const int nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFE"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFD"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFB"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xF7"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 4:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xEF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );  /* "1" added to clobbers; it was missing here but reg 1 is written, as in the sibling cases */
    break;
  case 5:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xDF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 6:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xBF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 7:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0x7F"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  }
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__(
             "   lhi   2,24\n"
             "   lhi   1,7\n"
             "   xr    2,%0\n"
             "   nr    1,%0\n"
             "   srl   2,3\n"
             "   la    2,0(2,%1)\n"
             "   la    1,0(1,%2)\n"
             "   xc    0(1,2),0(1)"
             : : "r" (nr), "a" (addr), "a" (&_oi_bitmap)
             : "cc", "memory", "1", "2" );
}

static __inline__ void __constant_change_bit(const int nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x01"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );  /* "1" added to clobbers in these cases too; reg 1 is written */
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x02"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x04"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x08"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
                          : : "1", "cc", "memory" );
    break;
  /* listing ends here; continued on page 2 */
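
The bit-numbering scheme described in the header comment is easy to verify with a small standalone program. The following is a minimal, hypothetical demo, not part of the header: it checks that the word-wise view ("flags |= 1 << nr") and the byte-wise view used by the OI/NI/XI based routines above, namely byte (nr >> 3) ^ 3 of the word with mask 1 << (nr & 7), select the same bit once the word is stored in big-endian byte order. The ^ 3 is exactly the ((nr>>3)^3) expression that appears in __constant_set_bit and friends.

#include <assert.h>
#include <stdio.h>

int main(void)
{
        int nr;

        for (nr = 0; nr < 32; nr++) {
                unsigned int word = 1u << nr;   /* generic "flags |= 1 << nr" */
                unsigned char bytes[4];

                /* Store the word as S390 does: big-endian byte order. */
                bytes[0] = (word >> 24) & 0xff;
                bytes[1] = (word >> 16) & 0xff;
                bytes[2] = (word >>  8) & 0xff;
                bytes[3] = word & 0xff;

                /* Byte-wise view used by __constant_set_bit and friends. */
                assert(bytes[(nr >> 3) ^ 3] == (unsigned char)(1u << (nr & 7)));
        }
        printf("bit numbering consistent for all 32 bits\n");
        return 0;
}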

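For readers who have not met the CS (compare and swap) instruction, the retry loop used by the *_cs routines can be sketched with portable C11 atomics. This is a hypothetical illustration of the pattern only: the function name test_and_set_bit_portable and the use of <stdatomic.h> are assumptions of this sketch, not part of the kernel header, which must use the hardware instruction directly. The word and mask computation follows the numbering documented in the header comment (bit 0 is the LSB of the first word, bit 32 the LSB of the next).

#include <stdatomic.h>
#include <stdio.h>

static int test_and_set_bit_portable(int nr, _Atomic unsigned int *addr)
{
        _Atomic unsigned int *word = addr + (nr >> 5); /* word holding the bit */
        unsigned int mask = 1u << (nr & 31);           /* "make OR mask" */
        unsigned int old = atomic_load(word);

        /* CS loop: retry while another CPU updates the word under us;
         * on failure, old is reloaded with the current value. */
        while (!atomic_compare_exchange_weak(word, &old, old | mask))
                ;

        return (old & mask) != 0;                      /* "isolate old bit" */
}

int main(void)
{
        _Atomic unsigned int bitmap[2] = { 0, 0 };

        printf("first set:  %d\n", test_and_set_bit_portable(33, bitmap)); /* 0 */
        printf("second set: %d\n", test_and_set_bit_portable(33, bitmap)); /* 1 */
        return 0;
}

As in the assembler version, the loop re-reads the word and retries whenever another CPU modified it between the load and the compare and swap, and the return value isolates the old state of the bit, which is what the "jl 0b" / "nr %0,2" sequence does above.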