
📄 bitops.h

📁 This source package contains the header files needed to compile the MIZI vivi bootloader.
💻 C header (.h)
📖 Page 1 of 2
/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That, combined with the
 * big endian byte order on S390, gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is that in the
 * architecture independent code, bit operations of the
 * form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
#include <linux/config.h>

/* set ALIGN_CS to 1 if the SMP safe bit operations should
 * align the address to an 8 byte boundary. It seems to work
 * without the alignment.
 */
#ifdef __KERNEL__
#define ALIGN_CS 0
#else
#define ALIGN_CS 1
#ifndef CONFIG_SMP
#error "bitops won't work without CONFIG_SMP"
#endif
#endif

/* bitmap tables from arch/S390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static __inline__ void set_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG operand must be 8 byte aligned */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"      /* byte offset -> bit offset */
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sllg  %3,%3,0(%2)\n"  /* make OR mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   ogr   %2,%3\n"        /* set bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
             : "cc", "memory" );
}
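/*
 * Editor's illustration, NOT part of the original header: the CS loop in
 * set_bit_cs is a compare-and-swap retry loop. A minimal C sketch of the
 * same logic, assuming GCC's __sync_val_compare_and_swap builtin in place
 * of the CSG instruction; word index and mask follow the bit numbering
 * described at the top of the file.
 */
static __inline__ void
set_bit_cs_sketch(unsigned long nr, volatile unsigned long * addr)
{
        volatile unsigned long *p = addr + (nr >> 6);   /* long containing the bit */
        unsigned long mask = 1UL << (nr & 63);          /* OR mask; bit 0 = LSB of *addr */
        unsigned long old, new;

        do {
                old = *p;                               /* current value */
                new = old | mask;                       /* with the bit set */
        } while (__sync_val_compare_and_swap(p, old, new) != old); /* retry on contention */
}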
/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static __inline__ void clear_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG operand must be 8 byte aligned */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"      /* byte offset -> bit offset */
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,-2\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   rllg  %3,%3,0(%2)\n"  /* make AND mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   ngr   %2,%3\n"        /* clear bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
             : "cc", "memory" );
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static __inline__ void change_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG operand must be 8 byte aligned */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"      /* byte offset -> bit offset */
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sllg  %3,%3,0(%2)\n"  /* make XR mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   xgr   %2,%3\n"        /* change bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
             : "cc", "memory" );
}
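/*
 * Editor's illustration, NOT part of the original header: in clear_bit_cs
 * the "rllg %3,%3,0(%2)" rotates the constant -2 (all bits one except bit
 * 0) left by the shift amount, which yields exactly ~(1UL << shift). A
 * sketch of the resulting operation, assuming GCC's __sync builtins:
 */
static __inline__ void
clear_bit_cs_sketch(unsigned long nr, volatile unsigned long * addr)
{
        volatile unsigned long *p = addr + (nr >> 6);   /* long containing the bit */
        unsigned long mask = ~(1UL << (nr & 63));       /* AND mask: -2 rotated left */

        __sync_fetch_and_and(p, mask);                  /* atomically clear the bit */
}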
/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static __inline__ int test_and_set_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG operand must be 8 byte aligned */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"      /* byte offset -> bit offset */
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sllg  %3,%3,0(%2)\n"  /* make OR mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   ogr   %2,%3\n"        /* set bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b\n"
             "   ngr   %0,%3\n"        /* isolate old bit */
             : "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
             : "cc", "memory" );
        return nr != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static __inline__ int
test_and_clear_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG operand must be 8 byte aligned */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"      /* byte offset -> bit offset */
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,-2\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   rllg  %3,%3,0(%2)\n"  /* make AND mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   ngr   %2,%3\n"        /* clear bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b\n"
             "   xgr   %0,%2\n"        /* isolate old bit */
             : "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
             : "cc", "memory" );
        return nr != 0;
}
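/*
 * Editor's illustration, NOT part of the original header: the test_and_*
 * routines run the same CS loop but also isolate the bit's previous state
 * from the pre-update word ("isolate old bit") and return it. A sketch
 * using GCC's __sync_fetch_and_or, an assumption rather than the original
 * instruction sequence:
 */
static __inline__ int
test_and_set_bit_cs_sketch(unsigned long nr, volatile unsigned long * addr)
{
        volatile unsigned long *p = addr + (nr >> 6);     /* long containing the bit */
        unsigned long mask = 1UL << (nr & 63);
        unsigned long old = __sync_fetch_and_or(p, mask); /* set bit, get old word */

        return (old & mask) != 0;                         /* previous bit value */
}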
/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static __inline__ int
test_and_change_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG operand must be 8 byte aligned */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"      /* byte offset -> bit offset */
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sllg  %3,%3,0(%2)\n"  /* make XR mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   xgr   %2,%3\n"        /* change bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b\n"
             "   ngr   %0,%3\n"        /* isolate old bit */
             : "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
             : "cc", "memory" );
        return nr != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static __inline__ void __set_bit(unsigned long nr, volatile void * addr)
{
        __asm__ __volatile__(
             "   lghi  2,56\n"         /* 56 == 7 << 3 */
             "   lghi  1,7\n"
             "   xgr   2,%0\n"         /* r2 = nr ^ 56 */
             "   nr    1,%0\n"         /* r1 = nr & 7, bit within byte */
             "   srlg  2,2,3\n"        /* r2 = (nr >> 3) ^ 7, mirrored byte index */
             "   la    2,0(2,%1)\n"    /* address of target byte */
             "   la    1,0(1,%2)\n"    /* address of mask byte in _oi_bitmap */
             "   oc    0(1,2),0(1)"    /* OR the one-byte mask into the target */
             :  : "a" (nr), "a" (addr), "a" (&_oi_bitmap)
             : "cc", "memory", "1", "2" );
}

static __inline__ void
__constant_set_bit(const unsigned long nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x01"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x02"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x04"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x08"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 4:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x10"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 5:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x20"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 6:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x40"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 7:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x80"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  }
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
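/*
 * Editor's illustration, NOT part of the original header: S390 is big
 * endian, but bit numbering starts at the LSB of each 64-bit long, so bit
 * nr lives in byte ((nr >> 3) ^ 7) relative to addr; the XOR with 7
 * mirrors the byte index within its 8-byte long. This is the computation
 * that the __constant_* switch statements hard-code per case:
 */
static __inline__ void
constant_set_bit_sketch(const unsigned long nr, volatile void * addr)
{
        volatile char *byte = (volatile char *) addr + ((nr >> 3) ^ 7); /* mirrored byte */

        *byte |= 1 << (nr & 7);   /* same effect as "oi 0(1),mask" (not atomic) */
}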
/*
 * fast, non-SMP clear_bit routine
 */
static __inline__ void __clear_bit(unsigned long nr, volatile void * addr)
{
        __asm__ __volatile__(
             "   lghi  2,56\n"
             "   lghi  1,7\n"
             "   xgr   2,%0\n"
             "   nr    1,%0\n"
             "   srlg  2,2,3\n"
             "   la    2,0(2,%1)\n"    /* address of target byte */
             "   la    1,0(1,%2)\n"    /* address of mask byte in _ni_bitmap */
             "   nc    0(1,2),0(1)"    /* AND with the inverted one-byte mask */
             :  : "d" (nr), "a" (addr), "a" (&_ni_bitmap)
             : "cc", "memory", "1", "2" );
}

static __inline__ void
__constant_clear_bit(const unsigned long nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFE"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFD"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFB"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xF7"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 4:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xEF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 5:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xDF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 6:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xBF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 7:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0x7F"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  }
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )
/*
 * fast, non-SMP change_bit routine
 */
static __inline__ void __change_bit(unsigned long nr, volatile void * addr)
{
        __asm__ __volatile__(
             "   lghi  2,56\n"
             "   lghi  1,7\n"
             "   xgr   2,%0\n"
             "   nr    1,%0\n"
             "   srlg  2,2,3\n"
             "   la    2,0(2,%1)\n"    /* address of target byte */
             "   la    1,0(1,%2)\n"    /* address of mask byte in _oi_bitmap */
             "   xc    0(1,2),0(1)"    /* XOR with the one-byte mask */
             :  : "d" (nr), "a" (addr), "a" (&_oi_bitmap)
             : "cc", "memory", "1", "2" );
}

static __inline__ void
__constant_change_bit(const unsigned long nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x01"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x02"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x04"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x08"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 4:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x10"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 5:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x20"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 6:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x40"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 7:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x80"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  }
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )
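/*
 * Editor's illustration, NOT part of the original header: a hypothetical
 * use of the *_simple macros above. __builtin_constant_p routes a
 * compile-time constant bit number to the hard-coded __constant_* variant
 * and anything else to the generic lookup-table routine.
 */
static __inline__ void
bitmap_demo(unsigned long runtime_nr)
{
        static unsigned long flags[2];          /* a 128-bit bitmap */

        set_bit_simple(5, &flags);              /* constant nr -> __constant_set_bit */
        set_bit_simple(runtime_nr, &flags);     /* variable nr -> __set_bit */
        change_bit_simple(70, &flags);          /* bit 70 lives in flags[1] */
        clear_bit_simple(5, &flags);
}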
