⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 bitops.h

📁 嵌入式系统设计与实例开发实验教材二源码 多线程应用程序设计 串行端口程序设计 AD接口实验 CAN总线通信实验 GPS通信实验 Linux内核移植与编译实验 IC卡读写实验 SD驱动使
💻 H
字号:
#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#include <linux/config.h>
#include <linux/kernel.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

/*
 * Atomically set bit 'nr' in the bitmap at 'addr'.
 *
 * Uses an Alpha load-locked/store-conditional (ldl_l/stl_c) loop:
 * stl_c writes 0 to its register operand if another CPU touched the
 * locked 32-bit word, in which case we branch out of line (subsection 2)
 * and retry from label 1.  Bits are addressed as 32-bit words here
 * (nr >> 5 selects the word, nr & 31 the bit within it).
 */
static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 *
 * Plain read-modify-write; only safe when the caller guarantees no
 * concurrent access to the same word.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Atomically clear bit 'nr' in the bitmap at 'addr'.
 * Same ll/sc retry structure as set_bit, with an AND of the
 * inverted mask instead of an OR.
 */
static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	and %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (~(1UL << (nr & 31))), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

/*
 * Atomically toggle bit 'nr' in the bitmap at 'addr'.
 */
static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * Atomically set bit 'nr' and return whether it was already set.
 *
 * If the bit is already set (bne %2,2f) the store is skipped entirely.
 * On SMP an 'mb' barrier follows the successful operation so it can be
 * used for lock-acquire style ordering; the 'memory' clobber keeps the
 * compiler from caching values across the call.
 */
static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

/*
 * Atomically clear bit 'nr' and return whether it was previously set.
 * Mirror image of test_and_set_bit: skips the store if the bit is
 * already clear (beq %2,2f).
 */
static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

/*
 * WARNING: non atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

/*
 * Atomically toggle bit 'nr' and return its previous state.
 * Unlike the set/clear variants there is no early-out: a toggle
 * always stores, so only the stl_c-failure retry path exists.
 */
static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * Non-atomically read bit 'nr'; returns 0 or 1.
 */
static inline int
test_bit(int nr, volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 *
 * Do a binary search on the bits.  Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum = 0;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	if (x & 0xF0) sum += 4;
	if (x & 0xCC) sum += 2;
	if (x & 0xAA) sum += 1;

	return sum;
}

static inline unsigned long ffz(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	/* Whee.  EV67 can calculate it directly.  */
	unsigned long result;
	__asm__("cttz %1,%0" : "=r"(result) : "r"(~word));
	return result;
#else
	unsigned long bits, qofs, bofs;

	/* cmpbge against all-ones finds which byte holds a zero bit;
	   extbl then extracts that byte so ffz_b can finish the search. */
	__asm__("cmpbge %1,%2,%0" : "=r"(bits) : "r"(word), "r"(~0UL));
	qofs = ffz_b(bits);
	__asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	int result = ffz(~word);
	return word ? result+1 : 0;
}

/* Compute powers of two for the given integer.  */
static inline int floor_log2(unsigned long word)
{
	long bit;
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	__asm__("ctlz %1,%0" : "=r"(bit) : "r"(word));
	return 63 - bit;
#else
	/* Fallback: count shifts; returns -1 for word == 0. */
	for (bit = -1; word ; bit++)
		word >>= 1;
	return bit;
#endif
}

static inline int ceil_log2(unsigned int word)
{
	long bit = floor_log2(word);
	/* Round up unless 'word' is an exact power of two. */
	return bit + (word > (1UL << bit));
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#if defined(__alpha_cix__) && defined(__alpha_fix__)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long hweight64(unsigned long w)
{
	unsigned long result;
	__asm__("ctpop %1,%0" : "=r"(result) : "r"(w));
	return result;
}

#define hweight32(x) hweight64((x) & 0xfffffffful)
#define hweight16(x) hweight64((x) & 0xfffful)
#define hweight8(x)  hweight64((x) & 0xfful)
#else
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)
#endif

#endif /* __KERNEL__ */

/*
 * Find next zero bit in a bitmap reasonably efficiently..
 *
 * Scans 64-bit words starting at bit 'offset'; returns the index of the
 * first zero bit, or 'size' if none is found before the end.  The three
 * phases are: a partial leading word (bits below 'offset' masked to 1),
 * a full-word scan, and a partial trailing word (bits past 'size'
 * masked to 1 so they can never match).
 */
static inline unsigned long
find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (64-offset);
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)        /* Are any bits zero? */
		return result + size; /* Nope. */
found_middle:
	return result + ffz(tmp);
}

/*
 * The optimizer actually does good code for this case..
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#ifdef __KERNEL__

/* ext2 on-disk bitmaps are little-endian, which matches Alpha's native
   byte order, so the plain (non-atomic) primitives are reused as-is. */
#define ext2_set_bit                 __test_and_set_bit
#define ext2_clear_bit               __test_and_clear_bit
#define ext2_test_bit                test_bit
#define ext2_find_first_zero_bit     find_first_zero_bit
#define ext2_find_next_zero_bit      find_next_zero_bit

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -