bitops.h

From "Linux Kernel 2.6.9 for OMAP1710" · C header file · 961 lines total · page 1 of 2

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll"
#define __SC	"sc"
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld"
#define __SC	"scd"
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_cli()			local_irq_disable()
#define __bi_save_flags(x)		local_save_flags(x)
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

/*
 * These functions for MIPS ISA > 1 are interrupt and SMP proof and
 * interrupt friendly
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\t" __LL "\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		__SC "\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m |= 1UL << (nr & SZLONG_MASK);
}
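/*
 * Illustrative usage sketch (hypothetical names; not part of the original
 * header): set_bit() indexes past a single word, so a multi-word bitmap can
 * be addressed with one flat bit number.  On a 32-bit kernel, bit 37 lands
 * in word 1, bit 5 (37 >> SZLONG_LOG == 1, 37 & SZLONG_MASK == 5).
 */
static unsigned long example_bitmap[2];		/* 64 bits on a 32-bit kernel */

static inline void example_mark_busy(void)
{
	set_bit(37, example_bitmap);	/* atomic: safe against other CPUs */
	__set_bit(5, example_bitmap);	/* non-atomic: caller must serialize */
}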
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\t" __LL "\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		__SC "\t%0, %1\n\t"
		"beqz\t%0, 1b\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
}

/*
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m &= ~(1UL << (nr & SZLONG_MASK));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\t" __LL "\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		__SC "\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\t" __LL "\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		__SC "\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}
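/*
 * Illustrative usage sketch (hypothetical names; not part of the original
 * header): test_and_set_bit() atomically returns the old bit value, which
 * is the classic way to claim a one-shot flag; the LL/SC loop above ends
 * in "sync" on SMP, so a successful claim also acts as a memory barrier.
 * Releasing the flag with clear_bit() needs the explicit barrier that
 * clear_bit()'s own documentation above calls for.
 */
static unsigned long example_lock_word;

static inline int example_try_claim(void)
{
	/* Old value 1 means another CPU holds the flag; return 1 on success. */
	return !test_and_set_bit(0, &example_lock_word);
}

static inline void example_release(void)
{
	smp_mb__before_clear_bit();	/* make prior stores visible first */
	clear_bit(0, &example_lock_word);
}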
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\t" __LL "\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		__SC "\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
	volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\t" __LL "\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		__SC "\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a ^= mask;

	return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> SZLONG_LOG;
	mask = 1 << (nr & SZLONG_MASK);
	__bi_local_irq_save(flags);
	*a |= mask;
	__bi_local_irq_restore(flags);
}
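/*
 * Illustrative sketch (hypothetical name; not part of the original header):
 * MIPS I has no ll/sc, so the functions in this branch get their atomicity
 * by masking local interrupts around the read-modify-write via the __bi_*
 * macros defined at the top of this file.  The helper below applies the
 * same pattern to set a whole mask of bits in one atomic update.
 */
static inline void example_set_mask(unsigned long mask,
	volatile unsigned long *word)
{
	__bi_flags;

	__bi_local_irq_save(flags);
	*word |= mask;			/* no interrupt can split this update */
	__bi_local_irq_restore(flags);
}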
/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;

	a += nr >> SZLONG_LOG;
	mask = 1 << (nr & SZLONG_MASK);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> SZLONG_LOG;
	mask = 1 << (nr & SZLONG_MASK);
	__bi_local_irq_save(flags);
	*a &= ~mask;
	__bi_local_irq_restore(flags);
}

static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;

	a += nr >> SZLONG_LOG;
	mask = 1 << (nr & SZLONG_MASK);
	*a &= ~mask;
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> SZLONG_LOG;
	mask = 1 << (nr & SZLONG_MASK);
	__bi_local_irq_save(flags);
	*a ^= mask;
	__bi_local_irq_restore(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
