system.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <asm/sgidefs.h>

#include <linux/kernel.h>

#include <asm/addrspace.h>
#include <asm/ptrace.h>
#include <asm/hazards.h>

__asm__ (
	".macro\tlocal_irq_enable\n\t"
	".set\tpush\n\t"
	".set\treorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t$1,$12\n\t"
	"ori\t$1,0x1f\n\t"
	"xori\t$1,0x1e\n\t"
	"mtc0\t$1,$12\n\t"
	"irq_enable_hazard\n\t"
	".set\tpop\n\t"
	".endm");

static inline void local_irq_enable(void)
{
	__asm__ __volatile__(
		"local_irq_enable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
__asm__ (
	".macro\tlocal_irq_disable\n\t"
	".set\tpush\n\t"
	".set\tnoat\n\t"
	"mfc0\t$1,$12\n\t"
	"ori\t$1,1\n\t"
	"xori\t$1,1\n\t"
	".set\tnoreorder\n\t"
	"mtc0\t$1,$12\n\t"
	"irq_disable_hazard\n\t"
	".set\tpop\n\t"
	".endm");

static inline void local_irq_disable(void)
{
	__asm__ __volatile__(
		"local_irq_disable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

__asm__ (
	".macro\tlocal_save_flags flags\n\t"
	".set\tpush\n\t"
	".set\treorder\n\t"
	"mfc0\t\\flags, $12\n\t"
	".set\tpop\n\t"
	".endm");

#define local_save_flags(x)						\
__asm__ __volatile__(							\
	"local_save_flags %0"						\
	: "=r" (x))

__asm__ (
	".macro\tlocal_irq_save result\n\t"
	".set\tpush\n\t"
	".set\treorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t\\result, $12\n\t"
	"ori\t$1, \\result, 1\n\t"
	"xori\t$1, 1\n\t"
	".set\tnoreorder\n\t"
	"mtc0\t$1, $12\n\t"
	"irq_disable_hazard\n\t"
	".set\tpop\n\t"
	".endm");

#define local_irq_save(x)						\
__asm__ __volatile__(							\
	"local_irq_save\t%0"						\
	: "=r" (x)							\
	: /* no inputs */						\
	: "memory")

__asm__ (
	".macro\tlocal_irq_restore flags\n\t"
	".set\tnoreorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t$1, $12\n\t"
	"andi\t\\flags, 1\n\t"
	"ori\t$1, 1\n\t"
	"xori\t$1, 1\n\t"
	"or\t\\flags, $1\n\t"
	"mtc0\t\\flags, $12\n\t"
	"irq_disable_hazard\n\t"
	".set\tat\n\t"
	".set\treorder\n\t"
	".endm");

#define local_irq_restore(flags)					\
do {									\
	unsigned long __tmp1;						\
									\
	__asm__ __volatile__(						\
		"local_irq_restore\t%0"					\
		: "=r" (__tmp1)						\
		: "0" (flags)						\
		: "memory");						\
} while(0)

#define irqs_disabled()							\
({									\
	unsigned long flags;						\
	local_save_flags(flags);					\
	!(flags & 1);							\
})
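/*
 * Usage sketch (added for illustration; not part of the original
 * header): the standard pattern for masking local interrupts around a
 * short critical section.  local_irq_save() stashes the whole CP0
 * Status register ($12) in "flags" and clears the IE bit;
 * local_irq_restore() merges the saved IE bit back into Status, so
 * the pair nests safely.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// IRQs now masked on this CPU
 *	...update per-CPU data...	// critical section
 *	local_irq_restore(flags);	// previous IE state restored
 */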
/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
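/*
 * Usage sketch (added for illustration, assuming a generic
 * producer/consumer pair sharing "data" and "ready"): smp_wmb() keeps
 * the data store ahead of the flag store on the writer, and smp_rmb()
 * keeps the flag load ahead of the data load on the reader.  On
 * !CONFIG_SMP both degrade to plain compiler barriers, as defined
 * above.
 *
 *	writer				reader
 *
 *	data = 42;			while (!ready)
 *	smp_wmb();				;
 *	ready = 1;			smp_rmb();
 *					BUG_ON(data != 42);
 */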
/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#define switch_to(prev,next,last)					\
do {									\
	(last) = resume(prev, next, next->thread_info);			\
} while(0)

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

#ifdef CONFIG_CPU_HAS_LLSC
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tpush\t\t\t\t# xchg_u32\n\t"
		".set\tnoreorder\n\t"
		".set\tnomacro\n\t"
		"ll\t%0, %3\n"
		"1:\tmove\t%2, %z4\n\t"
		"sc\t%2, %1\n\t"
		"beqzl\t%2, 1b\n\t"
		" ll\t%0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\tpop"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
#else
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);	/* implies memory barrier  */
#endif

	return retval;
}

#ifdef CONFIG_MIPS64
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

#ifdef CONFIG_CPU_HAS_LLDSCD
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tpush\t\t\t\t# xchg_u64\n\t"
		".set\tnoreorder\n\t"
		".set\tnomacro\n\t"
		"lld\t%0, %3\n"
		"1:\tmove\t%2, %z4\n\t"
		"scd\t%2, %1\n\t"
		"beqzl\t%2, 1b\n\t"
		" lld\t%0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\tpop"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
#else
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);	/* implies memory barrier  */
#endif

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

#ifdef CONFIG_CPU_HAS_LLSC
	__asm__ __volatile__(
	"	.set	noat					\n"
	"1:	ll	%0, %2			# __cmpxchg_u32	\n"
	"	bne	%0, %z3, 2f				\n"
	"	move	$1, %z4					\n"
	"	sc	$1, %1					\n"
	"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
	"	sync						\n"
#endif
	"2:							\n"
	"	.set	at					\n"
	: "=&r" (retval), "=m" (*m)
	: "R" (*m), "Jr" (old), "Jr" (new)
	: "memory");
#else
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier  */
#endif

	return retval;
}

#ifdef CONFIG_MIPS64
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

#ifdef CONFIG_CPU_HAS_LLDSCD
	__asm__ __volatile__(
	"	.set	noat					\n"
	"1:	lld	%0, %2			# __cmpxchg_u64	\n"
	"	bne	%0, %z3, 2f				\n"
	"	move	$1, %z4					\n"
	"	scd	$1, %1					\n"
	"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
	"	sync						\n"
#endif
	"2:							\n"
	"	.set	at					\n"
	: "=&r" (retval), "=m" (*m)
	: "R" (*m), "Jr" (old), "Jr" (new)
	: "memory");
#else
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier  */
#endif

	return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
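/*
 * Usage sketch (added for illustration; "lock" and "counter" are
 * hypothetical variables, not part of this header): a test-and-set
 * lock built on xchg(), and a lock-free increment built on cmpxchg().
 * Both primitives return the previous contents of the word, so the
 * xchg() caller owns the lock when it sees 0 back, and the cmpxchg()
 * caller knows its update landed when the return value equals "old".
 *
 *	static volatile int lock, counter;
 *
 *	while (xchg(&lock, 1) != 0)
 *		;			// spin until we stored the 1
 *	...critical section...
 *	lock = 0;
 *
 *	int old;
 *	do {
 *		old = counter;
 *	} while (cmpxchg(&counter, old, old + 1) != old);
 */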
extern void *set_except_vector(int n, void *addr);
extern void per_cpu_trap_init(void);

extern NORET_TYPE void __die(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);

#define die(msg, regs)							\
	__die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs)					\
	__die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)

extern int serial_console;
extern int stop_a_enabled;

static __inline__ int con_is_present(void)
{
	return serial_console ? 0 : 1;
}

/*
 * Taken from include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define prepare_arch_switch(rq, next)					\
do {									\
	spin_lock(&(next)->switch_lock);				\
	spin_unlock(&(rq)->lock);					\
} while (0)
#define finish_arch_switch(rq, prev)	spin_unlock_irq(&(prev)->switch_lock)
#define task_running(rq, p)		((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))

#endif /* _ASM_SYSTEM_H */