processor.h
来自「Linux Kernel 2.6.9 for OMAP1710」· C头文件 代码 · 共 635 行 · 第 1/2 页
H
635 行
/* Short-hand versions for a number of the above SPRNs */
/* NOTE(review): each alias expands to an SPRN_* register number presumably
 * defined earlier in this header (not visible in this chunk) — confirm. */
#define CTR	SPRN_CTR	/* Counter Register */
#define DAR	SPRN_DAR	/* Data Address Register */
#define DABR	SPRN_DABR	/* Data Address Breakpoint Register */
#define DCMP	SPRN_DCMP	/* Data TLB Compare Register */
#define DEC	SPRN_DEC	/* Decrement Register */
#define DMISS	SPRN_DMISS	/* Data TLB Miss Register */
#define DSISR	SPRN_DSISR	/* Data Storage Interrupt Status Register */
#define EAR	SPRN_EAR	/* External Address Register */
#define HASH1	SPRN_HASH1	/* Primary Hash Address Register */
#define HASH2	SPRN_HASH2	/* Secondary Hash Address Register */
#define HID0	SPRN_HID0	/* Hardware Implementation Register 0 */
#define MSRDORM	SPRN_MSRDORM	/* MSR Dormant Register */
#define NIADORM	SPRN_NIADORM	/* NIA Dormant Register */
#define TSC	SPRN_TSC	/* Thread switch control */
#define TST	SPRN_TST	/* Thread switch timeout */
#define IABR	SPRN_IABR	/* Instruction Address Breakpoint Register */
#define ICMP	SPRN_ICMP	/* Instruction TLB Compare Register */
#define IMISS	SPRN_IMISS	/* Instruction TLB Miss Register */
#define IMMR	SPRN_IMMR	/* PPC 860/821 Internal Memory Map Register */
#define L2CR	SPRN_L2CR	/* PPC 750 L2 control register */
#define __LR	SPRN_LR
#define PVR	SPRN_PVR	/* Processor Version */
#define PIR	SPRN_PIR	/* Processor ID */
#define PURR	SPRN_PURR	/* Processor Utilization of Resource Register */
//#define RPA	SPRN_RPA	/* Required Physical Address Register */
#define SDR1	SPRN_SDR1	/* MMU hash base register */
#define SPR0	SPRN_SPRG0	/* Supervisor Private Registers */
#define SPR1	SPRN_SPRG1
#define SPR2	SPRN_SPRG2
#define SPR3	SPRN_SPRG3
#define SPRG0	SPRN_SPRG0
#define SPRG1	SPRN_SPRG1
#define SPRG2	SPRN_SPRG2
#define SPRG3	SPRN_SPRG3
#define SRR0	SPRN_SRR0	/* Save and Restore Register 0 */
#define SRR1	SPRN_SRR1	/* Save and Restore Register 1 */
#define TBRL	SPRN_TBRL	/* Time Base Read Lower Register */
#define TBRU	SPRN_TBRU	/* Time Base Read Upper Register */
#define TBWL	SPRN_TBWL	/* Time Base Write Lower Register */
#define TBWU	SPRN_TBWU	/* Time Base Write Upper Register */
/* Raw SPR number, no SPRN_ alias — presumably instruction cache throttling
 * control; confirm against the architecture SPR tables. */
#define ICTC	1019
#define THRM1	SPRN_THRM1	/* Thermal Management Register 1 */
#define THRM2	SPRN_THRM2	/* Thermal Management Register 2 */
#define THRM3	SPRN_THRM3	/* Thermal Management Register 3 */
#define XER	SPRN_XER

/* Processor Version Register (PVR) field extraction */

#define PVR_VER(pvr)	(((pvr) >> 16) & 0xFFFF)	/* Version field */
#define PVR_REV(pvr)	(((pvr) >> 0) & 0xFFFF)	/* Revision field */

/* Processor Version Numbers (the PVR_VER() field of the PVR) */
#define PV_NORTHSTAR	0x0033
#define PV_PULSAR	0x0034
#define PV_POWER4	0x0035
#define PV_ICESTAR	0x0036
#define PV_SSTAR	0x0037
#define PV_POWER4p	0x0038
#define PV_970		0x0039
#define PV_POWER5	0x003A
#define PV_POWER5p	0x003B
#define PV_970FX	0x003C
#define PV_630		0x0040
#define PV_630p		0x0041

/* Platforms supported by PPC64 */
#define PLATFORM_PSERIES	0x0100
#define PLATFORM_PSERIES_LPAR	0x0101
#define PLATFORM_ISERIES_LPAR	0x0201
#define PLATFORM_LPAR		0x0001	/* bit flag: running under a hypervisor LPAR */
#define PLATFORM_POWERMAC	0x0400

/* Compatibility with drivers coming from PPC32 world */
#define _machine	(systemcfg->platform)
#define _MACH_Pmac	PLATFORM_POWERMAC

/*
 * List of interrupt controllers.
 */
#define IC_INVALID	0
#define IC_OPEN_PIC	1
#define IC_PPC_XIC	2

/* Token-pasting helpers; XGLUE pastes, GLUE expands its arguments first. */
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

/* iSeries CTRL register (for runlatch) */
#define CTRLT		0x098
#define CTRLF		0x088
#define RUNLATCH	0x0001

#ifdef __ASSEMBLY__

/*
 * Declare a global assembly function.  On 64-bit PPC (ELFv1 ABI) this emits
 * both the function descriptor "name" in the .opd section (entry address,
 * TOC base, environment word) and the actual code label ".name" in .text.
 */
#define _GLOBAL(name) \
	.section ".text"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

/* Same as _GLOBAL but without the .globl directives (file-local function). */
#define _STATIC(name) \
	.section ".text"; \
	.align 2 ; \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#else /* __ASSEMBLY__ */

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 * Uses a GCC local label and the address-of-label extension.
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

/* Macros for setting and retrieving special purpose registers */

#define mfmsr()		({unsigned long rval; \
			asm volatile("mfmsr %0" : "=r" (rval)); rval;})

/* mtmsrd with explicit L field; mtmsrd(v) uses L=0 (update entire MSR). */
#define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
				     : : "r" (v))
#define mtmsrd(v)	__mtmsrd((v), 0)

/* rn must be a compile-time constant: it is pasted into the opcode. */
#define mfspr(rn)	({unsigned long rval; \
			asm volatile("mfspr %0," __stringify(rn) \
				    : "=r" (rval)); rval;})
#define mtspr(rn, v)	asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))

#define mftb()		({unsigned long rval;	\
			asm volatile("mftb %0" : "=r" (rval)); rval;})

#define mttbl(v)	asm volatile("mttbl %0":: "r"(v))
#define mttbu(v)	asm volatile("mttbu %0":: "r"(v))

#define mfasr()		({unsigned long rval; \
			asm volatile("mfasr %0" : "=r" (rval)); rval;})

/*
 * Set the 64-bit time base from two 32-bit halves.  TBL is zeroed first so
 * the TBU write cannot be perturbed by a carry out of the old lower half.
 */
static inline void set_tb(unsigned int upper, unsigned int lower)
{
	mttbl(0);
	mttbu(upper);
	mttbl(lower);
}

/* Read the current stack pointer (GPR1 by PPC ABI convention). */
#define __get_SP()	({unsigned long sp; \
			asm volatile("mr %0,1": "=r" (sp)); sp;})

#ifdef __KERNEL__

extern int have_of;

struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/* Create a new kernel thread. */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/*
 * Bus types
 */
#define MCA_bus 0
#define MCA_bus__is_a_macro /* for versions in ksyms.c */

/* Lazy FPU handling on uni-processor */
extern struct task_struct *last_task_used_math;
extern struct task_struct *last_task_used_altivec;

/* 64-bit user address space is 41-bits (2TBs user VM) */
#define TASK_SIZE_USER64 (0x0000020000000000UL)

/*
 * 32-bit user address space is 4GB - 1 page
 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT)
 */
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))

#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
		TASK_SIZE_USER32 : TASK_SIZE_USER64)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(STACK_TOP_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(STACK_TOP_USER64 / 4))

#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)||(ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )

/* Opaque segment token used by get_fs()/set_fs() address-limit checks. */
typedef struct {
	unsigned long seg;
} mm_segment_t;

/* Per-task architectural state saved across context switches. */
struct thread_struct {
	unsigned long	ksp;		/* Kernel stack pointer */
	unsigned long	ksp_vsid;
	struct pt_regs	*regs;		/* Pointer to saved register state */
	mm_segment_t	fs;		/* for get_fs() validation */
	double		fpr[32];	/* Complete floating point set */
	unsigned long	fpscr;		/* Floating point status (plus pad) */
	unsigned long	fpexc_mode;	/* Floating-point exception mode */
	unsigned long	pad[3];		/* was saved_msr, saved_softe */
#ifdef CONFIG_ALTIVEC
	/* Complete AltiVec register set */
	vector128	vr[32] __attribute((aligned(16)));
	/* AltiVec status */
	vector128	vscr __attribute((aligned(16)));
	unsigned long	vrsave;
	int		used_vr;	/* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
};

#define ARCH_MIN_TASKALIGN 16

/* Initial kernel stack pointer: top of the statically allocated init stack. */
#define INIT_SP		(sizeof(init_stack) + (unsigned long) &init_stack)

/* Initializer for init_task's thread_struct; regs sits just below INIT_SP. */
#define INIT_THREAD  { \
	.ksp = INIT_SP, \
	.regs = (struct pt_regs *)INIT_SP - 1, \
	.fs = KERNEL_DS, \
	.fpr = {0}, \
	.fpscr = 0, \
	.fpexc_mode = MSR_FE0|MSR_FE1, \
}

/*
 * Note: the vm_start and vm_end fields here should *not*
 * be in kernel space.  (Could vm_end == vm_start perhaps?)
 */
#define IOREMAP_MMAP { &ioremap_mm, 0, 0x1000, NULL, \
		    PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, \
		    1, NULL, NULL }

extern struct mm_struct ioremap_mm;

/*
 * Return saved PC of a blocked thread. For now, this is the "user" PC
 * (0 if the task has no saved user register state).
 */
#define thread_saved_pc(tsk)    \
        ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)

unsigned long get_wchan(struct task_struct *p);

/* Saved user instruction pointer / stack pointer (GPR1), or 0 if none. */
#define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
#define KSTK_ESP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)

/* Get/set floating-point exception mode */
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))

extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);

/* Convert between the MSR FE0/FE1 bits and the 2-bit prctl fp-exception
 * mode value (and back).  Shift counts position the bits so FE0 becomes
 * bit 1 and FE1 becomes bit 0 of the packed mode. */
static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static inline unsigned long __pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}

/* Drop SMT thread priority briefly, then restore — a polite busy-wait. */
#define cpu_relax()	do { HMT_low(); HMT_medium(); barrier(); } while (0)

/*
 * Prefetch macros.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* dcbt: touch a cache block for an expected read. */
static inline void prefetch(const void *x)
{
	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}

/* dcbtst: touch a cache block for an expected store. */
static inline void prefetchw(const void *x)
{
	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}

#define spin_lock_prefetch(x)	prefetchw(x)

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

/*
 * Number of entries in the SLB. If this ever changes we should handle
 * it with a use a cpu feature fixup.
 */
#define SLB_NUM_ENTRIES 64

#endif /* __ASM_PPC64_PROCESSOR_H */
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?