processor.h
#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <linux/config.h>

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

/* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
#define ARCH_HAS_SCHED_DOMAIN

#define IA64_NUM_DBG_REGS	8
/*
 * Limits for PMC and PMD are set to less than maximum architected values
 * but should be sufficient for a while
 */
#ifdef XEN
/*
 * These are increased in linux-2.6.16. Montecito requires 35 PMDs.
 * This ifdef will become unnecessary when this header file is
 * upgraded to 2.6.16 or newer.
 */
#define IA64_NUM_PMC_REGS	64
#define IA64_NUM_PMD_REGS	64
#else
#define IA64_NUM_PMC_REGS	32
#define IA64_NUM_PMD_REGS	32
#endif

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE really is misnamed.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		(current->thread.task_size)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
							/* bit 5 is currently unused */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)

/*
 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30
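/*
 * Worked example (editor's sketch, not part of the original header): with
 * the shift above, converting a cycle count to nanoseconds is one multiply
 * plus one shift.  At itc_freq = 1500000000 (1.5 GHz), for instance,
 *
 *	nsec_per_cyc = (1000000000UL << IA64_NSEC_PER_CYC_SHIFT) / 1500000000UL;
 *	                                        // ~= 715827882
 *	nsec = (cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
 *
 * and 10*1000000000 << 30 is still below 2^64, which is the "10 seconds
 * worth of time" slack the comment above refers to.
 */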

#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

#ifdef XEN
#include <asm/xenprocessor.h>
#include <xen/bitops.h>
#else
/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};
#endif

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	__u32 softirq_pending;
	__u64 itm_delta;	/* # of clock cycles between clock ticks */
	__u64 itm_next;		/* interval timer mask value to use for next clock tick */
	__u64 nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 ptce_base;
	__u32 ptce_count[2];
	__u32 ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	__u64 loops_per_jiffy;
	int cpu;
	__u32 socket_id;	/* physical processor socket id */
	__u16 core_id;		/* core id */
	__u16 thread_id;	/* thread id */
	__u16 num_log;		/* Total number of logical processors on
				 * this socket that were successfully booted */
	__u8  cores_per_socket;	/* Cores per processor socket */
	__u8  threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	__u64 ppn;
	__u64 features;
	__u8 number;
	__u8 revision;
	__u8 model;
	__u8 family;
	__u8 archrev;
	char vendor[16];

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
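/*
 * Usage sketch (editor's illustration, not from the original source): go
 * through these accessors rather than caching the address of
 * local_cpu_data, since that address differs from
 * cpu_data(smp_processor_id()), which is the pitfall the comment above
 * warns about:
 *
 *	__u64 freq  = local_cpu_data->itc_freq;	// the executing CPU
 *	__u64 other = cpu_data(1)->itc_freq;	// CPU 1, assuming it is online
 */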

extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
			  | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})

#ifdef CONFIG_IA32_SUPPORT
struct desc_struct {
	unsigned int a, b;
};

#define desc_empty(desc)		(!((desc)->a + (desc)->b))
#define desc_equal(desc1, desc2)	(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

#define GDT_ENTRY_TLS_ENTRIES	3
#define GDT_ENTRY_TLS_MIN	6
#define GDT_ENTRY_TLS_MAX	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)

struct partial_page_list;
#endif
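/*
 * Worked example (editor's illustration): SET_UNALIGN_CTL() above packs
 * its value into bits 3-4 of thread.flags, so value 1 selects
 * IA64_THREAD_UAC_NOPRINT, value 2 selects IA64_THREAD_UAC_SIGBUS, and
 * value 3 selects both:
 *
 *	SET_UNALIGN_CTL(current, 2);	// raise SIGBUS on unaligned access
 *	GET_UNALIGN_CTL(current, addr);	// writes 2 back via put_user()
 *
 * (addr stands in for a hypothetical user-space int pointer.)  These
 * macros are what the generic prctl(PR_SET_UNALIGN)/prctl(PR_GET_UNALIGN)
 * path expands to on this architecture.
 */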

struct thread_struct {
	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;			/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;			/* kernel stack pointer */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 task_size;		/* limit for task size */
	__u64 rbs_bot;			/* the base address for the RBS */
	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;			/* IA32 EFLAGS reg */
	__u64 fsr;			/* IA32 floating pt status reg */
	__u64 fcr;			/* IA32 floating pt control reg */
	__u64 fir;			/* IA32 fp except. instr. reg */
	__u64 fdr;			/* IA32 fp except. data reg */
	__u64 old_k1;			/* old value of ar.k1 */
	__u64 old_iob;			/* old IOBase value */
	struct partial_page_list *ppl;	/* partial page list for 4K page size issue */
	/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];

# define INIT_THREAD_IA32	.eflag =	0,			\
				.fsr =		0,			\
				.fcr =		0x17800000037fULL,	\
				.fir =		0,			\
				.fdr =		0,			\
				.old_k1 =	0,			\
				.old_iob =	0,			\
				.ppl =		NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */

#ifdef CONFIG_PERFMON
	__u64 pmcs[IA64_NUM_PMC_REGS];
	__u64 pmds[IA64_NUM_PMD_REGS];
	void *pfm_context;		    /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;   /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pmcs =			{0UL, },	\
				.pmds =			{0UL, },	\
				.pfm_context =		NULL,		\
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif

#ifndef XEN
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
#endif
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};

#ifndef XEN
#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.task_size =	DEFAULT_TASK_SIZE,			\
	.last_fph_cpu =	-1,					\
	INIT_THREAD_IA32					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}

#define start_thread(regs,new_ip,new_sp) do {							\
	set_fs(USER_DS);									\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (unlikely(!current->mm->dumpable)) {							\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)
#endif

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (ie the swapper or direct
 * descendants who haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be free'd until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = ia64_task_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\