📄 smtc.c
字号:
/* Copyright (C) 2004 Mips Technologies, Inc */

/*
 * MIPS MT SMTC support: boot-time configuration state, command-line
 * overrides, and IPI/ASID bookkeeping shared by the rest of this file.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */
unsigned long irq_hwmask[NR_IRQS];

/*
 * Critical-section helpers that pair local interrupt masking with
 * disabling MT execution, either at TC granularity (dmt/emt) or for
 * the whole VPE (dvpe/evpe).  NOTE: these expand to multiple
 * statements and expect the caller to have declared local variables
 * named "flags" and "mtflags".
 */
#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)

/*
 * Data structures purely associated with SMTC parallelism
 */

/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */
asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

/*
 * Clock interrupt "latch" buffers, per "CPU"
 */
static atomic_t ipi_timer_latch[NR_CPUS];

/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */
#define IPIBUF_PER_CPU 4

/* Per-CPU inbound IPI message queues, plus the shared free-buffer pool */
static struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;

/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);

/* Global SMTC Status */

unsigned int smtc_status = 0;

/* Boot command line configuration overrides */

static int vpe0limit;
static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;

/* "vpe0tcs=N": parse override for the number of TCs bound to VPE 0 */
static int __init vpe0tcs(char *str)
{
	get_option(&str, &vpe0limit);

	return 1;
}

/* "ipibufs=N": parse override for the number of IPI buffers */
static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}

/* "nostlb": inhibit shared-TLB configuration (see smtc_configure_tlb()) */
static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}

/*
 * "asidmask=M": override the ASID mask.  Only contiguous low-bit
 * masks (0x1 .. 0xff) are accepted; anything else is rejected with
 * a console message and the default is kept.
 */
static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}

__setup("vpe0tcs=", vpe0tcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);

#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

/* "hangtrig": enable a logic-analyser trigger on suspected TC hangs */
static int hang_trig = 0;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}

__setup("hangtrig", hangtrig_enable);

#define DEFAULT_BLOCKED_IPI_LIMIT 32

/* "tintq=N": parse override for the blocked timer-IPI queue limit */
static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);

static int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
static int vpemask[2][8] = {
	{0, 0, 1, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 0, 0, 1}
};
int tcnoprog[NR_CPUS];
/* One-time-init guard for the idle-hook debug state (declaration
 * continues on the next source line) */
static atomic_t
idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */

/* Deprecated stub kept for callers; only logs that it was invoked. */
void __init sanitize_tlb_entries(void)
{
	printk("Deprecated sanitize_tlb_entries() invoked\n");
}


/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 *
 * If the core has multiple VPEs and sharing is possible (MVPConf0.TLBS
 * set and "nostlb" not given), enable the shared TLB (MVPCONTROL_STLB)
 * and record its size in cpu_data.  When the programmable shared size
 * (MVPConf0.PTLBE) reads as zero, the size is obtained by summing each
 * VPE's Config1 TLB size, binding TC 1 to each VPE in turn to read it.
 */

static void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();

	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}

					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC );
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
			ehb();

			/*
			 * Setup kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" gets copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			/* MIPS32 limits TLB indices to 64 */
			if (tlbsiz > 64)
				tlbsiz = 64;
			cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;
			local_flush_tlb_all();

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}


/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset,
 * one at a time(!).
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 *
 * Returns the next free CPU slot index after those it populated.
 */

int __init mipsmt_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);
#endif

	/* One of those TC's is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPU's are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */

/*
 * Halt TC "tc", initialize its TCStatus (clear TKSU/DA/IXMT, set
 * Activated), bind it to VPE "vpe", and seed logical CPU "cpu"'s
 * cpu_data from the boot CPU's.  Caller is expected to have the core
 * in configuration state.
 */
static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	write_tc_c0_tccontext(0);
	/* Bind tc to vpe */
	write_tc_c0_tcbind(vpe);
	/* In general, all TCs should have the same cpu_data indications */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
	if (cpu_data[0].cputype == CPU_34K)
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;
}


/*
 * Boot-time SMTC bring-up: with interrupts and MT disabled, put the
 * core into configuration state, distribute the available TCs across
 * the available VPEs (honouring the vpelimit/tclimit/vpe0limit
 * overrides), and initialize each secondary TC via smtc_tc_setup().
 */
void mipsmt_prepare_cpus(void)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		atomic_set(&ipi_timer_latch[i], 0);
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu++;

	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb) {
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	}
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

	/* Temporary */
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	/* Spread the ntc % nvpe remainder TCs over the first VPEs */
	slop = ntc % nvpe;
	for (i = 0; i < nvpe; i++) {
		tcpervpe[i] = ntc / nvpe;
		if (slop) {
			if((slop - i) > 0) tcpervpe[i]++;
		}
	}
	/* Handle command line override for VPE0 */
	if (vpe0limit > ntc) vpe0limit = ntc;
	if (vpe0limit > 0) {
		int slopslop;
		/*
		 * NOTE(review): if nvpe == 1 while vpe0limit differs from
		 * tcpervpe[0], the divisions/modulo by (nvpe - 1) below
		 * divide by zero - confirm callers guarantee nvpe > 1
		 * whenever either branch can be taken.
		 */
		if (vpe0limit < tcpervpe[0]) {
			/* Reducing TC count - distribute to others */
			slop = tcpervpe[0] - vpe0limit;
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] += slop / (nvpe - 1);
				if(slopslop && ((slopslop - (i - 1) > 0)))
					tcpervpe[i]++;
			}
		} else if (vpe0limit > tcpervpe[0]) {
			/* Increasing TC count - steal from others */
			slop = vpe0limit - tcpervpe[0];
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] -= slop / (nvpe - 1);
				if(slopslop && ((slopslop - (i - 1) > 0)))
					tcpervpe[i]--;
			}
		}
	}
	/* Set up shared TLB */
	smtc_configure_tlb();

	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
		/*
		 * Set the MVP bits.
		 */
		settc(tc);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
		if (vpe != 0)
			printk(", ");
		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe[vpe]; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (vpe != 0) {
			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -