📄 smtc.c
/* DEBUG */
    int q = smp_processor_id();

    /*
     * Test is not atomic, but much faster than a dequeue,
     * and the vast majority of invocations will have a null queue.
     */
    if (IPIQ[q].head != NULL) {
        while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
            /* ipi_decode() should be called with interrupts off */
            local_irq_save(flags);
            ipi_decode(pipi);
            local_irq_restore(flags);
        }
    }
}

/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell"
 * hardware interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;

static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
    int my_vpe = cpu_data[smp_processor_id()].vpe_id;
    int my_tc = cpu_data[smp_processor_id()].tc_id;
    int cpu;
    struct smtc_ipi *pipi;
    unsigned long tcstatus;
    int sent;
    unsigned long flags;
    unsigned int mtflags;
    unsigned int vpflags;

    /*
     * So long as cross-VPE interrupts are done via
     * MFTR/MTTR read-modify-writes of Cause, we need
     * to stop other VPEs whenever the local VPE does
     * anything similar.
     */
    local_irq_save(flags);
    vpflags = dvpe();
    clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
    set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
    irq_enable_hazard();
    evpe(vpflags);
    local_irq_restore(flags);

    /*
     * Cross-VPE Interrupt handler: Try to directly deliver IPIs
     * queued for TCs on this VPE other than the current one.
     * Return-from-interrupt should cause us to drain the queue
     * for the current TC, so we ought not to have to do it explicitly here.
     */
    for_each_online_cpu(cpu) {
        if (cpu_data[cpu].vpe_id != my_vpe)
            continue;

        pipi = smtc_ipi_dq(&IPIQ[cpu]);
        if (pipi != NULL) {
            if (cpu_data[cpu].tc_id != my_tc) {
                sent = 0;
                LOCK_MT_PRA();
                settc(cpu_data[cpu].tc_id);
                write_tc_c0_tchalt(TCHALT_H);
                mips_ihb();
                tcstatus = read_tc_c0_tcstatus();
                if ((tcstatus & TCSTATUS_IXMT) == 0) {
                    post_direct_ipi(cpu, pipi);
                    sent = 1;
                }
                write_tc_c0_tchalt(0);
                UNLOCK_MT_PRA();
                if (!sent) {
                    smtc_ipi_req(&IPIQ[cpu], pipi);
                }
            } else {
                /*
                 * ipi_decode() should be called
                 * with interrupts off
                 */
                local_irq_save(flags);
                ipi_decode(pipi);
                local_irq_restore(flags);
            }
        }
    }

    return IRQ_HANDLED;
}

static void ipi_irq_dispatch(void)
{
    do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi = {
    .handler = ipi_interrupt,
    .flags = IRQF_DISABLED | IRQF_PERCPU,
    .name = "SMTC_IPI"
};

static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
    if (nvpe < 1)
        return;

    if (!cpu_has_vint)
        panic("SMTC Kernel requires Vectored Interrupt support");

    set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

    setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

    set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
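/*
 * Illustrative sketch, not from the original smtc.c: the decision
 * ipi_interrupt() makes for a target TC other than the current one,
 * pulled out as a standalone helper. The name try_direct_ipi() is
 * hypothetical, and local_irq_save()+dmt() is assumed to provide the
 * same "irqs off, MT single-threaded" bracket as the LOCK_MT_PRA()/
 * UNLOCK_MT_PRA() wrappers used above. Compiled out so it has no
 * effect on the build.
 */
#if 0
static void try_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
    unsigned long flags;
    unsigned int mtflags;
    unsigned long tcstatus;
    int sent = 0;

    local_irq_save(flags);
    mtflags = dmt();                    /* single-thread MT register access */
    settc(cpu_data[cpu].tc_id);
    write_tc_c0_tchalt(TCHALT_H);       /* freeze the target TC */
    mips_ihb();
    tcstatus = read_tc_c0_tcstatus();
    if ((tcstatus & TCSTATUS_IXMT) == 0) {
        post_direct_ipi(cpu, pipi);     /* target can take the IPI now */
        sent = 1;
    }
    write_tc_c0_tchalt(0);              /* let the target run again */
    emt(mtflags);
    local_irq_restore(flags);

    if (!sent)                          /* IXMT was set: defer via the queue */
        smtc_ipi_req(&IPIQ[cpu], pipi);
}
#endif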
/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 *
 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
 * called with interrupts disabled. We do rely on interrupts being disabled
 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
 * result in a recursive call to raw_local_irq_restore().
 */

static void __smtc_ipi_replay(void)
{
    unsigned int cpu = smp_processor_id();

    /*
     * To the extent that we've ever turned interrupts off,
     * we may have accumulated deferred IPIs. This is subtle.
     * If we use the smtc_ipi_qdepth() macro, we'll get an
     * exact number - but we'll also disable interrupts
     * and create a window of failure where a new IPI gets
     * queued after we test the depth but before we re-enable
     * interrupts. So long as IXMT never gets set, however,
     * we should be OK: If we pick up something and dispatch
     * it here, that's great. If we see nothing, but concurrent
     * with this operation, another TC sends us an IPI, IXMT
     * is clear, and we'll handle it as a real pseudo-interrupt
     * and not a pseudo-pseudo interrupt.
     */
    if (IPIQ[cpu].depth > 0) {
        while (1) {
            struct smtc_ipi_q *q = &IPIQ[cpu];
            struct smtc_ipi *pipi;
            extern void self_ipi(struct smtc_ipi *);

            spin_lock(&q->lock);
            pipi = __smtc_ipi_dq(q);
            spin_unlock(&q->lock);
            if (!pipi)
                break;

            self_ipi(pipi);
            smtc_cpu_stats[cpu].selfipis++;
        }
    }
}

void smtc_ipi_replay(void)
{
    raw_local_irq_disable();
    __smtc_ipi_replay();
}

EXPORT_SYMBOL(smtc_ipi_replay);
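/*
 * Illustrative sketch, not from the original file: the two helpers above
 * assume interrupts are already hard-disabled, because they are reached
 * from raw_local_irq_restore(). A caller that is not on that path, and
 * so cannot make that assumption, has to bracket the replay itself,
 * which is the same pattern smtc_idle_loop_hook() uses below. The
 * function name is hypothetical; the block is compiled out.
 */
#if 0
static void example_replay_deferred_ipis(void)
{
    unsigned long flags;

    local_irq_save(flags);      /* __smtc_ipi_replay() requires irqs off */
    __smtc_ipi_replay();
    local_irq_restore(flags);
}
#endif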
void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
    int im;
    unsigned long flags;
    int mtflags;
    int bit;
    int vpe;
    int tc;
    int hook_ntcs;
    /*
     * printk within DMT-protected regions can deadlock,
     * so buffer diagnostic messages for later output.
     */
    char *pdb_msg;
    char id_ho_db_msg[768]; /* worst-case use should be less than 700 */

    if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
        if (atomic_add_return(1, &idle_hook_initialized) == 1) {
            int mvpconf0;
            /* Tedious stuff to just do once */
            mvpconf0 = read_c0_mvpconf0();
            hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
            if (hook_ntcs > NR_CPUS)
                hook_ntcs = NR_CPUS;
            for (tc = 0; tc < hook_ntcs; tc++) {
                tcnoprog[tc] = 0;
                clock_hang_reported[tc] = 0;
            }
            for (vpe = 0; vpe < 2; vpe++)
                for (im = 0; im < 8; im++)
                    imstuckcount[vpe][im] = 0;
            printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
            atomic_set(&idle_hook_initialized, 1000);
        } else {
            /* Someone else is initializing in parallel - let 'em finish */
            while (atomic_read(&idle_hook_initialized) < 1000)
                ;
        }
    }

    /* Have we stupidly left IXMT set somewhere? */
    if (read_c0_tcstatus() & 0x400) {
        write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
        ehb();
        printk("Dangling IXMT in cpu_idle()\n");
    }

    /* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
    local_irq_save(flags);
    mtflags = dmt();
    pdb_msg = &id_ho_db_msg[0];
    im = read_c0_status();
    vpe = current_cpu_data.vpe_id;
    for (bit = 0; bit < 8; bit++) {
        /*
         * In current prototype, I/O interrupts
         * are masked for VPE > 0
         */
        if (vpemask[vpe][bit]) {
            if (!(im & (0x100 << bit)))
                imstuckcount[vpe][bit]++;
            else
                imstuckcount[vpe][bit] = 0;
            if (imstuckcount[vpe][bit] > IM_LIMIT) {
                set_c0_status(0x100 << bit);
                ehb();
                imstuckcount[vpe][bit] = 0;
                pdb_msg += sprintf(pdb_msg,
                    "Dangling IM %d fixed for VPE %d\n", bit, vpe);
            }
        }
    }

    /*
     * Now that we limit outstanding timer IPIs, check for hung TC
     */
    for (tc = 0; tc < NR_CPUS; tc++) {
        /* Don't check ourself - we'll dequeue IPIs just below */
        if ((tc != smp_processor_id()) &&
            atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
            if (clock_hang_reported[tc] == 0) {
                pdb_msg += sprintf(pdb_msg,
                    "TC %d looks hung with timer latch at %d\n",
                    tc, atomic_read(&ipi_timer_latch[tc]));
                clock_hang_reported[tc]++;
            }
        }
    }
    emt(mtflags);
    local_irq_restore(flags);
    if (pdb_msg != &id_ho_db_msg[0])
        printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

    /*
     * Replay any accumulated deferred IPIs. If "Instant Replay"
     * is in use, there should never be any.
     */
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
    {
        unsigned long flags;

        local_irq_save(flags);
        __smtc_ipi_replay();
        local_irq_restore(flags);
    }
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}

void smtc_soft_dump(void)
{
    int i;

    printk("Counter Interrupts taken per CPU (TC)\n");
    for (i = 0; i < NR_CPUS; i++) {
        printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
    }
    printk("Self-IPI invocations:\n");
    for (i = 0; i < NR_CPUS; i++) {
        printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
    }
    smtc_ipi_qdump();
    printk("Timer IPI Backlogs:\n");
    for (i = 0; i < NR_CPUS; i++) {
        printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
    }
    printk("%d Recoveries of \"stolen\" FPU\n",
           atomic_read(&smtc_fpu_recoveries));
}

/*
 * TLB management routines special to SMTC
 */

void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
    unsigned long flags, mtflags, tcstat, prevhalt, asid;
    int tlb, i;

    /*
     * It would be nice to be able to use a spinlock here,
     * but this is invoked from within TLB flush routines
     * that protect themselves with DVPE, so if a lock is
     * held by another TC, it'll never be freed.
     *
     * DVPE/DMT must not be done with interrupts enabled,
     * and even though most callers will already have disabled
     * them, let's be really careful...
     */
    local_irq_save(flags);
    if (smtc_status & SMTC_TLB_SHARED) {
        mtflags = dvpe();
        tlb = 0;
    } else {
        mtflags = dmt();
        tlb = cpu_data[cpu].vpe_id;
    }
    asid = asid_cache(cpu);

    do {
        if (!((asid += ASID_INC) & ASID_MASK)) {
            if (cpu_has_vtag_icache)
                flush_icache_all();
            /* Traverse all online CPUs (hack requires contiguous range) */
            for_each_online_cpu(i) {
                /*
                 * We don't need to worry about our own CPU, nor those of
                 * CPUs who don't share our TLB.
                 */
                if ((i != smp_processor_id()) &&
                    ((smtc_status & SMTC_TLB_SHARED) ||
                     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
                    settc(cpu_data[i].tc_id);
                    prevhalt = read_tc_c0_tchalt() & TCHALT_H;
                    if (!prevhalt) {
                        write_tc_c0_tchalt(TCHALT_H);
                        mips_ihb();
                    }
                    tcstat = read_tc_c0_tcstatus();
                    smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
                    if (!prevhalt)
                        write_tc_c0_tchalt(0);
                }
            }
            if (!asid)                  /* fix version if needed */
                asid = ASID_FIRST_VERSION;
            local_flush_tlb_all();      /* start new asid cycle */
        }
    } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

    /*
     * SMTC shares the TLB within VPEs and possibly across all VPEs.
     */
    for_each_online_cpu(i) {
        if ((smtc_status & SMTC_TLB_SHARED) ||
            (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
            cpu_context(i, mm) = asid_cache(i) = asid;
    }

    if (smtc_status & SMTC_TLB_SHARED)
        evpe(mtflags);
    else
        emt(mtflags);
    local_irq_restore(flags);
}

/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
    int entry;
    unsigned long ehi;

    entry = read_c0_wired();

    /* Traverse all non-wired entries */
    while (entry < current_cpu_data.tlbsize) {
        write_c0_index(entry);
        ehb();
        tlb_read();
        ehb();
        ehi = read_c0_entryhi();
        if ((ehi & ASID_MASK) == asid) {
            /*
             * Invalidate only entries with specified ASID,
             * making sure all entries differ.
             */
            write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
            write_c0_entrylo0(0);
            write_c0_entrylo1(0);
            mtc0_tlbw_hazard();
            tlb_write_indexed();
        }
        entry++;
    }
    write_c0_index(PARKED_INDEX);
    tlbw_use_hazard();
}
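/*
 * Illustrative sketch, not from the original file: what a caller
 * honoring the contract stated above (interrupts already off, DVPE or
 * DMT already done) would look like. The function name is hypothetical;
 * the shared-vs-private TLB test mirrors the one in
 * smtc_get_new_mmu_context(). Compiled out so it has no effect on the
 * build.
 */
#if 0
static void example_flush_mm_asid(unsigned long asid)
{
    unsigned long flags;
    unsigned long mtflags;

    local_irq_save(flags);              /* DVPE/DMT require irqs off */
    if (smtc_status & SMTC_TLB_SHARED)
        mtflags = dvpe();               /* TLB shared across all VPEs */
    else
        mtflags = dmt();                /* TLB private to this VPE */

    smtc_flush_tlb_asid(asid & ASID_MASK);

    if (smtc_status & SMTC_TLB_SHARED)
        evpe(mtflags);
    else
        emt(mtflags);
    local_irq_restore(flags);
}
#endif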
/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */

void smtc_cflush_lockdown(void)
{
    int cpu;

    for_each_online_cpu(cpu) {
        if (cpu != smp_processor_id()) {
            settc(cpu_data[cpu].tc_id);
            halt_state_save[cpu] = read_tc_c0_tchalt();
            write_tc_c0_tchalt(TCHALT_H);
        }
    }
    mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
    int cpu;

    /*
     * Start with a hazard barrier to ensure
     * that all CACHE ops have played through.
     */
    mips_ihb();

    for_each_online_cpu(cpu) {
        if (cpu != smp_processor_id()) {
            settc(cpu_data[cpu].tc_id);
            write_tc_c0_tchalt(halt_state_save[cpu]);
        }
    }
    mips_ihb();
}
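/*
 * Illustrative sketch, not from the original file: how a cache-maintenance
 * routine would bracket its CACHE operations with the lockdown/release
 * pair above. The wrapper name is hypothetical, and the explicit
 * dvpe()/evpe() bracket reflects the stated assumption that a DVPE is
 * already in effect when smtc_cflush_lockdown() runs. Compiled out so it
 * has no effect on the build.
 */
#if 0
static void example_single_threaded_cache_op(void)
{
    unsigned long flags;
    unsigned int vpflags;

    local_irq_save(flags);
    vpflags = dvpe();           /* lockdown assumes DVPE is already done */
    smtc_cflush_lockdown();     /* halt every other TC, saving TCHalt */

    /* ... the actual CACHE instructions would be issued here ... */

    smtc_cflush_release();      /* restore each TC's saved TCHalt state */
    evpe(vpflags);
    local_irq_restore(flags);
}
#endif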