📄 pmac_smp.c
/*
 * BK Id: %F% %I% %G% %U% %#%
 */
/*
 * SMP support for power macintosh.
 *
 * We support both the old "powersurge" SMP architecture
 * and the current Core99 (G4 PowerMac) machines.
 *
 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
 *
 * Support for DayStar quad CPU cards
 * Copyright (C) XLR8, Inc. 1994-2000
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/init.h>
#include <linux/spinlock.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/residual.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/gemini.h>
#include <asm/processor.h>

#include "open_pic.h"

/*
 * Powersurge (old powermac SMP) support.
 */

extern void __secondary_start_psurge(void);
extern void __secondary_start_psurge2(void);    /* Temporary horrible hack */
extern void __secondary_start_psurge3(void);    /* Temporary horrible hack */

/* Addresses for powersurge registers */
#define HAMMERHEAD_BASE         0xf8000000
#define HHEAD_CONFIG            0x90
#define HHEAD_SEC_INTR          0xc0

/* register for interrupting the primary processor on the powersurge */
/* N.B. this is actually the ethernet ROM! */
#define PSURGE_PRI_INTR         0xf3019000

/* register for storing the start address for the secondary processor */
/* N.B. this is the PCI config space address register for the 1st bridge */
#define PSURGE_START            0xf2800000

/* Daystar/XLR8 4-CPU card */
#define PSURGE_QUAD_REG_ADDR    0xf8800000

#define PSURGE_QUAD_IRQ_SET     0
#define PSURGE_QUAD_IRQ_CLR     1
#define PSURGE_QUAD_IRQ_PRIMARY 2
#define PSURGE_QUAD_CKSTOP_CTL  3
#define PSURGE_QUAD_PRIMARY_ARB 4
#define PSURGE_QUAD_BOARD_ID    6
#define PSURGE_QUAD_WHICH_CPU   7
#define PSURGE_QUAD_CKSTOP_RDBK 8
#define PSURGE_QUAD_RESET_CTL   11

#define PSURGE_QUAD_OUT(r, v)   (out_8(quad_base + ((r) << 4) + 4, (v)))
#define PSURGE_QUAD_IN(r)       (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
#define PSURGE_QUAD_BIS(r, v)   (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
#define PSURGE_QUAD_BIC(r, v)   (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))

/* virtual addresses for the above */
static volatile u8 *hhead_base;
static volatile u8 *quad_base;
static volatile u32 *psurge_pri_intr;
static volatile u8 *psurge_sec_intr;
static volatile u32 *psurge_start;
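/*
 * [Editor's illustrative sketch - not part of the original pmac_smp.c.]
 * A minimal sketch of how the physical register addresses above would
 * typically be turned into the virtual pointers just declared.  The real
 * mapping is done by the powersurge probe/setup code, which is not shown
 * in this excerpt; the mapping sizes and the helper name here are
 * assumptions, included only to make the address flow concrete.
 */
static inline void example_map_psurge_regs(void)
{
    /* hammerhead and quad-card register blocks (mapping sizes are guesses) */
    hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
    quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);

    /* the secondary interrupt register lives inside the hammerhead block */
    psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;

    /* single-word registers used to poke the primary and start secondaries */
    psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
    psurge_start = ioremap(PSURGE_START, 4);
}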
/* what sort of powersurge board we have */
static int psurge_type;

/* values for psurge_type */
#define PSURGE_DUAL             0
#define PSURGE_QUAD_OKEE        1
#define PSURGE_QUAD_COTTON      2
#define PSURGE_QUAD_ICEGRASS    3

volatile static long int core99_l2_cache;
volatile static long int core99_l3_cache;

static void __init core99_init_caches(void)
{
    int cpu = smp_processor_id();

    if (!(cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR))
        return;

    if (cpu == 0) {
        core99_l2_cache = _get_L2CR();
        printk("CPU0: L2CR is %lx\n", core99_l2_cache);
    } else {
        printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
        _set_L2CR(0);
        _set_L2CR(core99_l2_cache);
        printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
    }

    if (!(cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR))
        return;

    if (cpu == 0) {
        core99_l3_cache = _get_L3CR();
        printk("CPU0: L3CR is %lx\n", core99_l3_cache);
    } else {
        printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
        _set_L3CR(0);
        _set_L3CR(core99_l3_cache);
        printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
    }
}

/* Some CPU registers have to be saved from the first CPU and
 * applied to others. Note that we override what is setup by
 * the cputable intentionally.
 */
#define reg_hid0        0
#define reg_hid1        1
#define reg_msscr0      2
#define reg_msssr0      3
#define reg_ictrl       4
#define reg_ldstcr      5
#define reg_ldstdb      6
#define reg_count       7

static unsigned long cpu_regs[reg_count];

static void __pmac cpu_setup_grab(void)
{
    unsigned int pvers = mfspr(SPRN_PVR) >> 16;

    /* Read cache setting of CPU 0 */
    core99_init_caches();

    /* 7400/7410/7450 */
    if (pvers == 0x8000 || pvers == 0x000c || pvers == 0x800c) {
        cpu_regs[reg_hid0] = mfspr(SPRN_HID0);
        cpu_regs[reg_msscr0] = mfspr(SPRN_MSSCR0);
        cpu_regs[reg_msssr0] = mfspr(SPRN_MSSSR0);
    }
    /* 7450 only */
    if (pvers == 0x8000) {
        cpu_regs[reg_hid1] = mfspr(SPRN_HID1);
        cpu_regs[reg_ictrl] = mfspr(SPRN_ICTRL);
        cpu_regs[reg_ldstcr] = mfspr(SPRN_LDSTCR);
        cpu_regs[reg_ldstdb] = mfspr(SPRN_LDSTDB);
    }
    flush_dcache_range((unsigned long)cpu_regs,
                       (unsigned long)&cpu_regs[reg_count]);
}

static void __pmac cpu_setup_apply(int cpu_nr)
{
    unsigned int pvers = mfspr(SPRN_PVR) >> 16;

    /* Apply cache setting from CPU 0 */
    core99_init_caches();

    /* 7400/7410/7450 */
    if (pvers == 0x8000 || pvers == 0x000c || pvers == 0x800c) {
        unsigned long tmp;
        __asm__ __volatile__ (
            "lwz    %0,4*"stringify(reg_hid0)"(%1)\n"
            "sync\n"
            "mtspr  "stringify(SPRN_HID0)", %0\n"
            "isync;sync\n"
            "lwz    %0, 4*"stringify(reg_msscr0)"(%1)\n"
            "sync\n"
            "mtspr  "stringify(SPRN_MSSCR0)", %0\n"
            "isync;sync\n"
//          "lwz    %0, "stringify(reg_msssr0)"(%1)\n"
//          "sync\n"
//          "mtspr  "stringify(SPRN_MSSSR0)", %0\n"
//          "isync;sync\n"
            : "=&r" (tmp) : "r" (cpu_regs));
    }
    /* 7410 only */
    if (pvers == 0x800c) {
        unsigned long tmp;
        __asm__ __volatile__ (
            "li     %0, 0\n"
            "sync\n"
            "mtspr  "stringify(SPRN_L2CR2)", %0\n"
            "isync;sync\n"
            : "=&r" (tmp));
    }
    /* 7450 only */
    if (pvers == 0x8000) {
        unsigned long tmp;
        __asm__ __volatile__ (
            "lwz    %0, 4*"stringify(reg_hid1)"(%1)\n"
            "sync\n"
            "mtspr  "stringify(SPRN_HID1)", %0\n"
            "isync;sync\n"
            "lwz    %0, 4*"stringify(reg_ictrl)"(%1)\n"
            "sync\n"
            "mtspr  "stringify(SPRN_ICTRL)", %0\n"
            "isync;sync\n"
            "lwz    %0, 4*"stringify(reg_ldstcr)"(%1)\n"
            "sync\n"
            "mtspr  "stringify(SPRN_LDSTCR)", %0\n"
            "isync;sync\n"
            "lwz    %0, 4*"stringify(reg_ldstdb)"(%1)\n"
            "sync\n"
            "mtspr  "stringify(SPRN_LDSTDB)", %0\n"
            "isync;sync\n"
            : "=&r" (tmp) : "r" (cpu_regs));
    }
}
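/*
 * [Editor's illustrative sketch - not part of the original pmac_smp.c.]
 * A minimal sketch of how the two helpers above are meant to pair up
 * during Core99 bringup; the exact call sites and this helper's name are
 * assumptions.  The boot CPU snapshots its register setup once, and each
 * secondary then replays those saved values early in its own setup.
 */
static inline void example_core99_cpu_setup(int cpu_nr)
{
    if (cpu_nr == 0)
        cpu_setup_grab();           /* CPU 0: record HID0/MSSCR0/... into cpu_regs[] */
    else
        cpu_setup_apply(cpu_nr);    /* secondary: apply CPU 0's values */
}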
/*
 * Set and clear IPIs for powersurge.
 */
static inline void psurge_set_ipi(int cpu)
{
    if (cpu == 0)
        in_be32(psurge_pri_intr);
    else if (psurge_type == PSURGE_DUAL)
        out_8(psurge_sec_intr, 0);
    else
        PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
}

static inline void psurge_clr_ipi(int cpu)
{
    if (cpu > 0) {
        if (psurge_type == PSURGE_DUAL)
            out_8(psurge_sec_intr, ~0);
        else
            PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
    }
}

/*
 * On powersurge (old SMP powermac architecture) we don't have
 * separate IPIs for separate messages like openpic does.  Instead
 * we have a bitmap for each processor, where a 1 bit means that
 * the corresponding message is pending for that processor.
 * Ideally each cpu's entry would be in a different cache line.
 *  -- paulus.
 */
static unsigned long psurge_smp_message[NR_CPUS];

void __pmac psurge_smp_message_recv(struct pt_regs *regs)
{
    int cpu = smp_processor_id();
    int msg;

    /* clear interrupt */
    psurge_clr_ipi(cpu);

    if (smp_num_cpus < 2)
        return;

    /* make sure there is a message there */
    for (msg = 0; msg < 4; msg++)
        if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
            smp_message_recv(msg, regs);
}

void __pmac psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
{
    psurge_smp_message_recv(regs);
}

static void __pmac smp_psurge_message_pass(int target, int msg,
                                           unsigned long data, int wait)
{
    int i;

    if (smp_num_cpus < 2)
        return;

    for (i = 0; i < smp_num_cpus; i++) {
        if (target == MSG_ALL
            || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
            || target == i) {
            /* mark the message pending for cpu i and raise its IPI */
            set_bit(msg, &psurge_smp_message[i]);
            psurge_set_ipi(i);
        }
    }
}