📄 ml_sn_init.c
/* $Id$
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2000 by Colin Ngam
 */

#include <linux/types.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <asm/sn/sgi.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/nodemask.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/synergy.h>

#if defined(CONFIG_SGI_IP35) || defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
#include <asm/sn/sn1/ip27config.h>
#include <asm/sn/sn1/hubdev.h>
#include <asm/sn/sn1/sn1.h>
#endif /* CONFIG_SGI_IP35 || CONFIG_IA64_SGI_SN1 */

extern int numcpus;
extern char arg_maxnodes[];
extern cpuid_t master_procid;
extern void *kmem_alloc_node(register size_t, register int, cnodeid_t);
extern synergy_da_t *Synergy_da_indr[];
extern int hasmetarouter;

int maxcpus;
cpumask_t boot_cpumask;
hubreg_t region_mask = 0;
extern xwidgetnum_t hub_widget_id(nasid_t);

#ifndef CONFIG_IA64_SGI_IO
#if defined(IP27)
short cputype = CPU_IP27;
#elif defined(IP33)
short cputype = CPU_IP33;
#elif defined(IP35)
short cputype = CPU_IP35;
#else
#error <BOMB! define new cputype here >
#endif
#endif /* CONFIG_IA64_SGI_IO */

static int fine_mode = 0;

#ifndef CONFIG_IA64_SGI_IO
/* Global variables */
pdaindr_t pdaindr[MAXCPUS];
#endif

static cnodemask_t hub_init_mask;               /* Mask of cpu in a node doing init */
static volatile cnodemask_t hub_init_done_mask; /* Node mask where we wait for
                                                 * per hub initialization */
spinlock_t hub_mask_lock;                       /* Lock for hub_init_mask above. */

extern int valid_icache_reasons;        /* Reasons to flush the icache */
extern int valid_dcache_reasons;        /* Reasons to flush the dcache */
extern int numnodes;
extern int maxnodes;                    /* used by the calias loop in mlreset() */
extern u_char miniroot;
extern volatile int need_utlbmiss_patch;
extern void iograph_early_init(void);

nasid_t master_nasid = INVALID_NASID;

/*
 * mlreset(int slave)
 *      very early machine reset - at this point NO interrupts have been
 *      enabled; nor are memory, tlb, p0, etc. set up.
 *
 *      slave is zero when mlreset is called for the master processor and
 *      is nonzero thereafter.
 */
void
mlreset(int slave)
{
#ifndef CONFIG_IA64_SGI_IO
        cnodeid_t i;    /* loop index for the per-node calias setup below */
#endif

        if (!slave) {
                /*
                 * We are the master cpu and node.
                 */
                master_nasid = get_nasid();
                set_master_bridge_base();
                FIXME("mlreset: Enable when we support ioc3 ..");
#ifndef CONFIG_IA64_SGI_IO
                if (get_console_nasid() == master_nasid)
                        /* Set up the IOC3 */
                        ioc3_mlreset((ioc3_cfg_t *)KL_CONFIG_CH_CONS_INFO(master_nasid)->config_base,
                                     (ioc3_mem_t *)KL_CONFIG_CH_CONS_INFO(master_nasid)->memory_base);

                /*
                 * Initialize Master nvram base.
                 */
                nvram_baseinit();

                fine_mode = is_fine_dirmode();
#endif /* CONFIG_IA64_SGI_IO */

                /* We're the master processor */
                master_procid = smp_processor_id();
                master_nasid = cpuid_to_nasid(master_procid);

                /*
                 * master_nasid we get back better be same as one from
                 * get_nasid()
                 */
                ASSERT_ALWAYS(master_nasid == get_nasid());
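                /*
                 * What the (currently inactive) calias block below does:
                 * for every compact node it writes PI_REGION_PRESENT and
                 * PI_CALIAS_SIZE on each subnode PI, then points the hub's
                 * big window (the ITTE for SWIN0_BIGWIN) at widget 0 in
                 * memory mode.  Writing (region_mask | 1) keeps region 0
                 * marked present -- e.g. a region_mask of 0x6 is written as
                 * 0x7 -- so CALIAS accesses, which the hub decodes as node 0
                 * addresses, do not take exceptions.
                 */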

#ifndef CONFIG_IA64_SGI_IO
                /*
                 * Activate when calias is implemented.
                 */
                /* Set all nodes' calias sizes to 8k */
                for (i = 0; i < maxnodes; i++) {
                        nasid_t nasid;
                        int sn;

                        nasid = COMPACT_TO_NASID_NODEID(i);

                        /*
                         * Always have node 0 in the region mask, otherwise
                         * CALIAS accesses get exceptions since the hub
                         * thinks it is a node 0 address.
                         */
                        for (sn = 0; sn < NUM_SUBNODES; sn++) {
                                REMOTE_HUB_PI_S(nasid, sn, PI_REGION_PRESENT, (region_mask | 1));
                                REMOTE_HUB_PI_S(nasid, sn, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
                        }

                        /*
                         * Set up all hubs to have a big window pointing at
                         * widget 0 (memory mode, widget 0, offset 0).
                         */
                        REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
                                     ((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
                                      (0 << IIO_ITTE_WIDGET_SHIFT)));
                }
#endif /* CONFIG_IA64_SGI_IO */

                /* Set up the hub initialization mask and init the lock */
                CNODEMASK_CLRALL(hub_init_mask);
                CNODEMASK_CLRALL(hub_init_done_mask);
                spin_lock_init(&hub_mask_lock);

                /* early initialization of iograph */
                iograph_early_init();

                /* Initialize Hub Pseudodriver Management */
                hubdev_init();

#ifndef CONFIG_IA64_SGI_IO
                /*
                 * Our IO system doesn't require cache writebacks.  Set some
                 * variables appropriately.
                 */
                cachewrback = 0;
                valid_icache_reasons &= ~(CACH_AVOID_VCES | CACH_IO_COHERENCY);
                valid_dcache_reasons &= ~(CACH_AVOID_VCES | CACH_IO_COHERENCY);

                /*
                 * make sure we are running with the right rev of chips
                 */
                verify_snchip_rev();

                /*
                 * Since we've wiped out memory at this point, we
                 * need to reset the ARCS vector table so that it
                 * points to appropriate functions in the kernel
                 * itself.  In this way, we can maintain the ARCS
                 * vector table conventions without having to actually
                 * keep redundant PROM code in memory.
                 */
                he_arcs_set_vectors();
#endif /* CONFIG_IA64_SGI_IO */

        } else { /* slave != 0 */
                /*
                 * This code is performed ONLY by slave processors.
                 */
        }
}

/* XXX - Move the meat of this to intr.c ? */
/*
 * Set up the platform-dependent fields in the nodepda.
 */
void
init_platform_nodepda(nodepda_t *npda, cnodeid_t node)
{
        hubinfo_t hubinfo;
        int       sn;
        cnodeid_t i;
        ushort    *numcpus_p;
        extern void router_map_init(nodepda_t *);
        extern void router_queue_init(nodepda_t *, cnodeid_t);

#if defined(DEBUG)
        extern lock_t intr_dev_targ_map_lock;
        extern uint64_t intr_dev_targ_map_size;

        /* Initialize the lock to access the device - target cpu mapping
         * table.  This table is explicitly for debugging purposes only and
         * to aid the "intrmap" idbg command.
         */
        if (node == 0) {
                /* Make sure we do this only once.
                 * There is always a cnode 0 present.
                 */
                intr_dev_targ_map_size = 0;
                init_spinlock(&intr_dev_targ_map_lock, "dtmap_lock", 0);
        }
#endif /* DEBUG */

        /* Allocate per-node platform-dependent data */
        hubinfo = (hubinfo_t)kmem_alloc_node(sizeof(struct hubinfo_s), GFP_ATOMIC, node);
        ASSERT_ALWAYS(hubinfo);
        npda->pdinfo = (void *)hubinfo;
        hubinfo->h_nodepda = npda;
        hubinfo->h_cnodeid = node;
        hubinfo->h_nasid = COMPACT_TO_NASID_NODEID(node);

        printk("init_platform_nodepda: hubinfo 0x%p, &hubinfo->h_crblock 0x%p\n",
               hubinfo, &hubinfo->h_crblock);

        spin_lock_init(&hubinfo->h_crblock);

        hubinfo->h_widgetid = hub_widget_id(hubinfo->h_nasid);
        npda->xbow_peer = INVALID_NASID;

        /* Initialize the linked list of
         * router info pointers to the dependent routers.
         */
        npda->npda_rip_first = NULL;

        /* npda_rip_last always points to the place
         * where the next element is to be inserted
         * into the list.
         */
        npda->npda_rip_last = &npda->npda_rip_first;
        npda->dependent_routers = 0;
        npda->module_id = INVALID_MODULE;

        /*
         * Initialize the subnodePDA.
         */
        for (sn = 0; sn < NUM_SUBNODES; sn++) {
                SNPDA(npda, sn)->prof_count = 0;
                SNPDA(npda, sn)->next_prof_timeout = 0;
// ajm
#ifndef CONFIG_IA64_SGI_IO
                intr_init_vecblk(npda, node, sn);
#endif
        }

        npda->vector_unit_busy = 0;

        spin_lock_init(&npda->vector_lock);
        init_MUTEX_LOCKED(&npda->xbow_sema); /* init it locked? */
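        /*
         * init_MUTEX_LOCKED() initializes the semaphore with a count of
         * zero, so the first down() on xbow_sema blocks until a matching
         * up() is issued elsewhere -- hence the "init it locked?" question
         * above.
         */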
        spin_lock_init(&npda->fprom_lock);

        spin_lock_init(&npda->node_utlbswitchlock);
        npda->ni_error_print = 0;

#ifndef CONFIG_IA64_SGI_IO
        if (need_utlbmiss_patch) {
                npda->node_need_utlbmiss_patch = 1;
                npda->node_utlbmiss_patched = 1;
        }
#endif

        /*
         * Clear out the nasid mask.
         */
        for (i = 0; i < NASID_MASK_BYTES; i++)
                npda->nasid_mask[i] = 0;

        for (i = 0; i < numnodes; i++) {
                nasid_t nasid = COMPACT_TO_NASID_NODEID(i);

                /* Set my mask bit */
                npda->nasid_mask[nasid / 8] |= (1 << nasid % 8);
        }

#ifndef CONFIG_IA64_SGI_IO
        npda->node_first_cpu = get_cnode_cpu(node);
#endif

        if (npda->node_first_cpu != CPU_NONE) {
                /*
                 * Count number of cpus only if first CPU is valid.
                 */
                numcpus_p = &npda->node_num_cpus;
                *numcpus_p = 0;
                for (i = npda->node_first_cpu; i < MAXCPUS; i++) {
                        if (CPUID_TO_COMPACT_NODEID(i) != node)
                                break;
                        else
                                (*numcpus_p)++;
                }
        } else {
                npda->node_num_cpus = 0;
        }

        /* Allocate memory for the dump stack on each node.
         * This is useful during nmi handling since we
         * may not be guaranteed shared memory at that time,
         * which precludes depending on a global dump stack.
         */
#ifndef CONFIG_IA64_SGI_IO
        npda->dump_stack = (uint64_t *)kmem_zalloc_node(DUMP_STACK_SIZE,
                                                        VM_NOSLEEP, node);
        ASSERT_ALWAYS(npda->dump_stack);
        ASSERT(npda->dump_stack);
#endif /* CONFIG_IA64_SGI_IO */
}
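/*
 * Note on the cpu-counting loop in init_platform_nodepda() above: it
 * assumes cpuids on a node are contiguous, starting at node_first_cpu,
 * so the first cpuid that maps to a different compact node
 * (CPUID_TO_COMPACT_NODEID(i) != node) terminates the count.
 */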