
voyager_smp.c

linux kernel source code
C
Page 1 of 4
	calibrate_delay();
	/* save our processor parameters */
	smp_store_cpu_info(cpuid);

	/* if we're a quad, we may need to bootstrap other CPUs */
	do_quad_bootstrap();

	/* FIXME: this is rather a poor hack to prevent the CPU
	 * activating softirqs while it's supposed to be waiting for
	 * permission to proceed.  Without this, the new per CPU stuff
	 * in the softirqs will fail */
	local_irq_disable();
	cpu_set(cpuid, cpu_callin_map);

	/* signal that we're done */
	cpu_booted_map = 1;
	while (!cpu_isset(cpuid, smp_commenced_mask))
		rep_nop();
	local_irq_enable();
	local_flush_tlb();
	cpu_set(cpuid, cpu_online_map);
	wmb();
	cpu_idle();
}

/* Routine to kick start the given CPU and wait for it to report ready
 * (or timeout in startup).  When this routine returns, the requested
 * CPU is either fully running and configured or known to be dead.
 *
 * We call this routine sequentially 1 CPU at a time, so no need for
 * locking */
static void __init
do_boot_cpu(__u8 cpu)
{
	struct task_struct *idle;
	int timeout;
	unsigned long flags;
	int quad_boot = (1<<cpu) & voyager_quad_processors
		& ~(voyager_extended_vic_processors
		    & voyager_allowed_boot_processors);

	/* This is an area in head.S which was used to set up the
	 * initial kernel stack.  We need to alter this to give the
	 * booting CPU a new stack (taken from its idle process) */
	extern struct {
		__u8 *esp;
		unsigned short ss;
	} stack_start;
	/* This is the format of the CPI IDT gate (in real mode) which
	 * we're hijacking to boot the CPU */
	union IDTFormat {
		struct seg {
			__u16	Offset;
			__u16	Segment;
		} idt;
		__u32 val;
	} hijack_source;

	__u32 *hijack_vector;
	__u32 start_phys_address = setup_trampoline();

	/* There's a clever trick to this: The linux trampoline is
	 * compiled to begin at absolute location zero, so make the
	 * address zero but have the data segment selector compensate
	 * for the actual address */
	hijack_source.idt.Offset = start_phys_address & 0x000F;
	hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;

	cpucount++;
	alternatives_smp_switch(1);

	idle = fork_idle(cpu);
	if(IS_ERR(idle))
		panic("failed fork for CPU%d", cpu);
	idle->thread.eip = (unsigned long) start_secondary;
	/* init_tasks (in sched.c) is indexed logically */
	stack_start.esp = (void *) idle->thread.esp;

	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	irq_ctx_init(cpu);

	/* Note: Don't modify initial ss override */
	VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
		(unsigned long)hijack_source.val, hijack_source.idt.Segment,
		hijack_source.idt.Offset, stack_start.esp));

	/* init lowmem identity mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();

	if(quad_boot) {
		printk("CPU %d: non extended Quad boot\n", cpu);
		hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE)*4);
		*hijack_vector = hijack_source.val;
	} else {
		printk("CPU%d: extended VIC boot\n", cpu);
		hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE)*4);
		*hijack_vector = hijack_source.val;
		/* VIC errata, may also receive interrupt at this address */
		hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + VIC_DEFAULT_CPI_BASE)*4);
		*hijack_vector = hijack_source.val;
	}
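	/* Worked example of the segment:offset trick above, using an
	 * illustrative address (not taken from real Voyager hardware):
	 * a trampoline at physical 0x9F000 gives Offset = 0x9F000 &
	 * 0x000F = 0x0 and Segment = (0x9F000 >> 4) & 0xFFFF = 0x9F00,
	 * and real mode resolves Segment * 16 + Offset back to 0x9F000.
	 * The "* 4" in the phys_to_virt() calls above is the stride of
	 * the real-mode interrupt vector table: entry n lives at
	 * physical address n * 4, which is the slot being hijacked. */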
	/* All non-boot CPUs start with interrupts fully masked.  Need
	 * to lower the mask of the CPI we're about to send.  We do
	 * this in the VIC by masquerading as the processor we're
	 * about to boot and lowering its interrupt mask */
	local_irq_save(flags);
	if(quad_boot) {
		send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
	} else {
		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
		/* here we're altering registers belonging to `cpu' */
		outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
		/* now go back to our original identity */
		outb(boot_cpu_id, VIC_PROCESSOR_ID);
		/* and boot the CPU */
		send_CPI((1<<cpu), VIC_CPU_BOOT_CPI);
	}
	cpu_booted_map = 0;
	local_irq_restore(flags);

	/* now wait for it to become ready (or timeout) */
	for(timeout = 0; timeout < 50000; timeout++) {
		if(cpu_booted_map)
			break;
		udelay(100);
	}
	/* reset the page table */
	zap_low_mappings();

	if (cpu_booted_map) {
		VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
			cpu, smp_processor_id()));

		printk("CPU%d: ", cpu);
		print_cpu_info(&cpu_data(cpu));
		wmb();
		cpu_set(cpu, cpu_callout_map);
		cpu_set(cpu, cpu_present_map);
	} else {
		printk("CPU%d FAILED TO BOOT: ", cpu);
		if (*((volatile unsigned char *)phys_to_virt(start_phys_address)) == 0xA5)
			printk("Stuck.\n");
		else
			printk("Not responding.\n");
		cpucount--;
	}
}

void __init
smp_boot_cpus(void)
{
	int i;

	/* CAT BUS initialisation must be done after the memory */
	/* FIXME: The L4 has a catbus too, it just needs to be
	 * accessed in a totally different way */
	if(voyager_level == 5) {
		voyager_cat_init();

		/* now that the cat has probed the Voyager System Bus, sanity
		 * check the cpu map */
		if( ((voyager_quad_processors | voyager_extended_vic_processors)
		     & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) {
			/* should panic */
			printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
		}
	} else if(voyager_level == 4)
		voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0];

	/* this sets up the idle task to run on the current cpu */
	voyager_extended_cpus = 1;
	/* Remove the global_irq_holder setting, it triggers a BUG() on
	 * schedule at the moment */
	//global_irq_holder = boot_cpu_id;

	/* FIXME: Need to do something about this but currently only works
	 * on CPUs with a tsc which none of mine have.
	smp_tune_scheduling();
	 */
	smp_store_cpu_info(boot_cpu_id);
	printk("CPU%d: ", boot_cpu_id);
	print_cpu_info(&cpu_data(boot_cpu_id));

	if(is_cpu_quad()) {
		/* booting on a Quad CPU */
		printk("VOYAGER SMP: Boot CPU is Quad\n");
		qic_setup();
		do_quad_bootstrap();
	}

	/* enable our own CPIs */
	vic_enable_cpi();

	cpu_set(boot_cpu_id, cpu_online_map);
	cpu_set(boot_cpu_id, cpu_callout_map);
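	/* A note on the handshake each do_boot_cpu() call in the loop
	 * below relies on: the boot CPU writes the trampoline vector,
	 * sends VIC_CPU_BOOT_CPI, clears cpu_booted_map, then polls it
	 * for up to 50000 * 100us (about five seconds); the secondary
	 * sets cpu_booted_map = 1 from the start_secondary() path once
	 * it has called in.  The 0xA5 test in the failure path appears
	 * to check a marker the trampoline leaves behind, so "Stuck"
	 * means the CPU reached the trampoline but hung, while "Not
	 * responding" means it never started at all. */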
	/* loop over all the extended VIC CPUs and boot them.  The
	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
	for(i = 0; i < NR_CPUS; i++) {
		if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
			continue;
		do_boot_cpu(i);
		/* This udelay seems to be needed for the Quad boots
		 * don't remove unless you know what you're doing */
		udelay(1000);
	}
	/* we could compute the total bogomips here, but why bother?,
	 * Code added from smpboot.c */
	{
		unsigned long bogosum = 0;
		for (i = 0; i < NR_CPUS; i++)
			if (cpu_isset(i, cpu_online_map))
				bogosum += cpu_data(i).loops_per_jiffy;
		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
			cpucount+1,
			bogosum/(500000/HZ),
			(bogosum/(5000/HZ))%100);
	}
	voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
	printk("VOYAGER: Extended (interrupt handling CPUs): %d, non-extended: %d\n",
	       voyager_extended_cpus, num_booting_cpus() - voyager_extended_cpus);
	/* that's it, switch to symmetric mode */
	outb(0, VIC_PRIORITY_REGISTER);
	outb(0, VIC_CLAIM_REGISTER_0);
	outb(0, VIC_CLAIM_REGISTER_1);

	VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
}

/* Reload the secondary CPUs task structure (this function does not
 * return) */
void __init
initialize_secondary(void)
{
#if 0
	// AC kernels only
	set_current(hard_get_current());
#endif

	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.
	 */
	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"r" (current->thread.esp), "r" (current->thread.eip));
}

/* handle a Voyager SYS_INT -- If we don't, the base board will
 * panic the system.
 *
 * System interrupts occur because some problem was detected on the
 * various busses.  To find out what you have to probe all the
 * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
fastcall void
smp_vic_sys_interrupt(struct pt_regs *regs)
{
	ack_CPI(VIC_SYS_INT);
	printk("Voyager SYSTEM INTERRUPT\n");
}

/* Handle a voyager CMN_INT; These interrupts occur either because of
 * a system status change or because a single bit memory error
 * occurred.  FIXME: At the moment, ignore all this. */
fastcall void
smp_vic_cmn_interrupt(struct pt_regs *regs)
{
	static __u8 in_cmn_int = 0;
	static DEFINE_SPINLOCK(cmn_int_lock);

	/* common ints are broadcast, so make sure we only do this once */
	_raw_spin_lock(&cmn_int_lock);
	if(in_cmn_int)
		goto unlock_end;

	in_cmn_int++;
	_raw_spin_unlock(&cmn_int_lock);

	VDEBUG(("Voyager COMMON INTERRUPT\n"));

	if(voyager_level == 5)
		voyager_cat_do_common_interrupt();

	_raw_spin_lock(&cmn_int_lock);
	in_cmn_int = 0;
 unlock_end:
	_raw_spin_unlock(&cmn_int_lock);
	ack_CPI(VIC_CMN_INT);
}

/*
 * Reschedule call back. Nothing to do, all the work is done
 * automatically when we return from the interrupt.
 */
static void
smp_reschedule_interrupt(void)
{
	/* do nothing */
}

static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL	0xffffffff

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void
leave_mm(unsigned long cpu)
{
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}

/*
 * Invalidate call-back
 */
static void
smp_invalidate_interrupt(void)
{
	__u8 cpu = smp_processor_id();

	if (!test_bit(cpu, &smp_invalidate_needed))
		return;
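	/* Protocol note: the requesting CPU (voyager_flush_tlb_others()
	 * below) publishes flush_mm/flush_va under tlbstate_lock, sets
	 * this CPU's bit in smp_invalidate_needed, and sends
	 * VIC_INVALIDATE_CPI.  The clear_bit() at the end of this
	 * handler is what releases the requester's spin-wait, so
	 * flush_mm and flush_va must not be read after the bit is
	 * cleared. */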
	/* This will flood messages.  Don't uncomment unless you see
	 * problems with cross cpu invalidation
	VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
		smp_processor_id()));
	*/

	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	smp_mb__before_clear_bit();
	clear_bit(cpu, &smp_invalidate_needed);
	smp_mb__after_clear_bit();
}

/* All the new flush operations for 2.4 */

/* This routine is called with a physical cpu mask */
static void
voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
			 unsigned long va)
{
	int stuck = 50000;

	if (!cpumask)
		BUG();
	if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
		BUG();
	if (cpumask & (1 << smp_processor_id()))
		BUG();
	if (!mm)
		BUG();

	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	atomic_set_mask(cpumask, &smp_invalidate_needed);
	/*
	 * We have to send the CPI only to
	 * CPUs affected.
	 */
	send_CPI(cpumask, VIC_INVALIDATE_CPI);

	while (smp_invalidate_needed) {
		mb();
		if(--stuck == 0) {
			printk("***WARNING*** Stuck doing invalidate CPI (CPU%d)\n",
			       smp_processor_id());
			break;
		}
	}

	/* Uncomment only to debug invalidation problems
	VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
	*/

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

void
flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
	local_flush_tlb();
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
	if (current->active_mm == mm) {
		if(current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

/* enable the requested IRQs */
static void
smp_enable_irq_interrupt(void)
{
	__u8 irq;
	__u8 cpu = get_cpu();

	VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
		vic_irq_enable_mask[cpu]));

	spin_lock(&vic_irq_lock);
	for(irq = 0; irq < 16; irq++) {
		if(vic_irq_enable_mask[cpu] & (1<<irq))
			enable_local_vic_irq(irq);
