
init.c

Linux kernel source code, part of a compressed archive; this is the source code from the book 《Linux内核》 (The Linux Kernel).
Language: C
Page 1 of 3

 */
void
local_flush_tlb_mm(struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	if (mm == current->mm)
		activate_mm(mm, mm);
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}

void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (vmaddr < TASK_SIZE)
		flush_hash_page(vma->vm_mm->context, vmaddr);
	else
		flush_hash_page(0, vmaddr);
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}

/*
 * for each page addr in the range, call MMU_invalidate_page()
 * if the range is very large and the hash table is small it might be
 * faster to do a search of the hash table and just invalidate pages
 * that are in the range but that's for study later.
 * -- Cort
 */
void
local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	if (end - start > 20 * PAGE_SIZE)
	{
		flush_tlb_mm(mm);
		return;
	}
	for (; start < end && start < TASK_SIZE; start += PAGE_SIZE)
	{
		flush_hash_page(mm->context, start);
	}
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}

/*
 * The context counter has overflowed.
 * We set mm->context to NO_CONTEXT for all mm's in the system.
 * We assume we can get to all mm's by looking at tsk->mm for
 * all tasks in the system.
 */
void
mmu_context_overflow(void)
{
	struct task_struct *tsk;

	printk(KERN_DEBUG "mmu_context_overflow\n");
	read_lock(&tasklist_lock);
	for_each_task(tsk) {
		if (tsk->mm)
			tsk->mm->context = NO_CONTEXT;
	}
	read_unlock(&tasklist_lock);
	flush_hash_segments(0x10, 0xffffff);
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
	atomic_set(&next_mmu_context, 0);
	/* make sure current always has a context */
	current->mm->context = MUNGE_CONTEXT(atomic_inc_return(&next_mmu_context));
	/* The PGD is only a placeholder.  It is only used on
	 * 8xx processors.
	 */
	set_context(current->mm->context, current->mm->pgd);
}
#endif /* CONFIG_8xx */

void flush_page_to_ram(struct page *page)
{
	unsigned long vaddr = (unsigned long) kmap(page);
	__flush_page_to_ram(vaddr);
	kunmap(page);
}

#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
static void get_mem_prop(char *, struct mem_pieces *);

#if defined(CONFIG_ALL_PPC)
/*
 * Read in a property describing some pieces of memory.
 */
static void __init get_mem_prop(char *name, struct mem_pieces *mp)
{
	struct reg_property *rp;
	int s;

	rp = (struct reg_property *) get_property(memory_node, name, &s);
	if (rp == NULL) {
		printk(KERN_ERR "error: couldn't get %s property on /memory\n",
		       name);
		abort();
	}
	mp->n_regions = s / sizeof(mp->regions[0]);
	memcpy(mp->regions, rp, s);

	/* Make sure the pieces are sorted. */
	mem_pieces_sort(mp);
	mem_pieces_coalesce(mp);
}
#endif /* CONFIG_ALL_PPC */

/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, unsigned long phys,
		   unsigned int size, int flags)
{
	unsigned int bl;
	int wimgxpp;
	union ubat *bat = BATS[index];

	bl = (size >> 17) - 1;
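	/* For a power-of-two size between 128K and 256M, (size >> 17) - 1
	 * yields the BAT block-length mask of low-order ones: 128K -> 0,
	 * 256K -> 1, 8M -> 0x3f, 256M -> 0x7ff.  (bl + 1) << 17 recovers
	 * the size, which is how bat_addrs[index].limit is computed below.
	 */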
	if ((_get_PVR() >> 16) != 1) {
		/* 603, 604, etc. */
		/* Do DBAT first */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT | _PAGE_GUARDED);
		wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
		bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
		bat[1].word[1] = phys | wimgxpp;
#ifndef CONFIG_KGDB /* want user access for breakpoints */
		if (flags & _PAGE_USER)
#endif
			bat[1].bat.batu.vp = 1;
		if (flags & _PAGE_GUARDED) {
			/* G bit must be zero in IBATs */
			bat[0].word[0] = bat[0].word[1] = 0;
		} else {
			/* make IBAT same as DBAT */
			bat[0] = bat[1];
		}
	} else {
		/* 601 cpu */
		if (bl > BL_8M)
			bl = BL_8M;
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT);
		wimgxpp |= (flags & _PAGE_RW)?
			((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
		bat->word[0] = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
		bat->word[1] = phys | bl | 0x40;	/* V=1 */
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}

#define IO_PAGE	(_PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_RW)
#ifdef CONFIG_SMP
#define RAM_PAGE (_PAGE_RW|_PAGE_COHERENT)
#else
#define RAM_PAGE (_PAGE_RW)
#endif
#endif /* !CONFIG_4xx && !CONFIG_8xx */

/*
 * Map in all of physical memory starting at KERNELBASE.
 */
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

static void __init mapin_ram(void)
{
	int i;
	unsigned long v, p, s, f;
#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx) && !defined(CONFIG_POWER4)
	if (!__map_without_bats) {
		unsigned long tot, mem_base, bl, done;
		unsigned long max_size = (256<<20);
		unsigned long align;

		/* Set up BAT2 and if necessary BAT3 to cover RAM. */
		mem_base = __pa(KERNELBASE);

		/* Make sure we don't map a block larger than the
		   smallest alignment of the physical address. */
		/* alignment of mem_base */
		align = ~(mem_base-1) & mem_base;

		/* set BAT block size to MIN(max_size, align) */
		if (align && align < max_size)
			max_size = align;

		tot = total_lowmem;
		for (bl = 128<<10; bl < max_size; bl <<= 1) {
			if (bl * 2 > tot)
				break;
		}

		setbat(2, KERNELBASE, mem_base, bl, RAM_PAGE);
		done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
		if ((done < tot) && !bat_addrs[3].limit) {
			/* use BAT3 to cover a bit more */
			tot -= done;
			for (bl = 128<<10; bl < max_size; bl <<= 1)
				if (bl * 2 > tot)
					break;
			setbat(3, KERNELBASE+done, mem_base+done, bl,
			       RAM_PAGE);
		}
	}
#endif /* !CONFIG_4xx && !CONFIG_8xx && !CONFIG_POWER4 */

	for (i = 0; i < phys_mem.n_regions; ++i) {
		v = (ulong)__va(phys_mem.regions[i].address);
		p = phys_mem.regions[i].address;
		if (p >= total_lowmem)
			break;
		for (s = 0; s < phys_mem.regions[i].size; s += PAGE_SIZE) {
			/* On the MPC8xx, we want the page shared so we
			 * don't get ASID compares on kernel space.
			 */
			f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED;
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
			/* Allows stub to set breakpoints everywhere */
			f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
#else
			if ((char *) v < _stext || (char *) v >= etext)
				f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
#ifndef CONFIG_8xx
			else
				/* On the powerpc (not 8xx), no user access
				   forces R/W kernel access */
				f |= _PAGE_USER;
#endif /* CONFIG_8xx */
#endif /* CONFIG_KGDB */
			map_page(v, p, f);
			v += PAGE_SIZE;
			p += PAGE_SIZE;
			if (p >= total_lowmem)
				break;
		}
	}
}
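/*
 * MMU_get_page below falls back through three allocators depending on
 * how far boot has progressed: the normal page allocator once
 * mem_init_done is set, the bootmem allocator once init_bootmem_done
 * is set, and raw mem_pieces before either is available.
 */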
/* In fact this is only called until mem_init is done. */
static void __init *MMU_get_page(void)
{
	void *p;

	if (mem_init_done) {
		p = (void *) __get_free_page(GFP_KERNEL);
	} else if (init_bootmem_done) {
		p = alloc_bootmem_pages(PAGE_SIZE);
	} else {
		p = mem_pieces_find(PAGE_SIZE, PAGE_SIZE);
	}
	if (p == 0)
		panic("couldn't get a page in MMU_get_page");
	__clear_user(p, PAGE_SIZE);
	return p;
}

static void free_sec(unsigned long start, unsigned long end, const char *name)
{
	unsigned long cnt = 0;

	while (start < end) {
		clear_bit(PG_reserved, &virt_to_page(start)->flags);
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		cnt++;
		start += PAGE_SIZE;
	}
	if (cnt)
		printk(" %ldk %s", PGTOKB(cnt), name);
}

void free_initmem(void)
{
#define FREESEC(TYPE) \
	free_sec((unsigned long)(&__ ## TYPE ## _begin), \
		 (unsigned long)(&__ ## TYPE ## _end), \
		 #TYPE);

	printk("Freeing unused kernel memory:");
	FREESEC(init);
	if (_machine != _MACH_Pmac)
		FREESEC(pmac);
	if (_machine != _MACH_chrp)
		FREESEC(chrp);
	if (_machine != _MACH_prep)
		FREESEC(prep);
	if (_machine != _MACH_apus)
		FREESEC(apus);
	if (!have_of)
		FREESEC(openfirmware);
	printk("\n");
#undef FREESEC
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long base = start;	/* the loop advances start, so keep
					   the base around for the printk */

	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - base) >> 10);
}
#endif

extern boot_infos_t *disp_bi;

/*
 * Do very early mm setup such as finding the size of memory
 * and setting up the hash table.
 * A lot of this is prep/pmac specific but a lot of it could
 * still be merged.
 * -- Cort
 */
#if defined(CONFIG_4xx)
void __init
MMU_init(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the 4xx's zones. The first, zone
	 * 0, is set at '00b and only allows access in supervisor-mode based
	 * on the EX and WR bits. No user-mode access is allowed. The second,
	 * zone 1, is set at '10b and in supervisor-mode allows access
	 * without regard to the EX and WR bits. In user-mode, access is
	 * allowed based on the EX and WR bits.
	 */
	mtspr(SPRN_ZPR, 0x2aaaaaaa);

	/* Hardwire any TLB entries necessary here. */
	PPC4xx_tlb_pin(KERNELBASE, 0, TLB_PAGESZ(PAGESZ_16M), 1);

	/*
	 * Find the top of physical memory and map all of it in starting
	 * at KERNELBASE.
	 */
	total_memory = total_lowmem = oak_find_end_of_memory();
	end_of_DRAM = __va(total_memory);
	mapin_ram();

	/*
	 * Set up the real-mode cache parameters for the exception vector
	 * handlers (which are run in real-mode).
	 */
	mtspr(SPRN_DCWR, 0x00000000);	/* All caching is write-back */

	/*
	 * Cache instruction and data space where the exception
	 * vectors and the kernel live in real-mode.
	 */
	mtspr(SPRN_DCCR, 0x80000000);	/* 128 MB of data space at 0x0. */
	mtspr(SPRN_ICCR, 0x80000000);	/* 128 MB of instr. space at 0x0. */
}
#else
/* How about ppc_md.md_find_end_of_memory instead of these
 * ifdefs?  -- Dan.
 */
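/*
 * The generic MMU_init below proceeds in the same order on every
 * machine: find the end of physical memory for the platform at hand,
 * initialize the hash table (except on 8xx, which has no hash table),
 * map all of low memory at KERNELBASE via mapin_ram(), and finally
 * establish the machine-specific BAT or ioremap mappings for I/O.
 */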
#ifdef CONFIG_BOOTX_TEXT
extern boot_infos_t *disp_bi;
#endif

void __init MMU_init(void)
{
	if ( ppc_md.progress ) ppc_md.progress("MMU:enter", 0x111);
#ifndef CONFIG_8xx
	if (have_of)
		total_memory = pmac_find_end_of_memory();
#ifdef CONFIG_APUS
	else if (_machine == _MACH_apus)
		total_memory = apus_find_end_of_memory();
#endif
#ifdef CONFIG_GEMINI
	else if (_machine == _MACH_gemini)
		total_memory = gemini_find_end_of_memory();
#endif /* CONFIG_GEMINI */
#if defined(CONFIG_8260)
	else
		total_memory = m8260_find_end_of_memory();
#else
	else /* prep */
		total_memory = prep_find_end_of_memory();
#endif
	total_lowmem = total_memory;
#ifdef CONFIG_HIGHMEM
	if (total_lowmem > MAX_LOW_MEM) {
		total_lowmem = MAX_LOW_MEM;
		mem_pieces_remove(&phys_avail, total_lowmem,
				  total_memory - total_lowmem, 0);
	}
#endif /* CONFIG_HIGHMEM */
	end_of_DRAM = __va(total_lowmem);

	if ( ppc_md.progress ) ppc_md.progress("MMU:hash init", 0x300);
	hash_init();
#ifndef CONFIG_PPC64BRIDGE
	_SDR1 = __pa(Hash) | (Hash_mask >> 10);
#endif
	ioremap_base = 0xf8000000;

	if ( ppc_md.progress ) ppc_md.progress("MMU:mapin", 0x301);
	/* Map in all of RAM starting at KERNELBASE */
	mapin_ram();

#ifdef CONFIG_POWER4
	ioremap_base = ioremap_bot = 0xfffff000;
	isa_io_base = (unsigned long) ioremap(0xffd00000, 0x200000) + 0x100000;
#else /* CONFIG_POWER4 */
	/*
	 * Setup the bat mappings we're going to load that cover
	 * the io areas.  RAM was mapped by mapin_ram().
	 * -- Cort
	 */
	if ( ppc_md.progress ) ppc_md.progress("MMU:setbat", 0x302);
	switch (_machine) {
	case _MACH_prep:
		setbat(0, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
		setbat(1, 0xf0000000, 0xc0000000, 0x08000000, IO_PAGE);
		ioremap_base = 0xf0000000;
		break;
	case _MACH_chrp:
		setbat(0, 0xf8000000, 0xf8000000, 0x08000000, IO_PAGE);
#ifdef CONFIG_PPC64BRIDGE
		setbat(1, 0x80000000, 0xc0000000, 0x10000000, IO_PAGE);
#else
		setbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
		setbat(3, 0x90000000, 0x90000000, 0x10000000, IO_PAGE);
#endif
		break;
	case _MACH_Pmac:
		ioremap_base = 0xfe000000;
		break;
	case _MACH_apus:
		/* Map PPC exception vectors. */
		setbat(0, 0xfff00000, 0xfff00000, 0x00020000, RAM_PAGE);
		/* Map chip and ZorroII memory */
		setbat(1, zTwoBase,   0x00000000, 0x01000000, IO_PAGE);
		break;
	case _MACH_gemini:
		setbat(0, 0xf0000000, 0xf0000000, 0x10000000, IO_PAGE);
		setbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
		break;
	case _MACH_8260:
		/* Map the IMMR, plus anything else we can cover
		 * in that upper space according to the memory controller
		 * chip select mapping.  Grab another bunch of space
		 * below that for stuff we can't cover in the upper.
		 */
		setbat(0, 0xf0000000, 0xf0000000, 0x10000000, IO_PAGE);
		setbat(1, 0xe0000000, 0xe0000000, 0x10000000, IO_PAGE);
		ioremap_base = 0xe0000000;
		break;
	}
	ioremap_bot = ioremap_base;
#endif /* CONFIG_POWER4 */
#else /* CONFIG_8xx */
	total_memory = total_lowmem = m8xx_find_end_of_memory();
#ifdef CONFIG_HIGHMEM
	if (total_lowmem > MAX_LOW_MEM) {
		total_lowmem = MAX_LOW_MEM;
		mem_pieces_remove(&phys_avail, total_lowmem,
				  total_memory - total_lowmem, 0);
	}
#endif /* CONFIG_HIGHMEM */
	end_of_DRAM = __va(total_lowmem);

	/* Map in all of RAM starting at KERNELBASE */
	mapin_ram();

	/* Now map in some of the I/O space that is generically needed
	 * or shared with multiple devices.
	 * All of this fits into the same 4Mbyte region, so it only
	 * requires one page table page.
	 */
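	/* One page table page holds 1024 four-byte PTEs, and
	 * 1024 * 4kB = 4MB, which is why the whole region above costs
	 * only a single page table page.
	 */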
	ioremap(IMAP_ADDR, IMAP_SIZE);
#ifdef CONFIG_MBX
	ioremap(NVRAM_ADDR, NVRAM_SIZE);
	ioremap(MBX_CSR_ADDR, MBX_CSR_SIZE);
	ioremap(PCI_CSR_ADDR, PCI_CSR_SIZE);

	/* Map some of the PCI/ISA I/O space to get the IDE interface. */
	ioremap(PCI_ISA_IO_ADDR, 0x4000);
	ioremap(PCI_IDE_ADDR, 0x4000);
#endif
#ifdef CONFIG_RPXLITE
	ioremap(RPX_CSR_ADDR, RPX_CSR_SIZE);
	ioremap(HIOX_CSR_ADDR, HIOX_CSR_SIZE);
#endif
#ifdef CONFIG_RPXCLASSIC
	ioremap(PCI_CSR_ADDR, PCI_CSR_SIZE);
	ioremap(RPX_CSR_ADDR, RPX_CSR_SIZE);
#endif
#endif /* CONFIG_8xx */

	if ( ppc_md.progress ) ppc_md.progress("MMU:exit", 0x211);
#ifdef CONFIG_BOOTX_TEXT
	/* Must be done last, or ppc_md.progress will die */
	if (_machine == _MACH_Pmac || _machine == _MACH_chrp)
		map_bootx_text();
#endif
}
#endif /* CONFIG_4xx */

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
void __init do_init_bootmem(void)
{
	unsigned long start, size;
	int i;

	/*
	 * Find an area to use for the bootmem bitmap.
	 * We look for the first area which is at least
	 * 128kB in length (128kB is enough for a bitmap
	 * for 4GB of memory, using 4kB pages), plus 1 page
	 * (in case the address isn't page-aligned).
	 */
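	/* The 128kB figure works out as follows: 4GB / 4kB = 2^20 pages,
	 * and at one bit per page the bitmap needs 2^20 bits = 128kB.
	 */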
