init.c

Source: "Linux Kernel 2.6.9 for OMAP1710" — C source, 1,774 lines total, page 1 of 4 (this chunk is truncated mid-function at its end).
/*  $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 *  arch/sparc64/mm/init.c
 *
 *  sparc64 memory-management initialization.
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>

extern void device_scan(void);

/* Table of physical memory banks; filled in elsewhere (not in this chunk). */
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

/* Bitmap of valid physical addresses; allocated/initialized elsewhere. */
unsigned long *sparc64_valid_addr_bitmap;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base;	/* base of physical memory */
unsigned long kern_base;	/* physical base of the kernel image */
unsigned long kern_size;	/* size of the kernel image */
unsigned long pfn_base;		/* first valid page frame number */

/* This is even uglier. We have a problem where the kernel may not be
 * located at phys_base. However, initial __alloc_bootmem() calls need to
 * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
 * those page mappings wont work. Things are ok after inherit_prom_mappings
 * is called though. Dave says he'll clean this up some other time.
 * -- BenC
 */
static unsigned long bootmap_base;

/* get_new_mmu_context() uses "cache + 1".
 */
/* Protects tlb_context_cache and mmu_context_bmap. */
spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

/* References to special section boundaries */
extern char  _start[], _end[];

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

/* Page mapped to by every clear (ZERO_PAGE-style) mapping; set up elsewhere. */
struct page *mem_map_zero;

int bigkernel = 0;

/* XXX Tune this... */
#define PGT_CACHE_LOW	25
#define PGT_CACHE_HIGH	50

/*
 * Trim the per-cpu page-table quicklists: when pgtable_cache_size exceeds
 * PGT_CACHE_HIGH, free pgd/pte entries back to the page allocator until it
 * drops to PGT_CACHE_LOW.  On !SMP an additional pgd half-page cache
 * (pgd_cache_size) is pruned the same way.  Runs with preemption disabled
 * because the quicklists are per-cpu.
 */
void check_pgt_cache(void)
{
	preempt_disable();
	if (pgtable_cache_size > PGT_CACHE_HIGH) {
		do {
#ifdef CONFIG_SMP
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast());
#endif
			if (pte_quicklist[0])
				free_pte_slow(pte_alloc_one_fast(NULL, 0));
			if (pte_quicklist[1])
				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
		} while (pgtable_cache_size > PGT_CACHE_LOW);
	}
#ifndef CONFIG_SMP
	if (pgd_cache_size > PGT_CACHE_HIGH / 4) {
		struct page *page, *page2;
		/* Walk the singly-linked pgd_quicklist (chained via lru.next),
		 * unlinking and freeing pages whose lru.prev == 3.
		 * NOTE(review): the magic 3 presumably marks both halves of the
		 * pgd page as free -- verify against asm/pgalloc.h.
		 */
		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
			if ((unsigned long)page->lru.prev == 3) {
				if (page2)
					page2->lru.next = page->lru.next;
				else
					pgd_quicklist = (void *) page->lru.next;
				pgd_cache_size -= 2;	/* one page == two cached pgds */
				__free_page(page);
				if (page2)
					page = (struct page *)page2->lru.next;
				else
					page = (struct page *)pgd_quicklist;
				if (pgd_cache_size <= PGT_CACHE_LOW / 4)
					break;
				continue;
			}
			page2 = page;
			page = (struct page *)page->lru.next;
		}
	}
#endif
	preempt_enable();
}

#ifdef CONFIG_DEBUG_DCFLUSH
/* Debug counters reported via mmu_info(). */
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

/*
 * Flush one page from the local D-cache.  Only needed when the D-cache is
 * larger than a page (aliasing possible); otherwise only Spitfire's
 * I-cache needs flushing for mapped pages.
 */
__inline__ void flush_dcache_page_impl(struct page *page)
{
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
#if (L1DCACHE_SIZE > PAGE_SIZE)
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1

/* CPU that last dirtied this page, stored in page->flags bits 24.. */
#define dcache_dirty_cpu(page) \
	(((page)->flags >> 24) & (NR_CPUS - 1UL))

/*
 * Atomically set PG_dcache_dirty and record this_cpu in bits 24.. of
 * page->flags, using a casx compare-and-swap retry loop.
 */
static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);

	mask = (mask << 24) | (1UL << PG_dcache_dirty);
	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g5\n\t"
			     "or	%%g5, %0, %%g5\n\t"
			     "casx	[%2], %%g7, %%g5\n\t"
			     "cmp	%%g7, %%g5\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " membar	#StoreLoad | #StoreStore"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g5", "g7");
}

/*
 * Atomically clear PG_dcache_dirty from page->flags, but only if the CPU
 * recorded in bits 24.. still equals 'cpu'; otherwise leave flags alone.
 */
static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, 24, %%g5\n\t"
			     "and	%%g5, %3, %%g5\n\t"
			     "cmp	%%g5, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g5\n\t"
			     "casx	[%2], %%g7, %%g5\n\t"
			     "cmp	%%g7, %%g5\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " membar	#StoreLoad | #StoreStore\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (NR_CPUS - 1UL)
			     : "g5", "g7");
}

extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);

/*
 * Called after a PTE is installed for 'address'.  If the backing page was
 * marked D-cache dirty, flush it now (locally, or via cross-call to the
 * dirtying CPU) and clear the dirty tag; then let __update_mmu_cache()
 * preload the TLB when a thread fault code is pending.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn;
	unsigned long pg_flags;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) &&
	    (page = pfn_to_page(pfn), page_mapping(page)) &&
	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
		int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
		int this_cpu = get_cpu();

		/* This is just to optimize away some function calls
		 * in the SMP case.
		 */
		if (cpu == this_cpu)
			flush_dcache_page_impl(page);
		else
			smp_flush_dcache_page_impl(page, cpu);

		clear_dcache_dirty_cpu(page, cpu);

		put_cpu();
	}
	if (get_thread_fault_code())
		__update_mmu_cache(vma->vm_mm->context & TAG_CONTEXT_BITS,
				   address, pte, get_thread_fault_code());
}

/*
 * Arch hook for D-cache flushing of a page-cache page.  For pages in an
 * address_space with no user mappings the flush is deferred: the page is
 * just tagged dirty for this CPU (flushing any stale tag from another CPU
 * first).  Pages that are (or may be) mapped are flushed immediately.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int dirty = test_bit(PG_dcache_dirty, &page->flags);
	int dirty_cpu = dcache_dirty_cpu(page);
	int this_cpu = get_cpu();

	if (mapping && !mapping_mapped(mapping)) {
		if (dirty) {
			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}

/* Flush the I-cache for a kernel virtual range, page by page. */
void flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah has coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
			__flush_icache_page(__get_phys(kaddr));
	}
}

/* mem_map is linear here, offset by pfn_base. */
unsigned long page_to_pfn(struct page *page)
{
	return (unsigned long) ((page - mem_map) + pfn_base);
}

struct page *pfn_to_page(unsigned long pfn)
{
	return (mem_map + (pfn - pfn_base));
}

/* Dump memory statistics to the kernel log (sysrq-m style). */
void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
	printk("%d pages in page table cache\n",pgtable_cache_size);
#ifndef CONFIG_SMP
	printk("%d entries in page dir cache\n",pgd_cache_size);
#endif
}

/* Report MMU type and (optionally) D-cache flush counters via /proc. */
void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");
#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

/* One entry of the PROM "translations" property. */
struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
extern void register_prom_callbacks(void);

/* Exported for SMP bootup purposes.
 */
unsigned long kern_locked_tte_data;

/* Print a boot-time page-table allocation failure and halt into the PROM. */
void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

#define BASE_PAGE_SIZE 8192
/* Root of the captured-PROM-translations page table (OBP pmd). */
static pmd_t *prompmd;

/*
 * Translate PROM's mapping we capture at boot time into physical address.
 * The second parameter is only set from prom_callback() invocations.
 *
 * NOTE(review): when 'error' is non-NULL and the lookup succeeds, the raw
 * pte value is returned (with *error = 0), not a physical address; only
 * the error == NULL path returns phys = pte & _PAGE_PADDR plus the page
 * offset.  Callers must be aware of this asymmetry.
 */
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
	/* Index the OBP pmd by bits [33:23], then the pte page by [22:13]. */
	pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
	pte_t *ptep;
	unsigned long base;

	if (pmd_none(*pmdp)) {
		if (error)
			*error = 1;
		return(0);
	}
	ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
	if (!pte_present(*ptep)) {
		if (error)
			*error = 1;
		return(0);
	}
	if (error) {
		*error = 0;
		return(pte_val(*ptep));
	}
	base = pte_val(*ptep) & _PAGE_PADDR;
	return(base + (promva & (BASE_PAGE_SIZE - 1)));
}

/*
 * Read the PROM's "translations" property and rebuild its OBP-range
 * mappings into our own prompmd page table, then patch the handlers
 * (obp_iaddr_patch/obp_daddr_patch) to point at it.
 */
static void inherit_prom_mappings(void)
{
	struct linux_prom_translation *trans;
	unsigned long phys_page, tte_vaddr, tte_data;
	void (*remap_func)(unsigned long, unsigned long, int);
	pmd_t *pmdp;
	pte_t *ptep;
	int node, n, i, tsz;
	extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (n == 0 || n == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	/* Pad by 5 entries, then round the buffer up to a power of two. */
	n += 5 * sizeof(struct linux_prom_translation);
	for (tsz = 1; tsz < n; tsz <<= 1)
		/* empty */;
	trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
	if (trans == NULL) {
		prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
		prom_halt();
	}
	memset(trans, 0, tsz);

	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n = n / sizeof(*trans);

	/*
	 * The obp translations are saved based on 8k pagesize, since obp can use
	 * a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000, ie obp
	 * range, are handled in entry.S and do not use the vpte scheme (see rant
	 * in inherit_locked_prom_mappings()).
	 */
#define OBP_PMD_SIZE 2048
	prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
	if (prompmd == NULL)
		early_pgtable_allocfail("pmd");
	memset(prompmd, 0, OBP_PMD_SIZE);
	for (i = 0; i < n; i++) {
		unsigned long vaddr;

		if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
			for (vaddr = trans[i].virt;
			     ((vaddr < trans[i].virt + trans[i].size) &&
			     (vaddr < HI_OBP_ADDRESS));
			     vaddr += BASE_PAGE_SIZE) {
				unsigned long val;

				pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
				if (pmd_none(*pmdp)) {
					/* Allocate a pte page lazily per pmd slot. */
					ptep = __alloc_bootmem(BASE_PAGE_SIZE,
							       BASE_PAGE_SIZE,
							       bootmap_base);
					if (ptep == NULL)
						early_pgtable_allocfail("pte");
					memset(ptep, 0, BASE_PAGE_SIZE);
					pmd_set(pmdp, ptep);
				}
				ptep = (pte_t *)__pmd_page(*pmdp) +
						((vaddr >> 13) & 0x3ff);
				val = trans[i].data;

				/* Clear diag TTE bits. */
				if (tlb_type == spitfire)
					val &= ~0x0003fe0000000000UL;

				set_pte (ptep, __pte(val | _PAGE_MODIFIED));
				trans[i].data += BASE_PAGE_SIZE;
			}
		}
	}
	/* Patch the OBP miss handlers with the physical address of prompmd
	 * (split into high 54 bits and low 10 bits across two instructions).
	 */
	phys_page = __pa(prompmd);
	obp_iaddr_patch[0] |= (phys_page >> 10);
	obp_iaddr_patch[1] |= (phys_page & 0x3ff);
	flushi((long)&obp_iaddr_patch[0]);

	obp_daddr_patch[0] |= (phys_page >> 10);
	obp_daddr_patch[1] |= (phys_page & 0x3ff);
	/* NOTE(review): SOURCE is truncated here (page 1 of 4 of the scrape);
	 * the remainder of inherit_prom_mappings() continues on the next page.
	 */

Keyboard shortcuts (code-viewer UI, not part of the source):

Copy code: Ctrl + C
Search code: Ctrl + F
Fullscreen: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?