init.c
From the upload: linux-jx2410 source code
Language: C
Page 1 of 4
/*  $Id: init.c,v 1.1.1.1 2004/02/04 12:55:52 laputa Exp $
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/blk.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>

mmu_gather_t mmu_gathers[NR_CPUS];

extern void device_scan(void);

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

unsigned long *sparc64_valid_addr_bitmap;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base;

enum ultra_tlb_layout tlb_type = spitfire;

/* get_new_mmu_context() uses "cache + 1".  */
spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

/* References to section boundaries */
extern char __init_begin, __init_end, _start, _end, etext, edata;

/* Initial ramdisk setup */
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero;

int bigkernel = 0;

int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
#ifdef CONFIG_SMP
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast()), freed++;
#endif
			if (pte_quicklist[0])
				free_pte_slow(pte_alloc_one_fast(NULL, 0)), freed++;
			if (pte_quicklist[1])
				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))), freed++;
		} while (pgtable_cache_size > low);
	}
#ifndef CONFIG_SMP
	if (pgd_cache_size > high / 4) {
		struct page *page, *page2;

		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
			if ((unsigned long)page->pprev_hash == 3) {
				if (page2)
					page2->next_hash = page->next_hash;
				else
					(struct page *)pgd_quicklist = page->next_hash;
				page->next_hash = NULL;
				page->pprev_hash = NULL;
				pgd_cache_size -= 2;
				__free_page(page);
				freed++;
				if (page2)
					page = page2->next_hash;
				else
					page = (struct page *)pgd_quicklist;
				if (pgd_cache_size <= low / 4)
					break;
				continue;
			}
			page2 = page;
			page = page->next_hash;
		}
	}
#endif
	return freed;
}

extern void __update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
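/* D-cache alias bookkeeping used by the routines below: the upper
 * bits of page->flags (bit 24 and up) record which CPU last left the
 * page dirty in its D-cache, and PG_dcache_dirty marks the page, so
 * the actual flush can be deferred until the page is next mapped.
 */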
__inline__ void flush_dcache_page_impl(struct page *page)
{
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#if (L1DCACHE_SIZE > PAGE_SIZE)
	__flush_dcache_page(page->virtual,
			    ((tlb_type == spitfire) &&
			     page->mapping != NULL));
#else
	if (page->mapping != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page->virtual));
#endif
}

#define PG_dcache_dirty		PG_arch_1

#define dcache_dirty_cpu(page) \
	(((page)->flags >> 24) & (NR_CPUS - 1UL))

static __inline__ void set_dcache_dirty(struct page *page)
{
	unsigned long mask = smp_processor_id();
	unsigned long non_cpu_bits = (1UL << 24UL) - 1UL;

	mask = (mask << 24) | (1UL << PG_dcache_dirty);
	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g5\n\t"
			     "or	%%g5, %0, %%g5\n\t"
			     "casx	[%2], %%g7, %%g5\n\t"
			     "cmp	%%g7, %%g5\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " membar	#StoreLoad | #StoreStore"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g5", "g7");
}

static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, 24, %%g5\n\t"
			     "cmp	%%g5, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g5\n\t"
			     "casx	[%2], %%g7, %%g5\n\t"
			     "cmp	%%g7, %%g5\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " membar	#StoreLoad | #StoreStore\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags)
			     : "g5", "g7");
}
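/* Both casx loops above are compare-and-swap retry loops.  In plain C
 * the set_dcache_dirty() update amounts to (illustrative only):
 *
 *	do {
 *		old = page->flags;
 *		new = (old & non_cpu_bits) | mask;
 *	} while (cmpxchg(&page->flags, old, new) != old);
 */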
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page = pte_page(pte);
	unsigned long pg_flags;

	if (VALID_PAGE(page) &&
	    page->mapping &&
	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
		int cpu = (pg_flags >> 24);

		/* This is just to optimize away some function calls
		 * in the SMP case.
		 */
		if (cpu == smp_processor_id())
			flush_dcache_page_impl(page);
		else
			smp_flush_dcache_page_impl(page, cpu);

		clear_dcache_dirty_cpu(page, cpu);
	}
	__update_mmu_cache(vma, address, pte);
}

void flush_dcache_page(struct page *page)
{
	int dirty = test_bit(PG_dcache_dirty, &page->flags);
	int dirty_cpu = dcache_dirty_cpu(page);

	if (page->mapping &&
	    page->mapping->i_mmap == NULL &&
	    page->mapping->i_mmap_shared == NULL) {
		if (dirty) {
			if (dirty_cpu == smp_processor_id())
				return;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page);
	} else {
		/* We could delay the flush for the !page->mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah has coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
			__flush_icache_page(__get_phys(kaddr));
	}
}

void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6dkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
	printk("%d pages in page table cache\n", pgtable_cache_size);
#ifndef CONFIG_SMP
	printk("%d entries in page dir cache\n", pgd_cache_size);
#endif
	show_buffers();
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
extern void register_prom_callbacks(void);

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

#define BASE_PAGE_SIZE 8192
static pmd_t *prompmd;

/*
 * Translate PROM's mapping we capture at boot time into physical address.
 * The second parameter is only set from prom_callback() invocations.
 */
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
	pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
	pte_t *ptep;
	unsigned long base;

	if (pmd_none(*pmdp)) {
		if (error)
			*error = 1;
		return(0);
	}
	ptep = (pte_t *)pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
	if (!pte_present(*ptep)) {
		if (error)
			*error = 1;
		return(0);
	}
	if (error) {
		*error = 0;
		return(pte_val(*ptep));
	}
	base = pte_val(*ptep) & _PAGE_PADDR;
	return(base + (promva & (BASE_PAGE_SIZE - 1)));
}
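/* The walk above assumes 8K (BASE_PAGE_SIZE) pages: bits [12:0] of
 * promva are the page offset, bits [22:13] (0x3ff mask) index the
 * pte page, and bits [33:23] (0x7ff mask) index prompmd.
 */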
static void inherit_prom_mappings(void)
{
	struct linux_prom_translation *trans;
	unsigned long phys_page, tte_vaddr, tte_data;
	void (*remap_func)(unsigned long, unsigned long, int);
	pmd_t *pmdp;
	pte_t *ptep;
	int node, n, i, tsz;
	extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (n == 0 || n == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n += 5 * sizeof(struct linux_prom_translation);
	for (tsz = 1; tsz < n; tsz <<= 1)
		/* empty */;
	trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, 0UL);
	if (trans == NULL) {
		prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
		prom_halt();
	}
	memset(trans, 0, tsz);

	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n = n / sizeof(*trans);

	/*
	 * The obp translations are saved based on 8k pagesize, since obp can
	 * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000,
	 * ie obp range, are handled in entry.S and do not use the vpte scheme
	 * (see rant in inherit_locked_prom_mappings()).
	 */
#define OBP_PMD_SIZE 2048
	prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, 0UL);
	if (prompmd == NULL)
		early_pgtable_allocfail("pmd");
	memset(prompmd, 0, OBP_PMD_SIZE);
	for (i = 0; i < n; i++) {
		unsigned long vaddr;

		if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
			for (vaddr = trans[i].virt;
			     ((vaddr < trans[i].virt + trans[i].size) &&
			      (vaddr < HI_OBP_ADDRESS));
			     vaddr += BASE_PAGE_SIZE) {
				unsigned long val;

				pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
				if (pmd_none(*pmdp)) {
					ptep = __alloc_bootmem(BASE_PAGE_SIZE,
							       BASE_PAGE_SIZE,
							       0UL);
					if (ptep == NULL)
						early_pgtable_allocfail("pte");
					memset(ptep, 0, BASE_PAGE_SIZE);
					pmd_set(pmdp, ptep);
				}
				ptep = (pte_t *)pmd_page(*pmdp) +
						((vaddr >> 13) & 0x3ff);

				val = trans[i].data;

				/* Clear diag TTE bits. */
				if (tlb_type == spitfire)
					val &= ~0x0003fe0000000000UL;

				set_pte (ptep, __pte(val | _PAGE_MODIFIED));
				trans[i].data += BASE_PAGE_SIZE;
			}
		}
	}
	phys_page = __pa(prompmd);
	obp_iaddr_patch[0] |= (phys_page >> 10);
	obp_iaddr_patch[1] |= (phys_page & 0x3ff);
	flushi((long)&obp_iaddr_patch[0]);
	obp_daddr_patch[0] |= (phys_page >> 10);
	obp_daddr_patch[1] |= (phys_page & 0x3ff);
	flushi((long)&obp_daddr_patch[0]);

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");

	/* Spitfire Errata #32 workaround */
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "flush	%%g6"
			     : /* No outputs */
			     : "r" (0),
			       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

	switch (tlb_type) {
	default:
	case spitfire:
		phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
		break;
