
📄 init.c

📁 Linux kernel source code, provided as a compressed archive; this is the source code accompanying the book 《Linux内核》.
💻 C
📖 Page 1 of 3
/*  $Id: init.c,v 1.161 2000/12/09 20:16:58 davem Exp $
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/blk.h>
#include <linux/swap.h>
#include <linux/swapctl.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/vaddrs.h>
#include <asm/dma.h>
#include <asm/starfire.h>

extern void device_scan(void);

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

unsigned long *sparc64_valid_addr_bitmap;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base;

/* get_new_mmu_context() uses "cache + 1".  */
spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

/* References to section boundaries */
extern char __init_begin, __init_end, _start, _end, etext, edata;

/* Initial ramdisk setup */
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
#ifdef CONFIG_SMP
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast()), freed++;
#endif
			if (pte_quicklist[0])
				free_pte_slow(get_pte_fast(0)), freed++;
			if (pte_quicklist[1])
				free_pte_slow(get_pte_fast(1)), freed++;
		} while (pgtable_cache_size > low);
	}
#ifndef CONFIG_SMP
	if (pgd_cache_size > high / 4) {
		struct page *page, *page2;

		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
			/* Both pgd halves on this page are free, so the
			 * whole page can be unlinked from the cache and
			 * returned to the page allocator.
			 */
			if ((unsigned long)page->pprev_hash == 3) {
				if (page2)
					page2->next_hash = page->next_hash;
				else
					(struct page *)pgd_quicklist = page->next_hash;
				page->next_hash = NULL;
				page->pprev_hash = NULL;
				pgd_cache_size -= 2;
				__free_page(page);
				freed++;
				if (page2)
					page = page2->next_hash;
				else
					page = (struct page *)pgd_quicklist;
				if (pgd_cache_size <= low / 4)
					break;
				continue;
			}
			page2 = page;
			page = page->next_hash;
		}
	}
#endif
	return freed;
}

extern void __update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page = pte_page(pte);

	if (VALID_PAGE(page) && page->mapping &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		__flush_dcache_page(page->virtual, 1);
		clear_bit(PG_dcache_dirty, &page->flags);
	}
	__update_mmu_cache(vma, address, pte);
}

/* In arch/sparc64/mm/ultra.S */
extern void __flush_icache_page(unsigned long);

void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long kaddr;

	for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
		__flush_icache_page(__get_phys(kaddr));
}

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t __bad_page(void)
{
	memset((void *) &empty_bad_page, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte_phys((((unsigned long) &empty_bad_page)
					- ((unsigned long)&empty_zero_page)
					+ phys_base),
				       PAGE_SHARED));
}

void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6dkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
	printk("%d pages in page table cache\n", pgtable_cache_size);
#ifndef CONFIG_SMP
	printk("%d entries in page dir cache\n", pgd_cache_size);
#endif
	show_buffers();
}

int mmu_info(char *buf)
{
	/* We'll do the rest later to make it nice... -DaveM */
#if 0
	if (this_is_cheetah)
		sprintf(buf, "MMU Type\t: One bad ass cpu\n");
	else
#endif
	return sprintf(buf, "MMU Type\t: Spitfire\n");
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
extern void register_prom_callbacks(void);

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void inherit_prom_mappings(void)
{
	struct linux_prom_translation *trans;
	unsigned long phys_page, tte_vaddr, tte_data;
	void (*remap_func)(unsigned long, unsigned long, int);
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int node, n, i, tsz;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (n == 0 || n == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n += 5 * sizeof(struct linux_prom_translation);
	for (tsz = 1; tsz < n; tsz <<= 1)
		/* empty */;
	trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, 0UL);
	if (trans == NULL) {
		prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
		prom_halt();
	}
	memset(trans, 0, tsz);

	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n = n / sizeof(*trans);

	for (i = 0; i < n; i++) {
		unsigned long vaddr;

		if (trans[i].virt >= 0xf0000000 && trans[i].virt < 0x100000000) {
			for (vaddr = trans[i].virt;
			     vaddr < trans[i].virt + trans[i].size;
			     vaddr += PAGE_SIZE) {
				pgdp = pgd_offset(&init_mm, vaddr);
				if (pgd_none(*pgdp)) {
					pmdp = __alloc_bootmem(PMD_TABLE_SIZE,
							       PMD_TABLE_SIZE,
							       0UL);
					if (pmdp == NULL)
						early_pgtable_allocfail("pmd");
					memset(pmdp, 0, PMD_TABLE_SIZE);
					pgd_set(pgdp, pmdp);
				}
				pmdp = pmd_offset(pgdp, vaddr);
				if (pmd_none(*pmdp)) {
					ptep = __alloc_bootmem(PTE_TABLE_SIZE,
							       PTE_TABLE_SIZE,
							       0UL);
					if (ptep == NULL)
						early_pgtable_allocfail("pte");
					memset(ptep, 0, PTE_TABLE_SIZE);
					pmd_set(pmdp, ptep);
				}
				ptep = pte_offset(pmdp, vaddr);
				set_pte(ptep, __pte(trans[i].data | _PAGE_MODIFIED));
				trans[i].data += PAGE_SIZE;
			}
		}
	}

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");

	/* Spitfire Errata #32 workaround */
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "flush	%%g6"
			     : /* No outputs */
			     : "r" (0),
			       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

	phys_page = spitfire_get_dtlb_data(63) & _PAGE_PADDR;
	phys_page += ((unsigned long)&prom_boot_page -
		      (unsigned long)&empty_zero_page);

	/* Lock this into i/d tlb entry 59 */
	__asm__ __volatile__(
		"stxa	%%g0, [%2] %3\n\t"
		"stxa	%0, [%1] %4\n\t"
		"membar	#Sync\n\t"
		"flush	%%g6\n\t"
		"stxa	%%g0, [%2] %5\n\t"
		"stxa	%0, [%1] %6\n\t"
		"membar	#Sync\n\t"
		"flush	%%g6"
		: : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
			 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
		    "r" (59 << 3), "r" (TLB_TAG_ACCESS),
		    "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
		    "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
		: "memory");

	tte_vaddr = (unsigned long) &empty_zero_page;

	/* Spitfire Errata #32 workaround */
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "flush	%%g6"
			     : /* No outputs */
			     : "r" (0),
			       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

	kern_locked_tte_data = tte_data = spitfire_get_dtlb_data(63);

	remap_func = (void *) ((unsigned long) &prom_remap -
			       (unsigned long) &prom_boot_page);

	/* Spitfire Errata #32 workaround */
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "flush	%%g6"
			     : /* No outputs */
			     : "r" (0),
			       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

	remap_func(spitfire_get_dtlb_data(63) & _PAGE_PADDR,
		   (unsigned long) &empty_zero_page,
		   prom_get_mmu_ihandle());

	/* Flush out that temporary mapping. */
	spitfire_flush_dtlb_nucleus_page(0x0);
	spitfire_flush_itlb_nucleus_page(0x0);

	/* Now lock us back into the TLBs via OBP. */
	prom_dtlb_load(63, tte_data, tte_vaddr);
	prom_itlb_load(63, tte_data, tte_vaddr);

	/* Re-read translations property. */
	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n = n / sizeof(*trans);

	for (i = 0; i < n; i++) {
		unsigned long vaddr = trans[i].virt;
		unsigned long size = trans[i].size;

		if (vaddr < 0xf0000000UL) {
			unsigned long avoid_start = (unsigned long) &empty_zero_page;
			unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);

			if (vaddr < avoid_start) {
				unsigned long top = vaddr + size;

				if (top > avoid_start)
					top = avoid_start;
				prom_unmap(top - vaddr, vaddr);
			}
			if ((vaddr + size) > avoid_end) {
				unsigned long bottom = vaddr;

				if (bottom < avoid_end)
					bottom = avoid_end;
				prom_unmap((vaddr + size) - bottom, bottom);
			}
		}
	}

	prom_printf("done.\n");

	register_prom_callbacks();
}

/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
 * upwards as reserved for use by the firmware (I wonder if this
 * will be the same on Cheetah...).  We use this virtual address
 * range for the VPTE table mappings of the nucleus so we need
 * to zap them when we enter the PROM.  -DaveM
 */
static void __flush_nucleus_vptes(void)
{
	unsigned long prom_reserved_base = 0xfffffffc00000000UL;
	int i;

	/* Only DTLB must be checked for VPTE entries. */
	for (i = 0; i < 63; i++) {
		unsigned long tag;

		/* Spitfire Errata #32 workaround */
		__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
				     "flush	%%g6"
				     : /* No outputs */
				     : "r" (0),
				       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

		tag = spitfire_get_dtlb_tag(i);
		if (((tag & ~(PAGE_MASK)) == 0) &&
		    ((tag &  (PAGE_MASK)) >= prom_reserved_base)) {
			__asm__ __volatile__("stxa %%g0, [%0] %1"
					     : /* no outputs */
					     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
			membar("#Sync");
			spitfire_put_dtlb_data(i, 0x0UL);
			membar("#Sync");
		}
	}
}

static int prom_ditlb_set = 0;

struct prom_tlb_entry {
	int		tlb_ent;
	unsigned long	tlb_tag;
	unsigned long	tlb_data;
};
struct prom_tlb_entry prom_itlb[8], prom_dtlb[8];

void prom_world(int enter)
{
	unsigned long pstate;
	int i;

	if (!enter)
		set_fs(current->thread.current_ds);

	if (!prom_ditlb_set)
		return;

	/* Make sure the following runs atomically. */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	if (enter) {
		/* Kick out nucleus VPTEs. */
		__flush_nucleus_vptes();

		/* Install PROM world. */
		for (i = 0; i < 8; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2"
					: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
					    "i" (ASI_DMMU));
				membar("#Sync");
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
