
📄 init.c

📁 This Linux source code is quite comprehensive and essentially complete. It is written in C. Owing to time constraints I have not tested it myself, but even as reference material it is very useful.
💻 C
📖 Page 1 of 4
/*  $Id: init.c,v 1.207 2001/11/30 06:55:39 davem Exp $
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/blk.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>

mmu_gather_t mmu_gathers[NR_CPUS];

extern void device_scan(void);

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

unsigned long *sparc64_valid_addr_bitmap;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base;

enum ultra_tlb_layout tlb_type = spitfire;

/* get_new_mmu_context() uses "cache + 1".  */
spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

/* References to section boundaries */
extern char __init_begin, __init_end, _start, _end, etext, edata;

/* Initial ramdisk setup */
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero;

int bigkernel = 0;

int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
#ifdef CONFIG_SMP
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast()), freed++;
#endif
			if (pte_quicklist[0])
				free_pte_slow(pte_alloc_one_fast(NULL, 0)), freed++;
			if (pte_quicklist[1])
				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))), freed++;
		} while (pgtable_cache_size > low);
	}
#ifndef CONFIG_SMP
	if (pgd_cache_size > high / 4) {
		struct page *page, *page2;

		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
			if ((unsigned long)page->pprev_hash == 3) {
				if (page2)
					page2->next_hash = page->next_hash;
				else
					(struct page *)pgd_quicklist = page->next_hash;
				page->next_hash = NULL;
				page->pprev_hash = NULL;
				pgd_cache_size -= 2;
				__free_page(page);
				freed++;
				if (page2)
					page = page2->next_hash;
				else
					page = (struct page *)pgd_quicklist;
				if (pgd_cache_size <= low / 4)
					break;
				continue;
			}
			page2 = page;
			page = page->next_hash;
		}
	}
#endif
	return freed;
}

extern void __update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

__inline__ void flush_dcache_page_impl(struct page *page)
{
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#if (L1DCACHE_SIZE > PAGE_SIZE)
	__flush_dcache_page(page->virtual,
			    ((tlb_type == spitfire) &&
			     page->mapping != NULL));
#else
	if (page->mapping != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page->virtual));
#endif
}

#define PG_dcache_dirty		PG_arch_1

#define dcache_dirty_cpu(page) \
	(((page)->flags >> 24) & (NR_CPUS - 1UL))

static __inline__ void set_dcache_dirty(struct page *page)
{
	unsigned long mask = smp_processor_id();
	unsigned long non_cpu_bits = (1UL << 24UL) - 1UL;

	mask = (mask << 24) | (1UL << PG_dcache_dirty);
	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g5\n\t"
			     "or	%%g5, %0, %%g5\n\t"
			     "casx	[%2], %%g7, %%g5\n\t"
			     "cmp	%%g7, %%g5\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " membar	#StoreLoad | #StoreStore"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g5", "g7");
}

static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, 24, %%g5\n\t"
			     "cmp	%%g5, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g5\n\t"
			     "casx	[%2], %%g7, %%g5\n\t"
			     "cmp	%%g7, %%g5\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " membar	#StoreLoad | #StoreStore\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags)
			     : "g5", "g7");
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page = pte_page(pte);
	unsigned long pg_flags;

	if (VALID_PAGE(page) &&
	    page->mapping &&
	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
		int cpu = (pg_flags >> 24);

		/* This is just to optimize away some function calls
		 * in the SMP case.
		 */
		if (cpu == smp_processor_id())
			flush_dcache_page_impl(page);
		else
			smp_flush_dcache_page_impl(page, cpu);

		clear_dcache_dirty_cpu(page, cpu);
	}
	__update_mmu_cache(vma, address, pte);
}

void flush_dcache_page(struct page *page)
{
	int dirty = test_bit(PG_dcache_dirty, &page->flags);
	int dirty_cpu = dcache_dirty_cpu(page);

	if (page->mapping &&
	    page->mapping->i_mmap == NULL &&
	    page->mapping->i_mmap_shared == NULL) {
		if (dirty) {
			if (dirty_cpu == smp_processor_id())
				return;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page);
	} else {
		/* We could delay the flush for the !page->mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}
}
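/* ------------------------------------------------------------------------
 * Illustrative sketch (not part of the original file): set_dcache_dirty()
 * and clear_dcache_dirty_cpu() above use SPARC's casx compare-and-swap in
 * a retry loop to update page->flags without a lock -- reload the word,
 * compute the new value, and retry whenever another CPU changed the word
 * in between.  Below is a minimal portable rendering of the same loop
 * using GCC's __sync_val_compare_and_swap builtin; the function name is
 * hypothetical, and only the flag layout (CPU id in bits 24 and up, dirty
 * bit below) is taken from the code above.
 */
static void cas_set_dcache_dirty_sketch(unsigned long *flags,
					unsigned long cpu)
{
	unsigned long non_cpu_bits = (1UL << 24UL) - 1UL;
	unsigned long mask = (cpu << 24) | (1UL << PG_dcache_dirty);
	unsigned long old, new;

	do {
		old = *flags;				/* ldx  [%2], %%g7 */
		new = (old & non_cpu_bits) | mask;	/* and ...; or ... */
		/* casx: store 'new' only if *flags still holds 'old';
		 * the builtin returns the value actually found there. */
	} while (__sync_val_compare_and_swap(flags, old, new) != old);
}
/* --------------------------------------------------------------------- */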
void flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah has coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
			__flush_icache_page(__get_phys(kaddr));
	}
}

void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6dkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
	printk("%d pages in page table cache\n", pgtable_cache_size);
#ifndef CONFIG_SMP
	printk("%d entries in page dir cache\n", pgd_cache_size);
#endif
	show_buffers();
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");
#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
extern void register_prom_callbacks(void);

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

#define BASE_PAGE_SIZE 8192
static pmd_t *prompmd;

/* When shared+writable mmaps of files go away, we lose all dirty
 * page state, so we have to deal with D-cache aliasing here.
 *
 * This code relies on the fact that flush_cache_range() is always
 * called for an area composed by a single VMA.  It also assumes that
 * the MM's page_table_lock is held.
 */
static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size)
{
	unsigned long offset;
	pte_t *ptep;

	if (pmd_none(*pmd))
		return;
	ptep = pte_offset(pmd, address);
	offset = address & ~PMD_MASK;
	if (offset + size > PMD_SIZE)
		size = PMD_SIZE - offset;
	size &= PAGE_MASK;
	for (offset = 0; offset < size; ptep++, offset += PAGE_SIZE) {
		pte_t pte = *ptep;

		if (pte_none(pte))
			continue;

		if (pte_present(pte) && pte_dirty(pte)) {
			struct page *page = pte_page(pte);
			unsigned long pgaddr, uaddr;

			if (!VALID_PAGE(page) || PageReserved(page) || !page->mapping)
				continue;
			pgaddr = (unsigned long) page_address(page);
			uaddr = address + offset;
			if ((pgaddr ^ uaddr) & (1 << 13))
				flush_dcache_page_all(mm, page);
		}
	}
}

static inline void flush_cache_pmd_range(struct mm_struct *mm, pgd_t *dir, unsigned long address, unsigned long size)
{
	pmd_t *pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	pmd = pmd_offset(dir, address);
	end = address + size;
	if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
		end = ((address + PGDIR_SIZE) & PGDIR_MASK);
	do {
		flush_cache_pte_range(mm, pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *dir = pgd_offset(mm, start);

	if (mm == current->mm)
		flushw_user();

	if (vma->vm_file == NULL ||
	    ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)))
		return;

	do {
		flush_cache_pmd_range(mm, dir, start, end - start);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (start && (start < end));
}

/*
 * Translate PROM's mapping we capture at boot time into physical address.
 * The second parameter is only set from prom_callback() invocations.
 */
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
	pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
	pte_t *ptep;
	unsigned long base;

	if (pmd_none(*pmdp)) {
		if (error)
			*error = 1;
		return(0);
	}
	ptep = (pte_t *)pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
	if (!pte_present(*ptep)) {
		if (error)
			*error = 1;
		return(0);
	}
	if (error) {
		*error = 0;
		return(pte_val(*ptep));
	}
	base = pte_val(*ptep) & _PAGE_PADDR;
	return(base + (promva & (BASE_PAGE_SIZE - 1)));
}

static void inherit_prom_mappings(void)
{
	struct linux_prom_translation *trans;
	unsigned long phys_page, tte_vaddr, tte_data;
	void (*remap_func)(unsigned long, unsigned long, int);
	pmd_t *pmdp;
	pte_t *ptep;
	int node, n, i, tsz;
	extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (n == 0 || n == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n += 5 * sizeof(struct linux_prom_translation);
	for (tsz = 1; tsz < n; tsz <<= 1)
		/* empty */;
	trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, 0UL);
	if (trans == NULL) {
		prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
		prom_halt();
	}
	memset(trans, 0, tsz);

	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n = n / sizeof(*trans);

	/*
	 * The obp translations are saved based on 8k pagesize, since obp can use
	 * a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000, ie obp
	 * range, are handled in entry.S and do not use the vpte scheme (see rant
	 * in inherit_locked_prom_mappings()).
	 */
#define OBP_PMD_SIZE 2048
	prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, 0UL);
	if (prompmd == NULL)
		early_pgtable_allocfail("pmd");
	memset(prompmd, 0, OBP_PMD_SIZE);
	for (i = 0; i < n; i++) {
		unsigned long vaddr;

		if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
			for (vaddr = trans[i].virt;
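The listing ends here, mid-way through inherit_prom_mappings() (this is page 1 of 4). One detail above that rewards a worked example is the alias test in flush_cache_pte_range(): (pgaddr ^ uaddr) & (1 << 13). On the UltraSPARC-I/II parts this code targets, the L1 D-cache is 16 KB, direct-mapped and virtually indexed, while pages are 8 KB, so bit 13 is the one cache-index bit above the page offset; if the kernel and user virtual addresses of the same physical page differ in that bit, they land in different cache sets and can each hold a stale copy of the data. Below is a minimal user-space sketch of the same test; the helper name dcache_aliases and the two addresses are hypothetical, not from the kernel source.

#include <stdio.h>

/* Mirrors the (pgaddr ^ uaddr) & (1 << 13) test in flush_cache_pte_range():
 * with 8 KB pages and a 16 KB direct-mapped, virtually indexed D-cache,
 * bit 13 is the only cache-index bit above the page offset. */
static int dcache_aliases(unsigned long kernel_vaddr, unsigned long user_vaddr)
{
	return ((kernel_vaddr ^ user_vaddr) & (1UL << 13)) != 0;
}

int main(void)
{
	/* Hypothetical 8 KB-aligned virtual addresses assumed to map the
	 * same physical page: they differ in bit 13, so they index
	 * different cache sets and would alias each other. */
	unsigned long kva = 0x2000UL;	/* bit 13 set   */
	unsigned long uva = 0x4000UL;	/* bit 13 clear */

	printf("alias: %s\n", dcache_aliases(kva, uva) ? "yes" : "no");
	return 0;
}

When the bit matches, both mappings index the same cache set for the same data and no flush is needed, which is why the kernel only calls flush_dcache_page_all() in the mismatch case.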
