init.c

来自「底层驱动开发」· C语言 代码 · 共 870 行 · 第 1/2 页

C
870
字号
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/imalloc.h>

#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif

/* Set once core memory init has completed; gates the two mapping paths below. */
int mem_init_done;
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

extern pgd_t swapper_pg_dir[];
extern struct task_struct *current_set[NR_CPUS];

unsigned long klimit = (unsigned long)_end;

unsigned long _SDR1=0;
unsigned long _ASR=0;

/* max amount of RAM to use */
unsigned long __max_memory;

/* info on what we think the IO hole is */
unsigned long 	io_hole_start;
unsigned long	io_hole_size;

/*
 * Dump a memory summary to the console: free areas, free swap, and
 * per-node totals of reserved / swap-cached / shared pages.
 */
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				/* count additional users beyond the first */
				shared += page_count(page) - 1;
		}
	}
	printk("%ld pages of RAM\n", total);
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

#ifdef CONFIG_PPC_ISERIES

/* On iSeries no remapping is performed: the address is used as-is. */
void __iomem *ioremap(unsigned long addr, unsigned long size)
{
	return (void __iomem *)addr;
}

extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
		       unsigned long flags)
{
	return (void __iomem *)addr;
}

void iounmap(volatile void __iomem *addr)
{
	return;
}

#else

/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 *
 * Returns 0 on success, -ENOMEM if a page-table level could not be
 * allocated.  NOTE(review): the *_alloc helpers of this kernel era
 * return NULL with init_mm.page_table_lock still held, so each failure
 * path must drop the lock before returning (previously it was leaked).
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long vsid;

	if (mem_init_done) {
		spin_lock(&init_mm.page_table_lock);
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp) {
			spin_unlock(&init_mm.page_table_lock);
			return -ENOMEM;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp) {
			spin_unlock(&init_mm.page_table_lock);
			return -ENOMEM;
		}
		ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
		if (!ptep) {
			spin_unlock(&init_mm.page_table_lock);
			return -ENOMEM;
		}
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
		spin_unlock(&init_mm.page_table_lock);
	} else {
		unsigned long va, vpn, hash, hpteg;

		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		vsid = get_kernel_vsid(ea);
		va = (vsid << 28) | (ea & 0xFFFFFFF);
		vpn = va >> PAGE_SHIFT;
		hash = hpt_hash(vpn, 0);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		/* Panic if a pte group is full */
		if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
				       HPTE_V_BOLTED,
				       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
		    == -1) {
			panic("map_io_page: could not insert mapping");
		}
	}
	return 0;
}

/*
 * Map the physical range [pa, pa+size) to the virtual range starting at
 * ea, one page at a time.  Returns the virtual address corresponding to
 * addr (which may be offset within the first page), or NULL on failure.
 */
static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
			    unsigned long ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_io_page(ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}

/* Default ioremap: uncached, guarded mapping. */
void __iomem *ioremap(unsigned long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
}

void __iomem * __ioremap(unsigned long addr, unsigned long size,
			 unsigned long flags)
{
	unsigned long pa, ea;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END
	 * 
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if (size == 0)
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;
		area = im_get_free_area(size);
		if (area == NULL)
			return NULL;
		ea = (unsigned long)(area->addr);
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (!ret)
			/* mapping failed — release the imalloc region */
			im_free(area->addr);
	} else {
		ea = ioremap_bot;
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (ret)
			ioremap_bot += size;
	}
	return ret;
}

#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))

/*
 * Map an explicit (pa, ea) pair of page-aligned addresses.
 * Returns 0 on success, 1 on any failure (alignment, region lookup,
 * or mapping error).
 */
int __ioremap_explicit(unsigned long pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
{
	struct vm_struct *area;
	void __iomem *ret;
	
	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR	"unaligned value in %s\n", __FUNCTION__);
		return 1;
	}
	
	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of 1)
		 */
		;
	} else {
		area = im_get_area(ea, size,
			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
		if (area == NULL) {
			/* Expected when PHB-dlpar is in play */
			return 1;
		}
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from "
			       "im_get_area\n");
			return 1;
		}
	}
	
	ret = __ioremap_com(pa, pa, ea, size, flags);
	if (ret == NULL) {
		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
		return 1;
	}
	if (ret != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
		return 1;
	}
	return 0;
}

/*  
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 * This code is modeled after vmalloc code - unmap_vm_area()
 *
 * XXX	what about calls before mem_init_done (ie python_countermeasures())
 */
void iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;
	
	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	im_free(addr);
}

/*
 * Unmap every imalloc region strictly contained in [addr, addr+size).
 * Returns 0 if at least one subset region was found, 1 otherwise.
 */
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
	if (area == NULL)
		return 1;

	while (area) {
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size,
				IM_REGION_SUPERSET);
	}

	return 0;
}

/*
 * Unmap an explicitly mapped region (counterpart of __ioremap_explicit).
 * Returns 0 on success, 1 if no matching region exists.
 */
int iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int rc;
	
	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create 
	 * the exact region 
	 */
	area = im_get_area(addr, size, 
			    IM_REGION_EXISTS | IM_REGION_SUBSET);
	if (area == NULL) {
		/* Determine whether subset regions exist.  If so, unmap */
		rc = iounmap_subset_regions(addr, size);
		if (rc) {
			printk(KERN_ERR
			       "%s() cannot unmap nonexistent range 0x%lx\n", 
				__FUNCTION__, addr);
			return 1;
		}
	} else {
		iounmap((void __iomem *) area->addr);
	}
	/*
	 * FIXME! This can't be right:	iounmap(area->addr);
	 * Maybe it should be "iounmap(area);"
	 */
	return 0;
}

#endif

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);

/*
 * Release the memory holding the kernel's __init sections back to the
 * page allocator (poisoning each page with 0xcc first).
 */
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, 0xcc, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
	printk ("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
/* Return the pages of an initrd image [start, end) to the page allocator. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);

/*
 * Allocate a new MMU context id for @mm from the context IDR.
 * (Definition continues beyond the visible end of this source chunk.)
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;
	int err;

again:
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?