init.c
来自「Linux Kernel 2.6.9」· arch/ppc64/mm/init.c（PowerPC64 架构，并非 OMAP1710）· C语言 代码 · 共 907 行 · 第 1/2 页
C
907 行
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/abs_addr.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/naca.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>	/* NOTE(review): duplicate of the include above */

/* Set non-zero once the core mm is initialized; gates the ioremap paths below
 * between "bolt HPTEs directly" (early boot) and "use imalloc + page tables". */
int mem_init_done;

/* Bump allocator cursor for early (pre-mem_init_done) ioremap mappings. */
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

extern pgd_t swapper_pg_dir[];
extern struct task_struct *current_set[NR_CPUS];

/* Dedicated page-table directory used for ioremap mappings. */
extern pgd_t ioremap_dir[];
pgd_t * ioremap_pgd = (pgd_t *)&ioremap_dir;

unsigned long klimit = (unsigned long)_end;

/* Hardware page-table registers (SDR1 / ASR), filled in elsewhere. */
unsigned long _SDR1=0;
unsigned long _ASR=0;

/* max amount of RAM to use */
unsigned long __max_memory;

/* info on what we think the IO hole is */
unsigned long io_hole_start;
unsigned long io_hole_size;

/*
 * Dump a summary of memory usage to the console: free areas, swap,
 * and per-node page counts (total / reserved / swap-cached / shared).
 */
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat->node_mem_map + i;
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				/* page_count includes our reference; subtract it */
				shared += page_count(page) - 1;
		}
	}
	printk("%ld pages of RAM\n", total);
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

#ifdef CONFIG_PPC_ISERIES

/*
 * On iSeries the hypervisor already maps I/O space, so ioremap is an
 * identity operation and iounmap is a no-op.
 */
void __iomem *ioremap(unsigned long addr, unsigned long size)
{
	return (void __iomem *)addr;
}

/* NOTE(review): 'extern' on a definition is unusual but harmless. */
extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
		       unsigned long flags)
{
	return (void __iomem *)addr;
}

void iounmap(volatile void __iomem *addr)
{
	return;
}

#else

/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
static void map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long vsid;

	if (mem_init_done) {
		/* Normal path: install a Linux pte in the ioremap mm. */
		spin_lock(&ioremap_mm.page_table_lock);
		pgdp = pgd_offset_i(ea);
		pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
		/* NOTE(review): pmd_alloc/pte_alloc_kernel return values are
		 * not checked here; a NULL would oops on the next line. */
		pa = abs_to_phys(pa);
		set_pte(ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
		spin_unlock(&ioremap_mm.page_table_lock);
	} else {
		unsigned long va, vpn, hash, hpteg;

		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		vsid = get_kernel_vsid(ea);
		va = (vsid << 28) | (ea & 0xFFFFFFF);
		vpn = va >> PAGE_SHIFT;
		hash = hpt_hash(vpn, 0);
		hpteg = ((hash & htab_data.htab_hash_mask)*HPTES_PER_GROUP);

		/* Panic if a pte group is full */
		if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
				       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX,
				       1, 0) == -1) {
			panic("map_io_page: could not insert mapping");
		}
	}
}

/*
 * Map [pa, pa+size) at effective address ea, one page at a time, and
 * return the I/O cookie for the original (possibly unaligned) addr.
 * Guarded access is forced for no-cache / write-through mappings.
 */
static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
			    unsigned long ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);
	if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
		flags |= _PAGE_GUARDED;

	for (i = 0; i < size; i += PAGE_SIZE) {
		map_io_page(ea+i, pa+i, flags);
	}

	/* Re-apply the sub-page offset of the caller's address. */
	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}

void __iomem *ioremap(unsigned long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}

void __iomem *__ioremap(unsigned long addr, unsigned long size,
			unsigned long flags)
{
	unsigned long pa, ea;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END (0xE000001fffffffff)
	 *
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if (size == 0)
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;
		area = im_get_free_area(size);
		if (area == NULL)
			return NULL;
		ea = (unsigned long)(area->addr);
	} else {
		/* Early boot: simple bump allocation, never freed. */
		ea = ioremap_bot;
		ioremap_bot += size;
	}

	return __ioremap_com(addr, pa, ea, size, flags);
}

#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))

/*
 * Map a specific physical range at a specific, caller-chosen effective
 * address.  Returns 0 on success, 1 on any failure (note: NOT a negative
 * errno convention).
 */
int __ioremap_explicit(unsigned long pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
{
	struct vm_struct *area;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
		return 1;
	}

	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of 1)
		 */
		;
	} else {
		area = im_get_area(ea, size,
			IM_REGION_UNUSED|IM_REGION_SUBSET);
		if (area == NULL) {
			printk(KERN_ERR "could not obtain imalloc area for ea 0x%lx\n", ea);
			return 1;
		}
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from im_get_area\n");
			return 1;
		}
	}

	if (__ioremap_com(pa, pa, ea, size, flags) != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
		return 1;
	}

	return 0;
}

/*
 * Clear the ptes covering [address, address+size) within one pmd.
 * A present (i.e. still-mapped RAM) or swapped-out pte in the ioremap
 * region indicates corruption and is logged.
 */
static void unmap_im_area_pte(pmd_t *pmd, unsigned long address,
				  unsigned long size)
{
	unsigned long end;
	pte_t *pte;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, address);
	address &= ~PMD_MASK;	/* offset within this pmd */
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;	/* clamp to this pmd's span */

	do {
		pte_t page;
		page = ptep_get_and_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);
}

/*
 * Walk the pmds of one pgd entry and clear the ptes for
 * [address, address+size) via unmap_im_area_pte().
 */
static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
				  unsigned long size)
{
	unsigned long end;
	pmd_t *pmd;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}

	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;	/* offset within this pgd entry */
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	do {
		unmap_im_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}

/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 * This code is modeled after vmalloc code - unmap_vm_area()
 *
 * XXX	what about calls before mem_init_done (ie python_countermeasures())
 */
void iounmap(volatile void __iomem *token)
{
	unsigned long address, start, end, size;
	struct mm_struct *mm;
	pgd_t *dir;
	void *addr;

	if (!mem_init_done) {
		/* Early bolted mappings were never recorded; nothing to do. */
		return;
	}

	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	/* im_free() returns the region size, or 0 if addr was not mapped. */
	if ((size = im_free(addr)) == 0) {
		return;
	}

	address = (unsigned long)addr;
	start = address;
	end = address + size;

	mm = &ioremap_mm;
	spin_lock(&mm->page_table_lock);

	dir = pgd_offset_i(address);
	flush_cache_vunmap(address, end);
	do {
		unmap_im_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
		/* 'address' check also guards against wrap-around to 0 */
	} while (address && (address < end));
	flush_tlb_kernel_range(start, end);

	spin_unlock(&mm->page_table_lock);
	return;
}

/*
 * Unmap every imalloc region that is a strict subset of [addr, addr+size).
 * Returns 0 if at least one subset region was found and unmapped, 1 if none.
 */
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
	if (area == NULL)
		return 1;

	while (area) {
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size, IM_REGION_SUPERSET);
	}

	return 0;
}

/*
 * Unmap an explicitly-sized region previously mapped with
 * __ioremap_explicit().  Returns 0 on success, 1 on failure.
 */
int iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int rc;

	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region
	 */
	area = im_get_area(addr, size,
			    IM_REGION_EXISTS | IM_REGION_SUBSET);
	if (area == NULL) {
		/* Determine whether subset regions exist.  If so, unmap */
		rc = iounmap_subset_regions(addr, size);
		if (rc) {
			printk(KERN_ERR
			       "%s() cannot unmap nonexistent range 0x%lx\n",
				__FUNCTION__, addr);
			return 1;
		}
	} else {
		iounmap((void __iomem *) area->addr);
	}
	/*
	 * FIXME! This can't be right:
	iounmap(area->addr);
	 * Maybe it should be "iounmap(area);"
	 */
	return 0;
}

#endif

/*
 * Release the memory occupied by __init code/data back to the page
 * allocator once boot-time initialization is complete.
 */
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
	printk ("Freeing unused kernel memory: %luk freed\n",
	/* NOTE(review): SOURCE is truncated here mid-statement — the
	 * remainder of this function is on page 2/2 of the original. */
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?