init.c

来自「是关于linux2.5.1的完全源码」· C语言 代码 · 共 707 行 · 第 1/2 页

C
707
字号
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>		/* for initrd_* */
#endif

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/abs_addr.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/Naca.h>
#ifdef CONFIG_PPC_EEH
#include <asm/eeh.h>
#endif
#include <asm/ppcdebug.h>

/* Convert a page count to kilobytes. */
#define	PGTOKB(pages)	(((pages) * PAGE_SIZE) >> 10)

#ifdef CONFIG_PPC_ISERIES
#include <asm/iSeries/iSeries_dma.h>
#endif

/* Queue of MMU contexts; populated by code outside this chunk. */
struct mmu_context_queue_t mmu_context_queue;

/* Nonzero once the MM subsystem is fully up (set elsewhere).
 * ioremap() and map_io_page() below branch on it to decide whether
 * linux page tables can be used yet. */
int mem_init_done;

/* Next free virtual address for early (pre-imalloc) ioremap
 * mappings; starts at IMALLOC_BASE and grows upward — see
 * __ioremap(). */
unsigned long ioremap_bot = IMALLOC_BASE;

static int boot_mapsize;
static unsigned long totalram_pages;	/* reported by si_meminfo() */

/* Symbols and tables defined by the linker script / setup code. */
extern pgd_t swapper_pg_dir[];
extern char __init_begin, __init_end;
extern char
__chrp_begin, __chrp_end;
extern char __openfirmware_begin, __openfirmware_end;
extern struct _of_tce_table of_tce_table[];
extern char _start[], _end[];
extern char _stext[], etext[];
extern struct task_struct *current_set[NR_CPUS];
extern struct Naca *naca;

void mm_init_ppc64(void);
unsigned long *pmac_find_end_of_memory(void);
extern unsigned long *find_end_of_memory(void);

/* Dedicated page directory for I/O (ioremap) mappings. */
extern pgd_t ioremap_dir[];
pgd_t * ioremap_pgd = (pgd_t *)&ioremap_dir;

/* NOTE(review): the definition below names the first parameter `ea`,
 * not `va` as here — harmless, but inconsistent. */
static void map_io_page(unsigned long va, unsigned long pa, int flags);
extern void die_if_kernel(char *,struct pt_regs *,long);

unsigned long klimit = (unsigned long)_end;

/* Hashed page table state; values are set up elsewhere. */
HPTE *Hash=0;
unsigned long Hash_size=0;
unsigned long _SDR1=0;
unsigned long _ASR=0;

/* max amount of RAM to use */
unsigned long __max_memory;

/* This is declared as we are using the more or less generic
 * include/asm-ppc64/tlb.h file -- tgall
 */
mmu_gather_t     mmu_gathers[NR_CPUS];

/*
 * show_mem - dump a summary of memory usage to the console.
 *
 * Walks mem_map and counts total, reserved, swap-cached, free and
 * shared pages, printing the totals with printk.  Purely diagnostic;
 * takes no locks, so the numbers are approximate on a live system.
 */
void show_mem(void)
{
	int i,free = 0,total = 0,reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!atomic_read(&mem_map[i].count))
			free++;
		else
			/* count mappings beyond the first as "shared" */
			shared += atomic_read(&mem_map[i].count) - 1;
	}
	printk("%d pages of RAM\n",total);
	printk("%d free pages\n",free);
	printk("%d reserved pages\n",reserved);
	printk("%d pages shared\n",shared);
	printk("%d pages swap cached\n",cached);
	show_buffers();
}

/*
 * si_meminfo - fill @val with memory statistics.
 *
 * There is no highmem on ppc64, so the high fields are zero; shared
 * memory is not accounted here.
 */
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);
	val->totalhigh = 0;
	val->freehigh = 0;
	val->mem_unit = PAGE_SIZE;
}

/*
 * ioremap - map a bus/physical address range for I/O access.
 *
 * On iSeries the hypervisor already maps I/O, so the address is
 * returned unchanged.  With CONFIG_PPC_EEH, addresses with the top
 * nibble set are treated as already-mapped addresses or EEH tokens
 * once the MM is up.  Otherwise the mapping is created uncached via
 * __ioremap().
 */
void *ioremap(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_PPC_ISERIES
	return (void*)addr;
#else
#ifdef CONFIG_PPC_EEH
	if(mem_init_done && (addr >> 60UL)) {
		if (IS_EEH_TOKEN_DISABLED(addr))
			return
IO_TOKEN_TO_ADDR(addr);
		return (void*)addr; /* already mapped address or EEH token. */
	}
#endif
	return __ioremap(addr, size, _PAGE_NO_CACHE);
#endif
}

extern struct vm_struct * get_im_area( unsigned long size );

/*
 * __ioremap - create an I/O mapping for [addr, addr+size) with the
 * given pte flags, returning the chosen virtual address (with the
 * original sub-page offset preserved).  Returns NULL if the rounded
 * size is zero or no imalloc area is available.
 */
void *__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
{
	unsigned long pa, ea, i;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END (0xE000001fffffffff)
	 *
	 */
	/* page-align the physical address and round the size up to
	 * cover the whole last page */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if (size == 0)
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_im_area(size);
		if (area == 0)
			return NULL;
		ea = (unsigned long)(area->addr);
	}
	else {
		/* early boot: simple bump allocator */
		ea = ioremap_bot;
		ioremap_bot += size;
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);
	if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
		flags |= _PAGE_GUARDED;

	for (i = 0; i < size; i += PAGE_SIZE) {
		map_io_page(ea+i, pa+i, flags);
	}

	return (void *) (ea + (addr & ~PAGE_MASK));
}

/*
 * iounmap - currently a no-op on every platform, so ioremap'ed
 * ranges are never reclaimed (marked todo below).
 */
void iounmap(void *addr)
{
#ifdef CONFIG_PPC_ISERIES
     /* iSeries I/O Remap is a noop              */
	return;
#else
 	/* DRENG / PPPBBB todo */
	return;
#endif
}

/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 *
 * @ea:    effective (virtual) address to map
 * @pa:    physical address to map it to
 * @flags: pte protection flags
 *
 * NOTE(review): the early-boot path below ignores @flags and
 * hard-codes no-cache/guarded/PP_RWXX instead.
 */
static void map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long vsid;

	if (mem_init_done) {
		/* MM is up: enter the mapping in the ioremap page table */
		spin_lock(&ioremap_mm.page_table_lock);
		pgdp = pgd_offset_i(ea);
		pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
		pa = absolute_to_phys(pa);
		set_pte(ptep, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
		spin_unlock(&ioremap_mm.page_table_lock);
	} else {
		/* If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.
Simply bolt an
		 * entry in the hardware page table.
		 */
		vsid = get_kernel_vsid(ea);
		ppc_md.make_pte(htab_data.htab,
			(vsid << 28) | (ea & 0xFFFFFFF), // va (NOT the ea)
			pa,
			_PAGE_NO_CACHE | _PAGE_GUARDED | PP_RWXX,
			htab_data.htab_hash_mask, 0);
	}
}

/*
 * flush_tlb_mm - flush all translations for an address space.
 *
 * If the mm still has VMAs, each VMA's range is flushed; otherwise
 * the whole user region is flushed (address-space teardown, see the
 * comment below).  Finally the mm's CPU mask is cleared.
 */
void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->map_count) {
		struct vm_area_struct *mp;
		for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
			__flush_tlb_range(mm, mp->vm_start, mp->vm_end);
	} else {
		/* MIKEC: It is not clear why this is needed */
		/* paulus: it is needed to clear out stale HPTEs
		 * when an address space (represented by an mm_struct)
		 * is being destroyed. */
		__flush_tlb_range(mm, USER_START, USER_END);
	}

	/* XXX are there races with checking cpu_vm_mask? - Anton */
	mm->cpu_vm_mask = 0;
}

/*
 * flush_tlb_page - flush the translation for a single page.
 *
 * Picks the page-table root from the address region (vmalloc, I/O or
 * user).  For user addresses the flush can be performed locally when
 * this CPU is the only one that has used the mm.
 *
 * Callers should hold the mm->page_table_lock
 */
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long context = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep;
	pte_t pte;
	int local = 0;

	switch( REGION_ID(vmaddr) ) {
	case VMALLOC_REGION_ID:
		pgd = pgd_offset_k( vmaddr );
		break;
	case IO_REGION_ID:
		pgd = pgd_offset_i( vmaddr );
		break;
	case USER_REGION_ID:
		pgd = pgd_offset( vma->vm_mm, vmaddr );
		context = vma->vm_mm->context;

		/* XXX are there races with checking cpu_vm_mask?
- Anton */
		/* local flush is enough if only this CPU used the mm */
		if (vma->vm_mm->cpu_vm_mask == (1 << smp_processor_id()))
			local = 1;
		break;
	default:
		panic("flush_tlb_page: invalid region 0x%016lx", vmaddr);
	}

	if (!pgd_none(*pgd)) {
		pmd = pmd_offset(pgd, vmaddr);
		if (!pmd_none(*pmd)) {
			ptep = pte_offset_kernel(pmd, vmaddr);

			/* Check if HPTE might exist and flush it if so */
			pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
			if ( pte_val(pte) & _PAGE_HASHPTE ) {
				flush_hash_page(context, vmaddr, pte, local);
			}
		}
	}
}

/* Per-CPU staging area for batched TLB/HPTE flushes; indexed by
 * smp_processor_id() in __flush_tlb_range() below. */
struct tlb_batch_data tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH];

/* NOTE(review): this chunk ends mid-function (page 1 of 2 of the
 * original listing); the remainder of __flush_tlb_range is not
 * visible here. */
void
__flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep;
	pte_t pte;
	unsigned long pgd_end, pmd_end;
	unsigned long context;
	int i = 0;
	struct tlb_batch_data *ptes = &tlb_batch_array[smp_processor_id()][0];
	int local = 0;

	if ( start >= end )
		panic("flush_tlb_range: start (%016lx) greater than end (%016lx)\n", start, end );

	if ( REGION_ID(start) != REGION_ID(end) )
		panic("flush_tlb_range: start (%016lx) and end (%016lx) not in same region\n", start, end );

	context = 0;

	switch( REGION_ID(start) ) {
	case VMALLOC_REGION_ID:

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?