
📄 init.c

📁 Source code for the microwindows port to S3C44B0
💻 C
📖 Page 1 of 2
/*
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>

#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>		/* for initrd_* */
#endif

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/abs_addr.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/naca.h>
#include <asm/eeh.h>
#include <asm/ppcdebug.h>

#define	PGTOKB(pages)	(((pages) * PAGE_SIZE) >> 10)

#ifdef CONFIG_PPC_ISERIES
#include <asm/iSeries/iSeries_dma.h>
#endif

struct mmu_context_queue_t mmu_context_queue;
int mem_init_done;
unsigned long ioremap_bot = IMALLOC_BASE;

static int boot_mapsize;
static unsigned long totalram_pages;

extern pgd_t swapper_pg_dir[];
extern char __init_begin, __init_end;
extern char __chrp_begin, __chrp_end;
extern char __openfirmware_begin, __openfirmware_end;
extern struct _of_tce_table of_tce_table[];
extern char _start[], _end[];
extern char _stext[], etext[];
extern struct task_struct *current_set[NR_CPUS];

void mm_init_ppc64(void);
unsigned long *pmac_find_end_of_memory(void);
extern unsigned long *find_end_of_memory(void);

extern pgd_t ioremap_dir[];
pgd_t *ioremap_pgd = (pgd_t *)&ioremap_dir;

static void map_io_page(unsigned long va, unsigned long pa, int flags);
extern void die_if_kernel(char *, struct pt_regs *, long);

unsigned long klimit = (unsigned long)_end;

HPTE *Hash = 0;
unsigned long Hash_size = 0;
unsigned long _SDR1 = 0;
unsigned long _ASR = 0;

/* max amount of RAM to use */
unsigned long __max_memory;

/* This is declared as we are using the more or less generic
 * include/asm-ppc64/tlb.h file -- tgall
 */
mmu_gather_t mmu_gathers[NR_CPUS];

/* Trim the page-table quicklists: free pgd/pmd/pte pages until the
 * cache falls back below the low watermark. */
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist)
				free_page((unsigned long)pgd_alloc_one_fast(0)), ++freed;
			if (pmd_quicklist)
				free_page((unsigned long)pmd_alloc_one_fast(0, 0)), ++freed;
			if (pte_quicklist)
				free_page((unsigned long)pte_alloc_one_fast(0, 0)), ++freed;
		} while (pgtable_cache_size > low);
	}
	return freed;
}
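/*
 * Illustrative sketch, not part of the original file: in 2.4-era kernels
 * do_check_pgt_cache() is typically driven from the architecture's idle
 * loop. The shape of such a caller is shown below; the watermark values
 * and the name check_pgt_cache_sketch are assumptions for illustration.
 */
#if 0	/* illustrative only */
static int pgt_cache_water[2] = { 25, 50 };	/* assumed low/high watermarks */

static void check_pgt_cache_sketch(void)
{
	/* Trim the quicklists back toward the low watermark once they
	 * have grown past the high watermark. */
	if (pgtable_cache_size > pgt_cache_water[1])
		do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
}
#endif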
/* Dump a summary of memory state (free, reserved, shared, cached
 * pages) to the console. */
void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6dkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!atomic_read(&mem_map[i].count))
			free++;
		else
			shared += atomic_read(&mem_map[i].count) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%d pages in page table cache\n", (int)pgtable_cache_size);
	show_buffers();
}

/* Fill in the sysinfo structure for the sysinfo() syscall. */
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);
	val->totalhigh = 0;
	val->freehigh = 0;
	val->mem_unit = PAGE_SIZE;
}

void *ioremap(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_PPC_ISERIES
	return (void *)addr;
#else
	if (mem_init_done && (addr >> 60UL)) {
		if (IS_EEH_TOKEN_DISABLED(addr))
			return IO_TOKEN_TO_ADDR(addr);
		return (void *)addr;	/* already mapped address or EEH token. */
	}
	return __ioremap(addr, size, _PAGE_NO_CACHE);
#endif
}

extern struct vm_struct *get_im_area(unsigned long size);

void *__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
{
	unsigned long pa, ea, i;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END (0xE000001fffffffff)
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if (size == 0)
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_im_area(size);
		if (area == 0)
			return NULL;
		ea = (unsigned long)(area->addr);
	} else {
		ea = ioremap_bot;
		ioremap_bot += size;
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);
	if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
		flags |= _PAGE_GUARDED;

	for (i = 0; i < size; i += PAGE_SIZE) {
		map_io_page(ea+i, pa+i, flags);
	}

	return (void *)(ea + (addr & ~PAGE_MASK));
}

void iounmap(void *addr)
{
#ifdef CONFIG_PPC_ISERIES
	/* iSeries I/O Remap is a noop */
	return;
#else
	/* DRENG / PPPBBB todo */
	return;
#endif
}

/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
static void map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long vsid;

	if (mem_init_done) {
		spin_lock(&ioremap_mm.page_table_lock);
		pgdp = pgd_offset_i(ea);
		pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
		ptep = pte_alloc(&ioremap_mm, pmdp, ea);
		pa = absolute_to_phys(pa);
		set_pte(ptep, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
		spin_unlock(&ioremap_mm.page_table_lock);
	} else {
		/* If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		vsid = get_kernel_vsid(ea);
		make_pte(htab_data.htab,
			(vsid << 28) | (ea & 0xFFFFFFF),	/* va (NOT the ea) */
			pa,
			_PAGE_NO_CACHE | _PAGE_GUARDED | PP_RWXX,
			htab_data.htab_hash_mask, 0);
	}
}
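/*
 * Illustrative sketch, not part of the original file: a minimal example of
 * how a driver would consume the ioremap()/iounmap() pair defined above.
 * The physical address, mapping size, and register offset are made-up
 * values, and example_device_probe is a hypothetical name.
 */
#if 0	/* illustrative only */
static int example_device_probe(void)
{
	void *regs;

	/* Map 4KB of device registers; ioremap() applies _PAGE_NO_CACHE,
	 * so the mapping is suitable for memory-mapped I/O. */
	regs = ioremap(0xf8000000UL, 0x1000);
	if (regs == NULL)
		return -ENOMEM;

	/* Touch a (hypothetical) 32-bit control register at offset 0x10. */
	*(volatile u32 *)((char *)regs + 0x10) = 0x1;

	iounmap(regs);	/* currently a no-op on this port (see above) */
	return 0;
}
#endif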
void
local_flush_tlb_all(void)
{
	/* Implemented to just flush the vmalloc area.
	 * vmalloc is the only user of flush_tlb_all.
	 */
	local_flush_tlb_range(NULL, VMALLOC_START, VMALLOC_END);
}

void
local_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->map_count) {
		struct vm_area_struct *mp;
		for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
			local_flush_tlb_range(mm, mp->vm_start, mp->vm_end);
	} else
		/* MIKEC: It is not clear why this is needed */
		/* paulus: it is needed to clear out stale HPTEs
		 * when an address space (represented by an mm_struct)
		 * is being destroyed. */
		local_flush_tlb_range(mm, USER_START, USER_END);
}

/*
 * Callers should hold the mm->page_table_lock
 */
void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long context = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep;

	switch (REGION_ID(vmaddr)) {
	case VMALLOC_REGION_ID:
		pgd = pgd_offset_k(vmaddr);
		break;
	case IO_REGION_ID:
		pgd = pgd_offset_i(vmaddr);
		break;
	case USER_REGION_ID:
		pgd = pgd_offset(vma->vm_mm, vmaddr);
		context = vma->vm_mm->context;
		break;
	default:
		panic("local_flush_tlb_page: invalid region 0x%016lx", vmaddr);
	}

	if (!pgd_none(*pgd)) {
		pmd = pmd_offset(pgd, vmaddr);
