
📄 init.c

📁 Source code for the lab textbook "Embedded System Design and Example Development II": multithreaded application programming, serial port programming, A/D interface lab, CAN bus communication lab, GPS communication lab, Linux kernel porting and compilation lab, IC card read/write lab, SD driver…
💻 C
📖 Page 1 of 2
/*
 * Listing continues from the previous page; the opening of the function
 * that issues this make_pte() call is not shown here.
 */
		make_pte(htab_data.htab,
			 (vsid << 28) | (ea & 0xFFFFFFF), // va (NOT the ea)
			 pa,
			 _PAGE_NO_CACHE | _PAGE_GUARDED | PP_RWXX,
			 htab_data.htab_hash_mask);
	}
}

void
local_flush_tlb_all(void)
{
	/* Implemented to just flush the vmalloc area.
	 * vmalloc is the only user of flush_tlb_all.
	 */
	local_flush_tlb_range( NULL, VMALLOC_START, VMALLOC_END );
}

void
local_flush_tlb_mm(struct mm_struct *mm)
{
	if ( mm->map_count ) {
		struct vm_area_struct *mp;
		for ( mp = mm->mmap; mp != NULL; mp = mp->vm_next )
			local_flush_tlb_range( mm, mp->vm_start, mp->vm_end );
	}
	else	/* MIKEC: It is not clear why this is needed */
		local_flush_tlb_range( mm, USER_START, USER_END );
}

/*
 * Callers should hold the mm->page_table_lock
 */
void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long context = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep;
	pte_t pte;

	switch( REGION_ID(vmaddr) ) {
	case VMALLOC_REGION_ID:
		pgd = pgd_offset_k( vmaddr );
		break;
	case IO_REGION_ID:
		pgd = pgd_offset_i( vmaddr );
		break;
	case BOLTED_REGION_ID:
		pgd = pgd_offset_b( vmaddr );
		break;
	case USER_REGION_ID:
		pgd = pgd_offset( vma->vm_mm, vmaddr );
		context = vma->vm_mm->context;
		break;
	default:
		panic("local_flush_tlb_page: invalid region 0x%016lx", vmaddr);
	}

	if (!pgd_none(*pgd)) {
		pmd = pmd_offset(pgd, vmaddr);
		if (!pmd_none(*pmd)) {
			ptep = pte_offset(pmd, vmaddr);
			/* Check if HPTE might exist and flush it if so */
			pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
			if ( pte_val(pte) & _PAGE_HASHPTE ) {
				flush_hash_page(context, vmaddr, pte);
			}
		}
	}
}

void
local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep;
	pte_t pte;
	unsigned long pgd_end, pmd_end;
	unsigned long context;

	if ( start >= end )
		panic("flush_tlb_range: start (%016lx) greater than end (%016lx)\n", start, end );

	if ( REGION_ID(start) != REGION_ID(end) )
		panic("flush_tlb_range: start (%016lx) and end (%016lx) not in same region\n", start, end );

	context = 0;

	switch( REGION_ID(start) ) {
	case VMALLOC_REGION_ID:
		pgd = pgd_offset_k( start );
		break;
	case IO_REGION_ID:
		pgd = pgd_offset_i( start );
		break;
	case BOLTED_REGION_ID:
		pgd = pgd_offset_b( start );
		break;
	case USER_REGION_ID:
		pgd = pgd_offset( mm, start );
		context = mm->context;
		break;
	default:
		panic("flush_tlb_range: invalid region for start (%016lx) and end (%016lx)\n", start, end);
	}

	do {
		pgd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
		if ( pgd_end > end )
			pgd_end = end;
		if ( !pgd_none( *pgd ) ) {
			pmd = pmd_offset( pgd, start );
			do {
				pmd_end = ( start + PMD_SIZE ) & PMD_MASK;
				if ( pmd_end > end )
					pmd_end = end;
				if ( !pmd_none( *pmd ) ) {
					ptep = pte_offset( pmd, start );
					do {
						if ( pte_val(*ptep) & _PAGE_HASHPTE ) {
							pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
							if ( pte_val(pte) & _PAGE_HASHPTE )
								flush_hash_page( context, start, pte );
						}
						start += PAGE_SIZE;
						++ptep;
					} while ( start < pmd_end );
				}
				else
					start = pmd_end;
				++pmd;
			} while ( start < pgd_end );
		}
		else
			start = pgd_end;
		++pgd;
	} while ( start < end );
}

void __init free_initmem(void)
{
	unsigned long a;
	unsigned long num_freed_pages = 0;

#define FREESEC(START,END,CNT) do { \
	a = (unsigned long)(&START); \
	for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
		clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
		set_page_count(mem_map+MAP_NR(a), 1); \
		free_page(a); \
		CNT++; \
	} \
} while (0)

	FREESEC(__init_begin,__init_end,num_freed_pages);

	printk ("Freeing unused kernel memory: %ldk init\n",
		PGTOKB(num_freed_pages));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long initrd_size = end - start;	/* saved before the loop advances start */

	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(mem_map + MAP_NR(start));
		set_page_count(mem_map+MAP_NR(start), 1);
		free_page(start);
		totalram_pages++;
	}
	printk ("Freeing initrd memory: %ldk freed\n", initrd_size >> 10);
}
#endif

/* Reserve all contexts < FIRST_USER_CONTEXT for kernel use.
 * The range of contexts [FIRST_USER_CONTEXT, NUM_USER_CONTEXT)
 * are stored on a stack for easy allocation and deallocation.
 */
void
init_context_stack(void)
{
	mm_context_t top;

	mmu_context_stack.lock = SPIN_LOCK_UNLOCKED;
	mmu_context_stack.top = FIRST_USER_CONTEXT;
	for (top = 0; top < FIRST_USER_CONTEXT; top++) {
		mmu_context_stack.stack[top] = NO_CONTEXT;
	}
	for (top = FIRST_USER_CONTEXT; top < NUM_USER_CONTEXT; top++) {
		mmu_context_stack.stack[top] = top;
	}
}

/*
 * Do very early mm setup.
 */
void __init mm_init_ppc64(void)
{
	ppc_md.progress("MM:init", 0);

	/* Reserve all contexts < FIRST_USER_CONTEXT for kernel use.
	 * The range of contexts [FIRST_USER_CONTEXT, NUM_USER_CONTEXT)
	 * are stored on a stack for easy allocation and deallocation.
	 */
	init_context_stack();

	ppc_md.progress("MM:exit", 0x211);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;

	PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: start\n");
	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = (unsigned long)__a2p(lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE));
	if( start == 0 ) {
		udbg_printf("do_init_bootmem: failed to allocate a bitmap.\n");
		udbg_printf("\tbootmap_pages = 0x%lx.\n", bootmap_pages);
		PPCDBG_ENTER_DEBUGGER();
	}

	PPCDBG(PPCDBG_MMINIT, "\tstart               = 0x%lx\n", start);
	PPCDBG(PPCDBG_MMINIT, "\tbootmap_pages       = 0x%lx\n", bootmap_pages);
	PPCDBG(PPCDBG_MMINIT, "\tphysicalMemorySize  = 0x%lx\n", naca->physicalMemorySize);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
	PPCDBG(PPCDBG_MMINIT, "\tboot_mapsize        = 0x%lx\n", boot_mapsize);

	/* add all physical memory to the bootmem map */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long physbase = lmb.memory.region[i].physbase;
		unsigned long size = lmb.memory.region[i].size;
		free_bootmem(physbase, size);
	}
	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long physbase = lmb.reserved.region[i].physbase;
		unsigned long size = lmb.reserved.region[i].size;
#if 0 /* PPPBBB */
		if ( (physbase == 0) && (size < (16<<20)) ) {
			size = 16 << 20;
		}
#endif
		reserve_bootmem(physbase, size);
	}

	PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: end\n");
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], i;

	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	zones_size[0] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;
	free_area_init(zones_size);
}

void __init mem_init(void)
{
	extern char *sysmap;
	extern unsigned long sysmap_size;
	unsigned long addr;
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	unsigned long va_rtas_base = (unsigned long)__va(rtas.base);

	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	num_physpages = max_mapnr;	/* RAM is assumed contiguous */

	totalram_pages += free_all_bootmem();

	ifppcdebug(PPCDBG_MMINIT) {
		udbg_printf("mem_init: totalram_pages = 0x%lx\n", totalram_pages);
		udbg_printf("mem_init: va_rtas_base   = 0x%lx\n", va_rtas_base);
		udbg_printf("mem_init: va_rtas_end    = 0x%lx\n", PAGE_ALIGN(va_rtas_base+rtas.size));
		udbg_printf("mem_init: pinned start   = 0x%lx\n", __va(0));
		udbg_printf("mem_init: pinned end     = 0x%lx\n", PAGE_ALIGN(klimit));
	}

	if ( sysmap_size )
		for (addr = (unsigned long)sysmap;
		     addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size);
		     addr += PAGE_SIZE)
			SetPageReserved(mem_map + MAP_NR(addr));

	for (addr = KERNELBASE; addr <= (unsigned long)__va(lmb_end_of_DRAM());
	     addr += PAGE_SIZE) {
		if (!PageReserved(mem_map + MAP_NR(addr)))
			continue;
		if (addr < (ulong) etext)
			codepages++;
		else if (addr >= (unsigned long)&__init_begin
			 && addr < (unsigned long)&__init_end)
			initpages++;
		else if (addr < klimit)
			datapages++;
	}

	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, __va(lmb_end_of_DRAM()));

	mem_init_done = 1;

#ifdef CONFIG_PPC_ISERIES
	create_virtual_bus_tce_table();
#endif
}
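
The make_pte() call near the top of the listing hashes a virtual address built from the segment's VSID, not the raw effective address: the low 28 bits of the EA (the offset inside a 256 MB segment) are kept, and the VSID replaces the upper bits. The standalone sketch below shows only that arithmetic; the vsid and ea values are made up purely for illustration.

#include <stdio.h>

int main(void)
{
	/* Hypothetical values, chosen only to illustrate the arithmetic. */
	unsigned long long vsid = 0x123456ULL;            /* VSID for the segment containing ea */
	unsigned long long ea   = 0xC000000000042A80ULL;  /* effective address */

	unsigned long long offset = ea & 0xFFFFFFFULL;     /* low 28 bits: offset within the 256 MB segment */
	unsigned long long va     = (vsid << 28) | offset; /* the "va (NOT the ea)" handed to make_pte() */

	printf("ea     = 0x%016llx\n", ea);
	printf("offset = 0x%07llx\n", offset);
	printf("va     = 0x%016llx\n", va);
	return 0;
}

The point of the construction is that two different effective addresses can map to the same hashed VA only if they share both a VSID and a segment offset, which is what the hashed page table is indexed by.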
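do_init_bootmem() sizes the bootmem bitmap at one bit per physical page, i.e. (Total Memory) / PAGE_SIZE / BITS_PER_BYTE bytes, rounded up to whole pages, as its comment describes. The following standalone sketch mirrors that arithmetic rather than the exact bootmem_bootmap_pages() helper, and assumes 4 KB pages and a hypothetical 512 MB of RAM.

#include <stdio.h>

#define PAGE_SHIFT 12                    /* 4 KB pages, as assumed here */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Rough equivalent of the sizing described in do_init_bootmem():
 * one bit per physical page, rounded up to whole pages of bitmap. */
static unsigned long bootmap_pages_for(unsigned long total_mem_bytes)
{
	unsigned long total_pages  = total_mem_bytes >> PAGE_SHIFT;
	unsigned long bitmap_bytes = (total_pages + 7) / 8;        /* one bit per page */
	return (bitmap_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;       /* round up to pages */
}

int main(void)
{
	unsigned long mem = 512UL << 20;   /* hypothetical 512 MB of RAM */
	printf("bootmem bitmap needs %lu page(s) for %lu MB\n",
	       bootmap_pages_for(mem), mem >> 20);
	return 0;
}

For 512 MB this works out to 131072 page frames, a 16 KB bitmap, and therefore 4 bitmap pages, which is the kind of allocation lmb_alloc() is asked to carve out before init_bootmem() is called.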
