📄 init.c
		if (!pmd_none(*pmd)) {
			ptep = pte_offset(pmd, vmaddr);
			/* Check if HPTE might exist and flush it if so */
			if (pte_val(*ptep) & _PAGE_HASHPTE)
				flush_hash_page(context, vmaddr, ptep);
		}
	}
}

void
local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep;
	unsigned long pgd_end, pmd_end;
	unsigned long context;

	if ( start >= end )
		panic("flush_tlb_range: start (%016lx) greater than end (%016lx)\n", start, end );

	if ( REGION_ID(start) != REGION_ID(end) )
		panic("flush_tlb_range: start (%016lx) and end (%016lx) not in same region\n", start, end );

	context = 0;

	switch( REGION_ID(start) ) {
	case VMALLOC_REGION_ID:
		pgd = pgd_offset_k( start );
		break;
	case IO_REGION_ID:
		pgd = pgd_offset_i( start );
		break;
	case USER_REGION_ID:
		pgd = pgd_offset( mm, start );
		context = mm->context;
		break;
	default:
		panic("flush_tlb_range: invalid region for start (%016lx) and end (%016lx)\n", start, end);
	}

	do {
		pgd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
		if ( pgd_end > end )
			pgd_end = end;
		if ( !pgd_none( *pgd ) ) {
			pmd = pmd_offset( pgd, start );
			do {
				pmd_end = ( start + PMD_SIZE ) & PMD_MASK;
				if ( pmd_end > end )
					pmd_end = end;
				if ( !pmd_none( *pmd ) ) {
					ptep = pte_offset( pmd, start );
					do {
						if ( pte_val(*ptep) & _PAGE_HASHPTE )
							flush_hash_page( context, start, ptep );
						start += PAGE_SIZE;
						++ptep;
					} while ( start < pmd_end );
				} else
					start = pmd_end;
				++pmd;
			} while ( start < pgd_end );
		} else
			start = pgd_end;
		++pgd;
	} while ( start < end );
}

void __init free_initmem(void)
{
	unsigned long a;
	unsigned long num_freed_pages = 0;

#define FREESEC(START,END,CNT) do { \
	a = (unsigned long)(&START); \
	for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
		clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
		set_page_count(mem_map+MAP_NR(a), 1); \
		free_page(a); \
		CNT++; \
	} \
} while (0)

	FREESEC(__init_begin,__init_end,num_freed_pages);

	printk ("Freeing unused kernel memory: %ldk init\n",
		PGTOKB(num_freed_pages));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long xstart = start;
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(mem_map + MAP_NR(start));
		set_page_count(mem_map+MAP_NR(start), 1);
		free_page(start);
		totalram_pages++;
	}
	printk ("Freeing initrd memory: %ldk freed\n", (end - xstart) >> 10);
}
#endif

/*
 * Do very early mm setup.
 */
void __init mm_init_ppc64(void) {
	struct paca_struct *lpaca;
	unsigned long guard_page, index;

	ppc_md.progress("MM:init", 0);

	/* Reserve all contexts < FIRST_USER_CONTEXT for kernel use.
	 * The range of contexts [FIRST_USER_CONTEXT, NUM_USER_CONTEXT)
	 * are stored on a stack/queue for easy allocation and deallocation.
	 */
	mmu_context_queue.lock = SPIN_LOCK_UNLOCKED;
	mmu_context_queue.head = 0;
	mmu_context_queue.tail = NUM_USER_CONTEXT-1;
	mmu_context_queue.size = NUM_USER_CONTEXT;
	for(index=0; index < NUM_USER_CONTEXT ;index++) {
		mmu_context_queue.elements[index] = index+FIRST_USER_CONTEXT;
	}

	/* Setup guard pages for the Paca's */
	for (index = 0; index < NR_CPUS; index++) {
		lpaca = &paca[index];
		guard_page = ((unsigned long)lpaca) + 0x1000;
		ppc_md.hpte_updateboltedpp(PP_RXRX, guard_page);
	}

	ppc_md.progress("MM:exit", 0x211);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;

	PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: start\n");
	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = (unsigned long)__a2p(lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE));
	if( start == 0 ) {
		udbg_printf("do_init_bootmem: failed to allocate a bitmap.\n");
		udbg_printf("\tbootmap_pages = 0x%lx.\n", bootmap_pages);
		PPCDBG_ENTER_DEBUGGER();
	}

	PPCDBG(PPCDBG_MMINIT, "\tstart = 0x%lx\n", start);
	PPCDBG(PPCDBG_MMINIT, "\tbootmap_pages = 0x%lx\n", bootmap_pages);
	PPCDBG(PPCDBG_MMINIT, "\tphysicalMemorySize = 0x%lx\n", naca->physicalMemorySize);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
	PPCDBG(PPCDBG_MMINIT, "\tboot_mapsize = 0x%lx\n", boot_mapsize);

	/* add all physical memory to the bootmem map */
	for (i=0; i < lmb.memory.cnt ;i++) {
		unsigned long physbase, size;
		unsigned long type = lmb.memory.region[i].type;

		if ( type != LMB_MEMORY_AREA )
			continue;

		physbase = lmb.memory.region[i].physbase;
		size = lmb.memory.region[i].size;
		free_bootmem(physbase, size);
	}
	/* reserve the sections we're already using */
	for (i=0; i < lmb.reserved.cnt ;i++) {
		unsigned long physbase = lmb.reserved.region[i].physbase;
		unsigned long size = lmb.reserved.region[i].size;
#if 0 /* PPPBBB */
		if ( (physbase == 0) && (size < (16<<20)) ) {
			size = 16 << 20;
		}
#endif
		reserve_bootmem(physbase, size);
	}

	PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: end\n");
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], i;

	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	zones_size[0] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;
	free_area_init(zones_size);
}

extern unsigned long prof_shift;
extern unsigned long prof_len;
extern unsigned int * prof_buffer;

extern unsigned long dprof_shift;
extern unsigned long dprof_len;
extern unsigned int * dprof_buffer;

void initialize_paca_hardware_interrupt_stack(void);

void __init mem_init(void)
{
	extern char *sysmap;
	extern unsigned long sysmap_size;
	unsigned long addr;
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	unsigned long va_rtas_base = (unsigned long)__va(rtas.base);

	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	num_physpages = max_mapnr;	/* RAM is assumed contiguous */

	totalram_pages += free_all_bootmem();

	ifppcdebug(PPCDBG_MMINIT) {
		udbg_printf("mem_init: totalram_pages = 0x%lx\n", totalram_pages);
		udbg_printf("mem_init: va_rtas_base = 0x%lx\n", va_rtas_base);
		udbg_printf("mem_init: va_rtas_end = 0x%lx\n", PAGE_ALIGN(va_rtas_base+rtas.size));
		udbg_printf("mem_init: pinned start = 0x%lx\n", __va(0));
		udbg_printf("mem_init: pinned end = 0x%lx\n", PAGE_ALIGN(klimit));
	}

	if ( sysmap_size )
		for (addr = (unsigned long)sysmap;
		     addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
		     addr += PAGE_SIZE)
			SetPageReserved(mem_map + MAP_NR(addr));

	for (addr = KERNELBASE; addr <= (unsigned long)__va(lmb_end_of_DRAM());
	     addr += PAGE_SIZE) {
		if (!PageReserved(mem_map + MAP_NR(addr)))
			continue;
		if (addr < (ulong) etext)
			codepages++;
		else if (addr >= (unsigned long)&__init_begin
			 && addr < (unsigned long)&__init_end)
			initpages++;
		else if (addr < klimit)
			datapages++;
	}

	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
	       (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
	       codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
	       initpages<< (PAGE_SHIFT-10), PAGE_OFFSET,
	       (unsigned long)__va(lmb_end_of_DRAM()));
	mem_init_done = 1;

	/* set the last page of each hardware interrupt stack to be protected */
	initialize_paca_hardware_interrupt_stack();

#ifdef CONFIG_PPC_ISERIES
	create_virtual_bus_tce_table();
	/* HACK HACK This allows the iSeries profiling to use /proc/profile */
	prof_shift = dprof_shift;
	prof_len = dprof_len;
	prof_buffer = dprof_buffer;
#endif
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	clear_bit(PG_arch_1, &page->flags);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	if (page->mapping && !PageReserved(page) &&
	    !test_bit(PG_arch_1, &page->flags)) {
		__flush_dcache_icache(page_address(page));
		set_bit(PG_arch_1, &page->flags);
	}
}

void clear_user_page(void *page, unsigned long vaddr)
{
	clear_page(page);
}

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr)
{
	copy_page(vto, vfrom);
	__flush_dcache_icache(vto);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
}
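The bitmap-sizing comment in do_init_bootmem() above is easy to sanity-check in isolation. The standalone sketch below works through that arithmetic: one bitmap bit per page of RAM, with the byte count rounded up to whole pages. It is illustrative only; the 4 KB page size and 1 GB memory size are assumed values, and bootmap_pages_needed() is a hypothetical stand-in written from the formula in the comment, not the kernel's actual bootmem_bootmap_pages() implementation.

#include <stdio.h>

/* Assumed, illustrative constants -- not taken from this file. */
#define SKETCH_PAGE_SHIFT 12				/* 4 KB pages */
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)
#define SKETCH_DRAM_BYTES (1UL << 30)			/* pretend 1 GB of RAM */

/* Hypothetical stand-in for bootmem_bootmap_pages(): one bit per page
 * of memory, with the bitmap's byte count rounded up to whole pages. */
static unsigned long bootmap_pages_needed(unsigned long total_pages)
{
	unsigned long bitmap_bytes = (total_pages + 7) / 8;
	return (bitmap_bytes + SKETCH_PAGE_SIZE - 1) >> SKETCH_PAGE_SHIFT;
}

int main(void)
{
	unsigned long total_pages = SKETCH_DRAM_BYTES >> SKETCH_PAGE_SHIFT;
	/* 1 GB / 4 KB = 262144 pages -> 32768-byte bitmap -> 8 bitmap pages */
	printf("%lu pages of RAM need a %lu-page bootmem bitmap\n",
	       total_pages, bootmap_pages_needed(total_pages));
	return 0;
}

With these assumed numbers, 1 GB of RAM is 262144 pages, so the bitmap is 32768 bytes and occupies 8 pages. The round-up in the sketch covers the case where the bitmap's byte count is not an exact multiple of the page size; the "Add 1 additional page" remark in the original comment addresses the separate case where the allocation address is not page-aligned.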