📄 init.c
						++ptep;
					} while (start < pmd_end);
				} else
					start = pmd_end;
				++pmd;
			} while (start < pgd_end);
		} else
			start = pgd_end;
		++pgd;
	} while (start < end);
}

void __init free_initmem(void)
{
	unsigned long a;
	unsigned long num_freed_pages = 0;

#define FREESEC(START,END,CNT) do { \
	a = (unsigned long)(&START); \
	for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
		clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
		set_page_count(mem_map+MAP_NR(a), 1); \
		free_page(a); \
		CNT++; \
	} \
} while (0)

	FREESEC(__init_begin,__init_end,num_freed_pages);

	printk("Freeing unused kernel memory: %ldk init\n",
	       PGTOKB(num_freed_pages));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long xstart = start;

	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(mem_map + MAP_NR(start));
		set_page_count(mem_map + MAP_NR(start), 1);
		free_page(start);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - xstart) >> 10);
}
#endif

/*
 * Do very early mm setup.
 */
void __init mm_init_ppc64(void)
{
	struct paca_struct *lpaca;
	unsigned long guard_page, index;

	ppc_md.progress("MM:init", 0);

	/* Reserve all contexts < FIRST_USER_CONTEXT for kernel use.
	 * The range of contexts [FIRST_USER_CONTEXT, NUM_USER_CONTEXT)
	 * are stored on a stack/queue for easy allocation and deallocation.
	 */
	mmu_context_queue.lock = SPIN_LOCK_UNLOCKED;
	mmu_context_queue.head = 0;
	mmu_context_queue.tail = NUM_USER_CONTEXT - 1;
	mmu_context_queue.size = NUM_USER_CONTEXT;
	for (index = 0; index < NUM_USER_CONTEXT; index++) {
		mmu_context_queue.elements[index] = index + FIRST_USER_CONTEXT;
	}

	/* Set up guard pages for the pacas. */
	for (index = 0; index < NR_CPUS; index++) {
		lpaca = &paca[index];
		guard_page = ((unsigned long)lpaca) + 0x1000;
		ppc_md.hpte_updateboltedpp(PP_RXRX, guard_page);
	}

	ppc_md.progress("MM:exit", 0x211);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;

	PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: start\n");

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = (unsigned long)__a2p(lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE));
	if (start == 0) {
		udbg_printf("do_init_bootmem: failed to allocate a bitmap.\n");
		udbg_printf("\tbootmap_pages = 0x%lx.\n", bootmap_pages);
		PPCDBG_ENTER_DEBUGGER();
	}

	PPCDBG(PPCDBG_MMINIT, "\tstart = 0x%lx\n", start);
	PPCDBG(PPCDBG_MMINIT, "\tbootmap_pages = 0x%lx\n", bootmap_pages);
	PPCDBG(PPCDBG_MMINIT, "\tphysicalMemorySize = 0x%lx\n",
	       systemcfg->physicalMemorySize);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
	PPCDBG(PPCDBG_MMINIT, "\tboot_mapsize = 0x%lx\n", boot_mapsize);

	/* Add all physical memory to the bootmem map. */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long physbase, size;
		unsigned long type = lmb.memory.region[i].type;

		if (type != LMB_MEMORY_AREA)
			continue;

		physbase = lmb.memory.region[i].physbase;
		size = lmb.memory.region[i].size;
		free_bootmem(physbase, size);
	}

	/* Reserve the sections we're already using. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long physbase = lmb.reserved.region[i].physbase;
		unsigned long size = lmb.reserved.region[i].size;
#if 0	/* PPPBBB */
		if ((physbase == 0) && (size < (16 << 20))) {
			size = 16 << 20;
		}
#endif
		reserve_bootmem(physbase, size);
	}

	PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: end\n");
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], i;

	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	zones_size[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;
	free_area_init(zones_size);
}

void initialize_paca_hardware_interrupt_stack(void);

void __init mem_init(void)
{
	extern char *sysmap;
	extern unsigned long sysmap_size;
	unsigned long addr;
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	unsigned long va_rtas_base = (unsigned long)__va(rtas.base);

	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	num_physpages = max_mapnr;	/* RAM is assumed contiguous */

	totalram_pages += free_all_bootmem();

	ifppcdebug(PPCDBG_MMINIT) {
		udbg_printf("mem_init: totalram_pages = 0x%lx\n", totalram_pages);
		udbg_printf("mem_init: va_rtas_base = 0x%lx\n", va_rtas_base);
		udbg_printf("mem_init: va_rtas_end = 0x%lx\n",
			    PAGE_ALIGN(va_rtas_base + rtas.size));
		udbg_printf("mem_init: pinned start = 0x%lx\n", __va(0));
		udbg_printf("mem_init: pinned end = 0x%lx\n", PAGE_ALIGN(klimit));
	}

	if (sysmap_size)
		for (addr = (unsigned long)sysmap;
		     addr < PAGE_ALIGN((unsigned long)sysmap + sysmap_size);
		     addr += PAGE_SIZE)
			SetPageReserved(mem_map + MAP_NR(addr));

	for (addr = KERNELBASE; addr <= (unsigned long)__va(lmb_end_of_DRAM());
	     addr += PAGE_SIZE) {
		if (!PageReserved(mem_map + MAP_NR(addr)))
			continue;
		if (addr < (ulong) etext)
			codepages++;
		else if (addr >= (unsigned long)&__init_begin
			 && addr < (unsigned long)&__init_end)
			initpages++;
		else if (addr < klimit)
			datapages++;
	}

	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (unsigned long)__va(lmb_end_of_DRAM()));

	mem_init_done = 1;

	/* Set the last page of each hardware interrupt stack to be protected. */
	initialize_paca_hardware_interrupt_stack();

#ifdef CONFIG_PPC_ISERIES
	create_virtual_bus_tce_table();
#endif
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	clear_bit(PG_arch_1, &page->flags);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	if (page->mapping && !PageReserved(page) &&
	    !test_bit(PG_arch_1, &page->flags)) {
		__flush_dcache_icache(page_address(page));
		set_bit(PG_arch_1, &page->flags);
	}
}

void clear_user_page(void *page, unsigned long vaddr)
{
	clear_page(page);
	__flush_dcache_icache(page);
}

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr)
{
	copy_page(vto, vfrom);
	__flush_dcache_icache(vto);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
}

#ifdef CONFIG_SHARED_MEMORY_ADDRESSING
static spinlock_t shared_malloc_lock = SPIN_LOCK_UNLOCKED;
struct vm_struct *shared_list = NULL;
static struct vm_struct *get_shared_area(unsigned long size,
					 unsigned long flags);

void *shared_malloc(unsigned long size)
{
	pgprot_t prot;
	struct vm_struct *area;
	unsigned long ea;

	spin_lock(&shared_malloc_lock);

	printk("shared_malloc1 (no _PAGE_USER): addr = 0x%lx, size = 0x%lx\n",
	       SMALLOC_START, size);

	area = get_shared_area(size, 0);
	if (!area) {
		spin_unlock(&shared_malloc_lock);
		return NULL;
	}

	ea = (unsigned long) area->addr;

	prot = __pgprot(pgprot_val(PAGE_KERNEL));
	if (vmalloc_area_pages(VMALLOC_VMADDR(ea), size, GFP_KERNEL, prot)) {
		spin_unlock(&shared_malloc_lock);
		return NULL;
	}

	printk("shared_malloc: addr = 0x%lx, size = 0x%lx\n", ea, size);

	spin_unlock(&shared_malloc_lock);
	return (void *)ea;
}

void shared_free(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		printk(KERN_ERR "Trying to shared_free() bad address (%p)\n",
		       addr);
		return;
	}

	spin_lock(&shared_malloc_lock);

	printk("shared_free: addr = 0x%p\n", addr);

	/* Scan the memory list for an entry matching
	 * the address to be freed, get the size (in bytes)
	 * and free the entry.  The list lock is not dropped
	 * until the page table entries are removed.
	 */
	for (p = &shared_list; (tmp = *p); p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
			spin_unlock(&shared_malloc_lock);
			kfree(tmp);
			return;
		}
	}

	spin_unlock(&shared_malloc_lock);
	printk("shared_free: error\n");
}

static struct vm_struct *get_shared_area(unsigned long size,
					 unsigned long flags)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;

	/* Reserve a guard page and catch size overflow. */
	size += PAGE_SIZE;
	if (!size) {
		kfree(area);
		return NULL;
	}

	/* First-fit search of the address-ordered shared list. */
	addr = SMALLOC_START;
	for (p = &shared_list; (tmp = *p); p = &tmp->next) {
		if ((size + addr) < addr) {
			kfree(area);
			return NULL;
		}
		if (size + addr <= (unsigned long) tmp->addr)
			break;
		addr = tmp->size + (unsigned long) tmp->addr;
		if (addr > SMALLOC_END - size) {
			kfree(area);
			return NULL;
		}
	}

	if (addr + size > SMALLOC_END) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->next = *p;
	*p = area;
	return area;
}

int shared_task_mark(void)
{
	current->thread.flags |= PPC_FLAG_SHARED;
	printk("current->thread.flags = 0x%lx\n", current->thread.flags);

	return 0;
}

int shared_task_unmark(void)
{
	if (current->thread.flags & PPC_FLAG_SHARED) {
		current->thread.flags &= ~PPC_FLAG_SHARED;
		return 0;
	} else {
		return -1;
	}
}
#endif
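The shared-area allocator above is only reachable from other kernel code when CONFIG_SHARED_MEMORY_ADDRESSING is set. The following is a minimal usage sketch, not part of init.c: the caller functions, the shared_buf variable, and the 4-page size are hypothetical and chosen only to show how shared_malloc() and shared_free() pair up.

/* Hypothetical caller, for illustration only -- not part of init.c. */
#ifdef CONFIG_SHARED_MEMORY_ADDRESSING
static void *shared_buf;

static int example_setup(void)
{
	/* shared_malloc() reserves an extra guard page of address space
	 * internally and returns a kernel-mapped address inside
	 * [SMALLOC_START, SMALLOC_END), or NULL if the range is exhausted
	 * or mapping the pages fails. */
	shared_buf = shared_malloc(4 * PAGE_SIZE);
	if (!shared_buf)
		return -ENOMEM;
	return 0;
}

static void example_teardown(void)
{
	/* shared_free() looks the address up in shared_list, unmaps the
	 * pages, and drops the list entry; an address that was never
	 * returned by shared_malloc() only logs "shared_free: error". */
	shared_free(shared_buf);
	shared_buf = NULL;
}
#endif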