📄 init.c
			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
			pgdat_resize_unlock(NODE_DATA(i), &flags);
		}
	}
#endif

	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j, k;

		for (i = 0; i < npmem_ranges; i++) {
			for (j = 0; j < MAX_NR_ZONES; j++) {
				zl = NODE_DATA(i)->node_zonelists + j;

				printk("Zone list for zone %d on node %d: ", j, i);
				for (k = 0; zl->zones[k] != NULL; k++)
					printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id,
					       zl->zones[k]->name);
				printk("\n");
			}
		}
	}
#endif
}

static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr,
			     unsigned long size, pgprot_t pgprot)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)&_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),
								     PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (address >= ro_start && address < ro_end
						&& address != fv_addr
						&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}
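/*
 * Illustrative sketch (not part of the original file): map_pages()
 * walks a three-level table, so it splits a virtual address into a
 * middle-directory index and a page-table index exactly as below.
 * The *_example helpers are hypothetical names for illustration.
 */
static inline unsigned long pmd_index_example(unsigned long vaddr)
{
	/* bits above PMD_SHIFT, wrapped to the number of pmd entries */
	return (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

static inline unsigned long pte_index_example(unsigned long vaddr)
{
	/* page-frame bits, wrapped to the number of pte slots per table */
	return (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}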
/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

extern void flush_tlb_all_local(void);

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local();

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };

		/* We have an IOMMU, so all memory can go into a single
		   ZONE_DMA zone. */
		zones_size[ZONE_DMA] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can
		   initialize the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, NODE_DATA(i), zones_size,
				pmem_ranges[i].start_pfn, NULL);
	}
}
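/*
 * Worked example for the pfnnid_map fill above (illustrative, with a
 * hypothetical PFNNID_SHIFT of 16): a range with start_pfn 0x40000 and
 * 0x20000 pages covers pfns 0x40000-0x5ffff, so j runs from
 * 0x40000 >> 16 == 4 through 0x60000 >> 16 == 6, and pfnnid_map[4..6]
 * all record that range's node id for later pfn-to-node lookups.
 */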
#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection id's, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space id's and
 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection id's, so that is the limiting factor.
 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		if (free_space_ids == 0)
			BUG();
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	if (*dirty_space_offset & (1L << index))
		BUG(); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif
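/*
 * Worked example for the bitmap arithmetic above (illustrative): on a
 * 64-bit build (BITS_PER_LONG == 64, SHIFT_PER_LONG == 6), space id
 * index 70 lives in word 70 >> 6 == 1 under mask 1L << (70 & 63) ==
 * 1L << 6. alloc_sid() returns 70 << SPACEID_SHIFT, and free_sid()
 * recovers the index with the matching right shift. recycle_sids()
 * can clear dirty ids with XOR because each dirty bit is still set
 * in space_id[].
 */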
/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP
static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse = 0;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		if (recycle_inuse) {
			BUG();  /* FIXME: Use a semaphore/wait queue here */
		}
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local();
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
#if 0
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
			(end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		num_physpages++;
		totalram_pages++;
	}
#endif
}
#endif
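/*
 * Usage sketch (hypothetical caller, not part of this file): a context
 * allocator would pair alloc_sid() and free_sid() around an address
 * space's lifetime, with a later flush_tlb_all() recycling dirty ids.
 * The example_* names below are invented for illustration.
 */
static inline unsigned long example_context_alloc(void)
{
	return alloc_sid();	/* already shifted by SPACEID_SHIFT */
}

static inline void example_context_free(unsigned long space)
{
	free_sid(space);	/* marked dirty; reused after a full TLB purge */
}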