⭐ 虫虫下载站

📄 init.c

📁 This Linux source code is fairly comprehensive and essentially complete; it is written in C. Due to time constraints I have not tested it myself, but even as reference material it is very useful.
💻 C
📖 Page 1 of 2
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)&_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *) (PAGE_MASK & pgd_val(*pg_dir));

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_val(*pg_dir) = _PAGE_TABLE | (unsigned long) pmd;
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *) (PAGE_MASK & pmd_val(*pmd));
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_val(*pmd) = _PAGE_TABLE |
					(unsigned long) pg_table;

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

#if !defined(CONFIG_KWDB) && !defined(CONFIG_STI_CONSOLE)
#warning STI console should explicitly allocate executable pages but does not
/* KWDB needs to write kernel text when setting break points.
**
** The right thing to do seems like KWDB modify only the pte which
** has a break point on it...otherwise we might mask worse bugs.
*/
				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (address >= ro_start && address < ro_end
							&& address != fv_addr
							&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
#endif
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	printk("pagetable_init\n");

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}

void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);

#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) (PAGE_MASK & pgd_val(*pg_dir));

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	pgd_val(*pg_dir) = _PAGE_TABLE | (unsigned long) pmd;
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) (PAGE_MASK & pmd_val(*pmd));
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) pg_table;

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}

extern void flush_tlb_all_local(void);

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local();

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0, };

		zones_size[ZONE_DMA] = pmem_ranges[i].pages;
		free_area_init_node(i, NODE_DATA(i), NULL, zones_size,
				(pmem_ranges[i].start_pfn << PAGE_SHIFT), 0);
	}

#ifdef CONFIG_DISCONTIGMEM
	/*
	 * Initialize support for virt_to_page() macro.
	 *
	 * Note that MAX_ADDRESS is the largest virtual address that
	 * we can map. However, since we map all physical memory into
	 * the kernel address space, it also has an effect on the maximum
	 * physical address we can map (MAX_ADDRESS - PAGE_OFFSET).
	 */

	maxchunkmap = MAX_ADDRESS >> CHUNKSHIFT;
	chunkmap = (unsigned char *)alloc_bootmem(maxchunkmap);

	for (i = 0; i < maxchunkmap; i++)
		chunkmap[i] = BADCHUNK;

	for (i = 0; i < npmem_ranges; i++) {

		ADJ_NODE_MEM_MAP(i) = NODE_MEM_MAP(i) - pmem_ranges[i].start_pfn;
		{
			unsigned long chunk_paddr;
			unsigned long end_paddr;
			int chunknum;

			chunk_paddr = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			end_paddr = chunk_paddr + (pmem_ranges[i].pages << PAGE_SHIFT);
			chunk_paddr &= CHUNKMASK;

			chunknum = (int)CHUNKNUM(chunk_paddr);
			while (chunk_paddr < end_paddr) {
				if (chunknum >= maxchunkmap)
					goto badchunkmap1;
				if (chunkmap[chunknum] != BADCHUNK)
					goto badchunkmap2;
				chunkmap[chunknum] = (unsigned char)i;
				chunk_paddr += CHUNKSZ;
				chunknum++;
			}
		}
	}

	return;

badchunkmap1:
	panic("paging_init: Physical address exceeds maximum address space!\n");
badchunkmap2:
	panic("paging_init: Collision in chunk map array. CHUNKSZ needs to be smaller\n");
#endif
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection id's, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space id's and
 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection id's, so that is the limiting factor.
 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static spinlock_t sid_lock = SPIN_LOCK_UNLOCKED;

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		if (free_space_ids == 0)
			BUG();
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	if (*dirty_space_offset & (1L << index))
		BUG(); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}

#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse = 0;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		if (recycle_inuse) {
			BUG();  /* FIXME: Use a semaphore/wait queue here */
		}
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
	flush_tlb_all_local();
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local();
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
#if 0
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		num_physpages++;
	}
#endif
}
#endif

void si_meminfo(struct sysinfo *val)
{
	val->totalram = num_physpages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);
	val->totalhigh = 0;
	val->freehigh = 0;
	val->mem_unit = PAGE_SIZE;

	return;
}
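The space-id code at the end of this listing (alloc_sid / free_sid / recycle_sids) is essentially a bitmap allocator with deferred frees: alloc_sid() claims a bit in space_id[], free_sid() only marks the corresponding bit in dirty_space_id[], and the next full TLB flush folds the dirty bits back into the free pool with XOR. Below is a minimal, userspace-only sketch of that idea; the demo_* names, the tiny 64-id pool, and the absence of locking are assumptions made for illustration only, not the kernel's actual code.

/*
 * Simplified model of the space-id recycling scheme above:
 * alloc marks a bit, free marks the same bit "dirty", and a
 * simulated TLB flush XORs the dirty bits back into the free pool,
 * as recycle_sids() does. Illustrative only.
 */
#include <stdio.h>

#define DEMO_NR_IDS 64
#define DEMO_BITS_PER_WORD (8 * (int)sizeof(unsigned long))
#define DEMO_WORDS (DEMO_NR_IDS / DEMO_BITS_PER_WORD)

static unsigned long demo_space_id[DEMO_WORDS] = { 1 };  /* id 0 reserved */
static unsigned long demo_dirty_id[DEMO_WORDS];
static int demo_free_ids = DEMO_NR_IDS - 1;

static int demo_alloc_sid(void)
{
	for (int i = 1; i < DEMO_NR_IDS; i++) {
		unsigned long mask = 1UL << (i % DEMO_BITS_PER_WORD);
		if (!(demo_space_id[i / DEMO_BITS_PER_WORD] & mask)) {
			demo_space_id[i / DEMO_BITS_PER_WORD] |= mask;
			demo_free_ids--;
			return i;
		}
	}
	return -1;  /* pool exhausted */
}

static void demo_free_sid(int id)
{
	/* Deferred free: the id stays unusable until the next "flush". */
	demo_dirty_id[id / DEMO_BITS_PER_WORD] |= 1UL << (id % DEMO_BITS_PER_WORD);
}

static void demo_recycle_sids(void)
{
	/* XOR clears exactly the bits that were allocated and then dirtied. */
	for (int i = 0; i < DEMO_WORDS; i++) {
		unsigned long w = demo_dirty_id[i];
		while (w) {             /* count the ids being returned */
			demo_free_ids++;
			w &= w - 1;
		}
		demo_space_id[i] ^= demo_dirty_id[i];
		demo_dirty_id[i] = 0;
	}
}

int main(void)
{
	int a = demo_alloc_sid(), b = demo_alloc_sid();
	printf("allocated %d and %d, free=%d\n", a, b, demo_free_ids);
	demo_free_sid(a);
	printf("after deferred free, free=%d\n", demo_free_ids);
	demo_recycle_sids();        /* plays the role of flush_tlb_all() */
	printf("after recycle, free=%d\n", demo_free_ids);
	return 0;
}

The deferred free mirrors the kernel's reasoning: a space id must not be handed out again until every TLB entry still tagged with it has been purged, so freed ids sit in the dirty set until the next full flush.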
