
init.c

Linux 2.4.20 kernel source. It can be patched with RTLinux 3.2 to form a real-time Linux system, after which the kernel is recompiled. The listing below is an excerpt of the kernel's sparc64 memory-initialization code (paging_init(), mem_init(), and related helpers).
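Before the init.c listing itself, a note on the RTLinux side of the description: once the 2.4.20 kernel has been patched with RTLinux 3.2 and rebuilt, real-time work is typically done in a small kernel module using RTLinux's POSIX-style thread API. The sketch below is a minimal, illustrative periodic-task module modelled on the stock RTLinux 3.x examples; it is not part of the init.c listing that follows, and the 500 ms period, thread name, and printed message are arbitrary choices for illustration.

/* Minimal sketch of an RTLinux 3.x periodic real-time thread module.
 * Assumes the RTLinux POSIX-style API (pthread_make_periodic_np,
 * pthread_wait_np, rtl_printf) provided by the rtlinux-3.2 patch.
 */
#include <rtl.h>
#include <time.h>
#include <pthread.h>

static pthread_t thread;

static void *rt_task(void *arg)
{
	/* Run this thread every 500 ms, starting now. */
	pthread_make_periodic_np(pthread_self(), gethrtime(), 500000000);

	while (1) {
		pthread_wait_np();		/* sleep until the next period */
		rtl_printf("rt_task: periodic tick\n");
	}
	return NULL;
}

int init_module(void)
{
	return pthread_create(&thread, NULL, rt_task, NULL);
}

void cleanup_module(void)
{
	pthread_cancel(thread);
	pthread_join(thread, NULL);
}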
extern void sun_serial_setup(void);
extern void cheetah_ecache_flush_init(void);

static unsigned long last_valid_pfn;

void __init paging_init(void)
{
	extern pmd_t swapper_pmd_dir[1024];
	extern unsigned int sparc64_vpte_patchme1[1];
	extern unsigned int sparc64_vpte_patchme2[1];
	unsigned long alias_base = phys_base + PAGE_OFFSET;
	unsigned long second_alias_page = 0;
	unsigned long pt, flags, end_pfn, pages_avail;
	unsigned long shift = alias_base - ((unsigned long)KERNBASE);
	unsigned long real_end;

	set_bit(0, mmu_context_bmap);

	real_end = (unsigned long)&_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image)
		real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
#endif

	/* We assume physical memory starts at some 4mb multiple,
	 * if this were not true we wouldn't boot up to this point
	 * anyways.
	 */
	pt  = phys_base | _PAGE_VALID | _PAGE_SZ4MB;
	pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
	__save_and_cli(flags);
	if (tlb_type == spitfire) {
		__asm__ __volatile__(
	"	stxa	%1, [%0] %3\n"
	"	stxa	%2, [%5] %4\n"
	"	membar	#Sync\n"
	"	flush	%%g6\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
		: /* No outputs */
		: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
		: "memory");
		if (real_end >= KERNBASE + 0x340000) {
			second_alias_page = alias_base + 0x400000;
			__asm__ __volatile__(
		"	stxa	%1, [%0] %3\n"
		"	stxa	%2, [%5] %4\n"
		"	membar	#Sync\n"
		"	flush	%%g6\n"
		"	nop\n"
		"	nop\n"
		"	nop\n"
			: /* No outputs */
			: "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
			  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
			: "memory");
		}
	} else if (tlb_type == cheetah) {
		__asm__ __volatile__(
	"	stxa	%1, [%0] %3\n"
	"	stxa	%2, [%5] %4\n"
	"	membar	#Sync\n"
	"	flush	%%g6\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
		: /* No outputs */
		: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
		: "memory");
		if (real_end >= KERNBASE + 0x340000) {
			second_alias_page = alias_base + 0x400000;
			__asm__ __volatile__(
		"	stxa	%1, [%0] %3\n"
		"	stxa	%2, [%5] %4\n"
		"	membar	#Sync\n"
		"	flush	%%g6\n"
		"	nop\n"
		"	nop\n"
		"	nop\n"
			: /* No outputs */
			: "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
			  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
			: "memory");
		}
	}
	__restore_flags(flags);

	/* Now set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pgd_set(&swapper_pg_dir[0], swapper_pmd_dir + (shift / sizeof(pgd_t)));

	sparc64_vpte_patchme1[0] |= (pgd_val(init_mm.pgd[0]) >> 10);
	sparc64_vpte_patchme2[0] |= (pgd_val(init_mm.pgd[0]) & 0x3ff);
	flushi((long)&sparc64_vpte_patchme1[0]);

	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

#ifdef CONFIG_SUN_SERIAL
	/* This does not logically belong here, but we need to
	 * call it at the moment we are able to use the bootmem
	 * allocator.
	 */
	sun_serial_setup();
#endif

	/* Inherit non-locked OBP mappings. */
	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.
	 * We need to do a quick peek here to see if we are on StarFire
	 * or not, so setup_tba can setup the IRQ globals correctly (it
	 * needs to get the hard smp processor id correctly).
	 */
	{
		extern void setup_tba(int);
		setup_tba(this_is_starfire);
	}

	inherit_locked_prom_mappings(1);

	/* We only created DTLB mapping of this stuff. */
	spitfire_flush_dtlb_nucleus_page(alias_base);
	if (second_alias_page)
		spitfire_flush_dtlb_nucleus_page(second_alias_page);

	__flush_tlb_all();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = end_pfn - (phys_base >> PAGE_SHIFT);
		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		free_area_init_node(0, NULL, NULL, zones_size,
				    phys_base, zholes_size);
	}

	device_scan();
}

/* Ok, it seems that the prom can allocate some more memory chunks
 * as a side effect of some prom calls we perform during the
 * boot sequence.  My most likely theory is that it is from the
 * prom_set_traptable() call, and OBP is allocating a scratchpad
 * for saving client program register state etc.
 */
void __init sort_memlist(struct linux_mlist_p1275 *thislist)
{
	int swapi = 0;
	int i, mitr;
	unsigned long tmpaddr, tmpsize;
	unsigned long lowest;

	for (i = 0; thislist[i].theres_more != 0; i++) {
		lowest = thislist[i].start_adr;
		for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
			if (thislist[mitr].start_adr < lowest) {
				lowest = thislist[mitr].start_adr;
				swapi = mitr;
			}
		if (lowest == thislist[i].start_adr)
			continue;
		tmpaddr = thislist[swapi].start_adr;
		tmpsize = thislist[swapi].num_bytes;
		for (mitr = swapi; mitr > i; mitr--) {
			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
		}
		thislist[i].start_adr = tmpaddr;
		thislist[i].num_bytes = tmpsize;
	}
}

void __init rescan_sp_banks(void)
{
	struct linux_prom64_registers memlist[64];
	struct linux_mlist_p1275 avail[64], *mlist;
	unsigned long bytes, base_paddr;
	int num_regs, node = prom_finddevice("/memory");
	int i;

	num_regs = prom_getproperty(node, "available",
				    (char *) memlist, sizeof(memlist));
	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
	for (i = 0; i < num_regs; i++) {
		avail[i].start_adr = memlist[i].phys_addr;
		avail[i].num_bytes = memlist[i].reg_size;
		avail[i].theres_more = &avail[i + 1];
	}
	avail[i - 1].theres_more = NULL;
	sort_memlist(avail);

	mlist = &avail[0];
	i = 0;
	bytes = mlist->num_bytes;
	base_paddr = mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != NULL){
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;
}

static void __init taint_real_pages(void)
{
	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
	int i;

	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
		saved_sp_banks[i].base_addr =
			sp_banks[i].base_addr;
		saved_sp_banks[i].num_bytes =
			sp_banks[i].num_bytes;
	}

	rescan_sp_banks();

	/* Find changes discovered in the sp_bank rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
		unsigned long old_start, old_end;

		old_start = saved_sp_banks[i].base_addr;
		old_end = old_start +
			saved_sp_banks[i].num_bytes;
		while (old_start < old_end) {
			int n;

			for (n = 0; sp_banks[n].num_bytes; n++) {
				unsigned long new_start, new_end;

				new_start = sp_banks[n].base_addr;
				new_end = new_start + sp_banks[n].num_bytes;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit (old_start >> 22,
						 sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *)
		__alloc_bootmem(i << 3, SMP_CACHE_BYTES, 0UL);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + phys_base;
	last = PAGE_ALIGN((unsigned long)&_end) -
		((unsigned long) KERNBASE);
	last += PAGE_OFFSET + phys_base;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT);
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

	num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = _alloc_pages(GFP_KERNEL, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);
	clear_page(page_address(mem_map_zero));

	codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

#ifndef CONFIG_SMP
	{
		/* Put empty_pg_dir on pgd_quicklist */
		extern pgd_t empty_pg_dir[1024];
		unsigned long addr = (unsigned long)empty_pg_dir;
		unsigned long alias_base = phys_base + PAGE_OFFSET -
			(long)(KERNBASE);

		memset(empty_pg_dir, 0, sizeof(empty_pg_dir));
		addr += alias_base;
		free_pgd_fast((pgd_t *)addr);
		num_physpages++;
	}
#endif

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah)
		cheetah_ecache_flush_init();
}

void free_initmem (void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(&__init_begin));
	initend = (unsigned long)(&__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(phys_base)) -
			((unsigned long) KERNBASE));
		p = virt_to_page(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
	}
}
#endif

void si_meminfo(struct sysinfo *val)
{
	val->totalram = num_physpages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);

	/* These are always zero on Sparc64. */
	val->totalhigh = 0;
	val->freehigh = 0;

	val->mem_unit = PAGE_SIZE;
}
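One detail worth noting from mem_init() and taint_real_pages() above: sparc64_valid_addr_bitmap records valid physical memory at 4 MB granularity, which is why both functions shift addresses right by 22 bits (1 << 22 = 4 MB) before calling set_bit(). The sketch below shows how such a bitmap can be consulted; the helper name paddr_is_valid() is hypothetical and not part of init.c (the real kernel wraps an equivalent check in its kern_addr_valid() macro).

/* Hypothetical helper (illustration only, not from init.c): query the
 * 4 MB-granularity validity bitmap built in mem_init()/taint_real_pages().
 * One bit covers one 4 MB chunk, so the bit index is paddr >> 22.
 */
#include <asm/bitops.h>

extern unsigned long *sparc64_valid_addr_bitmap;

static inline int paddr_is_valid(unsigned long paddr)
{
	return test_bit(paddr >> 22, sparc64_valid_addr_bitmap) != 0;
}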
