
📄 iseries_setup.c

📁 This Linux source code is quite comprehensive and essentially complete. It is written in C. Due to time constraints I have not tested it myself, but even as reference material it is very good.
💻 C
📖 Page 1 of 2
	u32 currChunk, thisChunk, absChunk;
	u32 currDword;
	u32 chunkBit;
	u64 map;
	struct MemoryBlock mb[32];
	unsigned long numMemoryBlocks, curBlock;

	/* Chunk size on iSeries is 256K bytes */
	totalChunks = (u32)HvLpConfig_getMsChunks();
	klimit = msChunks_alloc(klimit, totalChunks, 1UL<<18);

	/* Get absolute address of our load area
	 * and map it to physical address 0
	 * This guarantees that the loadarea ends up at physical 0
	 * otherwise, it might not be returned by PLIC as the first
	 * chunks
	 */
	loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
	loadAreaSize = itLpNaca.xLoadAreaChunks;

	/* Only add the pages already mapped here.
	 * Otherwise we might add the hpt pages
	 * The rest of the pages of the load area
	 * aren't in the HPT yet and can still
	 * be assigned an arbitrary physical address
	 */
	if ( (loadAreaSize * 64) > HvPagesToMap )
		loadAreaSize = HvPagesToMap / 64;
	loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;

	/* TODO Do we need to do something if the HPT is in the 64MB load area?
	 * This would be required if the itLpNaca.xLoadAreaChunks includes
	 * the HPT size
	 */
	printk( "Mapping load area - physical addr = 0000000000000000\n"
		"                    absolute addr = %016lx\n",
			chunk_to_addr(loadAreaFirstChunk) );
	printk( "Load area size %dK\n", loadAreaSize*256 );

	for (	nextPhysChunk = 0;
		nextPhysChunk < loadAreaSize;
		++nextPhysChunk ) {
		msChunks.abs[nextPhysChunk] = loadAreaFirstChunk+nextPhysChunk;
	}

	/* Get absolute address of our HPT and remember it so
	 * we won't map it to any physical address
	 */
	hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
	hptSizePages = (u32)(HvCallHpt_getHptPages());
	hptSizeChunks = hptSizePages >> (msChunks.chunk_shift-PAGE_SHIFT);
	hptLastChunk = hptFirstChunk + hptSizeChunks - 1;

	printk( "HPT absolute addr = %016lx, size = %dK\n",
			chunk_to_addr(hptFirstChunk), hptSizeChunks*256 );

	/* Fill in the htab_data structure */

	/* Fill in size of hashed page table */
	num_ptegs = hptSizePages * (PAGE_SIZE/(sizeof(HPTE)*HPTES_PER_GROUP));
	htab_data.htab_num_ptegs = num_ptegs;
	htab_data.htab_hash_mask = num_ptegs - 1;

	/* The actual hashed page table is in the hypervisor,
	 * we have no direct access
	 */
	htab_data.htab = NULL;

	/* Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 * correctly.
	 */
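	/* iSeries_process_mainstore_vpd() fills mb[] with up to 32
	 * descriptors that map each logical chunk range of main store
	 * to its absolute (real) chunk range.  Each 64-bit word of the
	 * access map fetched below covers 64 chunks, most significant
	 * bit first; chunks that fall inside the HPT or the load area
	 * are skipped because they were already handled above.
	 */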
	numMemoryBlocks = iSeries_process_mainstore_vpd( mb, 32 );

	/* Process the main store access map from the hypervisor
	 * to build up our physical -> absolute translation table
	 */
	curBlock = 0;
	currChunk = 0;
	currDword = 0;
	moreChunks = totalChunks;

	while ( moreChunks ) {
		map = HvCallSm_get64BitsOfAccessMap( itLpNaca.xLpIndex,
						     currDword );
		thisChunk = currChunk;
		while ( map ) {
			chunkBit = map >> 63;
			map <<= 1;
			if ( chunkBit ) {
				--moreChunks;
				while ( thisChunk >= mb[curBlock].logicalEnd ) {
					++curBlock;
					if ( curBlock >= numMemoryBlocks )
						panic("out of memory blocks");
				}
				if ( thisChunk < mb[curBlock].logicalStart )
					panic("memory block error");

				absChunk = mb[curBlock].absStart +
					( thisChunk - mb[curBlock].logicalStart );

				if ( ( ( absChunk < hptFirstChunk ) ||
				       ( absChunk > hptLastChunk ) ) &&
				     ( ( absChunk < loadAreaFirstChunk ) ||
				       ( absChunk > loadAreaLastChunk ) ) ) {
					msChunks.abs[nextPhysChunk] = absChunk;
					++nextPhysChunk;
				}
			}
			++thisChunk;
		}
		++currDword;
		currChunk += 64;
	}

	/* main store size (in chunks) is
	 *   totalChunks - hptSizeChunks
	 * which should be equal to
	 *   nextPhysChunk
	 */
	naca->physicalMemorySize = chunk_to_addr(nextPhysChunk);

	/* Bolt kernel mappings for all of memory */
	iSeries_bolt_kernel( 0, naca->physicalMemorySize );

	lmb_init();
	lmb_add( 0, naca->physicalMemorySize );
	lmb_analyze();	/* ?? */
	lmb_reserve( 0, __pa(klimit) );

	/*
	 * Hardcode to GP size.  I am not sure where to get this info. DRENG
	 */
	naca->slb_size = 64;
}

/*
 * Set up the variables that describe the cache line sizes
 * for this machine.
 */
static void __init setup_iSeries_cache_sizes(void)
{
	unsigned i,n;
	unsigned procIx = get_paca()->xLpPaca.xDynHvPhysicalProcIndex;

	naca->iCacheL1LineSize = xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
	naca->dCacheL1LineSize = xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
	naca->iCacheL1LinesPerPage = PAGE_SIZE / naca->iCacheL1LineSize;
	naca->dCacheL1LinesPerPage = PAGE_SIZE / naca->dCacheL1LineSize;

	i = naca->iCacheL1LineSize;
	n = 0;
	while ((i=(i/2))) ++n;
	naca->iCacheL1LogLineSize = n;

	i = naca->dCacheL1LineSize;
	n = 0;
	while ((i=(i/2))) ++n;
	naca->dCacheL1LogLineSize = n;

	printk( "D-cache line size = %d  (log = %d)\n",
			(unsigned)naca->dCacheL1LineSize,
			(unsigned)naca->dCacheL1LogLineSize );
	printk( "I-cache line size = %d  (log = %d)\n",
			(unsigned)naca->iCacheL1LineSize,
			(unsigned)naca->iCacheL1LogLineSize );
}

/*
 * Bolt the kernel addr space into the HPT
 */
static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
{
	unsigned long pa;
	unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
	HPTE hpte;

	for (pa=saddr; pa < eaddr; pa+=PAGE_SIZE) {
		unsigned long ea = (unsigned long)__va(pa);
		unsigned long vsid = get_kernel_vsid( ea );
		unsigned long va = ( vsid << 28 ) | ( pa & 0xfffffff );
		unsigned long vpn = va >> PAGE_SHIFT;
		unsigned long slot = HvCallHpt_findValid( &hpte, vpn );

		if (hpte.dw0.dw0.v) {
			/* HPTE exists, so just bolt it */
			HvCallHpt_setSwBits(slot, 0x10, 0);
		} else {
			/* No HPTE exists, so create a new bolted one */
			make_pte(NULL, va, (unsigned long)__v2a(ea),
				 mode_rw, 0, 0);
		}
	}
}
#endif /* CONFIG_PPC_ISERIES */

extern unsigned long ppc_proc_freq;
extern unsigned long ppc_tb_freq;

/*
 * Document me.
 */
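/*
 * iSeries_setup_arch() below allocates and zeroes an LP event stack
 * from bootmem, registers it with the hypervisor via
 * HvCallEvent_setLpEventStack(), initializes the xItLpQueue pointers
 * into that stack, computes the processor and time base frequencies
 * from the processor VPD (exported as ppc_proc_freq and ppc_tb_freq),
 * and prints the partition's processor information.
 */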
void __init
iSeries_setup_arch(void)
{
	void *	eventStack;
	unsigned procIx = get_paca()->xLpPaca.xDynHvPhysicalProcIndex;

	/* Setup the Lp Event Queue */

	/* Allocate a page for the Event Stack
	 * The hypervisor wants the absolute real address, so
	 * we subtract out the KERNELBASE and add in the
	 * absolute real address of the kernel load area
	 */
	eventStack = alloc_bootmem_pages( LpEventStackSize );

	memset( eventStack, 0, LpEventStackSize );

	/* Invoke the hypervisor to initialize the event stack */
	HvCallEvent_setLpEventStack( 0, eventStack, LpEventStackSize );

	/* Initialize fields in our Lp Event Queue */
	xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
	xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
	xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
					(LpEventStackSize - LpEventMaxSize);
	xItLpQueue.xIndex = 0;

	/* Compute processor frequency */
	procFreqHz = (((1UL<<34) * 1000000) / xIoHriProcessorVpd[procIx].xProcFreq );
	procFreqMhz = procFreqHz / 1000000;
	procFreqMhzHundreths = (procFreqHz/10000) - (procFreqMhz*100);
	ppc_proc_freq = procFreqHz;

	/* Compute time base frequency */
	tbFreqHz = (((1UL<<32) * 1000000) / xIoHriProcessorVpd[procIx].xTimeBaseFreq );
	tbFreqMhz = tbFreqHz / 1000000;
	tbFreqMhzHundreths = (tbFreqHz/10000) - (tbFreqMhz*100);
	ppc_tb_freq = tbFreqHz;

	printk("Max  logical processors = %d\n",
			itVpdAreas.xSlicMaxLogicalProcs );
	printk("Max physical processors = %d\n",
			itVpdAreas.xSlicMaxPhysicalProcs );
	printk("Processor frequency = %lu.%02lu\n",
			procFreqMhz,
			procFreqMhzHundreths );
	printk("Time base frequency = %lu.%02lu\n",
			tbFreqMhz,
			tbFreqMhzHundreths );
	printk("Processor version = %x\n",
			xIoHriProcessorVpd[procIx].xPVR );
}

/*
 * void iSeries_setup_residual(struct seq_file *m)
 *
 * Description:
 *   This routine pretty-prints CPU information gathered from the VPD
 *   for use in /proc/cpuinfo.
 *
 * Input(s):
 *   *m - seq_file into which the CPU data is printed.
 */
void iSeries_setup_residual(struct seq_file *m)
{
	seq_printf(m,"clock\t\t: %lu.%02luMHz\n",
		procFreqMhz, procFreqMhzHundreths );
	seq_printf(m,"time base\t: %lu.%02luMHz\n",
		tbFreqMhz, tbFreqMhzHundreths );
	seq_printf(m,"i-cache\t\t: %d\n",
		naca->iCacheL1LineSize);
	seq_printf(m,"d-cache\t\t: %d\n",
		naca->dCacheL1LineSize);
}

void iSeries_get_cpuinfo(struct seq_file *m)
{
	seq_printf(m,"machine\t\t: 64-bit iSeries Logical Partition\n");
}

/*
 * Document me.
 * and Implement me.
 */
int
iSeries_get_irq(struct pt_regs *regs)
{
	/* -2 means ignore this interrupt */
	return -2;
}

/*
 * Document me.
 */
void
iSeries_restart(char *cmd)
{
	mf_reboot();
}

/*
 * Document me.
 */
void
iSeries_power_off(void)
{
	mf_powerOff();
}

/*
 * Document me.
 */
void
iSeries_halt(void)
{
	mf_powerOff();
}

/*
 * Nothing to do here.
 */
void __init
iSeries_time_init(void)
{
	/* Nothing to do */
}

/* JDH Hack */
unsigned long jdh_time = 0;

extern void setup_default_decr(void);

/*
 * void __init iSeries_calibrate_decr()
 *
 * Description:
 *   This routine retrieves the internal processor frequency from the VPD,
 *   and sets up the kernel timer decrementer based on that value.
 *
 */
void __init
iSeries_calibrate_decr(void)
{
	unsigned long	cyclesPerUsec;
	struct div_result divres;

	/* Compute decrementer (and TB) frequency
	 * in cycles/sec
	 */
	cyclesPerUsec = ppc_tb_freq / 1000000;	/* cycles / usec */
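	/* Illustrative example only (hypothetical values): with a
	 * 512 MHz time base and HZ == 100, the assignment below gives
	 * tb_ticks_per_jiffy = 512000000 / 100 = 5120000 ticks.
	 */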
	/* Set the amount to refresh the decrementer by.  This
	 * is the number of decrementer ticks it takes for
	 * 1/HZ seconds.
	 */
	tb_ticks_per_jiffy = ppc_tb_freq / HZ;

#if 0
	/* TEST CODE FOR ADJTIME */
	tb_ticks_per_jiffy += tb_ticks_per_jiffy / 5000;
	/* END OF TEST CODE */
#endif

	/*
	 * tb_ticks_per_sec = freq; would give better accuracy
	 * but tb_ticks_per_sec = tb_ticks_per_jiffy*HZ; assures
	 * that jiffies (and xtime) will match the time returned
	 * by do_gettimeofday.
	 */
	tb_ticks_per_sec   = tb_ticks_per_jiffy * HZ;
	tb_ticks_per_usec = cyclesPerUsec;
	div128_by_32( 1024*1024, 0, tb_ticks_per_sec, &divres );
	tb_to_xs = divres.result_low;
	setup_default_decr();
}

void __init
iSeries_progress( char * st, unsigned short code )
{
	printk( "Progress: [%04x] - %s\n", (unsigned)code, st );
	if ( !piranha_simulator && mf_initialized ) {
	    if (code != 0xffff)
		mf_displayProgress( code );
	    else
		mf_clearSrc();
	}
}

void iSeries_fixup_klimit(void)
{
	/* Change klimit to take into account any ram disk that may be included */
	if (naca->xRamDisk)
		klimit = KERNELBASE + (u64)naca->xRamDisk +
			(naca->xRamDiskSize * PAGE_SIZE);
	else {
		/* No ram disk was included - check and see if there was an embedded system map */
		/* Change klimit to take into account any embedded system map */
		if (embedded_sysmap_end)
			klimit = KERNELBASE + ((embedded_sysmap_end+4095) &
					0xfffffffffffff000);
	}
}
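iSeries_fixup_klimit() rounds embedded_sysmap_end up to the next 4 KB page boundary before adding it to KERNELBASE. A minimal standalone sketch of just that rounding step, using a hypothetical offset value:

#include <stdio.h>

int main(void)
{
	/* Hypothetical byte offset; not taken from the kernel code above */
	unsigned long embedded_sysmap_end = 0x12345;

	/* Adding 4095 and clearing the low 12 bits rounds up to a 4 KB boundary */
	unsigned long rounded = (embedded_sysmap_end + 4095) & 0xfffffffffffff000UL;

	printf("0x%lx rounds up to 0x%lx\n", embedded_sysmap_end, rounded);	/* 0x12345 -> 0x13000 */
	return 0;
}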
