📄 page.c
字号:
upperBound = (LL)(((module*reservedMemoryPerNode)%totalMemPerProc) + reservedMemoryPerNode); #ifdef DEBUG_PAGE_VERBOSE CPUPrint("LOOK: lower reserved bound is %llx, " "curr = %llx, upper bound is %llx\n", lowerBound, ((LL)(((cachepage[(module * numFrames) + pageFrame] - 1) << LOG_ONE_MB) + (pageFrame << SOLO_PAGE_SHIFT))), upperBound);#endif while ((lowerBound <= (((cachepage[(module * numFrames) + pageFrame] - 1) << LOG_ONE_MB) + (pageFrame << SOLO_PAGE_SHIFT) + applicationMemoryStart)) && ((((cachepage[(module * numFrames) + pageFrame] - 1) << LOG_ONE_MB) + (pageFrame << SOLO_PAGE_SHIFT) + applicationMemoryStart) < upperBound)) { /* complicated, but the gist of it is that you fell into the reserved memory portion of memory. increment the stripe so that this is no longer true */#ifdef DEBUG_PAGE_VERBOSE CPUPrint("LOOK: lower reserved bound is %llx, " "curr = %llx, upper bound is %llx\n",lowerBound, ((LL)(((cachepage[(module * numFrames) + pageFrame] - 1) << LOG_ONE_MB) + (pageFrame << SOLO_PAGE_SHIFT) + applicationMemoryStart)), upperBound); CPUPrint("NextCacheablePage reserved memory conflict: " "module = %d, pageFrame = %X, count = %x, MemPerProc = %X\n", module, pageFrame, cachepage[(module * numFrames)+pageFrame], totalMemPerProc); #endif ASSERT(cachepage[(module * numFrames) + pageFrame] != (totalMemPerProc/ONE_MEGABYTE)); cachepage[(module * numFrames) + pageFrame]++; } } }#ifdef DEBUG_PAGE_VERBOSE CPUPrint( "AllocatePage: module = %d, pageFrame = %X, count = %x, MemPerProc = %X\n", module, pageFrame, cachepage[(module * numFrames)+pageFrame], totalMemPerProc);#endif offset = (((PA)(((cachepage[(module * numFrames) + pageFrame] - 1) << 20) + (pageFrame << SOLO_PAGE_SHIFT))) + applicationMemoryStart) + (va & SOLO_PAGE_OFFSET_MASK); if ((PA) offset > (PA)soloLockBase) { CPUError("Mipsy: application brk() would have encroached on lock space:\nCurrent heap top: %8.8x soloLockBase: %8.8x\n", offset, soloLockBase); } return SOLO_FORM_PA(module,0,offset);}/* *************************************************************************** *//* * InitMemAllocation initializes the per-module page counters * *//* *************************************************************************** */static voidInitMemAllocation(void){ unsigned i; unsigned j; unsigned numFrames; int membits; char *buf; applicationMemoryStart = 0; totalProcs = TOTAL_CPUS; ParamLookup(&totalMemPerProc, "MEMSYS.FLASH.TotalMemPerProc", PARAM_INT); /* * Compute values needed for compress/decompress of Solo 64 bit addresses * into 32bit PAs. We do this by dividing the 32 bits into a node and * memory offset field. To preserve the cache coloring we do we require * that the memory offset value at least enough bits for the cache color. */ membits = NumBits(totalMemPerProc+applicationMemoryStart); if (membits < NumBits(ONE_MEGABYTE-1)) { membits = NumBits(ONE_MEGABYTE-1); } if ((NumBits(totalProcs)+membits) > sizeof(PA)*8) { CPUError("%d processors with %d bytes of memory is too much\n", totalProcs, totalMemPerProc); } soloPACompressNodeShift = membits; soloPACompressOffsetMask = (1 << soloPACompressNodeShift)-1; soloTotalMemory = totalMemPerProc; soloLockBase = totalMemPerProc - 64*1024; /* Reserve 32K for space 1 */ soloBarrierBase = totalMemPerProc - 32*1024; /* Reserve 32K for space 2 */ ParamLookup(&buf, "MEMSYS.FLASH.PageAllocationPolicy", PARAM_STRING); if (strcasecmp(buf, "ROUND_ROBIN") == 0) { allocationPolicy = ROUND_ROBIN; } else if (strcasecmp(buf, "FIRST_TOUCHED") == 0) { allocationPolicy = FIRST_TOUCHED; } else { CPUError("Unknown page allocation policy %s\n", buf); } numFrames = ONE_MEGABYTE >> SOLO_PAGE_SHIFT; cachepage = (unsigned *) malloc (numFrames*sizeof(int)*totalProcs); if (cachepage == (unsigned *) NULL) { CPUError("Out of memory in SoloInitMemAllocation"); } nextPageFrame = (unsigned *) malloc(sizeof(unsigned)*totalProcs); if (nextPageFrame == (unsigned *) NULL) { CPUError("Out of memory in SoloInitMemAllocation"); } /* starting page for each cluster's cacheable space */ for (i = 0; i < totalProcs; i++) { /* NOTE: This isn't so good for P > 256 processors since numFrames * is 256 */ nextPageFrame[i] = ((numFrames/totalProcs) % numFrames) * i; for (j=0; j < numFrames; j++) { cachepage[(i * numFrames) + j] = 0; } }}typedef struct mapping_struct mapping_pair;struct mapping_struct{ VA va; /* virtual address */ SoloPA pa; /* physical address */ int flavor; /* flavor of mapping */ mapping_pair *v_next; /* ptr to next virtual mapping_pair; nil if last mapping_pair */ mapping_pair *p_next; /* ptr to next physical mapping_pair; nil if last mapping_pair */};static mapping_pair *v_map[MAPPING_HASH_SIZE]; /* array of lists that make up tlb */static mapping_pair *p_map[MAPPING_HASH_SIZE]; /* array of lists for reverse map *//* WARNING -> this routine is terribly slow due to the compiler not handling long long worth squat. I've hard coded the routine in assembly *//*#define mapping_hash(_a) (((_a) >> SOLO_PAGE_SHIFT) % MAPPING_HASH_SIZE)*/extern unsigned mapping_hash(unsigned long long addr);/* InitPageTrans initializes address translation. This is not necessary if *//* a map is about to be read in, as ReadMap also performs the necessary *//* initialization. */static voidInitPageTrans(void){ int i; for (i = 0; i < MAPPING_HASH_SIZE; i++) { v_map[i] = 0; p_map[i] = 0; }}/* NewMapping takes care of the map hash table for a new virtual-to- *//* physical mapping. It inserts the new mapping at the head of the *//* appropriate virtual hash bin. The user is responsible for ensuring *//* that the mapping does not already exist before calling this routine. */voidNewMapping(VA v_addr, SoloPA p_addr, uint flavor)/* cpu #(private); SHAREDBLK(shared) *//* virtual address *//* physical block number *//* address space type */{ VA my_virtual; SoloPA physical; mapping_pair *current; unsigned v_hash_index, p_hash_index; VA alignedVA; FLASHAddress alignedPA;#ifdef DEBUG_PAGE_VERBOSE CPUPrint("New Mapping: va %#x pa %#16.16llx\n", v_addr, p_addr);#endif my_virtual = v_addr & SOLO_PAGE_NUMBER_MASK; physical = p_addr & (SoloPA)SOLO_PAGE_NUMBER_MASK_LONG; /* create new mapping mapping_pair */ if ((current = (mapping_pair *) malloc(sizeof(mapping_pair))) == 0) { CPUError("malloc of new mapping mapping_pair failed\n"); } current->va = my_virtual; current->pa = physical; current->flavor = flavor; /* insert new mapping into hash table */ v_hash_index = mapping_hash(my_virtual); current->v_next = v_map[v_hash_index]; v_map[v_hash_index] = current; p_hash_index = mapping_hash(physical); current->p_next = p_map[p_hash_index]; p_map[p_hash_index] = current; #ifdef DEBUG_PAGE_VERBOSE CPUPrint("New Mapping: VA: %8.8x PA: %16.16llx\n", v_addr, p_addr);#endif }/* GetPhysicalAddr performs a virtual-to-physical address translation *//* using the map hash table. TRUE is returned if the translation *//* was successful. */boolGetPhysicalAddr(VA v_addr, SoloPA *p_addr, uint *flavor){ VA my_virtual; mapping_pair *current; unsigned hash_index; my_virtual = v_addr & SOLO_PAGE_NUMBER_MASK; hash_index = mapping_hash(my_virtual); /* search for this virtual page's mapping in hash table */ current = v_map[hash_index]; while (current != 0) { if (current->va == my_virtual) { *p_addr = current->pa + (v_addr & SOLO_PAGE_OFFSET_MASK); *flavor = current->flavor; return TRUE; } current = current->v_next; } return FALSE;}/* GetMemoryAddr performs a physical-to-virtual address translation *//* using the map hash table. TRUE is returned if the translation *//* was successful. */char *SoloGetMemoryAddr(SoloPA p_addr){ SoloPA physical; mapping_pair *current; unsigned hash_index; unsigned offset; VA v_addr; /* ADDED PURELY BECAUSE OF THE COMA PROBLEM * I DESCRIBE BELOW */ unsigned fake_coma_i = 0; unsigned fake_coma_offset; unsigned fake_coma_space = 0; SoloPA new_node_id;/* // because we initialize all directory entries to be EXCL at // (fake) boot time, when you access data, it may return and force // displacement of something that has not even been accessed // yet (this displaced data would have its state at EXCL due to // the initialization, so it would be thought that replacement is // necessary). In this case, it will not have a valid VA->PA translation // so the above test will fail and the simulation will die // unnecessarily. The hack I am using to avoid this is to pick // a different PA to access if the current one has no translation. // We print a warning to make sure that if it is a real bug we // catch it.*/ physical = p_addr & (SoloPA)SOLO_PAGE_NUMBER_MASK_LONG; hash_index = mapping_hash(physical);#ifdef DEBUG_PAGE_VERBOSE CPUPrint("GetVirtualAddr: physical: %16.16llx\n", p_addr);#endif#ifdef T5_MODEL v_addr = (VA) (p_addr & 0xffffffffLL); /* Extract low bits... */ CPUPrint("GetVirtualAddr: Returning VA==PA: %8.8x\n", v_addr); return (char*) v_addr;#else /* search for this virtual page's mapping in hash table */ current = p_map[hash_index]; while (current != 0) {#ifdef DEBUG_PAGE_VERBOSE CPUPrint("GetVirtualAddr: VA: %8.8x PA: %16.16llx args: physical %llx\n", current->va, current->pa, physical);#endif if (current->pa == physical) { v_addr = current->va; offset = (unsigned) (p_addr & (SoloPA)SOLO_PAGE_OFFSET_MASK_LONG); v_addr |= offset; return (char *) v_addr; } else current = current->p_next; } if (!strcmp(protocol,"COMA")) { /* CPUWarning("Bad argument passed to GetMemoryAddress\n");*/ /* // CPUWarning("Bad argument passed to GetMemoryAddress, addr = %16.16llx\n", // physical); // COMA HACK...SEE COMMENT AT TOP OF FUNCTION // we actually choose a known valid VA so we do // not accidentally overwrite a valid PA. We // should do extra checks to make sure we are // doing this because of our replacement problem // rather than because there is an actual bug */ while (fake_coma_i < totalProcs){ fake_coma_space = SOLO_PA_SPACE(p_addr); fake_coma_offset = SOLO_PA_OFFSET(p_addr); /* we choose a different physical address: same node number, */ /* CPUPrint("Checking for COMA rplc case, node %u, curr_addr = %16.16llx\n", fake_coma_i, physical); */ p_addr = SOLO_FORM_PA(fake_coma_i, fake_coma_space, fake_coma_offset); physical = p_addr & (SoloPA)SOLO_PAGE_NUMBER_MASK; hash_index = mapping_hash(physical); /* search for this virtual page's mapping in hash table */ current = p_map[hash_index]; while (current != 0) { if (current->pa == physical) { v_addr = current->va; offset = (unsigned) (p_addr & (SoloPA)SOLO_PAGE_OFFSET_MASK); v_addr |= offset; /* return (char *) v_addr; */ /* here, we realize this is probably due to the * COMA replacement problem, so we return the * fake VA that we set up above */ return (char *)&(junk.comaHACKADDR); } else current = current->p_next; } fake_coma_i++; } } return NULL;#endif /* T5_MODEL */ }static intNumBits(unsigned int number){ int i; for (i = 0; i < 32; i++) { if ((1 << i) > number) return i; } return 32;}/* RemoveMapping removes an existing mapping for a VA. */static boolRemoveMapping(VA v_addr){ VA my_virtual; mapping_pair *current, *prev; int hash_index; bool found; my_virtual = v_addr & SOLO_PAGE_NUMBER_MASK; hash_index = mapping_hash(my_virtual); /* search for this virtual page's mapping in hash table */ current = v_map[hash_index]; prev = 0; /* Indicate first element is head */ found = FALSE; while ((!found) && (current != 0)) { if (current->va == my_virtual) { found = TRUE; break; } else { prev = current; current = current->v_next; } } if (found) {#ifdef DEBUG_PAGE_VERBOSE CPUPrint("RemoveMapping: VA: %8.8x PA: %16.16llx\n", current->va, current->pa);#endif if (prev == 0) { v_map[hash_index] = current->v_next; /* Remove current from hash bucket head */ } else { prev->v_next = current->v_next; /* Remove current from prev's chain */ } } return found;}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -