
qc64.c

Project: An operating system for the MIPS architecture
Language: C
void qc_cache_inval_page( int cpuNum, int idx )
{
   ASSERT( embra.emode == EMBRA_CACHE );
   /* Do nothing */
}

/* Always invalidate the previous virtual line.  That way, physical */
/* entries are aliased only once */
void set_qc_state( int cpuNum, VLN vline, PLN pline, int new_state )
{
   PA pAddr = SLINE2ADDR(pline);
#ifdef BROKEN
   ASSERT(!SIMFIREWALL_ON);  /* this function doesn't support the firewall yet */
#endif
   /* WATCHPOINTS */
   if (annWatchpoints == TRUE) {
      if (AnnFMRangeCheck(vline << log2SCACHE_LINE_SIZE,
                          ANNFM_LD_TYPE | ANNFM_ST_TYPE)) {
         new_state = MEM_INVALID;
      }
   }
   if (new_state == MEM_D_EXCLUSIVE &&
       TCcoherence_is_code(PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum), pAddr))) {
      new_state = MEM_D_SHARED;
   }
   switch( new_state ) {
   case MEM_INVALID:
      EMP[cpuNum].pa_p[pline] = PA_SET_INV;
      break;
   case MEM_D_EXCLUSIVE:
   case MEM_I_EXCLUSIVE:
      EMP[cpuNum].pa_p[pline] = PA_SET_DIRTY;
      break;
   case MEM_I_SHARED:
   case MEM_D_SHARED:
      EMP[cpuNum].pa_p[pline] = PA_SET_SHARED;
      break;
   }
}

/* XXX - Caller must check ASID and/or global bit */
void qc_cache_reload_page( int cpuNum, VA vLine, EntryLo lo0, EntryLo lo1 )
{
   PA pLine;
   /* Need to modify for multiple page sizes */
   if( IS_VALID(lo0) ) {
      for( pLine = ADDR2SLINE(TLBLO2ADDR(lo0));
           pLine < ADDR2SLINE(TLBLO2ADDR(lo0)) + LINES_PER_PAGE;
           pLine++, vLine++ ) {
         uint tag = EMP[cpuNum].cache_tag[SCACHE_INDEXOF(SLINE2ADDR(pLine))];
         if( CACHE_VALID( tag ) && CACHE_PLINE( tag ) == pLine ) {
            if( CACHE_EXCL( tag ) && IS_DIRTY(lo0) ) {
               set_qc_state(cpuNum, vLine, pLine, MEM_D_EXCLUSIVE);
            } else {
               /* Don't know I/D */
               set_qc_state(cpuNum, vLine, pLine, MEM_D_SHARED);
            }
         }
      }
   } else {
      vLine += LINES_PER_PAGE;
   }
   if( IS_VALID(lo1) ) {
      for( pLine = ADDR2SLINE(TLBLO2ADDR(lo1));
           pLine < ADDR2SLINE(TLBLO2ADDR(lo1)) + LINES_PER_PAGE;
           pLine++, vLine++ ) {
         uint tag = EMP[cpuNum].cache_tag[SCACHE_INDEXOF(SLINE2ADDR(pLine))];
         if( CACHE_VALID( tag ) && CACHE_PLINE( tag ) == pLine ) {
            if( CACHE_EXCL( tag ) && IS_DIRTY(lo1) ) {
               set_qc_state(cpuNum, vLine, pLine, MEM_D_EXCLUSIVE);
            } else {
               /* Don't know I/D */
               set_qc_state(cpuNum, vLine, pLine, MEM_D_SHARED);
            }
         }
      }
   }
}

void qc_map_page( int cpuNum, int idx )
{
   /* We do this in a lazy way for QC64 */
}

void qc_flush_etlb( int cpuNum )
{
}

void qc_erase_etlb( int cpuNum, EntryHi hi )
{
}

K0A non_excepting_tv( int cpuNum, VA vAddr )
{
   MA mAddr;
   if (IS_BACKDOOR(vAddr)) {
      return vAddr;
   }
   mAddr = (MA)Em_QC64Reload(vAddr, QC64_READ);
   if (mAddr)
      return MEMADDR_TO_K0(M_FROM_CPU(cpuNum), mAddr);
   return 0;
}

/* This function accesses true TLB state */
void qc_mmu_switch( int cpuNum, unsigned old_asid, unsigned new_asid,
                    uint forceSelfSwitch )
{
   int numTlbEntries;
   int i;
   EmbraState *P = EMP + cpuNum;
   /* Optimization - don't context switch to self (that is solid) */
   if( old_asid == new_asid )
      return;
   quick_ASID[cpuNum] = new_asid;
   numTlbEntries = P->numTlbEntries;
   for( i = 0; i < numTlbEntries; i++ ) {
      /* If the entry is (1) valid, (2) has the previous asid, and (3) does */
      /* not have the global bit set, then invalidate that page in the */
      /* quick check array */
      int global = IS_GLOBAL_HI( P->tlbEntry[i].Hi );
      if( GET_ASID( P->tlbEntry[i].Hi ) == old_asid && !global ) {
         if( embra.emode == EMBRA_CACHE ) {
            qc_cache_inval_page( cpuNum, i );
         }
         qc_tlb_inval_page( cpuNum, i );
      }
   }
   /* QC64Check(P, new_asid); */
}
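/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original qc64.c): the entry filter  */
/* qc_mmu_switch applies above, written as a standalone predicate.  Only    */
/* entries that are non-global and tagged with the outgoing ASID need to be */
/* invalidated; global mappings survive an address-space switch.  The       */
/* helper name is hypothetical; GET_ASID and IS_GLOBAL_HI are the macros    */
/* this file already uses.                                                  */
/* ------------------------------------------------------------------------ */
#if 0   /* example only */
static int qc_entry_needs_inval( EmbraState *P, int i, unsigned old_asid )
{
   /* Global entries match every ASID, so a context switch must not */
   /* invalidate them even if their ASID field happens to match.    */
   if( IS_GLOBAL_HI( P->tlbEntry[i].Hi ) )
      return 0;
   return GET_ASID( P->tlbEntry[i].Hi ) == old_asid;
}
#endif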
void qc_CheckForDuplicate( CPUState *P, int index )
{
   int j;
   Reg hi = P->tlbEntry[index].Hi & ~TLBHI_G;
   if (IS_UNMAPPED_TLBHI(hi))
      return;
   for( j = 0; j < P->numTlbEntries; j++ ) {
      Reg other = P->tlbEntry[j].Hi & ~TLBHI_G;
      if (j == index)
         continue;
      if (IS_UNMAPPED_TLBHI(other) || (other == 0))
         continue;
      if (other == (hi & ~TLBHI_G)) {
         CPUWarning("Duplicate tlb entry: new=0x%llx old=0x%llx indx=%d\n",
                    (Reg64)hi, (Reg64)other, j);
      }
   }
}

void qc_CheckForDuplicates( CPUState *P )
{
   Reg hiEntries[MAX_NTLBENTRIES];
   int i, j;
   for( i = 0; i < P->numTlbEntries; i++ ) {
      Reg hi = P->tlbEntry[i].Hi;
      hiEntries[i] = hi & ~TLBHI_G;
      if (IS_UNMAPPED_TLBHI(hi))
         continue;
      for( j = 0; j < i; j++ ) {
         ASSERT( hiEntries[j] != hi );
      }
   }
}

void EmFirewallChange( PA pAddr, uint grant, uint64 cpumask )
{
   MA mAddr = PHYS_TO_MEMADDR(M_FROM_CPU(EMP->myNum), pAddr);
   VA vAddr = PHYS_TO_K0(pAddr);
   uint cpu;
   ASSERT(embra.emode == EMBRA_PAGE);   /* this function doesn't support
                                         * cache mode yet
                                         */
   if (grant) {
      /* easy case: only need to update kseg0 that might be denied.
       * kseg2 and kuseg will get added on next qc miss
       */
      for (cpu = 0; cpu < TOTAL_CPUS; cpu++) {
         if ( !(cpumask & (1 << cpu)) )
            continue;
         if (!TCcoherence_is_code(PHYS_TO_MEMADDR(M_FROM_CPU(cpu), pAddr))) {
            RemoveAddrQC64(EMP + cpu, vAddr);
         }
      }
   } else {
      for (cpu = 0; cpu < TOTAL_CPUS; cpu++) {
         if ( !(cpumask & (1 << cpu)) )
            continue;
         /* Denying access that used to be granted.  Take it out of kseg0... */
         RemoveAddrQC64(EMP + cpu, vAddr);
      }
   }
}
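/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original qc64.c): the CPU-bitmask   */
/* walk that EmFirewallChange performs twice, factored out.  Note that the  */
/* original (1 << cpu) is an int-width shift, well-defined only for         */
/* cpu < 31 even though cpumask is declared uint64; a 64-bit-safe variant   */
/* widens the shift as below.  The callback type is hypothetical.           */
/* ------------------------------------------------------------------------ */
#if 0   /* example only */
typedef void (*QCCpuVisit)( int cpu, VA vAddr );

static void qc_for_each_masked_cpu( uint64 cpumask, VA vAddr, QCCpuVisit visit )
{
   uint cpu;
   for (cpu = 0; cpu < TOTAL_CPUS; cpu++) {
      if ( !(cpumask & ((uint64)1 << cpu)) )   /* 64-bit-safe shift */
         continue;
      visit( cpu, vAddr );
   }
}
#endif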
/****************************************************************************/
/* phys_mem_ref wrapper                                                     */
/* This implements the "fast reload" from the physically indexed quick      */
/* check array pQC                                                          */
/****************************************************************************/
/* Returning 0 means rewind QC, returning a value means use that value */
MA phys_mem_ref( VA vAddr, EmVQCMemState new_state, MA mAddr, int cpuNum )
{
   PLN pline;
   phys_info_t pLineInfo;
   PA pAddr;
   EmbraState *P = &EMP[cpuNum];
   MA retval;
   /* ASSERT( cpuNum < NUM_CPUS ); */
   if( VQC_INST( new_state ) ) {
      K0A k0Addr;
      vAddr = IN_BD( vAddr ) ? CLEAR_BD( vAddr ) + INST_SIZE : vAddr;
      k0Addr = non_excepting_tv(cpuNum, vAddr);    /* MMU lookup */
      if( k0Addr ) {
         pAddr = K0_TO_PHYS_REMAP(k0Addr, cpuNum);
      } else {
         /* TLB fault */
         retval = mem_ref( vAddr, new_state, cpuNum );
         ASSERT( !retval || (uint)retval > 0x1000 );
         return retval;
      }
   } else {
      if( (uint)mAddr < 0x1000 ) {
         /* TLB fault */
         retval = mem_ref( vAddr, new_state, cpuNum );
         ASSERT( !retval || (uint)retval > 0x1000 );
         return retval;
      }
      pAddr = MEMADDR_TO_PHYS( M_FROM_CPU(P->myNum), mAddr );
   }
   /* VQC missed */
   /* STAT_VQC(new_state); */
   pline = ADDR2SLINE(pAddr);
   pLineInfo = P->qc_p[pline];
   switch( new_state ) {
   case MEM_INVALID:
      ASSERT(0);
      break;
   case MEM_I_EXCLUSIVE:
      ASSERT(0);
      break;
   case MEM_D_EXCLUSIVE:
      /* Expensive Assert */
      /* ASSERT( P->qc_v[ADDR2SLINE(vAddr)] == MEM_INVALID ||
                 VQC_SHARED( P->qc_v[ADDR2SLINE(vAddr)] ) ); */
      if( PQC_DIRTY( pLineInfo ) &&
          ( PQC_VLINE( pLineInfo ) == ADDR2SLINE( vAddr ) ||
            !PQC_VLINE( pLineInfo ) )
          /* The problem is that the kernel can write a location, then */
          /* NOT set the dirty bit for the TLB entry which allows user */
          /* to write to this location */
          /* Returns 1 for K0 addresses */
          && Em_Is_Tlb_Writable(cpuNum, vAddr, CURRENT_ASID(cpuNum)) ) {
         /* VASSERT( cache_verify_excl(cpuNum, pline),
                     ("%d vAddr 0x%x pAddr 0x%x state %d",
                      cpuNum, vAddr, pAddr, new_state) );  EXP */
         set_qc_state( cpuNum, ADDR2SLINE(vAddr), pline, new_state );
         /* Use the line below to support parallel cache mode */
         /* return (MPinUP || NUM_CPUS == 1) ? PHYS_TO_MEMADDR( pAddr ) : 0; */
         retval = PHYS_TO_MEMADDR( M_FROM_CPU(P->myNum), pAddr );
         ASSERT( !retval || (uint)retval > 0x1000 );
         return retval;
      }
      break;
   case MEM_I_SHARED:
   case MEM_D_SHARED:
      if (new_state == MEM_I_SHARED) {
         VASSERT( (vAddr == CLEAR_BD(P->PC) ||
                   vAddr == CLEAR_BD(P->PC) + INST_SIZE),
                  ("vAddr 0x%x\nPC 0x%x\n", vAddr, P->PC) );
      }
      /* Either the line is invalid and we are reading, */
      /* or the line is exclusive and we are executing.  I need to */
      /* see that case to detect self-modified code */
      /* Expensive Assert */
      /* VASSERT( ( (P->qc_v[ADDR2SLINE(vAddr)] == MEM_INVALID) ||
                    (VQC_INST(new_state) &&
                     (VQC_EXCL(P->qc_v[ADDR2SLINE(vAddr)])) ) ),
                  ("%d vAddr 0x%x QC_V 0x%x\n",
                   cpuNum, vAddr, P->qc_v[ADDR2SLINE(vAddr)]) ); */
      if( PQC_VALID( pLineInfo ) &&
          ( PQC_VLINE( pLineInfo ) == ADDR2SLINE( vAddr ) ||
            !PQC_VLINE( pLineInfo ) ) &&
          /* The problem is that the kernel can read/write a location, */
          /* then NOT set the valid bit for the TLB entry which allows */
          /* user access to this location */
          /* Returns 1 for K0 addresses */
          Em_Is_Tlb_Readable(cpuNum, vAddr, CURRENT_ASID(cpuNum)) ) {
         /* If we are detecting an execute-after-write hazard, this */
         /* downgrades the line to read/exec so that future writes */
         /* will be detected */
         /* Otherwise this condition is detected in mem_ref */
         /* VASSERT( cache_verify_shared(cpuNum, pline),
                     ("%d vAddr 0x%x pAddr 0x%x state %d",
                      cpuNum, vAddr, pAddr, new_state) ); EXP */
         set_qc_state( cpuNum, ADDR2SLINE(vAddr), pline, new_state );
         /* Use the line below to support parallel cache mode */
         /* return (MPinUP || NUM_CPUS == 1) ? PHYS_TO_MEMADDR( pAddr ) : 0; */
         retval = PHYS_TO_MEMADDR( M_FROM_CPU(cpuNum), pAddr );
         ASSERT( !retval || (uint)retval > 0x1000 );
         return retval;
      }
      break;
   }
   /* Can't just filter backdoor addresses because some of them are relocated */
   /* Returning a non-zero result causes the quick check to not rewind */
   /* No need to rewind when we are MPinUP */
   retval = mem_ref( vAddr, new_state, cpuNum );
   ASSERT( !retval || (uint)retval > 0x1000 );
   return retval;
}
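/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original qc64.c): how a caller is   */
/* expected to honor phys_mem_ref's contract.  A zero return means "rewind  */
/* the quick check and replay the reference"; a non-zero return is the host */
/* (MA) address to use directly.  This load helper is hypothetical, and     */
/* passing mAddr = 0 simply forces the slow path -- a real caller would     */
/* pass the address produced by the virtually indexed quick check.          */
/* ------------------------------------------------------------------------ */
#if 0   /* example only */
static uint example_load_word( VA vAddr, int cpuNum )
{
   MA mAddr = phys_mem_ref( vAddr, MEM_D_SHARED, (MA)0, cpuNum );
   if ( !mAddr ) {
      /* The dispatch loop would rewind the quick check here and retry. */
      return 0;
   }
   return *(uint *)mAddr;   /* host address of the simulated word */
}
#endif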
/**********************************************************************
 * PhysArray Memory Reference:
 *   called on a miss in the MMU or (hit in MMU and miss in PhysArray)
 **********************************************************************/
unsigned pa_mem_ref( VA vAddr, EmVQCMemState new_state, MA mAddr, int cpuNum )
{
#if SIMBASEADDR != 0x78000000
   /* Dispatch backdoor references quickly */
   if( IS_BACKDOOR(vAddr) ) {
      STAT_INC( backdoor_ref );
      STAT_INC( backdoor_unaltered_ref );
      return vAddr;
   }
#endif
   return (unsigned) mem_ref(vAddr, new_state, cpuNum);
}

static void QC64Check( EmbraState *P, int myASID )
{
   int i, idx;
   for (i = 0; i < QC64_NUM_ENTRIES; i++) {
      if (P->kernelMMU[i].vpn != QC64_INVALID_VPN) {
         VA va = P->kernelMMU[i].vpn * DEFAULT_PAGESZ;
         if (IS_UNMAPPED_ADDR(va))
            continue;
         idx = Tlb_Lookup( P->myNum, GET_REGION(va), GET_VPN2(va), myASID );
         if (idx > 0) {
            int szEntry;
            Reg lo_reg;
            PA pAddr;
            idx--;
            szEntry = P->tlbEntrySize[idx];
            /* Which lo register? */
            if (va & (TlbEntrySz[szEntry].offset_mask + 1))
               lo_reg = P->tlbEntry[idx].Lo1;
            else
               lo_reg = P->tlbEntry[idx].Lo0;
            ASSERT(IS_VALID(lo_reg));
            if (P->kernelMMU[i].writable) {
               ASSERT(IS_DIRTY(lo_reg));
            }
            /*
              pAddr = ((((GET_PFN(lo_reg))) * DEFAULT_PAGESZ) &
                       ~(VA)TlbEntrySz[szEntry].offset_mask) +
                      (va & TlbEntrySz[szEntry].offset_mask);
              ASSERT(pAddr == ((uint)P->kernelMMU[i].ma) - MEMADDRBASE); */
            ASSERT((P->QC64TLBBackMap[idx] & TlbEntrySz[szEntry].vpn_mask) ==
                   ((va / DEFAULT_PAGESZ) & TlbEntrySz[szEntry].vpn_mask));
         } else
            ASSERT(0);
      }
   }
}
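/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original qc64.c): the even/odd page */
/* selection QC64Check makes with "va & (offset_mask + 1)".  A MIPS TLB     */
/* entry maps a pair of pages -- Lo0 for the even page, Lo1 for the odd     */
/* one -- and the bit just above the page-offset mask picks between them.   */
/* The helper name is hypothetical.                                         */
/* ------------------------------------------------------------------------ */
#if 0   /* example only */
static Reg qc_select_lo( EmbraState *P, int idx, VA va )
{
   int szEntry = P->tlbEntrySize[idx];
   /* offset_mask + 1 isolates the lowest VPN bit for this page size */
   if (va & (TlbEntrySz[szEntry].offset_mask + 1))
      return P->tlbEntry[idx].Lo1;   /* odd page */
   return P->tlbEntry[idx].Lo0;      /* even page */
}
#endif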
/****************************************************************************/
/* Debugging functions.  They verify invariants                             */
/****************************************************************************/
/* Ensure qc maps what the TLB wants mapped */
void qc_consistency_check( int cpuNum )
{
}
#endif
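/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original qc64.c): qc_consistency_   */
/* check is an empty stub above; one plausible body, reusing QC64Check with */
/* the CPU's current ASID, might look like this.  Purely a guess at intent, */
/* not the simulator's actual check.                                        */
/* ------------------------------------------------------------------------ */
#if 0   /* example only */
static void qc_consistency_check_sketch( int cpuNum )
{
   QC64Check( &EMP[cpuNum], CURRENT_ASID(cpuNum) );
}
#endif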
