
📄 qc.c

📁 An operating system for the MIPS architecture
💻 C
📖 Page 1 of 3
   if (IS_UNMAPPED_TLBHI(hi)) return;
   for(j=0;j<P->numTlbEntries;j++) {
      Reg other = P->tlbEntry[j].Hi & ~TLBHI_G;
      if (j==index) continue;
      if (IS_UNMAPPED_TLBHI(other) || (other == 0)) continue;
      if (other==(hi&~TLBHI_G)) {
         CPUWarning("Duplicate tlb entry: new=0x%llx old=0x%llx indx=%d\n",
                    (Reg64)hi, (Reg64)other, j);
      }
   }
}

void qc_CheckForDuplicates(CPUState *P)
{
   Reg hiEntries[MAX_NTLBENTRIES];
   int i,j;
   for(i = 0; i < P->numTlbEntries; i++ ) {
      Reg hi = P->tlbEntry[i].Hi;
      hiEntries[i] = hi & ~TLBHI_G;
      if (IS_UNMAPPED_TLBHI(hi)) continue;
      for(j=0;j<i;j++) {
         ASSERT( hiEntries[j] != hi);
      }
   }
}

void EmFirewallChange(PA pAddr, uint grant, uint64 cpumask)
{
   MA mAddr;
   VA vAddr = PHYS_TO_K0(pAddr);
   uint cpu;

   ASSERT(embra.emode == EMBRA_PAGE);   /* this function doesn't support
                                         * cache mode yet
                                         */
   if (grant) {
      /* easy case: only need to update kseg0 that might be denied.
       * kseg2 and kuseg will get added on next qc miss
       */
      for (cpu = 0; cpu < TOTAL_CPUS; cpu++) {
         if ( ! (cpumask & (1 << cpu)))
            continue;
         mAddr = PHYS_TO_MEMADDR(M_FROM_CPU(cpu), pAddr);
         if (!TCcoherence_is_code(PHYS_TO_MEMADDR(M_FROM_CPU(cpu), pAddr))) {
            SetMMUEntry(&EMP[cpu],vAddr,MMU_PROT_WRITE(mAddr));
         }
      }
   } else {
      for (cpu = 0; cpu < TOTAL_CPUS; cpu++) {
         if ( ! (cpumask & (1 << cpu)))
            continue;
         mAddr = PHYS_TO_MEMADDR(M_FROM_CPU(cpu), pAddr);
         /* Denying access that used to be granted.  Take it out of kseg0...
          */
         SetMMUEntry(&EMP[cpu],vAddr,MMU_PROT_READ(mAddr));
         /* ... and out of kuseg or kseg2.  for now do this inefficiently */
         qc_mmu_switch(cpu, CURRENT_ASID(cpu), CURRENT_ASID(cpu), 1);
      }
   }
}

/*****************************************************************************/
/* phys_mem_ref wrapper                                                      */
/* This implements the "fast reload" from the physically indexed quick      */
/* check array pQC                                                           */
/*****************************************************************************/
/* Returning 0 means rewind QC, returning a value means use that value */
MA phys_mem_ref(VA vAddr, EmVQCMemState new_state, MA mAddr, int cpuNum)
{
   PLN pline;
   phys_info_t pLineInfo;
   PA pAddr;
   EmbraState* P = &EMP[cpuNum];
   MA retval;

   /* ASSERT( cpuNum < TOTAL_CPUS ); */
   if( VQC_INST( new_state ) ) {
      K0A k0Addr;
      vAddr = IN_BD( vAddr ) ? CLEAR_BD( vAddr ) + INST_SIZE : vAddr;
      k0Addr = non_excepting_tv(cpuNum, vAddr);    /* MMU lookup */
      if( k0Addr ) {
         pAddr = K0_TO_PHYS_REMAP(k0Addr, cpuNum);
      } else {
         /* TLB fault */
         retval = mem_ref( vAddr, new_state, cpuNum );
         ASSERT( !retval || (uint)retval > 0x1000);
         return retval;
      }
   } else {
      if( (uint)mAddr < 0x1000 ) {
         /* TLB fault */
         retval = mem_ref( vAddr, new_state, cpuNum );
         ASSERT( !retval || (uint)retval > 0x1000);
         return retval;
      }
      pAddr = MEMADDR_TO_PHYS( M_FROM_CPU(cpuNum), mAddr );
   }

   /* VQC missed */
   /* STAT_VQC(new_state); */
   pline = ADDR2SLINE(pAddr);
   pLineInfo = P->qc_p[pline];

   switch( new_state ) {
   case MEM_INVALID:
      ASSERT(0);
      break;
   case MEM_I_EXCLUSIVE:
      ASSERT(0);
      break;
   case MEM_D_EXCLUSIVE:
      /* Expensive Assert */
      /* ASSERT( P->qc_v[ADDR2SLINE(vAddr)] == MEM_INVALID ||
                 VQC_SHARED( P->qc_v[ADDR2SLINE(vAddr)] ) ); */
      if( PQC_DIRTY( pLineInfo ) &&
          ( PQC_VLINE( pLineInfo ) == ADDR2SLINE( vAddr ) ||
            !PQC_VLINE( pLineInfo ) )
          /* The problem is that the kernel can write a location, then */
          /* NOT set the dirty bit for the TLB entry which allows user */
          /* to write to this location */
          /* Returns 1 for K0 addresses */
          && Em_Is_Tlb_Writable(cpuNum, vAddr, CURRENT_ASID(cpuNum) ) ) {
         /* VASSERT( cache_verify_excl(cpuNum, pline),
                     ("%d vAddr 0x%x pAddr 0x%x state %d",
                      cpuNum, vAddr, pAddr, new_state) );  EXP */
         set_qc_state(cpuNum, ADDR2SLINE(vAddr), pline, new_state );
         /* Use below line to support parallel cache mode */
         /* return (MPinUP || TOTAL_CPUS == 1) ?
                   PHYS_TO_MEMADDR( M_FROM_CPU(cpuNum), pAddr ) : 0; */
         retval = PHYS_TO_MEMADDR( M_FROM_CPU(cpuNum), pAddr );
         ASSERT( !retval || (uint)retval > 0x1000);
         return retval;
      }
      break;
   case MEM_I_SHARED:
   case MEM_D_SHARED:
      if (new_state == MEM_I_SHARED) {
         VASSERT( (vAddr == CLEAR_BD(P->PC) || vAddr == CLEAR_BD(P->PC)+INST_SIZE),
                  ("vAddr 0x%x\nPC 0x%x\n",
                   vAddr, P->PC) );
      }
      /* Either the line is invalid and we are reading */
      /* or the line is exclusive and we are executing.  I need to */
      /* see that case to detect self-modified code */
      /* Expensive Assert */
      /* VASSERT( ( (P->qc_v[ADDR2SLINE(vAddr)] == MEM_INVALID) ||
                    (VQC_INST(new_state) &&
                     (VQC_EXCL(P->qc_v[ADDR2SLINE(vAddr)])) ) ),
                  ("%d vAddr 0x%x QC_V 0x%x\n",
                   cpuNum, vAddr, P->qc_v[ADDR2SLINE(vAddr)]) ); */
      if( PQC_VALID( pLineInfo ) &&
          ( PQC_VLINE( pLineInfo ) == ADDR2SLINE( vAddr ) ||
            !PQC_VLINE( pLineInfo ) ) &&
          /* The problem is that the kernel can read/write a */
          /* location, then */
          /* NOT set the valid bit for the TLB entry which allows user */
          /* access to this location */
          /* Returns 1 for K0 addresses */
          Em_Is_Tlb_Readable(cpuNum, vAddr, CURRENT_ASID(cpuNum) ) ) {
         /* If we are detecting an execute after write hazard, this */
         /* downgrades the line to read/exec so that future writes */
         /* will be detected */
         /* Otherwise this condition is detected in mem_ref */
         /* VASSERT( cache_verify_shared(cpuNum, pline),
                     ("%d vAddr 0x%x pAddr 0x%x state %d",
                      cpuNum, vAddr, pAddr, new_state) ); EXP */
         set_qc_state(cpuNum, ADDR2SLINE(vAddr), pline, new_state );
         /* Use below line to support parallel cache mode */
         /* return (MPinUP || TOTAL_CPUS == 1) ?
                   PHYS_TO_MEMADDR( M_FROM_CPU(cpuNum), pAddr ) : 0; */
         retval = PHYS_TO_MEMADDR( M_FROM_CPU(cpuNum), pAddr );
         ASSERT( !retval || (uint)retval > 0x1000);
         return retval;
      }
      break;
   }

   /* Can't just filter backdoor addresses because some of them are relocated */
   /* Returning a non-zero result causes the quick check to not rewind */
   /* No need to rewind when we are MPinUP */
   retval = mem_ref( vAddr, new_state, cpuNum );
   ASSERT( !retval || (uint)retval > 0x1000);
   return retval;
}

/**********************************************************************
 * PhysArray Memory Reference :
 *   called on a miss in the MMU or (hit in MMU and miss in PhysArray)
 **********************************************************************/
unsigned pa_mem_ref(VA vAddr, EmVQCMemState new_state, MA mAddr, int cpuNum)
{
#if SIMBASEADDR != 0x78000000
   /* Dispatch backdoor references quickly */
   if( IS_BACKDOOR(vAddr) ) {
      STAT_INC( backdoor_ref );
      STAT_INC( backdoor_unaltered_ref );
      return vAddr;
   }
#endif
   return (unsigned) mem_ref(vAddr, new_state, cpuNum);
}

/*****************************************************************************/
/* Debugging functions.  They verify invariants                             */
/*****************************************************************************/

/* Insure qc maps what the TLB wants mapped */
void qc_consistency_check( int cpuNum )
{
  int i;
  for( i = 0; i < EMP[cpuNum].numTlbEntries; i++) {
     if( IS_VALID(EMP[cpuNum].tlbEntry[i].Lo0) ) {
        if( (uint)EMP[cpuNum].mmu[TLB_ENT2VPN(EMP[cpuNum].tlbEntry[i].Hi)] !=
            ((uint)EMP[cpuNum].tlbEntry[i].Lo0 & 0xfffff000) ) {
           CPUPut("%d Help! MMUreloc inconsistent with TLB\n", cpuNum);
           while( 1 )
              ; /* spin */
        }
     }
     if( IS_VALID(EMP[cpuNum].tlbEntry[i].Lo1) ) {
        if( (uint)EMP[cpuNum].mmu[TLB_ENT2VPN(EMP[cpuNum].tlbEntry[i].Hi)|1] !=
            ((uint)EMP[cpuNum].tlbEntry[i].Lo1 & 0xfffff000) ) {
           CPUPut("%d Help! MMUreloc inconsistent with TLB\n", cpuNum);
           while( 1 )
              ; /* spin */
        }
     }
  }
}

void qc_insure_other_qc_invalid(int cpuNum, PA pAddr)
{
  phys_info_t* phys;
  char* virt;
  int i;
  for( i = 0; i < TOTAL_CPUS; i++ )
    {
      if( i != EMP[cpuNum].myNum )
        {
          phys = &EMP[i].qc_p[ADDR2SLINE(pAddr)];
          ASSERT(!PQC_VALID(*phys));
          virt = &EMP[i].qc_v[PQC_VLINE(*phys)];
          ASSERT( *virt == 0 );
        }
    }
}

void qc_insure_other_qc_invalid_or_read(int cpuNum, PA pAddr)
{
  phys_info_t* phys;
  char* virt;
  int i;
  for( i = 0; i < TOTAL_CPUS; i++ )
    {
      if( i != EMP[cpuNum].myNum )
        {
          phys = &EMP[i].qc_p[ADDR2SLINE(pAddr)];
          ASSERT(!PQC_VALID(*phys) || PQC_SHARED(*phys) );
          /* Can't say anything about virtual addresses because the */
          /* mapping could have changed, and the backmapped virt addr */
          /* points to a new phys addr (which could be owned excl) */
          /* But now we remove backmap on unmap */
          virt = &EMP[i].qc_v[PQC_VLINE(*phys)];
          ASSERT((*virt == MEM_INVALID) || VQC_SHARED(*virt));
        }
    }
}

/* checks if all MMU entries are also in the TLB with the matching asid */
/* for now just with processor 0 */
void
MMUCheck(int procNum)
{
   int i;
   int myASID = CURRENT_ASID(procNum);
   int writing=0;
   unsigned ppn;
   IDX tlb_index;

   for (i=0;i<(MMU_RELOC_SIZE/4);i++){
      if (EMP[procNum].mmu[i]){
         if ((int)EMP[procNum].mmu[i]<0) writing=1;
         tlb_index = Tlb_Lookup( procNum, GET_REGION(i*DEFAULT_PAGESZ),
                                 CONVERT_TO_VPN2(i), myASID );
         if (tlb_index) {
            EntryLo lo;
            tlb_index--;
            /* We have a matching VPN and ASID */
            lo = (i & 1) ? EMP[procNum].tlbEntry[tlb_index].Lo1 :
                           EMP[procNum].tlbEntry[tlb_index].Lo0;
            if (IS_VALID( lo )) {
               /* Valid */
               if( IS_DIRTY(lo) || !(writing) ) {
                  /* Page is dirty or we are reading - OK */
                  ppn = GET_PFN(lo);
                  if (ppn!=PAGE_NUMBER(((unsigned)EMP[procNum].mmu[i]&0x7fffffff)-MA_TO_UINT(SIM_MEM_ADDR(M_FROM_CPU(procNum)))))
                     CPUPrint("ERROR: MMU Entry VPN %x : %x different from TLB (%x)\n",
                              i,EMP[procNum].mmu[i], ppn);
               } else {
                  /* Page is not dirty but the MMU entry is writable */
                  CPUPrint("ERROR: MMU Entry VPN %x : %x writable, TLB is not!\n",
                           i,EMP[procNum].mmu[i]);
               }
            } else {
               /* TLB Invalid entry */
               if (!IS_KSEG0(i*DEFAULT_PAGESZ))
                  CPUPrint("ERROR: MMU Entry VPN %x : %x not in TLB\n",i,EMP[procNum].mmu[i]);
            }
            if (((unsigned)EMP[procNum].mmu[i]&0x7fffffff)>(MA_TO_UINT(SIM_MEM_ADDR(M_FROM_CPU(procNum)))+MEM_SIZE(M_FROM_CPU(procNum))))
               CPUPrint("ERROR: MMU Entry VPN %x : %x not in MemFile\n",i,EMP[procNum].mmu[i]);
         } else {
            if (!IS_KSEG0(i*DEFAULT_PAGESZ))
               CPUPrint("ERROR: MMU Entry VPN %x : %x not in TLB\n",i,EMP[procNum].mmu[i]);
         }
      }
   }
}

void
PACheck(int procNum)
{
   int i,line_no;
   pa_info_t x;

   for (i=0;i<(PA_SIZE(M_FROM_CPU(procNum)));i++){
      x=EMP[procNum].pa_p[i];
      if (x!=PA_SET_INV){
         line_no = i & (SCACHE_INDEX -1);   /* direct mapped */
         if ((PA_SHARED(x) && !CACHE_SHARED(EMP[procNum].cache_tag[line_no]))
             ||
             (PA_DIRTY(x) && !CACHE_EXCL(EMP[procNum].cache_tag[line_no]))){
            CPUPrint("ERROR : Cache State %x PA : %x , C.Tag : %x\n",i,x,EMP[procNum].cache_tag[line_no]);
         }
         if (CACHE_PLINE(EMP[procNum].cache_tag[line_no])!=i) {
            CPUPrint("ERROR : Wrong Tag pline %x C.Tag.line : %x\n",i,CACHE_PLINE(EMP[procNum].cache_tag[line_no]));
         }
      }
   }
}
#endif
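
The comment block above phys_mem_ref states its contract: returning 0 means the quick check must be rewound, while a non-zero value is the simulator memory address (MA) to use for the access. The sketch below is a hypothetical caller, not part of qc.c; the name qc_reload_example and the surrounding dispatch are assumptions, and it relies only on that documented return convention.

/* Hypothetical caller sketch (not part of qc.c): illustrates the return
 * convention of phys_mem_ref() described above.  0 => rewind the quick
 * check and retry the access; non-zero => use the returned MA directly. */
static MA qc_reload_example(VA vAddr, EmVQCMemState state, MA mAddr, int cpuNum)
{
   MA ma = phys_mem_ref(vAddr, state, mAddr, cpuNum);
   if (ma == 0) {
      /* Quick-check miss could not be resolved here; the caller rewinds. */
      return 0;
   }
   /* Fast path: perform the simulated load/store through ma. */
   return ma;
}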
