ppc32_mem.c
/*
 * Cisco router simulation platform.
 * Copyright (c) 2006 Christophe Fillot (cf@utc.fr)
 *
 * PowerPC MMU.
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <assert.h>

#include "cpu.h"
#include "vm.h"
#include "dynamips.h"
#include "memory.h"
#include "device.h"
#include "ppc32_jit.h"

#define DEBUG_ICBI 0

/* Memory access with special access mask */
void ppc32_access_special(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid,
                          m_uint32_t mask,u_int op_code,u_int op_type,
                          u_int op_size,m_uint64_t *data)
{
   switch(mask) {
      case MTS_ACC_T:
         if (op_code != PPC_MEMOP_LOOKUP) {
#if DEBUG_MTS_ACC_T
            cpu_log(cpu->gen,"MTS",
                    "MMU exception for address 0x%8.8x at ia=0x%8.8x "
                    "(%s access, size=%u)\n",
                    vaddr,cpu->ia,(op_type == MTS_READ) ? "read":"write",
                    op_size);
            //ppc32_dump_regs(cpu->gen);
#if MEMLOG_ENABLE
            memlog_dump(cpu->gen);
#endif
#endif
            if (cid == PPC32_MTS_DCACHE) {
               cpu->dsisr = PPC32_DSISR_NOTRANS;

               if (op_type == MTS_WRITE)
                  cpu->dsisr |= PPC32_DSISR_STORE;

               cpu->dar = vaddr;
               ppc32_trigger_exception(cpu,PPC32_EXC_DSI);
               cpu_exec_loop_enter(cpu->gen);
            }
         }
         break;

      case MTS_ACC_U:
         if (op_type == MTS_READ)
            *data = 0;

         if (cpu->gen->undef_mem_handler != NULL) {
            if (cpu->gen->undef_mem_handler(cpu->gen,(m_uint64_t)vaddr,
                                            op_size,op_type,data))
               return;
         }

#if DEBUG_MTS_ACC_U
         if (op_type == MTS_READ)
            cpu_log(cpu->gen,"MTS",
                    "read access to undefined address 0x%8.8x at "
                    "ia=0x%8.8x (size=%u)\n",vaddr,cpu->ia,op_size);
         else
            cpu_log(cpu->gen,"MTS",
                    "write access to undefined address 0x%8.8x at "
                    "ia=0x%8.8x, value=0x%8.8llx (size=%u)\n",
                    vaddr,cpu->ia,*data,op_size);
#endif
         break;
   }
}

/* Initialize the MTS subsystem for the specified CPU */
int ppc32_mem_init(cpu_ppc_t *cpu)
{
   size_t len;

   /* Initialize the cache entries to 0xFF (invalid) */
   len = MTS32_HASH_SIZE * sizeof(mts32_entry_t);

   if (!(cpu->mts_cache[PPC32_MTS_ICACHE] = malloc(len)))
      return(-1);

   if (!(cpu->mts_cache[PPC32_MTS_DCACHE] = malloc(len)))
      return(-1);

   memset(cpu->mts_cache[PPC32_MTS_ICACHE],0xFF,len);
   memset(cpu->mts_cache[PPC32_MTS_DCACHE],0xFF,len);

   cpu->mts_lookups = 0;
   cpu->mts_misses  = 0;

   return(0);
}

/* Free memory used by MTS */
void ppc32_mem_shutdown(cpu_ppc_t *cpu)
{
   if (cpu != NULL) {
      /* Free the caches themselves */
      free(cpu->mts_cache[PPC32_MTS_ICACHE]);
      free(cpu->mts_cache[PPC32_MTS_DCACHE]);
      cpu->mts_cache[PPC32_MTS_ICACHE] = NULL;
      cpu->mts_cache[PPC32_MTS_DCACHE] = NULL;
   }
}
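/*
 * Note on the 0xFF fill pattern used above: filling the MTS entries with
 * 0xFF sets entry->gvpa to all-ones. Lookups compare gvpa against a
 * page-aligned virtual address (vaddr & PPC32_MIN_PAGE_MASK, page-offset
 * bits cleared), so an all-ones gvpa can never match and every access to a
 * freshly initialized or invalidated entry falls back to the slow lookup
 * path below.
 */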
/* Show MTS detailed information (debugging only!) */
void ppc32_mem_show_stats(cpu_gen_t *gen_cpu)
{
   cpu_ppc_t *cpu = CPU_PPC32(gen_cpu);
#if DEBUG_MTS_MAP_VIRT
   mts32_entry_t *entry;
   u_int i,count;
#endif

   printf("\nCPU%u: MTS statistics:\n",cpu->gen->id);

#if DEBUG_MTS_MAP_VIRT
   printf("Instruction cache:\n");

   /* Valid hash entries for Instruction Cache */
   for(count=0,i=0;i<MTS32_HASH_SIZE;i++) {
      entry = &cpu->mts_cache[PPC32_MTS_ICACHE][i];

      if (!(entry->gvpa & MTS_INV_ENTRY_MASK)) {
         printf(" %4u: vaddr=0x%8.8x, paddr=0x%8.8x, hpa=%p\n",
                i,entry->gvpa,entry->gppa,(void *)entry->hpa);
         count++;
      }
   }

   printf(" %u/%u valid hash entries for icache.\n",count,MTS32_HASH_SIZE);

   printf("Data cache:\n");

   /* Valid hash entries for Data Cache */
   for(count=0,i=0;i<MTS32_HASH_SIZE;i++) {
      entry = &cpu->mts_cache[PPC32_MTS_DCACHE][i];

      if (!(entry->gvpa & MTS_INV_ENTRY_MASK)) {
         printf(" %4u: vaddr=0x%8.8x, paddr=0x%8.8x, hpa=%p\n",
                i,entry->gvpa,entry->gppa,(void *)entry->hpa);
         count++;
      }
   }

   printf(" %u/%u valid hash entries for dcache.\n",count,MTS32_HASH_SIZE);
#endif

   printf("\n Total lookups: %llu, misses: %llu, efficiency: %g%%\n",
          cpu->mts_lookups, cpu->mts_misses,
          100 - ((double)(cpu->mts_misses*100)/
                 (double)cpu->mts_lookups));
}

/* Invalidate the MTS caches (instruction and data) */
void ppc32_mem_invalidate_cache(cpu_ppc_t *cpu)
{
   size_t len;

   len = MTS32_HASH_SIZE * sizeof(mts32_entry_t);
   memset(cpu->mts_cache[PPC32_MTS_ICACHE],0xFF,len);
   memset(cpu->mts_cache[PPC32_MTS_DCACHE],0xFF,len);
}

/*
 * MTS mapping.
 *
 * It is NOT inlined since it triggers a GCC bug on my config (x86, GCC 3.3.5)
 */
static no_inline struct mts32_entry *
ppc32_mem_map(cpu_ppc_t *cpu,u_int op_type,mts_map_t *map,
              mts32_entry_t *entry,mts32_entry_t *alt_entry)
{
   ppc32_jit_tcb_t *block;
   struct vdevice *dev;
   m_uint32_t offset;
   m_iptr_t host_ptr;
   m_uint32_t exec_flag = 0;
   int cow;

   if (!(dev = dev_lookup(cpu->vm,map->paddr+map->offset,map->cached)))
      return NULL;

   if (cpu->exec_phys_map) {
      block = ppc32_jit_find_by_phys_page(cpu,map->paddr >> VM_PAGE_SHIFT);

      if (block)
         exec_flag = MTS_FLAG_EXEC;
   }

   if (dev->flags & VDEVICE_FLAG_SPARSE) {
      host_ptr = dev_sparse_get_host_addr(cpu->vm,dev,map->paddr,op_type,&cow);

      entry->gvpa  = map->vaddr;
      entry->gppa  = map->paddr;
      entry->hpa   = host_ptr;
      entry->flags = (cow) ? MTS_FLAG_COW : 0;
      entry->flags |= exec_flag;
      return entry;
   }

   if (!dev->host_addr || (dev->flags & VDEVICE_FLAG_NO_MTS_MMAP)) {
      offset = (map->paddr + map->offset) - dev->phys_addr;

      /* device entries are never stored in virtual TLB */
      alt_entry->gppa  = dev->id;
      alt_entry->hpa   = offset;
      alt_entry->flags = MTS_FLAG_DEV;
      return alt_entry;
   }

   entry->gvpa  = map->vaddr;
   entry->gppa  = map->paddr;
   entry->hpa   = dev->host_addr + (map->paddr - dev->phys_addr);
   entry->flags = exec_flag;
   return entry;
}
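/*
 * Summary of ppc32_mem_map() above: a guest physical page resolves to one
 * of three kinds of MTS entries:
 *   - sparse device: the host address comes from dev_sparse_get_host_addr()
 *     and the entry may carry MTS_FLAG_COW for copy-on-write pages;
 *   - device without a direct host mapping (or with MTS mmap disabled): the
 *     alternate entry is tagged MTS_FLAG_DEV and never stored in the virtual
 *     TLB, so accesses are routed through dev_access_fast();
 *   - directly mapped memory: the entry points straight into the host
 *     mapping at dev->host_addr plus the offset within the device.
 * MTS_FLAG_EXEC is set whenever a JIT block exists for the physical page,
 * so that later writes to that page can invalidate the translated code.
 */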
/* BAT lookup */
static forced_inline int ppc32_bat_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,
                                          u_int cid,mts_map_t *map)
{
   m_uint32_t bepi,mask,bl,pr,ubat;
   int i;

   pr = (cpu->msr & PPC32_MSR_PR) >> PPC32_MSR_PR_SHIFT;
   pr = ((~pr << 1) | pr) & 0x03;

   for(i=0;i<PPC32_BAT_NR;i++) {
      ubat = cpu->bat[cid][i].reg[0];

      if (!(ubat & pr))
         continue;

      //bl = (ubat & PPC32_UBAT_BL_MASK) >> PPC32_UBAT_BL_SHIFT;
      bl = (ubat & PPC32_UBAT_XBL_MASK) >> PPC32_UBAT_XBL_SHIFT;
      mask = ~bl << PPC32_BAT_ADDR_SHIFT;
      bepi = ubat & PPC32_UBAT_BEPI_MASK;

      if (bepi == (vaddr & mask)) {
         map->vaddr  = vaddr & PPC32_MIN_PAGE_MASK;
         map->paddr  = cpu->bat[cid][i].reg[1] & PPC32_LBAT_BRPN_MASK;
         map->paddr += map->vaddr - bepi;
         map->offset = vaddr & PPC32_MIN_PAGE_IMASK;
         map->cached = FALSE;
         return(TRUE);
      }
   }

   return(FALSE);
}

/* Memory slow lookup */
static mts32_entry_t *ppc32_slow_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,
                                        u_int cid,u_int op_code,u_int op_size,
                                        u_int op_type,m_uint64_t *data,
                                        mts32_entry_t *alt_entry)
{
   m_uint32_t hash_bucket,segment,vsid;
   m_uint32_t hash,tmp,pteg_offset,pte_key,key,pte2;
   mts32_entry_t *entry;
   m_uint8_t *pte_haddr;
   m_uint64_t paddr;
   mts_map_t map;
   int i;

#if DEBUG_MTS_STATS
   cpu->mts_misses++;
#endif

   hash_bucket = MTS32_HASH(vaddr);
   entry = &cpu->mts_cache[cid][hash_bucket];

   /* No translation - cover the 4GB space */
   if (((cid == PPC32_MTS_ICACHE) && !(cpu->msr & PPC32_MSR_IR)) ||
       ((cid == PPC32_MTS_DCACHE) && !(cpu->msr & PPC32_MSR_DR)))
   {
      map.vaddr  = vaddr & PPC32_MIN_PAGE_MASK;
      map.paddr  = vaddr & PPC32_MIN_PAGE_MASK;
      map.offset = vaddr & PPC32_MIN_PAGE_IMASK;
      map.cached = FALSE;

      if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
         goto err_undef;

      return entry;
   }

   /* Walk through the BAT registers */
   if (ppc32_bat_lookup(cpu,vaddr,cid,&map)) {
      if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
         goto err_undef;

      return entry;
   }

   if (unlikely(!cpu->sdr1))
      goto no_pte;

   /* Get the virtual segment identifier */
   segment = vaddr >> 28;
   vsid = cpu->sr[segment] & PPC32_SD_VSID_MASK;

   /* Compute the first hash value */
   hash = (vaddr >> PPC32_MIN_PAGE_SHIFT) & 0xFFFF;
   hash ^= vsid;
   hash &= 0x7FFFFF;

   tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);

   pteg_offset = (hash & 0x3FF) << 6;
   pteg_offset |= tmp << 16;
   pte_haddr = cpu->sdr1_hptr + pteg_offset;

   pte_key = 0x80000000 | (vsid << 7);
   pte_key |= (vaddr >> 22) & 0x3F;

   for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
      key = vmtoh32(*(m_uint32_t *)pte_haddr);

      if (key == pte_key)
         goto pte_lookup_done;
   }

   /* Secondary hash value */
   hash = (~hash) & 0x7FFFFF;

   tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);

   pteg_offset = (hash & 0x3FF) << 6;
   pteg_offset |= tmp << 16;
   pte_haddr = cpu->sdr1_hptr + pteg_offset;

   pte_key = 0x80000040 | (vsid << 7);
   pte_key |= (vaddr >> 22) & 0x3F;

   for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
      key = vmtoh32(*(m_uint32_t *)pte_haddr);

      if (key == pte_key)
         goto pte_lookup_done;
   }

 no_pte:
   /* No matching PTE for this virtual address */
   ppc32_access_special(cpu,vaddr,cid,MTS_ACC_T,op_code,op_type,op_size,data);
   return NULL;

 pte_lookup_done:
   pte2  = vmtoh32(*(m_uint32_t *)(pte_haddr + sizeof(m_uint32_t)));
   paddr =  pte2 & PPC32_PTEL_RPN_MASK;
   paddr |= (pte2 & PPC32_PTEL_XPN_MASK) << (33 - PPC32_PTEL_XPN_SHIFT);
   paddr |= (pte2 & PPC32_PTEL_X_MASK) << (32 - PPC32_PTEL_X_SHIFT);

   map.vaddr  = vaddr & ~PPC32_MIN_PAGE_IMASK;
   map.paddr  = paddr;
   map.offset = vaddr & PPC32_MIN_PAGE_IMASK;
   map.cached = FALSE;

   if ((entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
      return entry;

 err_undef:
   ppc32_access_special(cpu,vaddr,cid,MTS_ACC_U,op_code,op_type,op_size,data);
   return NULL;
}
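/*
 * Worked example (illustrative only) of the primary hash computed in
 * ppc32_slow_lookup(), assuming 4 KB minimum pages (page shift of 12) and
 * SDR1 "HTMEXT" extension bits of zero: for vaddr = 0x00403000 and
 * vsid = 0x123,
 *   hash        = ((0x00403000 >> 12) & 0xFFFF) ^ 0x123 = 0x403 ^ 0x123 = 0x520
 *   pteg_offset = (0x520 & 0x3FF) << 6 = 0x4800
 *   pte_key     = 0x80000000 | (0x123 << 7) | ((0x00403000 >> 22) & 0x3F)
 *               = 0x80009181
 * The eight PTEs of that group are scanned for a first word equal to
 * pte_key; if none matches, the search is repeated with the inverted
 * (secondary) hash and the secondary-hash bit (0x40) set in the key.
 */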
/* Memory access */
static inline void *ppc32_mem_access(cpu_ppc_t *cpu,m_uint32_t vaddr,
                                     u_int cid,u_int op_code,u_int op_size,
                                     u_int op_type,m_uint64_t *data)
{
   mts32_entry_t *entry,alt_entry;
   ppc32_jit_tcb_t *block;
   m_uint32_t hash_bucket;
   m_uint32_t phys_page;
   m_uint32_t ia_hash;
   m_iptr_t haddr;
   u_int dev_id;
   int cow;

#if MEMLOG_ENABLE
   /* Record the memory access */
   memlog_rec_access(cpu->gen,vaddr,*data,op_size,op_type);
#endif

   hash_bucket = MTS32_HASH(vaddr);
   entry = &cpu->mts_cache[cid][hash_bucket];

#if DEBUG_MTS_STATS
   cpu->mts_lookups++;
#endif

   /* Copy-On-Write for sparse device ? */
   cow = (op_type == MTS_WRITE) && (entry->flags & MTS_FLAG_COW);

   /* Slow lookup if nothing found in cache */
   if (unlikely(((vaddr & PPC32_MIN_PAGE_MASK) != entry->gvpa) || cow)) {
      entry = cpu->mts_slow_lookup(cpu,vaddr,cid,op_code,op_size,op_type,
                                   data,&alt_entry);
      if (!entry)
         return NULL;

      if (entry->flags & MTS_FLAG_DEV) {
         dev_id = entry->gppa;
         haddr  = entry->hpa;
         return(dev_access_fast(cpu->gen,dev_id,haddr,op_size,op_type,data));
      }
   }

   /* Invalidate JIT code for written pages */
   if ((op_type == MTS_WRITE) && (entry->flags & MTS_FLAG_EXEC)) {
      if (cpu->exec_phys_map) {
         phys_page = entry->gppa >> VM_PAGE_SHIFT;

         if (vaddr >= PPC32_EXC_SYS_RST) {
            block = ppc32_jit_find_by_phys_page(cpu,phys_page);

            if (block != NULL) {
               //printf("Invalidation of block 0x%8.8x\n",block->start_ia);
               ia_hash = ppc32_jit_get_ia_hash(block->start_ia);
               ppc32_jit_tcb_free(cpu,block,TRUE);

               if (cpu->exec_blk_map[ia_hash] == block)
                  cpu->exec_blk_map[ia_hash] = NULL;

               entry->flags &= ~MTS_FLAG_EXEC;
            }
         }
      }
   }

   /* Raw memory access */
   haddr = entry->hpa + (vaddr & PPC32_MIN_PAGE_IMASK);
#if MEMLOG_ENABLE
   memlog_update_read(cpu->gen,haddr);
#endif
   return((void *)haddr);
}

/* Memory data access */
#define PPC32_MEM_DACCESS(cpu,vaddr,op_code,op_size,op_type,data) \
   ppc32_mem_access((cpu),(vaddr),PPC32_MTS_DCACHE,(op_code),(op_size),\
                    (op_type),(data))

/* Virtual address to physical page translation */
static fastcall int ppc32_translate(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid,
                                    m_uint32_t *phys_page)
{
   mts32_entry_t *entry,alt_entry;
   m_uint32_t hash_bucket;
   m_uint64_t data = 0;

   hash_bucket = MTS32_HASH(vaddr);
   entry = &cpu->mts_cache[cid][hash_bucket];

   /* Slow lookup if nothing found in cache */
   if (unlikely(((m_uint32_t)vaddr & PPC32_MIN_PAGE_MASK) != entry->gvpa)) {
      entry = cpu->mts_slow_lookup(cpu,vaddr,cid,PPC_MEMOP_LOOKUP,4,MTS_READ,
                                   &data,&alt_entry);
      if (!entry)
         return(-1);
   }

   *phys_page = entry->gppa >> PPC32_MIN_PAGE_SHIFT;
   return(0);
}

/* Virtual address lookup */
static void *ppc32_mem_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid)
{
   m_uint64_t data;
   return(ppc32_mem_access(cpu,vaddr,cid,PPC_MEMOP_LOOKUP,4,MTS_READ,&data));
}
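/*
 * Sketch (not part of the original file): how a 32-bit load helper could be
 * built on top of the PPC32_MEM_DACCESS() macro defined above. The
 * PPC_MEMOP_LWZ opcode constant is assumed to be defined elsewhere in the
 * emulator. A NULL host address means the access was either completed as a
 * device access (result already in "data") or raised an MMU exception.
 */
#if 0
static inline m_uint32_t ppc32_lwz_sketch(cpu_ppc_t *cpu,m_uint32_t vaddr)
{
   m_uint64_t data = 0;
   void *haddr;

   /* Data-cache access through the MTS */
   haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LWZ,4,MTS_READ,&data);

   /* Direct host memory: load and convert from guest byte order */
   if (haddr != NULL)
      data = vmtoh32(*(m_uint32_t *)haddr);

   return((m_uint32_t)data);
}
#endif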
/* Set a BAT register */
int ppc32_set_bat(cpu_ppc_t *cpu,struct ppc32_bat_prog *bp)
{
   struct ppc32_bat_reg *bat;

   if ((bp->type != PPC32_IBAT_IDX) && (bp->type != PPC32_DBAT_IDX))
      return(-1);

   if (bp->index >= PPC32_BAT_NR)
      return(-1);

   bat = &cpu->bat[bp->type][bp->index];
   bat->reg[0] = bp->hi;
   bat->reg[1] = bp->lo;
   return(0);
}

/* Load BAT registers from a BAT array */
void ppc32_load_bat_array(cpu_ppc_t *cpu,struct ppc32_bat_prog *bp)
{
   while(bp->index != -1) {
      ppc32_set_bat(cpu,bp);
      bp++;
   }
}
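/*
 * Usage sketch (not part of the original file): a BAT program array is
 * terminated by an entry whose index is -1, as expected by
 * ppc32_load_bat_array() above, and is installed with
 * ppc32_load_bat_array(cpu,bat_prog_sketch). The register values below are
 * placeholders; designated initializers are used so that nothing is assumed
 * about the field order of struct ppc32_bat_prog beyond the field names
 * referenced by ppc32_set_bat().
 */
#if 0
static struct ppc32_bat_prog bat_prog_sketch[] = {
   { .type = PPC32_IBAT_IDX, .index = 0, .hi = 0xFFF0001F, .lo = 0xFFF00001 },
   { .type = PPC32_DBAT_IDX, .index = 0, .hi = 0xFFF0001F, .lo = 0xFFF00001 },
   { .index = -1 },   /* terminator */
};
#endif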