/* mmuArmLib.h - ARM MMU library header file */

/* Copyright 1996-2001 Wind River Systems, Inc. */

/*
modification history
--------------------
01p,15jan02,m_h  920,940 thumb incorrectly defined as CPU_940_T not CPU_940T_T
01o,05oct01,rec  added I-SRAM and D-SRAM CP15, R1 bit defs.
01n,03oct01,jpd  added WRITETHROUGH definitions for 946E.
01m,26jul01,scm  add extended small page table support for XScale...
01l,25jul01,scm  add support for extended page attributes on XScale...
01k,23jul01,scm  change XScale name to conform to coding standards...
01j,11dec00,scm  replace references to ARMSA2 with XScale
01i,01sep00,scm  add sa2 support...
01h,07sep99,jpd  added ARM740T, ARM920T support.
01g,20jan99,cdp  removed support for old ARM libraries; add prefix to MMU_IDs.
01f,24nov98,jpd  added ARM940T support, added other CPU ids; removed
                 mmuIntLock/Unlock as intIFLock/Unlock to be used instead;
                 cdp restructured for multiple cache/MMU libraries;
                 big-endian.
01e,09mar98,jpd  added MMUCTR_ definitions for ARM810 and extra cache states.
01d,20jan98,jpd  added _func_armPhysToVirt() etc. function pointers.
01c,17sep97,jpd  added MMU_INIT_VALUE and mmuIntLock/Unlock definitions.
01b,24jan97,jpd  modified mmuAEnable definition.
01a,09may96,cdp  created.
*/

#ifndef __INCmmuArmLibh
#define __INCmmuArmLibh

/*
 * N.B. although this library contains code written for the ARM810 CPU,
 * at the time of writing, this code has not been fully tested on
 * that CPU. YOU HAVE BEEN WARNED.
 */

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _ASMLANGUAGE
#include "memLib.h"
#endif /* _ASMLANGUAGE */

/*
 * MMUs are not fitted to all processors, and so the definitions below
 * should not need to be included for processors without MMUs.
 */

#if (ARMMMU != ARMMMU_NONE)

/*
 * MMU identifiers as read from the ID register (not all tested, some
 * from datasheets).
 */

#if (CPU == XSCALE)
#define MMU_ID_CPU_MASK  0xFFFFF000 /* Mask for CPU types */
#define MMU_ID_XSCALE    0x69052000 /* XScale needs validation */
#else
#define MMU_ID_CPU_MASK  0xFFFFF0   /* Mask for CPU types */
#define MMU_ID_ARM710A   0x047100
#define MMU_ID_ARM710T   0x807100
#define MMU_ID_ARM720T   0x807200
#define MMU_ID_ARM810    0x018100
#define MMU_ID_SA110     0x01A100
#define MMU_ID_SA1100    0x01A110
#define MMU_ID_SA1500    0x01A150
#define MMU_ID_ARM920T   0x029200
#define MMU_ID_ARM940T   0x029400
#define MMU_ID_ARM740T_8 0x807400   /* 8 kB cache variant */
#define MMU_ID_ARM740T_4 0x817400   /* 4 kB cache variant */
#define MMU_ID_ARM946E   0x049460
#endif /* CPU == XSCALE */
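#ifndef _ASMLANGUAGE
/*
 * Illustrative sketch, not part of the original header: how the
 * MMU_ID_* values above are intended to be matched. The function name
 * is hypothetical, and the id argument stands for the raw value that
 * BSP or library code reads from CP15 register 0
 * (MRC p15, 0, Rd, c0, c0, 0).
 */

static BOOL exampleMmuIdMatch
    (
    UINT32 id,       /* raw ID register value */
    UINT32 expected  /* one of the MMU_ID_* values above */
    )
    {
    /* variant/revision fields are masked off before comparing */
    return ((id & MMU_ID_CPU_MASK) == expected);
    }
#endif /* _ASMLANGUAGE */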
/*
 * The page size we will use. Ignore the sub-page, Large Page and Tiny
 * Page features. Page size is still defined for MPUs (it is the
 * minimum size for a region).
 */

#define PAGE_SIZE 4096

#if (!ARM_HAS_MPU) /* definitions for page-table type MMUs */

/*
 * The amount described by a Level 1 Descriptor, which equals the
 * smallest amount of VM allocatable in VxWorks.
 */

#define PAGE_BLOCK_SIZE   0x100000   /* 1 MB */

#define PTE_INDEX_MASK    0x000FF000 /* extract PTE index from Virt Addr */
#define PTE_INDEX_SHIFT   12         /* shift to make that into PTE index */
#define L1D_TO_BASE_SHIFT 10         /* to turn L1D to PT base addr */
#define ADDR_PI_MASK      0x00000FFF /* extract page index from Virt Addr */
#define ADDR_TO_PAGE      12         /* shift phys address to PTE page base address */

/* Level 1 Descriptor types */

#define DESC_TYPE_PAGE 1
#define DEF_L1_PAGE    0x00000011    /* domain zero, Page descriptor */

/*
 * Section descriptors, such as might be used to set up an intermediate
 * set of page tables on processors such as SA-1100/SA-1500 where this
 * needs to be done from BSP initialisation, before vm(Base)Lib sets up
 * a proper set of page tables.
 */

#define RAM_SECT_DESC       0xC1E    /* R/W cacheable bufferable domain 0 */
#define MINICACHE_SECT_DESC 0xC1A    /* R/W C+ B- domain 0 */
#define OTHER_SECT_DESC     0xC12    /* R/W C- B- domain 0 */

/* Level 2 Descriptor or Page Table Entry (PTE) types */

#define PTE_TYPE_FAULT   0           /* any access will cause a fault */
#define PTE_TYPE_SM_PAGE 2           /* Small page descriptor */
#define PTE_TYPE_EX_PAGE 3           /* Extended page descriptor */

/*
 * The Translation Table Base register (TTBR) points to a table of
 * Level 1 Descriptors. These are either Invalid Section descriptors,
 * Section Descriptors, or Page Table Descriptors. If Page Table
 * Descriptors, they each point to a table of Level 2 Page Descriptors,
 * or Page Table Entries (PTEs).
 * The 32-bit (virtual) address space allows for 4096 MB. Each Level 1
 * Descriptor describes a 1 MB area of memory. There are therefore 4096
 * Level 1 Descriptors, and each table of 256 Level 2 Page Descriptors
 * (PTEs) describes 256 4 kbyte pages.
 */

#define NUM_L1_DESCS 4096
#define NUM_L2_DESCS 256

#define L1_TABLE_SIZE (NUM_L1_DESCS * sizeof(LEVEL_1_DESC))

/* No. of pages a Level 1 Descriptor table takes up */

#define L1_DESC_PAGES (L1_TABLE_SIZE / PAGE_SIZE)

/* Size of a Page Table */

#define PAGE_TABLE_SIZE (NUM_L2_DESCS * sizeof(PTE))
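#ifndef _ASMLANGUAGE
/*
 * Illustrative sketch, not part of the original header: how the masks
 * and shifts above combine to walk the two-level translation table by
 * hand. The function name is hypothetical; ttbr stands for the value
 * programmed into the Translation Table Base register (16 kB aligned),
 * and the Level 1 Descriptor found is assumed to be a Page Table
 * Descriptor rather than a Section or Fault descriptor.
 */

static UINT32 * exampleVirtToPteAddr
    (
    UINT32 ttbr,     /* Translation Table Base register value */
    UINT32 virtAddr  /* virtual address to look up */
    )
    {
    /* each Level 1 Descriptor covers PAGE_BLOCK_SIZE (1 MB) */
    UINT32 * l1Table = (UINT32 *) (ttbr & 0xFFFFC000);
    UINT32 l1Desc = l1Table[virtAddr / PAGE_BLOCK_SIZE];

    /* the Page Table base address lives in the top bits of the L1D */
    UINT32 * pageTable =
        (UINT32 *) ((l1Desc >> L1D_TO_BASE_SHIFT) << L1D_TO_BASE_SHIFT);

    /* index the 256-entry Page Table with virtual address bits 19:12 */
    return &pageTable[(virtAddr & PTE_INDEX_MASK) >> PTE_INDEX_SHIFT];
    }
#endif /* _ASMLANGUAGE */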
/*
 * Architecture-dependent MMU states. These are states settable for
 * pages and here they correspond to settings in the Page Table Entries
 * (PTEs).
 *
 * We set Valid/Invalid by setting a Fault second-level descriptor
 * rather than by using the Access Permissions within a small page
 * second-level descriptor. This is because we will want to use the
 * Access Permissions to allow read/write from supervisor mode and we
 * cannot then use the AP bits to disallow access as the SR bits in the
 * MMU Control register must be set to 01 in order to control
 * read/write access from Supervisor mode.
 */

#define MMU_STATE_MASK_VALID        0x03       /* 2nd level desc type */

#if (CPU == XSCALE)
#define MMU_STATE_MASK_WRITABLE     0x00000030 /* AP bits */
#else
#define MMU_STATE_MASK_WRITABLE     0x00000FF0 /* All APn bits */
#endif /* CPU == XSCALE */

#define MMU_STATE_MASK_CACHEABLE    0x0000000C /* CB bits */

#if (CPU == XSCALE)
#define MMU_STATE_MASK_EX_CACHEABLE 0x0000004C /* X bit, and CB bits */
#endif

#if (CPU == XSCALE)
#define MMU_STATE_VALID     PTE_TYPE_EX_PAGE   /* set to page type */
#else
#define MMU_STATE_VALID     PTE_TYPE_SM_PAGE   /* set to page type */
#endif /* CPU == XSCALE */

#define MMU_STATE_VALID_NOT PTE_TYPE_FAULT     /* set to type fault */

#if (CPU == XSCALE)
#define MMU_STATE_WRITABLE     0x00000030      /* AP bits to 11 */
#else
#define MMU_STATE_WRITABLE     0x00000FF0      /* APn bits to 11 */
#endif /* CPU == XSCALE */

#define MMU_STATE_WRITABLE_NOT 0x0             /* APn bits to 00 */

#if ((ARMMMU == ARMMMU_710A) || (ARMMMU == ARMMMU_720T) || \
     (ARMMMU == ARMMMU_XSCALE))
/* Write-through mode is only available on some CPUs */
#define MMU_STATE_CACHEABLE_WRITETHROUGH 0x8
#endif /* ARMMMU == ARMMMU_710A/720T/XSCALE */

#if ((ARMMMU == ARMMMU_SA1100) || (ARMMMU == ARMMMU_SA1500) || \
     (ARMMMU == ARMMMU_XSCALE))
/* Minicacheable state only available on some CPUs. */
#if (CPU == XSCALE)
#define MMU_STATE_CACHEABLE_MINICACHE 0x48 /* allocate in minicache, X=1, CB=10 */
#else
#define MMU_STATE_CACHEABLE_MINICACHE 0x8  /* allocate in minicache */
#endif /* CPU == XSCALE */
#endif

#define MMU_STATE_CACHEABLE_COPYBACK 0xC   /* write back */

#if (CPU == XSCALE)
#define MMU_STATE_CACHEABLE_EX_COPYBACK 0x4C /* allow read/write allocate, X=1, CB=11 */
#endif /* CPU == XSCALE */

/*
 * Set the default state to be copyback. CACHEABLE_WRITETHROUGH can
 * also be selected on 710A.
 */

#define MMU_STATE_CACHEABLE     MMU_STATE_CACHEABLE_COPYBACK

#if (CPU == XSCALE)
#define MMU_STATE_EX_CACHEABLE  MMU_STATE_CACHEABLE_EX_COPYBACK
#endif /* CPU == XSCALE */

#define MMU_STATE_CACHEABLE_NOT 0x0

#if (CPU == XSCALE)
#define MMU_STATE_EX_CACHEABLE_NOT 0x0
#endif /* CPU == XSCALE */

#define MMU_STATE_BUFFERABLE     0x4  /* bufferable, not cacheable */
#define MMU_STATE_BUFFERABLE_NOT 0x0  /* will also set not cacheable */

#if (CPU == XSCALE)
#define MMU_STATE_EX_BUFFERABLE     0x44 /* bufferable, not cacheable, no coalesce */
#define MMU_STATE_EX_BUFFERABLE_NOT 0x0  /* will also set not cacheable, no coalesce */
#endif /* CPU == XSCALE */
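#ifndef _ASMLANGUAGE
/*
 * Illustrative sketch, not part of the original header: how the
 * MMU_STATE_* encodings above combine into a Level 2 (small or
 * extended page) descriptor. The function name is hypothetical; real
 * descriptor construction is performed by the MMU library itself.
 */

static UINT32 examplePteValue
    (
    UINT32 physAddr  /* physical base address of the 4 kB page */
    )
    {
    return ((physAddr >> ADDR_TO_PAGE) << ADDR_TO_PAGE) /* bits 31:12 */
           | MMU_STATE_WRITABLE   /* AP bits: read/write */
           | MMU_STATE_CACHEABLE  /* CB bits: copyback */
           | MMU_STATE_VALID;     /* descriptor type: page */
    }
#endif /* _ASMLANGUAGE */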
#else /* (!ARM_HAS_MPU) */

#define MPU_REGION_BASE_MASK 0xFFFFF000
#define MPU_REGION_SIZE_MASK 0x0000003E
#define MPU_REGION_SIZE_MAX  0x0000003E

#define MMU_STATE_MASK_VALID     0x03 /* 2 AP bits */
#define MMU_STATE_MASK_CACHEABLE 0xC0 /* CB bits */

#define MMU_STATE_VALID     0x03      /* Full access */
#define MMU_STATE_VALID_NOT 0x00      /* No access */

/* cacheable and bufferable states are not real physical bit definitions */

#if ((ARMMMU == ARMMMU_740T) || (ARMMMU == ARMMMU_946E))
/* Write-through mode is not available on all CPUs */
#define MMU_STATE_CACHEABLE_WRITETHROUGH 0x80
#endif /* ARMMMU == ARMMMU_740T/946E */

#define MMU_STATE_CACHE_BIT          0x80
#define MMU_STATE_CACHEABLE_COPYBACK 0xC0

/*
 * Set the default state to be copyback. CACHEABLE_WRITETHROUGH can
 * also be selected on 740T/946E.
 */

#define MMU_STATE_CACHEABLE     MMU_STATE_CACHEABLE_COPYBACK
#define MMU_STATE_CACHEABLE_NOT 0x0

#define MMU_STATE_BUFFERABLE     0x40 /* bufferable, not cacheable */
#define MMU_STATE_BUFFERABLE_NOT 0x0  /* will also set not cacheable */

/*
 * For MPUs, we need a size for a region that means the entire address
 * space. It is perfectly reasonable to wish to define such a region,
 * and it is, of course, not expressible within a UINT. So, define a
 * special size.
 */

#define MMU_ENTIRE_SPACE 0

#endif /* (!ARM_HAS_MPU) */

/* MMU Control Register bit allocations */

#define MMUCR_M_ENABLE (1<<0)  /* MMU enable */
#define MMUCR_A_ENABLE (1<<1)  /* Address alignment fault enable */
#define MMUCR_C_ENABLE (1<<2)  /* (data) cache enable */
#define MMUCR_W_ENABLE (1<<3)  /* write buffer enable */
#define MMUCR_PROG32   (1<<4)  /* PROG32 */
#define MMUCR_DATA32   (1<<5)  /* DATA32 */
#define MMUCR_L_ENABLE (1<<6)  /* Late abort on earlier CPUs */
#define MMUCR_BIGEND   (1<<7)  /* Big-endian (=1), little-endian (=0) */
#define MMUCR_SYSTEM   (1<<8)  /* System bit, modifies MMU protections */
#define MMUCR_ROM      (1<<9)  /* ROM bit, modifies MMU protections */
#define MMUCR_F        (1<<10) /* Should Be Zero */
#define MMUCR_Z_ENABLE (1<<11) /* Branch prediction enable on 810 */
#define MMUCR_I_ENABLE (1<<12) /* Instruction cache enable */

#if ((defined CPU_946ES) || (defined CPU_946ES_T))
#define MMUCR_ALTVECT        (1<<13) /* alternate vector select */
#define MMUCR_ROUND_ROBIN    (1<<14) /* round-robin placement */
#define MMUCR_DISABLE_TBIT   (1<<15) /* disable TBIT */
#define MMUCR_DSRAM_EN       (1<<16) /* D-SRAM enable */
#define MMUCR_DSRAM_LOADMODE (1<<17) /* D-SRAM load mode select */
#define MMUCR_ISRAM_EN       (1<<18) /* I-SRAM enable */
#define MMUCR_ISRAM_LOADMODE (1<<19) /* I-SRAM load mode select */
#endif /* CPU_946ES */

#if ((defined CPU_966ES) || (defined CPU_966ES_T))
#define MMUCR_ALTVECT      (1<<13) /* alternate vector select */
#define MMUCR_DISABLE_TBIT (1<<15) /* disable TBIT */
#endif /* CPU_966ES */

#if ((defined CPU_920T) || (defined CPU_920T_T) || \
     (defined CPU_940T) || (defined CPU_940T_T))
#define MMUCR_FB_DISABLE (1<<30) /* nFastBus bit */
#define MMUCR_ASYNC_BIT  (1<<31) /* Async bit */
#define MMUCR_SYNC       (1<<30) /* Synchronous mode */
#define MMUCR_ASYNC      (3<<30) /* Asynchronous mode */
#endif /* CPU_920T or CPU_940T */

/*
 * Values to be used when mmuEnable() is called. This will be after the
 * MMU has been initialised by sysInit()/romInit() and after cacheLib
 * has set whatever cache enable settings have been chosen.
 *
 * M 1 Enable MMU
 * A 0 Enable address alignment fault
 * C X ((D-)Cache Enable) Controlled by cacheLib
 * W X (Write Buffer) Controlled by cacheLib
 * P X (PROG32) should be set before this
 * D X (DATA32) should be set before this
 * L X (Late abort on earlier CPUs) ignore
 * B X (Big/Little-endian) should be set before this
 * S 0 (System)
 * R 1 (ROM)
 * F 0 Should be Zero
 * Z X (Branch prediction enable on 810) Controlled by cacheLib
 * I X (I-cache enable) Controlled by cacheLib
 *
 * For time being, do not enable the address alignment fault, as GCC
 * can generate unaligned accesses.
 */
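/*
 * Illustrative sketch, not part of the original header: the enable
 * value that the table above describes, expressed with the MMUCR_*
 * bits. Both macro names are hypothetical; the bits marked X in the
 * table belong to cacheLib or to earlier initialisation and are
 * deliberately left out.
 */

/* bits this table actually fixes: M=1, R=1, with A, S and F held at 0 */
#define EXAMPLE_MMU_ENABLE_MASK  (MMUCR_M_ENABLE | MMUCR_A_ENABLE | \
                                  MMUCR_SYSTEM | MMUCR_ROM | MMUCR_F)
#define EXAMPLE_MMU_ENABLE_VALUE (MMUCR_M_ENABLE | MMUCR_ROM)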