/*
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
extern inline void nocache_page (unsigned long vaddr)
{
	if (m68k_is040or060) {
		pgd_t *dir;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(vaddr);
		pmdp = pmd_offset(dir,vaddr);
		ptep = pte_offset(pmdp,vaddr);
		*ptep = pte_mknocache(*ptep);
	}
}
static inline void cache_page (unsigned long vaddr)
{
	if (m68k_is040or060) {
		pgd_t *dir;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(vaddr);
		pmdp = pmd_offset(dir,vaddr);
		ptep = pte_offset(pmdp,vaddr);
		*ptep = pte_mkcache(*ptep);
	}
}
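
/*
 * Added note (not in the original header): the 040/060 MMU table walk
 * appears to fetch descriptors straight from RAM, bypassing a copyback
 * data cache, which is presumably why pages holding page tables are made
 * non-cacheable for as long as they serve as tables. A minimal sketch of
 * the pairing (this is exactly what pte_alloc()/pte_free() below do):
 */
#if 0	/* illustrative only, not compiled */
	pte_t *page = (pte_t *)get_free_page(GFP_KERNEL);

	nocache_page((unsigned long)page);	/* page now holds PTEs */
	/* ... page is in use as a page table ... */
	cache_page((unsigned long)page);	/* make it cacheable again */
	free_page((unsigned long)page);
#endif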
extern const char PgtabStr_bad_pmd[];
extern const char PgtabStr_bad_pgd[];
extern const char PgtabStr_bad_pmdk[];
extern const char PgtabStr_bad_pgdk[];
extern inline void pte_free(pte_t * pte)
{
	cache_page((unsigned long)pte);
	free_page((unsigned long)pte);
}
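
/*
 * Added commentary: get_free_page() may sleep, so by the time it returns
 * another context can already have installed a page table here. *pmd is
 * therefore tested again after the allocation, and the now-superfluous
 * page is given back if the race was lost.
 */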
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *)get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				nocache_page((unsigned long)page);
				pmd_set(pmd,page);
				return page + address;
			}
			pmd_set(pmd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long)page);
	}
	if (pmd_bad(*pmd)) {
		printk(PgtabStr_bad_pmd, pmd_val(*pmd));
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
extern pmd_t *get_pointer_table (void);
extern void free_pointer_table (pmd_t *);
extern pmd_t *get_kpointer_table (void);
extern void free_kpointer_table (pmd_t *);
extern inline void pmd_free(pmd_t * pmd)
{
	free_pointer_table (pmd);
}
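
/*
 * Added note: pmd_alloc() follows the same allocate-then-recheck pattern
 * as pte_alloc() above, except that the table comes from
 * get_pointer_table() -- m68k pointer tables are smaller than a page, so
 * presumably several of them are carved out of a shared page rather than
 * each taking a full page from the allocator.
 */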
extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = get_pointer_table();
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
			return NULL;
		}
		free_pointer_table(page);
	}
	if (pgd_bad(*pgd)) {
		printk(PgtabStr_bad_pgd, pgd_val(*pgd));
		pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
extern inline void pte_free_kernel(pte_t * pte)
{
	cache_page((unsigned long)pte);
	free_page((unsigned long)pte);
}
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				nocache_page((unsigned long)page);
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk(PgtabStr_bad_pmdk, pmd_val(*pmd));
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
extern inline void pmd_free_kernel(pmd_t * pmd)
{
	free_kpointer_table(pmd);
}
extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = get_kpointer_table();
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
			return NULL;
		}
		free_kpointer_table(page);
	}
	if (pgd_bad(*pgd)) {
		printk(PgtabStr_bad_pgdk, pgd_val(*pgd));
		pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
extern inline void pgd_free(pgd_t * pgd)
{
	free_pointer_table ((pmd_t *) pgd);
}
extern inline pgd_t * pgd_alloc(void)
{
	return (pgd_t *)get_pointer_table ();
}
#define flush_icache() \
do { \
	if (m68k_is040or060) \
		asm ("nop; .word 0xf498 /* cinva %%ic */"); \
	else \
		asm ("movec %/cacr,%/d0;" \
		     "oriw %0,%/d0;" \
		     "movec %/d0,%/cacr" \
		     : /* no outputs */ \
		     : "i" (FLUSH_I) \
		     : "d0"); \
} while (0)
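
/*
 * Added usage note (hypothetical caller, names made up): whenever the
 * kernel stores instructions into memory -- e.g. copying a signal
 * trampoline onto the user stack -- stale lines must be invalidated from
 * the instruction cache before those instructions may run:
 *
 *	memcpy(code, trampoline, len);
 *	flush_icache();
 */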
/*
 * Invalidate the cache for the specified memory range: the range
 * starts at the given physical address and extends for len bytes.
 */
extern void cache_clear (unsigned long paddr, int len);
/*
 * Push any dirty cache lines in the specified memory range back to
 * memory: the range starts at the given physical address and extends
 * for len bytes.
 */
extern void cache_push (unsigned long paddr, int len);
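
/*
 * Added sketch (the start_dma_* helpers are hypothetical): the usual DMA
 * pairing is to push dirty lines before a device reads memory, and to
 * invalidate stale lines before the CPU reads what a device wrote:
 */
#if 0	/* illustrative only, not compiled */
	cache_push(VTOP(buf), len);	/* before DMA memory -> device */
	start_dma_out(dev, VTOP(buf), len);

	start_dma_in(dev, VTOP(buf), len);
	cache_clear(VTOP(buf), len);	/* before the CPU reads the buffer */
#endif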
/*
 * Push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v (unsigned long vaddr, int len);
/* cache code */
#define FLUSH_I_AND_D (0x00000808)
#define FLUSH_I (0x00000008)
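
/*
 * Added note: these appear to be the '020/'030 cacr "clear cache" bits --
 * CI (bit 3, clear instruction cache) and CD (bit 11, clear data cache) --
 * which the flush macros here OR into the cacr.
 */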
/* This is needed whenever the virtual mapping of the current
   process changes. */
#define __flush_cache_all() \
do { \
	if (m68k_is040or060) \
		__asm__ __volatile__ ("nop; .word 0xf478\n" ::); \
	else \
		__asm__ __volatile__ ("movec %%cacr,%%d0\n\t" \
				      "orw %0,%%d0\n\t" \
				      "movec %%d0,%%cacr" \
				      : : "di" (FLUSH_I_AND_D) : "d0"); \
} while (0)
#define __flush_cache_030() \
do { \
	if (m68k_is040or060 == 0) \
		__asm__ __volatile__ ("movec %%cacr,%%d0\n\t" \
				      "orw %0,%%d0\n\t" \
				      "movec %%d0,%%cacr" \
				      : : "di" (FLUSH_I_AND_D) : "d0"); \
} while (0)
#define flush_cache_all() __flush_cache_all()
extern inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_all();
}
extern inline void flush_cache_range(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	if (mm == current->mm) {
		if (m68k_is040or060)
			cache_push_v(start, end-start);
		else
			__flush_cache_030();
	}
}
extern inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr)
{
	if (vma->vm_mm == current->mm) {
		if (m68k_is040or060)
			cache_push_v(vmaddr, PAGE_SIZE);
		else
			__flush_cache_030();
	}
}
/* Push the page at the given kernel virtual address and clear the icache */
extern inline void flush_page_to_ram (unsigned long address)
{
	if (m68k_is040or060) {
		register unsigned long tmp __asm__ ("a0") = VTOP(address);
		__asm__ __volatile__ ("nop\n\t"
				      ".word 0xf470 /* cpushp %%dc,(%0) */\n\t"
				      ".word 0xf490 /* cinvp %%ic,(%0) */"
				      : : "a" (tmp));
	}
	else
		__asm__ __volatile__ ("movec %%cacr,%%d0\n\t"
				      "orw %0,%%d0\n\t"
				      "movec %%d0,%%cacr"
				      : : "di" (FLUSH_I) : "d0");
}
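
/*
 * Added usage note (hypothetical caller): a typical spot is right after
 * the kernel has filled a page that user space may go on to execute,
 * e.g. in the fault/exec paths:
 *
 *	flush_page_to_ram(page_address);
 */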
/* Push n pages starting at the kernel virtual address and clear the icache */
extern inline void flush_pages_to_ram (unsigned long address, int n)
{
	if (m68k_is040or060) {
		while (n--) {
			register unsigned long tmp __asm__ ("a0") = VTOP(address);
			__asm__ __volatile__ ("nop\n\t"
					      ".word 0xf470 /* cpushp %%dc,(%0) */\n\t"
					      ".word 0xf490 /* cinvp %%ic,(%0) */"
					      : : "a" (tmp));
			address += PAGE_SIZE;
		}
	}
	else
		__asm__ __volatile__ ("movec %%cacr,%%d0\n\t"
				      "orw %0,%%d0\n\t"
				      "movec %%d0,%%cacr"
				      : : "di" (FLUSH_I) : "d0");
}
/*
 * Check if the addr/len goes up to the end of a physical
 * memory chunk. Used for DMA functions.
 */
int mm_end_of_chunk (unsigned long addr, int len);
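
/*
 * Added sketch (hypothetical caller): since physically contiguous kernel
 * addresses may span discontiguous memory chunks on m68k machines, a DMA
 * setup routine might test the boundary before handing addr/len to a
 * device:
 *
 *	if (mm_end_of_chunk(addr, len))
 *		... the range runs to a chunk end; handle it specially ...
 */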
/*
 * Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp);
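
/*
 * Added sketch (the physical address and size are made up): mapping a
 * device's memory uncached into kernel space; a normal post-boot caller
 * would presumably pass NULL for memavailp:
 *
 *	unsigned long va = kernel_map(0x40000000, 0x100000,
 *				      KERNELMAP_NOCACHE_SER, NULL);
 */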
/*
 * Change the cache mode of some kernel address range.
 */
extern void kernel_set_cachemode(unsigned long address, unsigned long size,
				 unsigned cmode);
/* Values for nocacheflag and cmode */
#define KERNELMAP_FULL_CACHING 0
#define KERNELMAP_NOCACHE_SER 1
#define KERNELMAP_NOCACHE_NONSER 2
#define KERNELMAP_NO_COPYBACK 3
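
/*
 * Added sketch: a range already mapped cacheable can later be switched
 * to serialized non-cacheable mode, e.g. before using it for
 * memory-mapped I/O (va and size as obtained from kernel_map() above):
 *
 *	kernel_set_cachemode(va, 0x100000, KERNELMAP_NOCACHE_SER);
 */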
/*
 * The m68k doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
				    unsigned long address, pte_t pte)
{
}
/*
 * I don't know what is going on here, but since these were changed,
 * swapping hasn't been working on the 68040.
 */
#define SWP_TYPE(entry) (((entry) >> 2) & 0x7f)
#if 0
#define SWP_OFFSET(entry) ((entry) >> 9)
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))
#else
#define SWP_OFFSET(entry) ((entry) >> PAGE_SHIFT)
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << PAGE_SHIFT))
#endif
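
/*
 * Added worked example: assuming PAGE_SHIFT == 12 (4K pages),
 * SWP_ENTRY(3, 42) == (3 << 2) | (42 << 12) == 0x2a00c; SWP_TYPE()
 * then recovers 3 from bits 2-8 and SWP_OFFSET() recovers 42 from
 * bits 12 and up. Bits 0-1 stay clear, presumably so a swap entry can
 * never look like a present pte.
 */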
#endif /* __ASSEMBLY__ */
#endif /* _M68K_PGTABLE_H */