htab.c
	dword0 = hpte.dw0.dword0;
	return dword0;
}

unsigned long hpte_getword0_pSeries( unsigned long slot )
{
	unsigned long dword0;
	HPTE * hptep = htab_data.htab + slot;

	dword0 = hptep->dw0.dword0;
	return dword0;
}

static long hpte_find_iSeries(unsigned long vpn)
{
	HPTE hpte;
	long slot;

	slot = HvCallHpt_findValid( &hpte, vpn );
	if ( hpte.dw0.dw0.v ) {
		if ( slot < 0 ) {
			slot &= 0x7fffffffffffffff;
			slot = -slot;
		}
	}
	else
		slot = -1;
	return slot;
}

static long hpte_find_pSeries(unsigned long vpn)
{
	union {
		unsigned long d;
		Hpte_dword0   h;
	} hpte_dw0;
	long slot;
	unsigned long hash;
	unsigned long i,j;

	hash = hpt_hash(vpn, 0);
	for ( j=0; j<2; ++j ) {
		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
		for ( i=0; i<HPTES_PER_GROUP; ++i ) {
			hpte_dw0.d = hpte_getword0_pSeries( slot );
			if ( ( hpte_dw0.h.avpn == ( vpn >> 11 ) ) &&
			     ( hpte_dw0.h.v ) &&
			     ( hpte_dw0.h.h == j ) ) {
				/* HPTE matches */
				if ( j )
					slot = -slot;
				return slot;
			}
			++slot;
		}
		hash = ~hash;
	}
	return -1;
}

/* This function is called by iSeries setup when initializing the hpt */
void build_valid_hpte( unsigned long vsid, unsigned long ea, unsigned long pa,
		       pte_t * ptep, unsigned hpteflags, unsigned bolted )
{
	unsigned long vpn, flags;
	long hpte_slot;
	unsigned hash;
	pte_t pte;

	vpn = ((vsid << 28) | ( ea & 0xffff000 )) >> 12;
	spin_lock_irqsave( &hash_table_lock, flags );
	hpte_slot = ppc_md.hpte_selectslot( vpn );
	hash = 0;
	if ( hpte_slot < 0 ) {
		if ( hpte_slot == 0x8000000000000000 ) {
			udbg_printf("hash_page: ptep = 0x%016lx\n",
				    (unsigned long)ptep );
			udbg_printf("hash_page: ea  = 0x%016lx\n", ea );
			udbg_printf("hash_page: vpn = 0x%016lx\n", vpn );
			panic("hash_page: hpte already exists\n");
		}
		hash = 1;
		hpte_slot = -hpte_slot;
	}
	ppc_md.hpte_create_valid( hpte_slot, vpn, pa >> 12, hash, ptep,
				  hpteflags, bolted );

	if ( ptep ) {
		/* Get existing pte flags */
		pte = *ptep;
		pte_val(pte) &= ~_PAGE_HPTEFLAGS;

		/* Add in the _PAGE_HASHPTE flag */
		pte_val(pte) |= _PAGE_HASHPTE;

		/* Add in the _PAGE_SECONDARY flag */
		pte_val(pte) |= hash << 15;

		/* Add in the hpte slot */
		pte_val(pte) |= (hpte_slot << 12) & _PAGE_GROUP_IX;

		/* Save the new pte. */
		*ptep = pte;
	}
	spin_unlock_irqrestore( &hash_table_lock, flags );
}
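/*
 * Illustrative sketch, not in the original source; the bit layout is assumed
 * purely from the encoding build_valid_hpte() performs above.  The Linux pte
 * records where the HPTE landed so it can be found again later: the slot's
 * low bits go under _PAGE_GROUP_IX (shifted up by 12) and the secondary-hash
 * flag is kept at bit 15 (_PAGE_SECONDARY).  These helpers simply reverse
 * that encoding.
 */
static inline unsigned long pte_hpte_group_ix(pte_t pte)
{
	/* index of the HPTE within its hash group */
	return (pte_val(pte) & _PAGE_GROUP_IX) >> 12;
}

static inline unsigned long pte_hpte_secondary(pte_t pte)
{
	/* non-zero if the HPTE was created via the secondary hash */
	return (pte_val(pte) >> 15) & 1;
}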
/* Create an HPTE and validate it
 * It is assumed that the HPT slot currently is invalid.
 * The HPTE is set with the vpn, rpn (converted to absolute)
 * and flags
 */
static void hpte_create_valid_iSeries(unsigned long slot, unsigned long vpn,
				      unsigned long prpn, unsigned hash,
				      void * ptep, unsigned hpteflags,
				      unsigned bolted )
{
	/* Local copy of HPTE */
	struct {
		/* Local copy of first doubleword of HPTE */
		union {
			unsigned long d;
			Hpte_dword0   h;
		} dw0;
		/* Local copy of second doubleword of HPTE */
		union {
			unsigned long     d;
			Hpte_dword1       h;
			Hpte_dword1_flags f;
		} dw1;
	} lhpte;

	unsigned long avpn = vpn >> 11;
	unsigned long arpn = physRpn_to_absRpn( prpn );

	/* Fill in the local HPTE with absolute rpn, avpn and flags */
	lhpte.dw1.d       = 0;
	lhpte.dw1.h.rpn   = arpn;
	lhpte.dw1.f.flags = hpteflags;

	lhpte.dw0.d        = 0;
	lhpte.dw0.h.avpn   = avpn;
	lhpte.dw0.h.h      = hash;
	lhpte.dw0.h.bolted = bolted;
	lhpte.dw0.h.v      = 1;

	/* Now fill in the actual HPTE */
	HvCallHpt_addValidate( slot, hash, (HPTE *)&lhpte );
}

static void hpte_create_valid_pSeries(unsigned long slot, unsigned long vpn,
				      unsigned long prpn, unsigned hash,
				      void * ptep, unsigned hpteflags,
				      unsigned bolted)
{
	/* Local copy of HPTE */
	struct {
		/* Local copy of first doubleword of HPTE */
		union {
			unsigned long d;
			Hpte_dword0   h;
		} dw0;
		/* Local copy of second doubleword of HPTE */
		union {
			unsigned long     d;
			Hpte_dword1       h;
			Hpte_dword1_flags f;
		} dw1;
	} lhpte;

	unsigned long avpn = vpn >> 11;
	unsigned long arpn = physRpn_to_absRpn( prpn );
	HPTE *hptep;

	/* Fill in the local HPTE with absolute rpn, avpn and flags */
	lhpte.dw1.d       = 0;
	lhpte.dw1.h.rpn   = arpn;
	lhpte.dw1.f.flags = hpteflags;

	lhpte.dw0.d        = 0;
	lhpte.dw0.h.avpn   = avpn;
	lhpte.dw0.h.h      = hash;
	lhpte.dw0.h.bolted = bolted;
	lhpte.dw0.h.v      = 1;

	/* Now fill in the actual HPTE */
	hptep = htab_data.htab + slot;

	/* Set the second dword first so that the valid bit
	 * is the last thing set
	 */
	hptep->dw1.dword1 = lhpte.dw1.d;

	/* Guarantee the second dword is visible before
	 * the valid bit
	 */
	__asm__ __volatile__ ("eieio" : : : "memory");

	/* Now set the first dword including the valid bit */
	hptep->dw0.dword0 = lhpte.dw0.d;

	__asm__ __volatile__ ("ptesync" : : : "memory");
}

/* find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns zero.
 */
pte_t * find_linux_pte( pgd_t * pgdir, unsigned long ea )
{
	pgd_t *pg;
	pmd_t *pm;
	pte_t *pt = NULL;
	pte_t pte;

	pg = pgdir + pgd_index( ea );
	if ( ! pgd_none( *pg ) ) {
		pm = pmd_offset( pg, ea );
		if ( ! pmd_none( *pm ) ) {
			pt = pte_offset( pm, ea );
			pte = *pt;
			if ( ! pte_present( pte ) )
				pt = NULL;
		}
	}
	return pt;
}
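/*
 * Illustrative usage sketch, not in the original source: a caller that only
 * wants to know whether a present Linux pte backs an effective address can
 * walk the task's page tables with find_linux_pte().  Passing mm->pgd as the
 * directory is an assumption of this sketch; find_linux_pte() itself simply
 * uses whatever pgd the caller hands it.
 */
static int example_has_linux_pte(struct mm_struct *mm, unsigned long ea)
{
	pte_t *ptep = find_linux_pte(mm->pgd, ea);

	/* NULL means no present pte was found for this address */
	return ptep != NULL;
}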
static inline unsigned long computeHptePP( unsigned long pte )
{
	return ( pte & _PAGE_USER ) |
	       ( ( ( pte & _PAGE_USER ) >> 1 ) &
		 ( ( ~( ( pte >> 2 ) &		/* _PAGE_RW */
			( pte >> 7 ) ) ) &	/* _PAGE_DIRTY */
		   1 ) );
}

static void hpte_updatepp_iSeries(long slot, unsigned long newpp, unsigned long va)
{
	HvCallHpt_setPp( slot, newpp );
}

static void hpte_updatepp_pSeries(long slot, unsigned long newpp, unsigned long va)
{
	/* Local copy of first doubleword of HPTE */
	union {
		unsigned long d;
		Hpte_dword0   h;
	} hpte_dw0;

	/* Local copy of second doubleword of HPTE */
	union {
		unsigned long     d;
		Hpte_dword1       h;
		Hpte_dword1_flags f;
	} hpte_dw1;

	HPTE * hptep = htab_data.htab + slot;

	/* Turn off valid bit in HPTE */
	hpte_dw0.d = hptep->dw0.dword0;
	hpte_dw0.h.v = 0;
	hptep->dw0.dword0 = hpte_dw0.d;

	/* Ensure it is out of the tlb too */
	_tlbie( va );

	/* Insert the new pp bits into the HPTE */
	hpte_dw1.d = hptep->dw1.dword1;
	hpte_dw1.h.pp = newpp;
	hptep->dw1.dword1 = hpte_dw1.d;

	/* Ensure it is visible before validating */
	__asm__ __volatile__ ("eieio" : : : "memory");

	/* Turn the valid bit back on in HPTE */
	hpte_dw0.h.v = 1;
	hptep->dw0.dword0 = hpte_dw0.d;

	__asm__ __volatile__ ("ptesync" : : : "memory");
}

/*
 * Update the page protection bits.  Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT.  Assumes pages being operated on will not be stolen.
 */
void hpte_updateboltedpp_iSeries(unsigned long newpp, unsigned long ea )
{
	unsigned long vsid,va,vpn;
	long slot;

	vsid = get_kernel_vsid( ea );
	va = ( vsid << 28 ) | ( ea & 0x0fffffff );
	vpn = va >> PAGE_SHIFT;
	slot = ppc_md.hpte_find( vpn );
	HvCallHpt_setPp( slot, newpp );
}

static __inline__ void set_pp_bit(unsigned long pp, HPTE *addr)
{
	unsigned long old;
	unsigned long *p = (unsigned long *)(&(addr->dw1));

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		rldimi	%0,%2,0,62\n\
		stdcx.	%0,0,%3\n\
		bne	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (pp), "r" (p), "m" (*p)
	: "cc");
}

/*
 * Update the page protection bits.  Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT.  Assumes pages being operated on will not be stolen.
 */
void hpte_updateboltedpp_pSeries(unsigned long newpp, unsigned long ea)
{
	unsigned long vsid,va,vpn,flags;
	long slot;
	HPTE *hptep;

	vsid = get_kernel_vsid( ea );
	va = ( vsid << 28 ) | ( ea & 0x0fffffff );
	vpn = va >> PAGE_SHIFT;
	slot = ppc_md.hpte_find( vpn );
	hptep = htab_data.htab + slot;

	set_pp_bit(newpp, hptep);

	/* Ensure it is out of the tlb too */
	spin_lock_irqsave( &hash_table_lock, flags );
	_tlbie( va );
	spin_unlock_irqrestore( &hash_table_lock, flags );
}
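/*
 * Illustrative C-level equivalent of set_pp_bit() above, a sketch that is not
 * in the original source: the ldarx/stdcx. loop atomically replaces the low
 * two bits of the HPTE's second doubleword (the pp field) with the new value.
 * cmpxchg() is used here only as a stand-in for the reserve/conditional-store
 * pair.
 */
static inline void set_pp_bit_sketch(unsigned long pp, HPTE *addr)
{
	unsigned long *p = (unsigned long *)(&(addr->dw1));
	unsigned long old, new;

	do {
		old = *p;
		/* insert pp into the low two bits, as rldimi ...,0,62 does */
		new = (old & ~0x3UL) | (pp & 0x3UL);
	} while (cmpxchg(p, old, new) != old);
}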
/* This is called very early. */
void hpte_init_iSeries(void)
{
	ppc_md.hpte_invalidate     = hpte_invalidate_iSeries;
	ppc_md.hpte_updatepp       = hpte_updatepp_iSeries;
	ppc_md.hpte_updateboltedpp = hpte_updateboltedpp_iSeries;
	ppc_md.hpte_getword0       = hpte_getword0_iSeries;
	ppc_md.hpte_selectslot     = hpte_selectslot_iSeries;
	ppc_md.hpte_create_valid   = hpte_create_valid_iSeries;
	ppc_md.hpte_find           = hpte_find_iSeries;
}

void hpte_init_pSeries(void)
{
	ppc_md.hpte_invalidate     = hpte_invalidate_pSeries;
	ppc_md.hpte_updatepp       = hpte_updatepp_pSeries;
	ppc_md.hpte_updateboltedpp = hpte_updateboltedpp_pSeries;
	ppc_md.hpte_getword0       = hpte_getword0_pSeries;
	ppc_md.hpte_selectslot     = hpte_selectslot_pSeries;
	ppc_md.hpte_create_valid   = hpte_create_valid_pSeries;
	ppc_md.hpte_find           = hpte_find_pSeries;
}

/* Handle a fault by adding an HPTE
 * If the address can't be determined to be valid
 * via Linux page tables, return 1.  If handled
 * return 0
 */
int hash_page( unsigned long ea, unsigned long access )
{
	int rc = 1;
	void * pgdir = NULL;
	unsigned long va, vsid, vpn;
	unsigned long newpp, hash_ind, prpn;
	unsigned long hpteflags, regionid;
	long slot;
	struct mm_struct * mm;
	pte_t old_pte, new_pte, *ptep;

	/* Check for invalid addresses. */
	if (!IS_VALID_EA(ea)) {
		return 1;
	}

	regionid = REGION_ID(ea);
	switch ( regionid ) {
	case USER_REGION_ID:
		mm = current->mm;
		if ( mm == NULL ) {
			PPCDBG(PPCDBG_MM, "hash_page returning; mm = 0\n");
			return 1;
		}
		vsid = get_vsid(mm->context, ea );
		break;
	case IO_REGION_ID:
		mm = &ioremap_mm;
		vsid = get_kernel_vsid( ea );
		break;