📄 xlate.c
字号:
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005
 *
 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
 */

#undef DEBUG
#undef DEBUG_LOW

#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <xen/init.h>
#include <public/xen.h>
#include <asm/current.h>
#include <asm/papr.h>
#include <asm/hcalls.h>
#include <asm/platform.h>

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) printk(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#ifdef USE_PTE_INSERT
/*
 * Publish a hash-page-table entry: write the RPN (second) doubleword
 * first, force ordering with eieio, then write the VSID doubleword that
 * carries the valid bit, so a hardware walker can never observe a valid
 * PTE with a stale RPN.  Caller must hold whatever lock serializes the
 * choice of insertion slot; the slot must be invalid on entry.
 */
static inline void pte_insert(union pte volatile *pte,
                              ulong vsid, ulong rpn, ulong lrpn)
{
    /*
     * It's required that external locking be done to provide
     * exclusion between the choices of insertion points.  Any valid
     * choice of pte requires that the pte be invalid upon entry to
     * this function.
     */
    ASSERT( (pte->bits.v == 0) );

    /* Set shadow word.  (lrpn is currently unused.) */
    (void)lrpn;

    /* Set the second word first so the valid bit is the last thing set */
    pte->words.rpn = rpn;

    /* Guarantee the second word is visible before the valid bit */
    __asm__ __volatile__("eieio" : : : "memory");

    /* Now set the first word including the valid bit */
    pte->words.vsid = vsid;
    /* Architecturally this instruction will cause a heavier operation
     * if this one is not supported.  note: on some machines like Cell
     * this could be a nop */
    __asm__ __volatile__("ptesync" : : : "memory");
}
#endif

/*
 * POWER Arch 2.03 Sec 4.12.1 (Yes 970 is one)
 *
 * when a tlbsync instruction has been executed by a processor in a
 * given partition, a ptesync instruction must be executed by that
 * processor before a tlbie or tlbsync instruction is executed by
 * another processor in that partition.
 *
 * So for now, here is a BFLock to deal with it, the lock should be
 * per-domain.
 *
 * XXX Will need to audit all tlb usage soon enough.
 */
static DEFINE_SPINLOCK(native_tlbie_lock);

/*
 * Invalidate the TLB entry for the page mapped by *pte, which sits at
 * hash-table slot index ptex.  The virtual address is reconstructed
 * from the PTE's AVPN field and the hash-group index (re-inverting the
 * hash for secondary-group entries, pte->bits.h).  Serialized globally
 * by native_tlbie_lock per the architectural note above.
 */
static void pte_tlbie(union pte volatile *pte, ulong ptex)
{
    ulong va;
    ulong vsid;
    ulong group;
    ulong pi;
    ulong pi_high;

    vsid = pte->bits.avpn >> 5;
    group = ptex >> 3;
    if (pte->bits.h) {
        /* secondary hash: the group index is the complement of the hash */
        group = ~group;
    }
    /* recover the low 11 bits of the page index from hash xor group */
    pi = (vsid ^ group) & 0x7ff;
    pi_high = (pte->bits.avpn & 0x1f) << 11;
    pi |= pi_high;
    va = (pi << 12) | (vsid << 28);
    va &= ~(0xffffULL << 48);

    spin_lock(&native_tlbie_lock);
#ifndef FLUSH_THE_WHOLE_THING
    if (pte->bits.l) {
        /* large page: bit 0 of the RPN selects the page-size encoding */
        va |= (pte->bits.rpn & 1);
        asm volatile("ptesync ;tlbie %0,1" : : "r"(va) : "memory");
    } else {
        asm volatile("ptesync; tlbie %0,0" : : "r"(va) : "memory");
    }
    asm volatile("eieio; tlbsync; ptesync" : : : "memory");
#else
    {
        unsigned i;
        ulong rb;

        for (i = 0; i < 256; i++) {
            rb = i;
            rb <<= 12;
            /*
             * FIXME(review): rb is listed as an output ("=r") with no
             * input operand, so the value computed above never reaches
             * the tlbie instruction — this looks like it should be an
             * input constraint: : : "r" (rb) : "memory".  Confirm
             * against the GCC extended-asm constraint rules before
             * enabling FLUSH_THE_WHOLE_THING.
             */
            asm volatile("ptesync; tlbie %0,0; eieio; tlbsync; ptesync; isync"
                         : "=r" (rb) : : "memory");
            asm volatile("ptesync; tlbie %0,1; eieio; tlbsync; ptesync; isync"
                         : "=r" (rb) : : "memory");
        }
    }
#endif
    spin_unlock(&native_tlbie_lock);
}

/*
 * Insert a PTE on behalf of the current domain (PAPR H_ENTER-style
 * semantics, judging by the H_* flags and return codes used here —
 * confirm against the PAPR spec).  flags may include H_EXACT (use slot
 * ptex exactly, otherwise search the 8-entry PTEG), H_ZERO_PAGE,
 * H_ICACHE_INVALIDATE and H_ICACHE_SYNCHRONIZE.  vsid/rpn are the raw
 * first/second PTE doublewords proposed by the guest; the RPN is
 * translated from guest-physical to machine frame and validated.
 * Returns the slot index used, or an H_* error code.
 */
long pte_enter(ulong flags, ulong ptex, ulong vsid, ulong rpn)
{
    union pte pte;
    union pte volatile *ppte;
    struct domain_htab *htab;
    int lp_bits = 0;
    int pgshift = PAGE_SHIFT;
    ulong idx;
    int limit = 0;              /* how many PTEs to examine in the PTEG */
    ulong pfn;
    ulong mfn;
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    int mtype;
    struct page_info *pg = NULL;
    struct domain *f = NULL;

    htab = &d->arch.htab;
    /*
     * FIXME(review): valid slot indices are 0 .. (1 << log_num_ptes) - 1,
     * so this bound looks off by one — ptex == (1UL << log_num_ptes) is
     * accepted; the comparison should presumably be >=.  Confirm before
     * changing.
     */
    if (ptex > (1UL << htab->log_num_ptes)) {
        DBG("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return H_Parameter;
    }

    /* use local HPTE to avoid manual shifting & masking */
    pte.words.vsid = vsid;
    pte.words.rpn = rpn;

    if ( pte.bits.l ) {        /* large page? */
        /* figure out the page size for the selected large page:
         * the count of consecutive low 1-bits in the RPN encodes the
         * large-page size index, and those bits must be preserved
         * (lp_bits) when the machine RPN is substituted below */
        ulong lp_rpn = pte.bits.rpn;
        uint lp_size = 0;

        while ( lp_rpn & 0x1 ) {
            lp_rpn >>= 1;
            lp_bits = ((lp_bits << 1) | 0x1);
            lp_size++;
        }

        if ( lp_size >= d->arch.large_page_sizes ) {
            DBG("%s: attempt to use unsupported lp_size %d\n",
                __func__, lp_size);
            return H_Parameter;
        }

        /* get correct pgshift value */
        pgshift = d->arch.large_page_order[lp_size] + PAGE_SHIFT;
    }

    /* get the correct logical RPN in terms of 4K pages; need to mask
     * off lp bits and unused arpn bits if this is a large page */
    pfn = ~0ULL << (pgshift - PAGE_SHIFT);
    pfn = pte.bits.rpn & pfn;

    mfn = pfn2mfn(d, pfn, &mtype);
    if (mfn == INVALID_MFN) {
        DBG("%s: Bad PFN: 0x%lx\n", __func__, pfn);
        return H_Parameter;
    }

    if (mtype == PFN_TYPE_IO && !d->is_privileged) {
        /* only a privileged dom can access outside IO space */
        DBG("%s: unprivileged access to physical page: 0x%lx\n",
            __func__, pfn);
        return H_Privilege;
    }
    if (mtype == PFN_TYPE_IO) {
        /* IO pages must be mapped cache-inhibited and guarded,
         * and not write-through */
        if ( !((pte.bits.w == 0)
               && (pte.bits.i == 1)
               && (pte.bits.g == 1)) ) {
            DBG("%s: expecting an IO WIMG "
                "w=%x i=%d m=%d, g=%d\n word 0x%lx\n", __func__,
                pte.bits.w, pte.bits.i, pte.bits.m, pte.bits.g,
                pte.words.rpn);
            return H_Parameter;
        }
    }
    if (mtype == PFN_TYPE_GNTTAB) {
        DBG("%s: Dom[%d] mapping grant table: 0x%lx\n",
            __func__, d->domain_id, pfn << PAGE_SHIFT);
        /* grant-table pages are ordinary memory: force cacheable,
         * unguarded */
        pte.bits.i = 0;
        pte.bits.g = 0;
    }
    /* fixup the RPN field of our local PTE copy */
    pte.bits.rpn = mfn | lp_bits;

    /* clear reserved bits in high word */
    pte.bits.lock = 0x0;
    pte.bits.res = 0x0;

    /* clear reserved bits in low word */
    pte.bits.pp0 = 0x0;
    pte.bits.ts = 0x0;
    pte.bits.res2 = 0x0;

    if (mtype == PFN_TYPE_FOREIGN) {
        /* take a reference on the foreign page and its owner so the
         * mapping cannot outlive them */
        pg = mfn_to_page(mfn);
        f = page_get_owner(pg);

        BUG_ON(f == d);

        if (unlikely(!get_domain(f))) {
            DBG("%s: Rescinded, no domain: 0x%lx\n",  __func__, pfn);
            return H_Rescinded;
        }
        if (unlikely(!get_page(pg, f))) {
            put_domain(f);
            DBG("%s: Rescinded, no page: 0x%lx\n", __func__, pfn);
            return H_Rescinded;
        }
    }

    if ( !(flags & H_EXACT) ) {
        /* PTEG (not specific PTE); clear 3 lowest bits */
        ptex &= ~0x7UL;
        limit = 7;
    }

    /* data manipulations should be done prior to the pte insertion. */
    if ( flags & H_ZERO_PAGE ) {
        /*
         * NOTE(review): pgs is 1UL << pgshift — a BYTE count — yet the
         * loop clears one full page per iteration, i.e. it zeroes
         * (1 << pgshift) pages instead of (1 << pgshift) bytes.  It
         * presumably should iterate 1UL << (pgshift - PAGE_SHIFT)
         * times; confirm against the callers before fixing.
         */
        ulong pg = mfn << PAGE_SHIFT;
        ulong pgs = 1UL << pgshift;

        while (pgs > 0) {
            clear_page((void *)pg);
            pg += PAGE_SIZE;
            --pgs;
        }
    }

    if ( flags & H_ICACHE_INVALIDATE ) {
        /* flush dcache to memory, then invalidate icache, one cache
         * line at a time over the (possibly large) page */
        ulong k;
        ulong addr = mfn << PAGE_SHIFT;

        for (k = 0; k < (1UL << pgshift); k += L1_CACHE_BYTES) {
            dcbst(addr + k);
            sync();
            icbi(addr + k);
            sync();
            isync();
        }
    }

    if ( flags & H_ICACHE_SYNCHRONIZE ) {
        /* invalidate icache only (no dcache writeback) */
        ulong k;
        ulong addr = mfn << PAGE_SHIFT;
        for (k = 0; k < (1UL << pgshift); k += L1_CACHE_BYTES) {
            icbi(addr + k);
            sync();
            isync();
        }
    }

    /* claim the first free, unlocked slot in the PTEG (just slot ptex
     * itself when H_EXACT) */
    for (idx = ptex; idx <= ptex + limit; idx++) {
        ppte = &htab->map[idx];

        if ( ppte->bits.v == 0 && ppte->bits.lock == 0) {
            /* got it: store the RPN doubleword first, eieio, then the
             * VSID doubleword carrying the valid bit (same publication
             * order as pte_insert above) */
            asm volatile(
                "std %1, 8(%0); eieio; std %2, 0(%0); ptesync"
                :
                : "b" (ppte), "r" (pte.words.rpn), "r" (pte.words.vsid)
                : "memory");

            return idx;
        }
    }

    /* If the PTEG is full then no additional values are returned. */
    DBG("%s: PTEG FULL\n", __func__);

    if (pg != NULL)
        put_page(pg);
    /*
     * NOTE(review): the captured source is truncated here — the tail of
     * pte_enter (presumably releasing the foreign-domain reference and
     * returning the PTEG-full error code) is missing from this copy.
     */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -