/******************************************************************************
 * tlb_track.c
 *
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <asm/tlb_track.h>
#include <asm/p2m_entry.h>
#include <asm/vmx_mm_def.h>  /* for IA64_RR_SHIFT */
#include <asm/vmx_vcpu.h>    /* for VRN7 */
#include <asm/vcpu.h>        /* for PSCB() */

#define CONFIG_TLB_TRACK_DEBUG
#ifdef CONFIG_TLB_TRACK_DEBUG
# define tlb_track_printd(fmt, ...)     \
    dprintk(XENLOG_DEBUG, fmt, ##__VA_ARGS__)
#else
# define tlb_track_printd(fmt, ...)     do { } while (0)
#endif

static int
tlb_track_allocate_entries(struct tlb_track* tlb_track)
{
    struct page_info* entry_page;
    struct tlb_track_entry* track_entries;
    unsigned int allocated;
    unsigned long i;

    BUG_ON(tlb_track->num_free > 0);
    if (tlb_track->num_entries >= tlb_track->limit) {
        dprintk(XENLOG_WARNING, "%s: num_entries %d limit %d\n",
                __func__, tlb_track->num_entries, tlb_track->limit);
        return -ENOMEM;
    }
    entry_page = alloc_domheap_page(NULL, 0);
    if (entry_page == NULL) {
        dprintk(XENLOG_WARNING,
                "%s: domheap page failed. num_entries %d limit %d\n",
                __func__, tlb_track->num_entries, tlb_track->limit);
        return -ENOMEM;
    }

    list_add(&entry_page->list, &tlb_track->page_list);
    track_entries = (struct tlb_track_entry*)page_to_virt(entry_page);
    allocated = PAGE_SIZE / sizeof(track_entries[0]);
    tlb_track->num_entries += allocated;
    tlb_track->num_free += allocated;
    for (i = 0; i < allocated; i++) {
        list_add(&track_entries[i].list, &tlb_track->free_list);
        // tlb_track_printd("track_entries[%ld] 0x%p\n",
        //                  i, &track_entries[i]);
    }
    tlb_track_printd("allocated %d num_entries %d num_free %d\n",
                     allocated, tlb_track->num_entries, tlb_track->num_free);
    return 0;
}

int
tlb_track_create(struct domain* d)
{
    struct tlb_track* tlb_track = NULL;
    struct page_info* hash_page = NULL;
    unsigned int hash_size;
    unsigned int hash_shift;
    unsigned int i;

    tlb_track = xmalloc(struct tlb_track);
    if (tlb_track == NULL)
        goto out;

    hash_page = alloc_domheap_page(NULL, 0);
    if (hash_page == NULL)
        goto out;

    spin_lock_init(&tlb_track->free_list_lock);
    INIT_LIST_HEAD(&tlb_track->free_list);
    tlb_track->limit = TLB_TRACK_LIMIT_ENTRIES;
    tlb_track->num_entries = 0;
    tlb_track->num_free = 0;
    INIT_LIST_HEAD(&tlb_track->page_list);
    if (tlb_track_allocate_entries(tlb_track) < 0)
        goto out;

    spin_lock_init(&tlb_track->hash_lock);
    /* XXX hash size optimization */
    hash_size = PAGE_SIZE / sizeof(tlb_track->hash[0]);
    for (hash_shift = 0; (1 << (hash_shift + 1)) < hash_size; hash_shift++)
        /* nothing */;
    tlb_track->hash_size = (1 << hash_shift);
    tlb_track->hash_shift = hash_shift;
    tlb_track->hash_mask = (1 << hash_shift) - 1;
    tlb_track->hash = page_to_virt(hash_page);
    for (i = 0; i < tlb_track->hash_size; i++)
        INIT_LIST_HEAD(&tlb_track->hash[i]);

    smp_mb(); /* make initialization visible before use. */
    d->arch.tlb_track = tlb_track;
    dprintk(XENLOG_DEBUG, "hash 0x%p hash_size %d\n",
            tlb_track->hash, tlb_track->hash_size);

    return 0;

 out:
    if (hash_page != NULL)
        free_domheap_page(hash_page);

    if (tlb_track != NULL)
        xfree(tlb_track);

    return -ENOMEM;
}
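/*
 * Note on the hash sizing above (an observation, not from the original
 * comments): the hash_shift loop settles on the largest power of two
 * strictly below the number of list heads that fit in one page, so when
 * that count is itself a power of two the table ends up using only half
 * the page; hence the author's "XXX hash size optimization" marker.
 *
 * Hypothetical caller sketch (the call sites are assumed, not part of
 * this file): domain creation and teardown are expected to pair the two
 * entry points:
 *
 *     if (tlb_track_create(d) < 0)   // allocates hash page + entry pool
 *         return -ENOMEM;            // d->arch.tlb_track is left unset
 *     ...
 *     tlb_track_destroy(d);          // BUG_ON()s unless every entry has
 *                                    // been returned to the free_list
 */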
void
tlb_track_destroy(struct domain* d)
{
    struct tlb_track* tlb_track = d->arch.tlb_track;
    struct page_info* page;
    struct page_info* next;

    spin_lock(&tlb_track->free_list_lock);
    BUG_ON(tlb_track->num_free != tlb_track->num_entries);

    list_for_each_entry_safe(page, next, &tlb_track->page_list, list) {
        list_del(&page->list);
        free_domheap_page(page);
    }

    free_domheap_page(virt_to_page(tlb_track->hash));
    xfree(tlb_track);
    // d->tlb_track = NULL;
}

static struct tlb_track_entry*
tlb_track_get_entry(struct tlb_track* tlb_track)
{
    struct tlb_track_entry* entry = NULL;
    spin_lock(&tlb_track->free_list_lock);
    if (tlb_track->num_free == 0)
        (void)tlb_track_allocate_entries(tlb_track);

    if (tlb_track->num_free > 0) {
        BUG_ON(list_empty(&tlb_track->free_list));
        entry = list_entry(tlb_track->free_list.next,
                           struct tlb_track_entry, list);
        tlb_track->num_free--;
        list_del(&entry->list);
    }
    spin_unlock(&tlb_track->free_list_lock);
    return entry;
}

void
tlb_track_free_entry(struct tlb_track* tlb_track,
                     struct tlb_track_entry* entry)
{
    spin_lock(&tlb_track->free_list_lock);
    list_add(&entry->list, &tlb_track->free_list);
    tlb_track->num_free++;
    spin_unlock(&tlb_track->free_list_lock);
}

#include <linux/hash.h>
/* XXX hash function. */
static struct list_head*
tlb_track_hash_head(struct tlb_track* tlb_track, volatile pte_t* ptep)
{
    unsigned long hash = hash_long((unsigned long)ptep, tlb_track->hash_shift);
    BUG_ON(hash >= tlb_track->hash_size);
    BUG_ON((hash & tlb_track->hash_mask) != hash);
    return &tlb_track->hash[hash];
}

static int
tlb_track_pte_zapped(pte_t old_pte, pte_t ret_pte)
{
    if (pte_pfn(old_pte) != pte_pfn(ret_pte) ||
        (pte_val(old_pte) & ~(_PFN_MASK | _PAGE_TLB_TRACK_MASK)) !=
        (pte_val(ret_pte) & ~(_PFN_MASK | _PAGE_TLB_TRACK_MASK))) {
        /* Other thread zapped the p2m entry. */
        return 1;
    }
    return 0;
}
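/*
 * Reader's note for the function below (a summary inferred from this
 * file, not authoritative): tlb_track_insert_or_dirty() is the
 * insertion-side hot path. Given a p2m entry (ptep/old_pte) and the
 * guest mapping being inserted (vaddr normalized to region 7, plus
 * rid), it either finds a matching tracking entry (TLB_TRACK_FOUND),
 * gives up on per-mapping tracking when the pte is already marked
 * inserted-many (TLB_TRACK_MANY), or falls through to allocate a fresh
 * entry via tlb_track_get_entry() (ret starts as TLB_TRACK_NOT_FOUND).
 * bit_to_be_set (_PAGE_TLB_INSERTED) is presumably OR-ed into the pte
 * further down so the zapping side knows a TLB entry may exist.
 */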
static TLB_TRACK_RET_T
tlb_track_insert_or_dirty(struct tlb_track* tlb_track, struct mm_struct* mm,
                          volatile pte_t* ptep, pte_t old_pte,
                          unsigned long vaddr, unsigned long rid)
{
    unsigned long mfn = pte_pfn(old_pte);
    struct list_head* head = tlb_track_hash_head(tlb_track, ptep);
    struct tlb_track_entry* entry;
    struct tlb_track_entry* new_entry = NULL;
    unsigned long bit_to_be_set = _PAGE_TLB_INSERTED;
    pte_t new_pte;
    pte_t ret_pte;

    struct vcpu* v = current;
    TLB_TRACK_RET_T ret = TLB_TRACK_NOT_FOUND;

#if 0 /* this is done at vcpu_tlb_track_insert_or_dirty() */
    perfc_incr(tlb_track_iod);
    if (!pte_tlb_tracking(old_pte)) {
        perfc_incr(tlb_track_iod_not_tracked);
        return TLB_TRACK_NOT_TRACKED;
    }
#endif
    if (pte_tlb_inserted_many(old_pte)) {
        perfc_incr(tlb_track_iod_tracked_many);
        return TLB_TRACK_MANY;
    }

    /* vaddr must be normalized so that it is in vrn7 and page aligned. */
    BUG_ON((vaddr >> IA64_RR_SHIFT) != VRN7);
    BUG_ON((vaddr & ~PAGE_MASK) != 0);
#if 0
    tlb_track_printd("\n"
                     "\tmfn 0x%016lx\n"
                     "\told_pte 0x%016lx ptep 0x%p\n"
                     "\tptep_val 0x%016lx vaddr 0x%016lx rid %ld\n"
                     "\ttlb_track 0x%p head 0x%p\n",
                     mfn,
                     pte_val(old_pte), ptep, pte_val(*ptep),
                     vaddr, rid,
                     tlb_track, head);
#endif

 again:
    /*
     * zapping side may zap the p2m entry and then remove tlb track entry
     * non-atomically. We may see the stale tlb track entry here.
     * p2m_entry_retry() handles such a case.
     * Or other thread may zap the p2m entry and remove tlb track entry
     * and inserted new tlb track entry.
     */
    spin_lock(&tlb_track->hash_lock);
    list_for_each_entry(entry, head, list) {
        if (entry->ptep != ptep)
            continue;

        if (pte_pfn(entry->pte_val) == mfn) {
            // tlb_track_entry_printf(entry);
            if (entry->vaddr == vaddr && entry->rid == rid) {
                // tlb_track_printd("TLB_TRACK_FOUND\n");
                ret = TLB_TRACK_FOUND;
                perfc_incr(tlb_track_iod_found);
#ifdef CONFIG_TLB_TRACK_CNT
                entry->cnt++;
                if (entry->cnt > TLB_TRACK_CNT_FORCE_MANY) {
                    /*