
📄 tlb_track.c

📁 xen 3.2.2 source
💻 C
                     * heuristics:
                     * If a page is used to transfer data by a device
                     * channel, it is usually unmapped after only a few
                     * accesses (one or two TLB inserts) once the real
                     * device I/O completes, i.e. within a short period.
                     * This page, however, seems to be accessed many
                     * times, so we guess it is used as an I/O ring and
                     * tracking this entry is probably useless.
                     */
                    // tlb_track_entry_printf(entry);
                    // tlb_track_printd("cnt = %ld\n", entry->cnt);
                    perfc_incr(tlb_track_iod_force_many);
                    goto force_many;
                }
#endif
                goto found;
            } else {
#ifdef CONFIG_TLB_TRACK_CNT
            force_many:
#endif
                if (!pte_tlb_inserted(old_pte)) {
                    printk("%s:%d racy update\n", __func__, __LINE__);
                    old_pte = __pte(pte_val(old_pte) | _PAGE_TLB_INSERTED);
                }
                new_pte = __pte(pte_val(old_pte) | _PAGE_TLB_INSERTED_MANY);
                ret_pte = ptep_cmpxchg_rel(mm, vaddr, ptep, old_pte, new_pte);
                if (pte_val(ret_pte) != pte_val(old_pte)) {
                    // tlb_track_printd("TLB_TRACK_AGAIN\n");
                    ret = TLB_TRACK_AGAIN;
                    perfc_incr(tlb_track_iod_again);
                } else {
                    // tlb_track_printd("TLB_TRACK_MANY del entry 0x%p\n",
                    //                  entry);
                    ret = TLB_TRACK_MANY;
                    list_del(&entry->list);
                    // tlb_track_entry_printf(entry);
                    perfc_incr(tlb_track_iod_tracked_many_del);
                }
                goto out;
            }
        }

        /*
         * Another thread changed the p2m entry, removed the old tlb track
         * entry and inserted a new one after we read old_pte but before
         * we took the spinlock.
         */
        // tlb_track_printd("TLB_TRACK_AGAIN\n");
        ret = TLB_TRACK_AGAIN;
        perfc_incr(tlb_track_iod_again);
        goto out;
    }

    entry = NULL; // prevent freeing entry.
    if (pte_tlb_inserted(old_pte)) {
        /* Another thread removed the tlb_track_entry after we read old_pte
           but before we took the spin lock. */
        ret = TLB_TRACK_AGAIN;
        perfc_incr(tlb_track_iod_again);
        goto out;
    }

    if (new_entry == NULL && bit_to_be_set == _PAGE_TLB_INSERTED) {
        spin_unlock(&tlb_track->hash_lock);
        new_entry = tlb_track_get_entry(tlb_track);
        if (new_entry == NULL) {
            tlb_track_printd("get_entry failed\n");
            /* An entry can't be allocated;
               fall back to full flush mode. */
            bit_to_be_set |= _PAGE_TLB_INSERTED_MANY;
            perfc_incr(tlb_track_iod_new_failed);
        }
        // tlb_track_printd("new_entry 0x%p\n", new_entry);
        perfc_incr(tlb_track_iod_new_entry);
        goto again;
    }

    BUG_ON(pte_tlb_inserted_many(old_pte));
    new_pte = __pte(pte_val(old_pte) | bit_to_be_set);
    ret_pte = ptep_cmpxchg_rel(mm, vaddr, ptep, old_pte, new_pte);
    if (pte_val(old_pte) != pte_val(ret_pte)) {
        if (tlb_track_pte_zapped(old_pte, ret_pte)) {
            // tlb_track_printd("zapped TLB_TRACK_AGAIN\n");
            ret = TLB_TRACK_AGAIN;
            perfc_incr(tlb_track_iod_again);
            goto out;
        }

        /* Another thread set _PAGE_TLB_INSERTED and/or
           _PAGE_TLB_INSERTED_MANY. */
        if (pte_tlb_inserted_many(ret_pte)) {
            /* Another thread already set _PAGE_TLB_INSERTED_MANY and
               removed the entry. */
            // tlb_track_printd("inserted TLB_TRACK_MANY\n");
            BUG_ON(!pte_tlb_inserted(ret_pte));
            ret = TLB_TRACK_MANY;
            perfc_incr(tlb_track_iod_new_many);
            goto out;
        }
        BUG_ON(pte_tlb_inserted(ret_pte));
        BUG();
    }

    if (new_entry) {
        // tlb_track_printd("inserting new_entry 0x%p\n", new_entry);
        entry = new_entry;
        new_entry = NULL;

        entry->ptep = ptep;
        entry->pte_val = old_pte;
        entry->vaddr = vaddr;
        entry->rid = rid;
        cpus_clear(entry->pcpu_dirty_mask);
        vcpus_clear(entry->vcpu_dirty_mask);
        list_add(&entry->list, head);
#ifdef CONFIG_TLB_TRACK_CNT
        entry->cnt = 0;
#endif
        perfc_incr(tlb_track_iod_insert);
        // tlb_track_entry_printf(entry);
    } else {
        goto out;
    }

 found:
    BUG_ON(v->processor >= NR_CPUS);
    cpu_set(v->processor, entry->pcpu_dirty_mask);
    BUG_ON(v->vcpu_id >= NR_CPUS);
    vcpu_set(v->vcpu_id, entry->vcpu_dirty_mask);
    perfc_incr(tlb_track_iod_dirtied);

 out:
    spin_unlock(&tlb_track->hash_lock);
    if (ret == TLB_TRACK_MANY && entry != NULL)
        tlb_track_free_entry(tlb_track, entry);
    if (new_entry != NULL)
        tlb_track_free_entry(tlb_track, new_entry);
    return ret;
}
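The function above relies on ptep_cmpxchg_rel() for its lock-free PTE updates: read the old value, compute the new one, compare-and-swap, and either retry or give up when another CPU got there first. The following minimal sketch shows the same pattern in portable C11; fake_pte_t, FAKE_PAGE_TLB_INSERTED and set_inserted_bit are made-up names for illustration, not Xen APIs.

#include <stdint.h>
#include <stdatomic.h>
#include <stdio.h>

typedef _Atomic uint64_t fake_pte_t;

/* Stand-in for _PAGE_TLB_INSERTED; the bit position is arbitrary. */
#define FAKE_PAGE_TLB_INSERTED (UINT64_C(1) << 52)

/* Returns 0 if this caller set the bit, 1 if it was already set. */
static int set_inserted_bit(fake_pte_t *ptep)
{
    uint64_t old = atomic_load(ptep);
    for (;;) {
        if (old & FAKE_PAGE_TLB_INSERTED)
            return 1;                 /* another CPU won the race */
        if (atomic_compare_exchange_weak(ptep, &old,
                                         old | FAKE_PAGE_TLB_INSERTED))
            return 0;                 /* we installed the new value */
        /* On failure, 'old' has been reloaded; loop and retry. */
    }
}

int main(void)
{
    fake_pte_t pte = 0;
    printf("first caller set the bit: %s\n",
           set_inserted_bit(&pte) == 0 ? "yes" : "no");
    printf("second caller set the bit: %s\n",
           set_inserted_bit(&pte) == 0 ? "yes" : "no");
    return 0;
}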
void
__vcpu_tlb_track_insert_or_dirty(struct vcpu *vcpu, unsigned long vaddr,
                                 struct p2m_entry* entry)
{
    unsigned long vrn = vaddr >> IA64_RR_SHIFT;
    unsigned long rid = PSCB(vcpu, rrs[vrn]);
    TLB_TRACK_RET_T ret;

    /* Normalize the virtual region number to vrn7.
       For Linux dom0, vrn7 is the most common case. */
    vaddr |= VRN7 << VRN_SHIFT;
    vaddr &= PAGE_MASK;
    ret = tlb_track_insert_or_dirty(vcpu->domain->arch.tlb_track,
                                    &vcpu->domain->arch.mm,
                                    entry->ptep, entry->used,
                                    vaddr, rid);
    if (ret == TLB_TRACK_AGAIN)
        p2m_entry_set_retry(entry);
}
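The vaddr arithmetic above ORs in region number 7 and strips the page offset. Below is a standalone sketch of that normalization; the constants are illustrative assumptions (ia64 keeps the virtual region number in the top three address bits, and a 16KB page size is one common choice), not values taken from the Xen headers.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define VRN_SHIFT  61                 /* top 3 bits select the region */
#define VRN7       UINT64_C(7)
#define PAGE_SHIFT 14                 /* assumed 16KB pages */
#define PAGE_MASK  (~((UINT64_C(1) << PAGE_SHIFT) - 1))

static uint64_t normalize_vaddr(uint64_t vaddr)
{
    vaddr |= VRN7 << VRN_SHIFT;       /* force region number 7 */
    vaddr &= PAGE_MASK;               /* drop the in-page offset */
    return vaddr;
}

int main(void)
{
    uint64_t v = normalize_vaddr(UINT64_C(0x2000000000001234));
    printf("0x%016" PRIx64 "\n", v);  /* prints 0xe000000000000000 */
    return 0;
}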
TLB_TRACK_RET_T
tlb_track_search_and_remove(struct tlb_track* tlb_track,
                            volatile pte_t* ptep, pte_t old_pte,
                            struct tlb_track_entry** entryp)
{
    unsigned long mfn = pte_pfn(old_pte);
    struct list_head* head = tlb_track_hash_head(tlb_track, ptep);
    struct tlb_track_entry* entry;

    perfc_incr(tlb_track_sar);
    if (!pte_tlb_tracking(old_pte)) {
        perfc_incr(tlb_track_sar_not_tracked);
        return TLB_TRACK_NOT_TRACKED;
    }
    if (!pte_tlb_inserted(old_pte)) {
        BUG_ON(pte_tlb_inserted_many(old_pte));
        perfc_incr(tlb_track_sar_not_found);
        return TLB_TRACK_NOT_FOUND;
    }
    if (pte_tlb_inserted_many(old_pte)) {
        BUG_ON(!pte_tlb_inserted(old_pte));
        perfc_incr(tlb_track_sar_many);
        return TLB_TRACK_MANY;
    }

    spin_lock(&tlb_track->hash_lock);
    list_for_each_entry(entry, head, list) {
        if (entry->ptep != ptep)
            continue;

        if (pte_pfn(entry->pte_val) == mfn) {
            /*
             * PARANOIA
             * We're here after zapping a p2m entry.  However, another
             * pCPU may in theory update the same p2m entry with the
             * same mfn at the same time.  In such a case we can't
             * determine whether this entry is ours or belongs to the
             * racy p2m update.  Such racy behaviour by a guest domain
             * doesn't make sense, but it is allowed.  Go the very
             * pessimistic way: leave this entry to be found later and
             * do a full flush this time.
             *
             * NOTE: Updating the tlb tracking hash is protected by a
             *       spin lock, and setting the _PAGE_TLB_INSERTED and
             *       _PAGE_TLB_INSERTED_MANY bits is serialized by the
             *       same spin lock.
             *       See tlb_track_insert_or_dirty().
             */
            pte_t current_pte = *ptep;
            if (unlikely(pte_pfn(current_pte) == mfn &&
                         pte_tlb_tracking(current_pte) &&
                         pte_tlb_inserted(current_pte))) {
                BUG_ON(pte_tlb_inserted_many(current_pte));
                spin_unlock(&tlb_track->hash_lock);
                perfc_incr(tlb_track_sar_many);
                return TLB_TRACK_MANY;
            }

            list_del(&entry->list);
            spin_unlock(&tlb_track->hash_lock);
            *entryp = entry;
            perfc_incr(tlb_track_sar_found);
            // tlb_track_entry_printf(entry);
#ifdef CONFIG_TLB_TRACK_CNT
            // tlb_track_printd("cnt = %ld\n", entry->cnt);
#endif
            return TLB_TRACK_FOUND;
        }
        BUG();
    }
    BUG();

    spin_unlock(&tlb_track->hash_lock);
    return TLB_TRACK_NOT_TRACKED;
}
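tlb_track_search_and_remove() leaves the flush decision to its caller: TLB_TRACK_FOUND yields a single entry whose dirty masks identify exactly what to flush, TLB_TRACK_NOT_FOUND means the pte was tracked but never inserted into any TLB, and TLB_TRACK_NOT_TRACKED or TLB_TRACK_MANY leave no per-cpu information. The self-contained sketch below illustrates one plausible caller-side policy; the mock enum and flush_policy() are illustrative only, and the real caller elsewhere in the Xen tree may differ.

#include <stdio.h>

typedef enum {
    TLB_TRACK_FOUND,
    TLB_TRACK_NOT_FOUND,
    TLB_TRACK_NOT_TRACKED,
    TLB_TRACK_MANY,
} tlb_track_ret_t;

static const char* flush_policy(tlb_track_ret_t ret)
{
    switch (ret) {
    case TLB_TRACK_FOUND:
        /* One tracked insertion: flush only the cpus/vcpus recorded
         * in the entry's dirty masks, then free the entry. */
        return "targeted flush";
    case TLB_TRACK_NOT_FOUND:
        /* Tracked, but never inserted into any TLB: nothing to flush. */
        return "no flush";
    case TLB_TRACK_NOT_TRACKED:
    case TLB_TRACK_MANY:
    default:
        /* No per-cpu information is available: flush everything. */
        return "full flush";
    }
}

int main(void)
{
    printf("FOUND -> %s\n", flush_policy(TLB_TRACK_FOUND));
    printf("MANY  -> %s\n", flush_policy(TLB_TRACK_MANY));
    return 0;
}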
/* for debug */
void
__tlb_track_entry_printf(const char* func, int line,
                         const struct tlb_track_entry* entry)
{
    char pcpumask_buf[NR_CPUS + 1];
    char vcpumask_buf[MAX_VIRT_CPUS + 1];

    cpumask_scnprintf(pcpumask_buf, sizeof(pcpumask_buf),
                      entry->pcpu_dirty_mask);
    vcpumask_scnprintf(vcpumask_buf, sizeof(vcpumask_buf),
                       entry->vcpu_dirty_mask);
    printk("%s:%d\n"
           "\tmfn 0x%016lx\n"
           "\told_pte 0x%016lx ptep 0x%p\n"
           "\tpte_val 0x%016lx vaddr 0x%016lx rid %ld\n"
           "\tpcpu_dirty_mask %s vcpu_dirty_mask %s\n"
           "\tentry 0x%p\n",
           func, line,
           pte_pfn(entry->pte_val),
           pte_val(entry->pte_val), entry->ptep, pte_val(*entry->ptep),
           entry->vaddr, entry->rid,
           pcpumask_buf, vcpumask_buf,
           entry);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
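Both tlb_track_insert_or_dirty() and tlb_track_search_and_remove() locate entries through tlb_track_hash_head(), which buckets them by their pte pointer. Below is a minimal sketch of such pointer-hash bucketing; the shift and table-size constants are assumptions for illustration, not the values the Xen implementation uses.

#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 1024                /* power of two so the mask works */

static unsigned int hash_ptep(const void* ptep)
{
    /* The low bits of an aligned pte pointer are always zero; shift
     * them away before masking into the table. */
    return (unsigned int)(((uintptr_t)ptep >> 3) & (HASH_SIZE - 1));
}

int main(void)
{
    uint64_t fake_pte;
    printf("bucket %u\n", hash_ptep(&fake_pte));
    return 0;
}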
