author    Avi Kivity <avi@qumranet.com>  2008-05-15 13:51:35 +0300
committer Avi Kivity <avi@qumranet.com>  2008-07-20 12:40:50 +0300
commit    1b7fcd3263e5f12dba43d27b64e1578bec070c28 (patch)
tree      7f8d9679404f0b46803c6b1cf926235d170606ee /arch
parent    7682f2d0dd3ff5bd2756eac018a5b4e7e30ef16c (diff)
KVM: MMU: Fix false flooding when a pte points to page table
The KVM MMU tries to detect when a speculative pte update is not actually
used by demand fault, by checking the accessed bit of the shadow pte. If
the shadow pte has not been accessed, we deem that page table flooded and
remove the shadow page table, allowing further pte updates to proceed
without emulation.

However, if the pte itself points at a page table and is only used for
write operations, the accessed bit will never be set, since all access
will happen through the emulator.

This is exactly what happens with kscand on old (2.4.x) HIGHMEM kernels.
The kernel points a kmap_atomic() pte at a page table, and then proceeds
with read-modify-write operations to look at the dirty and accessed bits.
We get a false flood trigger on the kmap ptes, which results in the mmu
spending all its time setting up and tearing down shadows.

Fix by setting the shadow accessed bit on emulated accesses.

Signed-off-by: Avi Kivity <avi@qumranet.com>
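To make the heuristic concrete, here is a minimal, self-contained model of
the flood check and the fix. This is userspace C, not kernel code: the
harness, main(), and constants are invented for illustration; only the
accessed-bit test on the last speculatively updated spte mirrors the
kernel's logic (the last_updated_pte_accessed() check in mmu.c).

/* Model of the flood heuristic described above (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define SHADOW_ACCESSED_MASK (1ULL << 5)

static uint64_t *last_pte_updated;	/* last speculatively updated spte */

static int last_updated_pte_accessed(void)
{
	return last_pte_updated &&
	       (*last_pte_updated & SHADOW_ACCESSED_MASK);
}

int main(void)
{
	uint64_t spte = 0;

	last_pte_updated = &spte;

	/* The guest only ever reaches this pte through the emulator
	 * (e.g. kscand's kmap_atomic pte), so hardware never sets the
	 * accessed bit: the heuristic wrongly reports a flood. */
	printf("flooded (before fix): %d\n", !last_updated_pte_accessed());

	/* The fix: emulated accesses set the shadow accessed bit too. */
	spte |= SHADOW_ACCESSED_MASK;
	printf("flooded (after fix):  %d\n", !last_updated_pte_accessed());
	return 0;
}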
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/mmu.c | 17
-rw-r--r--  arch/x86/kvm/mmu.h |  3
2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8e449db..53f1ed8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1122,8 +1122,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		else
 			kvm_release_pfn_clean(pfn);
 	}
-	if (!ptwrite || !*ptwrite)
+	if (speculative) {
 		vcpu->arch.last_pte_updated = shadow_pte;
+		vcpu->arch.last_pte_gfn = gfn;
+	}
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -1671,6 +1673,18 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	vcpu->arch.update_pte.pfn = pfn;
 }
 
+static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u64 *spte = vcpu->arch.last_pte_updated;
+
+	if (spte
+	    && vcpu->arch.last_pte_gfn == gfn
+	    && shadow_accessed_mask
+	    && !(*spte & shadow_accessed_mask)
+	    && is_shadow_present_pte(*spte))
+		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
@@ -1694,6 +1708,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_access_page(vcpu, gfn);
 	kvm_mmu_free_some_pages(vcpu);
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 1730757..258e5d5 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -15,7 +15,8 @@
 #define PT_USER_MASK (1ULL << 2)
 #define PT_PWT_MASK (1ULL << 3)
 #define PT_PCD_MASK (1ULL << 4)
-#define PT_ACCESSED_MASK (1ULL << 5)
+#define PT_ACCESSED_SHIFT 5
+#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
 #define PT_DIRTY_MASK (1ULL << 6)
 #define PT_PAGE_SIZE_MASK (1ULL << 7)
 #define PT_PAT_MASK (1ULL << 7)
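Why introduce PT_ACCESSED_SHIFT when PT_ACCESSED_MASK already existed?
The kernel's set_bit() takes a bit number rather than a mask, and the new
kvm_mmu_access_page() uses set_bit() so the update to the shadow pte is
atomic. A minimal standalone illustration of the shift/mask relationship
(hypothetical userspace snippet, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK  (1ULL << PT_ACCESSED_SHIFT)

int main(void)
{
	uint64_t spte = 0;

	/* A set_bit()-style update wants the bit number (the shift)... */
	spte |= 1ULL << PT_ACCESSED_SHIFT;

	/* ...while tests against the pte use the derived mask. */
	printf("accessed=%d\n", !!(spte & PT_ACCESSED_MASK));
	return 0;
}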