author     Marcelo Tosatti <mtosatti@redhat.com>	2009-12-05 12:34:11 -0200
committer  Marcelo Tosatti <mtosatti@redhat.com>	2009-12-27 13:36:30 -0200
commit     fb341f572d26e0786167cd96b90cc4febed830cf (patch)
tree       2bf327861e4da2745b66c2dda03c5c5b0afcf7b6 /arch/x86/kvm
parent     6b7b284958d47b77d06745b36bc7f36dab769d9b (diff)
KVM: MMU: remove prefault from invlpg handler
The invlpg prefault optimization breaks Windows 2008 R2 occasionally.
The visible effect is that the invlpg handler instantiates a pte which
is, microseconds later, written with a different gfn by another vcpu.
The guest OS may have other mechanisms, of which the hypervisor is unaware,
to prevent a present translation from being used.
While the documentation states that the CPU is at liberty to prefetch TLB
entries, it appears the guest does not take this into account, so remove the
TLB prefetch from the invlpg handler.
Cc: stable@kernel.org
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
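
[Editor's note: for reference, the prefault path removed by this patch is
roughly the following; this is an annotated excerpt of the code deleted in
the diff below, not a standalone implementation. The comments mark where the
race described above can occur.]

	/*
	 * After dropping mmu_lock, the old invlpg handler re-read the guest
	 * pte that backed the invalidated spte and, if it was present and
	 * accessed, re-instantiated the shadow mapping right away.
	 */
	if (pte_gpa == -1)
		return;
	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
				  sizeof(pt_element_t)))
		return;
	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
		if (mmu_topup_memory_caches(vcpu))
			return;
		/*
		 * Race window: another vcpu may rewrite the guest pte with a
		 * different gfn microseconds later, relying on guest-side
		 * serialization that the hypervisor cannot observe, so the
		 * prefaulted spte can point at stale contents.
		 */
		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
				  sizeof(pt_element_t), 0);
	}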
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/paging_tmpl.h | 18 ------------------
1 file changed, 0 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a601713..58a0f1e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	pt_element_t gpte;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		if (level == PT_PAGE_TABLE_LEVEL  ||
 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT);
-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-				  sizeof(pt_element_t)))
-		return;
-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-		if (mmu_topup_memory_caches(vcpu))
-			return;
-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-				  sizeof(pt_element_t), 0);
-	}
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)