author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2010-07-16 11:25:17 +0800
committer	Avi Kivity <avi@redhat.com>	2010-08-02 06:40:58 +0300
commit	9ed5520dd3c9cb79c25f95fce9c57b87637d0fb7 (patch)
tree	32a983f23b3aa4dba5dea64a57f1b96126afe92a /arch
parent	daa3db693ce925a14b7e17ab6f306dc0e6a5342c (diff)
KVM: MMU: fix page dirty tracking lost while sync page
In the sync-page path, if spte.writable changes, page dirty tracking can be lost. For example: assume spte.writable = 0 in an unsync page. When the page is synced, the spte is mapped writable (spte.writable = 1). Later the guest writes through spte.gfn, so the page behind spte.gfn is dirty. The guest then changes the mapping back to read-only, and after the next sync spte.writable = 0 again.

So when the host later releases the spte, it sees spte.writable = 0 and does not mark the page dirty.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
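To make the failure mode concrete, here is a minimal user-space sketch (the structures and helper names are hypothetical stand-ins, not kernel code) that models the sequence above: if nothing records dirtiness at the writable -> read-only transition, the information is lost by the time the page is released; recording it at the transition, as the patch below does in set_spte(), preserves it.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for a shadow PTE and its backing host page. */
struct fake_page { bool dirty; };
struct fake_spte { bool writable; struct fake_page *page; };

/* Old sync behaviour: just install the new protection bits. */
static void sync_spte_old(struct fake_spte *spte, bool new_writable)
{
	spte->writable = new_writable;
}

/* Fixed sync behaviour: when a writable mapping becomes read-only, the
 * guest may already have written through it, so conservatively record
 * the dirtiness before the write permission disappears. */
static void sync_spte_fixed(struct fake_spte *spte, bool new_writable)
{
	if (spte->writable && !new_writable)
		spte->page->dirty = true;
	spte->writable = new_writable;
}

/* Release model: the old code only marked the page dirty if the spte
 * was still writable at release time. */
static void release(struct fake_spte *spte)
{
	if (spte->writable)
		spte->page->dirty = true;
}

int main(void)
{
	struct fake_page page = { .dirty = false };
	struct fake_spte spte = { .writable = false, .page = &page };

	sync_spte_old(&spte, true);    /* sync maps the spte writable      */
	/* ... guest writes through the mapping: page is really dirty ...  */
	sync_spte_old(&spte, false);   /* guest flips it back to read-only */
	release(&spte);
	printf("old path:   dirty = %d (dirtiness lost)\n", page.dirty);

	page.dirty = false;
	spte.writable = false;
	sync_spte_fixed(&spte, true);
	/* ... guest write happens here ...                                 */
	sync_spte_fixed(&spte, false); /* transition records the dirtiness */
	release(&spte);
	printf("fixed path: dirty = %d\n", page.dirty);
	return 0;
}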
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/mmu.c	10
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9b3b916..a04756a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1985,6 +1985,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
+	if (is_writable_pte(*sptep) && !is_writable_pte(spte))
+		kvm_set_pfn_dirty(pfn);
 	update_spte(sptep, spte);
 done:
 	return ret;
@@ -1998,7 +2000,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			 bool reset_host_protection)
 {
 	int was_rmapped = 0;
-	int was_writable = is_writable_pte(*sptep);
 	int rmap_count;
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
@@ -2048,15 +2049,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	page_header_update_slot(vcpu->kvm, sptep, gfn);
 	if (!was_rmapped) {
 		rmap_count = rmap_add(vcpu, sptep, gfn);
-		kvm_release_pfn_clean(pfn);
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, sptep, gfn);
-	} else {
-		if (was_writable)
-			kvm_release_pfn_dirty(pfn);
-		else
-			kvm_release_pfn_clean(pfn);
 	}
+	kvm_release_pfn_clean(pfn);
 	if (speculative) {
 		vcpu->arch.last_pte_updated = sptep;
 		vcpu->arch.last_pte_gfn = gfn;