author | Roedel, Joerg <Joerg.Roedel@amd.com> | 2010-05-06 11:38:43 +0200
---|---|---
committer | Avi Kivity <avi@redhat.com> | 2010-05-19 11:36:39 +0300
commit | b69e8caef5b190af48c525f6d715e7b7728a77f6 |
tree | 102d0728a1c8ae0b698e6f746cd40a67dfc2db94 /arch/x86 |
parent | 0d945bd9351199744c1e89d57a70615b6ee9f394 |
KVM: x86: Inject #GP with the right rip on efer writes
This patch fixes a bug in the KVM efer-msr write path. If a
guest writes to a reserved efer bit, the set_efer function
injects the #GP directly. The architecture-dependent wrmsr
function does not see this, assumes success, and advances the
rip. This results in a #GP in the guest with the wrong rip.
The fix is to report efer write errors back to the
architectural wrmsr function.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
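
For background (not part of the patch below), here is a minimal sketch of the consumer side: the vendor wrmsr exit handler only advances the guest rip after the MSR write succeeds, so an error that set_efer() now returns through kvm_set_msr_common() ends up as a #GP whose rip still points at the faulting wrmsr. The handler name is hypothetical and the calls are simplified from the vmx/svm exit handlers of this period.

```c
/*
 * Illustrative sketch only, not code from this patch: how an exit
 * handler uses the error that set_efer() now propagates through
 * kvm_set_msr_common()/kvm_set_msr().
 */
static int handle_wrmsr_sketch(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (kvm_set_msr(vcpu, msr, data) != 0) {
		/* Write failed: inject #GP while rip still points at WRMSR. */
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	/* Write succeeded: only now advance rip past the WRMSR instruction. */
	skip_emulated_instruction(vcpu);
	return 1;
}
```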
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kvm/x86.c | 31 |
1 file changed, 12 insertions, 19 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 161ede2..fe6d126 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -683,37 +683,29 @@ static u32 emulated_msrs[] = {
         MSR_IA32_MISC_ENABLE,
 };
 
-static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-        if (efer & efer_reserved_bits) {
-                kvm_inject_gp(vcpu, 0);
-                return;
-        }
+        if (efer & efer_reserved_bits)
+                return 1;
 
         if (is_paging(vcpu)
-            && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
-                kvm_inject_gp(vcpu, 0);
-                return;
-        }
+            && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+                return 1;
 
         if (efer & EFER_FFXSR) {
                 struct kvm_cpuid_entry2 *feat;
 
                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-                        kvm_inject_gp(vcpu, 0);
-                        return;
-                }
+                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
+                        return 1;
         }
 
         if (efer & EFER_SVME) {
                 struct kvm_cpuid_entry2 *feat;
 
                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-                        kvm_inject_gp(vcpu, 0);
-                        return;
-                }
+                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
+                        return 1;
         }
 
         kvm_x86_ops->set_efer(vcpu, efer);
@@ -725,6 +717,8 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
         vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
         kvm_mmu_reset_context(vcpu);
+
+        return 0;
 }
 
 void kvm_enable_efer_bits(u64 mask)
@@ -1153,8 +1147,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
         switch (msr) {
         case MSR_EFER:
-                set_efer(vcpu, data);
-                break;
+                return set_efer(vcpu, data);
         case MSR_K7_HWCR:
                 data &= ~(u64)0x40;  /* ignore flush filter disable */
                 data &= ~(u64)0x100; /* ignore ignne emulation enable */