author     Andre Przywara <andre.przywara@amd.com>    2010-12-21 11:12:01 +0100
committer  Avi Kivity <avi@redhat.com>                2011-01-12 11:30:59 +0200
commit     db8fcefaa704ccb40b6dcd24e3b75bad3ce7dde3 (patch)
tree       7915835b4ebe513bbb9ad08388db5a45fa0355f0 /arch
parent     eea1cff9ab732ea56358ff5e1bd8b99db2e8402d (diff)
download   kernel_samsung_smdk4412-db8fcefaa704ccb40b6dcd24e3b75bad3ce7dde3.zip
           kernel_samsung_smdk4412-db8fcefaa704ccb40b6dcd24e3b75bad3ce7dde3.tar.gz
           kernel_samsung_smdk4412-db8fcefaa704ccb40b6dcd24e3b75bad3ce7dde3.tar.bz2
KVM: move complete_insn_gp() into x86.c
Move the complete_insn_gp() helper function out of the VMX-specific code and
into the generic x86 code so that it can also be used by SVM.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  2
-rw-r--r--  arch/x86/kvm/vmx.c              | 16
-rw-r--r--  arch/x86/kvm/x86.c              |  9
3 files changed, 15 insertions, 12 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cb5cad2..cd4a990 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -828,4 +828,6 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8e87bae..fd8ffde 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3147,14 +3147,6 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 	hypercall[2] = 0xc1;
 }
 
-static void complete_insn_gp(struct kvm_vcpu *vcpu, int err)
-{
-	if (err)
-		kvm_inject_gp(vcpu, 0);
-	else
-		skip_emulated_instruction(vcpu);
-}
-
 static int handle_cr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification, val;
@@ -3172,21 +3164,21 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		switch (cr) {
 		case 0:
 			err = kvm_set_cr0(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 3:
 			err = kvm_set_cr3(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 4:
 			err = kvm_set_cr4(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 8: {
 				u8 cr8_prev = kvm_get_cr8(vcpu);
 				u8 cr8 = kvm_register_read(vcpu, reg);
 				err = kvm_set_cr8(vcpu, cr8);
-				complete_insn_gp(vcpu, err);
+				kvm_complete_insn_gp(vcpu, err);
 				if (irqchip_in_kernel(vcpu->kvm))
 					return 1;
 				if (cr8_prev <= cr8)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2dbf68c..1d54cb7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -334,6 +334,15 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
+{
+	if (err)
+		kvm_inject_gp(vcpu, 0);
+	else
+		kvm_x86_ops->skip_emulated_instruction(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
+
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
 	++vcpu->stat.pf_guest;
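
For illustration, a minimal sketch of how SVM code could reuse the helper once it is
exported from x86.c. Only kvm_set_cr0() and kvm_complete_insn_gp() come from the patch
above; the function name cr0_write_interception() and its calling convention are
hypothetical and not part of this commit. Note that the moved helper skips the
instruction through kvm_x86_ops->skip_emulated_instruction() rather than VMX's local
skip_emulated_instruction(), which is what makes it usable from either vendor module.

#include <linux/kvm_host.h>

/*
 * Hypothetical SVM-side handler for an intercepted CR0 write
 * (illustration only -- not part of this commit).  With
 * kvm_complete_insn_gp() exported from x86.c, vendor code can let the
 * generic helper either inject #GP(0) on failure or skip the emulated
 * instruction on success via kvm_x86_ops->skip_emulated_instruction().
 */
static int cr0_write_interception(struct kvm_vcpu *vcpu, unsigned long val)
{
	int err;

	err = kvm_set_cr0(vcpu, val);		/* generic CR0 write checks */
	kvm_complete_insn_gp(vcpu, err);	/* #GP(0) or advance RIP */
	return 1;				/* exit handled, resume guest */
}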