author	Joerg Roedel <joerg.roedel@amd.com>	2009-08-07 11:49:28 +0200
committer	Avi Kivity <avi@redhat.com>	2009-09-10 08:33:23 +0300
commit	2af9194d1b683f91ae956afff9afb0b52a241371 (patch)
tree	70e7bc17d9e715b19545ce6002dfedbf160f6964
parent	256cd2ef4f5c125f5df2c81d8457f080a4684ae6 (diff)
KVM: SVM: add helper functions for global interrupt flag
This patch makes the code easier to read when it comes to setting, clearing and checking the status of the virtualized global interrupt flag for the VCPU.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--	arch/x86/kvm/svm.c	33
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 10e718d..9f72772 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -129,6 +129,21 @@ static inline bool is_nested(struct vcpu_svm *svm)
return svm->nested_vmcb;
}
+static inline void enable_gif(struct vcpu_svm *svm)
+{
+ svm->vcpu.arch.hflags |= HF_GIF_MASK;
+}
+
+static inline void disable_gif(struct vcpu_svm *svm)
+{
+ svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
+}
+
+static inline bool gif_set(struct vcpu_svm *svm)
+{
+ return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
+}
+
static unsigned long iopm_base;
struct kvm_ldttss_desc {
@@ -621,7 +636,9 @@ static void init_vmcb(struct vcpu_svm *svm)
force_new_asid(&svm->vcpu);
svm->nested_vmcb = 0;
- svm->vcpu.arch.hflags = HF_GIF_MASK;
+ svm->vcpu.arch.hflags = 0;
+
+ enable_gif(svm);
}
static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -1629,7 +1646,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
svm->vmcb->save.cpl = 0;
svm->vmcb->control.exit_int_info = 0;
- svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
+ disable_gif(svm);
/* Exit nested SVM mode */
svm->nested_vmcb = 0;
@@ -1761,7 +1778,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
- svm->vcpu.arch.hflags |= HF_GIF_MASK;
+ enable_gif(svm);
return 0;
}
@@ -1850,7 +1867,7 @@ static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
- svm->vcpu.arch.hflags |= HF_GIF_MASK;
+ enable_gif(svm);
return 1;
}
@@ -1863,7 +1880,7 @@ static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
- svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
+ disable_gif(svm);
/* After a CLGI no interrupts should come */
svm_clear_vintr(svm);
@@ -2352,7 +2369,7 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- BUG_ON(!(svm->vcpu.arch.hflags & HF_GIF_MASK));
+ BUG_ON(!(gif_set(svm)));
svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
@@ -2383,7 +2400,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
struct vmcb *vmcb = svm->vmcb;
return (vmcb->save.rflags & X86_EFLAGS_IF) &&
!(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
- (svm->vcpu.arch.hflags & HF_GIF_MASK) &&
+ gif_set(svm) &&
!is_nested(svm);
}
@@ -2398,7 +2415,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
* GIF becomes 1, because that's a separate STGI/VMRUN intercept.
* The next time we get that intercept, this function will be
* called again though and we'll get the vintr intercept. */
- if (svm->vcpu.arch.hflags & HF_GIF_MASK) {
+ if (gif_set(svm)) {
svm_set_vintr(svm);
svm_inject_irq(svm, 0x0);
}
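
For readers skimming the diff, here is a minimal userspace sketch of the accessor pattern the patch introduces. The struct toy_vcpu, GIF_MASK, and the main() harness below are invented purely for illustration; in the kernel the flag lives in svm->vcpu.arch.hflags and the bit is HF_GIF_MASK, exactly as shown in the hunks above.

/*
 * Standalone sketch (not kernel code): the same enable/disable/query
 * helper pattern, applied to a plain struct so it compiles and runs
 * in userspace.
 */
#include <stdbool.h>
#include <stdio.h>

#define GIF_MASK (1u << 0)	/* stand-in for HF_GIF_MASK */

struct toy_vcpu {
	unsigned int hflags;	/* stand-in for vcpu.arch.hflags */
};

static inline void enable_gif(struct toy_vcpu *v)
{
	v->hflags |= GIF_MASK;
}

static inline void disable_gif(struct toy_vcpu *v)
{
	v->hflags &= ~GIF_MASK;
}

static inline bool gif_set(struct toy_vcpu *v)
{
	/* !! collapses the masked value to 0 or 1 */
	return !!(v->hflags & GIF_MASK);
}

int main(void)
{
	struct toy_vcpu v = { .hflags = 0 };

	enable_gif(&v);		/* analogous to the STGI / VMRUN paths */
	printf("gif after enable:  %d\n", gif_set(&v));

	disable_gif(&v);	/* analogous to the CLGI / nested #VMEXIT paths */
	printf("gif after disable: %d\n", gif_set(&v));

	return 0;
}

The value of the change is readability: callers such as stgi_interception(), clgi_interception() and svm_interrupt_allowed() now state intent through enable_gif(), disable_gif() and gif_set() instead of repeating the raw HF_GIF_MASK bit manipulation at every site.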