Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h  | 14
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h   | 12
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c    |  4
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c           | 53
-rw-r--r--  arch/powerpc/kvm/Kconfig             | 11
-rw-r--r--  arch/powerpc/kvm/Makefile            |  6
-rw-r--r--  arch/powerpc/kvm/booke_guest.c       | 17
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S  | 79
-rw-r--r--  arch/powerpc/kvm/emulate.c           |  8
-rw-r--r--  arch/powerpc/kvm/powerpc.c           | 99
10 files changed, 246 insertions(+), 57 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2655e2a..34b52b7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -81,11 +81,17 @@ struct kvm_vcpu_arch {
struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
/* Pages which are referenced in the shadow TLB. */
struct page *shadow_pages[PPC44x_TLB_SIZE];
- /* Copy of the host's TLB. */
- struct tlbe host_tlb[PPC44x_TLB_SIZE];
+
+ /* Track which TLB entries we've modified in the current exit. */
+ u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
u32 host_stack;
u32 host_pid;
+ u32 host_dbcr0;
+ u32 host_dbcr1;
+ u32 host_dbcr2;
+ u32 host_iac[4];
+ u32 host_msr;
u64 fpr[32];
u32 gpr[32];
@@ -123,7 +129,11 @@ struct kvm_vcpu_arch {
u32 ivor[16];
u32 ivpr;
u32 pir;
+
+ u32 shadow_pid;
u32 pid;
+ u32 swap_pid;
+
u32 pvr;
u32 ccr0;
u32 ccr1;
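
For orientation, the three PID-related fields added above implement a lazy PID switch. A comment-form sketch (names from this diff, flow simplified; the functions referenced appear later in this patch):

    /*
     * guest executes mtspr SPRN_PID, rX
     *   -> kvmppc_set_pid(): arch.pid = rX; arch.swap_pid = 1 on a real change
     * guest switches between kernel and user mode
     *   -> kvmppc_mmu_priv_switch(): if swap_pid, flush the shadow TLB;
     *      arch.shadow_pid = !usermode
     * guest entry (booke_interrupts.S)
     *   -> SPRN_PID is loaded from arch.shadow_pid, never from arch.pid
     */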
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a8b0687..8931ba7 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -64,6 +64,10 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
gva_t eend, u32 asid);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
+extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
+
+/* XXX Book E specific */
+extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
@@ -92,4 +96,12 @@ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
kvm_vcpu_block(vcpu);
}
+static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
+{
+ if (vcpu->arch.pid != new_pid) {
+ vcpu->arch.pid = new_pid;
+ vcpu->arch.swap_pid = 1;
+ }
+}
+
#endif /* __POWERPC_KVM_PPC_H__ */
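
A usage note on kvmppc_set_pid(): the inequality check makes redundant writes free, so emulation code can call it unconditionally. For example (PID values arbitrary):

    kvmppc_set_pid(vcpu, 5);   /* 0 -> 5: pid updated, swap_pid set */
    kvmppc_set_pid(vcpu, 5);   /* same value: swap_pid left untouched */
    /* The deferred shadow TLB flush happens in kvmppc_mmu_priv_switch(). */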
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 09febc5..75c5dd0 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -359,8 +359,8 @@ int main(void)
DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
- DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
+ DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
@@ -372,7 +372,7 @@ int main(void)
DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
- DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid));
+ DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
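
These DEFINEs exist only to export struct offsets to assembly: the build turns them into plain constants in the generated asm-offsets.h, which booke_interrupts.S uses as load/store displacements from the vcpu pointer. Illustrative output (offset values made up):

    /* generated asm-offsets.h (illustrative values only): */
    #define VCPU_SHADOW_TLB 424   /* offsetof(struct kvm_vcpu, arch.shadow_tlb) */
    #define VCPU_SHADOW_MOD 1448  /* offsetof(struct kvm_vcpu, arch.shadow_tlb_mod) */
    /* consumed from assembly as, e.g., "addi r9, r4, VCPU_SHADOW_TLB" */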
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5a5602d..2e227a4 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/mmu-44x.h>
@@ -109,7 +110,6 @@ static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
}
-/* Must be called with mmap_sem locked for writing. */
static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
unsigned int index)
{
@@ -124,6 +124,11 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
}
}
+void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
+{
+ vcpu->arch.shadow_tlb_mod[i] = 1;
+}
+
/* Caller must ensure that the specified guest TLB entry is safe to insert into
* the shadow TLB. */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
@@ -142,19 +147,16 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
stlbe = &vcpu->arch.shadow_tlb[victim];
/* Get reference to new page. */
- down_read(&current->mm->mmap_sem);
new_page = gfn_to_page(vcpu->kvm, gfn);
if (is_error_page(new_page)) {
printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
kvm_release_page_clean(new_page);
- up_read(&current->mm->mmap_sem);
return;
}
hpaddr = page_to_phys(new_page);
/* Drop reference to old page. */
kvmppc_44x_shadow_release(vcpu, victim);
- up_read(&current->mm->mmap_sem);
vcpu->arch.shadow_pages[victim] = new_page;
@@ -164,27 +166,30 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
/* XXX what about AS? */
- stlbe->tid = asid & 0xff;
+ stlbe->tid = !(asid & 0xff);
/* Force TS=1 for all guest mappings. */
/* For now we hardcode 4KB mappings, but it will be important to
* use host large pages in the future. */
stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
| PPC44x_TLB_4K;
-
stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
vcpu->arch.msr & MSR_PR);
+ kvmppc_tlbe_set_modified(vcpu, victim);
+
+ KVMTRACE_5D(STLB_WRITE, vcpu, victim,
+ stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
+ handler);
}
void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
gva_t eend, u32 asid)
{
- unsigned int pid = asid & 0xff;
+ unsigned int pid = !(asid & 0xff);
int i;
/* XXX Replace loop with fancy data structures. */
- down_write(&current->mm->mmap_sem);
for (i = 0; i <= tlb_44x_hwater; i++) {
struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
unsigned int tid;
@@ -204,21 +209,35 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
kvmppc_44x_shadow_release(vcpu, i);
stlbe->word0 = 0;
+ kvmppc_tlbe_set_modified(vcpu, i);
+ KVMTRACE_5D(STLB_INVAL, vcpu, i,
+ stlbe->tid, stlbe->word0, stlbe->word1,
+ stlbe->word2, handler);
}
- up_write(&current->mm->mmap_sem);
}
-/* Invalidate all mappings, so that when they fault back in they will get the
- * proper permission bits. */
+/* Invalidate all mappings on the privilege switch after PID has been changed.
+ * The guest always runs with PID=1, so we must clear the entire TLB when
+ * switching address spaces. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
int i;
- /* XXX Replace loop with fancy data structures. */
- down_write(&current->mm->mmap_sem);
- for (i = 0; i <= tlb_44x_hwater; i++) {
- kvmppc_44x_shadow_release(vcpu, i);
- vcpu->arch.shadow_tlb[i].word0 = 0;
+ if (vcpu->arch.swap_pid) {
+ /* XXX Replace loop with fancy data structures. */
+ for (i = 0; i <= tlb_44x_hwater; i++) {
+ struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+
+ /* Future optimization: clear only userspace mappings. */
+ kvmppc_44x_shadow_release(vcpu, i);
+ stlbe->word0 = 0;
+ kvmppc_tlbe_set_modified(vcpu, i);
+ KVMTRACE_5D(STLB_INVAL, vcpu, i,
+ stlbe->tid, stlbe->word0, stlbe->word1,
+ stlbe->word2, handler);
+ }
+ vcpu->arch.swap_pid = 0;
}
- up_write(&current->mm->mmap_sem);
+
+ vcpu->arch.shadow_pid = !usermode;
}
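
In summary, the producer side of the modified-entry tracking now looks like this (a comment-form sketch; names from the code above):

    /*
     * kvmppc_mmu_map()                a guest mapping is faulted in
     *   gfn_to_page()                 take a reference on the new page
     *   kvmppc_44x_shadow_release()   drop the old page's reference
     *   kvmppc_tlbe_set_modified()    flag the entry for the exit path
     *
     * kvmppc_mmu_priv_switch()        guest kernel<->user transition
     *   if (swap_pid): release, zero and flag every entry
     *   shadow_pid = !usermode        loaded into SPRN_PID on guest entry
     *
     * Only flagged entries are rewritten into the hardware TLB by the
     * loop in booke_interrupts.S below.
     */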
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 6b07601..53aaa66 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -37,6 +37,17 @@ config KVM_BOOKE_HOST
Provides host support for KVM on Book E PowerPC processors. Currently
this works on 440 processors only.
+config KVM_TRACE
+ bool "KVM trace support"
+ depends on KVM && MARKERS && SYSFS
+ select RELAY
+ select DEBUG_FS
+ default n
+ ---help---
+ This option allows reading a trace of kvm-related events through
+ relayfs. Note the ABI is not considered stable and will be
+ modified in future updates.
+
source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 04e3449..2a5d439 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -4,9 +4,11 @@
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
-kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
+common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o)
+
+kvm-objs := $(common-objs-y) powerpc.o emulate.o booke_guest.o
obj-$(CONFIG_KVM) += kvm.o
AFLAGS_booke_interrupts.o := -I$(obj)
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
index 9c8ad85..7b2591e 100644
--- a/arch/powerpc/kvm/booke_guest.c
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -410,6 +410,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
+ case BOOKE_INTERRUPT_DEBUG: {
+ u32 dbsr;
+
+ vcpu->arch.pc = mfspr(SPRN_CSRR0);
+
+ /* clear IAC events in DBSR register */
+ dbsr = mfspr(SPRN_DBSR);
+ dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
+ mtspr(SPRN_DBSR, dbsr);
+
+ run->exit_reason = KVM_EXIT_DEBUG;
+ r = RESUME_HOST;
+ break;
+ }
+
default:
printk(KERN_EMERG "exit_nr %d\n", exit_nr);
BUG();
@@ -471,6 +486,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.msr = 0;
vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
+ vcpu->arch.shadow_pid = 1;
+
/* Eye-catching number so we know if the guest takes an interrupt
* before it's programmed its own IVPR. */
vcpu->arch.ivpr = 0x55550000;
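
On the userspace side, the new exit reason surfaces through the mmap'ed kvm_run region after KVM_RUN. A minimal sketch, assuming the vcpu fd and run mapping are already set up:

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
    {
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
            return -1;

        if (run->exit_reason == KVM_EXIT_DEBUG) {
            /* An IAC breakpoint armed via KVM_DEBUG_GUEST fired; the
             * handler above already copied CSRR0 into the guest PC. */
            printf("guest breakpoint hit\n");
        }
        return 0;
    }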
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 3b653b5..95e165b 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -42,7 +42,8 @@
#define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
- (1<<BOOKE_INTERRUPT_DTLB_MISS))
+ (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
+ (1<<BOOKE_INTERRUPT_DEBUG))
#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
(1<<BOOKE_INTERRUPT_DTLB_MISS))
@@ -331,51 +332,57 @@ lightweight_exit:
mfspr r3, SPRN_PID
stw r3, VCPU_HOST_PID(r4)
- lwz r3, VCPU_PID(r4)
+ lwz r3, VCPU_SHADOW_PID(r4)
mtspr SPRN_PID, r3
- /* Prevent all TLB updates. */
+ /* Prevent all asynchronous TLB updates. */
mfmsr r5
lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
andc r6, r5, r6
mtmsr r6
- /* Save the host's non-pinned TLB mappings, and load the guest mappings
- * over them. Leave the host's "pinned" kernel mappings in place. */
- /* XXX optimization: use generation count to avoid swapping unmodified
- * entries. */
+ /* Load the guest mappings, leaving the host's "pinned" kernel mappings
+ * in place. */
mfspr r10, SPRN_MMUCR /* Save host MMUCR. */
- lis r8, tlb_44x_hwater@ha
- lwz r8, tlb_44x_hwater@l(r8)
- addi r3, r4, VCPU_HOST_TLB - 4
- addi r9, r4, VCPU_SHADOW_TLB - 4
- li r6, 0
+ li r5, PPC44x_TLB_SIZE
+ lis r5, tlb_44x_hwater@ha
+ lwz r5, tlb_44x_hwater@l(r5)
+ mtctr r5
+ addi r9, r4, VCPU_SHADOW_TLB
+ addi r5, r4, VCPU_SHADOW_MOD
+ li r3, 0
1:
- /* Save host entry. */
- tlbre r7, r6, PPC44x_TLB_PAGEID
- mfspr r5, SPRN_MMUCR
- stwu r5, 4(r3)
- stwu r7, 4(r3)
- tlbre r7, r6, PPC44x_TLB_XLAT
- stwu r7, 4(r3)
- tlbre r7, r6, PPC44x_TLB_ATTRIB
- stwu r7, 4(r3)
+ lbzx r7, r3, r5
+ cmpwi r7, 0
+ beq 3f
+
/* Load guest entry. */
- lwzu r7, 4(r9)
+ mulli r11, r3, TLBE_BYTES
+ add r11, r11, r9
+ lwz r7, 0(r11)
mtspr SPRN_MMUCR, r7
- lwzu r7, 4(r9)
- tlbwe r7, r6, PPC44x_TLB_PAGEID
- lwzu r7, 4(r9)
- tlbwe r7, r6, PPC44x_TLB_XLAT
- lwzu r7, 4(r9)
- tlbwe r7, r6, PPC44x_TLB_ATTRIB
- /* Increment index. */
- addi r6, r6, 1
- cmpw r6, r8
- blt 1b
+ lwz r7, 4(r11)
+ tlbwe r7, r3, PPC44x_TLB_PAGEID
+ lwz r7, 8(r11)
+ tlbwe r7, r3, PPC44x_TLB_XLAT
+ lwz r7, 12(r11)
+ tlbwe r7, r3, PPC44x_TLB_ATTRIB
+3:
+ addi r3, r3, 1 /* Increment index. */
+ bdnz 1b
+
mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */
+ /* Clear bitmap of modified TLB entries */
+ li r5, PPC44x_TLB_SIZE>>2
+ mtctr r5
+ addi r5, r4, VCPU_SHADOW_MOD - 4
+ li r6, 0
+1:
+ stwu r6, 4(r5)
+ bdnz 1b
+
iccci 0, 0 /* XXX hack */
/* Load some guest volatiles. */
@@ -431,6 +438,14 @@ lightweight_exit:
oris r3, r3, KVMPPC_MSR_MASK@h
ori r3, r3, KVMPPC_MSR_MASK@l
mtsrr1 r3
+
+ /* Clear any debug events which occurred since we disabled MSR[DE].
+ * XXX This gives us a 3-instruction window in which a breakpoint
+ * intended for guest context could fire in the host instead. */
+ lis r3, 0xffff
+ ori r3, r3, 0xffff
+ mtspr SPRN_DBSR, r3
+
lwz r3, VCPU_GPR(r3)(r4)
lwz r4, VCPU_GPR(r4)(r4)
rfi
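
For readers who don't speak 440 assembly, the rewritten TLB-load loop behaves roughly like the following C (a sketch only: tlbwe is a machine instruction shown here as a pseudo-intrinsic, and the real code runs with MSR[EE/CE/ME/DE] masked):

    int i;

    for (i = 0; i < tlb_44x_hwater; i++) {
        struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];

        if (!vcpu->arch.shadow_tlb_mod[i])
            continue;                   /* untouched since last exit: skip */

        mtspr(SPRN_MMUCR, stlbe->tid);  /* word at offset 0 feeds MMUCR */
        tlbwe(stlbe->word0, i, PPC44x_TLB_PAGEID);
        tlbwe(stlbe->word1, i, PPC44x_TLB_XLAT);
        tlbwe(stlbe->word2, i, PPC44x_TLB_ATTRIB);
    }
    memset(vcpu->arch.shadow_tlb_mod, 0, PPC44x_TLB_SIZE);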
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 8c605d0..0fce4fb 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -170,6 +170,10 @@ static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
}
+ KVMTRACE_5D(GTLB_WRITE, vcpu, index,
+ tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2,
+ handler);
+
return EMULATE_DONE;
}
@@ -504,7 +508,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case SPRN_MMUCR:
vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
case SPRN_PID:
- vcpu->arch.pid = vcpu->arch.gpr[rs]; break;
+ kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
case SPRN_CCR0:
vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
case SPRN_CCR1:
@@ -765,6 +769,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
}
+ KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit);
+
if (advance)
vcpu->arch.pc += 4; /* Advance past emulated instruction. */
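
The SPRN_PID hunk is the behavioral heart of this file's diff; in sketch form:

    /*
     * before: vcpu->arch.pid = vcpu->arch.gpr[rs];
     *         (the entry path loaded SPRN_PID straight from arch.pid)
     * after:  kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]);
     *         (records the value, sets swap_pid on a real change, and
     *          lets kvmppc_mmu_priv_switch() flush the shadow TLB and
     *          recompute shadow_pid, which the entry path now loads)
     */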
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 53826a5..90a6fc4 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -27,6 +27,7 @@
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
+#include <asm/tlbflush.h>
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -239,18 +240,114 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
+/* Note: clearing MSR[DE] just means that the debug interrupt will not be
+ * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
+ * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
+ * will be delivered as an "imprecise debug event" (which is indicated by
+ * DBSR[IDE]).
+ */
+static void kvmppc_disable_debug_interrupts(void)
+{
+ mtmsr(mfmsr() & ~MSR_DE);
+}
+
+static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
+{
+ kvmppc_disable_debug_interrupts();
+
+ mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
+ mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
+ mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
+ mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
+ mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
+ mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
+ mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
+ mtmsr(vcpu->arch.host_msr);
+}
+
+static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
+{
+ struct kvm_guest_debug *dbg = &vcpu->guest_debug;
+ u32 dbcr0 = 0;
+
+ vcpu->arch.host_msr = mfmsr();
+ kvmppc_disable_debug_interrupts();
+
+ /* Save host debug register state. */
+ vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
+ vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
+ vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
+ vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
+ vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
+ vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
+ vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
+
+ /* set registers up for guest */
+
+ if (dbg->bp[0]) {
+ mtspr(SPRN_IAC1, dbg->bp[0]);
+ dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
+ }
+ if (dbg->bp[1]) {
+ mtspr(SPRN_IAC2, dbg->bp[1]);
+ dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
+ }
+ if (dbg->bp[2]) {
+ mtspr(SPRN_IAC3, dbg->bp[2]);
+ dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
+ }
+ if (dbg->bp[3]) {
+ mtspr(SPRN_IAC4, dbg->bp[3]);
+ dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
+ }
+
+ mtspr(SPRN_DBCR0, dbcr0);
+ mtspr(SPRN_DBCR1, 0);
+ mtspr(SPRN_DBCR2, 0);
+}
+
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ int i;
+
+ if (vcpu->guest_debug.enabled)
+ kvmppc_load_guest_debug_registers(vcpu);
+
+ /* Mark every guest entry in the shadow TLB as modified, so that they
+ * will all be reloaded on the next vcpu run (instead of being
+ * demand-faulted). */
+ for (i = 0; i <= tlb_44x_hwater; i++)
+ kvmppc_tlbe_set_modified(vcpu, i);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ if (vcpu->guest_debug.enabled)
+ kvmppc_restore_host_debug_state(vcpu);
+
+ /* Don't leave guest TLB entries resident when being de-scheduled. */
+ /* XXX It would be nice to differentiate between heavyweight exit and
+ * sched_out here, since we could avoid the TLB flush for heavyweight
+ * exits. */
+ _tlbia();
}
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg)
{
- return -ENOTSUPP;
+ int i;
+
+ vcpu->guest_debug.enabled = dbg->enabled;
+ if (vcpu->guest_debug.enabled) {
+ for (i=0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) {
+ if (dbg->breakpoints[i].enabled)
+ vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
+ else
+ vcpu->guest_debug.bp[i] = 0;
+ }
+ }
+
+ return 0;
}
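
With this in place, userspace can arm up to four instruction breakpoints. A minimal sketch, assuming the KVM_DEBUG_GUEST ioctl and the struct kvm_debug_guest layout from this kernel's headers (field names match the code above):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int set_guest_breakpoint(int vcpu_fd, __u64 addr)
    {
        struct kvm_debug_guest dbg = { 0 };

        dbg.enabled = 1;
        dbg.breakpoints[0].enabled = 1;   /* becomes IAC1 + DBCR0[IAC1,IDM] */
        dbg.breakpoints[0].address = addr;

        return ioctl(vcpu_fd, KVM_DEBUG_GUEST, &dbg);
    }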
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,