author     Xiantao Zhang <xiantao.zhang@intel.com>    2008-09-28 01:39:46 -0700
committer  Avi Kivity <avi@redhat.com>                2008-10-15 14:25:38 +0200
commit     b010eb5103cfbe12ae6f08a4cdb3a748bf78c410
tree       e2b812000bbb1b13edb52667e42a8d04d4ad02e5 /arch/ia64/kvm
parent     1cbea809c400661eecb538e0dd0bc4f3660f0a35
KVM: ia64: add directed mmio range support for kvm guests
Using VT-d, KVM guests can be assigned physical devices, so
this patch introduces a new mmio type (directed mmio)
to handle their mmio accesses.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
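For readers skimming the change, the classification the patch introduces can be pictured with the short sketch below. It is illustrative only and not part of the patch: the u64 type and the GPFN_INV_MASK, GPFN_IO_MASK, GPFN_LOW_MMIO and GPFN_PHYS_MMIO constants come from the diff, while classify_gpfn() and the gpfn_kind enum are made-up names.

/*
 * Illustrative sketch (not in the tree): how a P2M entry is read once
 * directed mmio is a distinct type.  Emulated mmio (e.g. GPFN_LOW_MMIO)
 * still traps for emulation, while GPFN_PHYS_MMIO marks pages of an
 * assigned (VT-d) device that are mapped straight through to the guest.
 */
enum gpfn_kind {
	GPFN_KIND_RAM,
	GPFN_KIND_EMULATED_MMIO,
	GPFN_KIND_DIRECTED_MMIO,
	GPFN_KIND_INVALID,
};

static enum gpfn_kind classify_gpfn(u64 mpt_entry)
{
	u64 io_bits;

	if (mpt_entry & GPFN_INV_MASK)
		return GPFN_KIND_INVALID;

	io_bits = mpt_entry & GPFN_IO_MASK;
	if (!io_bits)
		return GPFN_KIND_RAM;
	if (io_bits == GPFN_PHYS_MMIO)
		return GPFN_KIND_DIRECTED_MMIO;
	return GPFN_KIND_EMULATED_MMIO;
}

This mirrors the new __gpfn_is_io() in the patch, which reports an I/O type only for entries that are valid, marked as I/O, and not GPFN_PHYS_MMIO.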
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--   arch/ia64/kvm/kvm-ia64.c    4
-rw-r--r--   arch/ia64/kvm/vcpu.h       26
-rw-r--r--   arch/ia64/kvm/vtlb.c       23
3 files changed, 32 insertions, 21 deletions
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 800a4f2..3df82f3 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1447,11 +1447,11 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		if (!kvm_is_mmio_pfn(pfn)) {
 			kvm_set_pmt_entry(kvm, base_gfn + i,
 				pfn << PAGE_SHIFT,
-				_PAGE_MA_WB);
+				_PAGE_AR_RWX | _PAGE_MA_WB);
 			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
 		} else {
 			kvm_set_pmt_entry(kvm, base_gfn + i,
-					GPFN_LOW_MMIO | (pfn << PAGE_SHIFT),
+					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
 					_PAGE_MA_UC);
 			memslot->rmap[i] = 0;
 		}
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index b0fcfb6..341e3fe 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -313,21 +313,21 @@ static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
 	trp->rid = rid;
 }
 
-extern u64 kvm_lookup_mpa(u64 gpfn);
-extern u64 kvm_gpa_to_mpa(u64 gpa);
-
-/* Return I/O type if trye */
-#define __gpfn_is_io(gpfn)			\
-	({					\
-		u64 pte, ret = 0;		\
-		pte = kvm_lookup_mpa(gpfn);	\
-		if (!(pte & GPFN_INV_MASK))	\
-			ret = pte & GPFN_IO_MASK;	\
-		ret;				\
-	})
+extern u64 kvm_get_mpt_entry(u64 gpfn);
+/* Return I/ */
+static inline u64 __gpfn_is_io(u64 gpfn)
+{
+	u64 pte;
+	pte = kvm_get_mpt_entry(gpfn);
+	if (!(pte & GPFN_INV_MASK)) {
+		pte = pte & GPFN_IO_MASK;
+		if (pte != GPFN_PHYS_MMIO)
+			return pte;
+	}
+	return 0;
+}
 #endif
-
 #define IA64_NO_FAULT			0
 #define IA64_FAULT			1
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index def4576..e22b933 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -390,7 +390,7 @@ void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
 
 u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 {
-	u64 ps, ps_mask, paddr, maddr;
+	u64 ps, ps_mask, paddr, maddr, io_mask;
 	union pte_flags phy_pte;
 
 	ps = itir_ps(itir);
@@ -398,8 +398,9 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 	phy_pte.val = *pte;
 	paddr = *pte;
 	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
-	maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
-	if (maddr & GPFN_IO_MASK) {
+	maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
+	io_mask = maddr & GPFN_IO_MASK;
+	if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
 		*pte |= VTLB_PTE_IO;
 		return -1;
 	}
@@ -418,7 +419,7 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 						u64 ifa, int type)
 {
 	u64 ps;
-	u64 phy_pte;
+	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
 	int ret = 0;
 
@@ -426,13 +427,16 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	vrr.val = vcpu_get_rr(v, ifa);
 	mrr.val = ia64_get_rr(ifa);
 
+	index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+	io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
 	phy_pte = translate_phy_pte(&pte, itir, ifa);
 
 	/* Ensure WB attribute if pte is related to a normal mem page,
 	 * which is required by vga acceleration since qemu maps shared
 	 * vram buffer with WB.
 	 */
-	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
+		io_mask != GPFN_PHYS_MMIO) {
 		pte &= ~_PAGE_MA_MASK;
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
@@ -566,12 +570,19 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 	}
 }
 
-u64 kvm_lookup_mpa(u64 gpfn)
+u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
 	return *(base + gpfn);
 }
 
+u64 kvm_lookup_mpa(u64 gpfn)
+{
+	u64 maddr;
+	maddr = kvm_get_mpt_entry(gpfn);
+	return maddr&_PAGE_PPN_MASK;
+}
+
 u64 kvm_gpa_to_mpa(u64 gpa)
 {
 	u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
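One way to read the thash_purge_and_insert() change above: the forced write-back attribute now applies only to ordinary RAM pages. The condition can be restated as below; this is a simplified, hypothetical helper for illustration, not code from the patch, reusing VTLB_PTE_IO, _PAGE_MA_MASK, _PAGE_MA_NAT and GPFN_PHYS_MMIO from the diff.

/*
 * Simplified restatement (illustrative only) of the attribute decision:
 * force WB for normal memory so qemu's WB-mapped vram stays coherent with
 * vga acceleration, but leave emulated mmio and directed (VT-d) mmio alone.
 */
static int should_force_wb(u64 pte, u64 io_mask)
{
	return !(pte & VTLB_PTE_IO) &&
	       ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
	       io_mask != GPFN_PHYS_MMIO;
}

Excluding GPFN_PHYS_MMIO here matters because those accesses reach real device registers through the assigned device, where forcing a cacheable attribute would be wrong.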