From fcd95807fb61e67d602610e7ff7129ed769e9fee Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Mon, 11 Jan 2010 16:38:18 +0100
Subject: kvm: Change kvm_iommu_map_pages to map large pages

This patch changes the implementation of kvm_iommu_map_pages to map the
pages with the host page size into the io virtual address space.

Signed-off-by: Joerg Roedel
Acked-by: Avi Kivity
---
 virt/kvm/iommu.c | 113 ++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 91 insertions(+), 22 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 80fd3ad..11692b9 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -32,12 +32,30 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages);
 
+static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
+			   gfn_t gfn, unsigned long size)
+{
+	gfn_t end_gfn;
+	pfn_t pfn;
+
+	pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
+	end_gfn = gfn + (size >> PAGE_SHIFT);
+	gfn    += 1;
+
+	if (is_error_pfn(pfn))
+		return pfn;
+
+	while (gfn < end_gfn)
+		gfn_to_pfn_memslot(kvm, slot, gfn++);
+
+	return pfn;
+}
+
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-	gfn_t gfn = slot->base_gfn;
-	unsigned long npages = slot->npages;
+	gfn_t gfn, end_gfn;
 	pfn_t pfn;
-	int i, r = 0;
+	int r = 0;
 	struct iommu_domain *domain = kvm->arch.iommu_domain;
 	int flags;
 
@@ -45,31 +63,62 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 	if (!domain)
 		return 0;
 
+	gfn     = slot->base_gfn;
+	end_gfn = gfn + slot->npages;
+
 	flags = IOMMU_READ | IOMMU_WRITE;
 	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
 		flags |= IOMMU_CACHE;
 
-	for (i = 0; i < npages; i++) {
-		/* check if already mapped */
-		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
+
+	while (gfn < end_gfn) {
+		unsigned long page_size;
+
+		/* Check if already mapped */
+		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
+			gfn += 1;
+			continue;
+		}
+
+		/* Get the page size we could use to map */
+		page_size = kvm_host_page_size(kvm, gfn);
+
+		/* Make sure the page_size does not exceed the memslot */
+		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
+			page_size >>= 1;
+
+		/* Make sure gfn is aligned to the page size we want to map */
+		while ((gfn << PAGE_SHIFT) & (page_size - 1))
+			page_size >>= 1;
+
+		/*
+		 * Pin all pages we are about to map in memory. This is
+		 * important because we unmap and unpin in 4kb steps later.
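+		 * (Editor's note, for illustration: a 2MB host page pinned
+		 * here is pinned as 512 individual 4kb pages, so the later
+		 * per-page unpin never underflows a page's refcount.)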
+		 */
+		pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
+		if (is_error_pfn(pfn)) {
+			gfn += 1;
 			continue;
+		}
 
-		pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
-		r = iommu_map_range(domain,
-				    gfn_to_gpa(gfn),
-				    pfn_to_hpa(pfn),
-				    PAGE_SIZE, flags);
+		/* Map into IO address space */
+		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
+			      get_order(page_size), flags);
 		if (r) {
 			printk(KERN_ERR "kvm_iommu_map_address:"
 			       "iommu failed to map pfn=%lx\n", pfn);
 			goto unmap_pages;
 		}
-		gfn++;
+
+		gfn += page_size >> PAGE_SHIFT;
+
+	}
+
 	return 0;
 
 unmap_pages:
-	kvm_iommu_put_pages(kvm, slot->base_gfn, i);
+	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
 	return r;
 }
 
@@ -189,27 +238,47 @@ out_unmap:
 	return r;
 }
 
+static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+{
+	unsigned long i;
+
+	for (i = 0; i < npages; ++i)
+		kvm_release_pfn_clean(pfn + i);
+}
+
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages)
 {
-	gfn_t gfn = base_gfn;
+	struct iommu_domain *domain;
+	gfn_t end_gfn, gfn;
 	pfn_t pfn;
-	struct iommu_domain *domain = kvm->arch.iommu_domain;
-	unsigned long i;
 	u64 phys;
 
+	domain  = kvm->arch.iommu_domain;
+	end_gfn = base_gfn + npages;
+	gfn     = base_gfn;
+
 	/* check if iommu exists and in use */
 	if (!domain)
 		return;
 
-	for (i = 0; i < npages; i++) {
+	while (gfn < end_gfn) {
+		unsigned long unmap_pages;
+		int order;
+
+		/* Get physical address */
 		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
-		pfn = phys >> PAGE_SHIFT;
-		kvm_release_pfn_clean(pfn);
-		gfn++;
-	}
+		pfn  = phys >> PAGE_SHIFT;
+
+		/* Unmap address from IO address space */
+		order       = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
+		unmap_pages = 1ULL << order;
 
-	iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages);
+		/* Unpin all pages we just unmapped to not leak any memory */
+		kvm_unpin_pages(kvm, pfn, unmap_pages);
+
+		gfn += unmap_pages;
+	}
 }
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
--
cgit v1.1

From 5a0e3ad6af8660be21ca98a971cd00f331318c05 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 24 Mar 2010 17:04:11 +0900
Subject: include cleanup: Update gfp.h and slab.h includes to prepare for
 breaking implicit slab.h inclusion from percpu.h

percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which in
turn includes gfp.h, making everything defined by the two files
universally available and complicating inclusion dependencies.

The percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities to include
those headers directly instead of assuming availability.

As this conversion needs to touch a large number of source files, the
following script was used as the basis of the conversion.

  http://userweb.kernel.org/~tj/misc/slabh-sweep.py

The script does the following.

* Scan files for gfp and slab usages and update includes such that only
  the necessary includes are there. ie. if only gfp is used, gfp.h, if
  slab is used, slab.h.

* When the script inserts a new include, it looks at the include blocks
  and tries to put the new include such that its order conforms to its
  surrounding. It's put in the include block which contains core kernel
  includes, in the same order that the rest are ordered - alphabetical,
  Christmas tree, rev-Xmas-tree or at the end if there doesn't seem to
  be any matching order.
* If the script can't find a place to put a new include (mostly because
  the file doesn't have a fitting include block), it prints out an
  error message indicating which .h file needs to be added to the file.

The conversion was done in the following steps.

1. The initial automatic conversion of all .c files updated slightly
   over 4000 files, deleting around 700 includes and adding ~480 gfp.h
   and ~3000 slab.h inclusions. The script emitted errors for ~400
   files.

2. Each error was manually checked. Some didn't need the inclusion,
   some needed manual addition, while adding it to the implementation
   .h or embedding .c file was more appropriate for others. This step
   added inclusions to around 150 files.

3. The script was run again and the output was compared to the edits
   from #2 to make sure no file was left behind.

4. Several build tests were done and a couple of problems were fixed.
   e.g. lib/decompress_*.c used malloc/free() wrappers around slab APIs
   requiring slab.h to be added manually.

5. The script was run on all .h files but without automatically editing
   them, as sprinkling gfp.h and slab.h inclusions around .h files
   could easily lead to inclusion dependency hell. Most gfp.h inclusion
   directives were ignored, as stuff from gfp.h was usually widely
   available and often used in preprocessor macros. Each slab.h
   inclusion directive was examined and added manually as necessary.

6. percpu.h was updated not to include slab.h.

7. Build tests were done on the following configurations and failures
   were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
   distributed build env didn't work with gcov compiles) and a few more
   options had to be turned off depending on archs to make things build
   (like ipr on powerpc/64, which failed due to missing writeq).

   * x86 and x86_64 UP and SMP allmodconfig and a custom test config.
   * powerpc and powerpc64 SMP allmodconfig
   * sparc and sparc64 SMP allmodconfig
   * ia64 SMP allmodconfig
   * s390 SMP allmodconfig
   * alpha SMP allmodconfig
   * um on x86_64 SMP allmodconfig

8. percpu.h modifications were reverted so that it could be applied as
   a separate patch and serve as a bisection point.

Given the fact that I had only a couple of failures from the tests in
step 7, I'm fairly confident about the coverage of this conversion
patch. If there is a breakage, it's likely to be something in one of
the arch headers, which should be easily discoverable on most builds of
the specific arch.
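Editor's illustration (hypothetical file, not from the patch) of the
kind of edit the script performs: a file that calls kmalloc()/kfree()
and previously compiled only because percpu.h dragged in slab.h now
states the dependency itself.

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/slab.h>	/* added by the sweep: kmalloc()/kfree() live here */

	static int __init example_init(void)
	{
		/* GFP_KERNEL comes from gfp.h, pulled in via slab.h */
		char *buf = kmalloc(64, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		kfree(buf);
		return 0;
	}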
Signed-off-by: Tejun Heo
Guess-its-ok-by: Christoph Lameter
Cc: Ingo Molnar
Cc: Lee Schermerhorn
---
 virt/kvm/assigned-dev.c   | 1 +
 virt/kvm/coalesced_mmio.c | 1 +
 virt/kvm/eventfd.c        | 1 +
 virt/kvm/ioapic.c         | 1 +
 virt/kvm/irq_comm.c       | 1 +
 virt/kvm/kvm_main.c       | 2 +-
 6 files changed, 6 insertions(+), 1 deletion(-)

(limited to 'virt')

diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 057e2cc..02ff2b1 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 #include "irq.h"
 
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 5169736..36e2580 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -10,6 +10,7 @@
 #include "iodev.h"
 
 #include
+#include
 #include
 
 #include "coalesced_mmio.h"
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 7016319..b81f0eb 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include
 
 #include "iodev.h"
 
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 3db15a8..03a5eb2 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 9fd5b3e..a0e8880 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -20,6 +20,7 @@
  */
 
 #include
+#include
 #include
 
 #include
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 548f925..5a0cd19 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -22,7 +22,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -46,6 +45,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
--
cgit v1.1

From 87bf6e7de1134f48681fd2ce4b7c1ec45458cb6d Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Mon, 12 Apr 2010 19:35:35 +0900
Subject: KVM: fix the handling of dirty bitmaps to avoid overflows

Int is not long enough to store the size of a dirty bitmap.

This patch fixes this problem with the introduction of a wrapper
function to calculate the sizes of dirty bitmaps.

Note: in mark_page_dirty(), we have to consider the fact that
__set_bit() takes the offset as int, not long.
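Editor's note: the wrapper itself is introduced outside this virt/
diff (in the kvm headers). A minimal sketch, assuming it simply mirrors
the old inline ALIGN() calculation replaced in the diff below:

	/* Editor's sketch -- the real definition lives outside this diff */
	static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
	{
		return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	}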
Signed-off-by: Takuya Yoshikawa
Signed-off-by: Marcelo Tosatti
---
 virt/kvm/kvm_main.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5a0cd19..364daac 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -648,7 +648,7 @@ skip_lpage:
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
@@ -768,7 +768,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	int n;
+	unsigned long n;
 	unsigned long any = 0;
 
 	r = -EINVAL;
@@ -780,7 +780,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	for (i = 0; !any && i < n/sizeof(long); ++i)
 		any = memslot->dirty_bitmap[i];
@@ -1186,10 +1186,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
+		unsigned long *p = memslot->dirty_bitmap +
+					rel_gfn / BITS_PER_LONG;
+		int offset = rel_gfn % BITS_PER_LONG;
 
 		/* avoid RMW */
-		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+		if (!generic_test_le_bit(offset, p))
+			generic___set_le_bit(offset, p);
 	}
 }
 
--
cgit v1.1

From eda2beda835697878e309b6049aa0a3bad051590 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Tue, 20 Apr 2010 14:29:29 +0800
Subject: KVM: Add missing srcu_read_lock() for kvm_mmu_notifier_release()

I got this dmesg because srcu_read_lock() is missing in
kvm_mmu_notifier_release().

===================================================
[ INFO: suspicious rcu_dereference_check() usage. ]
---------------------------------------------------
arch/x86/kvm/x86.h:72 invoked rcu_dereference_check() without protection!

other info that might help us debug this:

rcu_scheduler_active = 1, debug_locks = 0
2 locks held by qemu-system-x86/3100:
 #0:  (rcu_read_lock){.+.+..}, at: [] __mmu_notifier_release+0x38/0xdf
 #1:  (&(&kvm->mmu_lock)->rlock){+.+...}, at: [] kvm_mmu_zap_all+0x21/0x5e [kvm]

stack backtrace:
Pid: 3100, comm: qemu-system-x86 Not tainted 2.6.34-rc3-22949-gbc8a97a-dirty #2
Call Trace:
 [] lockdep_rcu_dereference+0xaa/0xb3
 [] unalias_gfn+0x56/0xab [kvm]
 [] gfn_to_memslot+0x16/0x25 [kvm]
 [] gfn_to_rmap+0x17/0x6e [kvm]
 [] rmap_remove+0xa0/0x19d [kvm]
 [] kvm_mmu_zap_page+0x109/0x34d [kvm]
 [] kvm_mmu_zap_all+0x35/0x5e [kvm]
 [] kvm_arch_flush_shadow+0x16/0x22 [kvm]
 [] kvm_mmu_notifier_release+0x15/0x17 [kvm]
 [] __mmu_notifier_release+0x88/0xdf
 [] ? __mmu_notifier_release+0x38/0xdf
 [] ? exit_mm+0xe0/0x115
 [] exit_mmap+0x2c/0x17e
 [] mmput+0x2d/0xd4
 [] exit_mm+0x108/0x115
[...]
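Editor's note: the fix below wraps the shadow flush in the standard
SRCU read-side critical section. The general pattern, for illustration:

	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	/* ... dereference SRCU-protected data, e.g. kvm->memslots ... */
	srcu_read_unlock(&kvm->srcu, idx);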
Signed-off-by: Lai Jiangshan
Signed-off-by: Avi Kivity
---
 virt/kvm/kvm_main.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'virt')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 364daac..c82ae24 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -341,7 +341,11 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 				     struct mm_struct *mm)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
 	kvm_arch_flush_shadow(kvm);
+	srcu_read_unlock(&kvm->srcu, idx);
 }
 
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
--
cgit v1.1

From f5c9803173848864d0c56108b9e102db0bf601de Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Thu, 25 Feb 2010 11:33:19 +0900
Subject: KVM: update gfn_to_hva() to use gfn_to_hva_memslot()

Marcelo introduced gfn_to_hva_memslot() when he implemented
gfn_to_pfn_memslot(). Let's use this for gfn_to_hva() too.

Note: also remove the parentheses next to return, as checkpatch said
to do.

Signed-off-by: Takuya Yoshikawa
Signed-off-by: Avi Kivity
---
 virt/kvm/kvm_main.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c82ae24..5bac6eb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -914,6 +914,11 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	return memslot - slots->memslots;
 }
 
+static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+}
+
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
@@ -922,7 +927,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 	slot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
-	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
+	return gfn_to_hva_memslot(slot, gfn);
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
@@ -972,11 +977,6 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
-static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
-{
-	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
-}
-
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn)
 {
--
cgit v1.1

From 46a47b1ed118cda1a08b7f6077b837a00fbc112b Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti
Date: Fri, 23 Apr 2010 14:03:38 -0300
Subject: KVM: convert ioapic lock to spinlock

kvm_set_irq is used from non-sleepable contexts, so convert the ioapic
lock from a mutex to a spinlock.

KVM-Stable-Tag.
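Editor's note, for illustration: a mutex may sleep on contention, which
is illegal in atomic context, while a spinlock busy-waits and is
therefore safe there. A generic sketch (hypothetical lock, not part of
the patch):

	static DEFINE_SPINLOCK(example_lock);

	spin_lock(&example_lock);	/* never sleeps -- usable from atomic context */
	/* ... short critical section that must not sleep ... */
	spin_unlock(&example_lock);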
Tested-by: Ralf Bonenkamp
Signed-off-by: Marcelo Tosatti
---
 virt/kvm/ioapic.c | 30 +++++++++++++++---------------
 virt/kvm/ioapic.h |  2 +-
 2 files changed, 16 insertions(+), 16 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 03a5eb2..7c79c1d 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -197,7 +197,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 	union kvm_ioapic_redirect_entry entry;
 	int ret = 1;
 
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
 		entry = ioapic->redirtbl[irq];
 		level ^= entry.fields.polarity;
@@ -214,7 +214,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 		}
 		trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
 	}
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 
 	return ret;
 }
@@ -238,9 +238,9 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
 		 * is dropped it will be put into irr and will be delivered
 		 * after ack notifier returns.
 		 */
-		mutex_unlock(&ioapic->lock);
+		spin_unlock(&ioapic->lock);
 		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
-		mutex_lock(&ioapic->lock);
+		spin_lock(&ioapic->lock);
 
 		if (trigger_mode != IOAPIC_LEVEL_TRIG)
 			continue;
@@ -259,9 +259,9 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
 	smp_rmb();
 	if (!test_bit(vector, ioapic->handled_vectors))
 		return;
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 }
 
 static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
@@ -287,7 +287,7 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 	ASSERT(!(addr & 0xf));	/* check alignment */
 
 	addr &= 0xff;
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	switch (addr) {
 	case IOAPIC_REG_SELECT:
 		result = ioapic->ioregsel;
@@ -301,7 +301,7 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 		result = 0;
 		break;
 	}
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 
 	switch (len) {
 	case 8:
@@ -338,7 +338,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 	}
 
 	addr &= 0xff;
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	switch (addr) {
 	case IOAPIC_REG_SELECT:
 		ioapic->ioregsel = data;
@@ -356,7 +356,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 	default:
 		break;
 	}
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 	return 0;
 }
 
@@ -386,7 +386,7 @@ int kvm_ioapic_init(struct kvm *kvm)
 	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
 	if (!ioapic)
 		return -ENOMEM;
-	mutex_init(&ioapic->lock);
+	spin_lock_init(&ioapic->lock);
 	kvm->arch.vioapic = ioapic;
 	kvm_ioapic_reset(ioapic);
 	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
@@ -419,9 +419,9 @@ int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
 	if (!ioapic)
 		return -EINVAL;
 
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 	return 0;
 }
 
@@ -431,9 +431,9 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
 	if (!ioapic)
 		return -EINVAL;
 
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
 	update_handled_vectors(ioapic);
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 	return 0;
 }
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 8a751b7..0b190c3 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -45,7 +45,7 @@ struct kvm_ioapic {
 	struct kvm_io_device dev;
 	struct kvm *kvm;
 	void (*ack_notifier)(void *opaque, int irq);
-	struct mutex lock;
+	spinlock_t lock;
 	DECLARE_BITMAP(handled_vectors, 256);
 };
 
--
cgit v1.1

From 2ed152afc7ed61830b848b32936e1541a1a57799 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Wed, 10 Mar 2010 19:00:43 +0800
Subject: KVM: cleanup kvm trace

This patch does:

- no need to call tracepoint_synchronize_unregister() when the kvm
  module is unloaded, since ftrace can handle it

- clean up ftrace's macro

Signed-off-by: Xiao Guangrong
Signed-off-by: Avi Kivity
---
 virt/kvm/kvm_main.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'virt')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5bac6eb..b152b23 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2279,7 +2279,6 @@ EXPORT_SYMBOL_GPL(kvm_init);
 
 void kvm_exit(void)
 {
-	tracepoint_synchronize_unregister();
 	kvm_exit_debug();
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
--
cgit v1.1

From a87fa35514bcf17aeca97245574e3f735cec9074 Mon Sep 17 00:00:00 2001
From: Wei Yongjun
Date: Fri, 12 Mar 2010 12:59:06 +0800
Subject: KVM: fix the errno of ioctl KVM_[UN]REGISTER_COALESCED_MMIO failure

This patch changes the errno of ioctl KVM_[UN]REGISTER_COALESCED_MMIO
from -EINVAL to -ENXIO if no coalesced mmio dev exists.

Signed-off-by: Wei Yongjun
Signed-off-by: Marcelo Tosatti
---
 virt/kvm/coalesced_mmio.c | 4 ++--
 virt/kvm/kvm_main.c       | 2 --
 2 files changed, 2 insertions(+), 4 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 36e2580..b66001f 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -139,7 +139,7 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
 	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
 
 	if (dev == NULL)
-		return -EINVAL;
+		return -ENXIO;
 
 	mutex_lock(&kvm->slots_lock);
 	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
@@ -162,7 +162,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
 	struct kvm_coalesced_mmio_zone *z;
 
 	if (dev == NULL)
-		return -EINVAL;
+		return -ENXIO;
 
 	mutex_lock(&kvm->slots_lock);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b152b23..0db6dfc 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1609,7 +1609,6 @@ static long kvm_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&zone, argp, sizeof zone))
 			goto out;
-		r = -ENXIO;
 		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
 		if (r)
 			goto out;
@@ -1621,7 +1620,6 @@ static long kvm_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&zone, argp, sizeof zone))
 			goto out;
-		r = -ENXIO;
 		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
 		if (r)
 			goto out;
--
cgit v1.1

From d57e2c0740bbdd768dcbafe58cf62174f31d7c2d Mon Sep 17 00:00:00 2001
From: jing zhang
Date: Sat, 13 Mar 2010 15:00:45 +0800
Subject: KVM: fix assigned_device_enable_host_msix error handling

Free IRQs and disable MSI-X upon failure.
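Editor's note: the fix below is the usual partial-unwind idiom -- on
the first request_irq() failure, free only the vectors requested so
far, in reverse order, then disable MSI-X. Sketched generically
(hypothetical names: nvec, entries, handler, pdev):

	int i, r;

	for (i = 0; i < nvec; i++) {
		r = request_irq(entries[i].vector, handler, 0, "example-dev", dev);
		if (r)
			goto err;	/* entries[i] itself was NOT requested */
	}
	return 0;

err:
	while (--i >= 0)	/* unwind only what succeeded */
		free_irq(entries[i].vector, dev);
	pci_disable_msix(pdev);
	return r;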
Cc: Avi Kivity
Signed-off-by: Jing Zhang
Signed-off-by: Marcelo Tosatti
---
 virt/kvm/assigned-dev.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 02ff2b1..4d10b1e 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -316,12 +316,16 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
 				  kvm_assigned_dev_intr, 0,
 				  "kvm_assigned_msix_device",
 				  (void *)dev);
-		/* FIXME: free requested_irq's on failure */
 		if (r)
-			return r;
+			goto err;
 	}
 
 	return 0;
+err:
+	for (i -= 1; i >= 0; i--)
+		free_irq(dev->host_msix_entries[i].vector, (void *)dev);
+	pci_disable_msix(dev->dev);
+	return r;
 }
 
 #endif
--
cgit v1.1

From 6ce5a090a9a0ea4266a2cad058c69e2f27201e11 Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Mon, 15 Mar 2010 22:13:30 +0900
Subject: KVM: coalesced_mmio: fix kvm_coalesced_mmio_init()'s error handling

kvm_coalesced_mmio_init() keeps holding the addresses of a coalesced
mmio ring page and dev even after it has freed them.

Also, if this function fails, though it might be rare, it seems to
indicate a serious problem in the system, so we'd better stop the work
following kvm_create_vm().

This patch fixes these problems. We move the coalesced mmio's
initialization out of kvm_create_vm(). This seems to be natural because
it includes a registration which can be done only when the vm is
successfully created.

Signed-off-by: Takuya Yoshikawa
Signed-off-by: Marcelo Tosatti
---
 virt/kvm/coalesced_mmio.c |  2 ++
 virt/kvm/kvm_main.c       | 12 ++++++++----
 2 files changed, 10 insertions(+), 4 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index b66001f..5385017 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -120,8 +120,10 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
 	return ret;
 
 out_free_dev:
+	kvm->coalesced_mmio_dev = NULL;
 	kfree(dev);
 out_free_page:
+	kvm->coalesced_mmio_ring = NULL;
 	__free_page(page);
 out_err:
 	return ret;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0db6dfc..55a5d48 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -422,9 +422,6 @@ static struct kvm *kvm_create_vm(void)
 	spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
 	spin_unlock(&kvm_lock);
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
-	kvm_coalesced_mmio_init(kvm);
-#endif
 out:
 	return kvm;
 
@@ -1753,12 +1750,19 @@ static struct file_operations kvm_vm_fops = {
 
 static int kvm_dev_ioctl_create_vm(void)
 {
-	int fd;
+	int fd, r;
 	struct kvm *kvm;
 
 	kvm = kvm_create_vm();
 	if (IS_ERR(kvm))
 		return PTR_ERR(kvm);
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+	r = kvm_coalesced_mmio_init(kvm);
+	if (r < 0) {
+		kvm_put_kvm(kvm);
+		return r;
+	}
+#endif
 	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
 	if (fd < 0)
 		kvm_put_kvm(kvm);
--
cgit v1.1

From 660c22c425cbe14badfb3b0a0206862577701ab7 Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Tue, 13 Apr 2010 22:47:24 +0900
Subject: KVM: limit the number of pages per memory slot

This patch limits the number of pages per memory slot to make us free
from extra care about type issues.
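Editor's note: the limit itself is defined outside this virt/ diff. A
sketch of the idea -- capping npages below 2^31 keeps dirty-bitmap bit
offsets within the int range that __set_bit() and friends accept:

	/* Editor's sketch -- the real definition lives outside this diff */
	#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)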
Signed-off-by: Takuya Yoshikawa
Signed-off-by: Marcelo Tosatti
---
 virt/kvm/kvm_main.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 55a5d48..d6351a3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -557,6 +557,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
 	npages = mem->memory_size >> PAGE_SHIFT;
 
+	r = -EINVAL;
+	if (npages > KVM_MEM_MAX_NR_PAGES)
+		goto out;
+
 	if (!npages)
 		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
@@ -1187,13 +1191,10 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
-		unsigned long *p = memslot->dirty_bitmap +
-					rel_gfn / BITS_PER_LONG;
-		int offset = rel_gfn % BITS_PER_LONG;
 
 		/* avoid RMW */
-		if (!generic_test_le_bit(offset, p))
-			generic___set_le_bit(offset, p);
+		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
+			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
 	}
 }
 
--
cgit v1.1

From 90d83dc3d49f5101addae962ccc1b4aff66b68d8 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Mon, 19 Apr 2010 17:41:23 +0800
Subject: KVM: use the correct RCU API for PROVE_RCU=y

The RCU/SRCU APIs have already changed for proving RCU usage.

I got the following dmesg when PROVE_RCU=y because we used an incorrect
API. This patch converts rcu_dereference() to srcu_dereference() or
family APIs.

===================================================
[ INFO: suspicious rcu_dereference_check() usage. ]
---------------------------------------------------
arch/x86/kvm/mmu.c:3020 invoked rcu_dereference_check() without protection!

other info that might help us debug this:

rcu_scheduler_active = 1, debug_locks = 0
2 locks held by qemu-system-x86/8550:
 #0:  (&kvm->slots_lock){+.+.+.}, at: [] kvm_set_memory_region+0x29/0x50 [kvm]
 #1:  (&(&kvm->mmu_lock)->rlock){+.+...}, at: [] kvm_arch_commit_memory_region+0xa6/0xe2 [kvm]

stack backtrace:
Pid: 8550, comm: qemu-system-x86 Not tainted 2.6.34-rc4-tip-01028-g939eab1 #27
Call Trace:
 [] lockdep_rcu_dereference+0xaa/0xb3
 [] kvm_mmu_calculate_mmu_pages+0x44/0x7d [kvm]
 [] kvm_arch_commit_memory_region+0xb7/0xe2 [kvm]
 [] __kvm_set_memory_region+0x636/0x6e2 [kvm]
 [] kvm_set_memory_region+0x37/0x50 [kvm]
 [] vmx_set_tss_addr+0x46/0x5a [kvm_intel]
 [] kvm_arch_vm_ioctl+0x17a/0xcf8 [kvm]
 [] ? unlock_page+0x27/0x2c
 [] ? __do_fault+0x3a9/0x3e1
 [] kvm_vm_ioctl+0x364/0x38d [kvm]
 [] ? up_read+0x23/0x3d
 [] vfs_ioctl+0x32/0xa6
 [] do_vfs_ioctl+0x495/0x4db
 [] ? fget_light+0xc2/0x241
 [] ? do_sys_open+0x104/0x116
 [] ? retint_swapgs+0xe/0x13
 [] sys_ioctl+0x47/0x6a
 [] system_call_fastpath+0x16/0x1b

Signed-off-by: Lai Jiangshan
Signed-off-by: Avi Kivity
---
 virt/kvm/iommu.c    |  4 ++--
 virt/kvm/kvm_main.c | 13 ++++++++-----
 2 files changed, 10 insertions(+), 7 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 80fd3ad..37ca71e 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -78,7 +78,7 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
 	int i, r = 0;
 	struct kvm_memslots *slots;
 
-	slots = rcu_dereference(kvm->memslots);
+	slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; i++) {
 		r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
@@ -217,7 +217,7 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 	int i;
 	struct kvm_memslots *slots;
 
-	slots = rcu_dereference(kvm->memslots);
+	slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; i++) {
 		kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d6351a3..4901ec5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -834,7 +834,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
-	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+	struct kvm_memslots *slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; ++i) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -856,7 +856,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
-	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+	struct kvm_memslots *slots = kvm_memslots(kvm);
 
 	gfn = unalias_gfn_instantiation(kvm, gfn);
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
@@ -900,7 +900,7 @@ out:
 int memslot_id(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
-	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+	struct kvm_memslots *slots = kvm_memslots(kvm);
 	struct kvm_memory_slot *memslot = NULL;
 
 	gfn = unalias_gfn(kvm, gfn);
@@ -1994,7 +1994,9 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		     int len, const void *val)
 {
 	int i;
-	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
+	struct kvm_io_bus *bus;
+
+	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
 	for (i = 0; i < bus->dev_count; i++)
 		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
 			return 0;
@@ -2006,8 +2008,9 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		    int len, void *val)
 {
 	int i;
-	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
+	struct kvm_io_bus *bus;
 
+	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
 	for (i = 0; i < bus->dev_count; i++)
 		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
 			return 0;
--
cgit v1.1

From 66cbff59a16017234ea6c80ecddfc17b27a83504 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Sat, 17 Apr 2010 17:00:19 +0800
Subject: KVM: do not call hardware_disable() on CPU_UP_CANCELED

When CPU_UP_CANCELED is seen, hardware_enable() has not been called on
the CPU that is going up, because raw_notifier_call_chain(CPU_ONLINE)
has not been called for this CPU. Drop the handling for
CPU_UP_CANCELED.
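Editor's note, for illustration: hardware_enable() is only triggered
from the CPU_ONLINE notification, so a cancelled bring-up never had
anything enabled. A sketch of the notifier logic this relies on
(abridged):

	switch (val) {
	case CPU_DYING:
		hardware_disable(NULL);		/* CPU_ONLINE ran earlier */
		break;
	case CPU_ONLINE:
		/* only here is virtualization enabled on the new CPU */
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	/* CPU_UP_CANCELED: CPU_ONLINE never ran -- nothing to disable */
	}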
Signed-off-by: Lai Jiangshan
Signed-off-by: Avi Kivity
---
 virt/kvm/kvm_main.c | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4901ec5..6dc9404 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1931,11 +1931,6 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 		       cpu);
 		hardware_disable(NULL);
 		break;
-	case CPU_UP_CANCELED:
-		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
-		       cpu);
-		smp_call_function_single(cpu, hardware_disable, NULL, 1);
-		break;
 	case CPU_ONLINE:
 		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
 		       cpu);
--
cgit v1.1

From d14769377a247d4e7b570592a090474c8a059938 Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Fri, 23 Apr 2010 17:48:35 +0900
Subject: KVM: Remove test-before-set optimization for dirty bits

As Avi pointed out, the test-bit part of mark_page_dirty() was
important in the days of shadow paging, but EPT and NPT have since
become common and the chance of faulting a page more than once per
iteration is small. So let's remove the test to avoid the extra access.

Signed-off-by: Takuya Yoshikawa
Signed-off-by: Avi Kivity
---
 virt/kvm/kvm_main.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6dc9404..9ab1a77 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1192,9 +1192,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
-		/* avoid RMW */
-		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+		generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
 	}
 }
 
--
cgit v1.1

From 0ee75bead83da4791e5cbf659806c54d8ee40f12 Mon Sep 17 00:00:00 2001
From: Avi Kivity
Date: Wed, 28 Apr 2010 15:39:01 +0300
Subject: KVM: Let vcpu structure alignment be determined at runtime

vmx and svm vcpus have different contents and therefore may have
different alignment requirements. Let each specify its required
alignment.

Signed-off-by: Avi Kivity
---
 virt/kvm/kvm_main.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9ab1a77..f032806 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2178,7 +2178,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 	kvm_arch_vcpu_put(vcpu);
 }
 
-int kvm_init(void *opaque, unsigned int vcpu_size,
+int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	     struct module *module)
 {
 	int r;
@@ -2228,8 +2228,9 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 		goto out_free_4;
 
 	/* A kmem cache lets us meet the alignment requirements of fx_save. */
-	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
-					   __alignof__(struct kvm_vcpu),
+	if (!vcpu_align)
+		vcpu_align = __alignof__(struct kvm_vcpu);
+	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
 					   0, NULL);
 	if (!kvm_vcpu_cache) {
 		r = -ENOMEM;
--
cgit v1.1
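Editor's note: with this change an arch module can pass its own vcpu
alignment to kvm_init(), or 0 to fall back to
__alignof__(struct kvm_vcpu). A hypothetical arch-side caller (sketch;
the matching arch change is outside this virt/ diff):

	static int __init vmx_init(void)
	{
		/* illustrative names */
		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
				__alignof__(struct vcpu_vmx), THIS_MODULE);
	}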