author    Joerg Roedel <joerg.roedel@amd.com>    2010-02-08 14:44:49 +0100
committer Joerg Roedel <joerg.roedel@amd.com>    2010-03-01 14:16:22 +0100
commit 5d214fe6e808a8caa9cb6f610c0190d3f50ac570 (patch)
tree   326b6d2ea83d9a5df8ea4eef91834dc3e0da2bac
parent 339d3261aa3eb0e12f68ef868e042c1ca03628f7 (diff)
x86/amd-iommu: Protect IOMMU-API map/unmap path
This patch introduces a mutex to lock page table updates in the IOMMU-API path. We can't use the spin_lock here because this path might sleep.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h  2
-rw-r--r--  arch/x86/kernel/amd_iommu.c              9
2 files changed, 11 insertions(+), 0 deletions(-)
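
Why a mutex rather than the domain's existing spin_lock: a spinlock holder must not sleep, but building the page table on the map path can require allocations that do sleep. The fragment below is a minimal sketch of that pattern, not code from the patch; alloc_pte() here is a hypothetical, simplified stand-in for the driver's page-table walk.

/*
 * Minimal sketch (not from the patch): the IOMMU-API map path may
 * sleep inside its critical section, so it must take a mutex.
 * alloc_pte() is a hypothetical, simplified stand-in for the
 * driver's page-table walk, which can allocate intermediate
 * page-table levels with GFP_KERNEL and therefore block.
 */
static int sketch_map_one(struct protection_domain *domain,
			  unsigned long iova, phys_addr_t paddr)
{
	u64 *pte;
	int ret = 0;

	mutex_lock(&domain->api_lock);	/* sleeping is allowed under a mutex */

	pte = alloc_pte(domain, iova, GFP_KERNEL);	/* may block ... */
	if (!pte)
		ret = -ENOMEM;	/* ... which spin_lock(&domain->lock) would forbid */
	else
		*pte = (u64)paddr | IOMMU_PTE_P;	/* mark the entry present */

	mutex_unlock(&domain->api_lock);
	return ret;
}
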
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index ba19ad4..5e46e78 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -21,6 +21,7 @@
#define _ASM_X86_AMD_IOMMU_TYPES_H
#include <linux/types.h>
+#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -237,6 +238,7 @@ struct protection_domain {
struct list_head list; /* for list of all protection domains */
struct list_head dev_list; /* List of all devices in this domain */
spinlock_t lock; /* mostly used to lock the page table*/
+ struct mutex api_lock; /* protect page tables in the iommu-api path */
u16 id; /* the domain id written to the device table */
int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 2c4a501..b97f2f1 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -2327,6 +2327,7 @@ static struct protection_domain *protection_domain_alloc(void)
return NULL;
spin_lock_init(&domain->lock);
+ mutex_init(&domain->api_lock);
domain->id = domain_id_alloc();
if (!domain->id)
goto out_err;
@@ -2456,6 +2457,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
iova &= PAGE_MASK;
paddr &= PAGE_MASK;
+ mutex_lock(&domain->api_lock);
+
for (i = 0; i < npages; ++i) {
ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
if (ret)
@@ -2465,6 +2468,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
paddr += PAGE_SIZE;
}
+ mutex_unlock(&domain->api_lock);
+
return 0;
}
@@ -2477,12 +2482,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
iova &= PAGE_MASK;
+ mutex_lock(&domain->api_lock);
+
for (i = 0; i < npages; ++i) {
iommu_unmap_page(domain, iova, PM_MAP_4k);
iova += PAGE_SIZE;
}
iommu_flush_tlb_pde(domain);
+
+ mutex_unlock(&domain->api_lock);
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
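
For context, this is roughly how a caller (device assignment, for example) reaches the now-serialized path through the generic IOMMU API of this kernel generation (linux/iommu.h, where iommu_domain_alloc() still takes no arguments). A hedged sketch with error handling trimmed; the function name and parameters are illustrative, not from the patch.

#include <linux/iommu.h>

/*
 * Hedged sketch of a caller on the IOMMU-API path.  Two such calls
 * racing on the same domain are now serialized by domain->api_lock
 * inside amd_iommu_map_range()/amd_iommu_unmap_range().
 */
static int sketch_assign_buffer(struct device *dev, unsigned long iova,
				phys_addr_t paddr, size_t size)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc();
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, dev);
	if (ret)
		goto out_free;

	/* dispatches to amd_iommu_map_range(), now under the mutex */
	ret = iommu_map_range(dom, iova, paddr, size,
			      IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		iommu_detach_device(dom, dev);
		goto out_free;
	}
	return 0;

out_free:
	iommu_domain_free(dom);
	return ret;
}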