author    Joerg Roedel <joerg.roedel@amd.com>  2010-09-23 16:12:48 +0200
committer Joerg Roedel <joerg.roedel@amd.com>  2010-09-23 16:26:20 +0200
commit    04e0463e088b41060c08c255eb0d3278a504f094 (patch)
tree      73a74dfb1575e9a7f255bcd6fcc84bcdb25f0e5f /arch/x86
parent    4c894f47bb49284008073d351c0ddaac8860864e (diff)
x86/amd-iommu: Fix rounding-bug in __unmap_single
In the __unmap_single function the dma_addr is rounded down to a page
boundary before the dma pages are unmapped. The address is later also
used to flush the TLB entries for that mapping. But without the offset
into the dma page the number of pages to flush might be miscalculated
in the TLB flushing path.

This patch fixes this bug by using the original address to flush the
TLB.

Cc: stable@kernel.org
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index fa044e1..679b645 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 			   size_t size,
 			   int dir)
 {
+	dma_addr_t flush_addr;
 	dma_addr_t i, start;
 	unsigned int pages;
 
@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 
+	flush_addr = dma_addr;
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr &= PAGE_MASK;
 	start = dma_addr;
@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
 	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-		iommu_flush_pages(&dma_dom->domain, dma_addr, size);
+		iommu_flush_pages(&dma_dom->domain, flush_addr, size);
 		dma_dom->need_flush = false;
 	}
 }
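
For reference, the sketch below (plain userspace C with made-up example addresses, not the kernel's actual flush path) illustrates why flushing with the page-aligned address can miss a page: the flush path counts pages with the same arithmetic as the kernel's iommu_num_pages() helper, which needs the offset into the first page.

/*
 * Minimal userspace sketch of the rounding bug the patch fixes.
 * num_pages() mirrors the arithmetic of iommu_num_pages(); the
 * addresses and sizes are made-up example values.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Number of IO pages touched by a mapping starting at addr. */
static unsigned long num_pages(unsigned long addr, unsigned long len)
{
	unsigned long span = (addr & (PAGE_SIZE - 1)) + len;

	return (span + PAGE_SIZE - 1) / PAGE_SIZE;	/* DIV_ROUND_UP */
}

int main(void)
{
	unsigned long dma_addr = 0x10800;	/* mapping starts mid-page */
	unsigned long size     = 0x1000;	/* so it spans two pages   */

	unsigned long flush_addr = dma_addr;		/* saved before rounding */
	unsigned long aligned    = dma_addr & PAGE_MASK;

	/* Old behaviour: offset lost, only one page counted for the flush. */
	printf("pages flushed with aligned addr:  %lu\n",
	       num_pages(aligned, size));

	/* Fixed behaviour: offset kept, both touched pages are flushed. */
	printf("pages flushed with original addr: %lu\n",
	       num_pages(flush_addr, size));

	return 0;
}

With these example values the aligned address yields a count of 1 while the original address yields 2, which is why the patch saves dma_addr in flush_addr before the PAGE_MASK rounding and passes that saved value to iommu_flush_pages().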