author		Linus Torvalds <torvalds@linux-foundation.org>	2008-10-11 11:03:09 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-11 11:03:12 -0700
commit		bf6f51e3a46f6a602853d3cbacd05864bc6e2a37 (patch)
tree		72ae3563c8a6703e276f005fce5b3f1a3daca08c /lib
parent		ec8deffa33757286ba59e71d3d98173c37638b37 (diff)
parent		725c25819e4a0dafdcf42a5f31bc569341919c7c (diff)
Merge phase #3 (IOMMU) of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-v28-for-linus-phase3-B' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (74 commits)
  AMD IOMMU: use iommu_device_max_index, fix
  AMD IOMMU: use iommu_device_max_index
  x86: add PCI IDs for AMD Barcelona PCI devices
  x86/iommu: use __GFP_ZERO instead of memset for GART
  x86/iommu: convert GART need_flush to bool
  x86/iommu: make GART driver checkpatch clean
  x86 gart: remove unnecessary initialization
  x86: restore old GART alloc_coherent behavior
  revert "x86: make GART to respect device's dma_mask about virtual mappings"
  x86: export pci-nommu's alloc_coherent
  iommu: remove fullflush and nofullflush in IOMMU generic option
  x86: remove set_bit_string()
  iommu: export iommu_area_reserve helper function
  AMD IOMMU: use coherent_dma_mask in alloc_coherent
  add AMD IOMMU tree to MAINTAINERS file
  AMD IOMMU: use cmd_buf_size when freeing the command buffer
  AMD IOMMU: calculate IVHD size with a function
  AMD IOMMU: remove unnecessary cast to u64 in the init code
  AMD IOMMU: free domain bitmap with its allocation order
  AMD IOMMU: simplify dma_mask_to_pages
  ...
Diffstat (limited to 'lib')
-rw-r--r--	lib/iommu-helper.c	5
-rw-r--r--	lib/swiotlb.c		49
2 files changed, 22 insertions(+), 32 deletions(-)
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index a3b8d4c..5d90074 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -30,8 +30,7 @@ again:
return index;
}
-static inline void set_bit_area(unsigned long *map, unsigned long i,
- int len)
+void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
{
unsigned long end = i + len;
while (i < end) {
@@ -64,7 +63,7 @@ again:
start = index + 1;
goto again;
}
- set_bit_area(map, index, nr);
+ iommu_area_reserve(map, index, nr);
}
return index;
}
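
The iommu-helper hunks above are a rename-and-export: the file-local set_bit_area() becomes iommu_area_reserve(), so other IOMMU code can reserve bitmap ranges through one shared helper (this is what lets the series drop x86's set_bit_string()). A minimal userspace model of the reserve loop, using a plain bitmap and a hand-rolled set_bit() in place of the kernel's:

	#include <limits.h>
	#include <stdio.h>

	#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

	/* Userspace stand-in for the kernel's bitmap set_bit(). */
	static void set_bit(unsigned long nr, unsigned long *map)
	{
		map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
	}

	/* Same loop as the exported helper: mark bits i..i+len-1 reserved. */
	static void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
	{
		unsigned long end = i + len;

		while (i < end) {
			set_bit(i, map);
			i++;
		}
	}

	int main(void)
	{
		unsigned long map[2] = { 0, 0 };

		iommu_area_reserve(map, 6, 4);		/* reserve bits 6..9 */
		printf("map[0] = %#lx\n", map[0]);	/* prints 0x3c0 */
		return 0;
	}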
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 8826fdf..f8eebd4 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,14 @@ cleanup1:
}
static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
- dma_addr_t mask = 0xffffffff;
- /* If the device has a mask, use it, otherwise default to 32 bits */
- if (hwdev && hwdev->dma_mask)
- mask = *hwdev->dma_mask;
- return (addr & ~mask) != 0;
+ return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+static int is_swiotlb_buffer(char *addr)
+{
+ return addr >= io_tlb_start && addr < io_tlb_end;
}
/*
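
This hunk changes the semantics of address_needs_mapping(), not just its shape: with the buffer size passed in, a buffer that starts below the device's DMA mask but ends above it is now correctly sent through the bounce path, and the recurring io_tlb_start/io_tlb_end range test gets a name, is_swiotlb_buffer(). A sketch of the check, assuming is_buffer_dma_capable() is the last-byte-under-the-mask comparison its name suggests:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t dma_addr_t;

	/* Assumed shape of is_buffer_dma_capable(): the buffer's last
	 * byte, not just its first, must be reachable under the mask. */
	static bool is_buffer_dma_capable(uint64_t mask, dma_addr_t addr, size_t size)
	{
		return addr + size - 1 <= mask;
	}

	int main(void)
	{
		uint64_t mask32 = 0xffffffffULL;

		/* Crosses the 4 GiB boundary: passed the old start-only
		 * test, is bounced by the new size-aware one. */
		printf("%d\n", is_buffer_dma_capable(mask32, 0xfffff000ULL, 8192));	/* 0 */
		/* Ends exactly at the mask: still fine. */
		printf("%d\n", is_buffer_dma_capable(mask32, 0xfffff000ULL, 4096));	/* 1 */
		return 0;
	}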
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
void *ret;
int order = get_order(size);
- /*
- * XXX fix me: the DMA API should pass us an explicit DMA mask
- * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
- * bit range instead of a 16MB one).
- */
- flags |= GFP_DMA;
-
ret = (void *)__get_free_pages(flags, order);
- if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+ if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
/*
* The allocated memory isn't reachable by the device.
* Fall back on swiotlb_map_single().
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
* swiotlb_map_single(), which will grab memory from
* the lowest available address range.
*/
- dma_addr_t handle;
- handle = swiotlb_map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
- if (swiotlb_dma_mapping_error(hwdev, handle))
+ ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+ if (!ret)
return NULL;
-
- ret = bus_to_virt(handle);
}
memset(ret, 0, size);
dev_addr = virt_to_bus(ret);
/* Confirm address can be DMA'd by device */
- if (address_needs_mapping(hwdev, dev_addr)) {
+ if (address_needs_mapping(hwdev, dev_addr, size)) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)*hwdev->dma_mask,
(unsigned long long)dev_addr);
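
The two swiotlb_alloc_coherent() hunks make two independent fixes: the blanket GFP_DMA (flagged by the deleted XXX comment as a workaround for the missing explicit mask) is dropped in favour of checking the allocation against the device's real mask, and the fallback calls the internal map_single() directly, which returns a kernel virtual address or NULL, so the swiotlb_map_single()/bus_to_virt() round trip disappears. A userspace model of the resulting control flow, with malloc() standing in for __get_free_pages() and toy helpers for the rest:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static char bounce_pool[4096];	/* stand-in for the io_tlb slots */

	/* Toy map_single(): hands back a pointer (or NULL on failure),
	 * mirroring the switch away from a dma_addr_t handle. */
	static void *map_single(size_t size)
	{
		return size <= sizeof(bounce_pool) ? bounce_pool : NULL;
	}

	/* Toy predicate: pretend ordinary allocations are out of reach,
	 * forcing the fallback; only the pool counts as reachable. */
	static bool address_needs_mapping(void *addr, size_t size)
	{
		(void)size;
		return addr != bounce_pool;
	}

	static void *alloc_coherent(size_t size)
	{
		void *ret = malloc(size);	/* was __get_free_pages() */

		if (ret && address_needs_mapping(ret, size)) {
			free(ret);		/* kernel: free_pages() */
			ret = map_single(size);	/* no bus_to_virt() detour */
		}
		if (!ret)
			return NULL;
		memset(ret, 0, size);
		return ret;
	}

	int main(void)
	{
		void *buf = alloc_coherent(256);

		printf("%s\n", buf == bounce_pool ? "bounced" : "direct");
		return 0;
	}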
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
WARN_ON(irqs_disabled());
- if (!(vaddr >= (void *)io_tlb_start
- && vaddr < (void *)io_tlb_end))
+ if (!is_swiotlb_buffer(vaddr))
free_pages((unsigned long) vaddr, get_order(size));
else
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+ unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
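
The free side mirrors this: is_swiotlb_buffer() now decides whether the memory came from the bounce pool. Passing DMA_TO_DEVICE to unmap_single() is deliberate, as the surviving comment notes: an unmap only copies the bounce slot back for device-to-memory directions, and on free there is nothing left to copy back. A tiny model of that copy-back rule:

	#include <stdio.h>
	#include <string.h>

	enum dir { DMA_TO_DEVICE, DMA_FROM_DEVICE };

	/* The rule the comment relies on: only a device-to-memory unmap
	 * syncs the bounce slot into the caller's buffer. */
	static void unmap_copyback(char *orig, const char *bounce, size_t n, enum dir d)
	{
		if (d == DMA_FROM_DEVICE)
			memcpy(orig, bounce, n);
	}

	int main(void)
	{
		char orig[8] = "old", bounce[8] = "new";

		unmap_copyback(orig, bounce, sizeof(orig), DMA_TO_DEVICE);
		printf("%s\n", orig);	/* still "old": memcpy skipped */
		return 0;
	}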
static void
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+ if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
return dev_addr;
/*
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
/*
* Ensure that the address returned is DMA'ble
*/
- if (address_needs_mapping(hwdev, dev_addr))
+ if (address_needs_mapping(hwdev, dev_addr, size))
panic("map_single: bounce buffer is not DMA'ble");
return dev_addr;
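
In the map path the size-aware predicate appears twice: first to take the fast path (return the bus address untouched when the device can reach it and swiotlb_force is off), then to verify that the chosen bounce slot is itself reachable, which is unrecoverable if false. A sketch of the decision, with a hypothetical needs_mapping() standing in for address_needs_mapping():

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static bool swiotlb_force = false;

	/* Hypothetical stand-in for address_needs_mapping(hwdev, addr, size). */
	static bool needs_mapping(uint64_t addr, uint64_t size, uint64_t mask)
	{
		return addr + size - 1 > mask;
	}

	static uint64_t map_sketch(uint64_t dev_addr, uint64_t size,
				   uint64_t mask, uint64_t bounce_addr)
	{
		/* Fast path: buffer reachable and bouncing not forced. */
		if (!needs_mapping(dev_addr, size, mask) && !swiotlb_force)
			return dev_addr;

		/* Bounce; an unreachable bounce slot cannot be worked around. */
		if (needs_mapping(bounce_addr, size, mask)) {
			fprintf(stderr, "bounce buffer is not DMA'ble\n");
			abort();	/* the kernel panics here */
		}
		return bounce_addr;
	}

	int main(void)
	{
		/* 32-bit mask; buffer above 4 GiB, bounce pool well below. */
		printf("%#llx\n", (unsigned long long)
		       map_sketch(0x100000000ULL, 0x1000, 0xffffffffULL, 0x1000000ULL));
		return 0;
	}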
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
unmap_single(hwdev, dma_addr, size, dir);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr) + offset;
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
for_each_sg(sgl, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
dev_addr = virt_to_bus(addr);
- if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+ if (swiotlb_force ||
+ address_needs_mapping(hwdev, dev_addr, sg->length)) {
void *map = map_single(hwdev, addr, sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users