author     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>  2008-02-01 23:09:32 +0100
committer  Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>  2008-02-01 23:09:32 +0100
commit     5c05ff68b9a9b40a9be949497e0aa980185565cf (patch)
tree       d4d9a59a0f2d8b6ab3e107d5b15ed0d0709f4997 /drivers/ide
parent     f8341c1c19730f1869f2f12e30fe56ff4afb4189 (diff)
ide: switch to DMA-mapping API
* pci_map_sg() -> dma_map_sg() in ide_build_sglist().
* pci_unmap_sg() -> dma_unmap_sg() in ide_destroy_dmatable().

There should be no functionality changes caused by this patch except on
the blackfin arch, whose dma_[un]map_sg() implementation differs from the
pci_[un]map_sg() one (on s390 there is no PCI; on avr32 and h8300 PCI is
currently unsupported; on m32r PCI support depends on BROKEN; on m68k PCI
support depends on HADES, which in turn depends on BROKEN; on all other
archs the dma_[un]map_sg() functionality matches the pci_[un]map_sg()
one). The blackfin behavior change was ack-ed by Bryan Wu.

Cc: Bryan Wu <bryan.wu@analog.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
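For context on why the conversion is mechanical on most architectures: the
pci_[un]map_sg() helpers of that era were thin wrappers around the generic
DMA-mapping calls. A minimal sketch of the equivalence, in the style of the
asm-generic pci-dma-compat wrappers (simplified for illustration; the name
sketch_pci_map_sg is not from any tree):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static inline int sketch_pci_map_sg(struct pci_dev *hwdev,
					    struct scatterlist *sg,
					    int nents, int direction)
	{
		/* pci_map_sg(pdev, ...) boils down to dma_map_sg(&pdev->dev, ...) */
		return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents,
				  (enum dma_data_direction)direction);
	}

Given a wrapper like the above, passing hwif->dev (the generic struct
device) straight to dma_map_sg() simply skips the PCI veneer without
changing behavior.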
Diffstat (limited to 'drivers/ide')
-rw-r--r--  drivers/ide/ide-dma.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 782e5da..ec7c5c8 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -85,6 +85,7 @@
#include <linux/ide.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -175,26 +176,26 @@ static int ide_dma_good_drive(ide_drive_t *drive)
* @drive: the drive to build the DMA table for
* @rq: the request holding the sg list
*
- * Perform the PCI mapping magic necessary to access the source or
- * target buffers of a request via PCI DMA. The lower layers of the
+ * Perform the DMA mapping magic necessary to access the source or
+ * target buffers of a request via DMA. The lower layers of the
* kernel provide the necessary cache management so that we can
- * operate in a portable fashion
+ * operate in a portable fashion.
*/
int ide_build_sglist(ide_drive_t *drive, struct request *rq)
{
ide_hwif_t *hwif = HWIF(drive);
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
struct scatterlist *sg = hwif->sg_table;
ide_map_sg(drive, rq);
if (rq_data_dir(rq) == READ)
- hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
+ hwif->sg_dma_direction = DMA_FROM_DEVICE;
else
- hwif->sg_dma_direction = PCI_DMA_TODEVICE;
+ hwif->sg_dma_direction = DMA_TO_DEVICE;
- return pci_map_sg(pdev, sg, hwif->sg_nents, hwif->sg_dma_direction);
+ return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
+ hwif->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_build_sglist);
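The direction constants swapped above also map one-to-one: the old PCI
defines were chosen to match enum dma_data_direction numerically, so
storing DMA_FROM_DEVICE/DMA_TO_DEVICE in hwif->sg_dma_direction changes
nothing at runtime. For illustration, the relevant definitions from
<linux/dma-mapping.h> and the PCI compat defines of that era:

	enum dma_data_direction {
		DMA_BIDIRECTIONAL = 0,
		DMA_TO_DEVICE = 1,
		DMA_FROM_DEVICE = 2,
		DMA_NONE = 3,
	};

	#define PCI_DMA_BIDIRECTIONAL	0
	#define PCI_DMA_TODEVICE	1
	#define PCI_DMA_FROMDEVICE	2
	#define PCI_DMA_NONE		3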
@@ -308,9 +309,8 @@ EXPORT_SYMBOL_GPL(ide_build_dmatable);
void ide_destroy_dmatable (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- pci_unmap_sg(pdev, hwif->sg_table, hwif->sg_nents,
+ dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
hwif->sg_dma_direction);
}
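A hedged sketch (not part of this patch; sketch_dma_setup is an
illustrative name) of how a caller pairs the two converted helpers.
dma_map_sg() returns the number of mapped entries, or 0 on failure, so the
return value of ide_build_sglist() must be checked before programming the
controller:

	static int sketch_dma_setup(ide_drive_t *drive, struct request *rq)
	{
		/* map the request's sg list; 0 mapped entries means failure */
		if (ide_build_sglist(drive, rq) == 0)
			return 1;	/* caller falls back to PIO */

		/* ... build the PRD table and start the transfer here ... */

		/* when the transfer completes, unmap what was mapped above
		 * (shown inline here for brevity) */
		ide_destroy_dmatable(drive);
		return 0;
	}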