| author | Jeff Garzik <jeff@garzik.org> | 2007-08-08 02:16:04 -0400 |
| --- | --- | --- |
| committer | David S. Miller <davem@sunset.davemloft.net> | 2007-10-10 16:50:22 -0700 |
| commit | 6cc92cddb13a7874dcd7751c84b0e61738815077 (patch) | |
| tree | 9e33135f1ed5f4ad80e240d86f725e0207d2f030 /drivers/net/8139cp.c | |
| parent | ae94607d19028f9805e82da8975c66d3858fcfd8 (diff) | |
[netdrvr] 8139cp, 8139too: convert to generic DMA
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/8139cp.c')
-rw-r--r--  drivers/net/8139cp.c | 31
1 file changed, 17 insertions, 14 deletions
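The change is a mechanical API conversion: the pci_* DMA helpers take a struct pci_dev *, while the generic DMA API takes the embedded struct device *, so every call site below switches from cp->pdev to &cp->pdev->dev (the legacy PCI_DMA_* direction constants are kept). A minimal before/after sketch of that pattern, using hypothetical helper names and a bare struct pci_dev * argument rather than code from the patch:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Sketch only: map one TX buffer the generic-DMA way, as the patch does. */
static dma_addr_t map_tx_buf(struct pci_dev *pdev, void *buf, size_t len)
{
	/* old style: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE); */
	return dma_map_single(&pdev->dev, buf, len, PCI_DMA_TODEVICE);
}

/* Sketch only: the matching unmap for the buffer mapped above. */
static void unmap_tx_buf(struct pci_dev *pdev, dma_addr_t mapping, size_t len)
{
	/* old style: pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE); */
	dma_unmap_single(&pdev->dev, mapping, len, PCI_DMA_TODEVICE);
}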
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 7f18ca2..2dec3d6 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -562,7 +562,7 @@ rx_status_loop:
 		skb_reserve(new_skb, RX_OFFSET);
-		pci_unmap_single(cp->pdev, mapping,
+		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
 		/* Handle checksum offloading for incoming packets. */
@@ -573,7 +573,7 @@ rx_status_loop:
 		skb_put(skb, len);
-		mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
+		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
 					 PCI_DMA_FROMDEVICE);
 		cp->rx_skb[rx_tail] = new_skb;
@@ -701,7 +701,7 @@ static void cp_tx (struct cp_private *cp)
 		skb = cp->tx_skb[tx_tail];
 		BUG_ON(!skb);
-		pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
+		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
 				 le32_to_cpu(txd->opts1) & 0xffff,
 				 PCI_DMA_TODEVICE);
@@ -779,7 +779,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		dma_addr_t mapping;
 		len = skb->len;
-		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
 		CP_VLAN_TX_TAG(txd, vlan_tag);
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
@@ -815,7 +815,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		 */
 		first_eor = eor;
 		first_len = skb_headlen(skb);
-		first_mapping = pci_map_single(cp->pdev, skb->data,
+		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					       first_len, PCI_DMA_TODEVICE);
 		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
@@ -827,7 +827,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 			dma_addr_t mapping;
 			len = this_frag->size;
-			mapping = pci_map_single(cp->pdev,
+			mapping = dma_map_single(&cp->pdev->dev,
 						 ((void *) page_address(this_frag->page) +
 						  this_frag->page_offset),
 						 len, PCI_DMA_TODEVICE);
@@ -1066,8 +1066,8 @@ static int cp_refill_rx (struct cp_private *cp)
 		skb_reserve(skb, RX_OFFSET);
-		mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
-					 PCI_DMA_FROMDEVICE);
+		mapping = dma_map_single(&cp->pdev->dev, skb->data,
+					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[i] = skb;
 		cp->rx_ring[i].opts2 = 0;
@@ -1107,7 +1107,8 @@ static int cp_alloc_rings (struct cp_private *cp)
 {
 	void *mem;
-	mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
+	mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
+				 &cp->ring_dma, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
@@ -1125,7 +1126,7 @@ static void cp_clean_rings (struct cp_private *cp)
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
 		if (cp->rx_skb[i]) {
 			desc = cp->rx_ring + i;
-			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
+			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(cp->rx_skb[i]);
 		}
@@ -1136,7 +1137,7 @@ static void cp_clean_rings (struct cp_private *cp)
 			struct sk_buff *skb = cp->tx_skb[i];
 			desc = cp->tx_ring + i;
-			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
+			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
 					 le32_to_cpu(desc->opts1) & 0xffff,
 					 PCI_DMA_TODEVICE);
 			if (le32_to_cpu(desc->opts1) & LastFrag)
@@ -1155,7 +1156,8 @@ static void cp_clean_rings (struct cp_private *cp)
 static void cp_free_rings (struct cp_private *cp)
 {
 	cp_clean_rings(cp);
-	pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
+			  cp->ring_dma);
 	cp->rx_ring = NULL;
 	cp->tx_ring = NULL;
 }
@@ -1519,7 +1521,8 @@ static void cp_get_ethtool_stats (struct net_device *dev,
 	dma_addr_t dma;
 	int i;
-	nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
+	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
+				       &dma, GFP_KERNEL);
 	if (!nic_stats)
 		return;
@@ -1554,7 +1557,7 @@ static void cp_get_ethtool_stats (struct net_device *dev,
 	tmp_stats[i++] = cp->cp_stats.rx_frags;
 	BUG_ON(i != CP_NUM_STATS);
-	pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
+	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
 }
 static const struct ethtool_ops cp_ethtool_ops = {
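Coherent (consistent) allocations in the hunks above follow the same rule: pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent(), which also take an explicit GFP mask (GFP_KERNEL here). A hedged sketch of the allocation side, reusing the driver's cp_private fields but with a hypothetical wrapper name:

/* Sketch only: allocate the descriptor rings coherently, as cp_alloc_rings() now does. */
static void *alloc_rings_sketch(struct cp_private *cp)
{
	/* old style: pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma); */
	return dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
				  &cp->ring_dma, GFP_KERNEL);
}

The matching teardown in cp_free_rings() passes the same device, size, virtual address, and DMA handle to dma_free_coherent().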