Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c | 154
1 files changed, 109 insertions, 45 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 5182b28..247ed2a 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -218,10 +218,20 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                              struct ixgbe_tx_buffer
                                              *tx_buffer_info)
 {
-        tx_buffer_info->dma = 0;
+        if (tx_buffer_info->dma) {
+                if (tx_buffer_info->mapped_as_page)
+                        pci_unmap_page(adapter->pdev,
+                                       tx_buffer_info->dma,
+                                       tx_buffer_info->length,
+                                       PCI_DMA_TODEVICE);
+                else
+                        pci_unmap_single(adapter->pdev,
+                                         tx_buffer_info->dma,
+                                         tx_buffer_info->length,
+                                         PCI_DMA_TODEVICE);
+                tx_buffer_info->dma = 0;
+        }
         if (tx_buffer_info->skb) {
-                skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
-                              DMA_TO_DEVICE);
                 dev_kfree_skb_any(tx_buffer_info->skb);
                 tx_buffer_info->skb = NULL;
         }
@@ -242,11 +252,11 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring)
 {
-        int tc;
         u32 txoff = IXGBE_TFCS_TXOFF;
 
 #ifdef CONFIG_IXGBE_DCB
         if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                int tc;
                 int reg_idx = tx_ring->reg_idx;
                 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
@@ -403,7 +413,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                     !test_bit(__IXGBE_DOWN, &adapter->state)) {
                         netif_wake_subqueue(netdev, tx_ring->queue_index);
-                        ++adapter->restart_queue;
+                        ++tx_ring->restart_queue;
                 }
         }
 
@@ -614,7 +624,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 
         /* It must be a TCP or UDP packet with a valid checksum */
         skb->ip_summed = CHECKSUM_UNNECESSARY;
-        adapter->hw_csum_rx_good++;
 }
 
 static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
@@ -671,14 +680,19 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 
                 if (!bi->skb) {
                         struct sk_buff *skb;
-                        skb = netdev_alloc_skb_ip_align(adapter->netdev,
-                                                        rx_ring->rx_buf_len);
+                        /* netdev_alloc_skb reserves 32 bytes up front!! */
+                        uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
+                        skb = netdev_alloc_skb(adapter->netdev, bufsz);
 
                         if (!skb) {
                                 adapter->alloc_rx_buff_failed++;
                                 goto no_buffers;
                         }
 
+                        /* advance the data pointer to the next cache line */
+                        skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
+                                          - skb->data));
+
                         bi->skb = skb;
                         bi->dma = pci_map_single(pdev, skb->data,
                                                  rx_ring->rx_buf_len,
@@ -791,8 +805,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                         hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                         len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                                IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-                        if (hdr_info & IXGBE_RXDADV_SPH)
-                                adapter->rx_hdr_split++;
                         if (len > IXGBE_RX_HDR_SIZE)
                                 len = IXGBE_RX_HDR_SIZE;
                         upper_len = le16_to_cpu(rx_desc->wb.upper.length);
@@ -802,7 +814,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
                 cleaned = true;
                 skb = rx_buffer_info->skb;
-                prefetch(skb->data - NET_IP_ALIGN);
+                prefetch(skb->data);
                 rx_buffer_info->skb = NULL;
 
                 if (rx_buffer_info->dma) {
@@ -874,7 +886,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                 skb->next = next_buffer->skb;
                                 skb->next->prev = skb;
                         }
-                        adapter->non_eop_descs++;
+                        rx_ring->non_eop_descs++;
                         goto next_desc;
                 }
 
@@ -1317,8 +1329,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
                                       r_idx + 1);
         }
 
-        /* disable interrupts on this vector only */
-        ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+        /* EIAM disabled interrupts (on this vector) for us */
         napi_schedule(&q_vector->napi);
 
         return IRQ_HANDLED;
@@ -1350,7 +1361,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
                 return IRQ_HANDLED;
 
         /* disable interrupts on this vector only */
-        ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+        /* EIAM disabled interrupts (on this vector) for us */
         napi_schedule(&q_vector->napi);
 
         return IRQ_HANDLED;
@@ -1385,8 +1396,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
                                       r_idx + 1);
         }
 
-        /* disable interrupts on this vector only */
-        ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+        /* EIAM disabled interrupts (on this vector) for us */
         napi_schedule(&q_vector->napi);
 
         return IRQ_HANDLED;
@@ -2704,7 +2714,22 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
         }
 
-        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+                /*
+                 * use EIAM to auto-mask when MSI-X interrupt is asserted
+                 * this saves a register write for every interrupt
+                 */
+                switch (hw->mac.type) {
+                case ixgbe_mac_82598EB:
+                        IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+                        break;
+                default:
+                case ixgbe_mac_82599EB:
+                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+                        break;
+                }
+        } else {
                 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
                  * specifically only auto mask tx and rx interrupts */
                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
@@ -3948,8 +3973,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
                 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
                 adapter->ring_feature[RING_F_FCOE].indices = 0;
+#ifdef CONFIG_IXGBE_DCB
                 /* Default traffic class to use for FCoE */
                 adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
+#endif
 #endif /* IXGBE_FCOE */
         }
 
@@ -4499,6 +4526,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                 adapter->rsc_total_flush = rsc_flush;
         }
 
+        /* gather some stats to the adapter struct that are per queue */
+        for (i = 0; i < adapter->num_tx_queues; i++)
+                adapter->restart_queue += adapter->tx_ring[i].restart_queue;
+
+        for (i = 0; i < adapter->num_rx_queues; i++)
+                adapter->non_eop_descs += adapter->tx_ring[i].non_eop_descs;
+
         adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
         for (i = 0; i < 8; i++) {
                 /* for packet buffers not used, the register should read 0 */
@@ -4881,14 +4915,12 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                                                                  iph->daddr, 0,
                                                                  IPPROTO_TCP,
                                                                  0);
-                        adapter->hw_tso_ctxt++;
                 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
                         ipv6_hdr(skb)->payload_len = 0;
                         tcp_hdr(skb)->check =
                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                              &ipv6_hdr(skb)->daddr,
                                              0, IPPROTO_TCP, 0);
-                        adapter->hw_tso6_ctxt++;
                 }
 
                 i = tx_ring->next_to_use;
@@ -5007,7 +5039,6 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 
                 tx_buffer_info->time_stamp = jiffies;
                 tx_buffer_info->next_to_watch = i;
-                adapter->hw_csum_tx_good++;
                 i++;
                 if (i == tx_ring->count)
                         i = 0;
@@ -5024,23 +5055,16 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                         struct sk_buff *skb, u32 tx_flags,
                         unsigned int first)
 {
+        struct pci_dev *pdev = adapter->pdev;
         struct ixgbe_tx_buffer *tx_buffer_info;
         unsigned int len;
         unsigned int total = skb->len;
         unsigned int offset = 0, size, count = 0, i;
         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
         unsigned int f;
-        dma_addr_t *map;
 
         i = tx_ring->next_to_use;
 
-        if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-                dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-                return 0;
-        }
-
-        map = skb_shinfo(skb)->dma_maps;
-
         if (tx_flags & IXGBE_TX_FLAGS_FCOE)
                 /* excluding fcoe_crc_eof for FCoE */
                 total -= sizeof(struct fcoe_crc_eof);
@@ -5051,7 +5075,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                 tx_buffer_info->length = size;
-                tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
+                tx_buffer_info->mapped_as_page = false;
+                tx_buffer_info->dma = pci_map_single(pdev,
+                                                     skb->data + offset,
+                                                     size, PCI_DMA_TODEVICE);
+                if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+                        goto dma_error;
                 tx_buffer_info->time_stamp = jiffies;
                 tx_buffer_info->next_to_watch = i;
 
@@ -5072,7 +5101,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
                 frag = &skb_shinfo(skb)->frags[f];
                 len = min((unsigned int)frag->size, total);
-                offset = 0;
+                offset = frag->page_offset;
 
                 while (len) {
                         i++;
@@ -5083,7 +5112,13 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                         size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                         tx_buffer_info->length = size;
-                        tx_buffer_info->dma = map[f] + offset;
+                        tx_buffer_info->dma = pci_map_page(adapter->pdev,
+                                                           frag->page,
+                                                           offset, size,
+                                                           PCI_DMA_TODEVICE);
+                        tx_buffer_info->mapped_as_page = true;
+                        if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+                                goto dma_error;
                         tx_buffer_info->time_stamp = jiffies;
                         tx_buffer_info->next_to_watch = i;
 
@@ -5100,6 +5135,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
         tx_ring->tx_buffer_info[first].next_to_watch = i;
 
         return count;
+
+dma_error:
+        dev_err(&pdev->dev, "TX DMA map failed\n");
+
+        /* clear timestamp and dma mappings for failed tx_buffer_info map */
+        tx_buffer_info->dma = 0;
+        tx_buffer_info->time_stamp = 0;
+        tx_buffer_info->next_to_watch = 0;
+        count--;
+
+        /* clear timestamp and dma mappings for remaining portion of packet */
+        while (count >= 0) {
+                count--;
+                i--;
+                if (i < 0)
+                        i += tx_ring->count;
+                tx_buffer_info = &tx_ring->tx_buffer_info[i];
+                ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+        }
+
+        return count;
 }
 
 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5219,8 +5275,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                  struct ixgbe_ring *tx_ring, int size)
 {
-        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
         netif_stop_subqueue(netdev, tx_ring->queue_index);
         /* Herbert's original patch had:
          *  smp_mb__after_netif_stop_queue();
@@ -5234,7 +5288,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 
         /* A reprieve! - use start_queue because it doesn't call schedule */
         netif_start_subqueue(netdev, tx_ring->queue_index);
-        ++adapter->restart_queue;
+        ++tx_ring->restart_queue;
         return 0;
 }
 
@@ -5249,10 +5303,19 @@ static int ixgbe_maybe_stop_tx(struct net_device *netdev,
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
         struct ixgbe_adapter *adapter = netdev_priv(dev);
+        int txq = smp_processor_id();
 
         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-                return smp_processor_id();
+                return txq;
 
+#ifdef IXGBE_FCOE
+        if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+            (skb->protocol == htons(ETH_P_FCOE))) {
+                txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+                txq += adapter->ring_feature[RING_F_FCOE].mask;
+                return txq;
+        }
+#endif
         if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                 return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
@@ -5267,7 +5330,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
         unsigned int first;
         unsigned int tx_flags = 0;
         u8 hdr_len = 0;
-        int r_idx = 0, tso;
+        int tso;
         int count = 0;
         unsigned int f;
 
@@ -5275,13 +5338,13 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
                 tx_flags |= vlan_tx_tag_get(skb);
                 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                         tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
-                        tx_flags |= (skb->queue_mapping << 13);
+                        tx_flags |= ((skb->queue_mapping & 0x7) << 13);
                 }
                 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
         } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                 if (skb->priority != TC_PRIO_CONTROL) {
-                        tx_flags |= (skb->queue_mapping << 13);
+                        tx_flags |= ((skb->queue_mapping & 0x7) << 13);
                         tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                         tx_flags |= IXGBE_TX_FLAGS_VLAN;
                 } else {
@@ -5290,17 +5353,18 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
                 }
         }
 
-        r_idx = skb->queue_mapping;
-        tx_ring = &adapter->tx_ring[r_idx];
+        tx_ring = &adapter->tx_ring[skb->queue_mapping];
 
         if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
             (skb->protocol == htons(ETH_P_FCOE))) {
                 tx_flags |= IXGBE_TX_FLAGS_FCOE;
 #ifdef IXGBE_FCOE
-                r_idx = smp_processor_id();
-                r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
-                r_idx += adapter->ring_feature[RING_F_FCOE].mask;
-                tx_ring = &adapter->tx_ring[r_idx];
+#ifdef CONFIG_IXGBE_DCB
+                tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+                              << IXGBE_TX_FLAGS_VLAN_SHIFT);
+                tx_flags |= ((adapter->fcoe.up << 13)
+                             << IXGBE_TX_FLAGS_VLAN_SHIFT);
+#endif
+#endif
         }
         /* four things can cause us to need a context descriptor */
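As a side note for readers of the ixgbe_tx_map() hunks above: the sketch below models, in ordinary userspace C, the unwind-on-failure pattern this patch introduces (map the linear area and each fragment separately, and on a mapping error walk back over the descriptors already filled and release them). Every name here (fake_map, fake_unmap, struct tx_buf, map_packet) is made up for illustration only; this is not driver code and not part of the patch.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for pci_map_single()/pci_map_page(); returning 0 models
 * pci_dma_mapping_error() reporting a failed mapping. */
static unsigned long fake_map(const void *addr, size_t len)
{
        (void)len;
        return (rand() % 8) ? (unsigned long)addr : 0;
}

static void fake_unmap(unsigned long dma, size_t len)
{
        (void)dma;
        (void)len;      /* nothing to release in this model */
}

struct tx_buf {
        unsigned long dma;
        size_t len;
        bool mapped_as_page;    /* mirrors the new ixgbe_tx_buffer field */
};

/* Map one "linear" area plus nfrags "page" fragments into ring[];
 * return descriptors used, or -1 after unwinding every mapping made
 * so far (the role of the dma_error: path added by the patch). */
static int map_packet(struct tx_buf *ring, size_t ring_size,
                      const void *linear, size_t linear_len,
                      const void **frags, const size_t *frag_lens,
                      size_t nfrags)
{
        size_t count = 0;

        ring[count].dma = fake_map(linear, linear_len);
        ring[count].len = linear_len;
        ring[count].mapped_as_page = false;
        if (!ring[count].dma)
                goto dma_error;
        count++;

        for (size_t f = 0; f < nfrags && count < ring_size; f++) {
                ring[count].dma = fake_map(frags[f], frag_lens[f]);
                ring[count].len = frag_lens[f];
                ring[count].mapped_as_page = true;
                if (!ring[count].dma)
                        goto dma_error;
                count++;
        }
        return (int)count;

dma_error:
        fprintf(stderr, "map failed, unwinding %zu mappings\n", count);
        while (count--) {       /* release already-mapped pieces in reverse */
                fake_unmap(ring[count].dma, ring[count].len);
                ring[count].dma = 0;
        }
        return -1;
}

int main(void)
{
        struct tx_buf ring[8] = { { 0 } };
        char head[64], frag0[256], frag1[256];
        const void *frags[] = { frag0, frag1 };
        const size_t lens[] = { sizeof(frag0), sizeof(frag1) };
        int used = map_packet(ring, 8, head, sizeof(head), frags, lens, 2);

        printf("descriptors used: %d\n", used);
        return 0;
}

The driver-level difference from this sketch is that the real dma_error: path also clears the time stamp and next_to_watch fields and reuses ixgbe_unmap_and_free_tx_resource() (extended at the top of the diff to know about mapped_as_page) to do the actual unmapping.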