Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/ixgbe.h          39
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c     2
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c   181
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c  261
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c    2
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c 230
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c     76
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.h      3
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c    511
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h     38
10 files changed, 1023 insertions(+), 320 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index dd688d4..8da8eb5 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -51,11 +51,11 @@
__func__ , ## args)))
/* TX/RX descriptor defines */
-#define IXGBE_DEFAULT_TXD 1024
+#define IXGBE_DEFAULT_TXD 512
#define IXGBE_MAX_TXD 4096
#define IXGBE_MIN_TXD 64
-#define IXGBE_DEFAULT_RXD 1024
+#define IXGBE_DEFAULT_RXD 512
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
@@ -106,6 +106,7 @@ struct ixgbe_tx_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
+ u16 mapped_as_page;
};
struct ixgbe_rx_buffer {
@@ -159,10 +160,13 @@ struct ixgbe_ring {
struct ixgbe_queue_stats stats;
unsigned long reinit_state;
u64 rsc_count; /* stat for coalesced packets */
+ u64 rsc_flush; /* stats for flushed packets */
+ u32 restart_queue; /* track tx queue restarts */
+ u32 non_eop_descs; /* track hardware descriptor chaining */
unsigned int size; /* length in bytes */
dma_addr_t dma; /* phys. address of descriptor ring */
-};
+} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
RING_F_NONE = 0,
@@ -187,7 +191,7 @@ enum ixgbe_ring_f_enum {
struct ixgbe_ring_feature {
int indices;
int mask;
-};
+} ____cacheline_internodealigned_in_smp;
#define MAX_RX_QUEUES 128
#define MAX_TX_QUEUES 128
@@ -267,34 +271,31 @@ struct ixgbe_adapter {
enum ixgbe_fc_mode last_lfc_mode;
/* Interrupt Throttle Rate */
- u32 itr_setting;
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
u16 eitr_low;
u16 eitr_high;
/* TX */
- struct ixgbe_ring *tx_ring; /* One per active queue */
+ struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
int num_tx_queues;
- u64 restart_queue;
- u64 hw_csum_tx_good;
- u64 lsc_int;
- u64 hw_tso_ctxt;
- u64 hw_tso6_ctxt;
u32 tx_timeout_count;
bool detect_tx_hung;
+ u64 restart_queue;
+ u64 lsc_int;
+
/* RX */
- struct ixgbe_ring *rx_ring; /* One per active queue */
+ struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
int num_rx_queues;
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
- u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
int max_msix_q_vectors; /* true count of q_vectors for device */
struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
- u64 rx_hdr_split;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -339,7 +340,6 @@ struct ixgbe_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- struct net_device_stats net_stats;
u32 test_icr;
struct ixgbe_ring test_tx_ring;
@@ -351,7 +351,8 @@ struct ixgbe_adapter {
struct ixgbe_hw_stats stats;
/* Interrupt Throttle Rate */
- u32 eitr_param;
+ u32 rx_eitr_param;
+ u32 tx_eitr_param;
unsigned long state;
u64 tx_busy;
@@ -374,7 +375,8 @@ struct ixgbe_adapter {
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
- u64 rsc_count;
+ u64 rsc_total_count;
+ u64 rsc_total_flush;
u32 wol;
u16 eeprom_version;
};
@@ -395,7 +397,7 @@ enum ixgbe_boards {
extern struct ixgbe_info ixgbe_82598_info;
extern struct ixgbe_info ixgbe_82599_info;
#ifdef CONFIG_IXGBE_DCB
-extern struct dcbnl_rtnl_ops dcbnl_ops;
+extern const struct dcbnl_rtnl_ops dcbnl_ops;
extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
struct ixgbe_dcb_config *dst_dcb_cfg,
int tc_max);
@@ -456,6 +458,7 @@ extern int ixgbe_fcoe_disable(struct net_device *netdev);
extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
+extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
#endif /* IXGBE_FCOE */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 56b12f3..e2d5343 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -425,7 +425,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
#endif /* CONFIG_DCB */
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- ret_val = -IXGBE_ERR_CONFIG;
+ ret_val = IXGBE_ERR_CONFIG;
goto out;
break;
}
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 2ec58dc..5383405 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -42,6 +42,10 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
@@ -64,7 +68,13 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
/* Set up dual speed SFP+ support */
mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
} else {
- mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+ if ((mac->ops.get_media_type(hw) ==
+ ixgbe_media_type_backplane) &&
+ (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+ hw->phy.smart_speed == ixgbe_smart_speed_on))
+ mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
+ else
+ mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
}
}
@@ -330,11 +340,15 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_82599_XAUI_LOM:
/* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_SFP_EM:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82599_CX4:
@@ -477,7 +491,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
hw->mac.autotry_restart = false;
}
- /* The controller may take up to 500ms at 10g to acquire link */
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+
for (i = 0; i < 5; i++) {
/* Wait for the link partner to also set speed */
msleep(100);
@@ -565,6 +584,111 @@ out:
}
/**
+ * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Implements the Intel SmartSpeed algorithm.
+ **/
+static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ ixgbe_link_speed link_speed;
+ s32 i, j;
+ bool link_up = false;
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
+
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ /*
+ * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
+ * autoneg advertisement if link is unable to be established at the
+ * highest negotiated rate. This can sometimes happen due to integrity
+ * issues with the physical media connection.
+ */
+
+ /* First, try to get link with full advertisement */
+ hw->phy.smart_speed_active = false;
+ for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ if (status)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
+ * Table 9 in the AN MAS.
+ */
+ for (i = 0; i < 5; i++) {
+ mdelay(100);
+
+ /* If we have link, just jump out */
+ hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (link_up)
+ goto out;
+ }
+ }
+
+ /*
+ * We didn't get link. If we advertised KR plus one of KX4/KX
+ * (or BX4/BX), then disable KR and try again.
+ */
+ if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
+ ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
+ goto out;
+
+ /* Turn SmartSpeed on to disable KR support */
+ hw->phy.smart_speed_active = true;
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ if (status)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. 600ms will allow for
+ * the AN link_fail_inhibit_timer as well for multiple cycles of
+ * parallel detect, both 10g and 1g. This allows for the maximum
+ * connect attempts as defined in the AN MAS table 73-7.
+ */
+ for (i = 0; i < 6; i++) {
+ mdelay(100);
+
+ /* If we have link, just jump out */
+ hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Turn SmartSpeed back off. */
+ hw->phy.smart_speed_active = false;
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+
+out:
+ return status;
+}
+
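The retry loops above bound how long SmartSpeed spends before giving up on the reduced advertisement. A sketch of the timing budget, assuming IXGBE_SMARTSPEED_MAX_RETRIES is 3 (its usual value in ixgbe_type.h):

/*
 * Sketch, assuming IXGBE_SMARTSPEED_MAX_RETRIES == 3:
 *
 *   full advertisement: 3 retries x (5 x 100 ms) = 1500 ms
 *   KR masked off:      1 attempt x (6 x 100 ms) =  600 ms
 *
 * so a link that only comes up at the reduced rate is found within
 * roughly 2.1 s, after which the original advertisement is restored.
 */
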
+/**
* ixgbe_check_mac_link_82599 - Determine link and speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
@@ -667,7 +791,8 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
autoc |= IXGBE_AUTOC_KX4_SUPP;
- if (orig_autoc & IXGBE_AUTOC_KR_SUPP)
+ if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
+ (hw->phy.smart_speed_active == false))
autoc |= IXGBE_AUTOC_KR_SUPP;
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
@@ -876,6 +1001,10 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->mac.num_rar_entries--;
}
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
reset_hw_out:
return status;
}
@@ -2412,6 +2541,51 @@ fw_version_out:
return status;
}
+/**
+ * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function will read the EEPROM from the alternative SAN MAC address
+ * block to check the support for the alternative WWNN/WWPN prefix support.
+ **/
+static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+ &alt_san_mac_blk_offset);
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ hw->eeprom.ops.read(hw, offset, &caps);
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+ return 0;
+}
+
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
@@ -2423,6 +2597,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_82599,
.get_device_caps = &ixgbe_get_device_caps_82599,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
.stop_adapter = &ixgbe_stop_adapter_generic,
.get_bus_info = &ixgbe_get_bus_info_generic,
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 6621e17..688b8ca 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1355,9 +1355,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
/**
* ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
* @hw: pointer to hardware structure
- * @addr_list: the list of new addresses
- * @addr_count: number of addresses
- * @next: iterator function to walk the address list
+ * @uc_list: the list of new addresses
*
* The given list replaces any existing list. Clears the secondary addrs from
* receive address registers. Uses unused receive address registers for the
@@ -1384,10 +1382,10 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
hw->addr_ctrl.overflow_promisc = 0;
/* Zero out the other receive addresses */
- hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
- for (i = 1; i <= uc_addr_in_use; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+ hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
+ for (i = 0; i < uc_addr_in_use; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
}
/* Add the new addresses */
@@ -1663,7 +1661,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
#endif /* CONFIG_DCB */
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- ret_val = -IXGBE_ERR_CONFIG;
+ ret_val = IXGBE_ERR_CONFIG;
goto out;
break;
}
@@ -1734,33 +1732,75 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
s32 ret_val = 0;
ixgbe_link_speed speed;
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+ u32 links2, anlp1_reg, autoc_reg, links;
bool link_up;
/*
* AN should have completed when the cable was plugged in.
* Look for reasons to bail out. Bail out if:
* - FC autoneg is disabled, or if
- * - we don't have multispeed fiber, or if
- * - we're not running at 1G, or if
- * - link is not up, or if
- * - link is up but AN did not complete, or if
- * - link is up and AN completed but timed out
+ * - link is not up.
*
- * Since we're being called from an LSC, link is already know to be up.
+ * Since we're being called from an LSC, link is already known to be up.
* So use link_up_wait_to_complete=false.
*/
hw->mac.ops.check_link(hw, &speed, &link_up, false);
- linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
-
- if (hw->fc.disable_fc_autoneg ||
- !hw->phy.multispeed_fiber ||
- (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
- !link_up ||
- ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
- ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+
+ if (hw->fc.disable_fc_autoneg || (!link_up)) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ goto out;
+ }
+
+ /*
+ * On backplane, bail out if
+ * - backplane autoneg was not completed, or if
+ * - we are 82599 and link partner is not AN enabled
+ */
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ goto out;
+ }
+
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * On multispeed fiber at 1g, bail out if
+ * - link is up but AN did not complete, or if
+ * - link is up and AN completed but timed out
+ */
+ if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
+ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ goto out;
+ }
+ }
+
+ /*
+ * Bail out on
+ * - copper or CX4 adapters
+ * - fiber adapters running at 10gig
+ */
+ if ((hw->phy.media_type == ixgbe_media_type_copper) ||
+ (hw->phy.media_type == ixgbe_media_type_cx4) ||
+ ((hw->phy.media_type == ixgbe_media_type_fiber) &&
+ (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
hw->fc.fc_was_autonegged = false;
hw->fc.current_mode = hw->fc.requested_mode;
- hw_dbg(hw, "Autoneg FC was skipped.\n");
goto out;
}
@@ -1768,41 +1808,85 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
* Read the AN advertisement and LP ability registers and resolve
* local flow control settings accordingly
*/
- pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
- if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
+ if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+ (hw->phy.media_type != ixgbe_media_type_backplane)) {
+ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+ if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
+ /*
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ hw_dbg(hw, "Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control=RX PAUSE only\n");
+ }
+ } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+ } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+ !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw_dbg(hw, "Flow Control = NONE.\n");
+ }
+ }
+
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
/*
- * Now we need to check if the user selected Rx ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
+ * Read the 10g AN autoc and LP ability registers and resolve
+ * local flow control settings accordingly
*/
- if (hw->fc.requested_mode == ixgbe_fc_full) {
- hw->fc.current_mode = ixgbe_fc_full;
- hw_dbg(hw, "Flow Control = FULL.\n");
- } else {
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+ if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+ (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
+ /*
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ hw_dbg(hw, "Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control=RX PAUSE only\n");
+ }
+ } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+ (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
+ (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
+ (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+ } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+ (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
+ !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
+ (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
hw->fc.current_mode = ixgbe_fc_rx_pause;
hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw_dbg(hw, "Flow Control = NONE.\n");
}
- } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_tx_pause;
- hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
- } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_none;
- hw_dbg(hw, "Flow Control = NONE.\n");
}
-
/* Record that current_mode is the result of a successful autoneg */
hw->fc.fc_was_autonegged = true;
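The four-way resolution above is the same IEEE 802.3 Annex 28B pause logic applied twice, once to the 1G PCS register pair and once to the 10G AUTOC/ANLP1 pair. Condensed (local advertisement vs. link-partner ability):

/*
 * local SYM         lp SYM          -> full (or rx_pause if the user
 *                                      requested rx-only pause)
 * local ASM only    lp SYM + ASM    -> tx_pause
 * local SYM + ASM   lp ASM only     -> rx_pause
 * anything else                     -> none
 */
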
@@ -1919,7 +2003,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
#endif /* CONFIG_DCB */
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- ret_val = -IXGBE_ERR_CONFIG;
+ ret_val = IXGBE_ERR_CONFIG;
goto out;
break;
}
@@ -1927,9 +2011,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
- /* Enable and restart autoneg to inform the link partner */
- reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
-
/* Disable AN timeout */
if (hw->fc.strict_ieee)
reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
@@ -1937,6 +2018,70 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ /*
+ * Set up the 10G flow control advertisement registers so the HW
+ * can do fc autoneg once the cable is plugged in. If we end up
+ * using 1g instead, this is harmless.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ /*
+ * The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ reg |= (IXGBE_AUTOC_ASM_PAUSE);
+ reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+ break;
+#ifdef CONFIG_DCB
+ case ixgbe_fc_pfc:
+ goto out;
+ break;
+#endif /* CONFIG_DCB */
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+ /*
+ * AUTOC restart handles negotiation of 1G and 10G. There is
+ * no need to set the PCS1GCTL register.
+ */
+ reg |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
+ hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+
out:
return ret_val;
}
@@ -2000,7 +2145,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
while (timeout) {
if (ixgbe_get_eeprom_semaphore(hw))
- return -IXGBE_ERR_SWFW_SYNC;
+ return IXGBE_ERR_SWFW_SYNC;
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
if (!(gssr & (fwmask | swmask)))
@@ -2017,7 +2162,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
if (!timeout) {
hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
- return -IXGBE_ERR_SWFW_SYNC;
+ return IXGBE_ERR_SWFW_SYNC;
}
gssr |= swmask;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index a6bc1ef..3c7a79a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -563,7 +563,7 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
return rval;
}
-struct dcbnl_rtnl_ops dcbnl_ops = {
+const struct dcbnl_rtnl_ops dcbnl_ops = {
.getstate = ixgbe_dcbnl_get_state,
.setstate = ixgbe_dcbnl_set_state,
.getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 026e94a..0bd49d3 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -40,56 +40,64 @@
#define IXGBE_ALL_RAR_ENTRIES 16
+enum {NETDEV_STATS, IXGBE_STATS};
+
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
+ int type;
int sizeof_stat;
int stat_offset;
};
-#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
- offsetof(struct ixgbe_adapter, m)
+#define IXGBE_STAT(m) IXGBE_STATS, \
+ sizeof(((struct ixgbe_adapter *)0)->m), \
+ offsetof(struct ixgbe_adapter, m)
+#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
+ sizeof(((struct net_device *)0)->m), \
+ offsetof(struct net_device, m)
+
static struct ixgbe_stats ixgbe_gstrings_stats[] = {
- {"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
- {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
- {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
- {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
+ {"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)},
+ {"tx_packets", IXGBE_NETDEV_STAT(stats.tx_packets)},
+ {"rx_bytes", IXGBE_NETDEV_STAT(stats.rx_bytes)},
+ {"tx_bytes", IXGBE_NETDEV_STAT(stats.tx_bytes)},
+ {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
+ {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
+ {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
+ {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
{"lsc_int", IXGBE_STAT(lsc_int)},
{"tx_busy", IXGBE_STAT(tx_busy)},
{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
- {"rx_errors", IXGBE_STAT(net_stats.rx_errors)},
- {"tx_errors", IXGBE_STAT(net_stats.tx_errors)},
- {"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)},
- {"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)},
- {"multicast", IXGBE_STAT(net_stats.multicast)},
+ {"rx_errors", IXGBE_NETDEV_STAT(stats.rx_errors)},
+ {"tx_errors", IXGBE_NETDEV_STAT(stats.tx_errors)},
+ {"rx_dropped", IXGBE_NETDEV_STAT(stats.rx_dropped)},
+ {"tx_dropped", IXGBE_NETDEV_STAT(stats.tx_dropped)},
+ {"multicast", IXGBE_NETDEV_STAT(stats.multicast)},
{"broadcast", IXGBE_STAT(stats.bprc)},
{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
- {"collisions", IXGBE_STAT(net_stats.collisions)},
- {"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
- {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
- {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
- {"hw_rsc_count", IXGBE_STAT(rsc_count)},
+ {"collisions", IXGBE_NETDEV_STAT(stats.collisions)},
+ {"rx_over_errors", IXGBE_NETDEV_STAT(stats.rx_over_errors)},
+ {"rx_crc_errors", IXGBE_NETDEV_STAT(stats.rx_crc_errors)},
+ {"rx_frame_errors", IXGBE_NETDEV_STAT(stats.rx_frame_errors)},
+ {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
+ {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
- {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
- {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
- {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
- {"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)},
- {"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)},
- {"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)},
+ {"rx_fifo_errors", IXGBE_NETDEV_STAT(stats.rx_fifo_errors)},
+ {"rx_missed_errors", IXGBE_NETDEV_STAT(stats.rx_missed_errors)},
+ {"tx_aborted_errors", IXGBE_NETDEV_STAT(stats.tx_aborted_errors)},
+ {"tx_carrier_errors", IXGBE_NETDEV_STAT(stats.tx_carrier_errors)},
+ {"tx_fifo_errors", IXGBE_NETDEV_STAT(stats.tx_fifo_errors)},
+ {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(stats.tx_heartbeat_errors)},
{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
{"tx_restart_queue", IXGBE_STAT(restart_queue)},
{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
- {"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)},
- {"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)},
{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
- {"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)},
{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
- {"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)},
- {"rx_header_split", IXGBE_STAT(rx_hdr_split)},
{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
@@ -192,6 +200,56 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->autoneg = AUTONEG_DISABLE;
}
+ /* Get PHY type */
+ switch (adapter->hw.phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_cu_unknown:
+ /* Copper 10G-BASET */
+ ecmd->port = PORT_TP;
+ break;
+ case ixgbe_phy_qt:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case ixgbe_phy_nl:
+ case ixgbe_phy_tw_tyco:
+ case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ switch (adapter->hw.phy.sfp_type) {
+ /* SFP+ devices, further checking needed */
+ case ixgbe_sfp_type_da_cu:
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ ecmd->port = PORT_DA;
+ break;
+ case ixgbe_sfp_type_sr:
+ case ixgbe_sfp_type_lr:
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case ixgbe_sfp_type_not_present:
+ ecmd->port = PORT_NONE;
+ break;
+ case ixgbe_sfp_type_unknown:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
+ }
+ break;
+ case ixgbe_phy_xaui:
+ ecmd->port = PORT_NONE;
+ break;
+ case ixgbe_phy_unknown:
+ case ixgbe_phy_generic:
+ case ixgbe_phy_sfp_unsupported:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
+ }
+
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
@@ -794,7 +852,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
- int i, err;
+ int i, err = 0;
u32 new_rx_count, new_tx_count;
bool need_update = false;
@@ -818,6 +876,16 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
msleep(1);
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i].count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto err_setup;
+ }
+
temp_tx_ring = kcalloc(adapter->num_tx_queues,
sizeof(struct ixgbe_ring), GFP_KERNEL);
if (!temp_tx_ring) {
@@ -875,8 +943,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* if rings need to be updated, here's the place to do it in one shot */
if (need_update) {
- if (netif_running(netdev))
- ixgbe_down(adapter);
+ ixgbe_down(adapter);
/* tx */
if (new_tx_count != adapter->tx_ring_count) {
@@ -893,13 +960,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
temp_rx_ring = NULL;
adapter->rx_ring_count = new_rx_count;
}
- }
-
- /* success! */
- err = 0;
- if (netif_running(netdev))
ixgbe_up(adapter);
-
+ }
err_setup:
clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
@@ -925,10 +987,22 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
int j, k;
int i;
+ char *p = NULL;
ixgbe_update_stats(adapter);
+ dev_get_stats(netdev);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
+ switch (ixgbe_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *) netdev +
+ ixgbe_gstrings_stats[i].stat_offset;
+ break;
+ case IXGBE_STATS:
+ p = (char *) adapter +
+ ixgbe_gstrings_stats[i].stat_offset;
+ break;
+ }
+
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
@@ -1247,15 +1321,15 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
return 0;
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
shared_int = false;
- if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
+ if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
netdev)) {
*data = 1;
return -1;
}
- } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
+ } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
netdev->name, netdev)) {
shared_int = false;
- } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
+ } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
netdev->name, netdev)) {
*data = 1;
return -1;
@@ -1929,7 +2003,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
/* only valid if in constant ITR mode */
- switch (adapter->itr_setting) {
+ switch (adapter->rx_itr_setting) {
case 0:
/* throttling disabled */
ec->rx_coalesce_usecs = 0;
@@ -1940,9 +2014,29 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
break;
default:
/* fixed interrupt rate mode */
- ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
+ ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
break;
}
+
+ /* if in mixed tx/rx queues per vector mode, report only rx settings */
+ if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
+ return 0;
+
+ /* only valid if in constant ITR mode */
+ switch (adapter->tx_itr_setting) {
+ case 0:
+ /* throttling disabled */
+ ec->tx_coalesce_usecs = 0;
+ break;
+ case 1:
+ /* dynamic ITR mode */
+ ec->tx_coalesce_usecs = 1;
+ break;
+ default:
+ ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
+ break;
+ }
+
return 0;
}
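The usecs/rate conversion used throughout the coalesce paths is simply rate = 1000000 / usecs, with bit 0 of the stored setting reserved as the "dynamic ITR" flag. A sketch with concrete numbers:

	u32 usecs = 125;			/* ethtool -C ethX rx-usecs 125 */
	u32 eitr_param  = 1000000 / usecs;	/* 8000 interrupts/sec */
	u32 itr_setting = eitr_param & ~1;	/* clear bit 0: not dynamic */
	/* reporting inverts it: 1000000 / 8000 == 125 usecs */
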
@@ -1953,6 +2047,11 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_q_vector *q_vector;
int i;
+ /* don't accept tx specific changes if we've got mixed RxTx vectors */
+ if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
+ && ec->tx_coalesce_usecs)
+ return -EINVAL;
+
if (ec->tx_max_coalesced_frames_irq)
adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
@@ -1963,26 +2062,49 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
/* store the value in ints/second */
- adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;
+ adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
/* static value of interrupt rate */
- adapter->itr_setting = adapter->eitr_param;
+ adapter->rx_itr_setting = adapter->rx_eitr_param;
/* clear the lower bit as its used for dynamic state */
- adapter->itr_setting &= ~1;
+ adapter->rx_itr_setting &= ~1;
} else if (ec->rx_coalesce_usecs == 1) {
/* 1 means dynamic mode */
- adapter->eitr_param = 20000;
- adapter->itr_setting = 1;
+ adapter->rx_eitr_param = 20000;
+ adapter->rx_itr_setting = 1;
} else {
/*
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
- adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE;
+ adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
else
- adapter->eitr_param = IXGBE_MAX_INT_RATE;
- adapter->itr_setting = 0;
+ adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
+ adapter->rx_itr_setting = 0;
+ }
+
+ if (ec->tx_coalesce_usecs > 1) {
+ /* check the limits */
+ if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
+ (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
+ return -EINVAL;
+
+ /* store the value in ints/second */
+ adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;
+
+ /* static value of interrupt rate */
+ adapter->tx_itr_setting = adapter->tx_eitr_param;
+
+ /* clear the lower bit as its used for dynamic state */
+ adapter->tx_itr_setting &= ~1;
+ } else if (ec->tx_coalesce_usecs == 1) {
+ /* 1 means dynamic mode */
+ adapter->tx_eitr_param = 10000;
+ adapter->tx_itr_setting = 1;
+ } else {
+ adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
+ adapter->tx_itr_setting = 0;
}
/* MSI/MSIx Interrupt Mode */
@@ -1992,17 +2114,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
for (i = 0; i < num_vectors; i++) {
q_vector = adapter->q_vector[i];
if (q_vector->txr_count && !q_vector->rxr_count)
- /* tx vector gets half the rate */
- q_vector->eitr = (adapter->eitr_param >> 1);
+ /* tx only */
+ q_vector->eitr = adapter->tx_eitr_param;
else
/* rx only or mixed */
- q_vector->eitr = adapter->eitr_param;
+ q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
}
/* Legacy Interrupt Mode */
} else {
q_vector = adapter->q_vector[0];
- q_vector->eitr = adapter->eitr_param;
+ q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index a3c9f99..da32a10 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -499,6 +499,10 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+#ifdef CONFIG_IXGBE_DCB
+ u8 tc;
+ u32 up2tc;
+#endif
/* create the pool for ddp if not created yet */
if (!fcoe->pool) {
@@ -540,6 +544,17 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
IXGBE_FCRXCTRL_FCOELLI |
IXGBE_FCRXCTRL_FCCRCBO |
(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
+#ifdef CONFIG_IXGBE_DCB
+ up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
+ for (i = 0; i < MAX_USER_PRIORITY; i++) {
+ tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
+ tc &= (MAX_TRAFFIC_CLASS - 1);
+ if (fcoe->tc == tc) {
+ fcoe->up = i;
+ break;
+ }
+ }
+#endif
}
/**
@@ -671,19 +686,7 @@ out_disable:
*/
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
{
- int i;
- u8 tc;
- u32 up2tc;
-
- up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
- for (i = 0; i < MAX_USER_PRIORITY; i++) {
- tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
- tc &= (MAX_TRAFFIC_CLASS - 1);
- if (adapter->fcoe.tc == tc)
- return 1 << i;
- }
-
- return 0;
+ return 1 << adapter->fcoe.up;
}
/**
@@ -710,6 +713,7 @@ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
up2tc &= (MAX_TRAFFIC_CLASS - 1);
adapter->fcoe.tc = (u8)up2tc;
+ adapter->fcoe.up = i;
return 0;
}
}
@@ -718,3 +722,49 @@ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
return 1;
}
#endif /* CONFIG_IXGBE_DCB */
+
+/**
+ * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
+ * @netdev : ixgbe adapter
+ * @wwn : the world wide name
+ * @type: the type of world wide name
+ *
+ * Returns the node or port world wide name if both the prefix and the san
+ * mac address are valid. The wwn is then formed following the NAA-2 IEEE
+ * Extended name identifier format (ref. T10 FC-LS Spec., Sec. 15.3).

+ *
+ * Returns : 0 on success
+ */
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+ int rc = -EINVAL;
+ u16 prefix = 0xffff;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+ switch (type) {
+ case NETDEV_FCOE_WWNN:
+ prefix = mac->wwnn_prefix;
+ break;
+ case NETDEV_FCOE_WWPN:
+ prefix = mac->wwpn_prefix;
+ break;
+ default:
+ break;
+ }
+
+ if ((prefix != 0xffff) &&
+ is_valid_ether_addr(mac->san_addr)) {
+ *wwn = ((u64) prefix << 48) |
+ ((u64) mac->san_addr[0] << 40) |
+ ((u64) mac->san_addr[1] << 32) |
+ ((u64) mac->san_addr[2] << 24) |
+ ((u64) mac->san_addr[3] << 16) |
+ ((u64) mac->san_addr[4] << 8) |
+ ((u64) mac->san_addr[5]);
+ rc = 0;
+ }
+ return rc;
+}
+
+
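The prefix/MAC packing above yields the 64-bit NAA-2 style name. A worked example (prefix and SAN MAC values here are purely illustrative):

	u16 prefix = 0x2001;					/* hypothetical */
	u8  san[6] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };	/* hypothetical */
	u64 wwn = ((u64)prefix << 48) | ((u64)san[0] << 40) |
		  ((u64)san[1] << 32) | ((u64)san[2] << 24) |
		  ((u64)san[3] << 16) | ((u64)san[4] << 8)  | san[5];
	/* wwn == 0x2001001b21abcdef */
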
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index b5dee7b..de8ff53 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -62,7 +62,10 @@ struct ixgbe_fcoe_ddp {
};
struct ixgbe_fcoe {
+#ifdef CONFIG_IXGBE_DCB
u8 tc;
+ u8 up;
+#endif
spinlock_t lock;
struct pci_pool *pool;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 59ad959..35ea8c9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -44,12 +44,13 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
+#include "ixgbe_dcb_82599.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "2.0.37-k2"
+#define DRV_VERSION "2.0.44-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
@@ -95,10 +96,18 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
+ board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
+ board_82599 },
/* required last entry */
{0, }
@@ -211,10 +220,20 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
struct ixgbe_tx_buffer
*tx_buffer_info)
{
- tx_buffer_info->dma = 0;
+ if (tx_buffer_info->dma) {
+ if (tx_buffer_info->mapped_as_page)
+ pci_unmap_page(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->dma = 0;
+ }
if (tx_buffer_info->skb) {
- skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
- DMA_TO_DEVICE);
dev_kfree_skb_any(tx_buffer_info->skb);
tx_buffer_info->skb = NULL;
}
@@ -222,6 +241,56 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
/* tx_buffer_info must be completely set up in the transmit path */
}
+/**
+ * ixgbe_tx_is_paused - check if the tx ring is paused
+ * @adapter: the ixgbe adapter
+ * @tx_ring: the corresponding tx_ring
+ *
+ * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
+ * corresponding TC of this tx_ring when checking TFCS.
+ *
+ * Returns : true if paused
+ */
+static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ u32 txoff = IXGBE_TFCS_TXOFF;
+
+#ifdef CONFIG_IXGBE_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ int tc;
+ int reg_idx = tx_ring->reg_idx;
+ int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ tc = reg_idx >> 2;
+ txoff = IXGBE_TFCS_TXOFF0;
+ } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ tc = 0;
+ txoff = IXGBE_TFCS_TXOFF;
+ if (dcb_i == 8) {
+ /* TC0, TC1 */
+ tc = reg_idx >> 5;
+ if (tc == 2) /* TC2, TC3 */
+ tc += (reg_idx - 64) >> 4;
+ else if (tc == 3) /* TC4, TC5, TC6, TC7 */
+ tc += 1 + ((reg_idx - 96) >> 3);
+ } else if (dcb_i == 4) {
+ /* TC0, TC1 */
+ tc = reg_idx >> 6;
+ if (tc == 1) {
+ tc += (reg_idx - 64) >> 5;
+ if (tc == 2) /* TC2, TC3 */
+ tc += (reg_idx - 96) >> 4;
+ }
+ }
+ }
+ txoff <<= tc;
+ }
+#endif
+ return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
+}
+
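To make the 82599 queue-to-TC arithmetic above concrete, a worked example for 8-TC mode:

	/* 82599, dcb_i == 8, tx queue reg_idx == 100 */
	int reg_idx = 100;
	int tc = reg_idx >> 5;				/* 3 */
	if (tc == 2)
		tc += (reg_idx - 64) >> 4;
	else if (tc == 3)
		tc += 1 + ((reg_idx - 96) >> 3);	/* 3 + 1 + 0 == 4 */
	/* queue 100 maps to TC4, so the bit tested in TFCS is
	 * IXGBE_TFCS_TXOFF << 4 */
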
static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
unsigned int eop)
@@ -233,7 +302,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
adapter->detect_tx_hung = false;
if (tx_ring->tx_buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
- !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
+ !ixgbe_tx_is_paused(adapter, tx_ring)) {
/* detected Tx unit hang */
union ixgbe_adv_tx_desc *tx_desc;
tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -346,7 +415,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
!test_bit(__IXGBE_DOWN, &adapter->state)) {
netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->restart_queue;
}
}
@@ -368,8 +437,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
tx_ring->total_packets += total_packets;
tx_ring->stats.packets += total_packets;
tx_ring->stats.bytes += total_bytes;
- adapter->net_stats.tx_bytes += total_bytes;
- adapter->net_stats.tx_packets += total_packets;
return (count < tx_ring->work_limit);
}
@@ -408,19 +475,23 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
u32 txctrl;
int cpu = get_cpu();
int q = tx_ring - adapter->tx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
if (tx_ring->cpu != cpu) {
- txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+ IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
}
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
tx_ring->cpu = cpu;
}
put_cpu();
@@ -553,7 +624,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- adapter->hw_csum_rx_good++;
}
static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
@@ -610,21 +680,18 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
if (!bi->skb) {
struct sk_buff *skb;
- skb = netdev_alloc_skb(adapter->netdev,
- (rx_ring->rx_buf_len +
- NET_IP_ALIGN));
+ /* netdev_alloc_skb reserves 32 bytes up front!! */
+ uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
+ skb = netdev_alloc_skb(adapter->netdev, bufsz);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- /*
- * Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
- * the 14 byte MAC header is removed
- */
- skb_reserve(skb, NET_IP_ALIGN);
+ /* advance the data pointer to the next cache line */
+ skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
+ - skb->data));
bi->skb = skb;
bi->dma = pci_map_single(pdev, skb->data,
@@ -676,12 +743,14 @@ static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
/**
* ixgbe_transform_rsc_queue - change rsc queue into a full packet
* @skb: pointer to the last skb in the rsc queue
+ * @count: pointer to number of packets coalesced in this context
*
* This function changes a queue full of hw rsc buffers into a completed
* packet. It uses the ->prev pointers to find the first packet and then
* turns it into the frag list owner.
**/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
+ u64 *count)
{
unsigned int frag_list_size = 0;
@@ -690,6 +759,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
frag_list_size += skb->len;
skb->prev = NULL;
skb = prev;
+ *count += 1;
}
skb_shinfo(skb)->frag_list = skb->next;
@@ -705,6 +775,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
int *work_done, int work_to_do)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@ -734,8 +805,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (hdr_info & IXGBE_RXDADV_SPH)
- adapter->rx_hdr_split++;
if (len > IXGBE_RX_HDR_SIZE)
len = IXGBE_RX_HDR_SIZE;
upper_len = le16_to_cpu(rx_desc->wb.upper.length);
@@ -745,7 +814,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
cleaned = true;
skb = rx_buffer_info->skb;
- prefetch(skb->data - NET_IP_ALIGN);
+ prefetch(skb->data);
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
@@ -791,14 +860,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
IXGBE_RXDADV_NEXTP_SHIFT;
next_buffer = &rx_ring->rx_buffer_info[nextp];
- rx_ring->rsc_count += (rsc_count - 1);
} else {
next_buffer = &rx_ring->rx_buffer_info[i];
}
if (staterr & IXGBE_RXD_STAT_EOP) {
if (skb->prev)
- skb = ixgbe_transform_rsc_queue(skb);
+ skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
+ rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
+ else
+ rx_ring->rsc_count++;
+ rx_ring->rsc_flush++;
+ }
rx_ring->stats.packets++;
rx_ring->stats.bytes += skb->len;
} else {
@@ -811,7 +886,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb->next = next_buffer->skb;
skb->next->prev = skb;
}
- adapter->non_eop_descs++;
+ rx_ring->non_eop_descs++;
goto next_desc;
}
@@ -876,8 +951,8 @@ next_desc:
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
- adapter->net_stats.rx_bytes += total_rx_bytes;
- adapter->net_stats.rx_packets += total_rx_packets;
+ netdev->stats.rx_bytes += total_rx_bytes;
+ netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -926,12 +1001,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
r_idx + 1);
}
- /* if this is a tx only vector halve the interrupt rate */
if (q_vector->txr_count && !q_vector->rxr_count)
- q_vector->eitr = (adapter->eitr_param >> 1);
+ /* tx only */
+ q_vector->eitr = adapter->tx_eitr_param;
else if (q_vector->rxr_count)
- /* rx only */
- q_vector->eitr = adapter->eitr_param;
+ /* rx or mixed */
+ q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
}
@@ -1150,6 +1225,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
adapter->link_check_timeout = jiffies;
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
+ IXGBE_WRITE_FLUSH(hw);
schedule_work(&adapter->watchdog_task);
}
}
@@ -1253,8 +1329,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
r_idx + 1);
}
- /* disable interrupts on this vector only */
- ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ /* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -1285,10 +1360,8 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
if (!q_vector->rxr_count)
return IRQ_HANDLED;
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
/* disable interrupts on this vector only */
- ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ /* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -1323,8 +1396,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx + 1);
}
- /* disable interrupts on this vector only */
- ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ /* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -1359,7 +1431,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->rx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
@@ -1420,7 +1492,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->rx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
@@ -1458,10 +1530,10 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
work_done = budget;
- /* If all Rx work done, exit the polling mode */
+ /* If all Tx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->tx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
@@ -1608,7 +1680,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
sprintf(adapter->name[vector], "%s:lsc", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+ ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
if (err) {
DPRINTK(PROBE, ERR,
"request_irq for msix_lsc failed: %d\n", err);
@@ -1779,10 +1851,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
err = ixgbe_request_msix_irqs(adapter);
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
- err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
+ err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
netdev->name, netdev);
} else {
- err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
+ err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
netdev->name, netdev);
}
@@ -1848,7 +1920,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
- EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
+ EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
ixgbe_set_ivar(adapter, 0, 0, 0);
ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -1885,18 +1957,49 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
adapter->tx_ring[i].head = IXGBE_TDH(j);
adapter->tx_ring[i].tail = IXGBE_TDT(j);
- /* Disable Tx Head Writeback RO bit, since this hoses
+ /*
+ * Disable Tx Head Writeback RO bit, since this hoses
* bookkeeping if things aren't delivered in order.
*/
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+ break;
+ case ixgbe_mac_82599EB:
+ default:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
+ break;
+ }
txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ default:
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
+ break;
+ }
}
+
if (hw->mac.type == ixgbe_mac_82599EB) {
+ u32 rttdcs;
+
+ /* disable the arbiter while setting MTQC */
+ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ rttdcs |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
/* We enable 8 traffic classes, DCB only */
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
IXGBE_MTQC_8TC_8TQ));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+
+ /* re-enable the arbiter */
+ rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
}
}
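The MTQC write above is bracketed by disabling and re-enabling the DCB transmit arbiter, since the queue layout must not change while the arbiter is running. A standalone sketch of that quiesce pattern, using a fake register file in place of the device BAR; the offsets and bit value are illustrative stand-ins for the IXGBE_RTTDCS/IXGBE_MTQC defines, not something to program hardware from:

#include <stdint.h>
#include <stdio.h>

#define RTTDCS        0x04900      /* arbiter control (illustrative offset) */
#define RTTDCS_ARBDIS 0x00000040u  /* arbiter disable bit */
#define MTQC          0x08120      /* tx queue layout (illustrative offset) */
#define MTQC_64Q_1PB  0x0u

static uint32_t regs[0x3000];      /* fake register file, word-indexed */

static uint32_t reg_read(uint32_t off)          { return regs[off >> 2]; }
static void reg_write(uint32_t off, uint32_t v) { regs[off >> 2] = v; }

static void program_mtqc(uint32_t mtqc_val)
{
	uint32_t rttdcs = reg_read(RTTDCS);

	reg_write(RTTDCS, rttdcs | RTTDCS_ARBDIS);   /* stop the arbiter */
	reg_write(MTQC, mtqc_val);                   /* now safe to change */
	reg_write(RTTDCS, rttdcs & ~RTTDCS_ARBDIS);  /* restart it */
}

int main(void)
{
	program_mtqc(MTQC_64Q_1PB);
	printf("MTQC=0x%x RTTDCS=0x%x\n",
	       (unsigned)reg_read(MTQC), (unsigned)reg_read(RTTDCS));
	return 0;
}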
@@ -1970,6 +2073,50 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_configure_rscctl - enable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @index: index of ring to set
+ **/
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
+{
+ struct ixgbe_ring *rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int j;
+ u32 rscctrl;
+ int rx_buf_len;
+
+ rx_ring = &adapter->rx_ring[index];
+ j = rx_ring->reg_idx;
+ rx_buf_len = rx_ring->rx_buf_len;
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
+ rscctrl |= IXGBE_RSCCTL_RSCEN;
+ /*
+ * we must limit the number of descriptors so that the
+ * total size of max desc * buf_len is not greater
+ * than 65535
+ */
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+#if (MAX_SKB_FRAGS > 16)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#elif (MAX_SKB_FRAGS > 8)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+#elif (MAX_SKB_FRAGS > 4)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+#endif
+ } else {
+ if (rx_buf_len < IXGBE_RXBUFFER_4096)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+ else if (rx_buf_len < IXGBE_RXBUFFER_8192)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+ else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
+}
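The new helper encapsulates the per-ring MAXDESC limit. A userspace sketch of the sizing rule in the non-packet-split branch, with threshold values mirroring the IXGBE_RXBUFFER_* defines; the invariant is that max_desc * buf_len never exceeds the 65535-byte RSC limit:

#include <stdio.h>

#define RXBUFFER_4096 4096
#define RXBUFFER_8192 8192

static int rsc_max_desc(int rx_buf_len)
{
	if (rx_buf_len < RXBUFFER_4096)
		return 16;       /* 16 * 4095 = 65520 < 65536 */
	else if (rx_buf_len < RXBUFFER_8192)
		return 8;        /* 8 * 8191 = 65528 < 65536 */
	else
		return 4;        /* 4 * 16383 = 65532 < 65536 */
}

int main(void)
{
	int lens[] = { 2048, 4096, 8192 };
	for (int i = 0; i < 3; i++)
		printf("buf_len %5d -> max %2d descriptors (%d bytes)\n",
		       lens[i], rsc_max_desc(lens[i]),
		       lens[i] * rsc_max_desc(lens[i]));
	return 0;
}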
+
+/**
* ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
* @adapter: board private structure
*
@@ -1990,7 +2137,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
u32 fctrl, hlreg0;
u32 reta = 0, mrqc = 0;
u32 rdrxctl;
- u32 rscctrl;
int rx_buf_len;
/* Decide whether to use packet split mode or not */
@@ -2148,36 +2294,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
/* Enable 82599 HW-RSC */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring = &adapter->rx_ring[i];
- j = rx_ring->reg_idx;
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
- rscctrl |= IXGBE_RSCCTL_RSCEN;
- /*
- * we must limit the number of descriptors so that the
- * total size of max desc * buf_len is not greater
- * than 65535
- */
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
-#if (MAX_SKB_FRAGS > 16)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (MAX_SKB_FRAGS > 8)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#elif (MAX_SKB_FRAGS > 4)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#else
- rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
-#endif
- } else {
- if (rx_buf_len < IXGBE_RXBUFFER_4096)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (rx_buf_len < IXGBE_RXBUFFER_8192)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
- else
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
- }
- IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
- }
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbe_configure_rscctl(adapter, i);
+
/* Disable RSC for ACK packets */
IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
(IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -2227,23 +2346,25 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
* not in DCB mode.
*/
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+
+ /* Disable CFI check */
+ ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+
+ /* enable VLAN tag stripping */
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ ctrl |= IXGBE_VLNCTRL_VME;
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- ctrl |= IXGBE_VLNCTRL_VFE;
- /* enable VLAN tag insert/strip */
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
for (i = 0; i < adapter->num_rx_queues; i++) {
+ u32 ctrl;
j = adapter->rx_ring[i].reg_idx;
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
}
}
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+
ixgbe_vlan_rx_add_vid(netdev, 0);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2434,7 +2555,10 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_restore_vlan(adapter);
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- netif_set_gso_max_size(netdev, 32768);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ netif_set_gso_max_size(netdev, 32768);
+ else
+ netif_set_gso_max_size(netdev, 65536);
ixgbe_configure_dcb(adapter);
} else {
netif_set_gso_max_size(netdev, 65536);
@@ -2590,7 +2714,22 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ /*
+ * use EIAM to auto-mask when MSI-X interrupt is asserted
+ * this saves a register write for every interrupt
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ break;
+ default:
+ case ixgbe_mac_82599EB:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+ break;
+ }
+ } else {
/* legacy interrupts, use EIAM to auto-mask when reading EICR,
* specifically only auto mask tx and rx interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
@@ -2926,6 +3065,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_napi_disable_all(adapter);
+ clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
+ del_timer_sync(&adapter->sfp_timer);
del_timer_sync(&adapter->watchdog_timer);
cancel_work_sync(&adapter->watchdog_task);
@@ -2989,7 +3130,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(adapter);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
@@ -3521,10 +3662,10 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
- * (roughly) twice the number of vectors as there are CPU's.
+ * (roughly) the same number of vectors as there are CPU's.
*/
v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
- (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+ (int)num_online_cpus()) + NON_Q_VECTORS;
/*
* At the same time, hardware can only support a maximum of
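A back-of-envelope sketch of the new, more conservative vector budget: one vector per queue, capped at roughly the online CPU count, plus the non-queue (link/other) vectors. num_online_cpus() is replaced by a plain parameter so the sketch stays userspace-runnable, and NON_Q_VECTORS == 1 is an assumption made for illustration:

#include <stdio.h>

#define NON_Q_VECTORS 1 /* assumption: one non-queue vector */

static int msix_budget(int rxq, int txq, int ncpus)
{
	int q = rxq + txq;
	return (q < ncpus ? q : ncpus) + NON_Q_VECTORS;
}

int main(void)
{
	/* 9 vectors here, versus 17 under the old "2x the CPUs" cap */
	printf("16 rx + 16 tx on 8 cpus -> %d vectors\n",
	       msix_budget(16, 16, 8));
	return 0;
}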
@@ -3599,7 +3740,10 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
- q_vector->eitr = adapter->eitr_param;
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ q_vector->eitr = adapter->tx_eitr_param;
+ else
+ q_vector->eitr = adapter->rx_eitr_param;
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
adapter->q_vector[q_idx] = q_vector;
@@ -3829,8 +3973,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
adapter->ring_feature[RING_F_FCOE].indices = 0;
+#ifdef CONFIG_IXGBE_DCB
/* Default traffic class to use for FCoE */
adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
+#endif
#endif /* IXGBE_FCOE */
}
@@ -3868,8 +4014,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->fc.disable_fc_autoneg = false;
/* enable itr by default in dynamic mode */
- adapter->itr_setting = 1;
- adapter->eitr_param = 20000;
+ adapter->rx_itr_setting = 1;
+ adapter->rx_eitr_param = 20000;
+ adapter->tx_itr_setting = 1;
+ adapter->tx_eitr_param = 10000;
/* set defaults for eitr in MegaBytes */
adapter->eitr_low = 10;
@@ -4359,20 +4507,32 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
**/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u64 total_mpc = 0;
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
- if (hw->mac.type == ixgbe_mac_82599EB) {
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
u64 rsc_count = 0;
+ u64 rsc_flush = 0;
for (i = 0; i < 16; i++)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
- for (i = 0; i < adapter->num_rx_queues; i++)
+ for (i = 0; i < adapter->num_rx_queues; i++) {
rsc_count += adapter->rx_ring[i].rsc_count;
- adapter->rsc_count = rsc_count;
+ rsc_flush += adapter->rx_ring[i].rsc_flush;
+ }
+ adapter->rsc_total_count = rsc_count;
+ adapter->rsc_total_flush = rsc_flush;
}
+ /* gather some stats to the adapter struct that are per queue */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->restart_queue += adapter->tx_ring[i].restart_queue;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
/* for packet buffers not used, the register should read 0 */
@@ -4409,10 +4569,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* 82598 hardware only has a 32 bit counter in the high register */
if (hw->mac.type == ixgbe_mac_82599EB) {
+ u64 tmp;
adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
- IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
+ tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
+ adapter->stats.gorc += (tmp << 32);
adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
- IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
+ tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
+ adapter->stats.gotc += (tmp << 32);
adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
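The GORC/GOTC change above stops discarding the high half of these counters: each is split into a 32-bit low register and a 4-bit high register, and both halves are folded into the software 64-bit total. A minimal sketch of that fold:

#include <stdint.h>
#include <stdio.h>

static uint64_t fold_36bit(uint32_t lo, uint32_t hi_reg)
{
	uint64_t hi = hi_reg & 0xF; /* only 4 bits of GORCH/GOTCH are valid */
	return (hi << 32) | lo;
}

int main(void)
{
	uint64_t total = 0;
	/* hypothetical register snapshots from two polling intervals */
	total += fold_36bit(0xFFFFF000u, 0x3);
	total += fold_36bit(0x00001000u, 0x0);
	printf("accumulated octets: %llu\n", (unsigned long long)total);
	return 0;
}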
@@ -4475,15 +4638,15 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
/* Fill out the OS statistics structure */
- adapter->net_stats.multicast = adapter->stats.mprc;
+ netdev->stats.multicast = adapter->stats.mprc;
/* Rx Errors */
- adapter->net_stats.rx_errors = adapter->stats.crcerrs +
+ netdev->stats.rx_errors = adapter->stats.crcerrs +
adapter->stats.rlec;
- adapter->net_stats.rx_dropped = 0;
- adapter->net_stats.rx_length_errors = adapter->stats.rlec;
- adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
- adapter->net_stats.rx_missed_errors = total_mpc;
+ netdev->stats.rx_dropped = 0;
+ netdev->stats.rx_length_errors = adapter->stats.rlec;
+ netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+ netdev->stats.rx_missed_errors = total_mpc;
}
/**
@@ -4752,14 +4915,12 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
iph->daddr, 0,
IPPROTO_TCP,
0);
- adapter->hw_tso_ctxt++;
} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
- adapter->hw_tso6_ctxt++;
}
i = tx_ring->next_to_use;
@@ -4878,7 +5039,6 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
- adapter->hw_csum_tx_good++;
i++;
if (i == tx_ring->count)
i = 0;
@@ -4895,23 +5055,16 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct sk_buff *skb, u32 tx_flags,
unsigned int first)
{
+ struct pci_dev *pdev = adapter->pdev;
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
unsigned int offset = 0, size, count = 0, i;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
- dma_addr_t *map;
i = tx_ring->next_to_use;
- if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
- dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
- return 0;
- }
-
- map = skb_shinfo(skb)->dma_maps;
-
if (tx_flags & IXGBE_TX_FLAGS_FCOE)
/* excluding fcoe_crc_eof for FCoE */
total -= sizeof(struct fcoe_crc_eof);
@@ -4922,7 +5075,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
+ tx_buffer_info->mapped_as_page = false;
+ tx_buffer_info->dma = pci_map_single(pdev,
+ skb->data + offset,
+ size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -4943,7 +5101,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
frag = &skb_shinfo(skb)->frags[f];
len = min((unsigned int)frag->size, total);
- offset = 0;
+ offset = frag->page_offset;
while (len) {
i++;
@@ -4954,7 +5112,13 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = map[f] + offset;
+ tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ frag->page,
+ offset, size,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->mapped_as_page = true;
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -4971,6 +5135,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
tx_ring->tx_buffer_info[first].next_to_watch = i;
return count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed tx_buffer_info map */
+ tx_buffer_info->dma = 0;
+ tx_buffer_info->time_stamp = 0;
+ tx_buffer_info->next_to_watch = 0;
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count >= 0) {
+ count--;
+ i--;
+ if (i < 0)
+ i += tx_ring->count;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ return 0; /* nothing left mapped; report failure to the caller */
}
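A userspace-shaped sketch of the dma_error unwind above: after a mapping failure, walk backwards through the ring, wrapping at the ring size, and release every buffer already mapped for this packet. unmap_buf() stands in for ixgbe_unmap_and_free_tx_resource():

#include <stdio.h>

#define RING_SIZE 8

static void unmap_buf(int idx)
{
	printf("unmapping buffer %d\n", idx);
}

static void unwind(int failed_idx, int mapped)
{
	int i = failed_idx;

	while (mapped-- > 0) {
		if (--i < 0)
			i += RING_SIZE;  /* wrap backwards around the ring */
		unmap_buf(i);
	}
}

int main(void)
{
	/* packet used slots 6, 7, 0; mapping slot 1 failed */
	unwind(1, 3);
	return 0;
}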
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5048,7 +5233,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
/* Right now, we support IPv4 only */
struct ixgbe_atr_input atr_input;
struct tcphdr *th;
- struct udphdr *uh;
struct iphdr *iph = ip_hdr(skb);
struct ethhdr *eth = (struct ethhdr *)skb->data;
u16 vlan_id, src_port, dst_port, flex_bytes;
@@ -5062,12 +5246,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
dst_port = th->dest;
l4type |= IXGBE_ATR_L4TYPE_TCP;
/* l4type IPv4 type is 0, no need to assign */
- } else if(iph->protocol == IPPROTO_UDP) {
- uh = udp_hdr(skb);
- src_port = uh->source;
- dst_port = uh->dest;
- l4type |= IXGBE_ATR_L4TYPE_UDP;
- /* l4type IPv4 type is 0, no need to assign */
} else {
/* Unsupported L4 header, just bail here */
return;
@@ -5097,8 +5275,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
struct ixgbe_ring *tx_ring, int size)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
netif_stop_subqueue(netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
@@ -5112,7 +5288,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_start_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->restart_queue;
return 0;
}
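For context, a sketch of the stop/wake dance this hunk is part of, now with the restart counter kept per ring: stop the queue, re-check the free-descriptor count (the real driver puts a memory barrier between the two, per the Herbert comment above), and un-stop if the cleanup path freed space in the meantime. The helpers and globals here are hypothetical stand-ins for the netif_* calls and ring state:

#include <stdio.h>

static int free_descs = 3;     /* shared with the cleanup path in reality */
static int queue_stopped;
static unsigned restart_queue; /* the per-ring stat */

static int maybe_stop_tx(int needed)
{
	if (free_descs >= needed)
		return 0;          /* common case: no stop needed */

	queue_stopped = 1;
	/* smp_mb() here in the kernel: order the stop vs. the re-check */
	if (free_descs < needed)
		return 1;          /* still full: stay stopped */

	queue_stopped = 0;         /* cleanup raced us and freed space */
	restart_queue++;
	return 0;
}

int main(void)
{
	printf("stop needed: %d (restarts: %u)\n",
	       maybe_stop_tx(5), restart_queue);
	return 0;
}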
@@ -5127,10 +5303,19 @@ static int ixgbe_maybe_stop_tx(struct net_device *netdev,
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int txq = smp_processor_id();
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- return smp_processor_id();
+ return txq;
+#ifdef IXGBE_FCOE
+ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ (skb->protocol == htons(ETH_P_FCOE))) {
+ txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+ txq += adapter->ring_feature[RING_F_FCOE].mask;
+ return txq;
+ }
+#endif
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
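The FCoE branch added above hashes the current CPU into the FCoE queue group. A sketch of the arithmetic: masking with (indices - 1) assumes indices is a power of two, and the group's first queue (kept in .mask on ixgbe) is added as an offset; the (4, 64) layout below is hypothetical:

#include <stdio.h>

static int fcoe_select_queue(int cpu, int fcoe_indices, int fcoe_offset)
{
	int txq = cpu & (fcoe_indices - 1); /* indices must be 2^n */
	return txq + fcoe_offset;
}

int main(void)
{
	for (int cpu = 0; cpu < 6; cpu++)
		printf("cpu %d -> txq %d\n", cpu,
		       fcoe_select_queue(cpu, 4, 64)); /* hypothetical layout */
	return 0;
}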
@@ -5142,10 +5327,11 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring;
+ struct netdev_queue *txq;
unsigned int first;
unsigned int tx_flags = 0;
u8 hdr_len = 0;
- int r_idx = 0, tso;
+ int tso;
int count = 0;
unsigned int f;
@@ -5153,13 +5339,13 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
- tx_flags |= (skb->queue_mapping << 13);
+ tx_flags |= ((skb->queue_mapping & 0x7) << 13);
}
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
if (skb->priority != TC_PRIO_CONTROL) {
- tx_flags |= (skb->queue_mapping << 13);
+ tx_flags |= ((skb->queue_mapping & 0x7) << 13);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
} else {
@@ -5168,17 +5354,18 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
}
}
- r_idx = skb->queue_mapping;
- tx_ring = &adapter->tx_ring[r_idx];
+ tx_ring = &adapter->tx_ring[skb->queue_mapping];
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
(skb->protocol == htons(ETH_P_FCOE))) {
tx_flags |= IXGBE_TX_FLAGS_FCOE;
#ifdef IXGBE_FCOE
- r_idx = smp_processor_id();
- r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
- r_idx += adapter->ring_feature[RING_F_FCOE].mask;
- tx_ring = &adapter->tx_ring[r_idx];
+#ifdef CONFIG_IXGBE_DCB
+ tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+ << IXGBE_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= ((adapter->fcoe.up << 13)
+ << IXGBE_TX_FLAGS_VLAN_SHIFT);
+#endif
#endif
}
/* four things can cause us to need a context descriptor */
@@ -5238,6 +5425,9 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
tx_ring->atr_count = 0;
}
}
+ txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
+ txq->tx_bytes += skb->len;
+ txq->tx_packets++;
ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
hdr_len);
ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
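The "& 0x7" tightening above keeps the queue mapping from spilling outside the 3-bit 802.1p priority field. A sketch of the packing, with the shift and mask mirroring the IXGBE_TX_FLAGS_VLAN_* usage in this function (the surrounding flag layout is simplified here):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_vlan_prio(uint32_t tx_flags, uint32_t queue)
{
	tx_flags &= ~(0x7u << 13);        /* clear old priority bits */
	tx_flags |= (queue & 0x7u) << 13; /* 3-bit 802.1p priority */
	return tx_flags;
}

int main(void)
{
	/* queue 9 would have corrupted higher bits without the mask */
	printf("flags: 0x%04x\n",
	       (unsigned)pack_vlan_prio(0, 9)); /* 0x2000, not 0x12000 */
	return 0;
}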
@@ -5252,21 +5442,6 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
}
/**
- * ixgbe_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- /* only return the current stats */
- return &adapter->net_stats;
-}
-
-/**
* ixgbe_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -5396,7 +5571,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
.ndo_select_queue = ixgbe_select_queue,
- .ndo_get_stats = ixgbe_get_stats,
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_set_multicast_list = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
@@ -5415,6 +5589,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
.ndo_fcoe_enable = ixgbe_fcoe_enable,
.ndo_fcoe_disable = ixgbe_fcoe_disable,
+ .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
#endif /* IXGBE_FCOE */
};
@@ -5471,12 +5646,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_pci_reg;
}
- err = pci_enable_pcie_error_reporting(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
- "0x%x\n", err);
- /* non-fatal, continue */
- }
+ pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
@@ -5785,7 +5955,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int err;
set_bit(__IXGBE_DOWN, &adapter->state);
/* clear the module not found bit to make sure the worker won't
@@ -5836,10 +6005,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
free_netdev(netdev);
- err = pci_disable_pcie_error_reporting(pdev);
- if (err)
- dev_err(&pdev->dev,
- "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
@@ -5891,6 +6057,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
pci_wake_from_d3(pdev, false);
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 8761d78..f3e8d52 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -49,9 +49,13 @@
#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
#define IXGBE_DEV_ID_82599_KX4 0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
+#define IXGBE_DEV_ID_82599_KR 0x1517
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -1336,6 +1340,8 @@
#define IXGBE_AUTOC_KX4_SUPP 0x80000000
#define IXGBE_AUTOC_KX_SUPP 0x40000000
#define IXGBE_AUTOC_PAUSE 0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
#define IXGBE_AUTOC_RF 0x08000000
#define IXGBE_AUTOC_PD_TMR 0x06000000
#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
@@ -1404,6 +1410,8 @@
#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
+
/* PCS1GLSTA Bit Masks */
#define IXGBE_PCS1GLSTA_LINK_OK 1
#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
@@ -1424,6 +1432,11 @@
#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE 0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE 0x0400
+#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+
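Bits like these are typically fed into the standard 802.3 pause resolution: local (AUTOC) and link-partner (ANLP1) symmetric/asymmetric abilities combine into an rx/tx pause pair. A generic sketch of that decision table, as an illustration only; the driver's actual consumer of these defines lives in ixgbe_common.c:

#include <stdbool.h>
#include <stdio.h>

#define SYM 0x1
#define ASM 0x2

static void resolve_pause(unsigned local, unsigned partner,
                          bool *rx_pause, bool *tx_pause)
{
	*rx_pause = *tx_pause = false;

	if ((local & SYM) && (partner & SYM)) {
		*rx_pause = *tx_pause = true;    /* symmetric pause */
	} else if ((local & ASM) && (partner & ASM)) {
		if (local & SYM)
			*rx_pause = true;        /* we honor pause frames */
		else if (partner & SYM)
			*tx_pause = true;        /* we may send pause frames */
	}
}

int main(void)
{
	bool rx, tx;
	resolve_pause(SYM | ASM, ASM, &rx, &tx);
	printf("rx_pause=%d tx_pause=%d\n", rx, tx); /* 1 0 */
	return 0;
}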
/* SW Semaphore Register bitmasks */
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
@@ -1527,6 +1540,16 @@
#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
#define IXGBE_FW_PATCH_VERSION_4 0x7
+/* Alternative SAN MAC Address Block */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
+
/* PCI Bus Info */
#define IXGBE_PCI_LINK_STATUS 0xB2
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
@@ -2160,6 +2183,14 @@ enum ixgbe_fc_mode {
ixgbe_fc_default
};
+/* Smart Speed Settings */
+#define IXGBE_SMARTSPEED_MAX_RETRIES 3
+enum ixgbe_smart_speed {
+ ixgbe_smart_speed_auto = 0,
+ ixgbe_smart_speed_on,
+ ixgbe_smart_speed_off
+};
+
/* PCI bus types */
enum ixgbe_bus_type {
ixgbe_bus_type_unknown = 0,
@@ -2325,6 +2356,7 @@ struct ixgbe_mac_operations {
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
+ s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
void (*set_lan_id)(struct ixgbe_hw *);
@@ -2396,6 +2428,10 @@ struct ixgbe_mac_info {
u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ /* prefix for World Wide Node Name (WWNN) */
+ u16 wwnn_prefix;
+ /* prefix for World Wide Port Name (WWPN) */
+ u16 wwpn_prefix;
s32 mc_filter_type;
u32 mcft_size;
u32 vft_size;
@@ -2420,6 +2456,8 @@ struct ixgbe_phy_info {
enum ixgbe_media_type media_type;
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
+ enum ixgbe_smart_speed smart_speed;
+ bool smart_speed_active;
bool multispeed_fiber;
};