Diffstat (limited to 'drivers/net/ixgbe')

-rw-r--r--  drivers/net/ixgbe/Makefile           |    2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h            |   47
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c      |  217
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c      |  348
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c     |   64
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.c  |    4
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c     |  119
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c    |   62
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c       |  552
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.h       |   66
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c       | 1025
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c        |  136
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.h        |    1
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h       |  164

14 files changed, 2251 insertions, 556 deletions
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index b3f8208..21b41f4 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -37,3 +37,5 @@ ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
 ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
                              ixgbe_dcb_82599.o ixgbe_dcb_nl.o
+
+ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index c26433d..05a2405 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -36,6 +36,10 @@
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb.h"
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#define IXGBE_FCOE
+#include "ixgbe_fcoe.h"
+#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
 #ifdef CONFIG_IXGBE_DCA
 #include <linux/dca.h>
 #endif
@@ -71,6 +75,8 @@
 #define IXGBE_RXBUFFER_128   128    /* Used for packet split */
 #define IXGBE_RXBUFFER_256   256    /* Used for packet split */
 #define IXGBE_RXBUFFER_2048  2048
+#define IXGBE_RXBUFFER_4096  4096
+#define IXGBE_RXBUFFER_8192  8192
 #define IXGBE_MAX_RXBUFFER   16384  /* largest size for a single descriptor */
 #define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
@@ -84,6 +90,8 @@
 #define IXGBE_TX_FLAGS_VLAN  (u32)(1 << 1)
 #define IXGBE_TX_FLAGS_TSO   (u32)(1 << 2)
 #define IXGBE_TX_FLAGS_IPV4  (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE  (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO   (u32)(1 << 5)
 #define IXGBE_TX_FLAGS_VLAN_MASK       0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK  0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT      16
@@ -147,6 +155,7 @@ struct ixgbe_ring {
 	u16 work_limit;  /* max work per interrupt */
 	u16 rx_buf_len;
+	u64 rsc_count;   /* stat for coalesced packets */
 };

 enum ixgbe_ring_f_enum {
@@ -154,6 +163,9 @@
 	RING_F_DCB,
 	RING_F_VMDQ,
 	RING_F_RSS,
+#ifdef IXGBE_FCOE
+	RING_F_FCOE,
+#endif /* IXGBE_FCOE */
 	RING_F_ARRAY_SIZE  /* must be last in enum set */
 };

@@ -161,6 +173,9 @@
 #define IXGBE_MAX_DCB_INDICES   8
 #define IXGBE_MAX_RSS_INDICES  16
 #define IXGBE_MAX_VMDQ_INDICES 16
+#ifdef IXGBE_FCOE
+#define IXGBE_MAX_FCOE_INDICES  8
+#endif /* IXGBE_FCOE */
 struct ixgbe_ring_feature {
 	int indices;
 	int mask;
@@ -186,6 +201,7 @@ struct ixgbe_q_vector {
 	u8 tx_itr;
 	u8 rx_itr;
 	u32 eitr;
+	u32 v_idx;  /* vector index in list */
 };

 /* Helper macros to switch between ints/sec and what the register uses.
@@ -208,6 +224,10 @@ struct ixgbe_q_vector { (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i])) #define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 +#ifdef IXGBE_FCOE +/* Use 3K as the baby jumbo frame size for FCoE */ +#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 +#endif /* IXGBE_FCOE */ #define OTHER_VECTOR 1 #define NON_Q_VECTORS (OTHER_VECTOR) @@ -229,11 +249,12 @@ struct ixgbe_adapter { struct vlan_group *vlgrp; u16 bd_number; struct work_struct reset_task; - struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS]; + struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; struct ixgbe_dcb_config dcb_cfg; struct ixgbe_dcb_config temp_dcb_cfg; u8 dcb_set_bitmap; + enum ixgbe_fc_mode last_lfc_mode; /* Interrupt Throttle Rate */ u32 itr_setting; @@ -294,6 +315,9 @@ struct ixgbe_adapter { #define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23) #define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24) #define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) +#define IXGBE_FLAG_RSC_CAPABLE (u32)(1 << 26) +#define IXGBE_FLAG_RSC_ENABLED (u32)(1 << 27) +#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) /* default to trying for four seconds */ #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) @@ -325,6 +349,10 @@ struct ixgbe_adapter { struct timer_list sfp_timer; struct work_struct multispeed_fiber_task; struct work_struct sfp_config_module_task; +#ifdef IXGBE_FCOE + struct ixgbe_fcoe fcoe; +#endif /* IXGBE_FCOE */ + u64 rsc_count; u32 wol; u16 eeprom_version; }; @@ -363,10 +391,21 @@ extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *) extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); -extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter); extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); -void ixgbe_napi_add_all(struct ixgbe_adapter *adapter); -void ixgbe_napi_del_all(struct ixgbe_adapter *adapter); +extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); extern void ixgbe_write_eitr(struct ixgbe_adapter *, int, u32); +#ifdef IXGBE_FCOE +extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); +extern int ixgbe_fso(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, u8 *hdr_len); +extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); +extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); +extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); +#endif /* IXGBE_FCOE */ #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 4791238..afc9fe3 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -75,18 +75,49 @@ static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw) static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; + + /* Call PHY identify routine to get the phy type */ + ixgbe_identify_phy_generic(hw); + + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; + 
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); + + return 0; +} + +/** + * ixgbe_init_phy_ops_82598 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during get_invariants because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val = 0; u16 list_offset, data_offset; - /* Set the bus information prior to PHY identification */ - mac->ops.get_bus_info(hw); + /* Identify the PHY */ + phy->ops.identify(hw); - /* Call PHY identify routine to get the phy type */ - ixgbe_identify_phy_generic(hw); + /* Overwrite the link function pointers if copper PHY */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = &ixgbe_setup_copper_link_82598; + mac->ops.setup_link_speed = + &ixgbe_setup_copper_link_speed_82598; + mac->ops.get_link_capabilities = + &ixgbe_get_copper_link_capabilities_82598; + } - /* PHY Init */ - switch (phy->type) { + switch (hw->phy.type) { case ixgbe_phy_tn: phy->ops.check_link = &ixgbe_check_phy_link_tnx; phy->ops.get_firmware_version = @@ -106,8 +137,8 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) /* Check to see if SFP+ module is supported */ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, - &list_offset, - &data_offset); + &list_offset, + &data_offset); if (ret_val != 0) { ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; goto out; @@ -117,21 +148,6 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) break; } - if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { - mac->ops.setup_link = &ixgbe_setup_copper_link_82598; - mac->ops.setup_link_speed = - &ixgbe_setup_copper_link_speed_82598; - mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_82598; - } - - mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; - mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; - mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); - out: return ret_val; } @@ -149,12 +165,19 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, bool *autoneg) { s32 status = 0; + u32 autoc = 0; /* * Determine link capabilities based on the stored value of AUTOC, - * which represents EEPROM defaults. + * which represents EEPROM defaults. If AUTOC value has not been + * stored, use the current register value. 
*/ - switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) { + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; *autoneg = false; @@ -173,9 +196,9 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, case IXGBE_AUTOC_LMS_KX4_AN: case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: *speed = IXGBE_LINK_SPEED_UNKNOWN; - if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP) + if (autoc & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP) + if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; break; @@ -206,14 +229,13 @@ static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, *speed = 0; *autoneg = true; - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, + status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, &speed_ability); if (status == 0) { - if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) + if (speed_ability & MDIO_SPEED_10G) *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) + if (speed_ability & MDIO_PMA_SPEED_1000) *speed |= IXGBE_LINK_SPEED_1GB_FULL; } @@ -322,6 +344,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) } /* Enable 802.3x based flow control settings. */ + fctrl_reg |= IXGBE_FCTRL_DPF; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); @@ -340,7 +363,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) } /* Configure pause time (2 TCs per register) */ - reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num)); + reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); if ((packetbuf_num & 1) == 0) reg = (reg & 0xFFFF0000) | hw->fc.pause_time; else @@ -380,9 +403,11 @@ static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) * because it causes the controller to just blast out fc packets. */ if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { - hw_dbg(hw, "Invalid water mark configuration\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; + if (hw->fc.requested_mode != ixgbe_fc_none) { + hw_dbg(hw, "Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } } /* @@ -500,9 +525,9 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, * clear indicates active; set indicates inactive. 
*/ if (hw->phy.type == ixgbe_phy_nl) { - hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); - hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); - hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, + hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); + hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, &adapt_comp_reg); if (link_up_wait_to_complete) { for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { @@ -515,10 +540,10 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, } msleep(100); hw->phy.ops.read_reg(hw, 0xC79F, - IXGBE_TWINAX_DEV, + MDIO_MMD_PMAPMD, &link_reg); hw->phy.ops.read_reg(hw, 0xC00C, - IXGBE_TWINAX_DEV, + MDIO_MMD_PMAPMD, &adapt_comp_reg); } } else { @@ -716,14 +741,23 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) } /* Reset PHY */ - if (hw->phy.reset_disable == false) + if (hw->phy.reset_disable == false) { + /* PHY ops must be identified and initialized prior to reset */ + + /* Init PHY and function pointers, perform SFP setup */ + status = hw->phy.ops.init(hw); + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + hw->phy.ops.reset(hw); + } /* * Prevent the PCI-E bus from from hanging by disabling PCI-E master * access and verify no pending requests before reset */ - if (ixgbe_disable_pcie_master(hw) != 0) { + status = ixgbe_disable_pcie_master(hw); + if (status != 0) { status = IXGBE_ERR_MASTER_REQUESTS_PENDING; hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); } @@ -767,9 +801,16 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); } + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table + */ + hw->mac.ops.init_rx_addrs(hw); + /* Store the permanent mac address */ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); +reset_hw_out: return status; } @@ -954,14 +995,14 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, + MDIO_MMD_PMAPMD, sfp_addr); /* Poll status */ for (i = 0; i < 100; i++) { hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, + MDIO_MMD_PMAPMD, &sfp_stat); sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) @@ -977,7 +1018,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, /* Read data */ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); + MDIO_MMD_PMAPMD, &sfp_data); *eeprom_data = (u8)(sfp_data >> 8); } else { @@ -998,35 +1039,56 @@ out: static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) { u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + + hw->phy.ops.identify(hw); + + /* Copper PHY must be checked before AUTOC LMS to determine correct + * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ + if (hw->phy.type == ixgbe_phy_tn || + hw->phy.type == ixgbe_phy_cu_unknown) { + hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, + &ext_ability); + if (ext_ability & MDIO_PMA_EXTABLE_10GBT) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & 
MDIO_PMA_EXTABLE_1000BT) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & MDIO_PMA_EXTABLE_100BTX) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + } - switch (hw->device_id) { - case IXGBE_DEV_ID_82598: - /* Default device ID is mezzanine card KX/KX4 */ - physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | - IXGBE_PHYSICAL_LAYER_1000BASE_KX); - break; - case IXGBE_DEV_ID_82598_BX: - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; - case IXGBE_DEV_ID_82598EB_CX4: - case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; - break; - case IXGBE_DEV_ID_82598_DA_DUAL_PORT: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; + else + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; break; - case IXGBE_DEV_ID_82598AF_DUAL_PORT: - case IXGBE_DEV_ID_82598AF_SINGLE_PORT: - case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else /* XAUI */ + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; break; - case IXGBE_DEV_ID_82598EB_XF_LR: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; break; - case IXGBE_DEV_ID_82598AT: - physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_T | - IXGBE_PHYSICAL_LAYER_1000BASE_T); + default: break; - case IXGBE_DEV_ID_82598EB_SFP_LOM: + } + + if (hw->phy.type == ixgbe_phy_nl) { hw->phy.ops.identify_sfp(hw); switch (hw->phy.sfp_type) { @@ -1043,13 +1105,25 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; break; } - break; + } + switch (hw->device_id) { + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case IXGBE_DEV_ID_82598EB_XF_LR: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; default: - physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; break; } +out: return physical_layer; } @@ -1099,6 +1173,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = { static struct ixgbe_phy_operations phy_ops_82598 = { .identify = &ixgbe_identify_phy_generic, .identify_sfp = &ixgbe_identify_sfp_module_generic, + .init = &ixgbe_init_phy_ops_82598, .reset = &ixgbe_reset_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, .write_reg = &ixgbe_write_phy_reg_generic, diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 29771fb..39d7b64 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -100,6 +100,9 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { ixgbe_init_mac_link_ops_82599(hw); + + hw->phy.ops.reset = NULL; + ret_val = 
ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); @@ -146,51 +149,60 @@ u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw) static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val; - /* Set the bus information prior to PHY identification */ - mac->ops.get_bus_info(hw); + ixgbe_init_mac_link_ops_82599(hw); - /* Call PHY identify routine to get the Cu or SFI phy type */ - ret_val = phy->ops.identify(hw); + mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; + mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; + mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw); - if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto get_invariants_out; + return 0; +} - ixgbe_init_mac_link_ops_82599(hw); +/** + * ixgbe_init_phy_ops_82599 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during get_invariants because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; - /* Setup SFP module if there is one present. */ - ret_val = mac->ops.setup_sfp(hw); + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_82599(hw); /* If copper media, overwrite with copper function pointers */ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { mac->ops.setup_link = &ixgbe_setup_copper_link_82599; mac->ops.setup_link_speed = - &ixgbe_setup_copper_link_speed_82599; + &ixgbe_setup_copper_link_speed_82599; mac->ops.get_link_capabilities = &ixgbe_get_copper_link_capabilities_82599; } - /* PHY Init */ + /* Set necessary function pointers based on phy type */ switch (hw->phy.type) { case ixgbe_phy_tn: phy->ops.check_link = &ixgbe_check_phy_link_tnx; phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_tnx; + &ixgbe_get_phy_firmware_version_tnx; break; default: break; } - mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; - mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; - mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw); - -get_invariants_out: return ret_val; } @@ -207,8 +219,19 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, bool *negotiation) { s32 status = 0; + u32 autoc = 0; + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. If AUTOC value has not been + * stored, use the current register value. 
+ */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) { + switch (autoc & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; *negotiation = false; @@ -232,22 +255,22 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, case IXGBE_AUTOC_LMS_KX4_KX_KR: case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: *speed = IXGBE_LINK_SPEED_UNKNOWN; - if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP) + if (autoc & IXGBE_AUTOC_KR_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP) + if (autoc & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP) + if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; *negotiation = true; break; case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: *speed = IXGBE_LINK_SPEED_100_FULL; - if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP) + if (autoc & IXGBE_AUTOC_KR_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP) + if (autoc & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP) + if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; *negotiation = true; break; @@ -291,14 +314,13 @@ static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw, *speed = 0; *autoneg = true; - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, + status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, &speed_ability); if (status == 0) { - if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) + if (speed_ability & MDIO_SPEED_10G) *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) + if (speed_ability & MDIO_PMA_SPEED_1000) *speed |= IXGBE_LINK_SPEED_1GB_FULL; } @@ -325,6 +347,7 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_82599: case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_XAUI_LOM: /* Default device ID is mezzanine card KX/KX4 */ media_type = ixgbe_media_type_backplane; break; @@ -558,6 +581,7 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw, s32 status = 0; u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 orig_autoc = 0; u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; @@ -569,6 +593,13 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw, hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); speed &= link_capabilities; + /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ + if (hw->mac.orig_link_settings_stored) + orig_autoc = hw->mac.orig_autoc; + else + orig_autoc = autoc; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) { status = IXGBE_ERR_LINK_SETUP; } else if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || @@ -577,9 +608,9 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw, /* Set KX4/KX/KR support according to speed requested */ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); if (speed & IXGBE_LINK_SPEED_10GB_FULL) - if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP) + if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) autoc |= IXGBE_AUTOC_KX4_SUPP; - if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP) + if (orig_autoc & IXGBE_AUTOC_KR_SUPP) 
autoc |= IXGBE_AUTOC_KR_SUPP; if (speed & IXGBE_LINK_SPEED_1GB_FULL) autoc |= IXGBE_AUTOC_KX_SUPP; @@ -705,14 +736,30 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) /* Call adapter stop to disable tx/rx and clear interrupts */ hw->mac.ops.stop_adapter(hw); + /* PHY ops must be identified and initialized prior to reset */ + + /* Init PHY and function pointers, perform SFP setup */ + status = hw->phy.ops.init(hw); + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } + /* Reset PHY */ - hw->phy.ops.reset(hw); + if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) + hw->phy.ops.reset(hw); /* * Prevent the PCI-E bus from from hanging by disabling PCI-E master * access and verify no pending requests before reset */ - if (ixgbe_disable_pcie_master(hw) != 0) { + status = ixgbe_disable_pcie_master(hw); + if (status != 0) { status = IXGBE_ERR_MASTER_REQUESTS_PENDING; hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); } @@ -770,9 +817,30 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) } } + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + /* Store the permanent mac address */ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + +reset_hw_out: return status; } @@ -1093,53 +1161,100 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) { u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; u8 comp_codes_10g = 0; - switch (hw->device_id) { - case IXGBE_DEV_ID_82599: - case IXGBE_DEV_ID_82599_KX4: - /* Default device ID is mezzanine card KX/KX4 */ - physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | - IXGBE_PHYSICAL_LAYER_1000BASE_KX); + hw->phy.ops.identify(hw); + + if (hw->phy.type == ixgbe_phy_tn || + hw->phy.type == ixgbe_phy_cu_unknown) { + hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, + &ext_ability); + if (ext_ability & MDIO_PMA_EXTABLE_10GBT) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & MDIO_PMA_EXTABLE_1000BT) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & MDIO_PMA_EXTABLE_100BTX) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | + IXGBE_PHYSICAL_LAYER_1000BASE_BX; + goto out; + } else + /* SFI mode so read 
SFP module */ + goto sfp_check; break; - case IXGBE_DEV_ID_82599_SFP: - hw->phy.ops.identify_sfp(hw); + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; + goto out; + break; + case IXGBE_AUTOC_LMS_10G_SERIAL: + if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + if (autoc & IXGBE_AUTOC_KR_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + break; + default: + goto out; + break; + } - switch (hw->phy.sfp_type) { - case ixgbe_sfp_type_da_cu: - case ixgbe_sfp_type_da_cu_core0: - case ixgbe_sfp_type_da_cu_core1: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; - break; - case ixgbe_sfp_type_sr: +sfp_check: + /* SFP check must be done last since DA modules are sometimes used to + * test KR mode - we need to id KR mode correctly before SFP module. + * Call identify_sfp because the pluggable module may have changed */ + hw->phy.ops.identify_sfp(hw); + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + goto out; + + switch (hw->phy.type) { + case ixgbe_phy_tw_tyco: + case ixgbe_phy_tw_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; - break; - case ixgbe_sfp_type_lr: + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; - break; - case ixgbe_sfp_type_srlr_core0: - case ixgbe_sfp_type_srlr_core1: - hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_10GBE_COMP_CODES, - &comp_codes_10g); - if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) - physical_layer = - IXGBE_PHYSICAL_LAYER_10GBASE_SR; - else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) - physical_layer = - IXGBE_PHYSICAL_LAYER_10GBASE_LR; - else - physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - default: - physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - break; - } break; default: - physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; break; } +out: return physical_layer; } @@ -1187,6 +1302,90 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) return 0; } +/** + * ixgbe_get_device_caps_82599 - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. 
+ **/ +s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps) +{ + hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); + + return 0; +} + +/** + * ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599 + * @hw: pointer to hardware structure + * @san_mac_offset: SAN MAC address offset + * + * This function will read the EEPROM location for the SAN MAC address + * pointer, and returns the value at that location. This is used in both + * get and set mac_addr routines. + **/ +s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw, + u16 *san_mac_offset) +{ + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. + */ + hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); + + return 0; +} + +/** + * ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599 + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + * set_lan_id() is called by identify_sfp(), but this cannot be relied + * upon for non-SFP connections, so we must call it here. + **/ +s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + u16 san_mac_data, san_mac_offset; + u8 i; + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. If they're not, no point in calling set_lan_id() here. + */ + ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset); + + if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { + /* + * No addresses available in this EEPROM. It's not an + * error though, so just wipe the local address and return. + */ + for (i = 0; i < 6; i++) + san_mac_addr[i] = 0xFF; + + goto san_mac_addr_out; + } + + /* make sure we know which port we need to program */ + hw->mac.ops.set_lan_id(hw); + /* apply the port offset to the address offset */ + (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + for (i = 0; i < 3; i++) { + hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); + san_mac_addr[i * 2] = (u8)(san_mac_data); + san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); + san_mac_offset++; + } + +san_mac_addr_out: + return 0; +} + static struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, @@ -1196,6 +1395,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599, .enable_rx_dma = &ixgbe_enable_rx_dma_82599, .get_mac_addr = &ixgbe_get_mac_addr_generic, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599, + .get_device_caps = &ixgbe_get_device_caps_82599, .stop_adapter = &ixgbe_stop_adapter_generic, .get_bus_info = &ixgbe_get_bus_info_generic, .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, @@ -1236,6 +1437,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = { static struct ixgbe_phy_operations phy_ops_82599 = { .identify = &ixgbe_identify_phy_82599, .identify_sfp = &ixgbe_identify_sfp_module_generic, + .init = &ixgbe_init_phy_ops_82599, .reset = &ixgbe_reset_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, .write_reg = &ixgbe_write_phy_reg_generic, diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 186a650..0cc3c47 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -71,12 +71,6 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) /* Identify the PHY */ hw->phy.ops.identify(hw); - /* - * Store MAC address from RAR0, clear receive address registers, and - * clear the multicast table - */ - hw->mac.ops.init_rx_addrs(hw); - /* Clear the VLAN filter table */ hw->mac.ops.clear_vfta(hw); @@ -1595,6 +1589,13 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num) u32 mflcn_reg; u32 fccfg_reg; u32 reg; + u32 rx_pba_size; + +#ifdef CONFIG_DCB + if (hw->fc.requested_mode == ixgbe_fc_pfc) + goto out; + +#endif /* CONFIG_DCB */ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); @@ -1653,24 +1654,45 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num) } /* Enable 802.3x based flow control settings. */ + mflcn_reg |= IXGBE_MFLCN_DPF; IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); - /* Set up and enable Rx high/low water mark thresholds, enable XON. */ - if (hw->fc.current_mode & ixgbe_fc_tx_pause) { - if (hw->fc.send_xon) - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), - (hw->fc.low_water | IXGBE_FCRTL_XONE)); - else - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), - hw->fc.low_water); + reg = IXGBE_READ_REG(hw, IXGBE_MTQC); + /* Thresholds are different for link flow control when in DCB mode */ + if (reg & IXGBE_MTQC_RT_ENA) { + /* Always disable XON for LFC when in DCB mode */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), 0); + + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); + reg = (rx_pba_size >> 2) & 0xFFE0; + if (hw->fc.current_mode & ixgbe_fc_tx_pause) + reg |= IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg); + } else { + /* + * Set up and enable Rx high/low water mark thresholds, + * enable XON. 
+ */ + if (hw->fc.current_mode & ixgbe_fc_tx_pause) { + if (hw->fc.send_xon) { + IXGBE_WRITE_REG(hw, + IXGBE_FCRTL_82599(packetbuf_num), + (hw->fc.low_water | + IXGBE_FCRTL_XONE)); + } else { + IXGBE_WRITE_REG(hw, + IXGBE_FCRTL_82599(packetbuf_num), + hw->fc.low_water); + } - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), - (hw->fc.high_water | IXGBE_FCRTH_FCEN)); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), + (hw->fc.high_water | IXGBE_FCRTH_FCEN)); + } } /* Configure pause time (2 TCs per register) */ - reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num)); + reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); if ((packetbuf_num & 1) == 0) reg = (reg & 0xFFFF0000) | hw->fc.pause_time; else @@ -1859,9 +1881,11 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num) * because it causes the controller to just blast out fc packets. */ if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { - hw_dbg(hw, "Invalid water mark configuration\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; + if (hw->fc.requested_mode != ixgbe_fc_none) { + hw_dbg(hw, "Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } } /* diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c index 6220627..f302638 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c @@ -294,6 +294,9 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u32 reg, rx_pba_size; u8 i; + if (!dcb_config->pfc_mode_enable) + goto out; + /* Enable Transmit Priority Flow Control */ reg = IXGBE_READ_REG(hw, IXGBE_RMCS); reg &= ~IXGBE_RMCS_TFCE_802_3X; @@ -341,6 +344,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); +out: return 0; } diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index bd0a0c2..d56890f 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -28,15 +28,22 @@ #include "ixgbe.h" #include <linux/dcbnl.h> +#include "ixgbe_dcb_82598.h" +#include "ixgbe_dcb_82599.h" /* Callbacks for DCB netlink in the kernel */ #define BIT_DCB_MODE 0x01 #define BIT_PFC 0x02 #define BIT_PG_RX 0x04 #define BIT_PG_TX 0x08 -#define BIT_BCN 0x10 +#define BIT_RESETLINK 0x40 #define BIT_LINKSPEED 0x80 +/* Responses for the DCB_C_SET_ALL command */ +#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ +#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ +#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ + int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) { @@ -124,15 +131,12 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); - ixgbe_reset_interrupt_capability(adapter); - ixgbe_napi_del_all(adapter); - INIT_LIST_HEAD(&netdev->napi_list); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); - adapter->tx_ring = NULL; - adapter->rx_ring = NULL; + ixgbe_clear_interrupt_scheme(adapter); - adapter->hw.fc.requested_mode = ixgbe_fc_pfc; + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->last_lfc_mode = adapter->hw.fc.current_mode; + adapter->hw.fc.requested_mode = ixgbe_fc_none; + } adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; adapter->flags |= IXGBE_FLAG_DCB_ENABLED; ixgbe_init_interrupt_scheme(adapter); @@ -141,17 +145,13 @@ static u8 
ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) } else { /* Turn off DCB */ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - adapter->hw.fc.requested_mode = ixgbe_fc_default; if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); - ixgbe_reset_interrupt_capability(adapter); - ixgbe_napi_del_all(adapter); - INIT_LIST_HEAD(&netdev->napi_list); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); - adapter->tx_ring = NULL; - adapter->rx_ring = NULL; + ixgbe_clear_interrupt_scheme(adapter); + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; adapter->flags |= IXGBE_FLAG_RSS_ENABLED; ixgbe_init_interrupt_scheme(adapter); @@ -167,10 +167,15 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; + int i, j; for (i = 0; i < netdev->addr_len; i++) perm_addr[i] = adapter->hw.mac.perm_addr[i]; + + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + for (j = 0; j < netdev->addr_len; j++, i++) + perm_addr[i] = adapter->hw.mac.san_addr[j]; + } } static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, @@ -197,8 +202,10 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != - adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) + adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) { adapter->dcb_set_bitmap |= BIT_PG_TX; + adapter->dcb_set_bitmap |= BIT_RESETLINK; + } } static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, @@ -209,8 +216,10 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != - adapter->dcb_cfg.bw_percentage[0][bwg_id]) + adapter->dcb_cfg.bw_percentage[0][bwg_id]) { adapter->dcb_set_bitmap |= BIT_PG_RX; + adapter->dcb_set_bitmap |= BIT_RESETLINK; + } } static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, @@ -237,8 +246,10 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != - adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) + adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) { adapter->dcb_set_bitmap |= BIT_PG_RX; + adapter->dcb_set_bitmap |= BIT_RESETLINK; + } } static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, @@ -249,8 +260,10 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != - adapter->dcb_cfg.bw_percentage[1][bwg_id]) + adapter->dcb_cfg.bw_percentage[1][bwg_id]) { adapter->dcb_set_bitmap |= BIT_PG_RX; + adapter->dcb_set_bitmap |= BIT_RESETLINK; + } } static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, @@ -319,28 +332,60 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); int ret; - adapter->dcb_set_bitmap &= ~BIT_BCN; /* no set for BCN */ if 
(!adapter->dcb_set_bitmap) - return 1; + return DCB_NO_HW_CHG; - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) - msleep(1); + /* + * Only take down the adapter if the configuration change + * requires a reset. + */ + if (adapter->dcb_set_bitmap & BIT_RESETLINK) { + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + msleep(1); - if (netif_running(netdev)) - ixgbe_down(adapter); + if (netif_running(netdev)) + ixgbe_down(adapter); + } ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, adapter->ring_feature[RING_F_DCB].indices); if (ret) { - clear_bit(__IXGBE_RESETTING, &adapter->state); - return ret; + if (adapter->dcb_set_bitmap & BIT_RESETLINK) + clear_bit(__IXGBE_RESETTING, &adapter->state); + return DCB_NO_HW_CHG; + } + + if (adapter->dcb_cfg.pfc_mode_enable) { + if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && + (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) + adapter->last_lfc_mode = adapter->hw.fc.current_mode; + adapter->hw.fc.requested_mode = ixgbe_fc_pfc; + } else { + if (adapter->hw.mac.type != ixgbe_mac_82598EB) + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + else + adapter->hw.fc.requested_mode = ixgbe_fc_none; } - if (netif_running(netdev)) - ixgbe_up(adapter); + if (adapter->dcb_set_bitmap & BIT_RESETLINK) { + if (netif_running(netdev)) + ixgbe_up(adapter); + ret = DCB_HW_CHG_RST; + } else if (adapter->dcb_set_bitmap & BIT_PFC) { + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + ixgbe_dcb_config_pfc_82598(&adapter->hw, + &adapter->dcb_cfg); + else if (adapter->hw.mac.type == ixgbe_mac_82599EB) + ixgbe_dcb_config_pfc_82599(&adapter->hw, + &adapter->dcb_cfg); + ret = DCB_HW_CHG; + } + if (adapter->dcb_cfg.pfc_mode_enable) + adapter->hw.fc.current_mode = ixgbe_fc_pfc; + if (adapter->dcb_set_bitmap & BIT_RESETLINK) + clear_bit(__IXGBE_RESETTING, &adapter->state); adapter->dcb_set_bitmap = 0x00; - clear_bit(__IXGBE_RESETTING, &adapter->state); return ret; } @@ -416,11 +461,17 @@ static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); + return adapter->dcb_cfg.pfc_mode_enable; } static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.pfc_mode_enable = state; + if (adapter->temp_dcb_cfg.pfc_mode_enable != + adapter->dcb_cfg.pfc_mode_enable) + adapter->dcb_set_bitmap |= BIT_PFC; return; } diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index f0a20fa..35255b8 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -67,6 +67,7 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { {"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)}, {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)}, {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)}, + {"hw_rsc_count", IXGBE_STAT(rsc_count)}, {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)}, {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)}, {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)}, @@ -90,6 +91,14 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, +#ifdef IXGBE_FCOE + {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, + {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, + 
{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, + {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, + {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, + {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, +#endif /* IXGBE_FCOE */ }; #define IXGBE_QUEUE_STATS_LEN \ @@ -245,6 +254,13 @@ static void ixgbe_get_pauseparam(struct net_device *netdev, else pause->autoneg = 1; +#ifdef CONFIG_DCB + if (hw->fc.current_mode == ixgbe_fc_pfc) { + pause->rx_pause = 0; + pause->tx_pause = 0; + } + +#endif if (hw->fc.current_mode == ixgbe_fc_rx_pause) { pause->rx_pause = 1; } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { @@ -261,6 +277,13 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; +#ifdef CONFIG_DCB + if (adapter->dcb_cfg.pfc_mode_enable || + ((hw->mac.type == ixgbe_mac_82598EB) && + (adapter->flags & IXGBE_FLAG_DCB_ENABLED))) + return -EINVAL; + +#endif if (pause->autoneg != AUTONEG_ENABLE) hw->fc.disable_fc_autoneg = true; else @@ -277,6 +300,9 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, else return -EINVAL; +#ifdef CONFIG_DCB + adapter->last_lfc_mode = hw->fc.requested_mode; +#endif hw->mac.ops.setup_fc(hw, 0); return 0; @@ -311,10 +337,17 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev) static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) { - if (data) + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (data) { netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - else + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + netdev->features |= NETIF_F_SCTP_CSUM; + } else { netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + netdev->features &= ~NETIF_F_SCTP_CSUM; + } return 0; } @@ -1106,7 +1139,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, } for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { - struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; if (q_vector->txr_count && !q_vector->rxr_count) /* tx vector gets half the rate */ q_vector->eitr = (adapter->eitr_param >> 1); @@ -1120,6 +1153,27 @@ static int ixgbe_set_coalesce(struct net_device *netdev, return 0; } +static int ixgbe_set_flags(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ethtool_op_set_flags(netdev, data); + + if (!(adapter->flags & IXGBE_FLAG_RSC_CAPABLE)) + return 0; + + /* if state changes we need to update adapter->flags and reset */ + if ((!!(data & ETH_FLAG_LRO)) != + (!!(adapter->flags & IXGBE_FLAG_RSC_ENABLED))) { + adapter->flags ^= IXGBE_FLAG_RSC_ENABLED; + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); + } + return 0; + +} static const struct ethtool_ops ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, @@ -1154,7 +1208,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .get_coalesce = ixgbe_get_coalesce, .set_coalesce = ixgbe_set_coalesce, .get_flags = ethtool_op_get_flags, - .set_flags = ethtool_op_set_flags, + .set_flags = ixgbe_set_flags, }; void ixgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c new file mode 100644 index 0000000..d5939de --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -0,0 +1,552 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux 
driver + Copyright(c) 1999 - 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +#include "ixgbe.h" +#include <linux/if_ether.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/fc/fc_fs.h> +#include <scsi/fc/fc_fcoe.h> +#include <scsi/libfc.h> +#include <scsi/libfcoe.h> + +/** + * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type + * @rx_desc: advanced rx descriptor + * + * Returns : true if it is FCoE pkt + */ +static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc) +{ + u16 p; + + p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info); + if (p & IXGBE_RXDADV_PKTTYPE_ETQF) { + p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK; + p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT; + return p == IXGBE_ETQF_FILTER_FCOE; + } + return false; +} + +/** + * ixgbe_fcoe_clear_ddp - clear the given ddp context + * @ddp - ptr to the ixgbe_fcoe_ddp + * + * Returns : none + * + */ +static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) +{ + ddp->len = 0; + ddp->err = 0; + ddp->udl = NULL; + ddp->udp = 0UL; + ddp->sgl = NULL; + ddp->sgc = 0; +} + +/** + * ixgbe_fcoe_ddp_put - free the ddp context for a given xid + * @netdev: the corresponding net_device + * @xid: the xid that corresponding ddp will be freed + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_done + * and it is expected to be called by ULD, i.e., FCP layer of libfc + * to release the corresponding ddp context when the I/O is done. 
+ *
+ * Returns : data length already ddp-ed in bytes
+ */
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
+{
+	int len = 0;
+	struct ixgbe_fcoe *fcoe;
+	struct ixgbe_adapter *adapter;
+	struct ixgbe_fcoe_ddp *ddp;
+
+	if (!netdev)
+		goto out_ddp_put;
+
+	if (xid >= IXGBE_FCOE_DDP_MAX)
+		goto out_ddp_put;
+
+	adapter = netdev_priv(netdev);
+	fcoe = &adapter->fcoe;
+	ddp = &fcoe->ddp[xid];
+	if (!ddp->udl)
+		goto out_ddp_put;
+
+	len = ddp->len;
+	/* if there is an error, force invalidation of the ddp context */
+	if (ddp->err) {
+		spin_lock_bh(&fcoe->lock);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
+				(xid | IXGBE_FCFLTRW_WE));
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+				(xid | IXGBE_FCDMARW_WE));
+		spin_unlock_bh(&fcoe->lock);
+	}
+	if (ddp->sgl)
+		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
+			     DMA_FROM_DEVICE);
+	pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
+	ixgbe_fcoe_clear_ddp(ddp);
+
+out_ddp_put:
+	return len;
+}
+
+/**
+ * ixgbe_fcoe_ddp_get - called to set up ddp context
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+		       struct scatterlist *sgl, unsigned int sgc)
+{
+	struct ixgbe_adapter *adapter;
+	struct ixgbe_hw *hw;
+	struct ixgbe_fcoe *fcoe;
+	struct ixgbe_fcoe_ddp *ddp;
+	struct scatterlist *sg;
+	unsigned int i, j, dmacount;
+	unsigned int len;
+	static const unsigned int bufflen = 4096;
+	unsigned int firstoff = 0;
+	unsigned int lastsize;
+	unsigned int thisoff = 0;
+	unsigned int thislen = 0;
+	u32 fcbuff, fcdmarw, fcfltrw;
+	dma_addr_t addr;
+
+	if (!netdev || !sgl)
+		return 0;
+
+	adapter = netdev_priv(netdev);
+	if (xid >= IXGBE_FCOE_DDP_MAX) {
+		DPRINTK(DRV, WARNING, "xid=0x%x out-of-range\n", xid);
+		return 0;
+	}
+
+	fcoe = &adapter->fcoe;
+	if (!fcoe->pool) {
+		DPRINTK(DRV, WARNING, "xid=0x%x no ddp pool for fcoe\n", xid);
+		return 0;
+	}
+
+	ddp = &fcoe->ddp[xid];
+	if (ddp->sgl) {
+		DPRINTK(DRV, ERR, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
+			xid, ddp->sgl, ddp->sgc);
+		return 0;
+	}
+	ixgbe_fcoe_clear_ddp(ddp);
+
+	/* setup dma from scsi command sgl */
+	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+	if (dmacount == 0) {
+		DPRINTK(DRV, ERR, "xid 0x%x DMA map error\n", xid);
+		return 0;
+	}
+
+	/* alloc the udl from our ddp pool */
+	ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp);
+	if (!ddp->udl) {
+		DPRINTK(DRV, ERR, "failed to allocate ddp context\n");
+		goto out_noddp_unmap;
+	}
+	ddp->sgl = sgl;
+	ddp->sgc = sgc;
+
+	j = 0;
+	for_each_sg(sgl, sg, dmacount, i) {
+		addr = sg_dma_address(sg);
+		len = sg_dma_len(sg);
+		while (len) {
+			/* get the offset of length of current buffer */
+			thisoff = addr & ((dma_addr_t)bufflen - 1);
+			thislen = min((bufflen - thisoff), len);
+			/*
+			 * all but the 1st buffer (j == 0)
+			 * must be aligned on bufflen
+			 */
+			if ((j != 0) && (thisoff))
+				goto out_noddp_free;
+			/*
+			 * all but the last buffer
+			 * ((i == (dmacount - 1)) && (thislen == len))
+			 * must end at bufflen
+			 */
+			if (((i != (dmacount - 1)) || (thislen != len))
+			    && ((thislen + thisoff) != bufflen))
+				goto out_noddp_free;
+
+			ddp->udl[j] = (u64)(addr - thisoff);
+			/* only the first buffer may have non-zero offset */
+			if (j == 0)
+				firstoff = thisoff;
+			len -= thislen;
+			addr += thislen;
+			j++;
+			/* max number of buffers allowed in one DDP context */
+			if (j > IXGBE_BUFFCNT_MAX) {
+				DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx "
+					"not enough descriptors\n",
+					xid, i, j, dmacount, (u64)addr);
+				goto out_noddp_free;
+			}
+		}
+	}
+	/* only the last buffer may have non-full bufflen */
+	lastsize = thisoff + thislen;
+
+	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
+	fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT);
+	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
+	fcbuff |= (IXGBE_FCBUFF_VALID);
+
+	fcdmarw = xid;
+	fcdmarw |= IXGBE_FCDMARW_WE;
+	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);
+
+	fcfltrw = xid;
+	fcfltrw |= IXGBE_FCFLTRW_WE;
+
+	/* program DMA context */
+	hw = &adapter->hw;
+	spin_lock_bh(&fcoe->lock);
+	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_32BIT_MASK);
+	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
+	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
+	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
+	/* program filter context */
+	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
+	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
+	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
+	spin_unlock_bh(&fcoe->lock);
+
+	return 1;
+
+out_noddp_free:
+	pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
+	ixgbe_fcoe_clear_ddp(ddp);
+
+out_noddp_unmap:
+	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+	return 0;
+}
+
+/**
+ * ixgbe_fcoe_ddp - check ddp status and mark it done
+ * @adapter: ixgbe adapter
+ * @rx_desc: advanced rx descriptor
+ * @skb: the skb holding the received data
+ *
+ * This checks ddp status.
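 *
 * For illustration only (editor's sketch, not part of the patch): the
 * exchange id is recovered from the FC header's OX_ID and used to look
 * up the per-xid DDP context, roughly:
 *
 *	fh = (struct fc_frame_header *)skb_transport_header(skb);
 *	xid = be16_to_cpu(fh->fh_ox_id);
 *	if (xid < IXGBE_FCOE_DDP_MAX)
 *		ddp = &adapter->fcoe.ddp[xid];
 *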
+ *
+ * Returns : 0 for success and skb will not be delivered to ULD
+ */
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+		   union ixgbe_adv_rx_desc *rx_desc,
+		   struct sk_buff *skb)
+{
+	u16 xid;
+	u32 sterr, fceofe, fcerr, fcstat;
+	int rc = -EINVAL;
+	struct ixgbe_fcoe *fcoe;
+	struct ixgbe_fcoe_ddp *ddp;
+	struct fc_frame_header *fh;
+
+	if (!ixgbe_rx_is_fcoe(rx_desc))
+		goto ddp_out;
+
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
+	fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
+	if (fcerr == IXGBE_FCERR_BADCRC)
+		skb->ip_summed = CHECKSUM_NONE;
+
+	skb_reset_network_header(skb);
+	skb_set_transport_header(skb, skb_network_offset(skb) +
+				 sizeof(struct fcoe_hdr));
+	fh = (struct fc_frame_header *)skb_transport_header(skb);
+	xid = be16_to_cpu(fh->fh_ox_id);
+	if (xid >= IXGBE_FCOE_DDP_MAX)
+		goto ddp_out;
+
+	fcoe = &adapter->fcoe;
+	ddp = &fcoe->ddp[xid];
+	if (!ddp->udl)
+		goto ddp_out;
+
+	ddp->err = (fcerr | fceofe);
+	if (ddp->err)
+		goto ddp_out;
+
+	fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
+	if (fcstat) {
+		/* update length of DDPed data */
+		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+		/* unmap the sg list when FCP_RSP is received */
+		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
+			pci_unmap_sg(adapter->pdev, ddp->sgl,
+				     ddp->sgc, DMA_FROM_DEVICE);
+			ddp->sgl = NULL;
+			ddp->sgc = 0;
+		}
+		/* return 0 to bypass going to ULD for DDPed data */
+		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
+			rc = 0;
+	}
+
+ddp_out:
+	return rc;
+}
+
+/**
+ * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
+ * @adapter: ixgbe adapter
+ * @tx_ring: tx desc ring
+ * @skb: associated skb
+ * @tx_flags: tx flags
+ * @hdr_len: hdr_len to be returned
+ *
+ * This sets up large send offload for FCoE
+ *
+ * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
+ */
+int ixgbe_fso(struct ixgbe_adapter *adapter,
+	      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+	      u32 tx_flags, u8 *hdr_len)
+{
+	u8 sof, eof;
+	u32 vlan_macip_lens;
+	u32 fcoe_sof_eof;
+	u32 type_tucmd;
+	u32 mss_l4len_idx;
+	int mss = 0;
+	unsigned int i;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	struct ixgbe_adv_tx_context_desc *context_desc;
+	struct fc_frame_header *fh;
+
+	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
+		DPRINTK(DRV, ERR, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+			skb_shinfo(skb)->gso_type);
+		return -EINVAL;
+	}
+
+	/* reset the headers to point to fcoe/fc */
+	skb_set_network_header(skb, skb->mac_len);
+	skb_set_transport_header(skb, skb->mac_len +
+				 sizeof(struct fcoe_hdr));
+
+	/* sets up SOF and ORIS */
+	fcoe_sof_eof = 0;
+	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
+	switch (sof) {
+	case FC_SOF_I2:
+		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		break;
+	case FC_SOF_I3:
+		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
+		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		break;
+	case FC_SOF_N2:
+		break;
+	case FC_SOF_N3:
+		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
+		break;
+	default:
+		DPRINTK(DRV, WARNING, "unknown sof = 0x%x\n", sof);
+		return -EINVAL;
+	}
+
+	/* the first byte of the last dword is EOF */
+	skb_copy_bits(skb, skb->len - 4, &eof, 1);
+	/* sets up EOF and ORIE */
+	switch (eof) {
+	case FC_EOF_N:
+		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
+		break;
+	case FC_EOF_T:
+		/* lso needs ORIE */
+		if (skb_is_gso(skb)) {
+			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
+			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
+		} else {
+			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
+		}
+		break;
+	case FC_EOF_NI:
+		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
+		break;
+	case FC_EOF_A:
+		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
+		break;
+	default:
+		DPRINTK(DRV, WARNING, "unknown eof = 0x%x\n", eof);
+		return -EINVAL;
+	}
+
+	/* sets up PARINC indicating data offset */
+	fh = (struct fc_frame_header *)skb_transport_header(skb);
+	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
+		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
+
+	/* hdr_len includes fc_hdr if FCoE lso is enabled */
+	*hdr_len = sizeof(struct fcoe_crc_eof);
+	if (skb_is_gso(skb))
+		*hdr_len += (skb_transport_offset(skb) +
+			     sizeof(struct fc_frame_header));
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = (skb_transport_offset(skb) +
+			   sizeof(struct fc_frame_header));
+	vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
+			    << IXGBE_ADVTXD_MACLEN_SHIFT);
+	vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+
+	/* type_tucmd and mss: set TUCMD.FCoE to enable offload */
+	type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
+		     IXGBE_ADVTXT_TUCMD_FCOE;
+	if (skb_is_gso(skb))
+		mss = skb_shinfo(skb)->gso_size;
+	/* mss_l4len_idx: use 1 for FSO as TSO, no need for L4LEN */
+	mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
+			(1 << IXGBE_ADVTXD_IDX_SHIFT);
+
+	/* write context desc */
+	i = tx_ring->next_to_use;
+	context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+	tx_buffer_info = &tx_ring->tx_buffer_info[i];
+	tx_buffer_info->time_stamp = jiffies;
+	tx_buffer_info->next_to_watch = i;
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+	tx_ring->next_to_use = i;
+
+	return skb_is_gso(skb);
+}
+
+/**
+ * ixgbe_configure_fcoe - configures registers for fcoe at start
+ * @adapter: ptr to ixgbe adapter
+ *
+ * This sets up FCoE related registers
+ *
+ * Returns : none
+ */
+void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
+{
+	int i, fcoe_q, fcoe_i;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+	/* create the pool for ddp if not created yet */
+	if (!fcoe->pool) {
+		/* allocate ddp pool */
+		fcoe->pool = pci_pool_create("ixgbe_fcoe_ddp",
+					     adapter->pdev, IXGBE_FCPTR_MAX,
+					     IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+		if (!fcoe->pool)
+			DPRINTK(DRV, ERR,
+				"failed to allocate FCoE DDP pool\n");
+
+		spin_lock_init(&fcoe->lock);
+	}
+
+	/* Enable L2 eth type filter for FCoE */
+	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
+			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
+	if (adapter->ring_feature[RING_F_FCOE].indices) {
+		/* Use multiple rx queues for FCoE by redirection table */
+		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
+			fcoe_i = f->mask + i % f->indices;
+			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+			fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
+		}
+		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
+		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
+	} else {
+		/* Use single rx queue for FCoE */
+		fcoe_i = f->mask;
+		fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
+				IXGBE_ETQS_QUEUE_EN |
+				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
+			IXGBE_FCRXCTRL_FCOELLI
| + IXGBE_FCRXCTRL_FCCRCBO | + (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); +} + +/** + * ixgbe_cleanup_fcoe - release all fcoe ddp context resources + * @adapter : ixgbe adapter + * + * Cleans up outstanding ddp context resources + * + * Returns : none + */ +void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) +{ + int i; + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + /* release ddp resource */ + if (fcoe->pool) { + for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) + ixgbe_fcoe_ddp_put(adapter->netdev, i); + pci_pool_destroy(fcoe->pool); + fcoe->pool = NULL; + } +} diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h new file mode 100644 index 0000000..b7f9b63 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_fcoe.h @@ -0,0 +1,66 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_FCOE_H +#define _IXGBE_FCOE_H + +#include <scsi/fc/fc_fcoe.h> + +/* shift bits within STAT fo FCSTAT */ +#define IXGBE_RXDADV_FCSTAT_SHIFT 4 + +/* ddp user buffer */ +#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */ +#define IXGBE_FCPTR_ALIGN 16 +#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) +#define IXGBE_FCBUFF_4KB 0x0 +#define IXGBE_FCBUFF_8KB 0x1 +#define IXGBE_FCBUFF_16KB 0x2 +#define IXGBE_FCBUFF_64KB 0x3 +#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */ +#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ +#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ + +/* fcerr */ +#define IXGBE_FCERR_BADCRC 0x00100000 + +struct ixgbe_fcoe_ddp { + int len; + u32 err; + unsigned int sgc; + struct scatterlist *sgl; + dma_addr_t udp; + u64 *udl; +}; + +struct ixgbe_fcoe { + spinlock_t lock; + struct pci_pool *pool; + struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; +}; + +#endif /* _IXGBE_FCOE_H */ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 07e778d..d342619 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -39,6 +39,7 @@ #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> +#include <scsi/fc/fc_fcoe.h> #include "ixgbe.h" #include "ixgbe_common.h" @@ -47,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "2.0.8-k2" +#define DRV_VERSION "2.0.24-k2" const char ixgbe_driver_version[] = DRV_VERSION; static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; @@ -89,6 +90,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = { board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), + board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 }, @@ -326,8 +329,18 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, } /* re-arm the interrupt */ - if (count >= tx_ring->work_limit) - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx); + if (count >= tx_ring->work_limit) { + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, + tx_ring->v_idx); + else if (tx_ring->v_idx & 0xFFFFFFFF) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), + tx_ring->v_idx); + else + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), + (tx_ring->v_idx >> 32)); + } + tx_ring->total_bytes += total_bytes; tx_ring->total_packets += total_packets; @@ -451,6 +464,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) **/ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb, u8 status, + struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc) { struct ixgbe_adapter *adapter = q_vector->adapter; @@ -458,24 +472,17 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, bool is_vlan = (status & IXGBE_RXD_STAT_VP); u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); - skb_record_rx_queue(skb, q_vector - &adapter->q_vector[0]); - if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + skb_record_rx_queue(skb, ring->queue_index); + if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { if (adapter->vlgrp && is_vlan && (tag != 0)) vlan_gro_receive(napi, adapter->vlgrp, tag, skb); else napi_gro_receive(napi, skb); } else { - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { - if (adapter->vlgrp && 
is_vlan && (tag != 0)) - vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag); - else - netif_receive_skb(skb); - } else { - if (adapter->vlgrp && is_vlan && (tag != 0)) - vlan_hwaccel_rx(skb, adapter->vlgrp, tag); - else - netif_rx(skb); - } + if (adapter->vlgrp && is_vlan && (tag != 0)) + vlan_hwaccel_rx(skb, adapter->vlgrp, tag); + else + netif_rx(skb); } } @@ -622,6 +629,40 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; } +static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) +{ + return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & + IXGBE_RXDADV_RSCCNT_MASK) >> + IXGBE_RXDADV_RSCCNT_SHIFT; +} + +/** + * ixgbe_transform_rsc_queue - change rsc queue into a full packet + * @skb: pointer to the last skb in the rsc queue + * + * This function changes a queue full of hw rsc buffers into a completed + * packet. It uses the ->prev pointers to find the first packet and then + * turns it into the frag list owner. + **/ +static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) +{ + unsigned int frag_list_size = 0; + + while (skb->prev) { + struct sk_buff *prev = skb->prev; + frag_list_size += skb->len; + skb->prev = NULL; + skb = prev; + } + + skb_shinfo(skb)->frag_list = skb->next; + skb->next = NULL; + skb->len += frag_list_size; + skb->data_len += frag_list_size; + skb->truesize += frag_list_size; + return skb; +} + static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, int *work_done, int work_to_do) @@ -631,7 +672,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, union ixgbe_adv_rx_desc *rx_desc, *next_rxd; struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; struct sk_buff *skb; - unsigned int i; + unsigned int i, rsc_count = 0; u32 len, staterr; u16 hdr_info; bool cleaned = false; @@ -697,20 +738,38 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, i++; if (i == rx_ring->count) i = 0; - next_buffer = &rx_ring->rx_buffer_info[i]; next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); prefetch(next_rxd); - cleaned_count++; + + if (adapter->flags & IXGBE_FLAG_RSC_CAPABLE) + rsc_count = ixgbe_get_rsc_count(rx_desc); + + if (rsc_count) { + u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> + IXGBE_RXDADV_NEXTP_SHIFT; + next_buffer = &rx_ring->rx_buffer_info[nextp]; + rx_ring->rsc_count += (rsc_count - 1); + } else { + next_buffer = &rx_ring->rx_buffer_info[i]; + } + if (staterr & IXGBE_RXD_STAT_EOP) { + if (skb->prev) + skb = ixgbe_transform_rsc_queue(skb); rx_ring->stats.packets++; rx_ring->stats.bytes += skb->len; } else { - rx_buffer_info->skb = next_buffer->skb; - rx_buffer_info->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + rx_buffer_info->skb = next_buffer->skb; + rx_buffer_info->dma = next_buffer->dma; + next_buffer->skb = skb; + next_buffer->dma = 0; + } else { + skb->next = next_buffer->skb; + skb->next->prev = skb; + } adapter->non_eop_descs++; goto next_desc; } @@ -727,7 +786,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, total_rx_packets++; skb->protocol = eth_type_trans(skb, adapter->netdev); - ixgbe_receive_skb(q_vector, skb, staterr, rx_desc); +#ifdef IXGBE_FCOE + /* if ddp, not passing to ULD unless for FCP_RSP or error */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + if (!ixgbe_fcoe_ddp(adapter, rx_desc, skb)) + goto next_desc; +#endif /* IXGBE_FCOE */ + ixgbe_receive_skb(q_vector, skb, staterr, 
rx_ring, rx_desc); next_desc: rx_desc->wb.upper.status_error = 0; @@ -740,7 +805,7 @@ next_desc: /* use prefetched values */ rx_desc = next_rxd; - rx_buffer_info = next_buffer; + rx_buffer_info = &rx_ring->rx_buffer_info[i]; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); } @@ -780,7 +845,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) * corresponding register. */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { - q_vector = &adapter->q_vector[v_idx]; + q_vector = adapter->q_vector[v_idx]; /* XXX for_each_bit(...) */ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); @@ -929,8 +994,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) struct ixgbe_adapter *adapter = q_vector->adapter; u32 new_itr; u8 current_itr, ret_itr; - int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) / - sizeof(struct ixgbe_q_vector); + int i, r_idx, v_idx = q_vector->v_idx; struct ixgbe_ring *rx_ring, *tx_ring; r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); @@ -1121,7 +1185,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); rx_ring = &(adapter->rx_ring[r_idx]); /* disable interrupts on this vector only */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); + else if (rx_ring->v_idx & 0xFFFFFFFF) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), rx_ring->v_idx); + else + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), + (rx_ring->v_idx >> 32)); napi_schedule(&q_vector->napi); return IRQ_HANDLED; @@ -1135,6 +1205,23 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) return IRQ_HANDLED; } +static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); + } else { + mask = (qmask & 0xFFFFFFFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); + mask = (qmask >> 32); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); + } + /* skip the flush */ +} + /** * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine * @napi: napi struct with our devices info in it @@ -1167,7 +1254,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) if (adapter->itr_setting & 1) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx); + ixgbe_irq_enable_queues(adapter, rx_ring->v_idx); } return work_done; @@ -1189,7 +1276,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) struct ixgbe_ring *rx_ring = NULL; int work_done = 0, i; long r_idx; - u16 enable_mask = 0; + u64 enable_mask = 0; /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ @@ -1216,7 +1303,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) if (adapter->itr_setting & 1) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask); + ixgbe_irq_enable_queues(adapter, enable_mask); return 0; } @@ -1225,19 +1312,21 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, int r_idx) { - 
a->q_vector[v_idx].adapter = a; - set_bit(r_idx, a->q_vector[v_idx].rxr_idx); - a->q_vector[v_idx].rxr_count++; - a->rx_ring[r_idx].v_idx = 1 << v_idx; + struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; + + set_bit(r_idx, q_vector->rxr_idx); + q_vector->rxr_count++; + a->rx_ring[r_idx].v_idx = (u64)1 << v_idx; } static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, - int r_idx) + int t_idx) { - a->q_vector[v_idx].adapter = a; - set_bit(r_idx, a->q_vector[v_idx].txr_idx); - a->q_vector[v_idx].txr_count++; - a->tx_ring[r_idx].v_idx = 1 << v_idx; + struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; + + set_bit(t_idx, q_vector->txr_idx); + q_vector->txr_count++; + a->tx_ring[t_idx].v_idx = (u64)1 << v_idx; } /** @@ -1333,7 +1422,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ &ixgbe_msix_clean_many) for (vector = 0; vector < q_vectors; vector++) { - handler = SET_HANDLER(&adapter->q_vector[vector]); + handler = SET_HANDLER(adapter->q_vector[vector]); if(handler == &ixgbe_msix_clean_rx) { sprintf(adapter->name[vector], "%s-%s-%d", @@ -1349,7 +1438,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) err = request_irq(adapter->msix_entries[vector].vector, handler, 0, adapter->name[vector], - &(adapter->q_vector[vector])); + adapter->q_vector[vector]); if (err) { DPRINTK(PROBE, ERR, "request_irq failed for MSIX interrupt " @@ -1372,7 +1461,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) free_queue_irqs: for (i = vector - 1; i >= 0; i--) free_irq(adapter->msix_entries[--vector].vector, - &(adapter->q_vector[i])); + adapter->q_vector[i]); adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); @@ -1383,7 +1472,7 @@ out: static void ixgbe_set_itr(struct ixgbe_adapter *adapter) { - struct ixgbe_q_vector *q_vector = adapter->q_vector; + struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; u8 current_itr; u32 new_itr = q_vector->eitr; struct ixgbe_ring *rx_ring = &adapter->rx_ring[0]; @@ -1436,7 +1525,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter) static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) { u32 mask; - mask = IXGBE_EIMS_ENABLE_MASK; + + mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) mask |= IXGBE_EIMS_GPI_SDP1; if (adapter->hw.mac.type == ixgbe_mac_82599EB) { @@ -1446,14 +1536,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) } IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - /* enable the rest of the queue vectors */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), - (IXGBE_EIMS_RTX_QUEUE << 16)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2), - ((IXGBE_EIMS_RTX_QUEUE << 16) | - IXGBE_EIMS_RTX_QUEUE)); - } + ixgbe_irq_enable_queues(adapter, ~0); IXGBE_WRITE_FLUSH(&adapter->hw); } @@ -1467,6 +1550,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) struct net_device *netdev = data; struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; u32 eicr; /* @@ -1494,13 +1578,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); - if (napi_schedule_prep(&adapter->q_vector[0].napi)) { + if (napi_schedule_prep(&(q_vector->napi))) { adapter->tx_ring[0].total_packets = 0; adapter->tx_ring[0].total_bytes = 0; 
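	/*
	 * For illustration only (editor's sketch, not part of the patch):
	 * ring->v_idx is now a 64-bit one-hot vector mask, so on 82599 any
	 * EICS/EIMS/EIMC access has to be split across the two _EX
	 * registers, as the hunks above and below do:
	 *
	 *	u64 qmask = ring->v_idx;
	 *	if (hw->mac.type == ixgbe_mac_82598EB) {
	 *		IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)qmask);
	 *	} else {
	 *		IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0), (u32)qmask);
	 *		IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1), (u32)(qmask >> 32));
	 *	}
	 */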
adapter->rx_ring[0].total_packets = 0; adapter->rx_ring[0].total_bytes = 0; /* would disable interrupts here but EIAM disabled it */ - __napi_schedule(&adapter->q_vector[0].napi); + __napi_schedule(&(q_vector->napi)); } return IRQ_HANDLED; @@ -1511,7 +1595,7 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; for (i = 0; i < q_vectors; i++) { - struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); q_vector->rxr_count = 0; @@ -1562,7 +1646,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) i--; for (; i >= 0; i--) { free_irq(adapter->msix_entries[i].vector, - &(adapter->q_vector[i])); + adapter->q_vector[i]); } ixgbe_reset_q_vectors(adapter); @@ -1577,10 +1661,12 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) **/ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) { - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); + } else { + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(2), ~0); } IXGBE_WRITE_FLUSH(&adapter->hw); if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { @@ -1592,18 +1678,6 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) } } -static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter) -{ - u32 mask = IXGBE_EIMS_RTX_QUEUE; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask << 16); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2), - (mask << 16 | mask)); - } - /* skip the flush */ -} - /** * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) 
and MSI interrupts * @@ -1675,7 +1749,29 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) unsigned long mask; if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - queue0 = index; + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + int dcb_i = adapter->ring_feature[RING_F_DCB].indices; + if (dcb_i == 8) + queue0 = index >> 4; + else if (dcb_i == 4) + queue0 = index >> 5; + else + dev_err(&adapter->pdev->dev, "Invalid DCB " + "configuration\n"); +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *f; + + rx_ring = &adapter->rx_ring[queue0]; + f = &adapter->ring_feature[RING_F_FCOE]; + if ((queue0 == 0) && (index > rx_ring->reg_idx)) + queue0 = f->mask + index - + rx_ring->reg_idx - 1; + } +#endif /* IXGBE_FCOE */ + } else { + queue0 = index; + } } else { mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask; queue0 = index & mask; @@ -1689,28 +1785,20 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; + srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK; + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - u16 bufsz = IXGBE_RXBUFFER_2048; - /* grow the amount we can receive on large page machines */ - if (bufsz < (PAGE_SIZE / 2)) - bufsz = (PAGE_SIZE / 2); - /* cap the bufsz at our largest descriptor size */ - bufsz = min((u16)IXGBE_MAX_RXBUFFER, bufsz); - - srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER + srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#else + srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#endif srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - srrctl |= ((IXGBE_RX_HDR_SIZE << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK); } else { + srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - - if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) - srrctl |= IXGBE_RXBUFFER_2048 >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - else - srrctl |= rx_ring->rx_buf_len >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; } IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); @@ -1736,11 +1824,17 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) u32 fctrl, hlreg0; u32 reta = 0, mrqc = 0; u32 rdrxctl; + u32 rscctrl; int rx_buf_len; /* Decide whether to use packet split mode or not */ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; +#endif /* IXGBE_FCOE */ + /* Set the RX buffer length according to the mode */ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { rx_buf_len = IXGBE_RX_HDR_SIZE; @@ -1749,11 +1843,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_IPV6HDR; + IXGBE_PSRTYPE_IPV6HDR | + IXGBE_PSRTYPE_L2HDR; IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); } } else { - if (netdev->mtu <= ETH_DATA_LEN) + if (!(adapter->flags & IXGBE_FLAG_RSC_ENABLED) && + (netdev->mtu <= ETH_DATA_LEN)) rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; else rx_buf_len = ALIGN(max_frame, 1024); @@ -1770,6 +1866,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; else hlreg0 |= IXGBE_HLREG0_JUMBOEN; +#ifdef IXGBE_FCOE + if (adapter->flags & 
IXGBE_FLAG_FCOE_ENABLED) + hlreg0 |= IXGBE_HLREG0_JUMBOEN; +#endif IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); @@ -1791,6 +1891,17 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) adapter->rx_ring[i].tail = IXGBE_RDT(j); adapter->rx_ring[i].rx_buf_len = rx_buf_len; +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *f; + f = &adapter->ring_feature[RING_F_FCOE]; + if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) && + (i >= f->mask) && (i < f->mask + f->indices)) + adapter->rx_ring[i].rx_buf_len = + IXGBE_FCOE_JUMBO_FRAME_SIZE; + } + +#endif /* IXGBE_FCOE */ ixgbe_configure_srrctl(adapter, j); } @@ -1875,8 +1986,45 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) if (hw->mac.type == ixgbe_mac_82599EB) { rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; + rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); } + + if (adapter->flags & IXGBE_FLAG_RSC_ENABLED) { + /* Enable 82599 HW-RSC */ + for (i = 0; i < adapter->num_rx_queues; i++) { + j = adapter->rx_ring[i].reg_idx; + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); + rscctrl |= IXGBE_RSCCTL_RSCEN; + /* + * we must limit the number of descriptors so that the + * total size of max desc * buf_len is not greater + * than 65535 + */ + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { +#if (MAX_SKB_FRAGS > 16) + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; +#elif (MAX_SKB_FRAGS > 8) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; +#elif (MAX_SKB_FRAGS > 4) + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; +#else + rscctrl |= IXGBE_RSCCTL_MAXDESC_1; +#endif + } else { + if (rx_buf_len < IXGBE_RXBUFFER_4096) + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; + else if (rx_buf_len < IXGBE_RXBUFFER_8192) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; + else + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; + } + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl); + } + /* Disable RSC for ACK packets */ + IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, + (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); + } } static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) @@ -2041,7 +2189,7 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { struct napi_struct *napi; - q_vector = &adapter->q_vector[q_idx]; + q_vector = adapter->q_vector[q_idx]; if (!q_vector->rxr_count) continue; napi = &q_vector->napi; @@ -2064,7 +2212,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) q_vectors = 1; for (q_idx = 0; q_idx < q_vectors; q_idx++) { - q_vector = &adapter->q_vector[q_idx]; + q_vector = adapter->q_vector[q_idx]; if (!q_vector->rxr_count) continue; napi_disable(&q_vector->napi); @@ -2140,6 +2288,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) netif_set_gso_max_size(netdev, 65536); #endif +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + ixgbe_configure_fcoe(adapter); + +#endif /* IXGBE_FCOE */ ixgbe_configure_tx(adapter); ixgbe_configure_rx(adapter); for (i = 0; i < adapter->num_rx_queues; i++) @@ -2294,6 +2447,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); } +#ifdef IXGBE_FCOE + /* adjust max frame to be able to do baby jumbo for FCoE */ + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) + max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; + +#endif /* IXGBE_FCOE */ mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); if 
(max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { mhadd &= ~IXGBE_MHADD_MFS_MASK; @@ -2357,6 +2517,17 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) ixgbe_irq_enable(adapter); /* + * If this adapter has a fan, check to see if we had a failure + * before we enabled the interrupt. + */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + DPRINTK(DRV, CRIT, + "Fan has stopped, replace the adapter\n"); + } + + /* * For hot-pluggable SFP+ devices, a new SFP+ module may have * arrived before interrupts were enabled. We need to kick off * the SFP+ module setup first, then try to bring up link. @@ -2404,8 +2575,6 @@ int ixgbe_up(struct ixgbe_adapter *adapter) /* hardware has been reset, we need to reload some things */ ixgbe_configure(adapter); - ixgbe_napi_add_all(adapter); - return ixgbe_up_complete(adapter); } @@ -2445,8 +2614,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, rx_buffer_info->dma = 0; } if (rx_buffer_info->skb) { - dev_kfree_skb(rx_buffer_info->skb); + struct sk_buff *skb = rx_buffer_info->skb; rx_buffer_info->skb = NULL; + do { + struct sk_buff *this = skb; + skb = skb->prev; + dev_kfree_skb(this); + } while (skb); } if (!rx_buffer_info->page) continue; @@ -2632,7 +2806,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) if (adapter->itr_setting & 1) ixgbe_set_itr(adapter); if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter); + ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); } return work_done; } @@ -2710,6 +2884,47 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) return ret; } +#ifdef IXGBE_FCOE +/** + * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) + * @adapter: board private structure to initialize + * + * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. + * The ring feature mask is not used as a mask for FCoE, as it can take any 8 + * rx queues out of the max number of rx queues, instead, it is used as the + * index of the first rx queue used by FCoE. 
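 *
 * For illustration only (editor's example, not part of the patch): with
 * RSS contributing 16 rx queues and all 8 FCoE indices usable (assuming
 * enough online CPUs), the code below leaves f->mask == 16 and appends
 * the FCoE rings as rx queues 16..23:
 *
 *	f->mask = adapter->num_rx_queues;	(16)
 *	adapter->num_rx_queues += f->indices;	(now 24)
 *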
+ * + **/ +static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) +{ + bool ret = false; + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + + f->indices = min((int)num_online_cpus(), f->indices); + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { +#ifdef CONFIG_IXGBE_DCB + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + DPRINTK(PROBE, INFO, "FCOE enabled with DCB \n"); + ixgbe_set_dcb_queues(adapter); + } +#endif + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n"); + ixgbe_set_rss_queues(adapter); + } + /* adding FCoE rx rings to the end */ + f->mask = adapter->num_rx_queues; + adapter->num_rx_queues += f->indices; + if (adapter->num_tx_queues == 0) + adapter->num_tx_queues = f->indices; + + ret = true; + } + + return ret; +} + +#endif /* IXGBE_FCOE */ /* * ixgbe_set_num_queues: Allocate queues for device, feature dependant * @adapter: board private structure to initialize @@ -2723,6 +2938,11 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) **/ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) { +#ifdef IXGBE_FCOE + if (ixgbe_set_fcoe_queues(adapter)) + goto done; + +#endif /* IXGBE_FCOE */ #ifdef CONFIG_IXGBE_DCB if (ixgbe_set_dcb_queues(adapter)) goto done; @@ -2778,9 +2998,6 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; kfree(adapter->msix_entries); adapter->msix_entries = NULL; - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; - ixgbe_set_num_queues(adapter); } else { adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ /* @@ -2901,6 +3118,39 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) } #endif +#ifdef IXGBE_FCOE +/** + * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for FCoE mode to the assigned rings. + * + */ +static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) +{ + int i, fcoe_i = 0; + bool ret = false; + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { +#ifdef CONFIG_IXGBE_DCB + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + ixgbe_cache_ring_dcb(adapter); + fcoe_i = adapter->rx_ring[0].reg_idx + 1; + } +#endif /* CONFIG_IXGBE_DCB */ + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + ixgbe_cache_ring_rss(adapter); + fcoe_i = f->mask; + } + for (i = 0; i < f->indices; i++, fcoe_i++) + adapter->rx_ring[f->mask + i].reg_idx = fcoe_i; + ret = true; + } + return ret; +} + +#endif /* IXGBE_FCOE */ /** * ixgbe_cache_ring_register - Descriptor ring to register mapping * @adapter: board private structure to initialize @@ -2918,6 +3168,11 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) adapter->rx_ring[0].reg_idx = 0; adapter->tx_ring[0].reg_idx = 0; +#ifdef IXGBE_FCOE + if (ixgbe_cache_ring_fcoe(adapter)) + return; + +#endif /* IXGBE_FCOE */ #ifdef CONFIG_IXGBE_DCB if (ixgbe_cache_ring_dcb(adapter)) return; @@ -3004,31 +3259,20 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) * mean we disable MSI-X capabilities of the adapter. 
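 * (For illustration only, an editor's summary of the resulting fallback
 * order in the reworked code below:
 *
 *	1. MSI-X: kcalloc() msix_entries, ixgbe_acquire_msix_vectors()
 *	2. MSI:   clear DCB/RSS flags, redo queue count, pci_enable_msi()
 *	3. INTx:  if pci_enable_msi() also fails, legacy interrupts
 * )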
*/ adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); - if (!adapter->msix_entries) { - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; - ixgbe_set_num_queues(adapter); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); - err = ixgbe_alloc_queues(adapter); - if (err) { - DPRINTK(PROBE, ERR, "Unable to allocate memory " - "for queues\n"); - goto out; - } - - goto try_msi; - } + if (adapter->msix_entries) { + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; - for (vector = 0; vector < v_budget; vector++) - adapter->msix_entries[vector].entry = vector; + ixgbe_acquire_msix_vectors(adapter, v_budget); - ixgbe_acquire_msix_vectors(adapter, v_budget); + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + goto out; + } - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - goto out; + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; + ixgbe_set_num_queues(adapter); -try_msi: err = pci_enable_msi(adapter->pdev); if (!err) { adapter->flags |= IXGBE_FLAG_MSI_ENABLED; @@ -3043,6 +3287,87 @@ out: return err; } +/** + * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) +{ + int q_idx, num_q_vectors; + struct ixgbe_q_vector *q_vector; + int napi_vectors; + int (*poll)(struct napi_struct *, int); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + napi_vectors = adapter->num_rx_queues; + poll = &ixgbe_clean_rxonly; + } else { + num_q_vectors = 1; + napi_vectors = 1; + poll = &ixgbe_poll; + } + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL); + if (!q_vector) + goto err_out; + q_vector->adapter = adapter; + q_vector->v_idx = q_idx; + q_vector->eitr = adapter->eitr_param; + if (q_idx < napi_vectors) + netif_napi_add(adapter->netdev, &q_vector->napi, + (*poll), 64); + adapter->q_vector[q_idx] = q_vector; + } + + return 0; + +err_out: + while (q_idx) { + q_idx--; + q_vector = adapter->q_vector[q_idx]; + netif_napi_del(&q_vector->napi); + kfree(q_vector); + adapter->q_vector[q_idx] = NULL; + } + return -ENOMEM; +} + +/** + * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
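 *
 * For illustration only (editor's sketch of the per-vector teardown
 * performed below, mirroring ixgbe_alloc_q_vectors() above):
 *
 *	adapter->q_vector[q_idx] = NULL;
 *	if (q_idx < napi_vectors)
 *		netif_napi_del(&q_vector->napi);
 *	kfree(q_vector);
 *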
+ **/ +static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) +{ + int q_idx, num_q_vectors; + int napi_vectors; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + napi_vectors = adapter->num_rx_queues; + } else { + num_q_vectors = 1; + napi_vectors = 1; + } + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; + + adapter->q_vector[q_idx] = NULL; + if (q_idx < napi_vectors) + netif_napi_del(&q_vector->napi); + kfree(q_vector); + } +} + void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) { if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { @@ -3074,18 +3399,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) /* Number of supported queues */ ixgbe_set_num_queues(adapter); - err = ixgbe_alloc_queues(adapter); - if (err) { - DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - err = ixgbe_set_interrupt_capability(adapter); if (err) { DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); goto err_set_interrupt; } + err = ixgbe_alloc_q_vectors(adapter); + if (err) { + DPRINTK(PROBE, ERR, "Unable to allocate memory for queue " + "vectors\n"); + goto err_alloc_q_vectors; + } + + err = ixgbe_alloc_queues(adapter); + if (err) { + DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " "Tx Queue count = %u\n", (adapter->num_rx_queues > 1) ? "Enabled" : @@ -3095,11 +3427,30 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) return 0; +err_alloc_queues: + ixgbe_free_q_vectors(adapter); +err_alloc_q_vectors: + ixgbe_reset_interrupt_capability(adapter); err_set_interrupt: + return err; +} + +/** + * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) +{ kfree(adapter->tx_ring); kfree(adapter->rx_ring); -err_alloc_queues: - return err; + adapter->tx_ring = NULL; + adapter->rx_ring = NULL; + + ixgbe_free_q_vectors(adapter); + ixgbe_reset_interrupt_capability(adapter); } /** @@ -3185,10 +3536,19 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->ring_feature[RING_F_RSS].indices = rss; adapter->flags |= IXGBE_FLAG_RSS_ENABLED; adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; - if (hw->mac.type == ixgbe_mac_82598EB) + if (hw->mac.type == ixgbe_mac_82598EB) { + if (hw->device_id == IXGBE_DEV_ID_82598AT) + adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; - else if (hw->mac.type == ixgbe_mac_82599EB) + } else if (hw->mac.type == ixgbe_mac_82599EB) { adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; + adapter->flags |= IXGBE_FLAG_RSC_CAPABLE; + adapter->flags |= IXGBE_FLAG_RSC_ENABLED; +#ifdef IXGBE_FCOE + adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; +#endif /* IXGBE_FCOE */ + } #ifdef CONFIG_IXGBE_DCB /* Configure DCB traffic classes */ @@ -3203,6 +3563,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; 
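	/*
	 * For illustration only (editor's note, not part of the patch):
	 * with ixgbe_clear_interrupt_scheme() added above, setup and
	 * teardown of the interrupt scheme are now symmetric:
	 *
	 *	init:  ixgbe_set_num_queues()
	 *	       ixgbe_set_interrupt_capability()
	 *	       ixgbe_alloc_q_vectors()
	 *	       ixgbe_alloc_queues()
	 *	clear: kfree() tx_ring/rx_ring
	 *	       ixgbe_free_q_vectors()
	 *	       ixgbe_reset_interrupt_capability()
	 */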
adapter->dcb_cfg.rx_pba_cfg = pba_equal; + adapter->dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.round_robin_enable = false; adapter->dcb_set_bitmap = 0x00; ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, @@ -3213,6 +3574,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) /* default flow control settings */ hw->fc.requested_mode = ixgbe_fc_full; hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ +#ifdef CONFIG_DCB + adapter->last_lfc_mode = hw->fc.current_mode; +#endif hw->fc.high_water = IXGBE_DEFAULT_FCRTH; hw->fc.low_water = IXGBE_DEFAULT_FCRTL; hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; @@ -3503,6 +3867,8 @@ static int ixgbe_open(struct net_device *netdev) if (test_bit(__IXGBE_TESTING, &adapter->state)) return -EBUSY; + netif_carrier_off(netdev); + /* allocate transmit descriptors */ err = ixgbe_setup_all_tx_resources(adapter); if (err) @@ -3515,8 +3881,6 @@ static int ixgbe_open(struct net_device *netdev) ixgbe_configure(adapter); - ixgbe_napi_add_all(adapter); - err = ixgbe_request_irq(adapter); if (err) goto err_req_irq; @@ -3568,55 +3932,6 @@ static int ixgbe_close(struct net_device *netdev) return 0; } -/** - * ixgbe_napi_add_all - prep napi structs for use - * @adapter: private struct - * - * helper function to napi_add each possible q_vector->napi - */ -void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) -{ - int q_idx, q_vectors; - struct net_device *netdev = adapter->netdev; - int (*poll)(struct napi_struct *, int); - - /* check if we already have our netdev->napi_list populated */ - if (&netdev->napi_list != netdev->napi_list.next) - return; - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - poll = &ixgbe_clean_rxonly; - /* Only enable as many vectors as we have rx queues. */ - q_vectors = adapter->num_rx_queues; - } else { - poll = &ixgbe_poll; - /* only one q_vector for legacy modes */ - q_vectors = 1; - } - - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx]; - netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); - } -} - -void ixgbe_napi_del_all(struct ixgbe_adapter *adapter) -{ - int q_idx; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* legacy and MSI only use one vector */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; - - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx]; - if (!q_vector->rxr_count) - continue; - netif_napi_del(&q_vector->napi); - } -} - #ifdef CONFIG_PM static int ixgbe_resume(struct pci_dev *pdev) { @@ -3626,7 +3941,8 @@ static int ixgbe_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - err = pci_enable_device(pdev); + + err = pci_enable_device_mem(pdev); if (err) { printk(KERN_ERR "ixgbe: Cannot enable PCI device from " "suspend\n"); @@ -3634,8 +3950,7 @@ static int ixgbe_resume(struct pci_dev *pdev) } pci_set_master(pdev); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); + pci_wake_from_d3(pdev, false); err = ixgbe_init_interrupt_scheme(adapter); if (err) { @@ -3679,11 +3994,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) ixgbe_free_all_tx_resources(adapter); ixgbe_free_all_rx_resources(adapter); } - ixgbe_reset_interrupt_capability(adapter); - ixgbe_napi_del_all(adapter); - INIT_LIST_HEAD(&netdev->napi_list); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); + ixgbe_clear_interrupt_scheme(adapter); #ifdef CONFIG_PM retval = 
pci_save_state(pdev); @@ -3711,13 +4022,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); } - if (wufc && hw->mac.type == ixgbe_mac_82599EB) { - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); - } else { - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - } + if (wufc && hw->mac.type == ixgbe_mac_82599EB) + pci_wake_from_d3(pdev, true); + else + pci_wake_from_d3(pdev, false); *enable_wake = !!wufc; @@ -3772,9 +4080,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; if (hw->mac.type == ixgbe_mac_82599EB) { + u64 rsc_count = 0; for (i = 0; i < 16; i++) adapter->hw_rx_no_dma_resources += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + for (i = 0; i < adapter->num_rx_queues; i++) + rsc_count += adapter->rx_ring[i].rsc_count; + adapter->rsc_count = rsc_count; } adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); @@ -3821,6 +4133,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); +#ifdef IXGBE_FCOE + adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); +#endif /* IXGBE_FCOE */ } else { adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); @@ -3896,7 +4216,7 @@ static void ixgbe_watchdog(unsigned long data) int i; for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) - eics |= (1 << i); + eics |= ((u64)1 << i); /* Cause software interrupt to ensure rx rings are cleaned */ switch (hw->mac.type) { @@ -3916,16 +4236,9 @@ static void ixgbe_watchdog(unsigned long data) break; case ixgbe_mac_82599EB: if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - /* - * EICS(0..15) first 0-15 q vectors - * EICS[1] (16..31) q vectors 16-31 - * EICS[2] (0..31) q vectors 32-63 - */ - IXGBE_WRITE_REG(hw, IXGBE_EICS, - (u32)(eics & 0xFFFF)); + IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0), + (u32)(eics & 0xFFFFFFFF)); IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1), - (u32)(eics & 0xFFFF0000)); - IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(2), (u32)(eics >> 32)); } else { /* @@ -4011,16 +4324,32 @@ static void ixgbe_watchdog_task(struct work_struct *work) struct ixgbe_hw *hw = &adapter->hw; u32 link_speed = adapter->link_speed; bool link_up = adapter->link_up; + int i; + struct ixgbe_ring *tx_ring; + int some_tx_pending = 0; adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) { +#ifdef CONFIG_DCB + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + hw->mac.ops.setup_fc(hw, i); + } else { + hw->mac.ops.setup_fc(hw, 0); + } +#else + hw->mac.ops.setup_fc(hw, 0); +#endif + } + if (link_up || time_after(jiffies, (adapter->link_check_timeout + IXGBE_TRY_LINK_TIMEOUT))) { - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); } adapter->link_up 
= link_up; adapter->link_speed = link_speed; @@ -4068,6 +4397,25 @@ static void ixgbe_watchdog_task(struct work_struct *work) } } + if (!netif_carrier_ok(netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) { + tx_ring = &adapter->tx_ring[i]; + if (tx_ring->next_to_use != tx_ring->next_to_clean) { + some_tx_pending = 1; + break; + } + } + + if (some_tx_pending) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + schedule_work(&adapter->reset_task); + } + } + ixgbe_update_stats(adapter); adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; } @@ -4196,12 +4544,18 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, if (ip_hdr(skb)->protocol == IPPROTO_TCP) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + else if (ip_hdr(skb)->protocol == IPPROTO_SCTP) + type_tucmd_mlhl |= + IXGBE_ADVTXD_TUCMD_L4T_SCTP; break; case cpu_to_be16(ETH_P_IPV6): /* XXX what about other V6 headers?? */ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP) + type_tucmd_mlhl |= + IXGBE_ADVTXD_TUCMD_L4T_SCTP; break; default: if (unlikely(net_ratelimit())) { @@ -4234,10 +4588,12 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, static int ixgbe_tx_map(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring, - struct sk_buff *skb, unsigned int first) + struct sk_buff *skb, u32 tx_flags, + unsigned int first) { struct ixgbe_tx_buffer *tx_buffer_info; - unsigned int len = skb_headlen(skb); + unsigned int len; + unsigned int total = skb->len; unsigned int offset = 0, size, count = 0, i; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int f; @@ -4252,6 +4608,11 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, map = skb_shinfo(skb)->dma_maps; + if (tx_flags & IXGBE_TX_FLAGS_FCOE) + /* excluding fcoe_crc_eof for FCoE */ + total -= sizeof(struct fcoe_crc_eof); + + len = min(skb_headlen(skb), total); while (len) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); @@ -4262,6 +4623,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, tx_buffer_info->next_to_watch = i; len -= size; + total -= size; offset += size; count++; @@ -4276,7 +4638,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, struct skb_frag_struct *frag; frag = &skb_shinfo(skb)->frags[f]; - len = frag->size; + len = min((unsigned int)frag->size, total); offset = 0; while (len) { @@ -4293,9 +4655,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, tx_buffer_info->next_to_watch = i; len -= size; + total -= size; offset += size; count++; } + if (total == 0) + break; } tx_ring->tx_buffer_info[i].skb = skb; @@ -4337,6 +4702,13 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, olinfo_status |= IXGBE_TXD_POPTS_TXSM << IXGBE_ADVTXD_POPTS_SHIFT; + if (tx_flags & IXGBE_TX_FLAGS_FCOE) { + olinfo_status |= IXGBE_ADVTXD_CC; + olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); + if (tx_flags & IXGBE_TX_FLAGS_FSO) + cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; + } + olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); i = tx_ring->next_to_use; @@ -4433,10 +4805,16 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; } - /* three things can cause us to need a context descriptor */ + + if ((adapter->flags & 
IXGBE_FLAG_FCOE_ENABLED) && + (skb->protocol == htons(ETH_P_FCOE))) + tx_flags |= IXGBE_TX_FLAGS_FCOE; + + /* four things can cause us to need a context descriptor */ if (skb_is_gso(skb) || (skb->ip_summed == CHECKSUM_PARTIAL) || - (tx_flags & IXGBE_TX_FLAGS_VLAN)) + (tx_flags & IXGBE_TX_FLAGS_VLAN) || + (tx_flags & IXGBE_TX_FLAGS_FCOE)) count++; count += TXD_USE_COUNT(skb_headlen(skb)); @@ -4448,23 +4826,35 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } - if (skb->protocol == htons(ETH_P_IP)) - tx_flags |= IXGBE_TX_FLAGS_IPV4; first = tx_ring->next_to_use; - tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - if (tso) - tx_flags |= IXGBE_TX_FLAGS_TSO; - else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && - (skb->ip_summed == CHECKSUM_PARTIAL)) - tx_flags |= IXGBE_TX_FLAGS_CSUM; + if (tx_flags & IXGBE_TX_FLAGS_FCOE) { +#ifdef IXGBE_FCOE + /* setup tx offload for FCoE */ + tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + if (tso) + tx_flags |= IXGBE_TX_FLAGS_FSO; +#endif /* IXGBE_FCOE */ + } else { + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= IXGBE_TX_FLAGS_IPV4; + tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } - count = ixgbe_tx_map(adapter, tx_ring, skb, first); + if (tso) + tx_flags |= IXGBE_TX_FLAGS_TSO; + else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && + (skb->ip_summed == CHECKSUM_PARTIAL)) + tx_flags |= IXGBE_TX_FLAGS_CSUM; + } + count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); if (count) { ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, hdr_len); @@ -4519,6 +4909,82 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) return 0; } +static int +ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 value; + int rc; + + if (prtad != hw->phy.mdio.prtad) + return -EINVAL; + rc = hw->phy.ops.read_reg(hw, addr, devad, &value); + if (!rc) + rc = value; + return rc; +} + +static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (prtad != hw->phy.mdio.prtad) + return -EINVAL; + return hw->phy.ops.write_reg(hw, addr, devad, value); +} + +static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); +} + +/** + * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding + * netdev->dev_addr_list + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int ixgbe_add_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_mac_info *mac = &adapter->hw.mac; + + if (is_valid_ether_addr(mac->san_addr)) { + rtnl_lock(); + err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } + return err; +} + +/** + * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding + * netdev->dev_addr_list + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ 
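/*
 * For illustration only (editor's sketch, not part of the patch): with
 * the mdio_if_info hooked up in ixgbe_probe() below, a clause-45 PHY
 * register read reduces to a call through the shared mdio helpers.
 * Guarded out since it is an example rather than driver code;
 * MDIO_MMD_PMAPMD and MDIO_DEVID1 are assumed to be the generic
 * constants from linux/mdio.h.
 */
#if 0
static int example_read_phy_devid1(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct mdio_if_info *mdio = &adapter->hw.phy.mdio;

	/* read the PMA/PMD device identifier (high word) via clause 45 */
	return mdio->mdio_read(netdev, mdio->prtad, MDIO_MMD_PMAPMD,
			       MDIO_DEVID1);
}
#endif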
+static int ixgbe_del_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_mac_info *mac = &adapter->hw.mac; + + if (is_valid_ether_addr(mac->san_addr)) { + rtnl_lock(); + err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } + return err; +} + #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs @@ -4552,9 +5018,14 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_vlan_rx_register = ixgbe_vlan_rx_register, .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, + .ndo_do_ioctl = ixgbe_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif +#ifdef IXGBE_FCOE + .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, + .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, +#endif /* IXGBE_FCOE */ }; /** @@ -4577,9 +5048,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; static int cards_found; int i, err, pci_using_dac; +#ifdef IXGBE_FCOE + u16 device_caps; +#endif u32 part_num, eec; - err = pci_enable_device(pdev); + err = pci_enable_device_mem(pdev); if (err) return err; @@ -4599,9 +5073,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, pci_using_dac = 0; } - err = pci_request_regions(pdev, ixgbe_driver_name); + err = pci_request_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM), ixgbe_driver_name); if (err) { - dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); goto err_pci_reg; } @@ -4665,6 +5141,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, /* PHY */ memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); hw->phy.sfp_type = ixgbe_sfp_type_unknown; + /* ixgbe_identify_phy_generic will set prtad and mmds properly */ + hw->phy.mdio.prtad = MDIO_PRTAD_NONE; + hw->phy.mdio.mmds = 0; + hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + hw->phy.mdio.dev = netdev; + hw->phy.mdio.mdio_read = ixgbe_mdio_read; + hw->phy.mdio.mdio_write = ixgbe_mdio_write; /* set up this timer and work struct before calling get_invariants * which might start the timer @@ -4702,9 +5185,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) goto err_sw_init; + /* + * If there is a fan on this device and it has failed, log the + * failure. 
+ */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + DPRINTK(PROBE, CRIT, + "Fan has stopped, replace the adapter\n"); + } + /* reset_hw fills in the perm_addr as well */ err = hw->mac.ops.reset_hw(hw); - if (err) { + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { + dev_err(&adapter->pdev->dev, "failed to load because an " + "unsupported SFP+ module type was detected.\n"); + goto err_sw_init; + } else if (err) { dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); goto err_sw_init; } @@ -4720,6 +5218,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_TSO6; netdev->features |= NETIF_F_GRO; + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + netdev->features |= NETIF_F_SCTP_CSUM; + netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; netdev->vlan_features |= NETIF_F_IP_CSUM; @@ -4732,9 +5233,26 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->dcbnl_ops = &dcbnl_ops; #endif +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + if (hw->mac.ops.get_device_caps) { + hw->mac.ops.get_device_caps(hw, &device_caps); + if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) { + netdev->features |= NETIF_F_FCOE_CRC; + netdev->features |= NETIF_F_FSO; + netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + } else { + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + } + } + } +#endif /* IXGBE_FCOE */ if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; + if (adapter->flags & IXGBE_FLAG_RSC_ENABLED) + netdev->features |= NETIF_F_LRO; + /* make sure the EEPROM is good */ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); @@ -4774,6 +5292,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, device_init_wakeup(&adapter->pdev->dev, true); device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + /* pick up the PCI bus settings for reporting later */ + hw->mac.ops.get_bus_info(hw); + /* print bus type/speed/width info */ dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n", ((hw->bus.speed == ixgbe_bus_speed_5000) ? 
"5.0Gb/s": @@ -4807,13 +5328,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, /* reset the hardware with the new settings */ hw->mac.ops.start_hw(hw); - netif_carrier_off(netdev); - strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) goto err_register; + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + #ifdef CONFIG_IXGBE_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; @@ -4823,6 +5345,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, ixgbe_setup_dca(adapter); } #endif + /* add san mac addr to netdev */ + ixgbe_add_sanmac_netdev(netdev); dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); cards_found++; @@ -4831,8 +5355,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, err_register: ixgbe_release_hw_control(adapter); err_hw_init: + ixgbe_clear_interrupt_scheme(adapter); err_sw_init: - ixgbe_reset_interrupt_capability(adapter); err_eeprom: clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); del_timer_sync(&adapter->sfp_timer); @@ -4843,7 +5367,8 @@ err_eeprom: err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_regions(pdev); + pci_release_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM)); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -4887,19 +5412,27 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) } #endif +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + ixgbe_cleanup_fcoe(adapter); + +#endif /* IXGBE_FCOE */ + + /* remove the added san mac */ + ixgbe_del_sanmac_netdev(netdev); + if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); - ixgbe_reset_interrupt_capability(adapter); + ixgbe_clear_interrupt_scheme(adapter); ixgbe_release_hw_control(adapter); iounmap(adapter->hw.hw_addr); - pci_release_regions(pdev); + pci_release_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM)); DPRINTK(PROBE, INFO, "complete\n"); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); free_netdev(netdev); @@ -4927,6 +5460,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, netif_device_detach(netdev); + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + if (netif_running(netdev)) ixgbe_down(adapter); pci_disable_device(pdev); @@ -4948,7 +5484,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) pci_ers_result_t result; int err; - if (pci_enable_device(pdev)) { + if (pci_enable_device_mem(pdev)) { DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; @@ -4956,8 +5492,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) pci_set_master(pdev); pci_restore_state(pdev); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); + pci_wake_from_d3(pdev, false); ixgbe_reset(adapter); IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 14e9606..6d385ea 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -44,7 +44,6 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); static bool ixgbe_get_i2c_data(u32 *i2cctl); static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); -static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); static s32 
ixgbe_get_phy_id(struct ixgbe_hw *hw); @@ -61,8 +60,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) if (hw->phy.type == ixgbe_phy_unknown) { for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { - if (ixgbe_validate_phy_addr(hw, phy_addr)) { - hw->phy.addr = phy_addr; + if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { ixgbe_get_phy_id(hw); hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); @@ -78,26 +76,6 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) } /** - * ixgbe_validate_phy_addr - Determines phy address is valid - * @hw: pointer to hardware structure - * - **/ -static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) -{ - u16 phy_id = 0; - bool valid = false; - - hw->phy.addr = phy_addr; - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); - - if (phy_id != 0xFFFF && phy_id != 0x0) - valid = true; - - return valid; -} - -/** * ixgbe_get_phy_id - Get the phy type * @hw: pointer to hardware structure * @@ -108,14 +86,12 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) u16 phy_id_high = 0; u16 phy_id_low = 0; - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, + status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, &phy_id_high); if (status == 0) { hw->phy.id = (u32)(phy_id_high << 16); - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, + status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, &phy_id_low); hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); @@ -160,9 +136,8 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) * Perform soft PHY reset to the PHY_XS. * This will cause a soft reset to the PHY */ - return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, - IXGBE_MDIO_PHY_XS_DEV_TYPE, - IXGBE_MDIO_PHY_XS_RESET); + return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, + MDIO_CTRL1_RESET); } /** @@ -192,7 +167,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -223,7 +198,8 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.mdio.prtad << + IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -292,7 +268,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -323,7 +299,8 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.mdio.prtad << + IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_WRITE | 
IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -365,7 +342,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) s32 status = IXGBE_NOT_IMPLEMENTED; u32 time_out; u32 max_time_out = 10; - u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + u16 autoneg_reg; /* * Set advertisement settings in PHY based on autoneg_advertised * settings. If autoneg_advertised = 0, then advertise default values * tnx devices cannot be "forced" to an autoneg 10G and fail. But can * for a 1G. */ - hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) - autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */ + autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; else - autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */ + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; - hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); /* Restart PHY autonegotiation and wait for completion */ - hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg); - autoneg_reg |= IXGBE_MII_RESTART; + autoneg_reg |= MDIO_AN_CTRL1_RESTART; - hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); /* Wait for autonegotiation to finish */ for (time_out = 0; time_out < max_time_out; time_out++) { udelay(10); /* Check if autonegotiation has completed */ status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &autoneg_reg); - autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE; - if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) { + autoneg_reg &= MDIO_AN_STAT1_COMPLETE; + if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) { status = 0; break; } @@ -457,23 +429,21 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) s32 ret_val = 0; u32 i; - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, - IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); /* reset the PHY and poll for completion */ - hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, - IXGBE_MDIO_PHY_XS_DEV_TYPE, - (phy_data | IXGBE_MDIO_PHY_XS_RESET)); + hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, + (phy_data | MDIO_CTRL1_RESET)); for (i = 0; i < 100; i++) { - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, - IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); - if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0) + hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, + &phy_data); + if ((phy_data & MDIO_CTRL1_RESET) == 0) break; msleep(10); } - if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { + if ((phy_data & MDIO_CTRL1_RESET) != 0) { hw_dbg(hw, "PHY reset did not complete.\n"); ret_val = IXGBE_ERR_PHY; goto out; } @@ -509,7 +479,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) for (i = 0; i < edata; i++) { hw->eeprom.ops.read(hw, data_offset, &eword); hw->phy.ops.write_reg(hw, phy_offset, - IXGBE_TWINAX_DEV, eword); + MDIO_MMD_PMAPMD, eword); hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, phy_offset); data_offset++; @@ -552,6 +522,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) { s32 status = 
IXGBE_ERR_PHY_ADDR_INVALID; u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; u8 identifier = 0; u8 comp_codes_1g = 0; u8 comp_codes_10g = 0; @@ -620,8 +591,18 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) hw->phy.sfp_type = ixgbe_sfp_type_unknown; } + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the SFP+ PHY is dual speed or not. */ + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + /* Determine PHY vendor */ - if (hw->phy.type == ixgbe_phy_unknown) { + if (hw->phy.type != ixgbe_phy_nl) { hw->phy.id = identifier; hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE0, @@ -662,23 +643,35 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) break; } } - if (hw->mac.type == ixgbe_mac_82598EB || - (hw->phy.sfp_type != ixgbe_sfp_type_sr && - hw->phy.sfp_type != ixgbe_sfp_type_lr && - hw->phy.sfp_type != ixgbe_sfp_type_srlr_core0 && - hw->phy.sfp_type != ixgbe_sfp_type_srlr_core1)) { + + /* All DA cables are supported */ + if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE) { + status = 0; + goto out; + } + + /* 1G SFP modules are not supported */ + if (comp_codes_10g == 0) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + /* Anything else 82598-based is supported */ + if (hw->mac.type == ixgbe_mac_82598EB) { status = 0; goto out; } - hw->eeprom.ops.read(hw, IXGBE_PHY_ENFORCE_INTEL_SFP_OFFSET, - &enforce_sfp); - if (!(enforce_sfp & IXGBE_PHY_ALLOW_ANY_SFP)) { + /* This is guaranteed to be 82599, no need to check for NULL */ + hw->mac.ops.get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { /* Make sure we're a supported PHY type */ if (hw->phy.type == ixgbe_phy_sfp_intel) { status = 0; } else { hw_dbg(hw, "SFP+ module not supported\n"); + hw->phy.type = ixgbe_phy_sfp_unsupported; status = IXGBE_ERR_SFP_NOT_SUPPORTED; } } else { @@ -1279,7 +1272,7 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, udelay(10); status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + MDIO_MMD_VEND1, &phy_data); phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; @@ -1307,8 +1300,7 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, { s32 status = 0; - status = hw->phy.ops.read_reg(hw, TNX_FW_REV, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, MDIO_MMD_VEND1, firmware_version); return status; diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index cc5f1b3..c9964b7 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -44,6 +44,7 @@ /* Bitmasks */ #define IXGBE_SFF_TWIN_AX_CAPABLE 0x80 #define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 #define IXGBE_SFF_10GBASESR_CAPABLE 0x10 #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 #define IXGBE_I2C_EEPROM_READ_MASK 0x100 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 030ff0a..ba3ed0f 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -29,6 +29,7 @@ #define _IXGBE_TYPE_H_ #include <linux/types.h> +#include <linux/mdio.h> /* Vendor ID */ #define IXGBE_INTEL_VENDOR_ID 0x8086 @@ -48,6 +49,7 @@ 
#define IXGBE_DEV_ID_82599 0x10D8 #define IXGBE_DEV_ID_82599_KX4 0x10F7 #define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC /* General Registers */ #define IXGBE_CTRL 0x00000 @@ -443,6 +445,21 @@ #define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 +/* HW RSC registers */ +#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ + (0x0D02C + ((_i - 64) * 0x40))) +#define IXGBE_RSCDBU 0x03028 +#define IXGBE_RSCCTL_RSCEN 0x01 +#define IXGBE_RSCCTL_MAXDESC_1 0x00 +#define IXGBE_RSCCTL_MAXDESC_4 0x04 +#define IXGBE_RSCCTL_MAXDESC_8 0x08 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 +#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 + /* DCB registers */ #define IXGBE_RTRPCS 0x02430 #define IXGBE_RTTDCS 0x04900 @@ -462,6 +479,63 @@ #define IXGBE_RTTDTECC_NO_BCN 0x00000100 #define IXGBE_RTTBCNRC 0x04984 +/* FCoE registers */ +#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ +#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */ +#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ +#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ +#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ +#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4)) +#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ +#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ +#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 +#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 +#define IXGBE_FCBUFF_OFFSET_SHIFT 16 +#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ +#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ +#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ +#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ +#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 + +/* FCoE SOF/EOF */ +#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ +#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ +#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ +#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +/* FCoE Filter Context Registers */ +#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ +#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ +#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ +#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ +#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ +#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ +#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ +#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ +#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ +/* FCoE Receive Control */ +#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ +#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. 
Header */ +#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ +#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 +/* FCoE Redirection */ +#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ +#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ +#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ +#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ +#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ +#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ + /* Stats registers */ #define IXGBE_CRCERRS 0x04000 #define IXGBE_ILLERRC 0x04004 @@ -533,6 +607,13 @@ #define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ #define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ #define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */ +#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ +#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ +#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ +#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ +#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ +#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ /* Management */ #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ @@ -833,13 +914,7 @@ /* Omer bit masks */ #define IXGBE_CORECTL_WRITE_CMD 0x00010000 -/* Device Type definitions for new protocol MDIO commands */ -#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 -#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 -#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 -#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ -#define IXGBE_TWINAX_DEV 1 +/* MDIO definitions */ #define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ @@ -850,31 +925,10 @@ #define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 -#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ -#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ -#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ -#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ -#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ -#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ -#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ -#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ -#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ -#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ -#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ -#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ - #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ -/* MII clause 22/28 definitions */ -#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 - -#define IXGBE_MII_SPEED_SELECTION_REG 0x10 -#define IXGBE_MII_RESTART 0x200 -#define IXGBE_MII_AUTONEG_COMPLETE 0x20 -#define IXGBE_MII_AUTONEG_REG 0x0 - #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 #define IXGBE_MAX_PHY_ADDR 32 @@ -898,8 +952,6 @@ #define IXGBE_CONTROL_NL 0x000F #define IXGBE_CONTROL_EOL_NL 0x0FFF 
#define IXGBE_CONTROL_SOL_NL 0x0000 -#define IXGBE_PHY_ENFORCE_INTEL_SFP_OFFSET 0x002C -#define IXGBE_PHY_ALLOW_ANY_SFP 0x1 /* General purpose Interrupt Enable */ #define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ @@ -958,6 +1010,8 @@ #define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ #define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ #define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ +#define IXGBE_VT_CTL_POOL_SHIFT 7 +#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) /* VMOLR bitmasks */ #define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ @@ -1148,6 +1202,7 @@ /* Interrupt Vector Allocation Registers */ #define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_REG_NUM_82599 64 #define IXGBE_IVAR_TXRX_ENTRY 96 #define IXGBE_IVAR_RX_ENTRY 64 #define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) @@ -1163,6 +1218,7 @@ /* ETYPE Queue Filter/Select Bit Masks */ #define IXGBE_MAX_ETQF_FILTERS 8 +#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ #define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ #define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ #define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ @@ -1185,6 +1241,7 @@ */ #define IXGBE_ETQF_FILTER_EAPOL 0 #define IXGBE_ETQF_FILTER_BCN 1 +#define IXGBE_ETQF_FILTER_FCOE 2 #define IXGBE_ETQF_FILTER_1588 3 /* VLAN Control Bit Masks */ #define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ @@ -1382,6 +1439,8 @@ #define IXGBE_FW_PTR 0x0F #define IXGBE_PBANUM0_PTR 0x15 #define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_DEVICE_CAPS 0x2C +#define IXGBE_SAN_MAC_ADDR_PTR 0x28 #define IXGBE_PCIE_MSIX_82599_CAPS 0x72 #define IXGBE_PCIE_MSIX_82598_CAPS 0x62 @@ -1425,6 +1484,11 @@ #define IXGBE_EERD_ATTEMPTS 100000 #endif +#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 + /* PCI Bus Info */ #define IXGBE_PCI_LINK_STATUS 0xB2 #define IXGBE_PCI_LINK_WIDTH 0x3F0 @@ -1553,7 +1617,8 @@ #define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ #define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ #define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 packet buffer */ -#define IXGBE_MTQC_64VF 0x8 /* 2 TX Queues per pool w/64VF's */ +#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ +#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ #define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ /* Receive Descriptor bit definitions */ @@ -1585,6 +1650,8 @@ #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ #define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ #define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ +#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ #define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */ #define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ #define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ @@ -1604,12 +1671,19 @@ #define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ #define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ #define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ /* PSRTYPE bit definitions */ #define IXGBE_PSRTYPE_TCPHDR 0x00000010 #define IXGBE_PSRTYPE_UDPHDR 0x00000020 #define IXGBE_PSRTYPE_IPV4HDR 0x00000100 #define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 /* SRRCTL bit definitions */ #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ @@ -1836,6 +1910,16 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ +#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */ +#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ @@ -1861,7 +1945,7 @@ typedef u32 ixgbe_physical_layer; #define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 #define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 #define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 -#define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004 +#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 #define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 #define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 #define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 @@ -1870,6 +1954,8 @@ typedef u32 ixgbe_physical_layer; #define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100 #define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 #define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 enum ixgbe_eeprom_type { ixgbe_eeprom_uninitialized = 0, @@ -1897,6 +1983,7 @@ enum ixgbe_phy_type { ixgbe_phy_sfp_ftl, ixgbe_phy_sfp_unknown, ixgbe_phy_sfp_intel, + ixgbe_phy_sfp_unsupported, ixgbe_phy_generic }; @@ -2075,6 +2162,12 @@ struct ixgbe_hw_stats { u64 fdirfstat_fremove; u64 fdirmatch; u64 fdirmiss; + u64 fccrc; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; }; /* forward declaration */ @@ -2101,6 +2194,8 @@ struct ixgbe_mac_operations { enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); u32 (*get_supported_physical_layer)(struct ixgbe_hw *); s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); s32 (*stop_adapter)(struct ixgbe_hw *); s32 (*get_bus_info)(struct ixgbe_hw *); void (*set_lan_id)(struct ixgbe_hw *); @@ -2146,6 +2241,7 @@ struct ixgbe_mac_operations { struct ixgbe_phy_operations { s32 (*identify)(struct ixgbe_hw *); s32 (*identify_sfp)(struct ixgbe_hw *); + s32 (*init)(struct ixgbe_hw *); s32 (*reset)(struct ixgbe_hw *); s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); @@ -2173,6 +2269,7 @@ struct ixgbe_mac_info { enum ixgbe_mac_type type; u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + 
u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; s32 mc_filter_type; u32 mcft_size; u32 vft_size; @@ -2189,10 +2286,11 @@ struct ixgbe_mac_info { struct ixgbe_phy_info { struct ixgbe_phy_operations ops; + struct mdio_if_info mdio; enum ixgbe_phy_type type; - u32 addr; u32 id; enum ixgbe_sfp_type sfp_type; + bool sfp_setup_needed; u32 revision; enum ixgbe_media_type media_type; bool reset_disable;
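
To make the new plumbing concrete, a few sketches follow; none of them are part of the patch itself. First, the clause-45 MDIO glue: the patch hangs the driver's PHY register accessors off a struct mdio_if_info so that the generic mdio_mii_ioctl() helper can service the MII ioctls. A minimal version of the same wiring for a hypothetical driver (my_adapter, my_ioctl and the mdio member name are illustrative, not taken from the patch):

#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/netdevice.h>

struct my_adapter {
        struct mdio_if_info mdio;       /* filled in at probe time */
};

/* Route SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG to the generic helper,
 * which calls back into the mdio_read/mdio_write hooks. */
static int my_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        struct my_adapter *priv = netdev_priv(dev);

        return mdio_mii_ioctl(&priv->mdio, if_mii(req), cmd);
}

As in the patch, the prtad/mmds fields and the two callbacks are all the helper needs; returning the register value as a non-negative int is the read callback's contract.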
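
Next, the FCoE DDP context word: the IXGBE_FCBUFF layout above packs a valid bit, a buffer-size code, a buffer count and a first-buffer offset into one register. A sketch of the encoding only (fcbuff_compose is an illustrative name; the driver's real DDP setup lives in ixgbe_fcoe.c and may program other fields as well):

#include "ixgbe_type.h"

/* Pack an FCBUFF word from the field masks and shifts defined above. */
static u32 fcbuff_compose(u32 bufsize_code, u32 bufcnt, u32 first_off)
{
        u32 fcbuff = IXGBE_FCBUFF_VALID;                /* context valid */

        fcbuff |= (bufsize_code << IXGBE_FCBUFF_BUFFSIZE_SHIFT) &
                  IXGBE_FCBUFF_BUFFSIZE;                /* user buffer size */
        fcbuff |= (bufcnt << IXGBE_FCBUFF_BUFFCNT_SHIFT) &
                  IXGBE_FCBUFF_BUFFCNT;                 /* number of buffers */
        fcbuff |= (first_off << IXGBE_FCBUFF_OFFSET_SHIFT) &
                  IXGBE_FCBUFF_OFFSET;                  /* first buffer offset */
        return fcbuff;
}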
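
The FCoE redirection table is the receive-side counterpart: up to IXGBE_FCRETA_SIZE entries, each holding a 7-bit queue index, switched on via FCRECTL. A sketch of filling it round-robin from a caller-supplied queue list, assuming the driver's IXGBE_WRITE_REG accessor and a non-empty list (fcreta_setup is an illustrative name, not the driver's code):

/* Fill the 8-entry FCoE redirection table and enable it. */
static void fcreta_setup(struct ixgbe_hw *hw, const u8 *queues, u32 nqueues)
{
        u32 i;

        for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
                u32 entry = queues[i % nqueues] & IXGBE_FCRETA_ENTRY_MASK;

                IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), entry);
        }
        IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
}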
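
On receive completion, the two FCSTAT bits in the advanced descriptor's status word report whether a frame matched a DDP context. Decoding them is a straight switch over the values defined above (a sketch for illustration; the driver's own receive path acts on these states rather than naming them):

/* Map the FCSTAT field of a descriptor's status word to a label. */
static const char *fcstat_name(u32 staterr)
{
        switch (staterr & IXGBE_RXDADV_STAT_FCSTAT) {
        case IXGBE_RXDADV_STAT_FCSTAT_NOMTCH:
                return "no DDP context match";
        case IXGBE_RXDADV_STAT_FCSTAT_NODDP:
                return "context matched, no DDP done";
        case IXGBE_RXDADV_STAT_FCSTAT_FCPRSP:
                return "FCP_RSP received";
        case IXGBE_RXDADV_STAT_FCSTAT_DDP:
        default:
                return "context matched, DDP done";
        }
}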
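
Finally, the transmit side: the IXGBE_ADVTXD_FCOEF_* bits carry the FC frame's SOF/EOF orientation in the context descriptor. For a frame that both opens and closes its sequence, the field could plausibly be built as below. This is a sketch of the bit encoding only; the real orientation decisions belong to the driver's FSO path and the FC framing rules.

/* FCoE fields for a frame that is alone in its sequence:
 * orientation start + end, terminated with EOFt. */
static u32 fcoef_single_frame_seq(void)
{
        return IXGBE_ADVTXD_FCOEF_ORIS |        /* sequence starts here */
               IXGBE_ADVTXD_FCOEF_ORIE |        /* sequence ends here */
               IXGBE_ADVTXD_FCOEF_EOF_T;        /* 01: EOFt */
}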