From 03dd4cb26d967f9588437b0fc9cc0e8353322bb7 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Fri, 25 Mar 2016 03:53:42 -0300
Subject: Linux-libre 4.5-gnu

---
 drivers/net/ethernet/intel/Kconfig | 10 +
 drivers/net/ethernet/intel/e1000/e1000.h | 7 +-
 drivers/net/ethernet/intel/e1000/e1000_hw.c | 216 ++---
 drivers/net/ethernet/intel/e1000/e1000_main.c | 133 +--
 drivers/net/ethernet/intel/e1000e/defines.h | 3 +-
 drivers/net/ethernet/intel/e1000e/e1000.h | 2 +-
 drivers/net/ethernet/intel/e1000e/hw.h | 1 +
 drivers/net/ethernet/intel/e1000e/ich8lan.c | 45 +-
 drivers/net/ethernet/intel/e1000e/netdev.c | 92 +-
 drivers/net/ethernet/intel/fm10k/Makefile | 20 +-
 drivers/net/ethernet/intel/fm10k/fm10k.h | 23 +-
 drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 4 -
 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 4 -
 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 30 +-
 drivers/net/ethernet/intel/fm10k/fm10k_main.c | 80 +-
 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 57 +-
 drivers/net/ethernet/intel/fm10k/fm10k_mbx.h | 8 +-
 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 75 +-
 drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 211 +++--
 drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 151 +--
 drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 20 +-
 drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 20 +-
 drivers/net/ethernet/intel/fm10k/fm10k_tlv.h | 11 +-
 drivers/net/ethernet/intel/fm10k/fm10k_type.h | 43 +-
 drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 81 +-
 drivers/net/ethernet/intel/fm10k/fm10k_vf.h | 2 +-
 drivers/net/ethernet/intel/i40e/i40e.h | 46 +-
 drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 23 +-
 drivers/net/ethernet/intel/i40e/i40e_common.c | 1 -
 drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 28 +-
 drivers/net/ethernet/intel/i40e/i40e_devids.h | 1 -
 drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 108 ++-
 drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 2 -
 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 2 +-
 drivers/net/ethernet/intel/i40e/i40e_main.c | 1001 ++++++++++++++------
 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 114 ++-
 drivers/net/ethernet/intel/i40e/i40e_txrx.h | 3 +-
 drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 1 +
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 100 +-
 .../net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 23 +-
 drivers/net/ethernet/intel/i40evf/i40e_common.c | 1 -
 drivers/net/ethernet/intel/i40evf/i40e_devids.h | 1 -
 drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 226 +++--
 drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 18 +
 drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 1 +
 drivers/net/ethernet/intel/i40evf/i40evf.h | 19 +-
 drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 79 +-
 drivers/net/ethernet/intel/i40evf/i40evf_main.c | 437 ++++++---
 .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 86 +-
 drivers/net/ethernet/intel/igb/e1000_82575.c | 21 +-
 drivers/net/ethernet/intel/igb/e1000_defines.h | 6 +-
 drivers/net/ethernet/intel/igb/e1000_hw.h | 1 +
 drivers/net/ethernet/intel/igb/e1000_i210.c | 32 +-
 drivers/net/ethernet/intel/igb/e1000_i210.h | 3 +-
 drivers/net/ethernet/intel/igb/e1000_phy.c | 229 +++--
 drivers/net/ethernet/intel/igb/e1000_phy.h | 16 +-
 drivers/net/ethernet/intel/igb/e1000_regs.h | 4 +-
 drivers/net/ethernet/intel/igb/igb.h | 2 +
 drivers/net/ethernet/intel/igb/igb_ethtool.c | 38 +-
 drivers/net/ethernet/intel/igb/igb_main.c | 26 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe.h | 50 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 8 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 13 +-
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 250 +++-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 7 +- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 6 +- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 70 +- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 19 +- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 1 - drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 553 ++++++++--- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 3 + drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 720 ++++++++++---- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 237 +++-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 17 +- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 16 +- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 220 ++--- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 2 +- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 3 +- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 104 +- drivers/net/ethernet/intel/ixgbevf/vf.c | 4 +- 81 files changed, 4140 insertions(+), 2213 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 4163b1648..fa593dd3e 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -280,6 +280,16 @@ config I40E_VXLAN Say Y here if you want to use Virtual eXtensible Local Area Network (VXLAN) in the driver. +config I40E_GENEVE + bool "Generic Network Virtualization Encapsulation (GENEVE) Support" + depends on I40E && GENEVE && !(I40E=y && GENEVE=m) + default n + ---help--- + This allows one to create GENEVE virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. GENEVE is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to use GENEVE in the driver. + config I40E_DCB bool "Data Center Bridging (DCB) Support" default n diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 69707108d..98fe5a2cd 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -213,8 +213,11 @@ struct e1000_rx_ring { }; #define E1000_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) \ - ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 0 : (R)->count) + clean - use - 1; \ +}) #define E1000_RX_DESC_EXT(R, i) \ (&(((union e1000_rx_desc_extended *)((R).desc))[i])) diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index b1af0d613..8172cf08c 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -1,5 +1,5 @@ /******************************************************************************* - +* Intel PRO/1000 Linux driver Copyright(c) 1999 - 2006 Intel Corporation. @@ -106,7 +106,7 @@ u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { 120, 120 }; -static DEFINE_SPINLOCK(e1000_eeprom_lock); +static DEFINE_MUTEX(e1000_eeprom_lock); static DEFINE_SPINLOCK(e1000_phy_lock); /** @@ -624,8 +624,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) /* Workaround for PCI-X problem when BIOS sets MMRBC * incorrectly. 
*/ - if (hw->bus_type == e1000_bus_type_pcix - && e1000_pcix_get_mmrbc(hw) > 2048) + if (hw->bus_type == e1000_bus_type_pcix && + e1000_pcix_get_mmrbc(hw) > 2048) e1000_pcix_set_mmrbc(hw, 2048); break; } @@ -683,10 +683,9 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) } ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, - &eeprom_data); - if (ret_val) { + &eeprom_data); + if (ret_val) return ret_val; - } if (eeprom_data != EEPROM_RESERVED_WORD) { /* Adjust SERDES output amplitude only. */ @@ -1074,8 +1073,8 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) if (hw->mac_type <= e1000_82543 || hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || - hw->mac_type == e1000_82541_rev_2 - || hw->mac_type == e1000_82547_rev_2) + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) hw->phy_reset_disable = false; return E1000_SUCCESS; @@ -1652,7 +1651,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) mii_1000t_ctrl_reg = 0; } else { ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, - mii_1000t_ctrl_reg); + mii_1000t_ctrl_reg); if (ret_val) return ret_val; } @@ -1881,10 +1880,11 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) if (ret_val) return ret_val; - if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) - && (!hw->autoneg) - && (hw->forced_speed_duplex == e1000_10_full - || hw->forced_speed_duplex == e1000_10_half)) { + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { ret_val = e1000_polarity_reversal_workaround(hw); if (ret_val) return ret_val; @@ -2084,11 +2084,12 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) * so we had to force link. In this case, we need to force the * configuration of the MAC to match the "fc" parameter. */ - if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) - || ((hw->media_type == e1000_media_type_internal_serdes) - && (hw->autoneg_failed)) - || ((hw->media_type == e1000_media_type_copper) - && (!hw->autoneg))) { + if (((hw->media_type == e1000_media_type_fiber) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_internal_serdes) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_copper) && + (!hw->autoneg))) { ret_val = e1000_force_mac_fc(hw); if (ret_val) { e_dbg("Error forcing flow control settings\n"); @@ -2193,8 +2194,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) - { + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc = E1000_FC_TX_PAUSE; e_dbg ("Flow Control = TX PAUSE frames only.\n"); @@ -2210,8 +2210,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) - { + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc = E1000_FC_RX_PAUSE; e_dbg ("Flow Control = RX PAUSE frames only.\n"); @@ -2460,10 +2459,11 @@ s32 e1000_check_for_link(struct e1000_hw *hw) * happen due to the execution of this workaround. 
*/ - if ((hw->mac_type == e1000_82544 - || hw->mac_type == e1000_82543) && (!hw->autoneg) - && (hw->forced_speed_duplex == e1000_10_full - || hw->forced_speed_duplex == e1000_10_half)) { + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { ew32(IMC, 0xffffffff); ret_val = e1000_polarity_reversal_workaround(hw); @@ -2528,8 +2528,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw) */ if (hw->tbi_compatibility_en) { u16 speed, duplex; + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { e_dbg ("Error getting link speed and duplex\n"); @@ -2628,10 +2630,10 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); if (ret_val) return ret_val; - if ((*speed == SPEED_100 - && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) - || (*speed == SPEED_10 - && !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + if ((*speed == SPEED_100 && + !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + (*speed == SPEED_10 && + !(phy_data & NWAY_LPAR_10T_FD_CAPS))) *duplex = HALF_DUPLEX; } } @@ -2664,9 +2666,9 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw) ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); if (ret_val) return ret_val; - if (phy_data & MII_SR_AUTONEG_COMPLETE) { + if (phy_data & MII_SR_AUTONEG_COMPLETE) return E1000_SUCCESS; - } + msleep(100); } return E1000_SUCCESS; @@ -2803,11 +2805,11 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) return data; } - /** * e1000_read_phy_reg - read a phy register * @hw: Struct containing variables accessed by shared code * @reg_addr: address of the PHY register to read + * @phy_data: pointer to the value on the PHY register * * Reads the value from a PHY register, if the value is on a specific non zero * page, sets the page first. @@ -2823,14 +2825,13 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, (u16) reg_addr); - if (ret_val) { - spin_unlock_irqrestore(&e1000_phy_lock, flags); - return ret_val; - } + if (ret_val) + goto out; } ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, phy_data); +out: spin_unlock_irqrestore(&e1000_phy_lock, flags); return ret_val; @@ -2881,7 +2882,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, e_dbg("MDI Read Error\n"); return -E1000_ERR_PHY; } - *phy_data = (u16) mdic; + *phy_data = (u16)mdic; } else { mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | (phy_addr << E1000_MDIC_PHY_SHIFT) | @@ -2906,7 +2907,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, e_dbg("MDI Error\n"); return -E1000_ERR_PHY; } - *phy_data = (u16) mdic; + *phy_data = (u16)mdic; } } else { /* We must first send a preamble through the MDIO pin to signal @@ -2960,7 +2961,7 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) if ((hw->phy_type == e1000_phy_igp) && (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, - (u16) reg_addr); + (u16)reg_addr); if (ret_val) { spin_unlock_irqrestore(&e1000_phy_lock, flags); return ret_val; @@ -2993,7 +2994,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, * the desired data. 
*/ if (hw->mac_type == e1000_ce4100) { - mdic = (((u32) phy_data) | + mdic = (((u32)phy_data) | (reg_addr << E1000_MDIC_REG_SHIFT) | (phy_addr << E1000_MDIC_PHY_SHIFT) | (INTEL_CE_GBE_MDIC_OP_WRITE) | @@ -3015,7 +3016,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, return -E1000_ERR_PHY; } } else { - mdic = (((u32) phy_data) | + mdic = (((u32)phy_data) | (reg_addr << E1000_MDIC_REG_SHIFT) | (phy_addr << E1000_MDIC_PHY_SHIFT) | (E1000_MDIC_OP_WRITE)); @@ -3053,7 +3054,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); mdic <<= 16; - mdic |= (u32) phy_data; + mdic |= (u32)phy_data; e1000_shift_out_mdi_bits(hw, mdic, 32); } @@ -3176,14 +3177,14 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw) if (ret_val) return ret_val; - hw->phy_id = (u32) (phy_id_high << 16); + hw->phy_id = (u32)(phy_id_high << 16); udelay(20); ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); if (ret_val) return ret_val; - hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK); - hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK; + hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; switch (hw->mac_type) { case e1000_82543: @@ -3401,7 +3402,6 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> SR_1000T_REMOTE_RX_STATUS_SHIFT) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - } return E1000_SUCCESS; @@ -3449,7 +3449,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) if (hw->phy_type == e1000_phy_igp) return e1000_phy_igp_get_info(hw, phy_info); else if ((hw->phy_type == e1000_phy_8211) || - (hw->phy_type == e1000_phy_8201)) + (hw->phy_type == e1000_phy_8201)) return E1000_SUCCESS; else return e1000_phy_m88_get_info(hw, phy_info); @@ -3611,11 +3611,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) */ mask = 0x01 << (count - 1); eecd = er32(EECD); - if (eeprom->type == e1000_eeprom_microwire) { + if (eeprom->type == e1000_eeprom_microwire) eecd &= ~E1000_EECD_DO; - } else if (eeprom->type == e1000_eeprom_spi) { + else if (eeprom->type == e1000_eeprom_spi) eecd |= E1000_EECD_DO; - } + do { /* A "1" is shifted out to the EEPROM by setting bit "DI" to a * "1", and then raising and then lowering the clock (the SK bit @@ -3851,7 +3851,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) do { e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, hw->eeprom.opcode_bits); - spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8); + spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) break; @@ -3882,9 +3882,10 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { s32 ret; - spin_lock(&e1000_eeprom_lock); + + mutex_lock(&e1000_eeprom_lock); ret = e1000_do_read_eeprom(hw, offset, words, data); - spin_unlock(&e1000_eeprom_lock); + mutex_unlock(&e1000_eeprom_lock); return ret; } @@ -3896,15 +3897,16 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, if (hw->mac_type == e1000_ce4100) { GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, - data); + data); return E1000_SUCCESS; } /* A check for invalid values: offset too large, too many words, and * not enough words. 
*/ - if ((offset >= eeprom->word_size) - || (words > eeprom->word_size - offset) || (words == 0)) { + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { e_dbg("\"words\" parameter out of bounds. Words = %d," "size = %d\n", offset, eeprom->word_size); return -E1000_ERR_EEPROM; @@ -3940,7 +3942,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, /* Send the READ command (opcode + addr) */ e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) (offset * 2), + e1000_shift_out_ee_bits(hw, (u16)(offset * 2), eeprom->address_bits); /* Read the data. The address of the eeprom internally @@ -3960,7 +3962,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) (offset + i), + e1000_shift_out_ee_bits(hw, (u16)(offset + i), eeprom->address_bits); /* Read the data. For microwire, each word requires the @@ -3968,6 +3970,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, */ data[i] = e1000_shift_in_ee_bits(hw, 16); e1000_standby_eeprom(hw); + cond_resched(); } } @@ -4004,7 +4007,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) return E1000_SUCCESS; #endif - if (checksum == (u16) EEPROM_SUM) + if (checksum == (u16)EEPROM_SUM) return E1000_SUCCESS; else { e_dbg("EEPROM Checksum Invalid\n"); @@ -4031,7 +4034,7 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) } checksum += eeprom_data; } - checksum = (u16) EEPROM_SUM - checksum; + checksum = (u16)EEPROM_SUM - checksum; if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { e_dbg("EEPROM Write Error\n"); return -E1000_ERR_EEPROM; @@ -4052,9 +4055,10 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { s32 ret; - spin_lock(&e1000_eeprom_lock); + + mutex_lock(&e1000_eeprom_lock); ret = e1000_do_write_eeprom(hw, offset, words, data); - spin_unlock(&e1000_eeprom_lock); + mutex_unlock(&e1000_eeprom_lock); return ret; } @@ -4066,15 +4070,16 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, if (hw->mac_type == e1000_ce4100) { GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, - data); + data); return E1000_SUCCESS; } /* A check for invalid values: offset too large, too many words, and * not enough words. 
*/ - if ((offset >= eeprom->word_size) - || (words > eeprom->word_size - offset) || (words == 0)) { + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { e_dbg("\"words\" parameter out of bounds\n"); return -E1000_ERR_EEPROM; } @@ -4116,6 +4121,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, return -E1000_ERR_EEPROM; e1000_standby_eeprom(hw); + cond_resched(); /* Send the WRITE ENABLE command (8 bit opcode ) */ e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, @@ -4132,7 +4138,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, /* Send the Write command (8-bit opcode + addr) */ e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2), + e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2), eeprom->address_bits); /* Send the data */ @@ -4142,6 +4148,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, */ while (widx < words) { u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); e1000_shift_out_ee_bits(hw, word_out, 16); widx++; @@ -4183,9 +4190,9 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, * EEPROM into write/erase mode. */ e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, - (u16) (eeprom->opcode_bits + 2)); + (u16)(eeprom->opcode_bits + 2)); - e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); /* Prepare the EEPROM */ e1000_standby_eeprom(hw); @@ -4195,7 +4202,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) (offset + words_written), + e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), eeprom->address_bits); /* Send the data */ @@ -4224,6 +4231,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, /* Recover from write */ e1000_standby_eeprom(hw); + cond_resched(); words_written++; } @@ -4235,9 +4243,9 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, * EEPROM out of write/erase mode. */ e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, - (u16) (eeprom->opcode_bits + 2)); + (u16)(eeprom->opcode_bits + 2)); - e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); return E1000_SUCCESS; } @@ -4260,8 +4268,8 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw) e_dbg("EEPROM Read Error\n"); return -E1000_ERR_EEPROM; } - hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); - hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8); + hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8); } switch (hw->mac_type) { @@ -4328,19 +4336,19 @@ u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) */ case 0: /* [47:36] i.e. 0x563 for above example address */ - hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); + hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); break; case 1: /* [46:35] i.e. 0xAC6 for above example address */ - hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); + hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); break; case 2: /* [45:34] i.e. 
0x5D8 for above example address */ - hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); + hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); break; case 3: /* [43:32] i.e. 0x634 for above example address */ - hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); + hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; } @@ -4361,9 +4369,9 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ - rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); - rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx * unit hang. @@ -4537,7 +4545,7 @@ s32 e1000_setup_led(struct e1000_hw *hw) if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, - (u16) (hw->phy_spd_default & + (u16)(hw->phy_spd_default & ~IGP01E1000_GMII_SPD)); if (ret_val) return ret_val; @@ -4802,7 +4810,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw) void e1000_update_adaptive(struct e1000_hw *hw) { if (hw->adaptive_ifs) { - if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { if (hw->tx_packet_delta > MIN_NUM_XMITS) { hw->in_ifs_mode = true; if (hw->current_ifs_val < hw->ifs_max_val) { @@ -4816,8 +4824,8 @@ void e1000_update_adaptive(struct e1000_hw *hw) } } } else { - if (hw->in_ifs_mode - && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + if (hw->in_ifs_mode && + (hw->tx_packet_delta <= MIN_NUM_XMITS)) { hw->current_ifs_val = 0; hw->in_ifs_mode = false; ew32(AIT, 0); @@ -4922,7 +4930,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, /* Use old method for Phy older than IGP */ if (hw->phy_type == e1000_phy_m88) { - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) @@ -4966,7 +4973,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, }; /* Read the AGC registers for all channels */ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { - ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); if (ret_val) @@ -4976,8 +4982,8 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, /* Value bound check. 
*/ if ((cur_agc_value >= - IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) - || (cur_agc_value == 0)) + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) return -E1000_ERR_PHY; agc_value += cur_agc_value; @@ -5054,7 +5060,6 @@ static s32 e1000_check_polarity(struct e1000_hw *hw, */ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { - /* Read the GIG initialization PCS register (0x00B4) */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, @@ -5175,8 +5180,8 @@ static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) hw->ffe_config_state = e1000_ffe_config_active; ret_val = e1000_write_phy_reg(hw, - IGP01E1000_PHY_DSP_FFE, - IGP01E1000_PHY_DSP_FFE_CM_CP); + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); if (ret_val) return ret_val; break; @@ -5243,7 +5248,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) msleep(20); ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_FORCE_GIGA); + IGP01E1000_IEEE_FORCE_GIGA); if (ret_val) return ret_val; for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { @@ -5264,7 +5269,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) } ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_RESTART_AUTONEG); + IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) return ret_val; @@ -5299,7 +5304,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) msleep(20); ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_FORCE_GIGA); + IGP01E1000_IEEE_FORCE_GIGA); if (ret_val) return ret_val; ret_val = @@ -5309,7 +5314,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) return ret_val; ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_RESTART_AUTONEG); + IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) return ret_val; @@ -5346,9 +5351,8 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw) ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data); - if (ret_val) { + if (ret_val) return ret_val; - } if ((eeprom_data != EEPROM_RESERVED_WORD) && (eeprom_data & EEPROM_PHY_CLASS_A)) { @@ -5395,8 +5399,8 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) * from the lowest speeds starting from 10Mbps. 
The capability is used * for Dx transitions and states */ - if (hw->mac_type == e1000_82541_rev_2 - || hw->mac_type == e1000_82547_rev_2) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); if (ret_val) @@ -5446,11 +5450,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) if (ret_val) return ret_val; } - } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) - || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) - || (hw->autoneg_advertised == - AUTONEG_ADVERTISE_10_100_ALL)) { - + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { phy_data |= IGP01E1000_GMII_FLEX_SPD; @@ -5474,7 +5476,6 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) phy_data); if (ret_val) return ret_val; - } return E1000_SUCCESS; } @@ -5542,7 +5543,6 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw) return E1000_SUCCESS; } - /** * e1000_enable_mng_pass_thru - check for bmc pass through * @hw: Struct containing variables accessed by shared code diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index fd7be860c..3fc7bde69 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -99,13 +99,13 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); void e1000_free_all_tx_resources(struct e1000_adapter *adapter); void e1000_free_all_rx_resources(struct e1000_adapter *adapter); static int e1000_setup_tx_resources(struct e1000_adapter *adapter, - struct e1000_tx_ring *txdr); + struct e1000_tx_ring *txdr); static int e1000_setup_rx_resources(struct e1000_adapter *adapter, - struct e1000_rx_ring *rxdr); + struct e1000_rx_ring *rxdr); static void e1000_free_tx_resources(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring); + struct e1000_tx_ring *tx_ring); static void e1000_free_rx_resources(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring); + struct e1000_rx_ring *rx_ring); void e1000_update_stats(struct e1000_adapter *adapter); static int e1000_init_module(void); @@ -122,16 +122,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter); static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); static void e1000_clean_tx_ring(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring); + struct e1000_tx_ring *tx_ring); static void e1000_clean_rx_ring(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring); + struct e1000_rx_ring *rx_ring); static void e1000_set_rx_mode(struct net_device *netdev); static void e1000_update_phy_info_task(struct work_struct *work); static void e1000_watchdog(struct work_struct *work); static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -static struct net_device_stats * e1000_get_stats(struct net_device *netdev); +static struct net_device_stats *e1000_get_stats(struct net_device *netdev); static int e1000_change_mtu(struct net_device *netdev, int new_mtu); static int e1000_set_mac(struct net_device *netdev, void *p); static irqreturn_t e1000_intr(int irq, void *data); @@ 
-164,7 +164,7 @@ static void e1000_tx_timeout(struct net_device *dev); static void e1000_reset_task(struct work_struct *work); static void e1000_smartspeed(struct e1000_adapter *adapter); static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, - struct sk_buff *skb); + struct sk_buff *skb); static bool e1000_vlan_used(struct e1000_adapter *adapter); static void e1000_vlan_mode(struct net_device *netdev, @@ -195,7 +195,7 @@ MODULE_PARM_DESC(copybreak, "Maximum size of packet that is copied to a new buffer on receive"); static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state); + pci_channel_state_t state); static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); static void e1000_io_resume(struct pci_dev *pdev); @@ -287,7 +287,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) int err; err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, - netdev); + netdev); if (err) { e_err(probe, "Unable to allocate interrupt Error: %d\n", err); } @@ -636,8 +636,8 @@ void e1000_reset(struct e1000_adapter *adapter) * but don't include ethernet FCS because hardware appends it */ min_tx_space = (hw->max_frame_size + - sizeof(struct e1000_tx_desc) - - ETH_FCS_LEN) * 2; + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; min_tx_space = ALIGN(min_tx_space, 1024); min_tx_space >>= 10; /* software strips receive CRC, so leave room for it */ @@ -943,8 +943,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct e1000_adapter *adapter; struct e1000_hw *hw; - static int cards_found = 0; - static int global_quad_port_a = 0; /* global ksp3 port a indication */ + static int cards_found; + static int global_quad_port_a; /* global ksp3 port a indication */ int i, err, pci_using_dac; u16 eeprom_data = 0; u16 tmp = 0; @@ -1046,7 +1046,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->mac_type == e1000_ce4100) { hw->ce4100_gbe_mdio_base_virt = ioremap(pci_resource_start(pdev, BAR_1), - pci_resource_len(pdev, BAR_1)); + pci_resource_len(pdev, BAR_1)); if (!hw->ce4100_gbe_mdio_base_virt) goto err_mdio_ioremap; @@ -1148,7 +1148,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; case e1000_82546: case e1000_82546_rev_3: - if (er32(STATUS) & E1000_STATUS_FUNC_1){ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { e1000_read_eeprom(hw, EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); break; @@ -1199,13 +1199,13 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < 32; i++) { hw->phy_addr = i; e1000_read_phy_reg(hw, PHY_ID2, &tmp); - if (tmp == 0 || tmp == 0xFF) { - if (i == 31) - goto err_eeprom; - continue; - } else + + if (tmp != 0 && tmp != 0xFF) break; } + + if (i >= 32) + goto err_eeprom; } /* reset the hardware with the new settings */ @@ -1263,7 +1263,7 @@ err_pci_reg: * @pdev: PCI device information struct * * e1000_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a + * that it should release a PCI device. That could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. 
**/ @@ -1334,12 +1334,12 @@ static int e1000_sw_init(struct e1000_adapter *adapter) static int e1000_alloc_queues(struct e1000_adapter *adapter) { adapter->tx_ring = kcalloc(adapter->num_tx_queues, - sizeof(struct e1000_tx_ring), GFP_KERNEL); + sizeof(struct e1000_tx_ring), GFP_KERNEL); if (!adapter->tx_ring) return -ENOMEM; adapter->rx_ring = kcalloc(adapter->num_rx_queues, - sizeof(struct e1000_rx_ring), GFP_KERNEL); + sizeof(struct e1000_rx_ring), GFP_KERNEL); if (!adapter->rx_ring) { kfree(adapter->tx_ring); return -ENOMEM; @@ -1811,20 +1811,20 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) rctl &= ~E1000_RCTL_SZ_4096; rctl |= E1000_RCTL_BSEX; switch (adapter->rx_buffer_len) { - case E1000_RXBUFFER_2048: - default: - rctl |= E1000_RCTL_SZ_2048; - rctl &= ~E1000_RCTL_BSEX; - break; - case E1000_RXBUFFER_4096: - rctl |= E1000_RCTL_SZ_4096; - break; - case E1000_RXBUFFER_8192: - rctl |= E1000_RCTL_SZ_8192; - break; - case E1000_RXBUFFER_16384: - rctl |= E1000_RCTL_SZ_16384; - break; + case E1000_RXBUFFER_2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; } /* This is useful for sniffing bad packets. */ @@ -1861,12 +1861,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) if (adapter->netdev->mtu > ETH_DATA_LEN) { rdlen = adapter->rx_ring[0].count * - sizeof(struct e1000_rx_desc); + sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_jumbo_rx_irq; adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; } else { rdlen = adapter->rx_ring[0].count * - sizeof(struct e1000_rx_desc); + sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_rx_irq; adapter->alloc_rx_buf = e1000_alloc_rx_buffers; } @@ -2761,7 +2761,9 @@ static int e1000_tso(struct e1000_adapter *adapter, buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; - if (++i == tx_ring->count) i = 0; + if (++i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; return true; @@ -2816,7 +2818,9 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; - if (unlikely(++i == tx_ring->count)) i = 0; + if (unlikely(++i == tx_ring->count)) + i = 0; + tx_ring->next_to_use = i; return true; @@ -2865,8 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, * packet is smaller than 2048 - 16 - 16 (or 2016) bytes */ if (unlikely((hw->bus_type == e1000_bus_type_pcix) && - (size > 2015) && count == 0)) - size = 2015; + (size > 2015) && count == 0)) + size = 2015; /* Workaround for potential 82544 hang in PCI-X. Avoid * terminating buffers within evenly-aligned dwords. 
@@ -2963,7 +2967,7 @@ dma_error: count--; while (count--) { - if (i==0) + if (i == 0) i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; @@ -3013,7 +3017,8 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, tx_desc->lower.data = cpu_to_le32(txd_lower | buffer_info->length); tx_desc->upper.data = cpu_to_le32(txd_upper); - if (unlikely(++i == tx_ring->count)) i = 0; + if (unlikely(++i == tx_ring->count)) + i = 0; } tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); @@ -3101,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev, return __e1000_maybe_stop_tx(netdev, size); } -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { @@ -3841,7 +3846,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_tx_buffer *buffer_info; unsigned int i, eop; unsigned int count = 0; - unsigned int total_tx_bytes=0, total_tx_packets=0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; unsigned int bytes_compl = 0, pkts_compl = 0; i = tx_ring->next_to_clean; @@ -3869,14 +3874,18 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, e1000_unmap_and_free_tx_resource(adapter, buffer_info); tx_desc->upper.data = 0; - if (unlikely(++i == tx_ring->count)) i = 0; + if (unlikely(++i == tx_ring->count)) + i = 0; } eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); } - tx_ring->next_to_clean = i; + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); netdev_completed_queue(netdev, pkts_compl, bytes_compl); @@ -3954,9 +3963,11 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, skb_checksum_none_assert(skb); /* 82543 or newer only */ - if (unlikely(hw->mac_type < e1000_82543)) return; + if (unlikely(hw->mac_type < e1000_82543)) + return; /* Ignore Checksum bit is set */ - if (unlikely(status & E1000_RXD_STAT_IXSM)) return; + if (unlikely(status & E1000_RXD_STAT_IXSM)) + return; /* TCP/UDP checksum error bit is set */ if (unlikely(errors & E1000_RXD_ERR_TCPE)) { /* let the stack verify checksum errors */ @@ -4136,7 +4147,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, unsigned int i; int cleaned_count = 0; bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC(*rx_ring, i); @@ -4153,7 +4164,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, status = rx_desc->status; - if (++i == rx_ring->count) i = 0; + if (++i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); prefetch(next_rxd); @@ -4356,7 +4369,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, unsigned int i; int cleaned_count = 0; bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC(*rx_ring, i); @@ -4395,7 +4408,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, buffer_info->rxbuf.data = NULL; } - if (++i == rx_ring->count) i = 0; + if (++i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); prefetch(next_rxd); @@ -4683,9 +4698,11 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) * we assume back-to-back 
*/ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); - if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); - if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); if (phy_ctrl & CR_1000T_MS_ENABLE) { phy_ctrl &= ~CR_1000T_MS_ENABLE; diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 133d4074d..f7c7804d7 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -441,12 +441,13 @@ #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ #define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ -#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ +#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupt */ /* Interrupt Cause Set */ #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ #define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ +#define E1000_ICS_OTHER E1000_ICR_OTHER /* Other Interrupt */ /* Transmit Descriptor Control */ #define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 0b748d195..1dc293bad 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -480,7 +480,7 @@ extern const char e1000e_driver_version[]; void e1000e_check_options(struct e1000_adapter *adapter); void e1000e_set_ethtool_ops(struct net_device *netdev); -int e1000e_up(struct e1000_adapter *adapter); +void e1000e_up(struct e1000_adapter *adapter); void e1000e_down(struct e1000_adapter *adapter, bool reset); void e1000e_reinit_locked(struct e1000_adapter *adapter); void e1000e_reset(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index c9da4654e..b3949d5be 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -91,6 +91,7 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */ #define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ #define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ +#define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LBG PCH */ #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 91a5a0ae9..a049e3063 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1984,7 +1984,7 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) int i = 0; while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && - (i++ < 10)) + (i++ < 30)) usleep_range(10000, 20000); return blocked ? 
E1000_BLK_PHY_RESET : 0; } @@ -3093,24 +3093,45 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) struct e1000_nvm_info *nvm = &hw->nvm; u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u32 nvm_dword = 0; u8 sig_byte = 0; s32 ret_val; switch (hw->mac.type) { - /* In SPT, read from the CTRL_EXT reg instead of - * accessing the sector valid bits from the nvm - */ case e1000_pch_spt: - *bank = er32(CTRL_EXT) - & E1000_CTRL_EXT_NVMVS; - if ((*bank == 0) || (*bank == 1)) { - e_dbg("ERROR: No valid NVM bank present\n"); - return -E1000_ERR_NVM; - } else { - *bank = *bank - 2; + bank1_offset = nvm->flash_bank_size; + act_offset = E1000_ICH_NVM_SIG_WORD; + + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, + &nvm_dword); + if (ret_val) + return ret_val; + sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; return 0; } - break; + + /* Check bank 1 */ + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset + + bank1_offset, + &nvm_dword); + if (ret_val) + return ret_val; + sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return 0; + } + + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; case e1000_ich8lan: case e1000_ich9lan: eecd = er32(EECD); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 0a854a47d..c71ba1bfc 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1905,30 +1905,15 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 icr = er32(ICR); - - if (!(icr & E1000_ICR_INT_ASSERTED)) { - if (!test_bit(__E1000_DOWN, &adapter->state)) - ew32(IMS, E1000_IMS_OTHER); - return IRQ_NONE; - } - if (icr & adapter->eiac_mask) - ew32(ICS, (icr & adapter->eiac_mask)); + hw->mac.get_link_status = true; - if (icr & E1000_ICR_OTHER) { - if (!(icr & E1000_ICR_LSC)) - goto no_link_interrupt; - hw->mac.get_link_status = true; - /* guard against interrupt when we're going down */ - if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) { + mod_timer(&adapter->watchdog_timer, jiffies + 1); + ew32(IMS, E1000_IMS_OTHER); } -no_link_interrupt: - if (!test_bit(__E1000_DOWN, &adapter->state)) - ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); - return IRQ_HANDLED; } @@ -1946,6 +1931,9 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) /* Ring was not completely cleaned, so fire another interrupt */ ew32(ICS, tx_ring->ims_val); + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, adapter->tx_ring->ims_val); + return IRQ_HANDLED; } @@ -1959,8 +1947,10 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) * previous interrupt. */ if (rx_ring->set_itr) { - writel(1000000000 / (rx_ring->itr_val * 256), - rx_ring->itr_register); + u32 itr = rx_ring->itr_val ? 
+ 1000000000 / (rx_ring->itr_val * 256) : 0; + + writel(itr, rx_ring->itr_register); rx_ring->set_itr = 0; } @@ -2025,6 +2015,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) hw->hw_addr + E1000_EITR_82574(vector)); else writel(1, hw->hw_addr + E1000_EITR_82574(vector)); + adapter->eiac_mask |= E1000_IMS_OTHER; /* Cause Tx interrupts on every write back */ ivar |= (1 << 31); @@ -2032,12 +2023,8 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) ew32(IVAR, ivar); /* enable MSI-X PBA support */ - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; - - /* Auto-Mask Other interrupts upon ICR read */ - ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); - ctrl_ext |= E1000_CTRL_EXT_EIAME; + ctrl_ext = er32(CTRL_EXT) & ~E1000_CTRL_EXT_IAME; + ctrl_ext |= E1000_CTRL_EXT_PBA_CLR | E1000_CTRL_EXT_EIAME; ew32(CTRL_EXT, ctrl_ext); e1e_flush(); } @@ -2253,7 +2240,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); - ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC); } else if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); @@ -4144,10 +4131,24 @@ void e1000e_reset(struct e1000_adapter *adapter) } -int e1000e_up(struct e1000_adapter *adapter) +/** + * e1000e_trigger_lsc - trigger an LSC interrupt + * @adapter: + * + * Fire a link status change interrupt to start the watchdog. + **/ +static void e1000e_trigger_lsc(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_OTHER); + else + ew32(ICS, E1000_ICS_LSC); +} + +void e1000e_up(struct e1000_adapter *adapter) +{ /* hardware has been reset, we need to reload some things */ e1000_configure(adapter); @@ -4159,13 +4160,7 @@ int e1000e_up(struct e1000_adapter *adapter) netif_start_queue(adapter->netdev); - /* fire a link change interrupt to start the watchdog */ - if (adapter->msix_entries) - ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); - else - ew32(ICS, E1000_ICS_LSC); - - return 0; + e1000e_trigger_lsc(adapter); } static void e1000e_flush_descriptors(struct e1000_adapter *adapter) @@ -4590,11 +4585,7 @@ static int e1000_open(struct net_device *netdev) hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); - /* fire a link status change interrupt to start the watchdog */ - if (adapter->msix_entries) - ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); - else - ew32(ICS, E1000_ICS_LSC); + e1000e_trigger_lsc(adapter); return 0; @@ -6631,7 +6622,7 @@ static int e1000e_pm_runtime_resume(struct device *dev) return rc; if (netdev->flags & IFF_UP) - rc = e1000e_up(adapter); + e1000e_up(adapter); return rc; } @@ -6822,13 +6813,8 @@ static void e1000_io_resume(struct pci_dev *pdev) e1000_init_manageability_pt(adapter); - if (netif_running(netdev)) { - if (e1000e_up(adapter)) { - dev_err(&pdev->dev, - "can't bring device back up after reset\n"); - return; - } - } + if (netif_running(netdev)) + e1000e_up(adapter); netif_device_attach(netdev); @@ -7465,6 +7451,7 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt }, { 0, 0, 0, 0, 0, 0, 0 } /* 
terminate list */ }; @@ -7504,14 +7491,11 @@ static struct pci_driver e1000_driver = { **/ static int __init e1000_init_module(void) { - int ret; - pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n"); - ret = pci_register_driver(&e1000_driver); - return ret; + return pci_register_driver(&e1000_driver); } module_init(e1000_init_module); diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile index 08859dd22..b006ff66d 100644 --- a/drivers/net/ethernet/intel/fm10k/Makefile +++ b/drivers/net/ethernet/intel/fm10k/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel Ethernet Switch Host Interface Driver -# Copyright(c) 2013 - 2014 Intel Corporation. +# Copyright(c) 2013 - 2015 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -27,7 +27,17 @@ obj-$(CONFIG_FM10K) += fm10k.o -fm10k-objs := fm10k_main.o fm10k_common.o fm10k_pci.o \ - fm10k_netdev.o fm10k_ethtool.o fm10k_pf.o fm10k_vf.o \ - fm10k_mbx.o fm10k_iov.o fm10k_tlv.o \ - fm10k_debugfs.o fm10k_ptp.o fm10k_dcbnl.o +fm10k-y := fm10k_main.o \ + fm10k_common.o \ + fm10k_pci.o \ + fm10k_ptp.o \ + fm10k_netdev.o \ + fm10k_ethtool.o \ + fm10k_pf.o \ + fm10k_vf.o \ + fm10k_mbx.o \ + fm10k_iov.o \ + fm10k_tlv.o + +fm10k-$(CONFIG_DEBUG_FS) += fm10k_debugfs.o +fm10k-$(CONFIG_DCB) += fm10k_dcbnl.o diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 144402004..b34bb008b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -33,7 +34,7 @@ #include "fm10k_pf.h" #include "fm10k_vf.h" -#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */ +#define FM10K_MAX_JUMBO_FRAME_SIZE 15342 /* Maximum supported size 15K */ #define MAX_QUEUES FM10K_MAX_QUEUES_PF @@ -66,6 +67,7 @@ struct fm10k_l2_accel { enum fm10k_ring_state_t { __FM10K_TX_DETECT_HANG, __FM10K_HANG_CHECK_ARMED, + __FM10K_TX_XPS_INIT_DONE, }; #define check_for_tx_hang(ring) \ @@ -138,7 +140,7 @@ struct fm10k_ring { * different for DCB and RSS modes */ u8 qos_pc; /* priority class of queue */ - u16 vid; /* default vlan ID of queue */ + u16 vid; /* default VLAN ID of queue */ u16 count; /* amount of descriptors */ u16 next_to_alloc; @@ -164,14 +166,20 @@ struct fm10k_ring_container { unsigned int total_packets; /* total packets processed this int */ u16 work_limit; /* total work allowed per interrupt */ u16 itr; /* interrupt throttle rate value */ + u8 itr_scale; /* ITR adjustment based on PCI speed */ u8 count; /* total number of rings in vector */ }; #define FM10K_ITR_MAX 0x0FFF /* maximum value for ITR */ #define FM10K_ITR_10K 100 /* 100us */ #define FM10K_ITR_20K 50 /* 50us */ +#define FM10K_ITR_40K 25 /* 25us */ #define FM10K_ITR_ADAPTIVE 0x8000 /* adaptive interrupt moderation flag */ +#define ITR_IS_ADAPTIVE(itr) (!!(itr & FM10K_ITR_ADAPTIVE)) + +#define FM10K_TX_ITR_DEFAULT FM10K_ITR_40K +#define FM10K_RX_ITR_DEFAULT FM10K_ITR_20K #define FM10K_ITR_ENABLE (FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR) static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring) @@ -203,6 +211,7 @@ struct fm10k_q_vector { struct fm10k_ring_container rx, tx; struct napi_struct napi; + cpumask_t affinity_mask; char 
name[IFNAMSIZ + 9]; #ifdef CONFIG_DEBUG_FS @@ -413,7 +422,7 @@ static inline u16 fm10k_desc_unused(struct fm10k_ring *ring) (&(((union fm10k_rx_desc *)((R)->desc))[i])) #define FM10K_MAX_TXD_PWR 14 -#define FM10K_MAX_DATA_PER_TXD (1 << FM10K_MAX_TXD_PWR) +#define FM10K_MAX_DATA_PER_TXD BIT(FM10K_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD) @@ -434,7 +443,7 @@ union fm10k_ftag_info { struct { /* dglort and sglort combined into a single 32bit desc read */ __le32 glort; - /* upper 16 bits of vlan are reserved 0 for swpri_type_user */ + /* upper 16 bits of VLAN are reserved 0 for swpri_type_user */ __le32 vlan; } d; struct { @@ -484,7 +493,7 @@ void fm10k_netpoll(struct net_device *netdev); #endif /* Netdev */ -struct net_device *fm10k_alloc_netdev(void); +struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); int fm10k_setup_rx_resources(struct fm10k_ring *); int fm10k_setup_tx_resources(struct fm10k_ring *); void fm10k_free_rx_resources(struct fm10k_ring *); @@ -551,5 +560,9 @@ int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr); int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr); /* DCB */ +#ifdef CONFIG_DCB void fm10k_dcbnl_set_ops(struct net_device *dev); +#else +static inline void fm10k_dcbnl_set_ops(struct net_device *dev) {} +#endif #endif /* _FM10K_H_ */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index 5c7a4d766..2be436183 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -20,7 +20,6 @@ #include "fm10k.h" -#ifdef CONFIG_DCB /** * fm10k_dcbnl_ieee_getets - get the ETS configuration for the device * @dev: netdev interface for the device @@ -155,7 +154,6 @@ static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = { .setdcbx = fm10k_dcbnl_setdcbx, }; -#endif /* CONFIG_DCB */ /** * fm10k_dcbnl_set_ops - Configures dcbnl ops pointer for netdev * @dev: netdev interface for the device @@ -164,11 +162,9 @@ static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = { **/ void fm10k_dcbnl_set_ops(struct net_device *dev) { -#ifdef CONFIG_DCB struct fm10k_intfc *interface = netdev_priv(dev); struct fm10k_hw *hw = &interface->hw; if (hw->mac.type == fm10k_mac_pf) dev->dcbnl_ops = &fm10k_dcbnl_ops; -#endif /* CONFIG_DCB */ } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 5304bc1fb..5d6137faf 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -18,8 +18,6 @@ * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 */ -#ifdef CONFIG_DEBUG_FS - #include "fm10k.h" #include @@ -258,5 +256,3 @@ void fm10k_dbg_exit(void) debugfs_remove_recursive(dbg_root); dbg_root = NULL; } - -#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 2ce0eba5e..2f6a05b57 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -111,12 +111,14 @@ static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { FM10K_MBX_STAT("mbx_tx_busy", tx_busy), - FM10K_MBX_STAT("mbx_tx_oversized", tx_dropped), + FM10K_MBX_STAT("mbx_tx_dropped", tx_dropped), FM10K_MBX_STAT("mbx_tx_messages", tx_messages), FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords), + FM10K_MBX_STAT("mbx_tx_mbmem_pulled", tx_mbmem_pulled), FM10K_MBX_STAT("mbx_rx_messages", rx_messages), FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords), FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err), + FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed), }; #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) @@ -125,7 +127,7 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { #define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats) #define FM10K_QUEUE_STATS_LEN(_n) \ - ( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) + ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ FM10K_NETDEV_STATS_LEN + \ @@ -257,7 +259,8 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset) stats_len += FM10K_DEBUG_STATS_LEN; if (iov_data) - stats_len += FM10K_MBX_STATS_LEN * iov_data->num_vfs; + stats_len += FM10K_MBX_STATS_LEN * + iov_data->num_vfs; } return stats_len; @@ -296,14 +299,16 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, if (interface->flags & FM10K_FLAG_DEBUG_STATS) { for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { - p = (char *)interface + fm10k_gstrings_debug_stats[i].stat_offset; + p = (char *)interface + + fm10k_gstrings_debug_stats[i].stat_offset; *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } } for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { - p = (char *)&interface->hw.mbx + fm10k_gstrings_mbx_stats[i].stat_offset; + p = (char *)&interface->hw.mbx + + fm10k_gstrings_mbx_stats[i].stat_offset; *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } @@ -320,6 +325,7 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { for (i = 0; i < iov_data->num_vfs; i++) { struct fm10k_vf_info *vf_info; + vf_info = &iov_data->vf_info[i]; /* skip stats if we don't have a vf info */ @@ -329,7 +335,8 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, } for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { - p = (char *)&vf_info->mbx + fm10k_gstrings_mbx_stats[j].stat_offset; + p = (char *)&vf_info->mbx + + fm10k_gstrings_mbx_stats[j].stat_offset; *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } @@ -699,12 +706,10 @@ static int fm10k_get_coalesce(struct net_device *dev, { struct fm10k_intfc *interface = netdev_priv(dev); - ec->use_adaptive_tx_coalesce = - !!(interface->tx_itr & FM10K_ITR_ADAPTIVE); + ec->use_adaptive_tx_coalesce = ITR_IS_ADAPTIVE(interface->tx_itr); ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE; - ec->use_adaptive_rx_coalesce = - !!(interface->rx_itr & FM10K_ITR_ADAPTIVE); + ec->use_adaptive_rx_coalesce = ITR_IS_ADAPTIVE(interface->rx_itr); ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE; return 0; @@ -729,10 +734,10 @@ static int fm10k_set_coalesce(struct net_device *dev, /* set initial values for adaptive ITR */ if (ec->use_adaptive_tx_coalesce) - tx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_10K; + tx_itr = FM10K_ITR_ADAPTIVE | FM10K_TX_ITR_DEFAULT; if (ec->use_adaptive_rx_coalesce) - rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K; + rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT; /* update interface */ interface->tx_itr = tx_itr; @@ -1020,7 +1025,6 @@ static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags) return 0; } - static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) { return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index e76a44cf3..b243c3cbe 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -28,7 +28,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.15.2-k" +#define DRV_VERSION "0.19.3-k" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = @@ -42,7 +42,7 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); /* single workqueue for entire fm10k driver */ -struct workqueue_struct *fm10k_workqueue = NULL; +struct workqueue_struct *fm10k_workqueue; /** * fm10k_init_module - Driver Registration Routine @@ -56,8 +56,7 @@ static int __init fm10k_init_module(void) pr_info("%s\n", fm10k_copyright); /* create driver workqueue */ - if (!fm10k_workqueue) - fm10k_workqueue = create_workqueue("fm10k"); + fm10k_workqueue = create_workqueue("fm10k"); fm10k_dbg_init(); @@ -80,7 +79,6 @@ static void __exit fm10k_exit_module(void) /* destroy driver workqueue */ flush_workqueue(fm10k_workqueue); destroy_workqueue(fm10k_workqueue); - fm10k_workqueue = NULL; } module_exit(fm10k_exit_module); @@ -917,7 +915,7 @@ static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags) /* set timestamping bits */ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) - desc_flags |= FM10K_TXD_FLAG_TIME; + desc_flags |= FM10K_TXD_FLAG_TIME; /* set checksum offload bits */ desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, @@ -1094,11 +1092,11 @@ dma_error: netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_ring *tx_ring) { + u16 count = TXD_USE_COUNT(skb_headlen(skb)); struct fm10k_tx_buffer *first; - int tso; - u32 tx_flags = 0; unsigned short f; - u16 count = TXD_USE_COUNT(skb_headlen(skb)); + u32 tx_flags = 0; + int tso; /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD, @@ -1363,10 +1361,10 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, **/ static void fm10k_update_itr(struct fm10k_ring_container *ring_container) { - unsigned int avg_wire_size, packets; + unsigned int 
avg_wire_size, packets, itr_round; /* Only update ITR if we are using adaptive setting */ - if (!(ring_container->itr & FM10K_ITR_ADAPTIVE)) + if (!ITR_IS_ADAPTIVE(ring_container->itr)) goto clear_counts; packets = ring_container->total_packets; @@ -1375,18 +1373,44 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container) avg_wire_size = ring_container->total_bytes / packets; - /* Add 24 bytes to size to account for CRC, preamble, and gap */ - avg_wire_size += 24; - - /* Don't starve jumbo frames */ - if (avg_wire_size > 3000) - avg_wire_size = 3000; + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (34 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 360) { + /* Start at 250K ints/sec and gradually drop to 77K ints/sec */ + avg_wire_size *= 8; + avg_wire_size += 376; + } else if (avg_wire_size <= 1152) { + /* 77K ints/sec to 45K ints/sec */ + avg_wire_size *= 3; + avg_wire_size += 2176; + } else if (avg_wire_size <= 1920) { + /* 45K ints/sec to 38K ints/sec */ + avg_wire_size += 4480; + } else { + /* plateau at a limit of 38K ints/sec */ + avg_wire_size = 6656; + } - /* Give a little boost to mid-size frames */ - if ((avg_wire_size > 300) && (avg_wire_size < 1200)) - avg_wire_size /= 3; - else - avg_wire_size /= 2; + /* Perform final bitshift for division after rounding up to ensure + * that the calculation will never get below a 1. The bit shift + * accounts for changes in the ITR due to PCIe link speed. 
+ */ + itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8; + avg_wire_size += (1 << itr_round) - 1; + avg_wire_size >>= itr_round; /* write back value and retain adaptive flag */ ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE; @@ -1428,11 +1452,15 @@ static int fm10k_poll(struct napi_struct *napi, int budget) fm10k_for_each_ring(ring, q_vector->tx) clean_complete &= fm10k_clean_tx_irq(q_vector, ring); + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + return budget; + /* attempt to distribute budget to each queue fairly, but don't * allow the budget to go below 1 because we'll exit polling */ if (q_vector->rx.count > 1) - per_ring_budget = max(budget/q_vector->rx.count, 1); + per_ring_budget = max(budget / q_vector->rx.count, 1); else per_ring_budget = budget; @@ -1600,6 +1628,7 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, q_vector->tx.ring = ring; q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; q_vector->tx.itr = interface->tx_itr; + q_vector->tx.itr_scale = interface->hw.mac.itr_scale; q_vector->tx.count = txr_count; while (txr_count) { @@ -1628,6 +1657,7 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, /* save Rx ring container info */ q_vector->rx.ring = ring; q_vector->rx.itr = interface->rx_itr; + q_vector->rx.itr_scale = interface->hw.mac.itr_scale; q_vector->rx.count = rxr_count; while (rxr_count) { @@ -1966,8 +1996,10 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) /* Allocate memory for queues */ err = fm10k_alloc_q_vectors(interface); - if (err) + if (err) { + fm10k_reset_msix_capability(interface); return err; + } /* Map rings to devices, and map devices to physical queues */ fm10k_assign_rings(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index af09a1b27..98202c3d5 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -57,7 +57,7 @@ static u16 fm10k_fifo_unused(struct fm10k_mbx_fifo *fifo) } /** - * fm10k_fifo_empty - Test to verify if fifo is empty + * fm10k_fifo_empty - Test to verify if FIFO is empty * @fifo: pointer to FIFO * * This function returns true if the FIFO is empty, else false @@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) * @fifo: pointer to FIFO * @offset: offset to add to head * - * This function returns the indices into the fifo based on head + offset + * This function returns the indices into the FIFO based on head + offset **/ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) * @fifo: pointer to FIFO * @offset: offset to add to tail * - * This function returns the indices into the fifo based on tail + offset + * This function returns the indices into the FIFO based on tail + offset **/ static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -160,7 +160,7 @@ static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) /** * fm10k_mbx_tail_add - Determine new tail value with added offset * @mbx: pointer to mailbox - * @offset: length to add to head offset + * @offset: length to add to tail offset * * This function takes the local tail index and recomputes it for * a given length added as an offset. 
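The adaptive ITR rework above replaces the old "boost mid-size frames" heuristic with a piecewise-linear fit of (34 * (size + 24)) / (size + 640), deferring the division to a single right shift by (itr_scale + 8). A minimal standalone sketch of the same computation follows; the function name approx_itr and the free-standing form are illustrative, not the driver's:

    /* Piecewise approximation from fm10k_update_itr(); rounding up before
     * the shift keeps the result from ever reaching zero, and itr_scale
     * compensates for PCIe link generation.
     */
    static unsigned int approx_itr(unsigned int avg_wire_size,
                                   unsigned int itr_scale)
    {
            unsigned int shift = itr_scale + 8;

            if (avg_wire_size <= 360)
                    avg_wire_size = avg_wire_size * 8 + 376;  /* 250K..77K ints/sec */
            else if (avg_wire_size <= 1152)
                    avg_wire_size = avg_wire_size * 3 + 2176; /* 77K..45K ints/sec */
            else if (avg_wire_size <= 1920)
                    avg_wire_size += 4480;                    /* 45K..38K ints/sec */
            else
                    avg_wire_size = 6656;                     /* plateau at 38K */

            /* round up, then divide by 2^shift */
            return (avg_wire_size + (1u << shift) - 1) >> shift;
    }

For example, a 60-byte average with itr_scale 0 (an illustrative value) gives (60 * 8 + 376 + 255) >> 8 = 4, i.e. a 4 usec ITR, roughly the 250K interrupts/sec the comments mention.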
@@ -176,7 +176,7 @@ static u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset) /** * fm10k_mbx_tail_sub - Determine new tail value with subtracted offset * @mbx: pointer to mailbox - * @offset: length to add to head offset + * @offset: length to add to tail offset * * This function takes the local tail index and recomputes it for * a given length added as an offset. @@ -240,7 +240,7 @@ static u16 fm10k_mbx_pushed_tail_len(struct fm10k_mbx_info *mbx) } /** - * fm10k_fifo_write_copy - pulls data off of msg and places it in fifo + * fm10k_fifo_write_copy - pulls data off of msg and places it in FIFO * @fifo: pointer to FIFO * @msg: message array to populate * @tail_offset: additional offset to add to tail pointer @@ -336,6 +336,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) /** * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will take a section of the Tx FIFO and copy it into the @@ -375,6 +376,8 @@ static void fm10k_mbx_write_copy(struct fm10k_hw *hw, if (!tail) tail++; + mbx->tx_mbmem_pulled++; + /* write message to hardware FIFO */ fm10k_write_reg(hw, mbmem + tail++, *(head++)); } while (--len && --end); @@ -459,6 +462,8 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw, if (!head) head++; + mbx->rx_mbmem_pushed++; + /* read message from hardware FIFO */ *(tail++) = fm10k_read_reg(hw, mbmem + head++); } while (--len && --end); @@ -707,7 +712,7 @@ static bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx) * @hw: pointer to hardware structure * @mbx: pointer to mailbox * - * This function dequeues messages and hands them off to the tlv parser. + * This function dequeues messages and hands them off to the TLV parser. * It will return the number of messages processed when called. **/ static u16 fm10k_mbx_dequeue_rx(struct fm10k_hw *hw, @@ -899,7 +904,7 @@ static void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx) } /** - * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mailbox header + * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mbox hdr * @mbx: pointer to mailbox * * This function creates a fake disconnect header for loading into remote @@ -920,7 +925,7 @@ static void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx) } /** - * fm10k_mbx_create_error_msg - Generate a error message + * fm10k_mbx_create_error_msg - Generate an error message * @mbx: pointer to mailbox * @err: local error encountered * @@ -953,7 +958,6 @@ static void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) /** * fm10k_mbx_validate_msg_hdr - Validate common fields in the message header * @mbx: pointer to mailbox - * @msg: message array to read * * This function will parse up the fields in the mailbox header and return * an error if the header contains any of a number of invalid configurations @@ -1017,11 +1021,12 @@ static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx) /** * fm10k_mbx_create_reply - Generate reply based on state and remote head + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * @head: acknowledgement number * * This function will generate an outgoing message based on the current - * mailbox state and the remote fifo head. It will return the length + * mailbox state and the remote FIFO head. It will return the length * of the outgoing message excluding header on success, and a negative value * on error. 
**/ @@ -1147,8 +1152,8 @@ static void fm10k_mbx_connect_reset(struct fm10k_mbx_info *mbx) /** * fm10k_mbx_process_connect - Process connect header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox - * @msg: message array to process * * This function will read an incoming connect header and reply with the * appropriate message. It will return a value indicating the number of @@ -1194,6 +1199,7 @@ static s32 fm10k_mbx_process_connect(struct fm10k_hw *hw, /** * fm10k_mbx_process_data - Process data header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will read an incoming data header and reply with the @@ -1235,6 +1241,7 @@ static s32 fm10k_mbx_process_data(struct fm10k_hw *hw, /** * fm10k_mbx_process_disconnect - Process disconnect header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will read an incoming disconnect header and reply with the @@ -1287,6 +1294,7 @@ static s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw, /** * fm10k_mbx_process_error - Process error header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will read an incoming error header and reply with the @@ -1556,7 +1564,7 @@ static s32 fm10k_mbx_register_handlers(struct fm10k_mbx_info *mbx, * @id: ID reference for PF as it supports up to 64 PF/VF mailboxes * * This function initializes the mailbox for use. It will split the - * buffer provided an use that th populate both the Tx and Rx FIFO by + * buffer provided and use that to populate both the Tx and Rx FIFO by * evenly splitting it. In order to allow for easy masking of head/tail * the value reported in size must be a power of 2 and is reported in * DWORDs, not bytes. Any invalid values will cause the mailbox to return @@ -1633,7 +1641,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, * fm10k_sm_mbx_create_data_hdr - Generate a mailbox header for local FIFO * @mbx: pointer to mailbox * - * This function returns a connection mailbox header + * This function returns a data mailbox header **/ static void fm10k_sm_mbx_create_data_hdr(struct fm10k_mbx_info *mbx) { @@ -1726,8 +1734,6 @@ static s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) fm10k_sm_mbx_create_connect_hdr(mbx, 0); fm10k_mbx_write(hw, mbx); - /* enable interrupt and notify other party of new message */ - return 0; } @@ -1771,7 +1777,7 @@ static void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw, } /** - * fm10k_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header + * fm10k_sm_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header * @mbx: pointer to mailbox * * This function will parse up the fields in the mailbox header and return @@ -1849,7 +1855,7 @@ static void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx) } /** - * fm10k_sm_mbx_create_error_message - Process an error in FIFO hdr + * fm10k_sm_mbx_create_error_msg - Process an error in FIFO header * @mbx: pointer to mailbox * @err: local error encountered * @@ -1879,6 +1885,7 @@ static void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) * fm10k_sm_mbx_receive - Take message from Rx mailbox FIFO and put it in Rx * @hw: pointer to hardware structure * @mbx: pointer to mailbox + * @tail: tail index of message * * This function will dequeue one message from the Rx switch manager mailbox * FIFO and place it in the Rx mailbox FIFO for processing by software. 
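The fm10k_pfvf_mbx_init kerneldoc above requires the FIFO size to be a power of 2 precisely so head and tail can be masked rather than taken modulo the size. A small sketch of that indexing scheme, assuming kernel u16/u32 types, with demo_* names standing in for the driver's fm10k_mbx_fifo helpers:

    /* With a power-of-2 size, wrapping an index is one AND with (size - 1),
     * and head/tail can simply grow monotonically.
     */
    struct demo_fifo {
            u32 *buffer;
            u16 size;       /* in DWORDs; must be a power of 2 */
            u16 head;
            u16 tail;
    };

    static u16 demo_fifo_offset(const struct demo_fifo *fifo, u16 index,
                                u16 offset)
    {
            return (index + offset) & (fifo->size - 1);
    }

    static u16 demo_fifo_used(const struct demo_fifo *fifo)
    {
            /* unsigned subtraction stays correct after tail wraps */
            return fifo->tail - fifo->head;
    }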
@@ -1918,6 +1925,7 @@ static s32 fm10k_sm_mbx_receive(struct fm10k_hw *hw, * fm10k_sm_mbx_transmit - Take message from Tx and put it in Tx mailbox FIFO * @hw: pointer to hardware structure * @mbx: pointer to mailbox + * @head: head index of message * * This function will dequeue one message from the Tx mailbox FIFO and place * it in the Tx switch manager mailbox FIFO for processing by hardware. @@ -1957,11 +1965,12 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, /** * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * @head: acknowledgement number * * This function will generate an outgoing message based on the current - * mailbox state and the remote fifo head. It will return the length + * mailbox state and the remote FIFO head. It will return the length * of the outgoing message excluding header on success, and a negative value * on error. **/ @@ -2073,7 +2082,7 @@ send_reply: } /** - * fm10k_sm_mbx_process - Process mailbox switch mailbox interrupt + * fm10k_sm_mbx_process - Process switch manager mailbox interrupt * @hw: pointer to hardware structure * @mbx: pointer to mailbox * @@ -2129,13 +2138,19 @@ fifo_err: * @mbx: pointer to mailbox * @msg_data: handlers for mailbox events * - * This function for now is used to stub out the PF/SM mailbox + * This function initializes the PF/SM mailbox for use. It will split the + * buffer provided and use that to populate both the Tx and Rx FIFO by + * evenly splitting it. In order to allow for easy masking of head/tail + * the value reported in size must be a power of 2 and is reported in + * DWORDs, not bytes. Any invalid values will cause the mailbox to return + * error. **/ s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, const struct fm10k_msg_data *msg_data) { mbx->mbx_reg = FM10K_GMBX; mbx->mbmem_reg = FM10K_MBMEM_PF(0); + /* start out in closed state */ mbx->state = FM10K_STATE_CLOSED; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h index 0419a7f00..245a0a3dc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -128,11 +128,11 @@ enum fm10k_mbx_state { * The maximum message size is provided during connect to avoid * jamming the mailbox with messages that do not fit. * Err_no: Error number - Applies only to error headers - * The error number provides a indication of the type of error + * The error number provides an indication of the type of error * experienced. 
*/ -/* macros for retriving and setting header values */ +/* macros for retrieving and setting header values */ #define FM10K_MSG_HDR_MASK(name) \ ((0x1u << FM10K_MSG_##name##_SIZE) - 1) #define FM10K_MSG_HDR_FIELD_SET(value, name) \ @@ -291,8 +291,10 @@ struct fm10k_mbx_info { u64 tx_dropped; u64 tx_messages; u64 tx_dwords; + u64 tx_mbmem_pulled; u64 rx_messages; u64 rx_dwords; + u64 rx_mbmem_pushed; u64 rx_parse_err; /* Buffer to store messages */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 7781e8089..662569d5b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -20,7 +20,7 @@ #include "fm10k.h" #include -#if IS_ENABLED(CONFIG_FM10K_VXLAN) +#ifdef CONFIG_FM10K_VXLAN #include #endif /* CONFIG_FM10K_VXLAN */ @@ -556,11 +556,11 @@ int fm10k_open(struct net_device *netdev) if (err) goto err_set_queues; -#if IS_ENABLED(CONFIG_FM10K_VXLAN) +#ifdef CONFIG_FM10K_VXLAN /* update VXLAN port configuration */ vxlan_get_rx_port(netdev); - #endif + fm10k_up(interface); return 0; @@ -608,7 +608,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) unsigned int r_idx = skb->queue_mapping; int err; - if ((skb->protocol == htons(ETH_P_8021Q)) && + if ((skb->protocol == htons(ETH_P_8021Q)) && !skb_vlan_tag_present(skb)) { /* FM10K only supports hardware tagging, any tags in frame * are considered 2nd level or "outer" tags @@ -632,7 +632,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - /* locate vlan header */ + /* locate VLAN header */ vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); /* pull the 2 key pieces of data out of it */ @@ -705,7 +705,7 @@ static void fm10k_tx_timeout(struct net_device *netdev) } else { netif_info(interface, drv, netdev, "Fake Tx hang detected with timeout of %d seconds\n", - netdev->watchdog_timeo/HZ); + netdev->watchdog_timeo / HZ); /* fake Tx hang - increase the kernel timeout */ if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) @@ -778,7 +778,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (!set) clear_bit(vid, interface->active_vlans); - /* disable the default VID on ring if we have an active VLAN */ + /* disable the default VLAN ID on ring if we have an active VLAN */ for (i = 0; i < interface->num_rx_queues; i++) { struct fm10k_ring *rx_ring = interface->rx_ring[i]; u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1); @@ -789,7 +789,9 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) rx_ring->vid &= ~FM10K_VLAN_CLEAR; } - /* Do not remove default VID related entries from VLAN and MAC tables */ + /* Do not remove default VLAN ID related entries from VLAN and MAC + * tables + */ if (!set && vid == hw->mac.default_vid) return 0; @@ -814,7 +816,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (err) goto err_out; - /* set vid prior to syncing/unsyncing the VLAN */ + /* set VLAN ID prior to syncing/unsyncing the VLAN */ interface->vid = vid + (set ? 
VLAN_N_VID : 0); /* Update the unicast and multicast address list to add/drop VLAN */ @@ -1151,6 +1153,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, int fm10k_setup_tc(struct net_device *dev, u8 tc) { struct fm10k_intfc *interface = netdev_priv(dev); + int err; /* Currently only the PF supports priority classes */ if (tc && (interface->hw.mac.type != fm10k_mac_pf)) @@ -1175,17 +1178,30 @@ int fm10k_setup_tc(struct net_device *dev, u8 tc) netdev_reset_tc(dev); netdev_set_num_tc(dev, tc); - fm10k_init_queueing_scheme(interface); + err = fm10k_init_queueing_scheme(interface); + if (err) + goto err_queueing_scheme; - fm10k_mbx_request_irq(interface); + err = fm10k_mbx_request_irq(interface); + if (err) + goto err_mbx_irq; - if (netif_running(dev)) - fm10k_open(dev); + err = netif_running(dev) ? fm10k_open(dev) : 0; + if (err) + goto err_open; /* flag to indicate SWPRI has yet to be updated */ interface->flags |= FM10K_FLAG_SWPRI_CONFIG; return 0; +err_open: + fm10k_mbx_free_irq(interface); +err_mbx_irq: + fm10k_clear_queueing_scheme(interface); +err_queueing_scheme: + netif_device_detach(dev); + + return err; } static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) @@ -1355,7 +1371,7 @@ static netdev_features_t fm10k_features_check(struct sk_buff *skb, if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) return features; - return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } static const struct net_device_ops fm10k_netdev_ops = { @@ -1388,8 +1404,9 @@ static const struct net_device_ops fm10k_netdev_ops = { #define DEFAULT_DEBUG_LEVEL_SHIFT 3 -struct net_device *fm10k_alloc_netdev(void) +struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info) { + netdev_features_t hw_features; struct fm10k_intfc *interface; struct net_device *dev; @@ -1412,27 +1429,31 @@ struct net_device *fm10k_alloc_netdev(void) NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | - NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXHASH | NETIF_F_RXCSUM; + /* Only the PF can support VXLAN and NVGRE tunnel offloads */ + if (info->mac == fm10k_mac_pf) { + dev->hw_enc_features = NETIF_F_IP_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_IPV6_CSUM | + NETIF_F_SG; + + dev->features |= NETIF_F_GSO_UDP_TUNNEL; + } + /* all features defined to this point should be changeable */ - dev->hw_features |= dev->features; + hw_features = dev->features; /* allow user to enable L2 forwarding acceleration */ - dev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; /* configure VLAN features */ dev->vlan_features |= dev->features; - /* configure tunnel offloads */ - dev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_TSO_ECN | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_IPV6_CSUM; - /* we want to leave these both on as we cannot disable VLAN tag * insertion or stripping on the hardware since it is contained * in the FTAG and not in the frame itself. 
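fm10k_setup_tc above no longer assumes its bring-up steps succeed: each step is checked, and a failure unwinds everything acquired so far in reverse order before detaching the device, the same goto-ladder that fm10k_reinit and fm10k_resume adopt later in this patch. A compact sketch of the shape, with every demo_* name invented for illustration:

    struct demo_intfc { int unused; };

    /* stubs standing in for queueing setup, IRQ request and open */
    static int  demo_init_queueing(struct demo_intfc *i)  { return 0; }
    static void demo_clear_queueing(struct demo_intfc *i) { }
    static int  demo_request_irq(struct demo_intfc *i)    { return 0; }
    static void demo_free_irq(struct demo_intfc *i)       { }
    static int  demo_open(struct demo_intfc *i)           { return 0; }

    static int demo_bring_up(struct demo_intfc *intfc)
    {
            int err;

            err = demo_init_queueing(intfc);
            if (err)
                    return err;

            err = demo_request_irq(intfc);
            if (err)
                    goto err_irq;

            err = demo_open(intfc);
            if (err)
                    goto err_open;

            return 0;

    err_open:
            demo_free_irq(intfc);        /* release in reverse order */
    err_irq:
            demo_clear_queueing(intfc);
            return err;
    }

The payoff is that each label releases exactly what was acquired before the jump, so a failure at any step leaves no dangling IRQ or queue state behind.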
@@ -1443,5 +1464,7 @@ struct net_device *fm10k_alloc_netdev(void) dev->priv_flags |= IFF_UNICAST_FLT; + dev->hw_features |= hw_features; + return dev; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 74be792f3..4eb7a6fa6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -159,16 +159,40 @@ static void fm10k_reinit(struct fm10k_intfc *interface) fm10k_mbx_free_irq(interface); + /* free interrupts */ + fm10k_clear_queueing_scheme(interface); + /* delay any future reset requests */ interface->last_reset = jiffies + (10 * HZ); /* reset and initialize the hardware so it is in a known state */ - err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw); - if (err) + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err); + goto reinit_err; + } + + err = hw->mac.ops.init_hw(hw); + if (err) { dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err); + goto reinit_err; + } + + err = fm10k_init_queueing_scheme(interface); + if (err) { + dev_err(&interface->pdev->dev, + "init_queueing_scheme failed: %d\n", err); + goto reinit_err; + } /* reassociate interrupts */ - fm10k_mbx_request_irq(interface); + err = fm10k_mbx_request_irq(interface); + if (err) + goto err_mbx_irq; + + err = fm10k_hw_ready(interface); + if (err) + goto err_open; /* update hardware address for VFs if perm_addr has changed */ if (hw->mac.type == fm10k_mac_vf) { @@ -188,14 +212,27 @@ static void fm10k_reinit(struct fm10k_intfc *interface) /* reset clock */ fm10k_ts_reset(interface); - if (netif_running(netdev)) - fm10k_open(netdev); + err = netif_running(netdev) ? fm10k_open(netdev) : 0; + if (err) + goto err_open; fm10k_iov_resume(interface->pdev); rtnl_unlock(); clear_bit(__FM10K_RESETTING, &interface->state); + + return; +err_open: + fm10k_mbx_free_irq(interface); +err_mbx_irq: + fm10k_clear_queueing_scheme(interface); +reinit_err: + netif_device_detach(netdev); + + rtnl_unlock(); + + clear_bit(__FM10K_RESETTING, &interface->state); } static void fm10k_reset_subtask(struct fm10k_intfc *interface) @@ -563,7 +600,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, /* store tail pointer */ ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)]; - /* reset ntu and ntc to place SW in sync with hardwdare */ + /* reset ntu and ntc to place SW in sync with hardware */ ring->next_to_clean = 0; ring->next_to_use = 0; @@ -579,6 +616,13 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx), FM10K_PFVTCTL_FTAG_DESC_ENABLE); + /* Initialize XPS */ + if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, &ring->state) && + ring->q_vector) + netif_set_xps_queue(ring->netdev, + &ring->q_vector->affinity_mask, + ring->queue_index); + /* enable queue */ fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl); } @@ -669,7 +713,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* store tail pointer */ ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)]; - /* reset ntu and ntc to place SW in sync with hardwdare */ + /* reset ntu and ntc to place SW in sync with hardware */ ring->next_to_clean = 0; ring->next_to_use = 0; ring->next_to_alloc = 0; @@ -694,7 +738,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* assign default VLAN to queue */ ring->vid = hw->mac.default_vid; - /* if we have an active VLAN, disable default VID */ + /* if we have an active VLAN, 
disable default VLAN ID */ if (test_bit(hw->mac.default_vid, interface->active_vlans)) ring->vid |= FM10K_VLAN_CLEAR; @@ -846,7 +890,7 @@ static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data) struct fm10k_q_vector *q_vector = data; if (q_vector->rx.count || q_vector->tx.count) - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -859,7 +903,8 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) /* re-enable mailbox interrupt and indicate 20us delay */ fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR), - FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY); + FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> + hw->mac.itr_scale)); /* service upstream mailbox */ if (fm10k_mbx_trylock(interface)) { @@ -867,7 +912,7 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) fm10k_mbx_unlock(interface); } - hw->mac.get_host_state = 1; + hw->mac.get_host_state = true; fm10k_service_event_schedule(interface); return IRQ_HANDLED; @@ -897,7 +942,7 @@ void fm10k_netpoll(struct net_device *netdev) #endif #define FM10K_ERR_MSG(type) case (type): error = #type; break static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, - struct fm10k_fault *fault) + struct fm10k_fault *fault) { struct pci_dev *pdev = interface->pdev; struct fm10k_hw *hw = &interface->hw; @@ -1083,14 +1128,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) } /* we should validate host state after interrupt event */ - hw->mac.get_host_state = 1; + hw->mac.get_host_state = true; /* validate host state, and handle VF mailboxes in the service task */ fm10k_service_event_schedule(interface); /* re-enable mailbox interrupt and indicate 20us delay */ fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR), - FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY); + FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> + hw->mac.itr_scale)); return IRQ_HANDLED; } @@ -1101,6 +1147,10 @@ void fm10k_mbx_free_irq(struct fm10k_intfc *interface) struct fm10k_hw *hw = &interface->hw; int itr_reg; + /* no mailbox IRQ to free if MSI-X is not enabled */ + if (!interface->msix_entries) + return; + /* disconnect the mailbox */ hw->mbx.ops.disconnect(hw, &hw->mbx); @@ -1141,7 +1191,7 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results, /* MAC was changed so we need reset */ if (is_valid_ether_addr(hw->mac.perm_addr) && - memcmp(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN)) + !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr)) interface->flags |= FM10K_FLAG_RESET_REQUESTED; /* VLAN override was changed, or default VLAN changed */ @@ -1269,7 +1319,7 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; - /* verify VID is valid */ + /* verify VLAN ID is valid */ if (pvid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; @@ -1388,14 +1438,14 @@ static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface) } /* Enable interrupts w/ no moderation for "other" interrupts */ - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SRAM), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_MaxHoldTime), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_VFLR), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr); + 
fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr); /* Enable interrupts w/ moderation for mailbox */ - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_Mailbox), mbx_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr); /* Enable individual interrupt causes */ fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) | @@ -1423,10 +1473,15 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface) err = fm10k_mbx_request_irq_pf(interface); else err = fm10k_mbx_request_irq_vf(interface); + if (err) + return err; /* connect mailbox */ - if (!err) - err = hw->mbx.ops.connect(hw, &hw->mbx); + err = hw->mbx.ops.connect(hw, &hw->mbx); + + /* if the mailbox failed to connect, then free IRQ */ + if (err) + fm10k_mbx_free_irq(interface); return err; } @@ -1455,8 +1510,10 @@ void fm10k_qv_free_irq(struct fm10k_intfc *interface) if (!q_vector->tx.count && !q_vector->rx.count) continue; - /* disable interrupts */ + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + /* disable interrupts */ writel(FM10K_ITR_MASK_SET, q_vector->itr); free_irq(entry->vector, q_vector); @@ -1514,6 +1571,9 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface) goto err_out; } + /* assign the mask for this irq */ + irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask); + /* Enable q_vector */ writel(FM10K_ITR_ENABLE, q_vector->itr); @@ -1534,8 +1594,10 @@ err_out: if (!q_vector->tx.count && !q_vector->rx.count) continue; - /* disable interrupts */ + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + /* disable interrupts */ writel(FM10K_ITR_MASK_SET, q_vector->itr); free_irq(entry->vector, q_vector); @@ -1573,7 +1635,7 @@ void fm10k_up(struct fm10k_intfc *interface) netif_tx_start_all_queues(interface->netdev); /* kick off the service timer now */ - hw->mac.get_host_state = 1; + hw->mac.get_host_state = true; mod_timer(&interface->service_timer, jiffies); } @@ -1684,7 +1746,13 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, interface->last_reset = jiffies + (10 * HZ); /* reset and initialize the hardware so it is in a known state */ - err = hw->mac.ops.reset_hw(hw) ? 
: hw->mac.ops.init_hw(hw); + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_err(&pdev->dev, "reset_hw failed: %d\n", err); + return err; + } + + err = hw->mac.ops.init_hw(hw); if (err) { dev_err(&pdev->dev, "init_hw failed: %d\n", err); return err; @@ -1722,13 +1790,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, pci_resource_len(pdev, 4)); hw->sw_addr = interface->sw_addr; - /* Only the PF can support VXLAN and NVGRE offloads */ - if (hw->mac.type != fm10k_mac_pf) { - netdev->hw_enc_features = 0; - netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; - netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; - } - /* initialize DCBNL interface */ fm10k_dcbnl_set_ops(netdev); @@ -1749,8 +1810,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, interface->rx_ring_count = FM10K_DEFAULT_RXD; /* set default interrupt moderation */ - interface->tx_itr = FM10K_ITR_10K; - interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K; + interface->tx_itr = FM10K_TX_ITR_DEFAULT; + interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT; /* initialize vxlan_port list */ INIT_LIST_HEAD(&interface->vxlan_port); @@ -1835,17 +1896,18 @@ static void fm10k_slot_warn(struct fm10k_intfc *interface) return; } - if (max_gts < expected_gts) { - dev_warn(&interface->pdev->dev, - "This device requires %dGT/s of bandwidth for optimal performance.\n", - expected_gts); - dev_warn(&interface->pdev->dev, - "A %sslot with x%d lanes is suggested.\n", - (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " : - hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " : - hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""), - hw->bus_caps.width); - } + if (max_gts >= expected_gts) + return; + + dev_warn(&interface->pdev->dev, + "This device requires %dGT/s of bandwidth for optimal performance.\n", + expected_gts); + dev_warn(&interface->pdev->dev, + "A %sslot with x%d lanes is suggested.\n", + (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " : + hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " : + hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""), + hw->bus_caps.width); } /** @@ -1859,8 +1921,7 @@ static void fm10k_slot_warn(struct fm10k_intfc *interface) * The OS initialization, configuring of the interface private structure, * and a hardware reset occur. 
**/ -static int fm10k_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct fm10k_intfc *interface; @@ -1894,7 +1955,7 @@ static int fm10k_probe(struct pci_dev *pdev, pci_set_master(pdev); pci_save_state(pdev); - netdev = fm10k_alloc_netdev(); + netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]); if (!netdev) { err = -ENOMEM; goto err_alloc_netdev; @@ -2071,8 +2132,10 @@ static int fm10k_resume(struct pci_dev *pdev) /* reset hardware to known state */ err = hw->mac.ops.init_hw(&interface->hw); - if (err) + if (err) { + dev_err(&pdev->dev, "init_hw failed: %d\n", err); return err; + } /* reset statistics starting values */ hw->mac.ops.rebind_hw_stats(hw, &interface->stats); @@ -2083,16 +2146,22 @@ static int fm10k_resume(struct pci_dev *pdev) rtnl_lock(); err = fm10k_init_queueing_scheme(interface); - if (!err) { - fm10k_mbx_request_irq(interface); - if (netif_running(netdev)) - err = fm10k_open(netdev); - } + if (err) + goto err_queueing_scheme; - rtnl_unlock(); + err = fm10k_mbx_request_irq(interface); + if (err) + goto err_mbx_irq; + err = fm10k_hw_ready(interface); if (err) - return err; + goto err_open; + + err = netif_running(netdev) ? fm10k_open(netdev) : 0; + if (err) + goto err_open; + + rtnl_unlock(); /* assume host is not ready, to prevent race with watchdog in case we * actually don't have connection to the switch @@ -2110,6 +2179,14 @@ static int fm10k_resume(struct pci_dev *pdev) netif_device_attach(netdev); return 0; +err_open: + fm10k_mbx_free_irq(interface); +err_mbx_irq: + fm10k_clear_queueing_scheme(interface); +err_queueing_scheme: + rtnl_unlock(); + + return err; } /** @@ -2185,6 +2262,9 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) fm10k_close(netdev); + /* free interrupts */ + fm10k_clear_queueing_scheme(interface); + fm10k_mbx_free_irq(interface); pci_disable_device(pdev); @@ -2248,11 +2328,22 @@ static void fm10k_io_resume(struct pci_dev *pdev) int err = 0; /* reset hardware to known state */ - hw->mac.ops.init_hw(&interface->hw); + err = hw->mac.ops.init_hw(&interface->hw); + if (err) { + dev_err(&pdev->dev, "init_hw failed: %d\n", err); + return; + } /* reset statistics starting values */ hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + err = fm10k_init_queueing_scheme(interface); + if (err) { + dev_err(&interface->pdev->dev, + "init_queueing_scheme failed: %d\n", err); + return; + } + /* reassociate interrupts */ fm10k_mbx_request_irq(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 8c0bdc4e4..62ccebc5f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -150,19 +150,26 @@ static s32 fm10k_init_hw_pf(struct fm10k_hw *hw) FM10K_TPH_RXCTRL_HDR_WROEN); } - /* set max hold interval to align with 1.024 usec in all modes */ + /* set max hold interval to align with 1.024 usec in all modes and + * store ITR scale + */ switch (hw->bus.speed) { case fm10k_bus_speed_2500: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1; break; case fm10k_bus_speed_5000: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2; break; case fm10k_bus_speed_8000: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; break; default: dma_ctrl = 0; + /* just in case, assume Gen3 ITR scale */ + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; break; } @@ -259,7 +266,6 @@ static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw) { u8 perm_addr[ETH_ALEN]; u32 serial_num; - int i; serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1)); @@ -281,10 +287,8 @@ static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw) perm_addr[4] = (u8)(serial_num >> 8); perm_addr[5] = (u8)(serial_num); - for (i = 0; i < ETH_ALEN; i++) { - hw->mac.perm_addr[i] = perm_addr[i]; - hw->mac.addr[i] = perm_addr[i]; - } + ether_addr_copy(hw->mac.perm_addr, perm_addr); + ether_addr_copy(hw->mac.addr, perm_addr); return 0; } @@ -325,7 +329,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, /* clear set bit from VLAN ID */ vid &= ~FM10K_VLAN_CLEAR; - /* if glort or vlan are not valid return error */ + /* if glort or VLAN are not valid return error */ if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; @@ -334,8 +338,8 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, ((u32)mac[3] << 16) | ((u32)mac[4] << 8) | ((u32)mac[5])); - mac_update.mac_upper = cpu_to_le16(((u32)mac[0] << 8) | - ((u32)mac[1])); + mac_update.mac_upper = cpu_to_le16(((u16)mac[0] << 8) | + ((u16)mac[1])); mac_update.vlan = cpu_to_le16(vid); mac_update.glort = cpu_to_le16(glort); mac_update.action = add ? 0 : 1; @@ -410,6 +414,7 @@ static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode) if (mode > FM10K_XCAST_MODE_NONE) return FM10K_ERR_PARAM; + /* if glort is not valid return error */ if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; @@ -903,6 +908,13 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw, fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah); + /* Provide the VF the ITR scale, using software-defined fields in TDLEN + * to pass the information during VF initialization. See definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more details. 
+ */ + fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale << + FM10K_TDLEN_ITR_SCALE_SHIFT); + err_out: /* configure Queue control register */ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) & @@ -910,7 +922,7 @@ err_out: txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) | FM10K_TXQCTL_VF | vf_idx; - /* assign VID */ + /* assign VLAN ID */ for (i = 0; i < queues_per_pool; i++) fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl); @@ -1035,6 +1047,12 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, for (i = queues_per_pool; i--;) { fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); + /* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an + * explanation of how TDLEN is used. + */ + fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx + i), + hw->mac.itr_scale << + FM10K_TDLEN_ITR_SCALE_SHIFT); fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i); fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i); } @@ -1155,14 +1173,14 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, } /** - * fm10k_iov_select_vid - Select correct default VID + * fm10k_iov_select_vid - Select correct default VLAN ID * @hw: Pointer to hardware structure - * @vid: VID to correct + * @vid: VLAN ID to correct * - * Will report an error if VID is out of range. For VID = 0, it will return - * either the pf_vid or sw_vid depending on which one is set. + * Will report an error if the VLAN ID is out of range. For VID = 0, it will + * return either the pf_vid or sw_vid depending on which one is set. */ -static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) +static s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) { if (!vid) return vf_info->pf_vid ? 
vf_info->pf_vid : vf_info->sw_vid; @@ -1212,11 +1230,11 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, set = !(vid & FM10K_VLAN_CLEAR); vid &= ~FM10K_VLAN_CLEAR; - err = fm10k_iov_select_vid(vf_info, vid); + err = fm10k_iov_select_vid(vf_info, (u16)vid); if (err < 0) return err; - else - vid = err; + + vid = err; /* update VSI info for VF in regards to VLAN table */ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); @@ -1232,7 +1250,7 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, /* block attempts to set MAC for a locked device */ if (is_valid_ether_addr(vf_info->mac) && - memcmp(mac, vf_info->mac, ETH_ALEN)) + !ether_addr_equal(mac, vf_info->mac)) return FM10K_ERR_PARAM; set = !(vlan & FM10K_VLAN_CLEAR); @@ -1241,8 +1259,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, err = fm10k_iov_select_vid(vf_info, vlan); if (err < 0) return err; - else - vlan = err; + + vlan = (u16)err; /* notify switch of request for new unicast address */ err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, @@ -1267,8 +1285,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, err = fm10k_iov_select_vid(vf_info, vlan); if (err < 0) return err; - else - vlan = err; + + vlan = (u16)err; /* notify switch of request for new multicast address */ err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, @@ -1396,14 +1414,6 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, return err; } -const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = { - FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), - FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf), - FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf), - FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf), - FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), -}; - /** * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF * @hw: pointer to hardware structure @@ -1431,9 +1441,10 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec); vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP, &stats->vlan_drop); - loopback_drop = fm10k_read_hw_stats_32b(hw, - FM10K_STATS_LOOPBACK_DROP, - &stats->loopback_drop); + loopback_drop = + fm10k_read_hw_stats_32b(hw, + FM10K_STATS_LOOPBACK_DROP, + &stats->loopback_drop); nodesc_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_NODESC_DROP, &stats->nodesc_drop); @@ -1678,8 +1689,8 @@ const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = { * * This handler configures the default VLAN for the PF **/ -s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) +static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) { u16 glort, pvid; u32 pvid_update; @@ -1698,7 +1709,7 @@ s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; - /* verify VID is valid */ + /* verify VLAN ID is valid */ if (pvid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; @@ -1855,39 +1866,39 @@ static const struct fm10k_msg_data fm10k_msg_data_pf[] = { FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), }; -static struct fm10k_mac_ops mac_ops_pf = { - .get_bus_info = &fm10k_get_bus_info_generic, - .reset_hw = &fm10k_reset_hw_pf, - .init_hw = &fm10k_init_hw_pf, - .start_hw = &fm10k_start_hw_generic, - .stop_hw = &fm10k_stop_hw_generic, - .update_vlan = &fm10k_update_vlan_pf, - .read_mac_addr = 
&fm10k_read_mac_addr_pf, - .update_uc_addr = &fm10k_update_uc_addr_pf, - .update_mc_addr = &fm10k_update_mc_addr_pf, - .update_xcast_mode = &fm10k_update_xcast_mode_pf, - .update_int_moderator = &fm10k_update_int_moderator_pf, - .update_lport_state = &fm10k_update_lport_state_pf, - .update_hw_stats = &fm10k_update_hw_stats_pf, - .rebind_hw_stats = &fm10k_rebind_hw_stats_pf, - .configure_dglort_map = &fm10k_configure_dglort_map_pf, - .set_dma_mask = &fm10k_set_dma_mask_pf, - .get_fault = &fm10k_get_fault_pf, - .get_host_state = &fm10k_get_host_state_pf, - .adjust_systime = &fm10k_adjust_systime_pf, - .read_systime = &fm10k_read_systime_pf, +static const struct fm10k_mac_ops mac_ops_pf = { + .get_bus_info = fm10k_get_bus_info_generic, + .reset_hw = fm10k_reset_hw_pf, + .init_hw = fm10k_init_hw_pf, + .start_hw = fm10k_start_hw_generic, + .stop_hw = fm10k_stop_hw_generic, + .update_vlan = fm10k_update_vlan_pf, + .read_mac_addr = fm10k_read_mac_addr_pf, + .update_uc_addr = fm10k_update_uc_addr_pf, + .update_mc_addr = fm10k_update_mc_addr_pf, + .update_xcast_mode = fm10k_update_xcast_mode_pf, + .update_int_moderator = fm10k_update_int_moderator_pf, + .update_lport_state = fm10k_update_lport_state_pf, + .update_hw_stats = fm10k_update_hw_stats_pf, + .rebind_hw_stats = fm10k_rebind_hw_stats_pf, + .configure_dglort_map = fm10k_configure_dglort_map_pf, + .set_dma_mask = fm10k_set_dma_mask_pf, + .get_fault = fm10k_get_fault_pf, + .get_host_state = fm10k_get_host_state_pf, + .adjust_systime = fm10k_adjust_systime_pf, + .read_systime = fm10k_read_systime_pf, }; -static struct fm10k_iov_ops iov_ops_pf = { - .assign_resources = &fm10k_iov_assign_resources_pf, - .configure_tc = &fm10k_iov_configure_tc_pf, - .assign_int_moderator = &fm10k_iov_assign_int_moderator_pf, +static const struct fm10k_iov_ops iov_ops_pf = { + .assign_resources = fm10k_iov_assign_resources_pf, + .configure_tc = fm10k_iov_configure_tc_pf, + .assign_int_moderator = fm10k_iov_assign_int_moderator_pf, .assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf, - .reset_resources = &fm10k_iov_reset_resources_pf, - .set_lport = &fm10k_iov_set_lport_pf, - .reset_lport = &fm10k_iov_reset_lport_pf, - .update_stats = &fm10k_iov_update_stats_pf, - .report_timestamp = &fm10k_iov_report_timestamp_pf, + .reset_resources = fm10k_iov_reset_resources_pf, + .set_lport = fm10k_iov_set_lport_pf, + .reset_lport = fm10k_iov_reset_lport_pf, + .update_stats = fm10k_iov_update_stats_pf, + .report_timestamp = fm10k_iov_report_timestamp_pf, }; static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw) @@ -1897,9 +1908,9 @@ static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw) return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf); } -struct fm10k_info fm10k_pf_info = { +const struct fm10k_info fm10k_pf_info = { .mac = fm10k_mac_pf, - .get_invariants = &fm10k_get_invariants_pf, + .get_invariants = fm10k_get_invariants_pf, .mac_ops = &mac_ops_pf, .iov_ops = &iov_ops_pf, }; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index 40a0dbc62..b2d96b45c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -74,6 +74,11 @@ enum fm10k_pf_tlv_attr_id_v1 { #define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16 #define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16 +/* The following data structures are overlayed directly onto TLV mailbox + * messages, and must not break 4 byte alignment. Ensure the structures line + * up correctly as per their TLV definition. + */ + struct fm10k_mac_update { __le32 mac_lower; __le16 mac_upper; @@ -81,34 +86,32 @@ struct fm10k_mac_update { __le16 glort; u8 flags; u8 action; -} __packed; +} __aligned(4) __packed; struct fm10k_global_table_data { __le32 used; __le32 avail; -} __packed; +} __aligned(4) __packed; struct fm10k_swapi_error { __le32 status; struct fm10k_global_table_data mac; struct fm10k_global_table_data nexthop; struct fm10k_global_table_data ffu; -} __packed; +} __aligned(4) __packed; struct fm10k_swapi_1588_timestamp { __le64 egress; __le64 ingress; __le16 dglort; __le16 sglort; -} __packed; +} __aligned(4) __packed; s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[]; #define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \ fm10k_lport_map_msg_attr, func) -s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *, u32 **, - struct fm10k_mbx_info *); extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[]; #define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \ @@ -129,7 +132,6 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); -extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[]; -extern struct fm10k_info fm10k_pf_info; +extern const struct fm10k_info fm10k_pf_info; #endif /* _FM10K_PF_H */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index 9b29d7b03..ab01bb307 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -48,8 +48,8 @@ s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id) * the attribute buffer. It will return success if provided with a valid * pointers. **/ -s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, - const unsigned char *string) +static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, + const unsigned char *string) { u32 attr_data = 0, len = 0; u32 *attr; @@ -98,7 +98,7 @@ s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, * it in the array pointed by by string. It will return success if provided * with a valid pointers. **/ -s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) +static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) { u32 len; @@ -353,7 +353,7 @@ s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len) * function will return NULL on failure, and a pointer to the start * of the nested attributes on success. 
**/ -u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) +static u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) { u32 *attr; @@ -370,7 +370,7 @@ u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) } /** - * fm10k_tlv_attr_nest_start - Start a set of nested attributes + * fm10k_tlv_attr_nest_stop - Stop a set of nested attributes * @msg: Pointer to message block * * This function closes off an existing set of nested attributes. The @@ -378,7 +378,7 @@ u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) * the case of a nest within the nest this would be the outer nest pointer. * This function will return success provided all pointers are valid. **/ -s32 fm10k_tlv_attr_nest_stop(u32 *msg) +static s32 fm10k_tlv_attr_nest_stop(u32 *msg) { u32 *attr; u32 len; @@ -483,8 +483,8 @@ static s32 fm10k_tlv_attr_validate(u32 *attr, * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array * and 0 on success. **/ -s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, - const struct fm10k_tlv_attr *tlv_attr) +static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, + const struct fm10k_tlv_attr *tlv_attr) { u32 i, attr_id, offset = 0; s32 err = 0; @@ -755,7 +755,7 @@ parse_nested: err = fm10k_tlv_attr_get_mac_vlan( results[FM10K_TEST_MSG_MAC_ADDR], result_mac, &result_vlan); - if (!err && memcmp(test_mac, result_mac, ETH_ALEN)) + if (!err && !ether_addr_equal(test_mac, result_mac)) err = FM10K_ERR_INVALID_VALUE; if (!err && test_vlan != result_vlan) err = FM10K_ERR_INVALID_VALUE; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h index 7e045e8bf..e1845e0a1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -38,9 +38,9 @@ struct fm10k_msg_data; * mailbox size we will provide a message with the above header and it * will be segmented and transported to the mailbox to the other side where * it is reassembled. 
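One detail from the fm10k_tlv.c hunk above: the MAC comparison in the test message handler switches from memcmp() to ether_addr_equal(), which returns true when two 6-byte addresses match, so memcmp(...) != 0 becomes !ether_addr_equal(...). A simplified stand-in for the helper (the kernel's real version compares a 32-bit and a 16-bit word at once on most platforms):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6

/* simplified equivalent of the kernel's ether_addr_equal() */
static bool demo_ether_addr_equal(const uint8_t *a, const uint8_t *b)
{
        return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
        uint8_t mac1[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
        uint8_t mac2[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };

        /* memcmp() != 0 meant "differs"; !ether_addr_equal() says the same */
        printf("MACs %s\n",
               demo_ether_addr_equal(mac1, mac2) ? "match" : "differ");
        return 0;
}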
It contains the following fields: - * Len: Length of the message in bytes excluding the message header + * Length: Length of the message in bytes excluding the message header * Flags: TBD - * Rule: These will be the message/argument types we pass + * Type/ID: These will be the message/argument types we pass */ /* message data header */ #define FM10K_TLV_ID_SHIFT 0 @@ -106,8 +106,6 @@ struct fm10k_msg_data { #define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func } s32 fm10k_tlv_msg_init(u32 *, u16); -s32 fm10k_tlv_attr_put_null_string(u32 *, u16, const unsigned char *); -s32 fm10k_tlv_attr_get_null_string(u32 *, unsigned char *); s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16); s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *); s32 fm10k_tlv_attr_put_bool(u32 *, u16); @@ -147,9 +145,6 @@ s32 fm10k_tlv_attr_get_value(u32 *, void *, u32); fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64)) s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32); s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32); -u32 *fm10k_tlv_attr_nest_start(u32 *, u16); -s32 fm10k_tlv_attr_nest_stop(u32 *); -s32 fm10k_tlv_attr_parse(u32 *, u32 **, const struct fm10k_tlv_attr *); s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *, const struct fm10k_msg_data *); s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 318a212f0..854ebb190 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -77,6 +77,7 @@ struct fm10k_hw; #define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10 #define FM10K_ERR_PARAM -2 +#define FM10K_ERR_NO_RESOURCES -3 #define FM10K_ERR_REQUESTS_PENDING -4 #define FM10K_ERR_RESET_REQUESTED -5 #define FM10K_ERR_DMA_PENDING -6 @@ -271,6 +272,20 @@ struct fm10k_hw; #define FM10K_TDBAL(_n) ((0x40 * (_n)) + 0x8000) #define FM10K_TDBAH(_n) ((0x40 * (_n)) + 0x8001) #define FM10K_TDLEN(_n) ((0x40 * (_n)) + 0x8002) +/* When fist initialized, VFs need to know the Interrupt Throttle Rate (ITR) + * scale which is based on the PCIe speed but the speed information in the PCI + * configuration space may not be accurate. The PF already knows the ITR scale + * but there is no defined method to pass that information from the PF to the + * VF. This is accomplished during VF initialization by temporarily co-opting + * the yet-to-be-used TDLEN register to have the PF store the ITR shift for + * the VF to retrieve before the VF needs to use the TDLEN register for its + * intended purpose, i.e. before the Tx resources are allocated. 
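As the new comment explains, the PF parks the ITR scale in the VF's otherwise idle TDLEN register so the VF can recover it before TDLEN is programmed with a real ring length. A sketch of the encode/decode arithmetic using the shift and mask defined just below (register I/O stubbed out; only the bit manipulation is shown):

#include <stdint.h>
#include <stdio.h>

#define TDLEN_ITR_SCALE_SHIFT 9
#define TDLEN_ITR_SCALE_MASK  0x00000E00

/* PF side: stash the ITR scale in the idle TDLEN register image */
static uint32_t encode_itr_scale(uint8_t itr_scale)
{
        return ((uint32_t)itr_scale << TDLEN_ITR_SCALE_SHIFT) &
               TDLEN_ITR_SCALE_MASK;
}

/* VF side: recover the scale before TDLEN is reused for ring length */
static uint8_t decode_itr_scale(uint32_t tdlen)
{
        return (tdlen & TDLEN_ITR_SCALE_MASK) >> TDLEN_ITR_SCALE_SHIFT;
}

int main(void)
{
        uint32_t reg = encode_itr_scale(2);     /* 2 == PCIe gen1 scale */
        printf("itr_scale=%u\n", (unsigned)decode_itr_scale(reg));
        return 0;
}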
+ */ +#define FM10K_TDLEN_ITR_SCALE_SHIFT 9 +#define FM10K_TDLEN_ITR_SCALE_MASK 0x00000E00 +#define FM10K_TDLEN_ITR_SCALE_GEN1 2 +#define FM10K_TDLEN_ITR_SCALE_GEN2 1 +#define FM10K_TDLEN_ITR_SCALE_GEN3 0 #define FM10K_TPH_TXCTRL(_n) ((0x40 * (_n)) + 0x8003) #define FM10K_TPH_TXCTRL_DESC_TPHEN 0x00000020 #define FM10K_TPH_TXCTRL_DESC_RROEN 0x00000200 @@ -339,7 +354,7 @@ struct fm10k_hw; #define FM10K_VLAN_TABLE_VID_MAX 4096 #define FM10K_VLAN_TABLE_VSI_MAX 64 #define FM10K_VLAN_LENGTH_SHIFT 16 -#define FM10K_VLAN_CLEAR (1 << 15) +#define FM10K_VLAN_CLEAR BIT(15) #define FM10K_VLAN_ALL \ ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT) @@ -373,13 +388,13 @@ struct fm10k_hw; #define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252) enum fm10k_int_source { - fm10k_int_Mailbox = 0, - fm10k_int_PCIeFault = 1, - fm10k_int_SwitchUpDown = 2, - fm10k_int_SwitchEvent = 3, - fm10k_int_SRAM = 4, - fm10k_int_VFLR = 5, - fm10k_int_MaxHoldTime = 6, + fm10k_int_mailbox = 0, + fm10k_int_pcie_fault = 1, + fm10k_int_switch_up_down = 2, + fm10k_int_switch_event = 3, + fm10k_int_sram = 4, + fm10k_int_vflr = 5, + fm10k_int_max_hold_time = 6, fm10k_int_sources_max_pf }; @@ -535,7 +550,6 @@ struct fm10k_mac_ops { struct fm10k_dglort_cfg *); void (*set_dma_mask)(struct fm10k_hw *, u64); s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *); - void (*request_lport_map)(struct fm10k_hw *); s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb); u64 (*read_systime)(struct fm10k_hw *); }; @@ -559,6 +573,7 @@ struct fm10k_mac_info { bool get_host_state; bool tx_ready; u32 dglort_map; + u8 itr_scale; }; struct fm10k_swapi_table_info { @@ -644,10 +659,10 @@ enum fm10k_devices { }; struct fm10k_info { - enum fm10k_mac_type mac; - s32 (*get_invariants)(struct fm10k_hw *); - struct fm10k_mac_ops *mac_ops; - struct fm10k_iov_ops *iov_ops; + enum fm10k_mac_type mac; + s32 (*get_invariants)(struct fm10k_hw *); + const struct fm10k_mac_ops *mac_ops; + const struct fm10k_iov_ops *iov_ops; }; struct fm10k_hw { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 36c8b0aa0..91f8d7311 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -28,7 +28,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) { u8 *perm_addr = hw->mac.perm_addr; - u32 bal = 0, bah = 0; + u32 bal = 0, bah = 0, tdlen; s32 err; u16 i; @@ -48,6 +48,9 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) ((u32)perm_addr[2]); } + /* restore default itr_scale for next VF initialization */ + tdlen = hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT; + /* The queues have already been disabled so we just need to * update their base address registers */ @@ -56,6 +59,12 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) fm10k_write_reg(hw, FM10K_TDBAH(i), bah); fm10k_write_reg(hw, FM10K_RDBAL(i), bal); fm10k_write_reg(hw, FM10K_RDBAH(i), bah); + /* Restore ITR scale in software-defined mechanism in TDLEN + * for next VF initialization. See definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more details on the use of + * TDLEN here. 
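The VF init path that follows probes its first queue pair with the !~fm10k_read_reg(...) idiom: a register that reads back as all ones (0xFFFFFFFF) typically means the resource is absent or the device has dropped off the bus, and ~val is zero exactly in that case. A tiny illustration of the idiom (the read is stubbed; the real code reads MMIO):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for fm10k_read_reg(); real code performs an MMIO read */
static uint32_t demo_read_reg(bool present)
{
        return present ? 0x00000040u : 0xFFFFFFFFu;
}

int main(void)
{
        uint32_t val = demo_read_reg(false);

        /* ~0xFFFFFFFF == 0, so !~val is true only for an all-ones read */
        if (!~val)
                printf("no resources: register reads all ones\n");
        else
                printf("queue present, ctl=0x%x\n", (unsigned)val);
        return 0;
}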
+ */ + fm10k_write_reg(hw, FM10K_TDLEN(i), tdlen); } return 0; @@ -103,7 +112,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) s32 err; u16 i; - /* assume we always have at least 1 queue */ + /* verify we have at least 1 queue */ + if (!~fm10k_read_reg(hw, FM10K_TXQCTL(0)) || + !~fm10k_read_reg(hw, FM10K_RXQCTL(0))) { + err = FM10K_ERR_NO_RESOURCES; + goto reset_max_queues; + } + + /* determine how many queues we have */ for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) { /* verify the Descriptor cache offsets are increasing */ tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i)); @@ -119,16 +135,28 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) /* shut down queues we own and reset DMA configuration */ err = fm10k_disable_queues_generic(hw, i); if (err) - return err; + goto reset_max_queues; /* record maximum queue count */ hw->mac.max_queues = i; - /* fetch default VLAN */ + /* fetch default VLAN and ITR scale */ hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) & FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT; + /* Read the ITR scale from TDLEN. See the definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is + * used here. + */ + hw->mac.itr_scale = (fm10k_read_reg(hw, FM10K_TDLEN(0)) & + FM10K_TDLEN_ITR_SCALE_MASK) >> + FM10K_TDLEN_ITR_SCALE_SHIFT; return 0; + +reset_max_queues: + hw->mac.max_queues = 0; + + return err; } /* This structure defines the attibutes to be parsed below */ @@ -270,7 +298,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, /* verify we are not locked down on the MAC address */ if (is_valid_ether_addr(hw->mac.perm_addr) && - memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) + !ether_addr_equal(hw->mac.perm_addr, mac)) return FM10K_ERR_PARAM; /* add bit to notify us if this is a set or clear operation */ @@ -414,6 +442,7 @@ static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) if (mode > FM10K_XCAST_MODE_NONE) return FM10K_ERR_PARAM; + /* generate message requesting to change xcast mode */ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode); @@ -533,25 +562,25 @@ static const struct fm10k_msg_data fm10k_msg_data_vf[] = { FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), }; -static struct fm10k_mac_ops mac_ops_vf = { - .get_bus_info = &fm10k_get_bus_info_generic, - .reset_hw = &fm10k_reset_hw_vf, - .init_hw = &fm10k_init_hw_vf, - .start_hw = &fm10k_start_hw_generic, - .stop_hw = &fm10k_stop_hw_vf, - .update_vlan = &fm10k_update_vlan_vf, - .read_mac_addr = &fm10k_read_mac_addr_vf, - .update_uc_addr = &fm10k_update_uc_addr_vf, - .update_mc_addr = &fm10k_update_mc_addr_vf, - .update_xcast_mode = &fm10k_update_xcast_mode_vf, - .update_int_moderator = &fm10k_update_int_moderator_vf, - .update_lport_state = &fm10k_update_lport_state_vf, - .update_hw_stats = &fm10k_update_hw_stats_vf, - .rebind_hw_stats = &fm10k_rebind_hw_stats_vf, - .configure_dglort_map = &fm10k_configure_dglort_map_vf, - .get_host_state = &fm10k_get_host_state_generic, - .adjust_systime = &fm10k_adjust_systime_vf, - .read_systime = &fm10k_read_systime_vf, +static const struct fm10k_mac_ops mac_ops_vf = { + .get_bus_info = fm10k_get_bus_info_generic, + .reset_hw = fm10k_reset_hw_vf, + .init_hw = fm10k_init_hw_vf, + .start_hw = fm10k_start_hw_generic, + .stop_hw = fm10k_stop_hw_vf, + .update_vlan = fm10k_update_vlan_vf, + .read_mac_addr = fm10k_read_mac_addr_vf, + .update_uc_addr = fm10k_update_uc_addr_vf, + .update_mc_addr = 
fm10k_update_mc_addr_vf, + .update_xcast_mode = fm10k_update_xcast_mode_vf, + .update_int_moderator = fm10k_update_int_moderator_vf, + .update_lport_state = fm10k_update_lport_state_vf, + .update_hw_stats = fm10k_update_hw_stats_vf, + .rebind_hw_stats = fm10k_rebind_hw_stats_vf, + .configure_dglort_map = fm10k_configure_dglort_map_vf, + .get_host_state = fm10k_get_host_state_generic, + .adjust_systime = fm10k_adjust_systime_vf, + .read_systime = fm10k_read_systime_vf, }; static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw) @@ -561,8 +590,8 @@ static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw) return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0); } -struct fm10k_info fm10k_vf_info = { +const struct fm10k_info fm10k_vf_info = { .mac = fm10k_mac_vf, - .get_invariants = &fm10k_get_invariants_vf, + .get_invariants = fm10k_get_invariants_vf, .mac_ops = &mac_ops_vf, }; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h index 06a99d794..c4439f131 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h @@ -74,5 +74,5 @@ extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[]; #define FM10K_VF_MSG_1588_HANDLER(func) \ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func) -extern struct fm10k_info fm10k_vf_info; +extern const struct fm10k_info fm10k_vf_info; #endif /* _FM10K_VF_H */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 4dd3e2612..68f2204ec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -42,7 +42,6 @@ #include #include #include -#include #include #include #include @@ -104,6 +103,7 @@ #define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) #define I40E_PRIV_FLAGS_FD_ATR BIT(2) #define I40E_PRIV_FLAGS_VEB_STATS BIT(3) +#define I40E_PRIV_FLAGS_PS BIT(4) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) @@ -187,6 +187,7 @@ struct i40e_lump_tracking { #define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4) #define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) +#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, @@ -244,6 +245,11 @@ struct i40e_tc_configuration { struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS]; }; +struct i40e_udp_port_config { + __be16 index; + u8 type; +}; + /* struct that defines the Ethernet device */ struct i40e_pf { struct pci_dev *pdev; @@ -265,7 +271,7 @@ struct i40e_pf { u16 num_lan_qps; /* num lan queues this PF has set up */ u16 num_lan_msix; /* num queue vectors for the base PF vsi */ int queues_left; /* queues left unclaimed */ - u16 rss_size; /* num queues in the RSS array */ + u16 alloc_rss_size; /* allocated RSS queues */ u16 rss_size_max; /* HW defined max RSS queues */ u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */ u16 num_alloc_vsi; /* num VSIs this driver supports */ @@ -280,11 +286,9 @@ struct i40e_pf { u32 fd_atr_cnt; u32 fd_tcp_rule; -#ifdef CONFIG_I40E_VXLAN - __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; - u16 pending_vxlan_bitmap; + struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; + u16 pending_udp_bitmap; -#endif enum i40e_interrupt_policy int_policy; u16 rx_itr_default; u16 tx_itr_default; @@ -321,9 +325,7 @@ struct i40e_pf { #define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22) #define I40E_FLAG_PTP BIT_ULL(25) #define I40E_FLAG_MFP_ENABLED 
BIT_ULL(26) -#ifdef CONFIG_I40E_VXLAN -#define I40E_FLAG_VXLAN_FILTER_SYNC BIT_ULL(27) -#endif +#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27) #define I40E_FLAG_PORT_ID_VALID BIT_ULL(28) #define I40E_FLAG_DCB_CAPABLE BIT_ULL(29) #define I40E_FLAG_RSS_AQ_CAPABLE BIT_ULL(31) @@ -335,7 +337,9 @@ struct i40e_pf { #define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38) #define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) +#define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE BIT_ULL(41) #define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) +#define I40E_FLAG_PF_MAC BIT_ULL(50) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; @@ -412,7 +416,7 @@ struct i40e_pf { u32 rx_hwtstamp_cleared; bool ptp_tx; bool ptp_rx; - u16 rss_table_size; + u16 rss_table_size; /* HW RSS table size */ /* These are only valid in NPAR modes */ u32 npar_max_bw; u32 npar_min_bw; @@ -487,6 +491,7 @@ struct i40e_vsi { u32 tx_restart; u32 tx_busy; u64 tx_linearize; + u64 tx_force_wb; u32 rx_buf_failed; u32 rx_page_failed; @@ -504,8 +509,10 @@ struct i40e_vsi { u16 tx_itr_setting; u16 int_rate_limit; /* value in usecs */ - u16 rss_table_size; - u16 rss_size; + u16 rss_table_size; /* HW RSS table size */ + u16 rss_size; /* Allocated RSS queues */ + u8 *rss_hkey_user; /* User configured hash keys */ + u8 *rss_lut_user; /* User configured lookup table entries */ u16 max_frame; u16 rx_hdr_len; @@ -575,6 +582,9 @@ struct i40e_q_vector { u8 num_ringpairs; /* total number of ring pairs in vector */ +#define I40E_Q_VECTOR_HUNG_DETECT 0 /* Bit Index for hung detection logic */ + unsigned long hung_detected; /* Set/Reset for hung_detection logic */ + cpumask_t affinity_mask; struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; @@ -602,8 +612,8 @@ static inline char *i40e_nvm_version_str(struct i40e_hw *hw) full_ver = hw->nvm.oem_ver; ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT); - build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) - & I40E_OEM_VER_BUILD_MASK); + build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) & + I40E_OEM_VER_BUILD_MASK); patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK); snprintf(buf, sizeof(buf), @@ -668,6 +678,8 @@ extern const char i40e_driver_name[]; extern const char i40e_driver_version_str[]; void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags); +int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); +int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id); void i40e_update_stats(struct i40e_vsi *vsi); void i40e_update_eth_stats(struct i40e_vsi *vsi); @@ -691,7 +703,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, bool is_vf, bool is_netdev); void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, bool is_vf, bool is_netdev); -int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl); +int i40e_sync_vsi_filters(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink, u32 param1); int i40e_vsi_release(struct i40e_vsi *vsi); @@ -709,7 +721,7 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, void i40e_veb_release(struct i40e_veb *veb); int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc); -i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid); +int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid); void 
i40e_vsi_remove_pvid(struct i40e_vsi *vsi); void i40e_vsi_reset_stats(struct i40e_vsi *vsi); void i40e_pf_reset_stats(struct i40e_pf *pf); @@ -767,6 +779,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); +int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 6584b6cd7..b22012a44 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -227,6 +227,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_update = 0x0703, i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_oem_post_update = 0x0720, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -1891,6 +1892,26 @@ struct i40e_aqc_nvm_config_data_immediate_field { I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); +/* OEM Post Update (indirect 0x0720) + * no command data struct used + */ +struct i40e_aqc_nvm_oem_post_update { +#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 + u8 sel_data; + u8 reserved[7]; +}; + +I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); + +struct i40e_aqc_nvm_oem_post_update_buffer { + u8 str_len; + u8 dev_addr; + __le16 eeprom_addr; + u8 data[36]; +}; + +I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) @@ -2403,4 +2424,4 @@ struct i40e_aqc_debug_modify_internals { I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); -#endif +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 2d74c6e4d..6a034ddac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -44,7 +44,6 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: - case I40E_DEV_ID_KX_A: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index d4b7af9a2..10744a698 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -103,8 +103,8 @@ static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos)); bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len); - if (bytes_not_copied < 0) - return bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; *ppos += len; return len; @@ -353,8 +353,8 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, bytes_not_copied = copy_to_user(buffer, buf, len); kfree(buf); - if (bytes_not_copied < 0) - return bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; *ppos = len; return len; @@ -981,12 +981,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!cmd_buf) return 
count; bytes_not_copied = copy_from_user(cmd_buf, buffer, count); - if (bytes_not_copied < 0) { + if (bytes_not_copied) { kfree(cmd_buf); - return bytes_not_copied; + return -EFAULT; } - if (bytes_not_copied > 0) - count -= bytes_not_copied; cmd_buf[count] = '\0'; cmd_buf_tmp = strchr(cmd_buf, '\n'); @@ -1140,7 +1138,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, ma, vlan, false, false); spin_unlock_bh(&vsi->mac_filter_list_lock); - ret = i40e_sync_vsi_filters(vsi, true); + ret = i40e_sync_vsi_filters(vsi); if (f && !ret) dev_info(&pf->pdev->dev, "add macaddr: %pM vlan=%d added to VSI %d\n", @@ -1179,7 +1177,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, ma, vlan, false, false); spin_unlock_bh(&vsi->mac_filter_list_lock); - ret = i40e_sync_vsi_filters(vsi, true); + ret = i40e_sync_vsi_filters(vsi); if (!ret) dev_info(&pf->pdev->dev, "del macaddr: %pM vlan=%d removed from VSI %d\n", @@ -2034,8 +2032,8 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, bytes_not_copied = copy_to_user(buffer, buf, len); kfree(buf); - if (bytes_not_copied < 0) - return bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; *ppos = len; return len; @@ -2068,10 +2066,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf)); bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf, buffer, count); - if (bytes_not_copied < 0) - return bytes_not_copied; - else if (bytes_not_copied > 0) - count -= bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; i40e_dbg_netdev_ops_buf[count] = '\0'; buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n'); diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index c601ca4a6..448ef4c17 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -30,7 +30,6 @@ /* Device IDs */ #define I40E_DEV_ID_SFP_XL710 0x1572 #define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F #define I40E_DEV_ID_KX_B 0x1580 #define I40E_DEV_ID_KX_C 0x1581 #define I40E_DEV_ID_QSFP_A 0x1583 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 3f385ffe4..29d5833e2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -88,6 +88,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), I40E_VSI_STAT("tx_linearize", tx_linearize), + I40E_VSI_STAT("tx_force_wb", tx_force_wb), }; /* These PF_STATs might look like duplicates of some NETDEV_STATs, @@ -230,6 +231,7 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "LinkPolling", "flow-director-atr", "veb-stats", + "packet-split", }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings) @@ -2110,7 +2112,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = vsi->alloc_queue_pairs; + cmd->data = vsi->num_queue_pairs; ret = 0; break; case ETHTOOL_GRXFH: @@ -2583,7 +2585,6 @@ static int i40e_set_channels(struct net_device *dev, return -EINVAL; } -#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) /** * 
i40e_get_rxfh_key_size - get the RSS hash key size * @netdev: network interface device structure @@ -2611,10 +2612,9 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u32 reg_val; - int i, j; + u8 *lut, *seed = NULL; + int ret; + u16 i; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; @@ -2622,24 +2622,20 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { - reg_val = rd32(hw, I40E_PFQF_HLUT(i)); - indir[j++] = reg_val & 0xff; - indir[j++] = (reg_val >> 8) & 0xff; - indir[j++] = (reg_val >> 16) & 0xff; - indir[j++] = (reg_val >> 24) & 0xff; - } + seed = key; + lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); + if (!lut) + return -ENOMEM; + ret = i40e_get_rss(vsi, seed, lut, I40E_HLUT_ARRAY_SIZE); + if (ret) + goto out; + for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) + indir[i] = (u32)(lut[i]); - if (key) { - for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { - reg_val = rd32(hw, I40E_PFQF_HKEY(i)); - key[j++] = (u8)(reg_val & 0xff); - key[j++] = (u8)((reg_val >> 8) & 0xff); - key[j++] = (u8)((reg_val >> 16) & 0xff); - key[j++] = (u8)((reg_val >> 24) & 0xff); - } - } - return 0; +out: + kfree(lut); + + return ret; } /** @@ -2656,10 +2652,8 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u32 reg_val; - int i, j; + u8 *seed = NULL; + u16 i; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; @@ -2667,24 +2661,28 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { - reg_val = indir[j++]; - reg_val |= indir[j++] << 8; - reg_val |= indir[j++] << 16; - reg_val |= indir[j++] << 24; - wr32(hw, I40E_PFQF_HLUT(i), reg_val); - } - if (key) { - for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { - reg_val = key[j++]; - reg_val |= key[j++] << 8; - reg_val |= key[j++] << 16; - reg_val |= key[j++] << 24; - wr32(hw, I40E_PFQF_HKEY(i), reg_val); + if (!vsi->rss_hkey_user) { + vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE, + GFP_KERNEL); + if (!vsi->rss_hkey_user) + return -ENOMEM; } + memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE); + seed = vsi->rss_hkey_user; } - return 0; + if (!vsi->rss_lut_user) { + vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); + if (!vsi->rss_lut_user) + return -ENOMEM; + } + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) + vsi->rss_lut_user[i] = (u8)(indir[i]); + + return i40e_config_rss(vsi, seed, vsi->rss_lut_user, + I40E_HLUT_ARRAY_SIZE); } /** @@ -2712,6 +2710,8 @@ static u32 i40e_get_priv_flags(struct net_device *dev) I40E_PRIV_FLAGS_FD_ATR : 0; ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ? I40E_PRIV_FLAGS_VEB_STATS : 0; + ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ? 
+ I40E_PRIV_FLAGS_PS : 0; return ret_flags; } @@ -2726,6 +2726,26 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + bool reset_required = false; + + /* NOTE: MFP is not settable */ + + /* allow the user to control the method of receive + * buffer DMA, whether the packet is split at header + * boundaries into two separate buffers. In some cases + * one routine or the other will perform better. + */ + if ((flags & I40E_PRIV_FLAGS_PS) && + !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) { + pf->flags |= I40E_FLAG_RX_PS_ENABLED; + pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED; + reset_required = true; + } else if (!(flags & I40E_PRIV_FLAGS_PS) && + (pf->flags & I40E_FLAG_RX_PS_ENABLED)) { + pf->flags &= ~I40E_FLAG_RX_PS_ENABLED; + pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; + reset_required = true; + } if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG) pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED; @@ -2748,6 +2768,10 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) else pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + /* if needed, issue reset to cause things to take effect */ + if (reset_required) + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); + return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index fe5d9bf3e..579a46ca8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1544,8 +1544,6 @@ void i40e_fcoe_vsi_setup(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) return; - BUG_ON(!pf->vsi[pf->lan_vsi]); - for (i = 0; i < pf->num_alloc_vsi; i++) { vsi = pf->vsi[i]; if (vsi && vsi->type == I40E_VSI_FCOE) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 79ae7beea..daa920442 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -762,7 +762,7 @@ static void i40e_write_byte(u8 *hmc_bits, /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - mask = BIT(ce_info->width) - 1; + mask = (u8)(BIT(ce_info->width) - 1); src_byte = *from; src_byte &= mask; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 4a9873ec2..8f3b53e0d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -24,12 +24,24 @@ * ******************************************************************************/ +#include +#include +#include + +#ifdef CONFIG_SPARC +#include +#include +#endif + /* Local includes */ #include "i40e.h" #include "i40e_diag.h" -#ifdef CONFIG_I40E_VXLAN +#if IS_ENABLED(CONFIG_VXLAN) #include #endif +#if IS_ENABLED(CONFIG_GENEVE) +#include +#endif const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = @@ -38,8 +50,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 46 +#define DRV_VERSION_MINOR 4 +#define DRV_VERSION_BUILD 8 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -55,6 +67,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); static int i40e_setup_misc_vector(struct i40e_pf *pf); static void i40e_determine_queue_usage(struct i40e_pf *pf); static int i40e_setup_pf_filter_control(struct i40e_pf *pf); +static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, + u16 rss_table_size, u16 rss_size); static void i40e_fdir_sb_setup(struct i40e_pf *pf); static int i40e_veb_get_bw_info(struct i40e_veb *veb); @@ -68,7 +82,6 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb); static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, @@ -789,75 +802,6 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi) } #endif -/** - * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode - * @pf: the corresponding PF - * - * Update the Rx XOFF counter (PAUSE frames) in link flow control mode - **/ -static void i40e_update_link_xoff_rx(struct i40e_pf *pf) -{ - struct i40e_hw_port_stats *osd = &pf->stats_offsets; - struct i40e_hw_port_stats *nsd = &pf->stats; - struct i40e_hw *hw = &pf->hw; - u64 xoff = 0; - - if ((hw->fc.current_mode != I40E_FC_FULL) && - (hw->fc.current_mode != I40E_FC_RX_PAUSE)) - return; - - xoff = nsd->link_xoff_rx; - i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), - pf->stat_offsets_loaded, - &osd->link_xoff_rx, &nsd->link_xoff_rx); - - /* No new LFC xoff rx */ - if (!(nsd->link_xoff_rx - xoff)) - return; - -} - -/** - * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode - * @pf: the corresponding PF - * - * Update the Rx XOFF counter (PAUSE frames) in PFC mode - **/ -static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) -{ - struct i40e_hw_port_stats *osd = &pf->stats_offsets; - struct i40e_hw_port_stats *nsd = &pf->stats; - bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false}; - struct i40e_dcbx_config *dcb_cfg; - struct i40e_hw *hw = &pf->hw; - u16 i; - u8 tc; - - dcb_cfg = &hw->local_dcbx_config; - - /* Collect Link XOFF stats when PFC is disabled */ - if (!dcb_cfg->pfc.pfcenable) { - i40e_update_link_xoff_rx(pf); - return; - } - - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - u64 prio_xoff = nsd->priority_xoff_rx[i]; - - i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), - pf->stat_offsets_loaded, - &osd->priority_xoff_rx[i], - &nsd->priority_xoff_rx[i]); - - /* No new PFC xoff rx */ - if (!(nsd->priority_xoff_rx[i] - prio_xoff)) - continue; - /* Get the TC for given priority */ - tc = dcb_cfg->etscfg.prioritytable[i]; - xoff[tc] = true; - } -} - /** * i40e_update_vsi_stats - Update the vsi statistics counters. 
* @vsi: the VSI to be updated @@ -881,6 +825,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) u64 bytes, packets; unsigned int start; u64 tx_linearize; + u64 tx_force_wb; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; @@ -899,7 +844,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) */ rx_b = rx_p = 0; tx_b = tx_p = 0; - tx_restart = tx_busy = tx_linearize = 0; + tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; rx_page = 0; rx_buf = 0; rcu_read_lock(); @@ -917,6 +862,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_restart += p->tx_stats.restart_queue; tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; + tx_force_wb += p->tx_stats.tx_force_wb; /* Rx queue is part of the same block as Tx queue */ p = &p[1]; @@ -934,6 +880,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) vsi->tx_restart = tx_restart; vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; + vsi->tx_force_wb = tx_force_wb; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; @@ -1049,12 +996,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); - i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */ + i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), + pf->stat_offsets_loaded, + &osd->link_xoff_rx, &nsd->link_xoff_rx); i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); for (i = 0; i < 8; i++) { + i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), + pf->stat_offsets_loaded, + &osd->priority_xoff_rx[i], + &nsd->priority_xoff_rx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_rx[i], @@ -1316,6 +1269,42 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, struct i40e_mac_filter, list); } +/** + * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS + * @vsi: the VSI to be searched + * @macaddr: the mac address to be removed + * @is_vf: true if it is a VF + * @is_netdev: true if it is a netdev + * + * Removes a given MAC address from a VSI, regardless of VLAN + * + * Returns 0 for success, or error + **/ +int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f = NULL; + int changed = 0; + + WARN(!spin_is_locked(&vsi->mac_filter_list_lock), + "Missing mac_filter_list_lock\n"); + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if ((ether_addr_equal(macaddr, f->macaddr)) && + (is_vf == f->is_vf) && + (is_netdev == f->is_netdev)) { + f->counter--; + f->changed = true; + changed = 1; + } + } + if (changed) { + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + return 0; + } + return -ENOENT; +} + /** * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM * @vsi: the PF Main VSI - inappropriate for any other VSI @@ -1547,10 +1536,9 @@ static int i40e_set_mac(struct net_device *netdev, void *p) spin_unlock_bh(&vsi->mac_filter_list_lock); } - i40e_sync_vsi_filters(vsi, false); ether_addr_copy(netdev->dev_addr, addr->sa_data); - return 0; + return i40e_sync_vsi_filters(vsi); } /** @@ -1590,7 +1578,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & 
BIT_ULL(i)) /* TC is enabled */ + if (enabled_tc & BIT(i)) /* TC is enabled */ numtc++; } if (!numtc) { @@ -1619,13 +1607,14 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ - if (vsi->tc_config.enabled_tc & BIT_ULL(i)) { + if (vsi->tc_config.enabled_tc & BIT(i)) { /* TC is enabled */ int pow, num_qps; switch (vsi->type) { case I40E_VSI_MAIN: - qcount = min_t(int, pf->rss_size, num_tc_qps); + qcount = min_t(int, pf->alloc_rss_size, + num_tc_qps); break; #ifdef I40E_FCOE case I40E_VSI_FCOE: @@ -1851,13 +1840,12 @@ static void i40e_cleanup_add_list(struct list_head *add_list) /** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI - * @grab_rtnl: whether RTNL needs to be grabbed * * Push any outstanding VSI filter changes through the AdminQ. * * Returns 0 or error value **/ -int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) +int i40e_sync_vsi_filters(struct i40e_vsi *vsi) { struct list_head tmp_del_list, tmp_add_list; struct i40e_mac_filter *f, *ftmp, *fclone; @@ -1865,8 +1853,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) bool add_happened = false; int filter_list_len = 0; u32 changed_flags = 0; + i40e_status aq_ret = 0; bool err_cond = false; - i40e_status ret = 0; + int retval = 0; struct i40e_pf *pf; int num_add = 0; int num_del = 0; @@ -1929,17 +1918,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } spin_unlock_bh(&vsi->mac_filter_list_lock); - if (err_cond) + if (err_cond) { i40e_cleanup_add_list(&tmp_add_list); + retval = -ENOMEM; + goto out; + } } /* Now process 'del_list' outside the lock */ if (!list_empty(&tmp_del_list)) { + int del_list_size; + filter_list_len = pf->hw.aq.asq_buf_size / sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kcalloc(filter_list_len, - sizeof(struct i40e_aqc_remove_macvlan_element_data), - GFP_KERNEL); + del_list_size = filter_list_len * + sizeof(struct i40e_aqc_remove_macvlan_element_data); + del_list = kzalloc(del_list_size, GFP_KERNEL); if (!del_list) { i40e_cleanup_add_list(&tmp_add_list); @@ -1948,7 +1942,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) i40e_undo_del_filter_entries(vsi, &tmp_del_list); i40e_undo_add_filter_entries(vsi); spin_unlock_bh(&vsi->mac_filter_list_lock); - return -ENOMEM; + retval = -ENOMEM; + goto out; } list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { @@ -1966,18 +1961,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) /* flush a full buffer */ if (num_del == filter_list_len) { - ret = i40e_aq_remove_macvlan(&pf->hw, - vsi->seid, del_list, num_del, - NULL); + aq_ret = i40e_aq_remove_macvlan(&pf->hw, + vsi->seid, + del_list, + num_del, + NULL); aq_err = pf->hw.aq.asq_last_status; num_del = 0; - memset(del_list, 0, sizeof(*del_list)); + memset(del_list, 0, del_list_size); - if (ret && aq_err != I40E_AQ_RC_ENOENT) + if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { + retval = -EIO; dev_err(&pf->pdev->dev, "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); + } } /* Release memory for MAC filter entries which were * synced up with HW. 
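Note the fix buried in the hunk above: the old code cleared the flush buffer with memset(del_list, 0, sizeof(*del_list)), which zeroes exactly one element, while the rework computes the full buffer size once and reuses it for both kzalloc() and memset(). A reduced illustration of the sizeof(*ptr) pitfall (generic element type, not the adminq structures):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct elem { int a, b; };

int main(void)
{
        size_t n = 16;
        size_t list_size = n * sizeof(struct elem);
        struct elem *list = malloc(list_size);

        if (!list)
                return 1;
        memset(list, 0xff, list_size);          /* dirty the whole buffer */

        /* BUG pattern: sizeof(*list) is one entry, the other 15 stay dirty */
        memset(list, 0, sizeof(*list));
        printf("entry 1 after buggy clear: a=%d\n", list[1].a);  /* -1 */

        /* fixed pattern: clear the whole buffer */
        memset(list, 0, list_size);
        printf("entry 1 after full clear:  a=%d\n", list[1].a);  /* 0 */

        free(list);
        return 0;
}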
@@ -1987,15 +1986,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } if (num_del) { - ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, - del_list, num_del, NULL); + aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, + del_list, num_del, + NULL); aq_err = pf->hw.aq.asq_last_status; num_del = 0; - if (ret && aq_err != I40E_AQ_RC_ENOENT) + if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, "ignoring delete macvlan error, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); } @@ -2004,13 +2004,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } if (!list_empty(&tmp_add_list)) { + int add_list_size; /* do all the adds now */ filter_list_len = pf->hw.aq.asq_buf_size / sizeof(struct i40e_aqc_add_macvlan_element_data), - add_list = kcalloc(filter_list_len, - sizeof(struct i40e_aqc_add_macvlan_element_data), - GFP_KERNEL); + add_list_size = filter_list_len * + sizeof(struct i40e_aqc_add_macvlan_element_data); + add_list = kzalloc(add_list_size, GFP_KERNEL); if (!add_list) { /* Purge element from temporary lists */ i40e_cleanup_add_list(&tmp_add_list); @@ -2019,7 +2020,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) spin_lock_bh(&vsi->mac_filter_list_lock); i40e_undo_add_filter_entries(vsi); spin_unlock_bh(&vsi->mac_filter_list_lock); - return -ENOMEM; + retval = -ENOMEM; + goto out; } list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { @@ -2040,15 +2042,15 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) /* flush a full buffer */ if (num_add == filter_list_len) { - ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, - NULL); + aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, + NULL); aq_err = pf->hw.aq.asq_last_status; num_add = 0; - if (ret) + if (aq_ret) break; - memset(add_list, 0, sizeof(*add_list)); + memset(add_list, 0, add_list_size); } /* Entries from tmp_add_list were cloned from MAC * filter list, hence clean those cloned entries @@ -2058,18 +2060,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } if (num_add) { - ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, NULL); + aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, NULL); aq_err = pf->hw.aq.asq_last_status; num_add = 0; } kfree(add_list); add_list = NULL; - if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) { + if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { + retval = i40e_aq_rc_to_posix(aq_ret, aq_err); dev_info(&pf->pdev->dev, "add filter failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, @@ -2087,16 +2090,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) bool cur_multipromisc; cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); - ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, - vsi->seid, - cur_multipromisc, - NULL); - if (ret) + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, + vsi->seid, + cur_multipromisc, + NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set multi promisc failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + } } if ((changed_flags & IFF_PROMISC) 
|| promisc_forced_on) { bool cur_promisc; @@ -2112,44 +2118,50 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) */ if (pf->cur_promisc != cur_promisc) { pf->cur_promisc = cur_promisc; - if (grab_rtnl) - i40e_do_reset_safe(pf, - BIT(__I40E_PF_RESET_REQUESTED)); - else - i40e_do_reset(pf, - BIT(__I40E_PF_RESET_REQUESTED)); + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); } } else { - ret = i40e_aq_set_vsi_unicast_promiscuous( + aq_ret = i40e_aq_set_vsi_unicast_promiscuous( &vsi->back->hw, vsi->seid, cur_promisc, NULL); - if (ret) + if (aq_ret) { + retval = + i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set unicast promisc failed, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); - ret = i40e_aq_set_vsi_multicast_promiscuous( + aq_ret, pf->hw.aq.asq_last_status); + } + aq_ret = i40e_aq_set_vsi_multicast_promiscuous( &vsi->back->hw, vsi->seid, cur_promisc, NULL); - if (ret) + if (aq_ret) { + retval = + i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set multicast promisc failed, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + aq_ret, pf->hw.aq.asq_last_status); + } } - ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, - vsi->seid, - cur_promisc, NULL); - if (ret) + aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set brdcast promisc failed, err %s, aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + } } - +out: clear_bit(__I40E_CONFIG_BUSY, &vsi->state); - return 0; + return retval; } /** @@ -2166,8 +2178,15 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && - (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) - i40e_sync_vsi_filters(pf->vsi[v], true); + (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { + int ret = i40e_sync_vsi_filters(pf->vsi[v]); + + if (ret) { + /* come back and try again later */ + pf->flags |= I40E_FLAG_FILTER_SYNC; + break; + } + } } } @@ -2377,16 +2396,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) } } - /* Make sure to release before sync_vsi_filter because that - * function will lock/unlock as necessary - */ spin_unlock_bh(&vsi->mac_filter_list_lock); - if (test_bit(__I40E_DOWN, &vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) - return 0; - - return i40e_sync_vsi_filters(vsi, false); + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); + return 0; } /** @@ -2459,16 +2475,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) } } - /* Make sure to release before sync_vsi_filter because that - * function with lock/unlock as necessary - */ spin_unlock_bh(&vsi->mac_filter_list_lock); - if (test_bit(__I40E_DOWN, &vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) - return 0; - - return i40e_sync_vsi_filters(vsi, false); + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); + return 0; } /** @@ -2711,6 +2724,11 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) netif_set_xps_queue(ring->netdev, mask, ring->queue_index); free_cpumask_var(mask); } + + /* schedule our worker thread which will take care of + * 
applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); } /** @@ -4360,17 +4378,41 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) else val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); + /* Bail out if interrupts are disabled because napi_poll + * execution in-progress or will get scheduled soon. + * napi_poll cleans TX and RX queues and updates 'next_to_clean'. + */ + if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)) + return; + head = i40e_get_head(tx_ring); tx_pending = i40e_get_tx_pending(tx_ring); - /* Interrupts are disabled and TX pending is non-zero, - * trigger the SW interrupt (don't wait). Worst case - * there will be one extra interrupt which may result - * into not cleaning any queues because queues are cleaned. + /* HW is done executing descriptors, updated HEAD write back, + * but SW hasn't processed those descriptors. If interrupt is + * not generated from this point ON, it could result into + * dev_watchdog detecting timeout on those netdev_queue, + * hence proactively trigger SW interrupt. */ - if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) - i40e_force_wb(vsi, tx_ring->q_vector); + if (tx_pending) { + /* NAPI Poll didn't run and clear since it was set */ + if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT, + &tx_ring->q_vector->hung_detected)) { + netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", + vsi->seid, q_idx, tx_pending, + tx_ring->next_to_clean, head, + tx_ring->next_to_use, + readl(tx_ring->tail)); + netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n", + vsi->seid, q_idx, val); + i40e_force_wb(vsi, tx_ring->q_vector); + } else { + /* First Chance - detected possible hung */ + set_bit(I40E_Q_VECTOR_HUNG_DETECT, + &tx_ring->q_vector->hung_detected); + } + } } /** @@ -4441,7 +4483,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) if (app.selector == I40E_APP_SEL_TCPIP && app.protocolid == I40E_APP_PROTOID_ISCSI) { tc = dcbcfg->etscfg.prioritytable[app.priority]; - enabled_tc |= BIT_ULL(tc); + enabled_tc |= BIT(tc); break; } } @@ -4525,7 +4567,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) /* At least have TC0 */ enabled_tc = (enabled_tc ? enabled_tc : 0x1); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) num_tc++; } return num_tc; @@ -4547,7 +4589,7 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) /* Find the first enabled TC */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) break; } @@ -4707,7 +4749,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) * will set the numtc for netdev as 2 that will be * referenced by the netdev layer as TC 0 and 1. 
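The reworked i40e_detect_recover_hung_queue() above uses a two-chance scheme: the first watchdog pass that sees pending Tx work with interrupts disabled only arms a per-vector flag, and a forced descriptor writeback fires only if the flag is still set on the next pass, meaning NAPI never ran in between. A simplified model of that logic (C11 atomics stand in for the kernel's set_bit/test_and_clear_bit; names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag hung_detected = ATOMIC_FLAG_INIT;

/* NAPI poll clears the flag whenever it actually cleans the ring */
static void napi_ran(void) { atomic_flag_clear(&hung_detected); }

/* returns true when the watchdog should force a descriptor writeback */
static bool detect_hung_pass(bool tx_pending, bool intr_enabled)
{
        if (intr_enabled || !tx_pending)
                return false;
        if (atomic_flag_test_and_set(&hung_detected)) {
                /* second chance used up: reset the flag and fire */
                atomic_flag_clear(&hung_detected);
                return true;
        }
        return false;   /* first chance: flag armed for the next pass */
}

int main(void)
{
        printf("pass1 fires: %d\n", detect_hung_pass(true, false)); /* 0 */
        printf("pass2 fires: %d\n", detect_hung_pass(true, false)); /* 1 */
        napi_ran();
        printf("pass3 fires: %d\n", detect_hung_pass(true, false)); /* 0 */
        return 0;
}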
*/ - if (vsi->tc_config.enabled_tc & BIT_ULL(i)) + if (vsi->tc_config.enabled_tc & BIT(i)) netdev_set_tc_queue(netdev, vsi->tc_config.tc_info[i].netdev_tc, vsi->tc_config.tc_info[i].qcount, @@ -4769,7 +4811,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) bw_share[i] = 1; } @@ -4843,7 +4885,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) bw_data.tc_bw_share_credits[i] = 1; } @@ -5240,7 +5282,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc) /* Generate TC map for number of tc requested */ for (i = 0; i < tc; i++) - enabled_tc |= BIT_ULL(i); + enabled_tc |= BIT(i); /* Requesting same TC configuration as already enabled */ if (enabled_tc == vsi->tc_config.enabled_tc) @@ -5305,6 +5347,9 @@ int i40e_open(struct net_device *netdev) #ifdef CONFIG_I40E_VXLAN vxlan_get_rx_port(netdev); #endif +#ifdef CONFIG_I40E_GENEVE + geneve_get_rx_port(netdev); +#endif return 0; } @@ -5738,7 +5783,7 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, **/ static void i40e_service_event_complete(struct i40e_pf *pf) { - BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); + WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); /* flush memory to make sure state is correct before next watchog */ smp_mb__before_atomic(); @@ -6013,6 +6058,9 @@ static void i40e_link_event(struct i40e_pf *pf) i40e_status status; bool new_link, old_link; + /* save off old link status information */ + pf->hw.phy.link_info_old = pf->hw.phy.link_info; + /* set this to force the get_link_status call to refresh state */ pf->hw.phy.get_link_info = true; @@ -6101,23 +6149,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf) rtnl_lock(); if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED); + reset_flags |= BIT(__I40E_REINIT_REQUESTED); clear_bit(__I40E_REINIT_REQUESTED, &pf->state); } if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED); + reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED); + reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED); + reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED); + reset_flags |= BIT(__I40E_DOWN_REQUESTED); clear_bit(__I40E_DOWN_REQUESTED, &pf->state); } @@ -6147,13 +6195,9 @@ unlock: static void i40e_handle_link_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { - struct i40e_hw *hw = &pf->hw; struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; - /* save off old link status information */ - hw->phy.link_info_old = hw->phy.link_info; - /* Do a new status request to re-enable LSE reporting * and load new status information into the hw struct * This completely ignores any state information @@ 
-6192,15 +6236,18 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val = rd32(&pf->hw, pf->hw.aq.arq.len); oldval = val; if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { - dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); + if (hw->debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; } if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { - dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); + if (hw->debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; } if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { - dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); + if (hw->debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; } if (oldval != val) @@ -6209,15 +6256,18 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val = rd32(&pf->hw, pf->hw.aq.asq.len); oldval = val; if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { - dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); + if (pf->hw.debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; } if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { - dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); + if (pf->hw.debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; } if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { - dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); + if (pf->hw.debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; } if (oldval != val) @@ -6268,6 +6318,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) break; case i40e_aqc_opc_nvm_erase: case i40e_aqc_opc_nvm_update: + case i40e_aqc_opc_oem_post_update: i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); break; default: @@ -6685,6 +6736,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) struct i40e_hw *hw = &pf->hw; u8 set_fc_aq_fail = 0; i40e_status ret; + u32 val; u32 v; /* Now we wait for GRST to settle out. @@ -6823,6 +6875,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } } + /* Reconfigure hardware for allowing smaller MSS in the case + * of TSO, so that we avoid the MDD being fired and causing + * a reset in the case of small MSS+TSO. 
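The rebuild path continues below by clamping the device's minimum-MSS field so that TSO with a small MSS no longer triggers an MDD-driven reset; it is a plain read-modify-write of a masked register field. A sketch of the pattern using the mask and value from the hunk (register I/O stubbed with a variable):

#include <stdint.h>
#include <stdio.h>

#define REG_MSS_MIN_MASK 0x3FF0000u   /* min-MSS field, bits 25:16 */
#define REG_64BYTE_MSS   0x400000u    /* 0x40 << 16: the new minimum */

static uint32_t mss_reg = 0x5A0000u;  /* pretend HW default, 0x5A << 16 */

static uint32_t rd32(void)       { return mss_reg; }   /* MMIO read stub  */
static void     wr32(uint32_t v) { mss_reg = v; }       /* MMIO write stub */

int main(void)
{
        uint32_t val = rd32();

        /* lower the minimum MSS only if the current field is larger */
        if ((val & REG_MSS_MIN_MASK) > REG_64BYTE_MSS) {
                val &= ~REG_MSS_MIN_MASK;   /* clear the field */
                val |= REG_64BYTE_MSS;      /* insert the new minimum */
                wr32(val);
        }
        printf("MSS reg now 0x%x\n", (unsigned)rd32());
        return 0;
}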
+ */ +#define I40E_REG_MSS 0x000E64DC +#define I40E_REG_MSS_MIN_MASK 0x3FF0000 +#define I40E_64BYTE_MSS 0x400000 + val = rd32(hw, I40E_REG_MSS); + if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { + val &= ~I40E_REG_MSS_MIN_MASK; + val |= I40E_64BYTE_MSS; + wr32(hw, I40E_REG_MSS, val); + } + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) { msleep(75); @@ -6984,30 +7050,30 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) i40e_flush(hw); } -#ifdef CONFIG_I40E_VXLAN /** - * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW + * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW * @pf: board private structure **/ -static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) +static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { +#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) struct i40e_hw *hw = &pf->hw; i40e_status ret; __be16 port; int i; - if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC)) + if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) return; - pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; + pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->pending_vxlan_bitmap & BIT_ULL(i)) { - pf->pending_vxlan_bitmap &= ~BIT_ULL(i); - port = pf->vxlan_ports[i]; + if (pf->pending_udp_bitmap & BIT_ULL(i)) { + pf->pending_udp_bitmap &= ~BIT_ULL(i); + port = pf->udp_ports[i].index; if (port) ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), - I40E_AQC_TUNNEL_TYPE_VXLAN, + pf->udp_ports[i].type, NULL, NULL); else ret = i40e_aq_del_udp_tunnel(hw, i, NULL); @@ -7020,13 +7086,13 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - pf->vxlan_ports[i] = 0; + pf->udp_ports[i].index = 0; } } } +#endif } -#endif /** * i40e_service_task - Run the driver's async subtasks * @work: pointer to work_struct containing our data @@ -7051,9 +7117,7 @@ static void i40e_service_task(struct work_struct *work) i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); i40e_sync_filters_subtask(pf); -#ifdef CONFIG_I40E_VXLAN - i40e_sync_vxlan_filters_subtask(pf); -#endif + i40e_sync_udp_filters_subtask(pf); i40e_clean_adminq_subtask(pf); i40e_service_event_complete(pf); @@ -7281,6 +7345,23 @@ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) vsi->rx_rings = NULL; } +/** + * i40e_clear_rss_config_user - clear the user configured RSS hash keys + * and lookup table + * @vsi: Pointer to VSI structure + */ +static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) +{ + if (!vsi) + return; + + kfree(vsi->rss_hkey_user); + vsi->rss_hkey_user = NULL; + + kfree(vsi->rss_lut_user); + vsi->rss_lut_user = NULL; +} + /** * i40e_vsi_clear - Deallocate the VSI provided * @vsi: the VSI being un-configured @@ -7318,6 +7399,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); i40e_vsi_free_arrays(vsi, true); + i40e_clear_rss_config_user(vsi); pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi) @@ -7780,7 +7862,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) * @vsi: vsi structure * @seed: RSS hash seed **/ -static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed) +static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) { struct i40e_aqc_get_set_rss_key_data rss_key; struct i40e_pf *pf = vsi->back; @@ -7833,43 +7916,57 @@ static int 
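
The I40E_REG_MSS clamp introduced above (and repeated for i40e_probe() further down) lowers the hardware's minimum allowed TSO MSS to 64 bytes so that a small-MSS TSO request cannot fire the malicious-driver-detection logic and reset the port. A minimal userspace model of the read-modify-write, with the register access reduced to a plain variable; only the two mask constants mirror the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define REG_MSS_MIN_MASK 0x3FF0000u /* mirrors I40E_REG_MSS_MIN_MASK */
    #define MSS_64BYTE       0x400000u  /* mirrors I40E_64BYTE_MSS */

    int main(void)
    {
        uint32_t val = 0x1000000u;  /* pretend the register reads back 256B */

        if ((val & REG_MSS_MIN_MASK) > MSS_64BYTE) {
            val &= ~REG_MSS_MIN_MASK;   /* clear the min-MSS field */
            val |= MSS_64BYTE;          /* and floor it at 64 bytes */
        }
        printf("MSS register now 0x%x\n", val);  /* prints 0x400000 */
        return 0;
    }
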
i40e_vsi_config_rss(struct i40e_vsi *vsi) { u8 seed[I40E_HKEY_ARRAY_SIZE]; struct i40e_pf *pf = vsi->back; + u8 *lut; + int ret; - netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); - vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); + if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) + return 0; - if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) - return i40e_config_rss_aq(vsi, seed); + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; - return 0; + i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); + ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); + kfree(lut); + + return ret; } /** - * i40e_config_rss_reg - Prepare for RSS if used - * @pf: board private structure + * i40e_config_rss_reg - Configure RSS keys and lut by writing registers + * @vsi: Pointer to vsi structure * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure **/ -static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed) +static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, + const u8 *lut, u16 lut_size) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - u32 *seed_dw = (u32 *)seed; - u32 current_queue = 0; - u32 lut = 0; - int i, j; + u8 i; /* Fill out hash function seed */ - for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + if (seed) { + u32 *seed_dw = (u32 *)seed; - for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { - lut = 0; - for (j = 0; j < 4; j++) { - if (current_queue == vsi->rss_size) - current_queue = 0; - lut |= ((current_queue) << (8 * j)); - current_queue++; - } - wr32(&pf->hw, I40E_PFQF_HLUT(i), lut); + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + } + + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40E_HLUT_ARRAY_SIZE) + return -EINVAL; + + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); } i40e_flush(hw); @@ -7877,18 +7974,101 @@ static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed) } /** - * i40e_config_rss - Prepare for RSS if used + * i40e_get_rss_reg - Get the RSS keys and lut by reading registers + * @vsi: Pointer to VSI structure + * @seed: Buffer to store the keys + * @lut: Buffer to store the lookup table entries + * @lut_size: Size of buffer to store the lookup table entries + * + * Returns 0 on success, negative on failure + */ +static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, + u8 *lut, u16 lut_size) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u16 i; + + if (seed) { + u32 *seed_dw = (u32 *)seed; + + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + seed_dw[i] = rd32(hw, I40E_PFQF_HKEY(i)); + } + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40E_HLUT_ARRAY_SIZE) + return -EINVAL; + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) + lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i)); + } + + return 0; +} + +/** + * i40e_config_rss - Configure RSS keys and lut + * @vsi: Pointer to VSI structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + */ +int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) +{ + struct i40e_pf *pf = vsi->back; + + if 
(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + return i40e_config_rss_aq(vsi, seed, lut, lut_size); + else + return i40e_config_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40e_get_rss - Get RSS keys and lut + * @vsi: Pointer to VSI structure + * @seed: Buffer to store the keys + * @lut: Buffer to store the lookup table entries + * lut_size: Size of buffer to store the lookup table entries + * + * Returns 0 on success, negative on failure + */ +int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) +{ + return i40e_get_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40e_fill_rss_lut - Fill the RSS lookup table with default values + * @pf: Pointer to board private structure + * @lut: Lookup table + * @rss_table_size: Lookup table size + * @rss_size: Range of queue number for hashing + */ +static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, + u16 rss_table_size, u16 rss_size) +{ + u16 i; + + for (i = 0; i < rss_table_size; i++) + lut[i] = i % rss_size; +} + +/** + * i40e_pf_config_rss - Prepare for RSS if used * @pf: board private structure **/ -static int i40e_config_rss(struct i40e_pf *pf) +static int i40e_pf_config_rss(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 seed[I40E_HKEY_ARRAY_SIZE]; + u8 *lut; struct i40e_hw *hw = &pf->hw; u32 reg_val; u64 hena; - - netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + int ret; /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | @@ -7898,8 +8078,6 @@ static int i40e_config_rss(struct i40e_pf *pf) wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); - vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); - /* Determine the RSS table size based on the hardware capabilities */ reg_val = rd32(hw, I40E_PFQF_CTL_0); reg_val = (pf->rss_table_size == 512) ? @@ -7907,10 +8085,32 @@ static int i40e_config_rss(struct i40e_pf *pf) (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); wr32(hw, I40E_PFQF_CTL_0, reg_val); - if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) - return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed); + /* Determine the RSS size of the VSI */ + if (!vsi->rss_size) + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + /* Use user configured lut if there is one, otherwise use default */ + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); else - return i40e_config_rss_reg(pf, seed); + i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); + + /* Use user configured hash key if there is one, otherwise + * use default. + */ + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); + else + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); + kfree(lut); + + return ret; } /** @@ -7935,13 +8135,28 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); - pf->rss_size = new_rss_size; + pf->alloc_rss_size = new_rss_size; i40e_reset_and_rebuild(pf, true); - i40e_config_rss(pf); + + /* Discard the user configured hash keys and lut, if less + * queues are enabled. 
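
i40e_fill_rss_lut() spreads the active queues round-robin across the lookup table, and i40e_pf_config_rss() falls back to it only when no user-configured table is present. The fill step in isolation, with example sizes in place of the hardware's 128- or 512-entry tables:

    #include <stdio.h>

    int main(void)
    {
        unsigned char lut[16];       /* real tables hold 128 or 512 entries */
        unsigned short rss_size = 6; /* number of queues being hashed into */
        unsigned int i;

        /* round-robin fill, exactly the i % rss_size step above */
        for (i = 0; i < sizeof(lut); i++)
            lut[i] = i % rss_size;

        for (i = 0; i < sizeof(lut); i++)
            printf("%u ", lut[i]);   /* 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 */
        printf("\n");
        return 0;
    }
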
+ */ + if (queue_count < vsi->rss_size) { + i40e_clear_rss_config_user(vsi); + dev_dbg(&pf->pdev->dev, + "discard user configured hash keys and lut\n"); + } + + /* Reset vsi->rss_size, as number of enabled queues changed */ + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + + i40e_pf_config_rss(pf); } - dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); - return pf->rss_size; + dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n", + pf->alloc_rss_size, pf->rss_size_max); + return pf->alloc_rss_size; } /** @@ -8112,13 +8327,14 @@ static int i40e_sw_init(struct i40e_pf *pf) * maximum might end up larger than the available queues */ pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); - pf->rss_size = 1; + pf->alloc_rss_size = 1; pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; - pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); + pf->alloc_rss_size = min_t(int, pf->rss_size_max, + num_online_cpus()); } /* MFP mode enabled */ @@ -8176,7 +8392,8 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_HW_ATR_EVICT_CAPABLE | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | I40E_FLAG_WB_ON_ITR_CAPABLE | - I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE; + I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | + I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; } pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; @@ -8275,26 +8492,29 @@ static int i40e_set_features(struct net_device *netdev, return 0; } -#ifdef CONFIG_I40E_VXLAN +#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) /** - * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port + * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port * @pf: board private structure * @port: The UDP port to look up * * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found **/ -static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) +static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) { u8 i; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->vxlan_ports[i] == port) + if (pf->udp_ports[i].index == port) return i; } return i; } +#endif + +#if IS_ENABLED(CONFIG_VXLAN) /** * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up * @netdev: This physical port's netdev @@ -8313,7 +8533,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, if (sa_family == AF_INET6) return; - idx = i40e_get_vxlan_port_idx(pf, port); + idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { @@ -8323,7 +8543,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, } /* Now check if there is space to add the new port */ - next_idx = i40e_get_vxlan_port_idx(pf, 0); + next_idx = i40e_get_udp_port_idx(pf, 0); if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", @@ -8332,9 +8552,10 @@ static void i40e_add_vxlan_port(struct net_device *netdev, } /* New port: add it and mark its index in the bitmap */ - pf->vxlan_ports[next_idx] = port; - pf->pending_vxlan_bitmap |= BIT_ULL(next_idx); - pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + pf->udp_ports[next_idx].index = port; + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; + pf->pending_udp_bitmap |= BIT_ULL(next_idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; } /** @@ -8354,23 +8575,106 @@ static 
void i40e_del_vxlan_port(struct net_device *netdev, if (sa_family == AF_INET6) return; - idx = i40e_get_vxlan_port_idx(pf, port); + idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { /* if port exists, set it to 0 (mark for deletion) * and make it pending */ - pf->vxlan_ports[idx] = 0; - pf->pending_vxlan_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + pf->udp_ports[idx].index = 0; + pf->pending_udp_bitmap |= BIT_ULL(idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; } else { netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", ntohs(port)); } } +#endif + +#if IS_ENABLED(CONFIG_GENEVE) +/** + * i40e_add_geneve_port - Get notifications about GENEVE ports that come up + * @netdev: This physical port's netdev + * @sa_family: Socket Family that GENEVE is notifying us about + * @port: New UDP port number that GENEVE started listening to + **/ +static void i40e_add_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 next_idx; + u8 idx; + + if (sa_family == AF_INET6) + return; + + idx = i40e_get_udp_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "udp port %d already offloaded\n", + ntohs(port)); + return; + } + + /* Now check if there is space to add the new port */ + next_idx = i40e_get_udp_port_idx(pf, 0); + + if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", + ntohs(port)); + return; + } + + /* New port: add it and mark its index in the bitmap */ + pf->udp_ports[next_idx].index = port; + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; + pf->pending_udp_bitmap |= BIT_ULL(next_idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); +} + +/** + * i40e_del_geneve_port - Get notifications about GENEVE ports that go away + * @netdev: This physical port's netdev + * @sa_family: Socket Family that GENEVE is notifying us about + * @port: UDP port number that GENEVE stopped listening to + **/ +static void i40e_del_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 idx; + if (sa_family == AF_INET6) + return; + + idx = i40e_get_udp_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + /* if port exists, set it to 0 (mark for deletion) + * and make it pending + */ + pf->udp_ports[idx].index = 0; + pf->pending_udp_bitmap |= BIT_ULL(idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "deleting geneve port %d\n", + ntohs(port)); + } else { + netdev_warn(netdev, "geneve port %d was not found, not deleting\n", + ntohs(port)); + } +} #endif + static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) { @@ -8548,7 +8852,10 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, nlflags, 0, 0, filter_mask, NULL); } -#define I40E_MAX_TUNNEL_HDR_LEN 80 +/* Hardware supports L4 tunnel length of 128B (=2^7) which includes + * inner mac plus all inner ethertypes. 
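
The VXLAN and GENEVE callbacks in this area share one bookkeeping scheme: scan pf->udp_ports for the port (or for a free slot), record the port and tunnel type, and set the slot's bit in pending_udp_bitmap so the service task pushes the change to firmware later. A compressed sketch of the add path under that scheme; the names and the error handling are simplified, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PORTS 16               /* like I40E_MAX_PF_UDP_OFFLOAD_PORTS */
    enum tun_type { TUN_VXLAN, TUN_GENEVE };

    struct udp_slot { uint16_t port; enum tun_type type; };

    struct tbl {
        struct udp_slot slots[MAX_PORTS];
        uint64_t pending;              /* like pf->pending_udp_bitmap */
    };

    /* returns the slot index, or MAX_PORTS when 'port' is absent */
    static int port_idx(const struct tbl *t, uint16_t port)
    {
        int i;

        for (i = 0; i < MAX_PORTS; i++)
            if (t->slots[i].port == port)
                return i;
        return i;
    }

    static int add_port(struct tbl *t, uint16_t port, enum tun_type type)
    {
        int idx = port_idx(t, port);

        if (idx < MAX_PORTS)
            return 0;                  /* already offloaded */
        idx = port_idx(t, 0);          /* first free slot */
        if (idx == MAX_PORTS)
            return -1;                 /* table full */
        t->slots[idx].port = port;
        t->slots[idx].type = type;
        t->pending |= 1ULL << idx;     /* service task syncs this later */
        return 0;
    }

    int main(void)
    {
        struct tbl t = {0};

        add_port(&t, 4789, TUN_VXLAN);  /* IANA VXLAN port */
        add_port(&t, 6081, TUN_GENEVE); /* IANA GENEVE port */
        printf("pending bitmap: 0x%llx\n", (unsigned long long)t.pending);
        return 0;
    }
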
+ */ +#define I40E_MAX_TUNNEL_HDR_LEN 128 /** * i40e_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff @@ -8560,9 +8867,9 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb, netdev_features_t features) { if (skb->encapsulation && - (skb_inner_mac_header(skb) - skb_transport_header(skb) > + ((skb_inner_network_header(skb) - skb_transport_header(skb)) > I40E_MAX_TUNNEL_HDR_LEN)) - return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); return features; } @@ -8595,9 +8902,13 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, -#ifdef CONFIG_I40E_VXLAN +#if IS_ENABLED(CONFIG_VXLAN) .ndo_add_vxlan_port = i40e_add_vxlan_port, .ndo_del_vxlan_port = i40e_del_vxlan_port, +#endif +#if IS_ENABLED(CONFIG_GENEVE) + .ndo_add_geneve_port = i40e_add_geneve_port, + .ndo_del_geneve_port = i40e_del_geneve_port, #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, @@ -8632,13 +8943,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np->vsi = vsi; netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_RXCSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_TSO; netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_SCTP_CSUM | + NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | @@ -9051,7 +9363,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) f->is_vf, f->is_netdev); spin_unlock_bh(&vsi->mac_filter_list_lock); - i40e_sync_vsi_filters(vsi, false); + i40e_sync_vsi_filters(vsi); i40e_vsi_delete(vsi); i40e_vsi_free_q_vectors(vsi); @@ -9212,6 +9524,44 @@ err_vsi: return NULL; } +/** + * i40e_macaddr_init - explicitly write the mac address filters. + * + * @vsi: pointer to the vsi. + * @macaddr: the MAC address + * + * This is needed when the macaddr has been obtained by other + * means than the default, e.g., from Open Firmware or IDPROM. + * Returns 0 on success, negative on failure + **/ +static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) +{ + int ret; + struct i40e_aqc_add_macvlan_element_data element; + + ret = i40e_aq_mac_address_write(&vsi->back->hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + macaddr, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Addr change for VSI failed: %d\n", ret); + return -EADDRNOTAVAIL; + } + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, macaddr); + element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); + ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "add filter failed err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); + } + return ret; +} + /** * i40e_vsi_setup - Set up a VSI by a given type * @pf: board private structure @@ -9336,6 +9686,17 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, switch (vsi->type) { /* setup the netdev if needed */ case I40E_VSI_MAIN: + /* Apply relevant filters if a platform-specific mac + * address was selected. 
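
i40e_features_check() above now measures the span from the outer transport header to the inner network header and strips checksum and GSO offloads from any packet whose tunnel headers exceed the 128-byte budget. A toy version of the arithmetic with hypothetical VXLAN offsets:

    #include <stdio.h>

    #define MAX_TUNNEL_HDR_LEN 128   /* mirrors I40E_MAX_TUNNEL_HDR_LEN */

    int main(void)
    {
        /* hypothetical offsets in a VXLAN-encapsulated IPv4 frame */
        int transport_hdr = 34;                  /* outer Ethernet + IPv4 */
        int inner_network_hdr = 34 + 8 + 8 + 14; /* + UDP + VXLAN + inner MAC */

        if (inner_network_hdr - transport_hdr > MAX_TUNNEL_HDR_LEN)
            printf("strip csum/GSO offloads for this skb\n");
        else
            printf("offloads stay on (%d <= %d)\n",
                   inner_network_hdr - transport_hdr, MAX_TUNNEL_HDR_LEN);
        return 0;
    }
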
+ */ + if (!!(pf->flags & I40E_FLAG_PF_MAC)) { + ret = i40e_macaddr_init(vsi, pf->hw.mac.addr); + if (ret) { + dev_warn(&pf->pdev->dev, + "could not set up macaddr; err %d\n", + ret); + } + } case I40E_VSI_VMDQ2: case I40E_VSI_FCOE: ret = i40e_config_netdev(vsi); @@ -9947,7 +10308,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) * the hash */ if ((pf->flags & I40E_FLAG_RSS_ENABLED)) - i40e_config_rss(pf); + i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ i40e_update_link_info(&pf->hw); @@ -9985,7 +10346,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { /* one qp for PF, no queues for anything else */ queues_left = 0; - pf->rss_size = pf->num_lan_qps = 1; + pf->alloc_rss_size = pf->num_lan_qps = 1; /* make sure all the fancies are disabled */ pf->flags &= ~(I40E_FLAG_RSS_ENABLED | @@ -10002,7 +10363,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE))) { /* one qp for PF */ - pf->rss_size = pf->num_lan_qps = 1; + pf->alloc_rss_size = pf->num_lan_qps = 1; queues_left -= pf->num_lan_qps; pf->flags &= ~(I40E_FLAG_RSS_ENABLED | @@ -10072,8 +10433,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", pf->hw.func_caps.num_tx_qp, !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), - pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps, - pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); + pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, + pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, + queues_left); #ifdef I40E_FCOE dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); #endif @@ -10111,55 +10473,86 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) } #define INFO_STRING_LEN 255 +#define REMAIN(__x) (INFO_STRING_LEN - (__x)) static void i40e_print_features(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - char *buf, *string; + char *buf; + int i; - string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); - if (!string) { - dev_err(&pf->pdev->dev, "Features string allocation failed\n"); + buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!buf) return; - } - buf = string; - - buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); + i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); #ifdef CONFIG_PCI_IOV - buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); + i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif - buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", - pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs, - pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); + i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s", + pf->hw.func_caps.num_vsis, + pf->vsi[pf->lan_vsi]->num_queue_pairs, + pf->flags & I40E_FLAG_RX_PS_ENABLED ? 
"PS" : "1BUF"); if (pf->flags & I40E_FLAG_RSS_ENABLED) - buf += sprintf(buf, "RSS "); + i += snprintf(&buf[i], REMAIN(i), " RSS"); if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) - buf += sprintf(buf, "FD_ATR "); + i += snprintf(&buf[i], REMAIN(i), " FD_ATR"); if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { - buf += sprintf(buf, "FD_SB "); - buf += sprintf(buf, "NTUPLE "); + i += snprintf(&buf[i], REMAIN(i), " FD_SB"); + i += snprintf(&buf[i], REMAIN(i), " NTUPLE"); } if (pf->flags & I40E_FLAG_DCB_CAPABLE) - buf += sprintf(buf, "DCB "); + i += snprintf(&buf[i], REMAIN(i), " DCB"); #if IS_ENABLED(CONFIG_VXLAN) - buf += sprintf(buf, "VxLAN "); + i += snprintf(&buf[i], REMAIN(i), " VxLAN"); +#endif +#if IS_ENABLED(CONFIG_GENEVE) + i += snprintf(&buf[i], REMAIN(i), " Geneve"); #endif if (pf->flags & I40E_FLAG_PTP) - buf += sprintf(buf, "PTP "); + i += snprintf(&buf[i], REMAIN(i), " PTP"); #ifdef I40E_FCOE if (pf->flags & I40E_FLAG_FCOE_ENABLED) - buf += sprintf(buf, "FCOE "); + i += snprintf(&buf[i], REMAIN(i), " FCOE"); #endif if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) - buf += sprintf(buf, "VEB "); + i += snprintf(&buf[i], REMAIN(i), " VEB"); else - buf += sprintf(buf, "VEPA "); + i += snprintf(&buf[i], REMAIN(i), " VEPA"); - BUG_ON(buf > (string + INFO_STRING_LEN)); - dev_info(&pf->pdev->dev, "%s\n", string); - kfree(string); + dev_info(&pf->pdev->dev, "%s\n", buf); + kfree(buf); + WARN_ON(i > INFO_STRING_LEN); +} + +/** + * i40e_get_platform_mac_addr - get platform-specific MAC address + * + * @pdev: PCI device information struct + * @pf: board private structure + * + * Look up the MAC address in Open Firmware on systems that support it, + * and use IDPROM on SPARC if no OF address is found. On return, the + * I40E_FLAG_PF_MAC will be wset in pf->flags if a platform-specific value + * has been selected. + **/ +static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) +{ + struct device_node *dp = pci_device_to_OF_node(pdev); + const unsigned char *addr; + u8 *mac_addr = pf->hw.mac.addr; + + pf->flags &= ~I40E_FLAG_PF_MAC; + addr = of_get_mac_address(dp); + if (addr) { + ether_addr_copy(mac_addr, addr); + pf->flags |= I40E_FLAG_PF_MAC; +#ifdef CONFIG_SPARC + } else { + ether_addr_copy(mac_addr, idprom->id_ethaddr); + pf->flags |= I40E_FLAG_PF_MAC; +#endif /* CONFIG_SPARC */ + } } /** @@ -10183,6 +10576,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u16 link_status; int err; u32 len; + u32 val; u32 i; u8 set_fc_aq_fail; @@ -10302,6 +10696,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mutex_init(&hw->aq.arq_mutex); err = i40e_init_adminq(hw); + if (err) { + if (err == I40E_ERR_FIRMWARE_API_VERSION) + dev_info(&pdev->dev, + "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); + else + dev_info(&pdev->dev, + "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n"); + + goto err_pf_reset; + } /* provide nvm, fw, api versions */ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", @@ -10309,12 +10713,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->aq.api_maj_ver, hw->aq.api_min_ver, i40e_nvm_version_str(hw)); - if (err) { - dev_info(&pdev->dev, - "The driver for the device stopped because the NVM image is newer than expected. 
You must install the most recent version of the network driver.\n"); - goto err_pf_reset; - } - if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) dev_info(&pdev->dev, @@ -10367,6 +10765,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } i40e_get_mac_addr(hw, hw->mac.addr); + /* allow a platform config to override the HW addr */ + i40e_get_platform_mac_addr(pdev, pf); if (!is_valid_ether_addr(hw->mac.addr)) { dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); err = -EIO; @@ -10411,7 +10811,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); - if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) + if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1) pf->wol_en = false; else pf->wol_en = true; @@ -10493,6 +10893,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + /* Reconfigure hardware for allowing smaller MSS in the case + * of TSO, so that we avoid the MDD being fired and causing + * a reset in the case of small MSS+TSO. + */ + val = rd32(hw, I40E_REG_MSS); + if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { + val &= ~I40E_REG_MSS_MIN_MASK; + val |= I40E_64BYTE_MSS; + wr32(hw, I40E_REG_MSS, val); + } + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) { msleep(75); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 635b3ac17..47bd8b314 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -235,6 +235,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, "Filter deleted for PCTYPE %d loc = %d\n", fd_data->pctype, fd_data->fd_id); } + if (err) + kfree(raw_packet); + return err ? -EOPNOTSUPP : 0; } @@ -312,6 +315,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, fd_data->pctype, fd_data->fd_id); } + if (err) + kfree(raw_packet); + return err ? -EOPNOTSUPP : 0; } @@ -322,7 +328,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, * @fd_data: the flow director data required for the FDir descriptor * @add: true adds a filter, false removes it * - * Always returns -EOPNOTSUPP + * Returns 0 if the filters were successfully added or removed **/ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, struct i40e_fdir_filter *fd_data, @@ -387,6 +393,9 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, } } + if (err) + kfree(raw_packet); + return err ? 
-EOPNOTSUPP : 0;
 }
 
@@ -506,9 +515,6 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 				pf->auto_disable_flags |=
 							I40E_FLAG_FD_SB_ENABLED;
 			}
-		} else {
-			dev_info(&pdev->dev,
-				 "FD filter programming failed due to incorrect filter parameters\n");
 		}
 	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
@@ -526,11 +532,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 					    struct i40e_tx_buffer *tx_buffer)
 {
 	if (tx_buffer->skb) {
-		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
-			kfree(tx_buffer->raw_buf);
-		else
-			dev_kfree_skb_any(tx_buffer->skb);
-
+		dev_kfree_skb_any(tx_buffer->skb);
 		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_single(ring->dev,
 					 dma_unmap_addr(tx_buffer, dma),
@@ -542,6 +544,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 			       dma_unmap_len(tx_buffer, len),
 			       DMA_TO_DEVICE);
 	}
+
+	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+		kfree(tx_buffer->raw_buf);
+
 	tx_buffer->next_to_watch = NULL;
 	tx_buffer->skb = NULL;
 	dma_unmap_len_set(tx_buffer, len, 0);
@@ -1374,7 +1380,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
-	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
+	/* If VXLAN/GENEVE traffic has an outer UDPv4 checksum we need to check
 	 * it in the driver, hardware does not do it for us.
 	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
 	 * so the total length of IPv4 header is IHL*4 bytes
@@ -1416,31 +1422,12 @@ checksum_fail:
 }
 
 /**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
-			       union i40e_rx_desc *rx_desc)
-{
-	const __le64 rss_mask =
-		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-	if ((ring->netdev->features & NETIF_F_RXHASH) &&
-	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
-		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-	else
-		return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
 {
 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -1457,6 +1444,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
 	return PKT_HASH_TYPE_L2;
 }
 
+/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+				union i40e_rx_desc *rx_desc,
+				struct sk_buff *skb,
+				u8 rx_ptype)
+{
+	u32 hash;
+	const __le64 rss_mask =
+		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+	}
+}
+
 /**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * @rx_ring: rx ring to clean
@@ -1606,8 +1617,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 			continue;
 		}
 
-		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-			     i40e_ptype_to_hash(rx_ptype));
+		
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> @@ -1632,7 +1643,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } #endif - skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_desc->wb.qword1.status_error_len = 0; @@ -1736,8 +1746,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> @@ -1864,7 +1873,6 @@ enable_int: q_vector->itr_countdown--; else q_vector->itr_countdown = ITR_COUNTDOWN_START; - } /** @@ -1892,12 +1900,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) return 0; } + /* Clear hung_detected bit */ + clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected); /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); - arm_wb |= ring->arm_wb; + arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -1926,8 +1936,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) /* If work not completed, return budget and polling will return */ if (!clean_complete) { tx_only: - if (arm_wb) + if (arm_wb) { + q_vector->tx.ring[0].tx_stats.tx_force_wb++; i40e_force_wb(vsi, q_vector); + } return budget; } @@ -1993,7 +2005,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) return; - if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) { + if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) { /* snag network header to get L4 type and address */ hdr.network = skb_network_header(skb); @@ -2078,7 +2090,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; - if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) + if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) dtype_cmd |= ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & @@ -2187,14 +2199,12 @@ out: * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header - * @cd_type_cmd_tso_mss: ptr to u64 object - * @cd_tunneling: ptr to context descriptor bits + * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u8 *hdr_len, u64 *cd_type_cmd_tso_mss, - u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -2247,7 +2257,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @tx_flags: the collected send information - * @cd_type_cmd_tso_mss: ptr to u64 object + * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen **/ @@ -2303,8 +2313,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, struct iphdr 
*this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; - struct udphdr *oudph; - struct iphdr *oiph; + struct udphdr *oudph = NULL; + struct iphdr *oiph = NULL; u32 l4_tunnel = 0; if (skb->encapsulation) { @@ -2313,7 +2323,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, oudph = udp_hdr(skb); oiph = ip_hdr(skb); l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; case IPPROTO_GRE: l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING; @@ -2807,6 +2817,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, int tsyn; int tso; + /* prefetch the data, we'll need it later */ + prefetch(skb->data); + if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; @@ -2826,8 +2839,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, &hdr_len, - &cd_type_cmd_tso_mss, &cd_tunneling); + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 6779fb771..3f081e25e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -163,7 +163,7 @@ enum i40e_dyn_idx_t { #define I40E_TX_FLAGS_FSO BIT(7) #define I40E_TX_FLAGS_TSYN BIT(8) #define I40E_TX_FLAGS_FD_SB BIT(9) -#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) +#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -202,6 +202,7 @@ struct i40e_tx_queue_stats { u64 tx_busy; u64 tx_done_old; u64 tx_linearize; + u64 tx_force_wb; }; struct i40e_rx_queue_stats { diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index ae8798260..3226946bf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -153,6 +153,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 44462b40f..63e62f9ae 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -290,8 +290,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, next_q = find_first_bit(&linklistmap, (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)); - vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES; - qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES; + vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; + qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); @@ -549,12 +549,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); spin_lock_bh(&vsi->mac_filter_list_lock); - f = i40e_add_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? 
vf->port_vlan_id : -1, - true, false); - if (!f) - dev_info(&pf->pdev->dev, - "Could not allocate VF MAC addr\n"); + if (is_valid_ether_addr(vf->default_lan_addr.addr)) { + f = i40e_add_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id ? vf->port_vlan_id : -1, + true, false); + if (!f) + dev_info(&pf->pdev->dev, + "Could not add MAC filter %pM for VF %d\n", + vf->default_lan_addr.addr, vf->vf_id); + } f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id ? vf->port_vlan_id : -1, true, false); @@ -565,7 +568,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) } /* program mac filter */ - ret = i40e_sync_vsi_filters(vsi, false); + ret = i40e_sync_vsi_filters(vsi); if (ret) dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); @@ -1094,8 +1097,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, /* single place to detect unsuccessful return values */ if (v_retval) { vf->num_invalid_msgs++; - dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n", - v_opcode, v_retval); + dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n", + vf->vf_id, v_opcode, v_retval); if (vf->num_invalid_msgs > I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { dev_err(&pf->pdev->dev, @@ -1623,7 +1626,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (!f) { dev_err(&pf->pdev->dev, - "Unable to add VF MAC filter\n"); + "Unable to add MAC filter %pM for VF %d\n", + al->list[i].addr, vf->vf_id); ret = I40E_ERR_PARAM; spin_unlock_bh(&vsi->mac_filter_list_lock); goto error_param; @@ -1632,8 +1636,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ - if (i40e_sync_vsi_filters(vsi, false)) - dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); + ret = i40e_sync_vsi_filters(vsi); + if (ret) + dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", + vf->vf_id, ret); error_param: /* send the response to the VF */ @@ -1669,8 +1675,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < al->num_elements; i++) { if (is_broadcast_ether_addr(al->list[i].addr) || is_zero_ether_addr(al->list[i].addr)) { - dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", - al->list[i].addr); + dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", + al->list[i].addr, vf->vf_id); ret = I40E_ERR_INVALID_MAC_ADDR; goto error_param; } @@ -1680,13 +1686,19 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) spin_lock_bh(&vsi->mac_filter_list_lock); /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) - i40e_del_filter(vsi, al->list[i].addr, - I40E_VLAN_ANY, true, false); + if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) { + ret = I40E_ERR_INVALID_MAC_ADDR; + spin_unlock_bh(&vsi->mac_filter_list_lock); + goto error_param; + } + spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ - if (i40e_sync_vsi_filters(vsi, false)) - dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); + ret = i40e_sync_vsi_filters(vsi); + if (ret) + dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", + vf->vf_id, ret); error_param: /* send the response to the VF */ @@ -1740,8 +1752,8 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (ret) dev_err(&pf->pdev->dev, - "Unable to add VF vlan filter %d, error %d\n", - vfl->vlan_id[i], ret); + "Unable to 
add VLAN filter %d for VF %d, error %d\n", + vfl->vlan_id[i], vf->vf_id, ret); } error_param: @@ -1792,8 +1804,8 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (ret) dev_err(&pf->pdev->dev, - "Unable to delete VF vlan filter %d, error %d\n", - vfl->vlan_id[i], ret); + "Unable to delete VLAN filter %d for VF %d, error %d\n", + vfl->vlan_id[i], vf->vf_id, ret); } error_param: @@ -2066,15 +2078,15 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, - "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_param; } - if (!is_valid_ether_addr(mac)) { + if (is_multicast_ether_addr(mac)) { dev_err(&pf->pdev->dev, - "Invalid VF ethernet address\n"); + "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); ret = -EINVAL; goto error_param; } @@ -2085,9 +2097,10 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) spin_lock_bh(&vsi->mac_filter_list_lock); /* delete the temporary mac address */ - i40e_del_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? vf->port_vlan_id : -1, - true, false); + if (!is_zero_ether_addr(vf->default_lan_addr.addr)) + i40e_del_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id ? vf->port_vlan_id : -1, + true, false); /* Delete all the filters for this VSI - we're going to kill it * anyway. @@ -2099,7 +2112,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); /* program mac filter */ - if (i40e_sync_vsi_filters(vsi, false)) { + if (i40e_sync_vsi_filters(vsi)) { dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); ret = -EIO; goto error_param; @@ -2150,8 +2163,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_pvid; } @@ -2270,8 +2284,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error; } @@ -2344,8 +2359,9 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, /* first vsi is always the LAN vsi */ vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_param; } @@ -2460,6 +2476,12 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) } vf = &(pf->vf[vf_id]); + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d still in reset. 
Try again.\n", + vf_id); + ret = -EAGAIN; + goto out; + } if (enable == vf->spoofchk) goto out; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index fcb9ef34c..f5b2b369d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -227,6 +227,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_update = 0x0703, i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_oem_post_update = 0x0720, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -1888,6 +1889,26 @@ struct i40e_aqc_nvm_config_data_immediate_field { I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); +/* OEM Post Update (indirect 0x0720) + * no command data struct used + */ + struct i40e_aqc_nvm_oem_post_update { +#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 + u8 sel_data; + u8 reserved[7]; +}; + +I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); + +struct i40e_aqc_nvm_oem_post_update_buffer { + u8 str_len; + u8 dev_addr; + __le16 eeprom_addr; + u8 data[36]; +}; + +I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) @@ -2311,4 +2332,4 @@ struct i40e_aqc_debug_modify_internals { I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); -#endif +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 72b1942a9..938783e0b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -44,7 +44,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: - case I40E_DEV_ID_KX_A: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index e6a39c986..ca8b58c3d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -30,7 +30,6 @@ /* Device IDs */ #define I40E_DEV_ID_SFP_XL710 0x1572 #define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F #define I40E_DEV_ID_KX_B 0x1580 #define I40E_DEV_ID_KX_C 0x1581 #define I40E_DEV_ID_QSFP_A 0x1583 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 47e9a90d6..7a00657da 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -51,11 +51,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, struct i40e_tx_buffer *tx_buffer) { if (tx_buffer->skb) { - if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) - kfree(tx_buffer->raw_buf); - else - dev_kfree_skb_any(tx_buffer->skb); - + dev_kfree_skb_any(tx_buffer->skb); if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), @@ -67,6 +63,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } + + if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) + kfree(tx_buffer->raw_buf); + tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); @@ -127,17 +127,24 @@ void 
i40evf_free_tx_resources(struct i40e_ring *tx_ring)
 }
 
 /**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
+ * i40evf_get_tx_pending - how many Tx descriptors not processed
+ * @tx_ring: the ring of descriptors
 *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
 **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+u32 i40evf_get_tx_pending(struct i40e_ring *ring)
 {
-	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+	u32 head, tail;
+
+	head = i40e_get_head(ring);
+	tail = readl(ring->tail);
 
-	return le32_to_cpu(*(volatile __le32 *)head);
+	if (head != tail)
+		return (head < tail) ?
+			tail - head : (tail + ring->count - head);
+
+	return 0;
 }
 
 #define WB_STRIDE 0x3
@@ -245,16 +252,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 	tx_ring->q_vector->tx.total_bytes += total_bytes;
 	tx_ring->q_vector->tx.total_packets += total_packets;
 
-	/* check to see if there are any non-cache aligned descriptors
-	 * waiting to be written back, and kick the hardware to force
-	 * them to be written back in case of napi polling
-	 */
-	if (budget &&
-	    !((i & WB_STRIDE) == WB_STRIDE) &&
-	    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
-	    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
-		tx_ring->arm_wb = true;
-
 	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
 						      tx_ring->queue_index),
 				  total_packets, total_bytes);
@@ -414,7 +411,7 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 	return false;
 }
 
-/*
+/**
 * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
@@ -889,31 +886,12 @@ checksum_fail:
 }
 
 /**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
-			       union i40e_rx_desc *rx_desc)
-{
-	const __le64 rss_mask =
-		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-	if ((ring->netdev->features & NETIF_F_RXHASH) &&
-	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
-		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-	else
-		return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
 {
 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -930,6 +908,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
 	return PKT_HASH_TYPE_L2;
 }
 
+/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+				union i40e_rx_desc *rx_desc,
+				struct sk_buff *skb,
+				u8 rx_ptype)
+{
+	u32 hash;
+	const __le64 rss_mask =
+		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+	}
+}
+
 /**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * 
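
i40evf_get_tx_pending() above recovers the number of in-flight descriptors from the head write-back value and the locally cached tail, wrapping modulo the ring size. The arithmetic on its own:

    #include <stdio.h>

    /* head/tail distance on a ring of 'count' descriptors */
    static unsigned int tx_pending(unsigned int head, unsigned int tail,
                                   unsigned int count)
    {
        if (head == tail)
            return 0;
        return (head < tail) ? tail - head : tail + count - head;
    }

    int main(void)
    {
        printf("%u\n", tx_pending(10, 14, 512));  /* 4, no wrap */
        printf("%u\n", tx_pending(510, 2, 512));  /* 4, tail wrapped */
        return 0;
    }
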
@rx_ring: rx ring to clean @@ -1071,8 +1073,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; @@ -1090,7 +1092,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } #endif - skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_desc->wb.qword1.status_error_len = 0; @@ -1189,8 +1190,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; @@ -1263,10 +1263,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, rx = i40e_set_new_dynamic_itr(&q_vector->rx); rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); } + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { tx = i40e_set_new_dynamic_itr(&q_vector->tx); txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); } + if (rx || tx) { /* get the higher of the two ITR adjustments and * use the same value for both ITR registers @@ -1302,7 +1304,6 @@ enable_int: q_vector->itr_countdown--; else q_vector->itr_countdown = ITR_COUNTDOWN_START; - } /** @@ -1335,7 +1336,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) */ i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); - arm_wb |= ring->arm_wb; + arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -1364,8 +1365,10 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) /* If work not completed, return budget and polling will return */ if (!clean_complete) { tx_only: - if (arm_wb) + if (arm_wb) { + q_vector->tx.ring[0].tx_stats.tx_force_wb++; i40evf_force_wb(vsi, q_vector); + } return budget; } @@ -1437,13 +1440,12 @@ out: * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header - * @cd_tunneling: ptr to context descriptor bits + * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u8 *hdr_len, u64 *cd_type_cmd_tso_mss, - u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -1555,7 +1557,6 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, *tx_flags |= I40E_TX_FLAGS_IPV6; } - if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { @@ -1654,7 +1655,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); } - /** +/** * i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer * @tx_flags: collected send information @@ -1770,6 +1771,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 td_tag = 0; dma_addr_t dma; u16 gso_segs; + u16 desc_count = 0; + bool tail_bump = true; + bool do_rs = false; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; @@ -1810,6 +1814,8 @@ static inline void 
i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc++; i++; + desc_count++; + if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; @@ -1829,6 +1835,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc++; i++; + desc_count++; + if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; @@ -1843,35 +1851,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = &tx_ring->tx_bi[i]; } - /* Place RS bit on last descriptor of any packet that spans across the - * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. - */ -#define WB_STRIDE 0x3 - if (((i & WB_STRIDE) != WB_STRIDE) && - (first <= &tx_ring->tx_bi[i]) && - (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << - I40E_TXD_QW1_CMD_SHIFT); - } else { - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TXD_CMD << - I40E_TXD_QW1_CMD_SHIFT); - } - - netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, - tx_ring->queue_index), - first->bytecount); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; @@ -1881,15 +1860,72 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; + netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + first->bytecount); i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* Algorithm to optimize tail and RS bit setting: + * if xmit_more is supported + * if xmit_more is true + * do not update tail and do not mark RS bit. + * if xmit_more is false and last xmit_more was false + * if every packet spanned less than 4 desc + * then set RS bit on 4th packet and update tail + * on every packet + * else + * update tail and set RS bit on every packet. + * if xmit_more is false and last_xmit_more was true + * update tail and set RS bit. + * + * Optimization: wmb to be issued only in case of tail update. + * Also optimize the Descriptor WB path for RS bit with the same + * algorithm. + * + * Note: If there are less than 4 packets + * pending and interrupts were disabled the service task will + * trigger a force WB. + */ + if (skb->xmit_more && + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index))) { + tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; + tail_bump = false; + } else if (!skb->xmit_more && + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index)) && + (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) && + (tx_ring->packet_stride < WB_STRIDE) && + (desc_count < WB_STRIDE)) { + tx_ring->packet_stride++; + } else { + tx_ring->packet_stride = 0; + tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; + do_rs = true; + } + if (do_rs) + tx_ring->packet_stride = 0; + + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)(do_rs ? 
I40E_TXD_CMD : + I40E_TX_DESC_CMD_EOP) << + I40E_TXD_QW1_CMD_SHIFT); + /* notify HW of packet */ - if (!skb->xmit_more || - netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, - tx_ring->queue_index))) - writel(i, tx_ring->tail); - else + if (!tail_bump) prefetchw(tx_desc + 1); + if (tail_bump) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, tx_ring->tail); + } + return; dma_error: @@ -1961,6 +1997,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, u8 hdr_len = 0; int tso; + /* prefetch the data, we'll need it later */ + prefetch(skb->data); + if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; @@ -1980,8 +2019,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, &hdr_len, - &cd_type_cmd_tso_mss, &cd_tunneling); + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; @@ -2029,7 +2067,7 @@ out_drop: netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping]; + struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; /* hardware can't handle really short frames, hardware padding works * beyond this point diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index ebc1bf77f..e29bb3e86 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -201,6 +201,7 @@ struct i40e_tx_queue_stats { u64 tx_busy; u64 tx_done_old; u64 tx_linearize; + u64 tx_force_wb; }; struct i40e_rx_queue_stats { @@ -267,6 +268,8 @@ struct i40e_ring { bool ring_active; /* is ring online or not */ bool arm_wb; /* do something to arm write back */ + u8 packet_stride; +#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2) u16 flags; #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) @@ -321,4 +324,19 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); void i40evf_free_tx_resources(struct i40e_ring *tx_ring); void i40evf_free_rx_resources(struct i40e_ring *rx_ring); int i40evf_napi_poll(struct napi_struct *napi, int budget); +u32 i40evf_get_tx_pending(struct i40e_ring *ring); + +/** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: Tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} #endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index 9f7b279b9..3b9d20374 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -153,6 +153,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h 
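The tail/RS rework in i40evf_tx_map() above is easier to follow outside the mapping loop. Here is a standalone restatement of the decision (editorial sketch, not driver code; WB_STRIDE is assumed to be 0x3, matching the definition the patch removes, and last_xmit_more stands in for the I40E_TXR_FLAGS_LAST_XMIT_MORE_SET ring flag):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WB_STRIDE 0x3	/* assumed, as in the removed hunk */

/* Returns true when the doorbell (tail) must be written; *do_rs says
 * whether this packet's last descriptor also carries the RS bit.
 */
static bool tx_tail_and_rs_policy(bool xmit_more, bool queue_stopped,
				  bool *last_xmit_more,
				  uint8_t *packet_stride,
				  uint16_t desc_count, bool *do_rs)
{
	*do_rs = false;
	if (xmit_more && !queue_stopped) {
		/* more frames are coming: defer both tail and RS */
		*last_xmit_more = true;
		return false;
	}
	if (!xmit_more && !queue_stopped && !*last_xmit_more &&
	    *packet_stride < WB_STRIDE && desc_count < WB_STRIDE) {
		/* short packet in a quiet period: count it; only every
		 * fourth such packet falls through to request a
		 * descriptor write-back
		 */
		(*packet_stride)++;
		return true;
	}
	/* long packet, stopped queue, or end of an xmit_more burst */
	*packet_stride = 0;
	*last_xmit_more = false;
	*do_rs = true;
	return true;
}

int main(void)
{
	bool last = false, rs;
	uint8_t stride = 0;
	int pkt;

	/* four small frames in a row: tail each time, RS on the fourth */
	for (pkt = 0; pkt < 4; pkt++) {
		bool bump = tx_tail_and_rs_policy(false, false, &last,
						  &stride, 1, &rs);
		printf("pkt %d: tail=%d rs=%d\n", pkt, bump, rs);
	}
	return 0;
}

Because RS is now set only intermittently, Tx cleanup leans on the head write-back location that i40e_get_head() above reads (the descriptor slot one past the ring), and, per the patch's own comment, the service task forces a write-back if interrupts are disabled while descriptors are still pending.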
b/drivers/net/ethernet/intel/i40evf/i40evf.h index 22fc3d49c..be1b72b93 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -67,6 +67,8 @@ struct i40e_vsi { u16 rx_itr_setting; u16 tx_itr_setting; u16 qs_handle; + u8 *rss_hkey_user; /* User configured hash keys */ + u8 *rss_lut_user; /* User configured lookup table entries */ }; /* How many Rx Buffers do we bundle into one write to the hardware ? */ @@ -95,10 +97,10 @@ struct i40e_vsi { #define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i])) #define I40E_TX_CTXTDESC(R, i) \ (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) -#define MAX_RX_QUEUES 8 -#define MAX_TX_QUEUES MAX_RX_QUEUES +#define MAX_QUEUES 16 #define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) +#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4) /* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. @@ -142,9 +144,6 @@ struct i40e_q_vector { #define OTHER_VECTOR 1 #define NONQ_VECS (OTHER_VECTOR) -#define MAX_MSIX_Q_VECTORS 4 -#define MAX_MSIX_COUNT 5 - #define MIN_MSIX_Q_VECTORS 1 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS) @@ -190,19 +189,19 @@ struct i40evf_adapter { struct work_struct reset_task; struct work_struct adminq_task; struct delayed_work init_task; - struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + struct i40e_q_vector *q_vectors; struct list_head vlan_filter_list; char misc_vector_name[IFNAMSIZ + 9]; int num_active_queues; /* TX */ - struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; + struct i40e_ring *tx_rings; u32 tx_timeout_count; struct list_head mac_filter_list; u32 tx_desc_count; /* RX */ - struct i40e_ring *rx_rings[I40E_MAX_VSI_QP]; + struct i40e_ring *rx_rings; u64 hw_csum_rx_error; u32 rx_desc_count; int num_msix_vectors; @@ -313,4 +312,8 @@ void i40evf_request_reset(struct i40evf_adapter *adapter); void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, enum i40e_virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen); +int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, + u16 lut_size); +int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, + u16 lut_size); #endif /* _I40EVF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 4790437a5..a4c9feb58 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -121,12 +121,12 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, data[i] = *(u64 *)p; } for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->tx_rings[j]->stats.packets; - data[i++] = adapter->tx_rings[j]->stats.bytes; + data[i++] = adapter->tx_rings[j].stats.packets; + data[i++] = adapter->tx_rings[j].stats.bytes; } for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->rx_rings[j]->stats.packets; - data[i++] = adapter->rx_rings[j]->stats.bytes; + data[i++] = adapter->rx_rings[j].stats.packets; + data[i++] = adapter->rx_rings[j].stats.bytes; } } @@ -351,7 +351,7 @@ static int i40evf_set_coalesce(struct net_device *netdev, vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC; for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) { - q_vector = adapter->q_vector[i]; + q_vector = &adapter->q_vectors[i]; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr); q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); @@ 
-634,25 +634,34 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; - u32 hlut_val; - int i, j; + struct i40e_vsi *vsi = &adapter->vsi; + u8 *seed = NULL, *lut; + int ret; + u16 i; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (!indir) return 0; - if (indir) { - for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); - indir[j++] = hlut_val & 0xff; - indir[j++] = (hlut_val >> 8) & 0xff; - indir[j++] = (hlut_val >> 16) & 0xff; - indir[j++] = (hlut_val >> 24) & 0xff; - } - } - return 0; + seed = key; + + lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); + if (ret) + goto out; + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) + indir[i] = (u32)lut[i]; + +out: + kfree(lut); + + return ret; } /** @@ -668,9 +677,9 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; - u32 hlut_val; - int i, j; + struct i40e_vsi *vsi = &adapter->vsi; + u8 *seed = NULL; + u16 i; /* We do not allow change in unsupported parameters */ if (key || @@ -679,15 +688,29 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - hlut_val = indir[j++]; - hlut_val |= indir[j++] << 8; - hlut_val |= indir[j++] << 16; - hlut_val |= indir[j++] << 24; - wr32(hw, I40E_VFQF_HLUT(i), hlut_val); + if (key) { + if (!vsi->rss_hkey_user) { + vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE, + GFP_KERNEL); + if (!vsi->rss_hkey_user) + return -ENOMEM; + } + memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE); + seed = vsi->rss_hkey_user; + } + if (!vsi->rss_lut_user) { + vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE, + GFP_KERNEL); + if (!vsi->rss_lut_user) + return -ENOMEM; } - return 0; + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) + vsi->rss_lut_user[i] = (u8)(indir[i]); + + return i40evf_config_rss(vsi, seed, vsi->rss_lut_user, + I40EVF_HLUT_ARRAY_SIZE); } static const struct ethtool_ops i40evf_ethtool_ops = { diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 99d2cffae..94da913b1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -34,7 +34,15 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.3.33" +#define DRV_KERN "-k" + +#define DRV_VERSION_MAJOR 1 +#define DRV_VERSION_MINOR 4 +#define DRV_VERSION_BUILD 4 +#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." 
\ + __stringify(DRV_VERSION_BUILD) \ + DRV_KERN const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2015 Intel Corporation."; @@ -259,7 +267,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) { struct i40e_hw *hw = &adapter->hw; int i; - uint32_t dyn_ctl; + u32 dyn_ctl; if (mask & 1) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); @@ -307,10 +315,9 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) struct i40e_hw *hw = &adapter->hw; u32 val; - /* handle non-queue interrupts */ - rd32(hw, I40E_VFINT_ICR01); - rd32(hw, I40E_VFINT_ICR0_ENA1); - + /* handle non-queue interrupts, these reads clear the registers */ + val = rd32(hw, I40E_VFINT_ICR01); + val = rd32(hw, I40E_VFINT_ICR0_ENA1); val = rd32(hw, I40E_VFINT_DYN_CTL01) | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; @@ -348,8 +355,8 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) static void i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) { - struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; - struct i40e_ring *rx_ring = adapter->rx_rings[r_idx]; + struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx]; rx_ring->q_vector = q_vector; rx_ring->next = q_vector->rx.ring; @@ -369,8 +376,8 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) static void i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) { - struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; - struct i40e_ring *tx_ring = adapter->tx_rings[t_idx]; + struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx]; tx_ring->q_vector = q_vector; tx_ring->next = q_vector->tx.ring; @@ -465,7 +472,7 @@ static void i40evf_netpoll(struct net_device *netdev) return; for (i = 0; i < q_vectors; i++) - i40evf_msix_clean_rings(0, adapter->q_vector[i]); + i40evf_msix_clean_rings(0, &adapter->q_vectors[i]); } #endif @@ -487,7 +494,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (vector = 0; vector < q_vectors; vector++) { - struct i40e_q_vector *q_vector = adapter->q_vector[vector]; + struct i40e_q_vector *q_vector = &adapter->q_vectors[vector]; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -532,7 +539,7 @@ free_queue_irqs: adapter->msix_entries[vector + NONQ_VECS].vector, NULL); free_irq(adapter->msix_entries[vector + NONQ_VECS].vector, - adapter->q_vector[vector]); + &adapter->q_vectors[vector]); } return err; } @@ -582,7 +589,7 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) irq_set_affinity_hint(adapter->msix_entries[i+1].vector, NULL); free_irq(adapter->msix_entries[i+1].vector, - adapter->q_vector[i]); + &adapter->q_vectors[i]); } } @@ -611,7 +618,7 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) int i; for (i = 0; i < adapter->num_active_queues; i++) - adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i); + adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i); } /** @@ -656,8 +663,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) } for (i = 0; i < adapter->num_active_queues; i++) { - adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i); - adapter->rx_rings[i]->rx_buf_len = rx_buf_len; + adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); + 
adapter->rx_rings[i].rx_buf_len = rx_buf_len; } } @@ -954,7 +961,7 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { struct napi_struct *napi; - q_vector = adapter->q_vector[q_idx]; + q_vector = &adapter->q_vectors[q_idx]; napi = &q_vector->napi; napi_enable(napi); } @@ -971,7 +978,7 @@ static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) int q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { - q_vector = adapter->q_vector[q_idx]; + q_vector = &adapter->q_vectors[q_idx]; napi_disable(&q_vector->napi); } } @@ -992,7 +999,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; for (i = 0; i < adapter->num_active_queues; i++) { - struct i40e_ring *ring = adapter->rx_rings[i]; + struct i40e_ring *ring = &adapter->rx_rings[i]; i40evf_alloc_rx_buffers_1buf(ring, ring->count); ring->next_to_use = ring->count - 1; @@ -1112,16 +1119,10 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors) **/ static void i40evf_free_queues(struct i40evf_adapter *adapter) { - int i; - if (!adapter->vsi_res) return; - for (i = 0; i < adapter->num_active_queues; i++) { - if (adapter->tx_rings[i]) - kfree_rcu(adapter->tx_rings[i], rcu); - adapter->tx_rings[i] = NULL; - adapter->rx_rings[i] = NULL; - } + kfree(adapter->tx_rings); + kfree(adapter->rx_rings); } /** @@ -1136,13 +1137,20 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) { int i; + adapter->tx_rings = kcalloc(adapter->num_active_queues, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!adapter->tx_rings) + goto err_out; + adapter->rx_rings = kcalloc(adapter->num_active_queues, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!adapter->rx_rings) + goto err_out; + for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *tx_ring; struct i40e_ring *rx_ring; - tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL); - if (!tx_ring) - goto err_out; + tx_ring = &adapter->tx_rings[i]; tx_ring->queue_index = i; tx_ring->netdev = adapter->netdev; @@ -1150,14 +1158,12 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) tx_ring->count = adapter->tx_desc_count; if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; - adapter->tx_rings[i] = tx_ring; - rx_ring = &tx_ring[1]; + rx_ring = &adapter->rx_rings[i]; rx_ring->queue_index = i; rx_ring->netdev = adapter->netdev; rx_ring->dev = &adapter->pdev->dev; rx_ring->count = adapter->rx_desc_count; - adapter->rx_rings[i] = rx_ring; } return 0; @@ -1207,115 +1213,273 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) err = i40evf_acquire_msix_vectors(adapter, v_budget); out: - adapter->netdev->real_num_tx_queues = pairs; + netif_set_real_num_rx_queues(adapter->netdev, pairs); + netif_set_real_num_tx_queues(adapter->netdev, pairs); return err; } /** - * i40e_configure_rss_aq - Prepare for RSS using AQ commands + * i40e_config_rss_aq - Prepare for RSS using AQ commands * @vsi: vsi structure * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Return 0 on success, negative on failure **/ -static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed) +static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) { - struct i40e_aqc_get_set_rss_key_data rss_key; struct i40evf_adapter *adapter = vsi->back; struct i40e_hw *hw = &adapter->hw; - int 
ret = 0, i; - u8 *rss_lut; + int ret = 0; if (!vsi->id) - return; + return -EINVAL; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot confiure RSS, command %d pending\n", + dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", adapter->current_op); - return; + return -EBUSY; } - memset(&rss_key, 0, sizeof(rss_key)); - memcpy(&rss_key, seed, sizeof(rss_key)); + if (seed) { + struct i40e_aqc_get_set_rss_key_data *rss_key = + (struct i40e_aqc_get_set_rss_key_data *)seed; + ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key); + if (ret) { + dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } - rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL); - if (!rss_lut) - return; + if (lut) { + ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size); + if (ret) { + dev_err(&adapter->pdev->dev, + "Cannot set RSS lut, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } - /* Populate the LUT with max no. PF queues in round robin fashion */ - for (i = 0; i <= (I40E_VFQF_HLUT_MAX_INDEX * 4); i++) - rss_lut[i] = i % adapter->num_active_queues; + return ret; +} - ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key); - if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot set RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return; + } +/** + * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, + const u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + struct i40e_hw *hw = &adapter->hw; + u16 i; + + if (seed) { + u32 *seed_dw = (u32 *)seed; + + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]); } - ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut, - (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4); - if (ret) - dev_err(&adapter->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40EVF_HLUT_ARRAY_SIZE) + return -EINVAL; + + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) + wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]); + } + i40e_flush(hw); + + return 0; }
/** - * i40e_configure_rss_reg - Prepare for RSS if used - * @adapter: board private structure - * @seed: RSS hash seed + * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Return 0 on success, negative on failure **/ -static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter, - const u8 *seed) +static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) { + struct i40evf_adapter *adapter = vsi->back; struct i40e_hw *hw = &adapter->hw; - u32 *seed_dw = (u32 *)seed; - u32 cqueue = 0; - u32 lut = 0; - int i, j; + int ret = 0; - /* Fill out hash function seed */ - for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]); - - /* Populate the LUT with max no. PF queues in round robin fashion */ - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - lut = 0; - for (j = 0; j < 4; j++) { - if (cqueue == adapter->num_active_queues) - cqueue = 0; - lut |= ((cqueue) << (8 * j)); - cqueue++; + if (seed) { + ret = i40evf_aq_get_rss_key(hw, vsi->id, + (struct i40e_aqc_get_set_rss_key_data *)seed); + if (ret) { + dev_err(&adapter->pdev->dev, + "Cannot get RSS key, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; } - wr32(hw, I40E_VFQF_HLUT(i), lut); } - i40e_flush(hw); + + if (lut) { + ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size); + if (ret) { + dev_err(&adapter->pdev->dev, + "Cannot get RSS lut, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } + + return ret; } /** - * i40evf_configure_rss - Prepare for RSS + * i40evf_get_rss_reg - Get RSS keys and lut by reading registers + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed, + const u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + struct i40e_hw *hw = &adapter->hw; + u16 i; + + if (seed) { + u32 *seed_dw = (u32 *)seed; + + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i)); + } + + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40EVF_HLUT_ARRAY_SIZE) + return -EINVAL; + + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) + lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i)); + } + + return 0; +} + +/** + * i40evf_config_rss - Configure RSS keys and lut + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + + if (RSS_AQ(adapter)) + return i40evf_config_rss_aq(vsi, seed, lut, lut_size); + else + return i40evf_config_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40evf_get_rss - Get RSS keys and lut + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + + if (RSS_AQ(adapter)) + return i40evf_get_rss_aq(vsi, seed, lut, lut_size); + else + return i40evf_get_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40evf_fill_rss_lut - Fill the lut with default values + * @lut: Lookup table to be filled with + * @rss_table_size: Lookup table size + * @rss_size: Range of queue number for hashing + **/ +static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) +{ + u16 i; + + for (i = 0; i < rss_table_size; i++) + lut[i] = i % rss_size; +}
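Both configuration paths above treat the LUT as the same flat byte array: entry i names the queue that receives hash bucket i, and i40evf_fill_rss_lut() simply round-robins the buckets across the active queues. The fill in isolation (standalone demo; the 64-byte size is what I40EVF_HLUT_ARRAY_SIZE works out to, and the queue count is an arbitrary example value):

#include <stdio.h>
#include <stdint.h>

#define HLUT_SIZE 64		/* (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4 */

int main(void)
{
	uint8_t lut[HLUT_SIZE];
	unsigned int num_queues = 4;	/* example value */
	unsigned int i;

	/* the default fill: bucket i -> queue (i % num_queues) */
	for (i = 0; i < HLUT_SIZE; i++)
		lut[i] = i % num_queues;

	/* the register path writes four of these bytes per 32-bit
	 * I40E_VFQF_HLUT register, hence the u32 cast in
	 * i40evf_config_rss_reg() above
	 */
	for (i = 0; i < 8; i++)
		printf("%u ", lut[i]);
	printf("...\n");
	return 0;
}

The ethtool get_rxfh/set_rxfh paths shown earlier convert between this byte table and the stack's u32 indir[] one entry at a time, which is what replaces the old hand-packed register loop.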
+ +/** + * i40evf_init_rss - Prepare for RSS * @adapter: board private structure + * + * Return 0 on success, negative on failure **/ -static void i40evf_configure_rss(struct i40evf_adapter *adapter) +static int i40evf_init_rss(struct i40evf_adapter *adapter) { + struct i40e_vsi *vsi = &adapter->vsi; struct i40e_hw *hw = &adapter->hw; u8 seed[I40EVF_HKEY_ARRAY_SIZE]; u64 hena; - - netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE); + u8 *lut; + int ret; /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ hena = I40E_DEFAULT_RSS_HENA; wr32(hw, I40E_VFQF_HENA(0), (u32)hena); wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); - if (RSS_AQ(adapter)) - i40evf_configure_rss_aq(&adapter->vsi, seed); + lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + /* Use user configured lut if there is one, otherwise use default */ + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE); + else + i40evf_fill_rss_lut(lut, I40EVF_HLUT_ARRAY_SIZE, + adapter->num_active_queues); + + /* Use user configured hash key if there is one, otherwise + * use default. + */ + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE); else - i40evf_configure_rss_reg(adapter, seed); + netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE); + ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); + kfree(lut); + + return ret; }
/** @@ -1327,21 +1491,22 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) **/ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) { - int q_idx, num_q_vectors; + int q_idx = 0, num_q_vectors; struct i40e_q_vector *q_vector; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; + adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), + GFP_KERNEL); + if (!adapter->q_vectors) + goto err_out; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); - if (!q_vector) - goto err_out; + q_vector = &adapter->q_vectors[q_idx]; q_vector->adapter = adapter; q_vector->vsi = &adapter->vsi; q_vector->v_idx = q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, i40evf_napi_poll, NAPI_POLL_WEIGHT); - adapter->q_vector[q_idx] = q_vector; } return 0; @@ -1349,11 +1514,10 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) err_out: while (q_idx) { q_idx--; - q_vector = adapter->q_vector[q_idx]; + q_vector = &adapter->q_vectors[q_idx]; netif_napi_del(&q_vector->napi); - kfree(q_vector); - adapter->q_vector[q_idx] = NULL; } + kfree(adapter->q_vectors); return -ENOMEM; } @@ -1374,13 +1538,11 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) napi_vectors = adapter->num_active_queues; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - struct i40e_q_vector *q_vector = adapter->q_vector[q_idx]; - - adapter->q_vector[q_idx] = NULL; + struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx]; if (q_idx < napi_vectors) netif_napi_del(&q_vector->napi); - kfree(q_vector); } + kfree(adapter->q_vectors); } /** @@ -1438,6 +1600,22 @@ err_set_interrupt: return err; } +/** + * i40evf_clear_rss_config_user - Clear user configurations of RSS + * @vsi: Pointer to VSI structure + **/ +static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi) +{ + if (!vsi) + return; + + kfree(vsi->rss_hkey_user); + vsi->rss_hkey_user = NULL; + + kfree(vsi->rss_lut_user); + vsi->rss_lut_user = NULL; +} + /** * i40evf_watchdog_timer - Periodic call-back timer * @data: pointer to adapter disguised as unsigned long @@ -1565,7 +1743,7 @@ static void i40evf_watchdog_task(struct work_struct *work) * PF, so we don't have to set current_op as we will * not get a response through the ARQ.
*/ - i40evf_configure_rss(adapter); + i40evf_init_rss(adapter); adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; goto watchdog_done; } @@ -1864,9 +2042,12 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) { int i; + if (!adapter->tx_rings) + return; + for (i = 0; i < adapter->num_active_queues; i++) - if (adapter->tx_rings[i]->desc) - i40evf_free_tx_resources(adapter->tx_rings[i]); + if (adapter->tx_rings[i].desc) + i40evf_free_tx_resources(&adapter->tx_rings[i]); } /** @@ -1884,8 +2065,8 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { - adapter->tx_rings[i]->count = adapter->tx_desc_count; - err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]); + adapter->tx_rings[i].count = adapter->tx_desc_count; + err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@ -1911,8 +2092,8 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { - adapter->rx_rings[i]->count = adapter->rx_desc_count; - err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]); + adapter->rx_rings[i].count = adapter->rx_desc_count; + err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@ -1932,9 +2113,12 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) { int i; + if (!adapter->rx_rings) + return; + for (i = 0; i < adapter->num_active_queues; i++) - if (adapter->rx_rings[i]->desc) - i40evf_free_rx_resources(adapter->rx_rings[i]); + if (adapter->rx_rings[i].desc) + i40evf_free_rx_resources(&adapter->rx_rings[i]); } /** @@ -2137,7 +2321,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) netdev->features |= NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_SCTP_CSUM | + NETIF_F_SCTP_CRC | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | @@ -2263,6 +2447,14 @@ static void i40evf_init_task(struct work_struct *work) if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { err = i40evf_send_vf_config_msg(adapter); goto err; + } else if (err == I40E_ERR_PARAM) { + /* We only get ERR_PARAM if the device is in a very bad + * state or if we've been disabled for previous bad + * behavior. Either way, we're done now. 
+ */ + i40evf_shutdown_adminq(hw); + dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); + return; } if (err) { dev_err(&pdev->dev, "Unable to get VF config (%d)\n", @@ -2313,7 +2505,7 @@ static void i40evf_init_task(struct work_struct *work) I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; if (!RSS_AQ(adapter)) - i40evf_configure_rss(adapter); + i40evf_init_rss(adapter); err = i40evf_request_misc_irq(adapter); if (err) goto err_sw_init; @@ -2334,7 +2526,6 @@ static void i40evf_init_task(struct work_struct *work) if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); - dev_info(&pdev->dev, "%s\n", i40evf_driver_string); adapter->state = __I40EVF_DOWN; set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_misc_irq_enable(adapter); @@ -2343,7 +2534,7 @@ static void i40evf_init_task(struct work_struct *work) adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } else { - i40evf_configure_rss(adapter); + i40evf_init_rss(adapter); } return; restart: @@ -2438,8 +2629,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), - MAX_TX_QUEUES); + netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; @@ -2632,6 +2822,9 @@ static void i40evf_remove(struct pci_dev *pdev) flush_scheduled_work(); + /* Clear user configurations for RSS */ + i40evf_clear_rss_config_user(&adapter->vsi); + if (hw->aq.asq.count) i40evf_shutdown_adminq(hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 32e620e1e..c1c526283 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -157,7 +157,9 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | I40E_VIRTCHNL_VF_OFFLOAD_VLAN | - I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) @@ -242,7 +244,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); - vqci = kzalloc(len, GFP_ATOMIC); + vqci = kzalloc(len, GFP_KERNEL); if (!vqci) return; @@ -255,19 +257,19 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) for (i = 0; i < pairs; i++) { vqpi->txq.vsi_id = vqci->vsi_id; vqpi->txq.queue_id = i; - vqpi->txq.ring_len = adapter->tx_rings[i]->count; - vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma; + vqpi->txq.ring_len = adapter->tx_rings[i].count; + vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma; vqpi->txq.headwb_enabled = 1; vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr + (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc)); vqpi->rxq.vsi_id = vqci->vsi_id; vqpi->rxq.queue_id = i; - vqpi->rxq.ring_len = adapter->rx_rings[i]->count; - vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma; + vqpi->rxq.ring_len = adapter->rx_rings[i].count; + vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; vqpi->rxq.max_pkt_size = 
adapter->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; - vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len; + vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; vqpi++; } @@ -353,14 +355,14 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) len = sizeof(struct i40e_virtchnl_irq_map_info) + (adapter->num_msix_vectors * sizeof(struct i40e_virtchnl_vector_map)); - vimi = kzalloc(len, GFP_ATOMIC); + vimi = kzalloc(len, GFP_KERNEL); if (!vimi) return; vimi->num_vectors = adapter->num_msix_vectors; /* Queue vectors first */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { - q_vector = adapter->q_vector[v_idx]; + q_vector = adapter->q_vectors + v_idx; vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id; vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS; vimi->vecmap[v_idx].txq_map = q_vector->ring_mask; @@ -391,6 +393,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) struct i40e_virtchnl_ether_addr_list *veal; int len, i = 0, count = 0; struct i40evf_mac_filter *f; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -415,10 +418,12 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (count * sizeof(struct i40e_virtchnl_ether_addr)); + more = true; } - veal = kzalloc(len, GFP_ATOMIC); + veal = kzalloc(len, GFP_KERNEL); if (!veal) return; @@ -431,7 +436,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) f->add = false; } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)veal, len); kfree(veal); @@ -450,6 +456,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) struct i40e_virtchnl_ether_addr_list *veal; struct i40evf_mac_filter *f, *ftmp; int len, i = 0, count = 0; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -474,9 +481,11 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (count * sizeof(struct i40e_virtchnl_ether_addr)); + more = true; } - veal = kzalloc(len, GFP_ATOMIC); + veal = kzalloc(len, GFP_KERNEL); if (!veal) return; @@ -490,7 +499,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) kfree(f); } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)veal, len); kfree(veal); @@ -509,6 +519,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) struct i40e_virtchnl_vlan_filter_list *vvfl; int len, i = 0, count = 0; struct i40evf_vlan_filter *f; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -534,9 +545,11 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct 
i40e_virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + more = true; } - vvfl = kzalloc(len, GFP_ATOMIC); + vvfl = kzalloc(len, GFP_KERNEL); if (!vvfl) return; @@ -549,7 +562,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) f->add = false; } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } @@ -567,6 +581,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) struct i40e_virtchnl_vlan_filter_list *vvfl; struct i40evf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -592,9 +607,11 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + more = true; } - vvfl = kzalloc(len, GFP_ATOMIC); + vvfl = kzalloc(len, GFP_KERNEL); if (!vvfl) return; @@ -608,7 +625,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) kfree(f); } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); kfree(vvfl); } @@ -724,9 +742,29 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, return; } if (v_retval) { - dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", - v_retval, i40evf_stat_str(&adapter->hw, v_retval), - v_opcode); + switch (v_opcode) { + case I40E_VIRTCHNL_OP_ADD_VLAN: + dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + case I40E_VIRTCHNL_OP_DEL_VLAN: + dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + default: + dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", + v_retval, + i40evf_stat_str(&adapter->hw, v_retval), + v_opcode); + } } switch (v_opcode) { case I40E_VIRTCHNL_OP_GET_STATS: { diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 7a73510e5..adb33e2a0 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -45,8 +45,6 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *); static s32 igb_init_hw_82575(struct e1000_hw *); static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); -static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); -static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); static s32 igb_reset_hw_82575(struct e1000_hw *); static s32 igb_reset_hw_82580(struct e1000_hw *); static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); @@ -205,13 +203,10 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) case e1000_82580: case e1000_i350: 
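One pattern repeats through the four filter-list functions above: a variable-length virtchnl message has to fit a single admin-queue buffer, so the element count is clamped, len shrinks from the old fixed I40EVF_MAX_AQ_BUF_SIZE to exactly header plus elements, and a more flag keeps the aq_required bit armed so a later pass sends the remainder. Restated with stand-in types (the struct layout and buffer size here are illustrative, not the real virtchnl definitions):

#include <stddef.h>
#include <stdint.h>

#define MAX_AQ_BUF 4096		/* stand-in for I40EVF_MAX_AQ_BUF_SIZE */

struct ether_addr_elem {	/* stand-in for i40e_virtchnl_ether_addr */
	uint8_t addr[6];
	uint8_t pad[2];
};

struct ether_addr_list {	/* stand-in for the list header */
	uint16_t vsi_id;
	uint16_t num_elements;
	struct ether_addr_elem list[];
};

/* Clamp 'want' so the message fits one buffer; *more tells the caller
 * to leave its request flag set and send the rest on a later pass.
 */
static size_t fit_count(size_t want, int *more)
{
	size_t max = (MAX_AQ_BUF - sizeof(struct ether_addr_list)) /
		     sizeof(struct ether_addr_elem);

	*more = want > max;
	return *more ? max : want;
}

/* Exact allocation length for 'count' elements, as in the new
 * len = sizeof(list header) + count * sizeof(element) above.
 */
static size_t msg_len(size_t count)
{
	return sizeof(struct ether_addr_list) +
	       count * sizeof(struct ether_addr_elem);
}

int main(void)
{
	int more;
	size_t n = fit_count(1000, &more);

	return !(more && msg_len(n) <= MAX_AQ_BUF);
}

The switch from GFP_ATOMIC to GFP_KERNEL in the same hunks is consistent with these paths running from task context, where sleeping allocations are allowed.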
case e1000_i354: - phy->ops.read_reg = igb_read_phy_reg_82580; - phy->ops.write_reg = igb_write_phy_reg_82580; - break; case e1000_i210: case e1000_i211: - phy->ops.read_reg = igb_read_phy_reg_gs40g; - phy->ops.write_reg = igb_write_phy_reg_gs40g; + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; break; default: phy->ops.read_reg = igb_read_phy_reg_igp; @@ -272,6 +267,11 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) if (ret_val) goto out; } + if (phy->id == M88E1543_E_PHY_ID) { + ret_val = igb_initialize_M88E1543_phy(hw); + if (ret_val) + goto out; + } break; case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; @@ -294,6 +294,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) case I210_I_PHY_ID: phy->type = e1000_phy_i210; phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_cfg_done = igb_get_cfg_done_i210; phy->ops.get_phy_info = igb_get_phy_info_m88; phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; @@ -925,6 +926,8 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) if (phy->id == M88E1512_E_PHY_ID) ret_val = igb_initialize_M88E1512_phy(hw); + if (phy->id == M88E1543_E_PHY_ID) + ret_val = igb_initialize_M88E1543_phy(hw); out: return ret_val; } @@ -2145,7 +2148,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) * Reads the MDI control register in the PHY at offset and stores the * information read to data. **/ -static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; @@ -2169,7 +2172,7 @@ out: * * Writes data to MDI control register in the PHY at offset. **/ -static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index b1915043b..c3c598c34 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -927,7 +927,10 @@ /* Intel i347-AT4 Registers */ -#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDL0 0x10 /* Pair 0 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL1 0x11 /* Pair 1 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL2 0x12 /* Pair 2 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL3 0x13 /* Pair 3 PHY Cable Diagnostics Length */ #define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ #define I347AT4_PAGE_SELECT 0x16 @@ -990,6 +993,7 @@ #define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ #define E1000_M88E1543_EEE_CTRL_1 0x0 #define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_M88E1543_FIBER_CTRL 0x0 #define E1000_EEE_ADV_DEV_I354 7 #define E1000_EEE_ADV_ADDR_I354 60 #define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 2003b3756..4034207eb 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -441,6 +441,7 @@ struct e1000_phy_info { u16 cable_length; u16 max_cable_length; u16 min_cable_length; + u16 pair_length[4]; u8 mdix; diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 65d931669..8aa798737 
100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -861,10 +861,10 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw) if (ret_val) nvm_word = E1000_INVM_DEFAULT_AL; tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE); for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { /* check current state directly from internal PHY */ - igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | - E1000_PHY_PLL_FREQ_REG), &phy_word); + igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word); if ((phy_word & E1000_PHY_PLL_UNCONF) != E1000_PHY_PLL_UNCONF) { ret_val = 0; @@ -896,7 +896,35 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw) /* restore WUC register */ wr32(E1000_WUC, wuc); } + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0); /* restore MDICNFG setting */ wr32(E1000_MDICNFG, mdicnfg); return ret_val; } + +/** + * igb_get_cfg_done_i210 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +s32 igb_get_cfg_done_i210(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + while (timeout) { + if (rd32(E1000_EEMNGCTL_I210) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + return 0; +} diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index 3442b6357..b2964a2a6 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -34,6 +34,7 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); s32 igb_init_nvm_params_i210(struct e1000_hw *hw); bool igb_get_flash_presence_i210(struct e1000_hw *hw); s32 igb_pll_workaround_i210(struct e1000_hw *hw); +s32 igb_get_cfg_done_i210(struct e1000_hw *hw); #define E1000_STM_OPCODE 0xDB00 #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 @@ -84,7 +85,7 @@ enum E1000_INVM_STRUCTURE_TYPE { #define E1000_PCI_PMCSR_D3 0x03 #define E1000_MAX_PLL_TRIES 5 #define E1000_PHY_PLL_UNCONF 0xFF -#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_PAGE 0xFC #define E1000_PHY_PLL_FREQ_REG 0x000E #define E1000_INVM_DEFAULT_AL 0x202F #define E1000_INVM_AUTOLOAD 0x0A diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 23ec28f43..5b54254ae 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1717,59 +1717,76 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, phy_data2, index, default_page, is_cm; + int len_tot = 0; + u16 len_min; + u16 len_max; switch (hw->phy.id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: case I210_I_PHY_ID: - /* Get cable length from PHY Cable Diagnostics Control Reg */ - ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + - (I347AT4_PCDL + phy->addr), - &phy_data); + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); if (ret_val) - return 
ret_val; + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + goto out; /* Check if the unit of cable length is meters or cm */ - ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + - I347AT4_PCDC, &phy_data2); + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); if (ret_val) - return ret_val; + goto out; is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); - /* Populate the phy structure with cable length in meters */ - phy->min_cable_length = phy_data / (is_cm ? 100 : 1); - phy->max_cable_length = phy_data / (is_cm ? 100 : 1); - phy->cable_length = phy_data / (is_cm ? 100 : 1); - break; - case M88E1543_E_PHY_ID: - case M88E1512_E_PHY_ID: - case I347AT4_E_PHY_ID: - /* Remember the original page select and set it to 7 */ - ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, - &default_page); + /* Get cable length from Pair 0 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL0, &phy_data); if (ret_val) goto out; - ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + phy->pair_length[0] = phy_data / (is_cm ? 100 : 1); + len_tot = phy->pair_length[0]; + len_min = phy->pair_length[0]; + len_max = phy->pair_length[0]; + + /* Get cable length from Pair 1 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL1, &phy_data); if (ret_val) goto out; - /* Get cable length from PHY Cable Diagnostics Control Reg */ - ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), - &phy_data); + phy->pair_length[1] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[1]; + len_min = min(len_min, phy->pair_length[1]); + len_max = max(len_max, phy->pair_length[1]); + + /* Get cable length from Pair 2 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL2, &phy_data); if (ret_val) goto out; - /* Check if the unit of cable length is meters or cm */ - ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + phy->pair_length[2] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[2]; + len_min = min(len_min, phy->pair_length[2]); + len_max = max(len_max, phy->pair_length[2]); + + /* Get cable length from Pair 3 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL3, &phy_data); if (ret_val) goto out; - is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + phy->pair_length[3] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[3]; + len_min = min(len_min, phy->pair_length[3]); + len_max = max(len_max, phy->pair_length[3]); /* Populate the phy structure with cable length in meters */ - phy->min_cable_length = phy_data / (is_cm ? 100 : 1); - phy->max_cable_length = phy_data / (is_cm ? 100 : 1); - phy->cable_length = phy_data / (is_cm ? 100 : 1); + phy->min_cable_length = len_min; + phy->max_cable_length = len_max; + phy->cable_length = len_tot / 4; /* Reset the page selec to its original value */ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, @@ -2277,6 +2294,100 @@ out: return ret_val; }
+/** + * igb_initialize_M88E1543_phy - Initialize M88E1543 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1543 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF.
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x0C0D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Switch to PHY page 1. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); + if (ret_val) + goto out; + + /* Change mode to 1000BASE-X/SGMII and autoneg enable */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + /** * igb_power_up_phy_copper - Restore copper link in case of PHY power down * @hw: pointer to the HW structure @@ -2493,66 +2604,6 @@ out: return ret_val; } -/** - * igb_write_phy_reg_gs40g - Write GS40G PHY register - * @hw: pointer to the HW structure - * @offset: lower half is register offset to write to - * upper half is page to use. - * @data: data to write at register offset - * - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ -s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) -{ - s32 ret_val; - u16 page = offset >> GS40G_PAGE_SHIFT; - - offset = offset & GS40G_OFFSET_MASK; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); - if (ret_val) - goto release; - ret_val = igb_write_phy_reg_mdic(hw, offset, data); - -release: - hw->phy.ops.release(hw); - return ret_val; -} - -/** - * igb_read_phy_reg_gs40g - Read GS40G PHY register - * @hw: pointer to the HW structure - * @offset: lower half is register offset to read to - * upper half is page to use. - * @data: data to read at register offset - * - * Acquires semaphore, if necessary, then reads the data in the PHY register - * at the offset. Release any acquired semaphores before exiting. 
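The gen2 cable-length path above now reads all four pair-length registers (I347AT4_PCDL0..3) instead of a single one, then reports per-pair values plus a minimum, a maximum, and a mean. The arithmetic on its own (runnable sketch; the sample lengths are invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* per-pair lengths in meters, as decoded from PCDL0..PCDL3 */
	uint16_t pair[4] = { 42, 43, 41, 42 };	/* invented samples */
	uint16_t len_min = pair[0], len_max = pair[0];
	unsigned int len_tot = pair[0], i;

	for (i = 1; i < 4; i++) {
		len_tot += pair[i];
		if (pair[i] < len_min)
			len_min = pair[i];
		if (pair[i] > len_max)
			len_max = pair[i];
	}

	/* mirrors phy->min/max_cable_length and cable_length = len_tot / 4 */
	printf("min %u max %u mean %u\n", len_min, len_max, len_tot / 4);
	return 0;
}

Keeping the four raw readings is what the new pair_length[4] field added to e1000_phy_info earlier in this patch exists for.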
- **/ -s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) -{ - s32 ret_val; - u16 page = offset >> GS40G_PAGE_SHIFT; - - offset = offset & GS40G_OFFSET_MASK; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); - if (ret_val) - goto release; - ret_val = igb_read_phy_reg_mdic(hw, offset, data); - -release: - hw->phy.ops.release(hw); - return ret_val; -} - /** * igb_set_master_slave_mode - Setup PHY for Master/slave mode * @hw: pointer to the HW structure diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 24d55edbb..969a6ddaf 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -62,6 +62,7 @@ void igb_power_up_phy_copper(struct e1000_hw *hw); void igb_power_down_phy_copper(struct e1000_hw *hw); s32 igb_phy_init_script_igp3(struct e1000_hw *hw); s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw); +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw); s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); @@ -71,8 +72,8 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw); s32 igb_get_phy_info_82580(struct e1000_hw *hw); s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); s32 igb_get_cable_length_82580(struct e1000_hw *hw); -s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); -s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_check_polarity_m88(struct e1000_hw *hw); /* IGP01E1000 Specific Registers */ @@ -143,17 +144,6 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw); #define E1000_CABLE_LENGTH_UNDEFINED 0xFF -/* GS40G - I210 PHY defines */ -#define GS40G_PAGE_SELECT 0x16 -#define GS40G_PAGE_SHIFT 16 -#define GS40G_OFFSET_MASK 0xFFFF -#define GS40G_PAGE_2 0x20000 -#define GS40G_MAC_REG2 0x15 -#define GS40G_MAC_LB 0x4140 -#define GS40G_MAC_SPEED_1G 0X0006 -#define GS40G_COPPER_SPEC 0x0010 -#define GS40G_LINE_LB 0x4000 - /* SFP modules ID memory locations */ #define E1000_SFF_IDENTIFIER_OFFSET 0x00 #define E1000_SFF_IDENTIFIER_SFF 0x02 diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 4af2870e4..21d9d0288 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -66,6 +66,7 @@ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGCTL_I210 0x12030 /* MNG EEprom Control */ #define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ @@ -385,8 +386,7 @@ do { \ #define array_wr32(reg, offset, value) \ wr32((reg) + ((offset) << 2), (value)) -#define array_rd32(reg, offset) \ - (readl(hw->hw_addr + reg + ((offset) << 2))) +#define array_rd32(reg, offset) (igb_rd32(hw, reg + ((offset) << 2))) /* DMA Coalescing registers */ #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ diff --git a/drivers/net/ethernet/intel/igb/igb.h 
b/drivers/net/ethernet/intel/igb/igb.h index 1a2f1cc44..e3cb93bdb 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -389,6 +389,8 @@ struct igb_adapter { u16 link_speed; u16 link_duplex; + u8 __iomem *io_addr; /* Mainly for iounmap use */ + struct work_struct reset_task; struct work_struct watchdog_task; bool fc_autoneg; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 2529bc625..1d329f1d0 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -127,10 +127,20 @@ static const struct igb_stats igb_gstrings_net_stats[] = { #define IGB_STATS_LEN \ (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) +enum igb_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { - "Register test (offline)", "Eeprom test (offline)", - "Interrupt test (offline)", "Loopback test (offline)", - "Link test (on/offline)" + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" }; #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) @@ -2002,7 +2012,7 @@ static void igb_diag_test(struct net_device *netdev, /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ - if (igb_link_test(adapter, &data[4])) + if (igb_link_test(adapter, &data[TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; if (if_running) @@ -2011,21 +2021,21 @@ static void igb_diag_test(struct net_device *netdev, else igb_reset(adapter); - if (igb_reg_test(adapter, &data[0])) + if (igb_reg_test(adapter, &data[TEST_REG])) eth_test->flags |= ETH_TEST_FL_FAILED; igb_reset(adapter); - if (igb_eeprom_test(adapter, &data[1])) + if (igb_eeprom_test(adapter, &data[TEST_EEP])) eth_test->flags |= ETH_TEST_FL_FAILED; igb_reset(adapter); - if (igb_intr_test(adapter, &data[2])) + if (igb_intr_test(adapter, &data[TEST_IRQ])) eth_test->flags |= ETH_TEST_FL_FAILED; igb_reset(adapter); /* power up link for loopback test */ igb_power_up_link(adapter); - if (igb_loopback_test(adapter, &data[3])) + if (igb_loopback_test(adapter, &data[TEST_LOOP])) eth_test->flags |= ETH_TEST_FL_FAILED; /* restore speed, duplex, autoneg settings */ @@ -2045,16 +2055,16 @@ static void igb_diag_test(struct net_device *netdev, dev_info(&adapter->pdev->dev, "online testing starting\n"); /* PHY is powered down when interface is down */ - if (if_running && igb_link_test(adapter, &data[4])) + if (if_running && igb_link_test(adapter, &data[TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; else - data[4] = 0; + data[TEST_LINK] = 0; /* Online tests aren't run; pass by default */ - data[0] = 0; - data[1] = 0; - data[2] = 0; - data[3] = 0; + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; clear_bit(__IGB_TESTING, &adapter->state); } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ea7b09887..31e5f3942 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -946,7 +946,6 @@ static void igb_configure_msix(struct igb_adapter *adapter) static int igb_request_msix(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; int i, err = 
0, vector = 0, free_vector = 0; err = request_irq(adapter->msix_entries[vector].vector, @@ -959,7 +958,7 @@ static int igb_request_msix(struct igb_adapter *adapter) vector++; - q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); + q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); if (q_vector->rx.ring && q_vector->tx.ring) sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, @@ -1230,7 +1229,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, q_vector->tx.work_limit = adapter->tx_work_limit; /* initialize ITR configuration */ - q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); + q_vector->itr_register = adapter->io_addr + E1000_EITR(0); q_vector->itr_val = IGB_START_ITR; /* initialize pointer to rings */ @@ -2294,9 +2293,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); err = -EIO; - hw->hw_addr = pci_iomap(pdev, 0, 0); - if (!hw->hw_addr) + adapter->io_addr = pci_iomap(pdev, 0, 0); + if (!adapter->io_addr) goto err_ioremap; + /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; netdev->netdev_ops = &igb_netdev_ops; igb_set_ethtool_ops(netdev); @@ -2378,8 +2379,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } if (hw->mac.type >= e1000_82576) { - netdev->hw_features |= NETIF_F_SCTP_CSUM; - netdev->features |= NETIF_F_SCTP_CSUM; + netdev->hw_features |= NETIF_F_SCTP_CRC; + netdev->features |= NETIF_F_SCTP_CRC; } netdev->priv_flags |= IFF_UNICAST_FLT; @@ -2656,7 +2657,7 @@ err_sw_init: #ifdef CONFIG_PCI_IOV igb_disable_sriov(pdev); #endif - pci_iounmap(pdev, hw->hw_addr); + pci_iounmap(pdev, adapter->io_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: @@ -2823,7 +2824,7 @@ static void igb_remove(struct pci_dev *pdev) igb_clear_interrupt_scheme(adapter); - pci_iounmap(pdev, hw->hw_addr); + pci_iounmap(pdev, adapter->io_addr); if (hw->flash_address) iounmap(hw->flash_address); pci_release_selected_regions(pdev, @@ -2856,6 +2857,13 @@ static void igb_probe_vfs(struct igb_adapter *adapter) if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) return; + /* Of the below we really only want the effect of getting + * IGB_FLAG_HAS_MSIX set (if available), without which + * igb_enable_sriov() has no effect. 
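
The adapter->io_addr bookkeeping above exists because hw->hw_addr is a working pointer that error paths may rewrite (igb_rd32() drops it when a read indicates the device is gone), while pci_iounmap() must be handed the cookie pci_iomap() returned. A toy model of that invariant (userspace; a plain array stands in for the mapped BAR):

#include <stdio.h>
#include <stdint.h>

struct adapter {
	uint8_t *io_addr;	/* as returned by the mapping call, for unmap only */
	uint8_t *hw_addr;	/* working copy, may be NULLed on surprise removal */
};

int main(void)
{
	static uint8_t bar0[4096];	/* stands in for the BAR 0 mapping */
	struct adapter a = { .io_addr = bar0, .hw_addr = bar0 };

	a.hw_addr = NULL;		/* device vanished: poison the working pointer */

	/* teardown still sees the original mapping */
	printf("unmap %p (hw_addr now %p)\n", (void *)a.io_addr, (void *)a.hw_addr);
	return 0;
}
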
+ */ + igb_set_interrupt_capability(adapter, true); + igb_reset_interrupt_capability(adapter); + pci_sriov_set_totalvfs(pdev, 7); igb_enable_sriov(pdev, max_vfs); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 1d2174526..4b9156cd8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -139,6 +139,7 @@ enum ixgbe_tx_flags { #define IXGBE_X540_VF_DEVICE_ID 0x1515 struct vf_data_storage { + struct pci_dev *vfdev; unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; @@ -224,6 +225,8 @@ struct ixgbe_rx_queue_stats { u64 csum_err; }; +#define IXGBE_TS_HDR_LEN 8 + enum ixgbe_ring_state_t { __IXGBE_TX_FDIR_INIT_DONE, __IXGBE_TX_XPS_INIT_DONE, @@ -282,6 +285,8 @@ struct ixgbe_ring { u16 next_to_use; u16 next_to_clean; + unsigned long last_rx_timestamp; + union { u16 next_to_alloc; struct { @@ -312,7 +317,7 @@ enum ixgbe_ring_f_enum { }; #define IXGBE_MAX_RSS_INDICES 16 -#define IXGBE_MAX_RSS_INDICES_X550 64 +#define IXGBE_MAX_RSS_INDICES_X550 63 #define IXGBE_MAX_VMDQ_INDICES 64 #define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ #define IXGBE_MAX_FCOE_INDICES 8 @@ -587,9 +592,10 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) struct ixgbe_mac_addr { u8 addr[ETH_ALEN]; - u16 queue; + u16 pool; u16 state; /* bitmask */ }; + #define IXGBE_MAC_STATE_DEFAULT 0x1 #define IXGBE_MAC_STATE_MODIFIED 0x2 #define IXGBE_MAC_STATE_IN_USE 0x4 @@ -639,6 +645,8 @@ struct ixgbe_adapter { #define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22) #define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23) #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) +#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) +#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) u32 flags2; #define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) @@ -656,6 +664,7 @@ struct ixgbe_adapter { #ifdef CONFIG_IXGBE_VXLAN #define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) #endif +#define IXGBE_FLAG2_VLAN_PROMISC BIT(13) /* Tx fast path data */ int num_tx_queues; @@ -755,9 +764,12 @@ struct ixgbe_adapter { unsigned long last_rx_ptp_check; unsigned long last_rx_timestamp; spinlock_t tmreg_lock; - struct cyclecounter cc; - struct timecounter tc; + struct cyclecounter hw_cc; + struct timecounter hw_tc; u32 base_incval; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; + void (*ptp_setup_sdp)(struct ixgbe_adapter *); /* SR-IOV */ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); @@ -883,9 +895,10 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); #endif int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, - u8 *addr, u16 queue); + const u8 *addr, u16 queue); int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, - u8 *addr, u16 queue); + const u8 *addr, u16 queue); +void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid); void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, struct ixgbe_ring *); @@ -968,12 +981,33 @@ void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); -void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb); +void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *); +void 
ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb); +static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) { + ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb); + return; + } + + if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) + return; + + ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + rx_ring->last_rx_timestamp = jiffies; +} + int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); -void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 65db69b86..d8a9fb8a5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -765,13 +765,14 @@ mac_reset_top: ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST) { status = IXGBE_ERR_RESET_FAILED; @@ -879,11 +880,12 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VFTA * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * @vlvf_bypass: boolean flag - unused * * Turn on/off specified VLAN in the VLAN filter table. 
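
The new inline dispatches between ixgbe's two Rx timestamp mechanisms: a timestamp prepended to the packet buffer (IXGBE_RXD_STAT_TSIP, the IXGBE_TS_HDR_LEN bytes defined earlier) and one latched in registers, refreshing last_rx_timestamp on the latter so the watchdog can catch a timestamp latched for a frame that was then dropped. The control flow as a runnable sketch; the bit values below are illustrative, not the hardware encodings:

#include <stdio.h>
#include <stdint.h>

#define RXD_STAT_TSIP	0x1	/* stand-in: timestamp carried in the packet buffer */
#define RXDADV_STAT_TS	0x2	/* stand-in: timestamp latched in registers */

static void rx_hwtstamp(uint32_t staterr)
{
	if (staterr & RXD_STAT_TSIP) {		/* cheap in-packet path checked first */
		printf("pktstamp: pull the prepended timestamp off the buffer\n");
		return;
	}
	if (!(staterr & RXDADV_STAT_TS))	/* no timestamp at all */
		return;
	printf("rgtstamp: read the latch registers, refresh the watchdog stamp\n");
}

int main(void)
{
	rx_hwtstamp(RXD_STAT_TSIP);
	rx_hwtstamp(RXDADV_STAT_TS);
	rx_hwtstamp(0);
	return 0;
}
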
**/ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) + bool vlan_on, bool vlvf_bypass) { u32 regindex; u32 bitindex; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index a39afcf03..fa8d4f40a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -990,13 +990,14 @@ mac_reset_top: ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { @@ -1082,12 +1083,16 @@ mac_reset_top: /* Add the SAN MAC address to the RAR only if it's a valid address */ if (is_valid_ether_addr(hw->mac.san_addr)) { - hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, + IXGBE_CLEAR_VMDQ_ALL); + /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index ce61b36b9..64045053e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -1884,10 +1884,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); - - /* clear VMDq pool/queue selection for RAR 0 */ - hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); } + + /* clear VMDq pool/queue selection for RAR 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + hw->addr_ctrl.overflow_promisc = 0; hw->addr_ctrl.rar_used_count = 1; @@ -2454,6 +2455,17 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) /* Always set this bit to ensure any future transactions are blocked */ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); + /* Poll for bit to read as set */ + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS) + break; + usleep_range(100, 120); + } + if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) { + hw_dbg(hw, "GIO disable did not set - requesting resets\n"); + goto gio_disable_fail; + } + /* Exit if master requests are blocked */ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || ixgbe_removed(hw->hw_addr)) @@ -2475,6 +2487,7 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) * again to clear out any effects they may have had on our device. 
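
The hunk above adds a classic read-back poll: after writing IXGBE_CTRL_GIO_DIS, keep reading until the bit is observed set, sleeping briefly between attempts, and jump to the double-reset path if it never latches. The shape of that loop in isolation (the timeout constant and the register read are stand-ins for IXGBE_PCI_MASTER_DISABLE_TIMEOUT and IXGBE_READ_REG()):

#include <stdio.h>

#define MASTER_DISABLE_TIMEOUT	800	/* stand-in value */
#define CTRL_GIO_DIS		0x4	/* stand-in bit */

static unsigned int ctrl_reg = CTRL_GIO_DIS;	/* pretend the bit latched at once */
static unsigned int read_ctrl(void) { return ctrl_reg; }

int main(void)
{
	int i;

	for (i = 0; i < MASTER_DISABLE_TIMEOUT; i++) {
		if (read_ctrl() & CTRL_GIO_DIS)
			break;
		/* the driver sleeps here: usleep_range(100, 120) */
	}
	if (i >= MASTER_DISABLE_TIMEOUT)
		printf("GIO disable did not set - requesting resets\n");
	else
		printf("GIO disable observed after %d polls\n", i);
	return 0;
}
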
*/ hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); +gio_disable_fail: hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; if (hw->mac.type >= ixgbe_mac_X550) @@ -2987,43 +3000,44 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) * return the VLVF index where this VLAN id should be placed * **/ -static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) +static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) { - u32 bits = 0; - u32 first_empty_slot = 0; - s32 regindex; + s32 regindex, first_empty_slot; + u32 bits; /* short cut the special case */ if (vlan == 0) return 0; - /* - * Search for the vlan id in the VLVF entries. Save off the first empty - * slot found along the way - */ - for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0; + + /* add VLAN enable bit for comparison */ + vlan |= IXGBE_VLVF_VIEN; + + /* Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 + */ + for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); - if (!bits && !(first_empty_slot)) + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) first_empty_slot = regindex; - else if ((bits & 0x0FFF) == vlan) - break; } - /* - * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan - * in the VLVF. Else use the first empty VLVF register for this - * vlan id. - */ - if (regindex >= IXGBE_VLVF_ENTRIES) { - if (first_empty_slot) - regindex = first_empty_slot; - else { - hw_dbg(hw, "No space in VLVF.\n"); - regindex = IXGBE_ERR_NO_SPACE; - } - } + /* If we are here then we didn't find the VLAN. Return first empty + * slot we found during our search, else error. + */ + if (!first_empty_slot) + hw_dbg(hw, "No space in VLVF.\n"); - return regindex; + return first_empty_slot ? : IXGBE_ERR_NO_SPACE; } /** @@ -3032,21 +3046,17 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VFVFB * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vlvf_bypass: boolean flag indicating updating default pool is okay * * Turn on/off specified VLAN in the VLAN filter table. 
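
The rewritten slot search folds the old three-way exit into one pass: OR the enable bit into the key so only live entries can match, walk slots N-1 down to 1 (slot 0 is reserved for VLAN 0), and remember the first empty slot encountered unless bypass mode forbids allocating one. A standalone model with simplified constants (the kernel version returns through the GNU "?:" extension; a plain ternary is used here):

#include <stdio.h>

#define VLVF_ENTRIES	64
#define VLVF_VIEN	0x80000000u
#define ERR_NO_SPACE	(-25)		/* stand-in for IXGBE_ERR_NO_SPACE */

static unsigned int vlvf[VLVF_ENTRIES];	/* toy copy of the VLVF array */

static int find_vlvf_slot(unsigned int vlan, int bypass)
{
	int first_empty = bypass ? ERR_NO_SPACE : 0, i;

	if (!vlan)
		return 0;
	vlan |= VLVF_VIEN;		/* only enabled entries may match */

	for (i = VLVF_ENTRIES; --i;) {	/* covers N-1 .. 1 */
		if (vlvf[i] == vlan)
			return i;	/* already present */
		if (!first_empty && !vlvf[i])
			first_empty = i;
	}
	return first_empty ? first_empty : ERR_NO_SPACE;
}

int main(void)
{
	vlvf[5] = VLVF_VIEN | 100;
	printf("%d %d %d\n",
	       find_vlvf_slot(100, 0),	/* 5: found */
	       find_vlvf_slot(42, 0),	/* 63: first empty slot seen */
	       find_vlvf_slot(42, 1));	/* -25: bypass set, no match */
	return 0;
}
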
**/ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) + bool vlan_on, bool vlvf_bypass) { - s32 regindex; - u32 bitindex; - u32 vfta; - u32 bits; - u32 vt; - u32 targetbit; - bool vfta_changed = false; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; - if (vlan > 4095) + if ((vlan > 4095) || (vind > 63)) return IXGBE_ERR_PARAM; /* @@ -3061,22 +3071,16 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * bits[11-5]: which register * bits[4-0]: which bit in the register */ - regindex = (vlan >> 5) & 0x7F; - bitindex = vlan & 0x1F; - targetbit = (1 << bitindex); - vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); - - if (vlan_on) { - if (!(vfta & targetbit)) { - vfta |= targetbit; - vfta_changed = true; - } - } else { - if ((vfta & targetbit)) { - vfta &= ~targetbit; - vfta_changed = true; - } - } + regidx = vlan / 32; + vfta_delta = 1 << (vlan % 32); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; /* Part 2 * If VT Mode is set @@ -3086,85 +3090,67 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * Or !vlan_on * clear the pool bit and possibly the vind */ - vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - if (vt & IXGBE_VT_CTL_VT_ENABLE) { - s32 vlvf_index; - - vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); - if (vlvf_index < 0) - return vlvf_index; - - if (vlan_on) { - /* set the pool bit */ - if (vind < 32) { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - bits |= (1 << vind); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB(vlvf_index*2), - bits); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - bits |= (1 << (vind-32)); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1), - bits); - } - } else { - /* clear the pool bit */ - if (vind < 32) { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - bits &= ~(1 << vind); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB(vlvf_index*2), - bits); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - bits &= ~(1 << (vind-32)); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1), - bits); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - } - } + if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) + goto vfta_update; + + vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; + } - /* - * If there are still bits set in the VLVFB registers - * for the VLAN ID indicated we need to see if the - * caller is requesting that we clear the VFTA entry bit. - * If the caller has requested that we clear the VFTA - * entry bit but there are still pools/VFs using this VLAN - * ID entry then ignore the request. We're not worried - * about the case where we're turning the VFTA VLAN ID - * entry bit on, only when requested to turn it off as - * there may be multiple pools and/or VFs using the - * VLAN ID entry. In that case we cannot clear the - * VFTA bit until all pools/VFs using that VLAN ID have also - * been cleared. This will be indicated by "bits" being - * zero. 
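
The vfta_delta idiom above deserves a note: the mask is pared down so it is nonzero only when the targeted bit must actually flip, the update is then a bare XOR, and a zero delta later lets the code skip the VFTA register write altogether (the same variable is zeroed on the still-in-use path to cancel the clear). In isolation:

#include <stdio.h>
#include <stdint.h>

static uint32_t vfta_update(uint32_t vfta, unsigned int vlan, int vlan_on)
{
	uint32_t delta = 1u << (vlan % 32);

	/* zero when the bit already has the requested value */
	delta &= vlan_on ? ~vfta : vfta;
	return vfta ^ delta;
}

int main(void)
{
	uint32_t vfta = 0x10;	/* bit 4 set in this 32-VLAN word */

	printf("%#x\n", (unsigned)vfta_update(vfta, 4, 1));	/* 0x10: no-op */
	printf("%#x\n", (unsigned)vfta_update(vfta, 5, 1));	/* 0x30: set bit 5 */
	printf("%#x\n", (unsigned)vfta_update(vfta, 4, 0));	/* 0: clear bit 4 */
	return 0;
}
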
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); + + /* set the pool bit */ + bits |= 1 << (vind % 32); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= 1 << (vind % 32); + + if (!bits && + !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool */ - if (bits) { - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), - (IXGBE_VLVF_VIEN | vlan)); - if (!vlan_on) { - /* someone wants to clear the vfta entry - * but some pools/VFs are still using it. - * Ignore it. */ - vfta_changed = false; - } - } else { - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); - } + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); + + /* disable VLVF and clear remaining bit from pool */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); + + return 0; } - if (vfta_changed) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); + +vfta_update: + /* Update VFTA now that we are ready for traffic */ + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); return 0; } @@ -3184,8 +3170,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); } return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index a0044e4a8..2b9563137 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -92,7 +92,7 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on); + u32 vind, bool vlan_on, bool vlvf_bypass); s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index a507a6fe3..02c7333a9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -139,6 +139,11 @@ s32 
ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, /* Calculate credit refill ratio using multiplier */ credit_refill = min(link_percentage * min_multiplier, MAX_CREDIT_REFILL); + + /* Refill at least minimum credit */ + if (credit_refill < min_credit) + credit_refill = min_credit; + p->data_credits_refill = (u16)credit_refill; /* Calculate maximum credit for the TC */ @@ -149,7 +154,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, * of a TC is too small, the maximum credit may not be * enough to send out a jumbo frame in data plane arbitration. */ - if (credit_max && (credit_max < min_credit)) + if (credit_max < min_credit) credit_max = min_credit; if (direction == DCB_TX_CONFIG) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 23277ab15..b5cc989a3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -223,13 +223,13 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) reg |= IXGBE_MFLCN_DPF; /* - * X540 supports per TC Rx priority flow control. So - * clear all TCs and only enable those that should be + * X540 & X550 supports per TC Rx priority flow control. + * So clear all TCs and only enable those that should be * enabled. */ reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); - if (hw->mac.type == ixgbe_mac_X540) + if (hw->mac.type >= ixgbe_mac_X540) reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; if (pfc_en) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index d681273bd..bea96b3bc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -151,47 +151,70 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { }; #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN +/* currently supported speeds for 10G */ +#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \ + SUPPORTED_10000baseKX4_Full | \ + SUPPORTED_10000baseKR_Full) + +#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane) + +static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) +{ + if (!ixgbe_isbackplane(hw->phy.media_type)) + return SUPPORTED_10000baseT_Full; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_X550EM_X_KX4: + return SUPPORTED_10000baseKX4_Full; + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_X550EM_X_KR: + return SUPPORTED_10000baseKR_Full; + default: + return SUPPORTED_10000baseKX4_Full | + SUPPORTED_10000baseKR_Full; + } +} + static int ixgbe_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed supported_link; - u32 link_speed = 0; bool autoneg = false; - bool link_up; hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); /* set the supported link speeds */ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->supported |= SUPPORTED_10000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL) - ecmd->supported |= SUPPORTED_2500baseX_Full; + ecmd->supported |= ixgbe_get_supported_10gtypes(hw); if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) ecmd->supported |= SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->supported |= SUPPORTED_100baseT_Full; + ecmd->supported 
|= ixgbe_isbackplane(hw->phy.media_type) ? + SUPPORTED_1000baseKX_Full : + SUPPORTED_1000baseT_Full; + /* default advertised speed if phy.autoneg_advertised isn't set */ + ecmd->advertising = ecmd->supported; /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { + ecmd->advertising = 0; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) ecmd->advertising |= ADVERTISED_100baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) - ecmd->advertising |= ADVERTISED_2500baseX_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; + ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { + if (ecmd->supported & SUPPORTED_1000baseKX_Full) + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + else + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } } else { - /* default modes in case phy.autoneg_advertised isn't set */ - if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; - if (hw->phy.multispeed_fiber && !autoneg) { if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) ecmd->advertising = ADVERTISED_10000baseT_Full; @@ -229,6 +252,10 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_phy_sfp_avago: case ixgbe_phy_sfp_intel: case ixgbe_phy_sfp_unknown: + case ixgbe_phy_qsfp_passive_unknown: + case ixgbe_phy_qsfp_active_unknown: + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: /* SFP+ devices, further checking needed */ switch (adapter->hw.phy.sfp_type) { case ixgbe_sfp_type_da_cu: @@ -284,9 +311,8 @@ static int ixgbe_get_settings(struct net_device *netdev, break; } - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - if (link_up) { - switch (link_speed) { + if (netif_carrier_ok(netdev)) { + switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 631c603fc..2a653ec95 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -77,7 +77,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) if (!netdev) return 0; - if (xid >= IXGBE_FCOE_DDP_MAX) + if (xid >= netdev->fcoe_ddp_xid) return 0; adapter = netdev_priv(netdev); @@ -177,7 +177,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, return 0; adapter = netdev_priv(netdev); - if (xid >= IXGBE_FCOE_DDP_MAX) { + if (xid >= netdev->fcoe_ddp_xid) { e_warn(drv, "xid=0x%x out-of-range\n", xid); return 0; } @@ -517,6 +517,7 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens; u32 fcoe_sof_eof = 0; u32 mss_l4len_idx; + u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; u8 sof, eof; if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { @@ -593,6 +594,8 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, skb_shinfo(skb)->gso_size); first->bytecount += (first->gso_segs - 1) * *hdr_len; first->tx_flags |= IXGBE_TX_FLAGS_TSO; + /* Hardware expects L4T to be RSV for FCoE TSO */ + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV; } /* set flag indicating 
FCOE to ixgbe_tx_map call */ @@ -610,7 +613,7 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, /* write context desc */ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, - IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); + type_tucmd, mss_l4len_idx); return 0; } @@ -620,8 +623,7 @@ static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) struct ixgbe_fcoe_ddp_pool *ddp_pool; ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); - if (ddp_pool->pool) - dma_pool_destroy(ddp_pool->pool); + dma_pool_destroy(ddp_pool->pool); ddp_pool->pool = NULL; } @@ -997,8 +999,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, return -EINVAL; /* Don't return information on unsupported devices */ - if (hw->mac.type != ixgbe_mac_82599EB && - hw->mac.type != ixgbe_mac_X540) + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return -EINVAL; /* Manufacturer */ @@ -1044,6 +1045,10 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, snprintf(info->model, sizeof(info->model), "Intel 82599"); + } else if (hw->mac.type == ixgbe_mac_X550) { + snprintf(info->model, + sizeof(info->model), + "Intel X550"); } else { snprintf(info->model, sizeof(info->model), diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index f3168bcc7..e771e764d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -844,7 +844,6 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* initialize NAPI */ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); - napi_hash_add(&q_vector->napi); #ifdef CONFIG_NET_RX_BUSY_POLL /* initialize busy poll */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index aed8d029b..c4003a88b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -65,9 +65,6 @@ #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" -#ifdef CONFIG_IXGBE_VXLAN -#include -#endif char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -175,6 +172,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +static struct workqueue_struct *ixgbe_wq; + static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, @@ -316,7 +315,7 @@ static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) if (!test_bit(__IXGBE_DOWN, &adapter->state) && !test_bit(__IXGBE_REMOVING, &adapter->state) && !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) - schedule_work(&adapter->service_task); + queue_work(ixgbe_wq, &adapter->service_task); } static void ixgbe_remove_adapter(struct ixgbe_hw *hw) @@ -1484,7 +1483,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, return; if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { - ring->rx_stats.csum_err++; + skb->ip_summed = CHECKSUM_NONE; return; } /* If we checked the outer header let the stack know */ @@ -1635,6 +1634,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; + u32 flags = rx_ring->q_vector->adapter->flags; ixgbe_update_rsc_stats(rx_ring, skb); @@ -1642,8 +1642,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ixgbe_rx_checksum(rx_ring, rx_desc, skb); - if 
(unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) - ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); + if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED)) + ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@ -1659,6 +1659,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { + skb_mark_napi_id(skb, &q_vector->napi); if (ixgbe_qv_busy_polling(q_vector)) netif_receive_skb(skb); else @@ -2123,7 +2124,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } #endif /* IXGBE_FCOE */ - skb_mark_napi_id(skb, &q_vector->napi); ixgbe_rx_skb(q_vector, skb); /* update budget accounting */ @@ -2741,7 +2741,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - ixgbe_ptp_check_pps_event(adapter, eicr); + ixgbe_ptp_check_pps_event(adapter); /* re-enable the original interrupt state, no lsc, no queues */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -2757,7 +2757,7 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) /* EIAM disabled interrupts (on this vector) for us */ if (q_vector->rx.ring || q_vector->tx.ring) - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -2786,7 +2786,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget) ixgbe_for_each_ring(ring, q_vector->tx) clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); - if (!ixgbe_qv_lock_napi(q_vector)) + /* Exit if we are called by netpoll or busy polling is active */ + if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) return budget; /* attempt to distribute budget to each queue fairly, but don't allow @@ -2947,10 +2948,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - ixgbe_ptp_check_pps_event(adapter, eicr); + ixgbe_ptp_check_pps_event(adapter); /* would disable interrupts here but EIAM disabled it */ - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); /* * re-enable link(maybe) and non-queue interrupts, no flush. @@ -3315,8 +3316,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, } /** - * Return a number of entries in the RSS indirection table - * + * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries * @adapter: device handle * * - 82598/82599/X540: 128 @@ -3334,8 +3334,7 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) } /** - * Write the RETA table to HW - * + * ixgbe_store_reta - Write the RETA table to HW * @adapter: device handle * * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. @@ -3374,8 +3373,7 @@ void ixgbe_store_reta(struct ixgbe_adapter *adapter) } /** - * Write the RETA table to HW (for x550 devices in SRIOV mode) - * + * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode) * @adapter: device handle * * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
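
On the kerneldoc tidied above: the indirection-table size the helper reports depends on the MAC generation - 128 entries through X540 as the comment says, and larger on X550-class parts (512 in the non-SR-IOV case, 64 under SR-IOV, going by the driver; those lines fall outside this hunk). A sketch of the lookup, with the SR-IOV case omitted:

#include <stdio.h>

enum mac_type { mac_82598, mac_82599, mac_X540, mac_X550, mac_X550EM_x };

static unsigned int rss_indir_tbl_entries(enum mac_type t)
{
	switch (t) {
	case mac_X550:
	case mac_X550EM_x:
		return 512;	/* non-SR-IOV; 64 when VFs are active */
	default:
		return 128;	/* 82598/82599/X540 */
	}
}

int main(void)
{
	printf("%u %u\n", rss_indir_tbl_entries(mac_X540),
	       rss_indir_tbl_entries(mac_X550));	/* 128 512 */
	return 0;
}
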
@@ -3621,6 +3619,9 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_rx_desc)); + /* Force flushing of IXGBE_RDLEN to prevent MDD */ + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); @@ -3704,6 +3705,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + /* * Set up VF register offsets for selected VT Mode, * i.e. 32 or 64 VFs for SR-IOV @@ -3901,12 +3905,56 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* add VID to filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true); + hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true); set_bit(vid, adapter->active_vlans); return 0; } +static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +{ + u32 vlvf; + int idx; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (idx = IXGBE_VLVF_ENTRIES; --idx;) { + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + return idx; +} + +void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 bits, word; + int idx; + + idx = ixgbe_find_vlvf_entry(hw, vid); + if (!idx) + return; + + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + word = idx * 2 + (VMDQ_P(0) / 32); + bits = ~(1 << (VMDQ_P(0)) % 32); + bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + + /* Disable the filter so this falls into the default pool. 
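
The word/bit arithmetic in ixgbe_update_pf_promisc_vlvf() recurs through the rest of this series: VLVF slot idx owns two consecutive VLVFB dwords covering pools 0-31 and 32-63, so the PF's membership bit lives at word idx * 2 + pool / 32, bit pool % 32, and word ^ 1 names the companion dword. The "is any other pool still using this filter" test in isolation:

#include <stdio.h>
#include <stdint.h>

static uint32_t vlvfb[64 * 2];	/* two pool-enable dwords per VLVF slot */

static int other_pools_active(int idx, unsigned int pf_pool)
{
	unsigned int word = idx * 2 + pf_pool / 32;
	uint32_t bits = vlvfb[word] & ~(1u << (pf_pool % 32));

	/* the PF's dword minus the PF bit, plus the companion dword */
	return bits || vlvfb[word ^ 1];
}

int main(void)
{
	int idx = 5;			/* some VLVF slot */
	unsigned int pf_pool = 0;	/* VMDQ_P(0) in the driver */

	vlvfb[idx * 2] = 1u << pf_pool;	/* only the PF holds the VLAN */
	printf("%d\n", other_pools_active(idx, pf_pool));	/* 0: safe to disable */

	vlvfb[idx * 2 + 1] = 1u << 3;	/* a VF in pool 35 joins */
	printf("%d\n", other_pools_active(idx, pf_pool));	/* 1: keep the filter */
	return 0;
}
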
*/ + if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) { + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0); + } +} + static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { @@ -3914,7 +3962,11 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* remove VID from filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false); + if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) + ixgbe_update_pf_promisc_vlvf(adapter, vid); + else + hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); + clear_bit(vid, adapter->active_vlans); return 0; @@ -3992,6 +4044,129 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) } } +static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl, i; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + default: + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + break; + /* fall through */ + case ixgbe_mac_82598EB: + /* legacy case, we can just disable VLAN filtering */ + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + return; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; + + /* Add PF to all active pools */ + for (i = IXGBE_VLVF_ENTRIES; --i;) { + u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); + u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); + + vlvfb |= 1 << (VMDQ_P(0) % 32); + IXGBE_WRITE_REG(hw, reg_offset, vlvfb); + } + + /* Set all bits in the VLAN filter table array */ + for (i = hw->mac.vft_size; i--;) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U); +} + +#define VFTA_BLOCK_SIZE 8 +static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * 32; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); + u32 i, vid, word, bits; + + for (i = IXGBE_VLVF_ENTRIES; --i;) { + u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); + + /* pull VLAN ID from VLVF */ + vid = vlvf & VLAN_VID_MASK; + + /* only concern ourselves with a certain range */ + if (vid < vid_start || vid >= vid_end) + continue; + + if (vlvf) { + /* record VLAN ID in VFTA */ + vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + word = i * 2 + VMDQ_P(0) / 32; + bits = ~(1 << (VMDQ_P(0) % 32)); + bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); + } + + /* extract values from active_vlans and write back to VFTA */ + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * 32; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->active_vlans[word] >> bits; + + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]); + } +} + +static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl, i; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case
ixgbe_mac_X550EM_x: + default: + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + break; + /* fall through */ + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + vlnctrl |= IXGBE_VLNCTRL_VFE; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + return; + } + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + + for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) + ixgbe_scrub_vfta(adapter, i); +} + static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) { u16 vid; @@ -4034,124 +4209,156 @@ static int ixgbe_write_mc_addr_list(struct net_device *netdev) #ifdef CONFIG_PCI_IOV void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) - hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr, - adapter->mac_table[i].queue, + + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; + + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, + mac_table->addr, + mac_table->pool, IXGBE_RAH_AV); else hw->mac.ops.clear_rar(hw, i); - - adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED); } } -#endif +#endif static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) { - if (adapter->mac_table[i].state & - IXGBE_MAC_STATE_IN_USE) - hw->mac.ops.set_rar(hw, i, - adapter->mac_table[i].addr, - adapter->mac_table[i].queue, - IXGBE_RAH_AV); - else - hw->mac.ops.clear_rar(hw, i); - adapter->mac_table[i].state &= - ~(IXGBE_MAC_STATE_MODIFIED); - } + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) + continue; + + mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; + + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, + mac_table->addr, + mac_table->pool, + IXGBE_RAH_AV); + else + hw->mac.ops.clear_rar(hw, i); } } static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) { - int i; + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; + int i; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - eth_zero_addr(adapter->mac_table[i].addr); - adapter->mac_table[i].queue = 0; + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + mac_table->state |= IXGBE_MAC_STATE_MODIFIED; + mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; } + ixgbe_sync_mac_table(adapter); } -static int ixgbe_available_rars(struct ixgbe_adapter *adapter) +static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i, count = 0; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state == 0) - count++; + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + /* do not count default RAR as available */ + if 
(mac_table->state & IXGBE_MAC_STATE_DEFAULT) + continue; + + /* only count unused and addresses that belong to us */ + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { + if (mac_table->pool != pool) + continue; + } + + count++; } + return count; } /* this function destroys the first RAR entry */ -static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter, - u8 *addr) +static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; - memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); - adapter->mac_table[0].queue = VMDQ_P(0); - adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT | - IXGBE_MAC_STATE_IN_USE); - hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, - adapter->mac_table[0].queue, + memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); + mac_table->pool = VMDQ_P(0); + + mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; + + hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, IXGBE_RAH_AV); } -int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) +int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 pool) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; if (is_zero_ether_addr(addr)) return -EINVAL; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) continue; - adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED | - IXGBE_MAC_STATE_IN_USE); - ether_addr_copy(adapter->mac_table[i].addr, addr); - adapter->mac_table[i].queue = queue; + + ether_addr_copy(mac_table->addr, addr); + mac_table->pool = pool; + + mac_table->state |= IXGBE_MAC_STATE_MODIFIED | + IXGBE_MAC_STATE_IN_USE; + ixgbe_sync_mac_table(adapter); + return i; } + return -ENOMEM; } -int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) +int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 pool) { - /* search table for addr, if found, set to 0 and sync */ - int i; + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; + int i; if (is_zero_ether_addr(addr)) return -EINVAL; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (ether_addr_equal(addr, adapter->mac_table[i].addr) && - adapter->mac_table[i].queue == queue) { - adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - eth_zero_addr(adapter->mac_table[i].addr); - adapter->mac_table[i].queue = 0; - ixgbe_sync_mac_table(adapter); - return 0; - } + /* search table for addr, if found clear IN_USE flag and sync */ + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + /* we can only delete an entry if it is in use */ + if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) + continue; + /* we only care about entries that belong to the given pool */ + if (mac_table->pool != pool) + continue; + /* we only care about a specific MAC address */ + if (!ether_addr_equal(addr, mac_table->addr)) + continue; + + mac_table->state |= IXGBE_MAC_STATE_MODIFIED; + mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; + + ixgbe_sync_mac_table(adapter); + + return 0; } + return -ENOMEM; } /** @@ -4169,7 +4376,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) int count = 0; /* return ENOMEM 
indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter)) + if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn)) return -ENOMEM; if (!netdev_uc_empty(netdev)) { @@ -4183,6 +4390,25 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) return count; } +static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); + + return min_t(int, ret, 0); +} + +static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); + + return 0; +} + /** * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -4197,12 +4423,10 @@ void ixgbe_set_rx_mode(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; - u32 vlnctrl; int count; /* Check for Promiscuous and All Multicast modes */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); /* set all bits that we expect to always be set */ fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ @@ -4212,25 +4436,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev) /* clear the bits we are changing the status of */ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); - vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); if (netdev->flags & IFF_PROMISC) { hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); vmolr |= IXGBE_VMOLR_MPE; - /* Only disable hardware filter vlans in promiscuous mode - * if SR-IOV and VMDQ are disabled - otherwise ensure - * that hardware VLAN filters remain enabled. 
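
ixgbe_uc_sync() clamps with min_t() because ixgbe_add_mac_filter() returns the RAR index it consumed (>= 0) on success, while a __dev_uc_sync() callback must return 0 or a negative errno. The mapping on its own:

#include <stdio.h>

/* stands in for ixgbe_add_mac_filter(): RAR index on success, -errno on failure */
static int add_filter(const unsigned char *addr)
{
	(void)addr;
	return 7;	/* pretend RAR entry 7 was used */
}

static int uc_sync(const unsigned char *addr)
{
	int ret = add_filter(addr);

	return ret < 0 ? ret : 0;	/* min_t(int, ret, 0) in the driver */
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };

	printf("%d\n", uc_sync(mac));	/* 0, even though index 7 was consumed */
	return 0;
}
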
- */ - if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | - IXGBE_FLAG_SRIOV_ENABLED)) - vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + ixgbe_vlan_promisc_enable(adapter); } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; } - vlnctrl |= IXGBE_VLNCTRL_VFE; hw->addr_ctrl.user_set_promisc = false; + ixgbe_vlan_promisc_disable(adapter); } /* @@ -4238,8 +4455,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0)); - if (count < 0) { + if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) { fctrl |= IXGBE_FCTRL_UPE; vmolr |= IXGBE_VMOLR_ROPE; } @@ -4275,7 +4491,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev) /* NOTE: VLAN filtering is disabled by setting PROMISC */ } - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) @@ -5042,7 +5257,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; int err; - u8 old_addr[ETH_ALEN]; if (ixgbe_removed(hw->hw_addr)) return; @@ -5078,10 +5292,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) } clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - /* do not flush user set addresses */ - memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + + /* flush entries out of MAC table */ ixgbe_flush_sw_mac_table(adapter); - ixgbe_mac_set_default_filter(adapter, old_addr); + __dev_uc_unsync(netdev, NULL); + + /* do not flush user set addresses */ + ixgbe_mac_set_default_filter(adapter); /* update SAN MAC vmdq pool selection */ if (hw->mac.san_mac_rar_index) @@ -5331,6 +5548,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * hw->mac.num_rar_entries, GFP_ATOMIC); + if (!adapter->mac_table) + return -ENOMEM; /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { @@ -6616,10 +6835,8 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - struct pci_dev *vfdev; + unsigned int vf; u32 gpc; - int pos; - unsigned short vf_id; if (!(netif_carrier_ok(adapter->netdev))) return; @@ -6636,26 +6853,17 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) if (!pdev) return; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - if (!pos) - return; - - /* get the device ID for the VF */ - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); - /* check status reg for all VFs owned by this PF */ - vfdev = pci_get_device(pdev->vendor, vf_id, NULL); - while (vfdev) { - if (vfdev->is_virtfn && (vfdev->physfn == pdev)) { - u16 status_reg; - - pci_read_config_word(vfdev, PCI_STATUS, &status_reg); - if (status_reg & PCI_STATUS_REC_MASTER_ABORT) - /* issue VFLR */ - ixgbe_issue_vf_flr(adapter, vfdev); - } + for (vf = 0; vf < adapter->num_vfs; ++vf) { + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + u16 status_reg; - vfdev = pci_get_device(pdev->vendor, vf_id, vfdev); + if (!vfdev) + continue; + pci_read_config_word(vfdev, PCI_STATUS, &status_reg); + if (status_reg != IXGBE_FAILED_READ_CFG_WORD && + status_reg & PCI_STATUS_REC_MASTER_ABORT) + ixgbe_issue_vf_flr(adapter, vfdev); } } @@ -7024,6 +7232,7 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, struct tcphdr *tcphdr; u8 *raw; } 
transport_hdr; + __be16 frag_off; if (skb->encapsulation) { network_hdr.raw = skb_inner_network_header(skb); @@ -7047,13 +7256,17 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, case 6: vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; l4_hdr = network_hdr.ipv6->nexthdr; + if (likely((transport_hdr.raw - network_hdr.raw) == + sizeof(struct ipv6hdr))) + break; + ipv6_skip_exthdr(skb, network_hdr.raw - skb->data + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + l4_hdr = NEXTHDR_FRAGMENT; break; default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but version=%d\n", - network_hdr.ipv4->version); - } + break; } switch (l4_hdr) { @@ -7074,16 +7287,18 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); + "partial checksum, version=%d, l4 proto=%x\n", + network_hdr.ipv4->version, l4_hdr); } - break; + skb_checksum_help(skb); + goto no_csum; } /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; } +no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; @@ -7358,7 +7573,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring, /* snag network header to get L4 type and address */ skb = first->skb; hdr.network = skb_network_header(skb); - if (skb->encapsulation) { + if (!skb->encapsulation) { + th = tcp_hdr(skb); + } else { #ifdef CONFIG_IXGBE_VXLAN struct ixgbe_adapter *adapter = q_vector->adapter; @@ -7377,14 +7594,34 @@ static void ixgbe_atr(struct ixgbe_ring *ring, #else return; #endif /* CONFIG_IXGBE_VXLAN */ - } else { - /* Currently only IPv4/IPv6 with TCP is supported */ - if ((first->protocol != htons(ETH_P_IPV6) || - hdr.ipv6->nexthdr != IPPROTO_TCP) && - (first->protocol != htons(ETH_P_IP) || - hdr.ipv4->protocol != IPPROTO_TCP)) + } + + /* Currently only IPv4/IPv6 with TCP is supported */ + switch (hdr.ipv4->version) { + case IPVERSION: + if (hdr.ipv4->protocol != IPPROTO_TCP) return; - th = tcp_hdr(skb); + break; + case 6: + if (likely((unsigned char *)th - hdr.network == + sizeof(struct ipv6hdr))) { + if (hdr.ipv6->nexthdr != IPPROTO_TCP) + return; + } else { + __be16 frag_off; + u8 l4_hdr; + + ipv6_skip_exthdr(skb, hdr.network - skb->data + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + return; + if (l4_hdr != IPPROTO_TCP) + return; + } + break; + default: + return; } /* skip this packet since it is invalid or the socket is closing */ @@ -7419,10 +7656,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring, common.port.src ^= th->dest ^ first->protocol; common.port.dst ^= th->source; - if (first->protocol == htons(ETH_P_IP)) { + switch (hdr.ipv4->version) { + case IPVERSION: input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; - } else { + break; + case 6: input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ hdr.ipv6->saddr.s6_addr32[1] ^ @@ -7432,6 +7671,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring, hdr.ipv6->daddr.s6_addr32[1] ^ hdr.ipv6->daddr.s6_addr32[2] ^ hdr.ipv6->daddr.s6_addr32[3]; + break; + default: + break; } #ifdef CONFIG_IXGBE_VXLAN @@ -7659,17 +7901,16 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; struct sockaddr *addr = p; - int ret; if (!is_valid_ether_addr(addr->sa_data)) 
return -EADDRNOTAVAIL; - ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); - return ret > 0 ? 0 : ret; + ixgbe_mac_set_default_filter(adapter); + + return 0; } static int @@ -8155,7 +8396,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], { /* guarantee we can provide a unique filter for the unicast address */ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { - if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev)) + struct ixgbe_adapter *adapter = netdev_priv(dev); + u16 pool = VMDQ_P(0); + + if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) return -ENOMEM; } @@ -8387,7 +8631,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > IXGBE_MAX_TUNNEL_HDR_LEN)) - return features & ~NETIF_F_ALL_CSUM; + return features & ~NETIF_F_CSUM_MASK; return features; } @@ -8784,8 +9028,8 @@ skip_sriov: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - netdev->features |= NETIF_F_SCTP_CSUM; - netdev->hw_features |= NETIF_F_SCTP_CSUM | + netdev->features |= NETIF_F_SCTP_CRC; + netdev->hw_features |= NETIF_F_SCTP_CRC | NETIF_F_NTUPLE; break; default: @@ -8801,8 +9045,7 @@ skip_sriov: netdev->vlan_features |= NETIF_F_IPV6_CSUM; netdev->vlan_features |= NETIF_F_SG; - netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM; + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; @@ -8811,9 +9054,7 @@ skip_sriov: switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - netdev->hw_enc_features |= NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM; + netdev->hw_enc_features |= NETIF_F_RXCSUM; break; default: break; @@ -8873,7 +9114,7 @@ skip_sriov: goto err_sw_init; } - ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + ixgbe_mac_set_default_filter(adapter); setup_timer(&adapter->service_timer, &ixgbe_service_timer, (unsigned long) adapter); @@ -9328,6 +9569,12 @@ static int __init ixgbe_init_module(void) pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); pr_info("%s\n", ixgbe_copyright); + ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name); + if (!ixgbe_wq) { + pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name); + return -ENOMEM; + } + ixgbe_dbg_init(); ret = pci_register_driver(&ixgbe_driver); @@ -9359,6 +9606,10 @@ static void __exit ixgbe_exit_module(void) pci_unregister_driver(&ixgbe_driver); ixgbe_dbg_exit(); + if (ixgbe_wq) { + destroy_workqueue(ixgbe_wq); + ixgbe_wq = NULL; + } } #ifdef CONFIG_IXGBE_DCA diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index fb8673d63..db0731e05 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -2393,6 +2393,9 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return 0; + if (!on && ixgbe_mng_present(hw)) + return 0; + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index e5ba04025..ef1504d41
100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -27,6 +27,7 @@ *******************************************************************************/ #include "ixgbe.h" #include <linux/ptp_classify.h> +#include <linux/clocksource.h> /* * The 82599 and the X540 do not have true 64bit nanosecond scale * counter registers. @@ -93,7 +94,6 @@ #define IXGBE_INCVAL_SHIFT_82599 7 #define IXGBE_INCPER_SHIFT_82599 24 -#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL #define IXGBE_OVERFLOW_PERIOD (HZ * 30) #define IXGBE_PTP_TX_TIMEOUT (HZ * 15) @@ -104,8 +104,68 @@ */ #define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL +/* In contrast, the X550 controller has two registers, SYSTIMEH and SYSTIMEL + * which contain measurements of seconds and nanoseconds respectively. This + * matches the standard Linux representation of time in the kernel. In addition, + * the X550 also has a SYSTIMER register which represents residue, or + * subnanosecond overflow adjustments. To control clock adjustment, the TIMINCA + * register is used, but it is unlike the X540 and 82599 devices. TIMINCA + * represents units of 2^-32 nanoseconds, and uses 31 bits for this, with the + * high bit representing whether the adjustment is positive or negative. Every + * clock cycle, the X550 will add 12.5 ns + TIMINCA which can result in a range + * of 12 to 13 nanoseconds adjustment. Unlike the 82599 and X540 devices, the + * X550's clock for purposes of SYSTIME generation is constant and not dependent + * on the link speed. + * + * SYSTIMEH SYSTIMEL SYSTIMER + * +--------------+ +--------------+ +-------------+ + * X550 | 32 | | 32 | | 32 | + * +--------------+ +--------------+ +-------------+ + * \____seconds___/ \_nanoseconds_/ \__2^-32 ns__/ + * + * This results in a full 96 bits to represent the clock, with 32 bits for + * seconds, 32 bits for nanoseconds (largest value is 0d999999999 or just under + * 1 second) and an additional 32 bits to measure sub-nanosecond adjustments for + * underflow of adjustments. + * + * The 32 bits of seconds for the X550 overflow every + * 2^32 / ( 365.25 * 24 * 60 * 60 ) = ~136 years. + * + * In order to adjust the clock frequency for the X550, the TIMINCA register is + * provided. This register represents a plus or minus adjustment of nearly 0.5 ns + * to the base period. It is measured in 2^-32 ns units, with the high bit being + * the sign bit. This register enables software to calculate frequency + * adjustments and apply them directly to the clock rate. + * + * The math for converting ppb into TIMINCA values is fairly straightforward. + * TIMINCA value = ( Base_Frequency * ppb ) / 1000000000ULL + * + * This assumes that ppb is never high enough to create a value bigger than + * TIMINCA's 31 bits can store. This is ensured by the stack. Calculating this + * value is also simple. + * Max ppb = ( Max Adjustment / Base Frequency ) * 1000000000ULL + * + * For the X550, the Max adjustment is +/- 0.5 ns, and the base period is + * 12.5 nanoseconds. This means that the Max ppb is 39999999. + * Note: We subtract one in order to ensure no overflow, because the TIMINCA + * register can only hold slightly under 0.5 nanoseconds.
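To make the conversion math above concrete, here is a minimal standalone C sketch (illustrative only, not part of the patch; the constants mirror the IXGBE_X550_BASE_PERIOD, INCVALUE_MASK and ISGN defines introduced just below):

#include <stdint.h>

#define X550_BASE_PERIOD 0xC80000000ULL /* 12.5 ns in 2^-32 ns units */
#define INCVALUE_MASK    0x7FFFFFFFU    /* low 31 bits hold the magnitude */
#define ISGN             0x80000000U    /* high bit selects the sign */

/* Compute a TIMINCA-style register value for a given ppb adjustment:
 * rate = (base period * ppb) / 1e9, still expressed in 2^-32 ns units.
 */
static uint32_t timinca_from_ppb(int64_t ppb)
{
	uint32_t sign = 0;
	uint64_t rate;

	if (ppb < 0) {
		sign = ISGN;
		ppb = -ppb;
	}

	rate = (X550_BASE_PERIOD * (uint64_t)ppb) / 1000000000ULL;

	return sign | (uint32_t)(rate & INCVALUE_MASK);
}

/* Worked example: ppb = 1000 (one part per million) gives
 * 0xC80000000 * 1000 / 1e9 = 53687, i.e. about 12.5 fs of extra
 * period per 12.5 ns cycle - exactly one ppm of the base period.
 */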
+ * + * Because TIMINCA is measured in 2^-32 ns units, we have to convert 12.5 ns + * into 2^-32 ns units, which is + * + * 12.5 * 2^32 = C80000000 + * + * Some revisions of hardware have a faster base frequency than the registers + * were defined for. To fix this, we use a timecounter structure with the + * proper mult and shift to convert the cycles into nanoseconds of time. + */ +#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL +#define INCVALUE_MASK 0x7FFFFFFF +#define ISGN 0x80000000 +#define MAX_TIMADJ 0x7FFFFFFF + /** - * ixgbe_ptp_setup_sdp + * ixgbe_ptp_setup_sdp_x540 * @hw: the hardware private structure * * this function enables or disables the clock out feature on SDP0 for @@ -116,83 +176,116 @@ * aligns the start of the PPS signal to that value. The shift is * necessary because it can change based on the link speed. */ -static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter) +static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int shift = adapter->cc.shift; + int shift = adapter->hw_cc.shift; u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem; u64 ns = 0, clock_edge = 0; - if ((adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED) && - (hw->mac.type == ixgbe_mac_X540)) { + /* disable the pin first */ + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); + IXGBE_WRITE_FLUSH(hw); - /* disable the pin first */ - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); - IXGBE_WRITE_FLUSH(hw); + if (!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) + return; - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - /* - * enable the SDP0 pin as output, and connected to the - * native function for Timesync (ClockOut) - */ - esdp |= (IXGBE_ESDP_SDP0_DIR | - IXGBE_ESDP_SDP0_NATIVE); + /* enable the SDP0 pin as output, and connected to the + * native function for Timesync (ClockOut) + */ + esdp |= IXGBE_ESDP_SDP0_DIR | + IXGBE_ESDP_SDP0_NATIVE; - /* - * enable the Clock Out feature on SDP0, and allow - * interrupts to occur when the pin changes - */ - tsauxc = (IXGBE_TSAUXC_EN_CLK | - IXGBE_TSAUXC_SYNCLK | - IXGBE_TSAUXC_SDP0_INT); + /* enable the Clock Out feature on SDP0, and allow + * interrupts to occur when the pin changes + */ + tsauxc = IXGBE_TSAUXC_EN_CLK | + IXGBE_TSAUXC_SYNCLK | + IXGBE_TSAUXC_SDP0_INT; - /* clock period (or pulse length) */ - clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); - clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); + /* clock period (or pulse length) */ + clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); + clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); - /* - * Account for the cyclecounter wrap-around value by - * using the converted ns value of the current time to - * check for when the next aligned second would occur. - */ - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; - ns = timecounter_cyc2time(&adapter->tc, clock_edge); + /* Account for the cyclecounter wrap-around value by + * using the converted ns value of the current time to + * check for when the next aligned second would occur.
+ */ + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge); - div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem); - clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift); + div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem); + clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift); - /* specify the initial clock start time */ - trgttiml = (u32)clock_edge; - trgttimh = (u32)(clock_edge >> 32); + /* specify the initial clock start time */ + trgttiml = (u32)clock_edge; + trgttimh = (u32)(clock_edge >> 32); - IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); - IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); + IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); + IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); - } else { - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); - } + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); IXGBE_WRITE_FLUSH(hw); } /** - * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) + * ixgbe_ptp_read_X550 - read cycle counter value + * @hw_cc: cyclecounter structure + * + * This function reads SYSTIME registers. It is called by the cyclecounter + * structure to convert from internal representation into nanoseconds. We need + * this for X550 since some SKUs do not have the expected clock frequency and + * the result of SYSTIME is 32 bits of "billions of cycles" and 32 bits of + * "cycles", rather than seconds and nanoseconds. + */ +static cycle_t ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) +{ + struct ixgbe_adapter *adapter = + container_of(hw_cc, struct ixgbe_adapter, hw_cc); + struct ixgbe_hw *hw = &adapter->hw; + struct timespec64 ts; + + /* storage is 32 bits of 'billions of cycles' and 32 bits of 'cycles'. + * Some revisions of hardware run at a higher frequency and so the + * cycles are not guaranteed to be nanoseconds. The timespec64 created + * here is used for its math/conversions but does not necessarily + * represent nominal time. + * + * It should be noted that this cyclecounter will overflow at a + * non-bitmask value since we have to convert our billions of cycles + * into an actual cycles count. This results in some possible weird + * situations at high cycle counter stamps. However, given that 32 bits + * of "seconds" is ~138 years, this isn't a problem. Even at the + * increased frequency of some revisions, this is still ~103 years. + * Since the SYSTIME values start at 0 and we never write them, it is + * highly unlikely for the cyclecounter to overflow in practice.
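A rough sketch of the flattening this comment describes (illustrative only; timespec64_to_ns() is simply sec * 1e9 + nsec, which is exactly what turns "billions of cycles" plus "cycles" into one cycle count):

#include <stdint.h>

/* Flatten an X550 stamp (upper 32 bits = "billions of cycles" from
 * SYSTIMH, lower 32 bits = "cycles" from SYSTIML) into a single
 * cycle count, the same math timespec64_to_ns() performs.
 */
static uint64_t x550_stamp_to_cycles(uint64_t stamp)
{
	uint64_t billions = stamp >> 32;
	uint64_t cycles = stamp & 0xFFFFFFFFULL;

	return billions * 1000000000ULL + cycles;
}

/* Overflow horizon from the comment: 32 bits of "billions" last
 * 2^32 / (365.25 * 24 * 60 * 60) ~= 136 years at the nominal rate,
 * so wraparound is not a practical concern.
 */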
+ */ + IXGBE_READ_REG(hw, IXGBE_SYSTIMR); + ts.tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML); + ts.tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH); + + return (u64)timespec64_to_ns(&ts); +} + +/** + * ixgbe_ptp_read_82599 - read raw cycle counter (to be used by time counter) * @cc: the cyclecounter structure * * this function reads the cyclecounter registers and is called by the * cyclecounter structure used to construct a ns counter from the * arbitrary fixed point registers */ -static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc) +static cycle_t ixgbe_ptp_read_82599(const struct cyclecounter *cc) { struct ixgbe_adapter *adapter = - container_of(cc, struct ixgbe_adapter, cc); + container_of(cc, struct ixgbe_adapter, hw_cc); struct ixgbe_hw *hw = &adapter->hw; u64 stamp = 0; @@ -203,20 +296,79 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc) } /** - * ixgbe_ptp_adjfreq + * ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @adapter: private adapter structure + * @hwtstamp: stack timestamp structure + * @systim: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. + **/ +static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + unsigned long flags; + struct timespec64 systime; + u64 ns; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + + switch (adapter->hw.mac.type) { + /* X550 and later hardware supposedly represent time using a seconds + * and nanoseconds counter, instead of raw 64bits nanoseconds. We need + * to convert the timestamp into cycles before it can be fed to the + * cyclecounter. We need an actual cyclecounter because some revisions + * of hardware run at a higher frequency and thus the counter does + * not represent seconds/nanoseconds. Instead it can be thought of as + * cycles and billions of cycles. + */ + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* Upper 32 bits represent billions of cycles, lower 32 bits + * represent cycles. However, we use timespec64_to_ns for the + * correct math even though the units haven't been corrected + * yet. + */ + systime.tv_sec = timestamp >> 32; + systime.tv_nsec = timestamp & 0xFFFFFFFF; + + timestamp = timespec64_to_ns(&systime); + break; + default: + break; + } + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + +/** + * ixgbe_ptp_adjfreq_82599 * @ptp: the ptp clock structure * @ppb: parts per billion adjustment from base * * adjust the frequency of the ptp cycle counter by the * indicated ppb from the base frequency. 
*/ -static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); struct ixgbe_hw *hw = &adapter->hw; - u64 freq; - u32 diff, incval; + u64 freq, incval; + u32 diff; int neg_adj = 0; if (ppb < 0) { @@ -235,12 +387,16 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) switch (hw->mac.type) { case ixgbe_mac_X540: - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + if (incval > 0xFFFFFFFFULL) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (u32)incval); break; case ixgbe_mac_82599EB: + if (incval > 0x00FFFFFFULL) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (1 << IXGBE_INCPER_SHIFT_82599) | - incval); + ((u32)incval & 0x00FFFFFFUL)); break; default: break; @@ -249,6 +405,43 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) return 0; } +/** + * ixgbe_ptp_adjfreq_X550 + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the SYSTIME registers by the indicated ppb from base + * frequency + */ +static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + struct ixgbe_hw *hw = &adapter->hw; + int neg_adj = 0; + u64 rate = IXGBE_X550_BASE_PERIOD; + u32 inca; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate *= ppb; + rate = div_u64(rate, 1000000000ULL); + + /* warn if rate is too large */ + if (rate >= INCVALUE_MASK) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, inca); + + return 0; +} + /** * ixgbe_ptp_adjtime * @ptp: the ptp clock structure @@ -263,10 +456,11 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) unsigned long flags; spin_lock_irqsave(&adapter->tmreg_lock, flags); - timecounter_adjtime(&adapter->tc, delta); + timecounter_adjtime(&adapter->hw_tc, delta); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - ixgbe_ptp_setup_sdp(adapter); + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); return 0; } @@ -283,11 +477,11 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); - u64 ns; unsigned long flags; + u64 ns; spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_read(&adapter->tc); + ns = timecounter_read(&adapter->hw_tc); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); *ts = ns_to_timespec64(ns); @@ -308,17 +502,16 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); - u64 ns; unsigned long flags; - - ns = timespec64_to_ns(ts); + u64 ns = timespec64_to_ns(ts); /* reset the timecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); - timecounter_init(&adapter->tc, &adapter->cc, ns); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - ixgbe_ptp_setup_sdp(adapter); + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); return 0; } @@ -343,33 +536,26 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp, * event when the clock SDP triggers. 
Clear mask when PPS is * disabled */ - if (rq->type == PTP_CLK_REQ_PPS) { - switch (adapter->hw.mac.type) { - case ixgbe_mac_X540: - if (on) - adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; - else - adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; - - ixgbe_ptp_setup_sdp(adapter); - return 0; - default: - break; - } - } + if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp) + return -ENOTSUPP; + + if (on) + adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; + else + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; - return -ENOTSUPP; + adapter->ptp_setup_sdp(adapter); + return 0; } /** * ixgbe_ptp_check_pps_event * @adapter: the private adapter structure - * @eicr: the interrupt cause register value * * This function is called by the interrupt routine when checking for * interrupts. It will check and handle a pps event. */ -void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ptp_clock_event event; @@ -425,7 +611,9 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + struct ixgbe_ring *rx_ring; unsigned long rx_event; + int n; /* if we don't have a valid timestamp in the registers, just update the * timeout counter and exit @@ -437,18 +625,42 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) /* determine the most recent watchdog or rx_timestamp event */ rx_event = adapter->last_rx_ptp_check; - if (time_after(adapter->last_rx_timestamp, rx_event)) - rx_event = adapter->last_rx_timestamp; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } /* only need to read the high RXSTMP register to clear the lock */ - if (time_is_before_jiffies(rx_event + 5*HZ)) { + if (time_is_before_jiffies(rx_event + 5 * HZ)) { IXGBE_READ_REG(hw, IXGBE_RXSTMPH); adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; e_warn(drv, "clearing RX Timestamp hang\n"); } } +/** + * ixgbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state + * @adapter: the private adapter structure + * + * This function should be called whenever the state related to a Tx timestamp + * needs to be cleared. This helps ensure that all related bits are reset for + * the next Tx timestamp event. 
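For context, the Tx timestamps latched and cleared by these helpers are the ones a userspace application solicits through SO_TIMESTAMPING; a minimal sketch of that side (illustrative only, not from the patch, error handling trimmed):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Ask the kernel for hardware Tx timestamps on this socket. The
 * stamped packet clone later arrives on the socket error queue as
 * a SCM_TIMESTAMPING control message, fed by skb_tstamp_tx().
 */
static int enable_tx_hw_timestamps(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}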
+ */ +static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_READ_REG(hw, IXGBE_TXSTMPH); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); +} + /** * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp * @adapter: the private adapter struct @@ -461,23 +673,15 @@ { struct ixgbe_hw *hw = &adapter->hw; struct skb_shared_hwtstamps shhwtstamps; - u64 regval = 0, ns; - unsigned long flags; + u64 regval = 0; regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; - spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_cyc2time(&adapter->tc, regval); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - shhwtstamps.hwtstamp = ns_to_ktime(ns); + ixgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + ixgbe_ptp_clear_tx_timestamp(adapter); } /** @@ -497,38 +701,85 @@ IXGBE_PTP_TX_TIMEOUT); u32 tsynctxctl; - if (timeout) { - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); - e_warn(drv, "clearing Tx Timestamp hang\n"); + /* we have to have a valid skb to poll for a timestamp */ + if (!adapter->ptp_tx_skb) { + ixgbe_ptp_clear_tx_timestamp(adapter); return; } + /* stop polling once we have a valid timestamp */ tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); - if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) + if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) { ixgbe_ptp_tx_hwtstamp(adapter); - else + return; + } + + if (timeout) { + ixgbe_ptp_clear_tx_timestamp(adapter); + adapter->tx_hwtstamp_timeouts++; + e_warn(drv, "clearing Tx Timestamp hang\n"); + } else { /* reschedule to keep checking if it's not available yet */ schedule_work(&adapter->ptp_tx_work); + } } /** - * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp - * @adapter: pointer to adapter struct + * ixgbe_ptp_rx_pktstamp - utility function to get RX time stamp from buffer + * @q_vector: structure containing interrupt and ring information + * @skb: the packet + * + * This function will be called by the Rx routine if the timestamp for this + * packet is stored in the buffer. The value is stored in little endian format + * starting at the end of the packet data. + */ +void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + __le64 regval; + + /* copy the bits out of the skb, and then trim the skb length */ + skb_copy_bits(skb, skb->len - IXGBE_TS_HDR_LEN, &regval, + IXGBE_TS_HDR_LEN); + __pskb_trim(skb, skb->len - IXGBE_TS_HDR_LEN); + + /* The timestamp is recorded in little endian format, and is stored at + * the end of the packet.
+ * + * DWORD: N N + 1 N + 2 + * Field: End of Packet SYSTIMH SYSTIML + */ + ixgbe_ptp_convert_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), + le64_to_cpu(regval)); +} + +/** + * ixgbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp + * @q_vector: structure containing interrupt and ring information * @skb: particular skb to send timestamp with * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the shhwtstamps structure which * is passed up the network stack */ -void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb) +void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) { - struct ixgbe_hw *hw = &adapter->hw; - struct skb_shared_hwtstamps *shhwtstamps; - u64 regval = 0, ns; + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + u64 regval = 0; u32 tsyncrxctl; - unsigned long flags; + + /* we cannot process timestamps on a ring without a q_vector */ + if (!q_vector || !q_vector->adapter) + return; + + adapter = q_vector->adapter; + hw = &adapter->hw; + + /* Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. + */ tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) @@ -537,17 +788,7 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb) regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; - spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_cyc2time(&adapter->tc, regval); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - shhwtstamps = skb_hwtstamps(skb); - shhwtstamps->hwtstamp = ns_to_ktime(ns); - - /* Update the last_rx_timestamp timer in order to enable watchdog check - * for error case of latched timestamp on a dropped packet. - */ - adapter->last_rx_timestamp = jiffies; + ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); } int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) @@ -610,14 +851,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; tsync_rx_mtrl = 0; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -631,9 +878,21 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_ALL: + /* The X550 controller is capable of timestamping all packets, + * which allows it to accept any filter. 
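The filter negotiation handled here is driven from userspace through the standard SIOCSHWTSTAMP ioctl; a minimal sketch (the interface name and error handling are illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* Request timestamping of all received packets; X550-class hardware
 * grants HWTSTAMP_FILTER_ALL as-is, older MACs may narrow or reject
 * it, and the driver writes the mode it actually applied back into cfg.
 */
static int request_rx_timestamp_all(int fd, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}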
+ */ + if (hw->mac.type >= ixgbe_mac_X550) { + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + break; + } + /* fall through */ default: /* * register RXMTRL must be set in order to do V1 packets, @@ -641,16 +900,46 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, * Delay_Req messages and hardware does not support * timestamping all packets => return error */ + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } if (hw->mac.type == ixgbe_mac_82598EB) { + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); if (tsync_rx_ctl | tsync_tx_ctl) return -ERANGE; return 0; } + /* Per-packet timestamping only works if the filter is set to all + * packets. Since this is desired, always timestamp all packets as long + * as any Rx filter was configured. + */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* enable timestamping all packets only if at least some + * packets were requested. Otherwise, play nice and disable + * timestamping + */ + if (config->rx_filter == HWTSTAMP_FILTER_NONE) + break; + + tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED | + IXGBE_TSYNCRXCTL_TYPE_ALL | + IXGBE_TSYNCRXCTL_TSIP_UT_EN; + config->rx_filter = HWTSTAMP_FILTER_ALL; + adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; + is_l2 = true; + break; + default: + break; + } + /* define ethertype filter for timestamping L2 packets */ if (is_l2) IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), @@ -678,8 +967,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_WRITE_FLUSH(hw); /* clear TX/RX time stamp registers, just to be sure */ - regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH); - regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + ixgbe_ptp_clear_tx_timestamp(adapter); + IXGBE_READ_REG(hw, IXGBE_RXSTMPH); return 0; } @@ -712,23 +1001,9 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) -EFAULT : 0; } -/** - * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw - * @adapter: pointer to the adapter structure - * - * This function should be called to set the proper values for the TIMINCA - * register and tell the cyclecounter structure what the tick rate of SYSTIME - * is. It does not directly modify SYSTIME registers or the timecounter - * structure. It should be called whenever a new TIMINCA value is necessary, - * such as during initialization or when the link speed changes. 
- */ -void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) +static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter, + u32 *shift, u32 *incval) { - struct ixgbe_hw *hw = &adapter->hw; - u32 incval = 0; - u32 shift = 0; - unsigned long flags; - /** * Scale the NIC cycle counter by a large factor so that * relatively small corrections to the frequency can be added @@ -745,36 +1020,98 @@ */ switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: - incval = IXGBE_INCVAL_100; - shift = IXGBE_INCVAL_SHIFT_100; + *shift = IXGBE_INCVAL_SHIFT_100; + *incval = IXGBE_INCVAL_100; break; case IXGBE_LINK_SPEED_1GB_FULL: - incval = IXGBE_INCVAL_1GB; - shift = IXGBE_INCVAL_SHIFT_1GB; + *shift = IXGBE_INCVAL_SHIFT_1GB; + *incval = IXGBE_INCVAL_1GB; break; case IXGBE_LINK_SPEED_10GB_FULL: default: - incval = IXGBE_INCVAL_10GB; - shift = IXGBE_INCVAL_SHIFT_10GB; + *shift = IXGBE_INCVAL_SHIFT_10GB; + *incval = IXGBE_INCVAL_10GB; break; } +} - /** - * Modify the calculated values to fit within the correct - * number of bits specified by the hardware. The 82599 doesn't - * have the same space as the X540, so bitshift the calculated - * values to fit. +/** + * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter: pointer to the adapter structure + * + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. + */ +void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct cyclecounter cc; + unsigned long flags; + u32 incval = 0; + u32 tsauxc = 0; + u32 fuse0 = 0; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61 bits. However, the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of the upper 32 bits of + * billions of cycles. Timecounters are not really intended for this + * purpose, so they do not properly function if the overflow point + * isn't 2^N-1. However, the actual SYSTIME values in question take + * ~138 years to overflow. In practice this means they won't actually + * overflow. A proper fix to this problem would require modification + * of the timecounter delta calculations. */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + switch (hw->mac.type) { + case ixgbe_mac_X550EM_x: + /* SYSTIME assumes X550EM_x board frequency is 300 MHz, and is + * designed to represent seconds and nanoseconds when this is + * the case. However, some revisions of hardware have a 400 MHz + * clock and we have to compensate for this frequency + * variation using corrected mult and shift values.
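The compensation described here reduces to a fixed 3/4 scale factor; a small worked sketch of the timecounter math with the mult/shift pair chosen below (values illustrative of the 400 MHz case only):

#include <stdint.h>

/* timecounter converts a cycle delta to ns as (cycles * mult) >> shift.
 * A 400 MHz SYSTIME clock advances 4/3 faster than the 300 MHz design
 * rate, so each recorded count is worth 3/4 ns: mult = 3, shift = 2.
 */
static uint64_t x550em_cycles_to_ns(uint64_t cycles)
{
	const uint32_t mult = 3;
	const uint32_t shift = 2;

	return (cycles * mult) >> shift;
}

/* e.g. a raw delta of 400 counts maps to (400 * 3) >> 2 = 300 ns. */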
+ */ + fuse0 = IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)); + if (!(fuse0 & IXGBE_FUSES0_300MHZ)) { + cc.mult = 3; + cc.shift = 2; + } + /* fallthrough */ + case ixgbe_mac_X550: + cc.read = ixgbe_ptp_read_X550; + + /* enable SYSTIME counter */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); + tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, + tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME); + IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC); + + IXGBE_WRITE_FLUSH(hw); + break; case ixgbe_mac_X540: + cc.read = ixgbe_ptp_read_82599; + + ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); break; case ixgbe_mac_82599EB: + cc.read = ixgbe_ptp_read_82599; + + ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); incval >>= IXGBE_INCVAL_SHIFT_82599; - shift -= IXGBE_INCVAL_SHIFT_82599; + cc.shift -= IXGBE_INCVAL_SHIFT_82599; IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | - incval); + (1 << IXGBE_INCPER_SHIFT_82599) | incval); break; default: /* other devices aren't supported */ @@ -787,13 +1124,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) /* need lock to prevent incorrect read while modifying cyclecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); - - memset(&adapter->cc, 0, sizeof(adapter->cc)); - adapter->cc.read = ixgbe_ptp_read; - adapter->cc.mask = CYCLECOUNTER_MASK(64); - adapter->cc.shift = shift; - adapter->cc.mult = 1; - + memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); } @@ -814,29 +1145,27 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; unsigned long flags; - /* set SYSTIME registers to 0 just in case */ - IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000); - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); - IXGBE_WRITE_FLUSH(hw); - /* reset the hardware timestamping mode */ ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + /* 82598 does not support PTP */ + if (hw->mac.type == ixgbe_mac_82598EB) + return; + ixgbe_ptp_start_cyclecounter(adapter); spin_lock_irqsave(&adapter->tmreg_lock, flags); - - /* reset the ns time counter */ - timecounter_init(&adapter->tc, &adapter->cc, + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ktime_to_ns(ktime_get_real())); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - /* - * Now that the shift has been calculated and the systime + adapter->last_overflow_check = jiffies; + + /* Now that the shift has been calculated and the systime * registers reset, (re-)enable the Clock out feature */ - ixgbe_ptp_setup_sdp(adapter); + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); } /** @@ -845,11 +1174,11 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) * * This function performs setup of the user entry point function table and * initializes the PTP clock device, which is used to access the clock-like - * features of the PTP core. It will be called by ixgbe_ptp_init, only if - * there isn't already a clock device (such as after a suspend/resume cycle, - * where the clock device wasn't destroyed). + * features of the PTP core. It will be called by ixgbe_ptp_init, and may + * reuse a previously initialized clock (such as during a suspend/resume + * cycle). 
*/ -static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) +static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; long err; @@ -869,11 +1198,12 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 1; - adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540; break; case ixgbe_mac_82599EB: snprintf(adapter->ptp_caps.name, @@ -885,14 +1215,31 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 0; - adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; + adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 30000000; + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + adapter->ptp_setup_sdp = NULL; break; default: adapter->ptp_clock = NULL; + adapter->ptp_setup_sdp = NULL; return -EOPNOTSUPP; } @@ -961,18 +1308,13 @@ void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter) if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) return; - /* since this might be called in suspend, we don't clear the state, - * but simply reset the auxiliary PPS signal control register - */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0); + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); /* ensure that we cancel any pending PTP Tx work item in progress */ cancel_work_sync(&adapter->ptp_tx_work); - if (adapter->ptp_tx_skb) { - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); - } + ixgbe_ptp_clear_tx_timestamp(adapter); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index fcd8b27a0..8025a3f93 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -130,6 +130,38 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) return -ENOMEM; } +/** + * ixgbe_get_vfs - Find and take references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u16 vendor = pdev->vendor; + struct pci_dev *vfdev; + int vf = 0; + u16 vf_id; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + vfdev = pci_get_device(vendor, vf_id, NULL); + for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { + if (!vfdev->is_virtfn) + continue; + if (vfdev->physfn != pdev) + continue; + if (vf >= adapter->num_vfs) + continue; + pci_dev_get(vfdev); + adapter->vfinfo[vf].vfdev = vfdev; + ++vf; + } +} + /* Note this function is called when the user wants to enable SR-IOV * VFs using the now deprecated module parameter */ @@ -170,8 +202,10 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) } } - if (!__ixgbe_enable_sriov(adapter)) + if (!__ixgbe_enable_sriov(adapter)) { + ixgbe_get_vfs(adapter); return; + } /* If we have gotten to this point then there is no memory available * to manage the VF devices - print message and bail. @@ -184,6 +218,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) #endif /* #ifdef CONFIG_PCI_IOV */ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { + unsigned int num_vfs = adapter->num_vfs, vf; struct ixgbe_hw *hw = &adapter->hw; u32 gpie; u32 vmdctl; @@ -192,6 +227,16 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; + /* put the reference to all of the vf devices */ + for (vf = 0; vf < num_vfs; ++vf) { + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + + if (!vfdev) + continue; + adapter->vfinfo[vf].vfdev = NULL; + pci_dev_put(vfdev); + } + /* free VF control structures */ kfree(adapter->vfinfo); adapter->vfinfo = NULL; @@ -289,6 +334,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) e_dev_warn("Failed to enable PCI sriov: %d\n", err); return err; } + ixgbe_get_vfs(adapter); ixgbe_sriov_reinit(adapter); return num_vfs; @@ -406,11 +452,34 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) { - /* VLAN 0 is a special case, don't allow it to be removed */ - if (!vid && !add) - return 0; + struct ixgbe_hw *hw = &adapter->hw; + int err; + + /* If VLAN overlaps with one the PF is currently monitoring make + * sure that we are able to allocate a VLVF entry. This may be + * redundant but it guarantees PF will maintain visibility to + * the VLAN. + */ + if (add && test_bit(vid, adapter->active_vlans)) { + err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false); + if (err) + return err; + } + + err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false); - return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); + if (add && !err) + return err; + + /* If we failed to add the VF VLAN or we are removing the VF VLAN + * we may need to drop the PF pool bit in order to allow us to free + * up the VLVF resources. 
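The VLVF/VLVFB bookkeeping referenced here spreads 64 pool-enable bits per VLVF entry across two 32-bit VLVFB words; a small sketch of the addressing used by ixgbe_clear_vf_vlans() further down (a sketch, not driver code):

#include <stdint.h>

/* VLVF entry i owns two 32-bit VLVFB words: pools 0-31 live in word
 * (i * 2) and pools 32-63 in word (i * 2 + 1). Locate the word index
 * and bit mask for a given (entry, pool) pair.
 */
static void vlvfb_locate(uint32_t entry, uint32_t pool,
			 uint32_t *word, uint32_t *mask)
{
	*word = entry * 2 + pool / 32;
	*mask = (uint32_t)1 << (pool % 32);
}

/* e.g. entry 5, pool 40 -> word 11, bit 8 (mask 0x100) */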
+ */ + if (test_bit(vid, adapter->active_vlans) || + (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + ixgbe_update_pf_promisc_vlvf(adapter, vid); + + return err; } static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) @@ -516,13 +585,75 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); } + +static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 i; + + /* post-decrement loop, covers VLVF_ENTRIES - 1 to 0 */ + for (i = IXGBE_VLVF_ENTRIES; i--;) { + u32 bits[2], vlvfb, vid, vfta, vlvf; + u32 word = i * 2 + vf / 32; + u32 mask = 1 << (vf % 32); + + vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + + /* if our bit isn't set we can skip it */ + if (!(vlvfb & mask)) + continue; + + /* clear our bit from vlvfb */ + vlvfb ^= mask; + + /* create 64b mask to check whether we should clear VLVF */ + bits[word % 2] = vlvfb; + bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); + + /* if promisc is enabled, PF will be present, leave VFTA */ + if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) { + bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32)); + + if (bits[0] || bits[1]) + goto update_vlvfb; + goto update_vlvf; + } + + /* if other pools are present, just remove ourselves */ + if (bits[0] || bits[1]) + goto update_vlvfb; + + /* if we cannot determine VLAN just remove ourselves */ + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); + if (!vlvf) + goto update_vlvfb; + + vid = vlvf & VLAN_VID_MASK; + mask = 1 << (vid % 32); + + /* clear bit from VFTA */ + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); + if (vfta & mask) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask); +update_vlvf: + /* clear POOL selection enable */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); +update_vlvfb: + /* clear pool bits */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); + } +} + static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; u8 num_tcs = netdev_get_num_tc(adapter->netdev); - /* add PF assigned VLAN or VLAN 0 */ + /* remove VLAN filters belonging to this VF */ + ixgbe_clear_vf_vlans(adapter, vf); + + /* add back PF assigned VLAN or VLAN 0 */ ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); /* reset offloads to defaults */ @@ -768,40 +899,14 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; } -static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) -{ - u32 vlvf; - s32 regindex; - - /* short cut the special case */ - if (vlan == 0) - return 0; - - /* Search for the vlan id in the VLVF entries */ - for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); - if ((vlvf & VLAN_VID_MASK) == vlan) - break; - } - - /* Return a negative value if not found */ - if (regindex >= IXGBE_VLVF_ENTRIES) - regindex = -1; - - return regindex; -} - static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { + u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; + u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); + u8 tcs = netdev_get_num_tc(adapter->netdev); struct ixgbe_hw *hw = &adapter->hw; - int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; - int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); int err; - s32 reg_ndx; - u32 vlvf; - u32 bits; - u8 tcs =
netdev_get_num_tc(adapter->netdev); if (adapter->vfinfo[vf].pf_vlan || tcs) { e_warn(drv, @@ -811,54 +916,23 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, return -1; } - if (add) - adapter->vfinfo[vf].vlan_count++; - else if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; - - /* in case of promiscuous mode any VLAN filter set for a VF must - * also have the PF pool added to it. - */ - if (add && adapter->netdev->flags & IFF_PROMISC) - err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; err = ixgbe_set_vf_vlan(adapter, add, vid, vf); - if (!err && adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + if (err) + return err; - /* Go through all the checks to see if the VLAN filter should - * be wiped completely. - */ - if (!add && adapter->netdev->flags & IFF_PROMISC) { - reg_ndx = ixgbe_find_vlvf_entry(hw, vid); - if (reg_ndx < 0) - return err; - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx)); - /* See if any other pools are set for this VLAN filter - * entry other than the PF. - */ - if (VMDQ_P(0) < 32) { - bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); - bits &= ~(1 << VMDQ_P(0)); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB(reg_ndx * 2) + 1); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(reg_ndx * 2) + 1); - bits &= ~(1 << (VMDQ_P(0) - 32)); - bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); - } + if (adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - /* If the filter was removed then ensure PF pool bit - * is cleared if the PF only added itself to the pool - * because the PF is in promiscuous mode. - */ - if ((vlvf & VLAN_VID_MASK) == vid && - !test_bit(vid, adapter->active_vlans) && !bits) - ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); - } + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; - return err; + return 0; } static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, @@ -1239,6 +1313,9 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, if (err) goto out; + /* Revoke tagless access via VLAN 0 */ + ixgbe_set_vf_vlan(adapter, false, 0, vf); + ixgbe_set_vmvir(adapter, vlan, qos, vf); ixgbe_set_vmolr(hw, vf, false); if (adapter->vfinfo[vf].spoofchk_enabled) @@ -1272,6 +1349,8 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) err = ixgbe_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan, vf); + /* Restore tagless access via VLAN 0 */ + ixgbe_set_vf_vlan(adapter, true, 0, vf); ixgbe_clear_vmvir(adapter, vf); ixgbe_set_vmolr(hw, vf, true); hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 995f03107..bf7367a08 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1020,6 +1020,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ #define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ #define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ #define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ #define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ #define 
IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ @@ -1036,6 +1037,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ #define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ #define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ +#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ /* Diagnostic Registers */ #define IXGBE_RDSTATCTL 0x02C20 @@ -1345,7 +1347,10 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ #define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ #define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */ #define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* global fault msg */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */ #define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ /* autoneg vendor alarm int enable */ #define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 @@ -1353,6 +1358,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ #define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ #define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ +#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /*int dev fault enable */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ @@ -2209,6 +2215,7 @@ enum { #define IXGBE_TSAUXC_EN_CLK 0x00000004 #define IXGBE_TSAUXC_SYNCLK 0x00000008 #define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 #define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ #define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ @@ -2218,8 +2225,12 @@ enum { #define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 #define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 #define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08 #define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A #define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ +#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */ + +#define IXGBE_TSIM_TXTS 0x00000002 #define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF #define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 @@ -2332,6 +2343,7 @@ enum { #define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ #define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ #define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ #define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ #define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ #define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ @@ -2768,6 +2780,7 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ #define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ #define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define 
IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ @@ -3288,7 +3301,7 @@ struct ixgbe_mac_operations { s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); - s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); s32 (*init_uta_tables)(struct ixgbe_hw *); void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); @@ -3508,7 +3521,7 @@ struct ixgbe_info { #define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) #define IXGBE_FUSES0_300MHZ BIT(5) -#define IXGBE_FUSES0_REV1 BIT(6) +#define IXGBE_FUSES0_REV_MASK (3 << 6) #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index c1d4584f6..2358c1b7d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -57,8 +57,7 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) struct ixgbe_phy_info *phy = &hw->phy; /* set_phy_power was set by default to NULL */ - if (!ixgbe_mng_present(hw)) - phy->ops.set_phy_power = ixgbe_set_copper_phy_power; + phy->ops.set_phy_power = ixgbe_set_copper_phy_power; mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; @@ -110,13 +109,14 @@ mac_reset_top: ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { @@ -154,12 +154,16 @@ mac_reset_top: /* Add the SAN MAC address to the RAR only if it's a valid address */ if (is_valid_ether_addr(hw->mac.san_addr)) { - hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, + IXGBE_CLEAR_VMDQ_ALL); + /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index ebe0ac950..87aca3f7c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -26,6 +26,8 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" +static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); + static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -84,79 +86,6 @@ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) value); } -/** - * ixgbe_check_cs4227_reg - Perform diag on a CS4227 register - * @hw: pointer to hardware structure - * @reg: the register to check - * - * Performs a diagnostic on a register in the CS4227 chip. Returns an error - * if it is not operating correctly. - * This function assumes that the caller has acquired the proper semaphore. 
- */ -static s32 ixgbe_check_cs4227_reg(struct ixgbe_hw *hw, u16 reg) -{ - s32 status; - u32 retry; - u16 reg_val; - - reg_val = (IXGBE_CS4227_EDC_MODE_DIAG << 1) | 1; - status = ixgbe_write_cs4227(hw, reg, reg_val); - if (status) - return status; - for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { - msleep(IXGBE_CS4227_CHECK_DELAY); - reg_val = 0xFFFF; - ixgbe_read_cs4227(hw, reg, &reg_val); - if (!reg_val) - break; - } - if (reg_val) { - hw_err(hw, "CS4227 reg 0x%04X failed diagnostic\n", reg); - return status; - } - - return 0; -} - -/** - * ixgbe_get_cs4227_status - Return CS4227 status - * @hw: pointer to hardware structure - * - * Performs a diagnostic on the CS4227 chip. Returns an error if it is - * not operating correctly. - * This function assumes that the caller has acquired the proper semaphore. - */ -static s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw) -{ - s32 status; - u16 value = 0; - - /* Exit if the diagnostic has already been performed. */ - status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); - if (status) - return status; - if (value == IXGBE_CS4227_RESET_COMPLETE) - return 0; - - /* Check port 0. */ - status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB); - if (status) - return status; - - status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB); - if (status) - return status; - - /* Check port 1. */ - status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB + - (1 << 12)); - if (status) - return status; - - return ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB + - (1 << 12)); -} - /** * ixgbe_read_pe - Read register from port expander * @hw: pointer to hardware structure @@ -326,13 +255,6 @@ static void ixgbe_check_cs4227(struct ixgbe_hw *hw) return; } - /* Is the CS4227 working correctly? */ - status = ixgbe_get_cs4227_status(hw); - if (status) { - hw_err(hw, "CS4227 status failed: %d", status); - goto out; - } - /* Record completion for next time. */ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, IXGBE_CS4227_RESET_COMPLETE); @@ -1257,31 +1179,71 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, if (status) return status; - /* Configure CS4227 LINE side to 10G SR. */ - slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); - value = IXGBE_CS4227_SPEED_10G; - status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, - value); - - /* Configure CS4227 for HOST connection rate then type. */ - slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); - value = speed & IXGBE_LINK_SPEED_10GB_FULL ? - IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; - status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, - value); + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + /* Configure CS4227 LINE side to 10G SR. */ + slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); + value = IXGBE_CS4227_SPEED_10G; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; - slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); - if (setup_linear) - value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; - else + slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; - status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, - value); + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; + + /* Configure CS4227 for HOST connection rate then type. 
*/ + slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); + value = speed & IXGBE_LINK_SPEED_10GB_FULL ? + IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; - /* If internal link mode is XFI, then setup XFI internal link. */ - if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) + slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); + if (setup_linear) + value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; + + /* Setup XFI internal link. */ status = ixgbe_setup_ixfi_x550em(hw, &speed); + if (status) { + hw_dbg(hw, "setup_ixfi failed with %d\n", status); + return status; + } + } else { + /* Configure internal PHY for KR/KX. */ + status = ixgbe_setup_kr_speed_x550em(hw, speed); + if (status) { + hw_dbg(hw, "setup_kr_speed failed with %d\n", status); + return status; + } + /* Configure CS4227 LINE side to proper mode. */ + slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); + if (setup_linear) + value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; + } + + return 0; + +i2c_err: + hw_dbg(hw, "combined i2c access failed with %d\n", status); return status; } @@ -1482,7 +1444,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) IXGBE_MDIO_GLOBAL_ALARM_1_INT))) return status; - /* High temperature failure alarm triggered */ + /* Global alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg); @@ -1496,6 +1458,21 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) ixgbe_set_copper_phy_power(hw, false); return IXGBE_ERR_OVERTEMP; } + if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { + /* device fault alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + if (status) + return status; + + /* if device fault was due to high temp alarm, handle and exit */ + if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { + /* power down the PHY in case the PHY FW didn't */ + ixgbe_set_copper_phy_power(hw, false); + return IXGBE_ERR_OVERTEMP; + } + } /* Vendor alarm 2 triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, @@ -1549,14 +1526,15 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) if (status) return status; - /* Enables high temperature failure alarm */ + /* Enable high temperature failure and global fault alarms */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg); if (status) return status; - reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN; + reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | + IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, @@ -1765,6 +1743,12 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return IXGBE_ERR_CONFIG; + if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + return ixgbe_setup_kr_speed_x550em(hw, speed); + } + 
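ixgbe_get_lasi_ext_t_x550em() above now treats a device fault as a possible overtemp report: it reads GLOBAL_ALARM_1, and only if the DEV_FAULT bit is set does it fetch GLOBAL_FAULT_MSG and compare against the high-temperature code. A runnable userspace sketch of that triage order, with a hypothetical read_phy_reg() stub standing in for hw->phy.ops.read_reg() and constant values mirroring ixgbe_type.h:

#include <stdint.h>
#include <stdio.h>

#define ALM_1_HI_TMP_FAIL 0x4000	/* IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL */
#define ALM_1_DEV_FAULT   0x0010	/* IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT */
#define FAULT_MSG_HI_TMP  0x8007	/* IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP */

static uint16_t alarm1 = ALM_1_DEV_FAULT;	/* pretend a device fault fired */
static uint16_t fault_msg = FAULT_MSG_HI_TMP;

/* Stand-in for an MDIO register read; 0 selects ALARM_1, 1 FAULT_MSG. */
static int read_phy_reg(int which, uint16_t *val)
{
	*val = which ? fault_msg : alarm1;
	return 0;
}

/* Returns 1 on overtemp, 0 otherwise, mirroring the order used above. */
static int is_overtemp(void)
{
	uint16_t reg;

	if (read_phy_reg(0, &reg))
		return 0;
	if (reg & ALM_1_HI_TMP_FAIL)	/* direct high-temperature alarm */
		return 1;
	if (reg & ALM_1_DEV_FAULT) {	/* a fault may encode overtemp */
		if (read_phy_reg(1, &reg))
			return 0;
		if (reg == FAULT_MSG_HI_TMP)
			return 1;
	}
	return 0;
}

int main(void)
{
	printf("overtemp: %d\n", is_overtemp());
	return 0;
}
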
/* If link is not up, then there is no setup necessary so return */ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) @@ -1873,10 +1857,6 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) u32 save_autoneg; bool link_up; - /* SW LPLU not required on later HW revisions. */ - if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))) - return 0; - /* If blocked by MNG FW, then don't restart AN */ if (ixgbe_check_reset_blocked(hw)) return 0; @@ -1969,7 +1949,6 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; - ixgbe_link_speed speed; s32 ret_val; hw->mac.ops.set_lan_id(hw); @@ -1982,13 +1961,6 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) * to determine internal PHY mode. */ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); - - /* If internal PHY mode is KR, then initialize KR link */ - if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { - speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); - } } /* Identify the PHY or SFP module */ @@ -2020,18 +1992,13 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) /* If internal link mode is XFI, then setup iXFI internal link, * else setup KR now. */ - if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { - phy->ops.setup_internal_link = - ixgbe_setup_internal_phy_t_x550em; - } else { - speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); - } + phy->ops.setup_internal_link = + ixgbe_setup_internal_phy_t_x550em; /* setup SW LPLU only for first revision */ - if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, - IXGBE_FUSES0_GROUP(0)))) + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) & + IXGBE_FUSES0_REV_MASK)) phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; @@ -2176,13 +2143,14 @@ mac_reset_top: ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear meaning reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index d3e5f5b37..c48aef613 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -774,7 +774,7 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, adapter->tx_itr_setting = ec->tx_coalesce_usecs; if (adapter->tx_itr_setting == 1) - tx_itr_param = IXGBE_10K_ITR; + tx_itr_param = IXGBE_12K_ITR; else tx_itr_param = adapter->tx_itr_setting; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index ec3147279..68ec7daa0 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -326,8 +326,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) #define IXGBE_MIN_RSC_ITR 24 #define IXGBE_100K_ITR 40 #define IXGBE_20K_ITR 200 -#define IXGBE_10K_ITR 400 -#define IXGBE_8K_ITR 500 +#define IXGBE_12K_ITR 336 /* Helper macros to switch between ints/sec and what the register uses. 
* And yes, it's the same math going both ways. The lowest value diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 592ff237d..3558f019b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -59,7 +59,7 @@ static const char ixgbevf_driver_string[] = #define DRV_VERSION "2.12.1-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2012 Intel Corporation."; + "Copyright (c) 2009 - 2015 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@ -96,12 +96,14 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +static struct workqueue_struct *ixgbevf_wq; + static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter) { if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && !test_bit(__IXGBEVF_REMOVING, &adapter->state) && !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) - schedule_work(&adapter->service_task); + queue_work(ixgbevf_wq, &adapter->service_task); } static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter) @@ -1014,6 +1016,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) ixgbevf_for_each_ring(ring, q_vector->tx) clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); + if (budget <= 0) + return budget; #ifdef CONFIG_NET_RX_BUSY_POLL if (!ixgbevf_qv_lock_napi(q_vector)) return budget; @@ -1043,7 +1047,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) return budget; /* all work done, exit the polling mode */ napi_complete_done(napi, work_done); - if (adapter->rx_itr_setting & 1) + if (adapter->rx_itr_setting == 1) ixgbevf_set_itr(q_vector); if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && !test_bit(__IXGBEVF_REMOVING, &adapter->state)) @@ -1138,7 +1142,7 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) if (q_vector->tx.ring && !q_vector->rx.ring) { /* Tx only vector */ if (adapter->tx_itr_setting == 1) - q_vector->itr = IXGBE_10K_ITR; + q_vector->itr = IXGBE_12K_ITR; else q_vector->itr = adapter->tx_itr_setting; } else { @@ -1196,7 +1200,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, /* simple throttle rate management * 0-20MB/s lowest (100000 ints/s) * 20-100MB/s low (20000 ints/s) - * 100-1249MB/s bulk (8000 ints/s) + * 100-1249MB/s bulk (12000 ints/s) */ /* what was last interrupt timeslice? 
*/ timepassed_us = q_vector->itr >> 2; @@ -1246,8 +1250,9 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) new_itr = IXGBE_20K_ITR; break; case bulk_latency: + new_itr = IXGBE_12K_ITR; + break; default: - new_itr = IXGBE_8K_ITR; break; } @@ -1288,7 +1293,7 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) /* EIAM disabled interrupts (on this vector) for us */ if (q_vector->rx.ring || q_vector->tx.ring) - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -1332,7 +1337,6 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) int txr_remaining = adapter->num_tx_queues; int i, j; int rqpv, tqpv; - int err = 0; q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; @@ -1345,7 +1349,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) for (; txr_idx < txr_remaining; v_start++, txr_idx++) map_vector_to_txq(adapter, v_start, txr_idx); - goto out; + return 0; } /* If we don't have enough vectors for a 1-to-1 @@ -1370,8 +1374,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) } } -out: - return err; + return 0; } /** @@ -1469,9 +1472,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) **/ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) { - int err = 0; - - err = ixgbevf_request_msix_irqs(adapter); + int err = ixgbevf_request_msix_irqs(adapter); if (err) hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); @@ -1830,7 +1831,7 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - int err = -EOPNOTSUPP; + int err; spin_lock_bh(&adapter->mbx_lock); @@ -2046,7 +2047,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) ixgbe_mbox_api_11, ixgbe_mbox_api_10, ixgbe_mbox_api_unknown }; - int err = 0, idx = 0; + int err, idx = 0; spin_lock_bh(&adapter->mbx_lock); @@ -2260,10 +2261,8 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter) } if (is_valid_ether_addr(adapter->hw.mac.addr)) { - memcpy(netdev->dev_addr, adapter->hw.mac.addr, - netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, - netdev->addr_len); + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } adapter->last_reset = jiffies; @@ -2421,7 +2420,7 @@ err_allocation: static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int err = 0; + int err; int vector, v_budget; /* It's easy to be greedy for MSI-X vectors, but it really @@ -2439,26 +2438,21 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) */ adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); - if (!adapter->msix_entries) { - err = -ENOMEM; - goto out; - } + if (!adapter->msix_entries) + return -ENOMEM; for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; err = ixgbevf_acquire_msix_vectors(adapter, v_budget); if (err) - goto out; + return err; err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); if (err) - goto out; - - err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + return err; -out: - return err; + return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); } /** @@ -2483,9 +2477,6 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) q_vector->v_idx = 
q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64); -#ifdef CONFIG_NET_RX_BUSY_POLL - napi_hash_add(&q_vector->napi); -#endif adapter->q_vector[q_idx] = q_vector; } @@ -2662,13 +2653,14 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) else if (is_zero_ether_addr(adapter->hw.mac.addr)) dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + ether_addr_copy(netdev->dev_addr, hw->mac.addr); } if (!is_valid_ether_addr(netdev->dev_addr)) { dev_info(&pdev->dev, "Assigning random MAC address\n"); eth_hw_addr_random(netdev); - memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); + ether_addr_copy(hw->mac.addr, netdev->dev_addr); + ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr); } /* Enable dynamic interrupt throttling rates */ @@ -3355,6 +3347,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_hdr = 0; + __be16 frag_off; switch (first->protocol) { case htons(ETH_P_IP): @@ -3365,13 +3358,16 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, case htons(ETH_P_IPV6): vlan_macip_lens |= skb_network_header_len(skb); l4_hdr = ipv6_hdr(skb)->nexthdr; + if (likely(skb_network_header_len(skb) == + sizeof(struct ipv6hdr))) + break; + ipv6_skip_exthdr(skb, skb_network_offset(skb) + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + l4_hdr = NEXTHDR_FRAGMENT; break; default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - first->protocol); - } break; } @@ -3393,16 +3389,18 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); + "partial checksum, l3 proto=%x, l4 proto=%x\n", + first->protocol, l4_hdr); } - break; + skb_checksum_help(skb); + goto no_csum; } /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; } +no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; @@ -3698,8 +3696,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + ether_addr_copy(netdev->dev_addr, addr->sa_data); + ether_addr_copy(hw->mac.addr, addr->sa_data); spin_lock_bh(&adapter->mbx_lock); @@ -4248,15 +4246,17 @@ static struct pci_driver ixgbevf_driver = { **/ static int __init ixgbevf_init_module(void) { - int ret; - pr_info("%s - version %s\n", ixgbevf_driver_string, ixgbevf_driver_version); pr_info("%s\n", ixgbevf_copyright); + ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name); + if (!ixgbevf_wq) { + pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name); + return -ENOMEM; + } - ret = pci_register_driver(&ixgbevf_driver); - return ret; + return pci_register_driver(&ixgbevf_driver); } module_init(ixgbevf_init_module); @@ -4270,6 +4270,10 @@ module_init(ixgbevf_init_module); static void __exit ixgbevf_exit_module(void) { pci_unregister_driver(&ixgbevf_driver); + if (ixgbevf_wq) { + destroy_workqueue(ixgbevf_wq); + ixgbevf_wq = NULL; + } } #ifdef DEBUG diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 427f3605c..61a98f4c5 100644 --- 
a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -117,7 +117,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) return IXGBE_ERR_INVALID_MAC_ADDR; - ether_addr_copy(hw->mac.perm_addr, addr); + if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) + ether_addr_copy(hw->mac.perm_addr, addr); + hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; return 0; -- cgit v1.2.3-54-g00ecf
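The ixgbevf changes above also move the service task from the system workqueue to a driver-private queue created in ixgbevf_init_module() and destroyed in ixgbevf_exit_module(). A minimal kernel-module sketch of that lifecycle, with illustrative names rather than driver code: allocate the queue before anything can schedule work, and tear it down only after deregistration so no work item can outlive it.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);	/* instead of schedule_work() */
	return 0;
}

static void __exit demo_exit(void)
{
	/* drains pending work, then frees the queue */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");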