author    | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-09-11 04:34:46 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-09-11 04:34:46 -0300
commit    | 863981e96738983919de841ec669e157e6bdaeb0 (patch)
tree      | d6d89a12e7eb8017837c057935a2271290907f76 /drivers/net/ethernet/intel/ixgbe
parent    | 8dec7c70575785729a6a9e6719a955e9c545bcab (diff)
Linux-libre 4.7.1-gnupck-4.7.1-gnu
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe')
23 files changed, 1836 insertions, 838 deletions
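Much of the mechanical churn in the diff below converts open-coded shifts such as `(u32)(1 << 23)`, `((u64)1 << v_idx)` and `(~0) << vf_shift` to the kernel's BIT(), BIT_ULL() and GENMASK() helpers, which keep the arithmetic unsigned (no undefined behaviour when the shift reaches bit 31 of a signed int) and make the operand width explicit. The following is a minimal user-space sketch of the idea only; the `MY_*` names are stand-ins and the real kernel macros live in the bitops headers, whose exact definitions may differ.

```c
/*
 * Illustrative user-space approximations of BIT()/BIT_ULL()/GENMASK().
 * Assumes a 64-bit unsigned long; not the kernel's actual definitions.
 */
#include <stdio.h>

#define MY_BIT(nr)       (1UL << (nr))    /* unsigned: safe even for bit 31 */
#define MY_BIT_ULL(nr)   (1ULL << (nr))   /* 64-bit masks, e.g. per-queue EIMS bits */
#define MY_GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (63 - (h))))  /* bits h..l set */

int main(void)
{
	/* (u32)(1 << 23) in the old flag defines becomes BIT(23) */
	printf("BIT(23)        = 0x%08lx\n", MY_BIT(23));

	/* ((u64)1 << q_vector->v_idx) for the EIMS queue mask becomes BIT_ULL() */
	printf("BIT_ULL(40)    = 0x%016llx\n", MY_BIT_ULL(40));

	/* (~0) << vf_shift for the VFRE/VFTE pool enables becomes GENMASK(31, vf_shift) */
	printf("GENMASK(31, 8) = 0x%08lx\n", MY_GENMASK(31, 8) & 0xffffffffUL);
	return 0;
}
```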
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index e4949af7d..9f2db1855 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -143,14 +143,11 @@ struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; - u16 default_vf_vlan_id; - u16 vlans_enabled; bool clear_to_send; bool pf_set_mac; u16 pf_vlan; /* When set, guest VLAN config not allowed. */ u16 pf_qos; u16 tx_rate; - u16 vlan_count; u8 spoofchk_enabled; bool rss_query_enabled; u8 trusted; @@ -173,7 +170,7 @@ struct vf_macvlans { }; #define IXGBE_MAX_TXD_PWR 14 -#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) +#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) @@ -456,7 +453,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) IXGBE_QV_STATE_POLL); #ifdef BP_EXTENDED_STATS if (rc != IXGBE_QV_STATE_IDLE) - q_vector->tx.ring->stats.yields++; + q_vector->rx.ring->stats.yields++; #endif return rc == IXGBE_QV_STATE_IDLE; } @@ -623,44 +620,45 @@ struct ixgbe_adapter { * thus the additional *_CAPABLE flags. */ u32 flags; -#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) -#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) -#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) -#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6) -#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) -#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9) -#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11) -#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12) -#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13) -#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14) -#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15) -#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16) -#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17) -#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18) -#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19) -#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20) -#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21) -#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22) -#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23) +#define IXGBE_FLAG_MSI_ENABLED BIT(1) +#define IXGBE_FLAG_MSIX_ENABLED BIT(3) +#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4) +#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5) +#define IXGBE_FLAG_RX_PS_ENABLED BIT(6) +#define IXGBE_FLAG_DCA_ENABLED BIT(8) +#define IXGBE_FLAG_DCA_CAPABLE BIT(9) +#define IXGBE_FLAG_IMIR_ENABLED BIT(10) +#define IXGBE_FLAG_MQ_CAPABLE BIT(11) +#define IXGBE_FLAG_DCB_ENABLED BIT(12) +#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13) +#define IXGBE_FLAG_VMDQ_ENABLED BIT(14) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15) +#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16) +#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19) +#define IXGBE_FLAG_FCOE_CAPABLE BIT(20) +#define IXGBE_FLAG_FCOE_ENABLED BIT(21) +#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22) +#define 
IXGBE_FLAG_SRIOV_ENABLED BIT(23) #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) +#define IXGBE_FLAG_DCB_CAPABLE BIT(27) u32 flags2; -#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) -#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) -#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3) -#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4) -#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5) -#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6) -#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) -#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) -#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) -#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) +#define IXGBE_FLAG2_RSC_CAPABLE BIT(0) +#define IXGBE_FLAG2_RSC_ENABLED BIT(1) +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2) +#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3) +#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4) +#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5) +#define IXGBE_FLAG2_RESET_REQUESTED BIT(6) +#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7) +#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8) +#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) +#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10) +#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) #define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) @@ -795,7 +793,7 @@ struct ixgbe_adapter { unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ #define IXGBE_MAX_LINK_HANDLE 10 - struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE]; + struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE]; unsigned long tables; /* maximum number of RETA entries among all devices supported by ixgbe @@ -806,6 +804,8 @@ struct ixgbe_adapter { #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; + + bool need_crosstalk_fix; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -817,6 +817,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) return IXGBE_MAX_RSS_INDICES; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: return IXGBE_MAX_RSS_INDICES_X550; default: return 0; @@ -827,7 +828,7 @@ struct ixgbe_fdir_filter { struct hlist_node fdir_node; union ixgbe_atr_input filter; u16 sw_idx; - u16 action; + u64 action; }; enum ixgbe_state_t { @@ -860,13 +861,15 @@ enum ixgbe_boards { board_X540, board_X550, board_X550EM_x, + board_x550em_a, }; -extern struct ixgbe_info ixgbe_82598_info; -extern struct ixgbe_info ixgbe_82599_info; -extern struct ixgbe_info ixgbe_X540_info; -extern struct ixgbe_info ixgbe_X550_info; -extern struct ixgbe_info ixgbe_X550EM_x_info; +extern const struct ixgbe_info ixgbe_82598_info; +extern const struct ixgbe_info ixgbe_82599_info; +extern const struct ixgbe_info ixgbe_X540_info; +extern const struct ixgbe_info ixgbe_X550_info; +extern const struct ixgbe_info ixgbe_X550EM_x_info; +extern const struct ixgbe_info ixgbe_x550em_a_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops dcbnl_ops; #endif @@ -893,8 +896,8 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); void ixgbe_update_stats(struct ixgbe_adapter *adapter); int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); -int ixgbe_wol_supported(struct 
ixgbe_adapter *adapter, u16 device_id, - u16 subdevice_id); +bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id); #ifdef CONFIG_PCI_IOV void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index d8a9fb8a5..fb51be74d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -792,7 +792,7 @@ mac_reset_top: } gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); - gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); + gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6)); IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); /* @@ -914,10 +914,10 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); if (vlan_on) /* Turn on this VLAN id */ - bits |= (1 << bitindex); + bits |= BIT(bitindex); else /* Turn off this VLAN id */ - bits &= ~(1 << bitindex); + bits &= ~BIT(bitindex); IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); return 0; @@ -1160,7 +1160,7 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); } -static struct ixgbe_mac_operations mac_ops_82598 = { +static const struct ixgbe_mac_operations mac_ops_82598 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82598, .start_hw = &ixgbe_start_hw_82598, @@ -1192,9 +1192,11 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .clear_vfta = &ixgbe_clear_vfta_82598, .set_vfta = &ixgbe_set_vfta_82598, .fc_enable = &ixgbe_fc_enable_82598, + .setup_fc = ixgbe_setup_fc_generic, .set_fw_drv_ver = NULL, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, + .init_swfw_sync = NULL, .get_thermal_sensor_data = NULL, .init_thermal_sensor_thresh = NULL, .prot_autoc_read = &prot_autoc_read_generic, @@ -1203,7 +1205,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .disable_rx = &ixgbe_disable_rx_generic, }; -static struct ixgbe_eeprom_operations eeprom_ops_82598 = { +static const struct ixgbe_eeprom_operations eeprom_ops_82598 = { .init_params = &ixgbe_init_eeprom_params_generic, .read = &ixgbe_read_eerd_generic, .write = &ixgbe_write_eeprom_generic, @@ -1214,7 +1216,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = { .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; -static struct ixgbe_phy_operations phy_ops_82598 = { +static const struct ixgbe_phy_operations phy_ops_82598 = { .identify = &ixgbe_identify_phy_generic, .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82598, @@ -1230,7 +1232,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = { .check_overtemp = &ixgbe_tn_check_overtemp, }; -struct ixgbe_info ixgbe_82598_info = { +const struct ixgbe_info ixgbe_82598_info = { .mac = ixgbe_mac_82598EB, .get_invariants = &ixgbe_get_invariants_82598, .mac_ops = &mac_ops_82598, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index fa8d4f40a..47afed74a 100644 --- 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -1296,17 +1296,17 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \ common_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ bucket_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \ sig_hash ^= lo_hash_dword << (16 - n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \ common_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ bucket_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \ sig_hash ^= hi_hash_dword << (16 - n); \ } while (0) @@ -1440,9 +1440,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ - if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ bucket_hash ^= lo_hash_dword >> n; \ - if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ bucket_hash ^= hi_hash_dword >> n; \ } while (0) @@ -1633,6 +1633,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); break; default: @@ -2181,7 +2182,7 @@ release_i2c_access: return status; } -static struct ixgbe_mac_operations mac_ops_82599 = { +static const struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, .start_hw = &ixgbe_start_hw_82599, @@ -2220,6 +2221,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .clear_vfta = &ixgbe_clear_vfta_generic, .set_vfta = &ixgbe_set_vfta_generic, .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, .init_uta_tables = &ixgbe_init_uta_tables_generic, .setup_sfp = &ixgbe_setup_sfp_modules_82599, @@ -2227,6 +2229,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, + .init_swfw_sync = NULL, .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, .prot_autoc_read = &prot_autoc_read_82599, @@ -2235,7 +2238,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .disable_rx = &ixgbe_disable_rx_generic, }; -static struct ixgbe_eeprom_operations eeprom_ops_82599 = { +static const struct ixgbe_eeprom_operations eeprom_ops_82599 = { .init_params = 
&ixgbe_init_eeprom_params_generic, .read = &ixgbe_read_eeprom_82599, .read_buffer = &ixgbe_read_eeprom_buffer_82599, @@ -2246,7 +2249,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = { .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; -static struct ixgbe_phy_operations phy_ops_82599 = { +static const struct ixgbe_phy_operations phy_ops_82599 = { .identify = &ixgbe_identify_phy_82599, .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82599, @@ -2263,7 +2266,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = { .check_overtemp = &ixgbe_tn_check_overtemp, }; -struct ixgbe_info ixgbe_82599_info = { +const struct ixgbe_info ixgbe_82599_info = { .mac = ixgbe_mac_82599EB, .get_invariants = &ixgbe_get_invariants_82599, .mac_ops = &mac_ops_82599, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 64045053e..902d2061c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -97,6 +97,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550T1: case IXGBE_DEV_ID_X550EM_X_10G_T: supported = true; break; @@ -111,12 +112,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) } /** - * ixgbe_setup_fc - Set up flow control + * ixgbe_setup_fc_generic - Set up flow control * @hw: pointer to hardware structure * * Called at init time to set up flow control. **/ -static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) { s32 ret_val = 0; u32 reg = 0, reg_bp = 0; @@ -296,7 +297,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); /* Setup flow control */ - ret_val = ixgbe_setup_fc(hw); + ret_val = hw->mac.ops.setup_fc(hw); if (ret_val) return ret_val; @@ -681,6 +682,7 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; + u16 ee_ctrl_4; u32 reg; reg = IXGBE_READ_REG(hw, IXGBE_STATUS); @@ -691,6 +693,13 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); if (reg & IXGBE_FACTPS_LFS) bus->func ^= 0x1; + + /* Get MAC instance from EEPROM for configuring CS4227 */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); + bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> + IXGBE_EE_CTRL_4_INST_ID_SHIFT; + } } /** @@ -816,8 +825,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) */ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); } if (eec & IXGBE_EEC_ADDR_SIZE) @@ -1493,7 +1502,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, * Mask is used to shift "count" bits of "data" out to the EEPROM * one bit at a time. 
Determine the starting bit based on count */ - mask = 0x01 << (count - 1); + mask = BIT(count - 1); for (i = 0; i < count; i++) { /* @@ -1982,7 +1991,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) */ vector_reg = (vector >> 5) & 0x7F; vector_bit = vector & 0x1F; - hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); + hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit); } /** @@ -2854,6 +2863,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; @@ -2911,10 +2921,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) mpsar_hi = 0; } } else if (vmdq < 32) { - mpsar_lo &= ~(1 << vmdq); + mpsar_lo &= ~BIT(vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); } else { - mpsar_hi &= ~(1 << (vmdq - 32)); + mpsar_hi &= ~BIT(vmdq - 32); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); } @@ -2943,11 +2953,11 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) if (vmdq < 32) { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar |= 1 << vmdq; + mpsar |= BIT(vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); } else { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - mpsar |= 1 << (vmdq - 32); + mpsar |= BIT(vmdq - 32); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); } return 0; @@ -2968,11 +2978,11 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) u32 rar = hw->mac.san_mac_rar_index; if (vmdq < 32) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq)); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); } else { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32)); } return 0; @@ -3072,7 +3082,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * bits[4-0]: which bit in the register */ regidx = vlan / 32; - vfta_delta = 1 << (vlan % 32); + vfta_delta = BIT(vlan % 32); vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); /* vfta_delta represents the difference between the current value @@ -3103,12 +3113,12 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); /* set the pool bit */ - bits |= 1 << (vind % 32); + bits |= BIT(vind % 32); if (vlan_on) goto vlvf_update; /* clear the pool bit */ - bits ^= 1 << (vind % 32); + bits ^= BIT(vind % 32); if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { @@ -3300,43 +3310,25 @@ wwn_prefix_err: /** * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing * @hw: pointer to hardware structure - * @enable: enable or disable switch for anti-spoofing - * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * @enable: enable or disable switch for MAC anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing * **/ -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) { - int j; - int pf_target_reg = pf >> 3; - int pf_target_shift = pf % 8; - u32 pfvfspoof = 0; + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8; + u32 pfvfspoof; if (hw->mac.type == ixgbe_mac_82598EB) return; + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 
if (enable) - pfvfspoof = IXGBE_SPOOF_MACAS_MASK; - - /* - * PFVFSPOOF register array is size 8 with 8 bits assigned to - * MAC anti-spoof enables in each register array element. - */ - for (j = 0; j < pf_target_reg; j++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); - - /* - * The PF should be allowed to spoof so that it can support - * emulation mode NICs. Do not set the bits assigned to the PF - */ - pfvfspoof &= (1 << pf_target_shift) - 1; - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); - - /* - * Remaining pools belong to the PF so they do not need to have - * anti-spoofing enabled. - */ - for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0); + pfvfspoof |= BIT(vf_target_shift); + else + pfvfspoof &= ~BIT(vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** @@ -3357,9 +3349,9 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - pfvfspoof |= (1 << vf_target_shift); + pfvfspoof |= BIT(vf_target_shift); else - pfvfspoof &= ~(1 << vf_target_shift); + pfvfspoof &= ~BIT(vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } @@ -3483,18 +3475,27 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * Communicates with the manageability block. On success return 0 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. **/ -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, u32 length, u32 timeout, bool return_data) { - u32 hicr, i, bi, fwsts; u32 hdr_size = sizeof(struct ixgbe_hic_hdr); + u32 hicr, i, bi, fwsts; u16 buf_len, dword_len; + union { + struct ixgbe_hic_hdr hdr; + u32 u32arr[1]; + } *bp = buffer; + s32 status; - if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Take management host interface semaphore */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + if (status) + return status; /* Set bit 9 of FWSTS clearing FW reset indication */ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); @@ -3502,26 +3503,27 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, /* Check that the host interface is enabled. */ hicr = IXGBE_READ_REG(hw, IXGBE_HICR); - if ((hicr & IXGBE_HICR_EN) == 0) { + if (!(hicr & IXGBE_HICR_EN)) { hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } /* Calculate length in DWORDs. We must be DWORD aligned */ - if ((length % (sizeof(u32))) != 0) { + if (length % sizeof(u32)) { hw_dbg(hw, "Buffer length failure, not aligned to dword"); - return IXGBE_ERR_INVALID_ARGUMENT; + status = IXGBE_ERR_INVALID_ARGUMENT; + goto rel_out; } dword_len = length >> 2; - /* - * The device driver writes the relevant command block + /* The device driver writes the relevant command block * into the ram area. */ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, - i, cpu_to_le32(buffer[i])); + i, cpu_to_le32(bp->u32arr[i])); /* Setting this bit tells the ARC that a new command is pending. 
*/ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); @@ -3534,44 +3536,49 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, } /* Check command successful completion. */ - if ((timeout != 0 && i == timeout) || - (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { + if ((timeout && i == timeout) || + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { hw_dbg(hw, "Command has failed with no status valid.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } if (!return_data) - return 0; + goto rel_out; /* Calculate length in DWORDs */ dword_len = hdr_size >> 2; /* first pull in the header so we know the buffer length */ for (bi = 0; bi < dword_len; bi++) { - buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - le32_to_cpus(&buffer[bi]); + bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + le32_to_cpus(&bp->u32arr[bi]); } /* If there is any thing in data position pull it in */ - buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; - if (buf_len == 0) - return 0; + buf_len = bp->hdr.buf_len; + if (!buf_len) + goto rel_out; - if (length < (buf_len + hdr_size)) { + if (length < round_up(buf_len, 4) + hdr_size) { hw_dbg(hw, "Buffer not large enough for reply message.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } /* Calculate length in DWORDs, add 3 for odd lengths */ dword_len = (buf_len + 3) >> 2; - /* Pull in the rest of the buffer (bi is where we left off)*/ + /* Pull in the rest of the buffer (bi is where we left off) */ for (; bi <= dword_len; bi++) { - buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - le32_to_cpus(&buffer[bi]); + bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + le32_to_cpus(&bp->u32arr[bi]); } - return 0; +rel_out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + + return status; } /** @@ -3594,13 +3601,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, int i; s32 ret_val; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)) - return IXGBE_ERR_SWFW_SYNC; - fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.port_num = hw->bus.func; fw_cmd.ver_maj = maj; fw_cmd.ver_min = min; fw_cmd.ver_build = build; @@ -3612,7 +3616,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, fw_cmd.pad2 = 0; for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + ret_val = ixgbe_host_interface_command(hw, &fw_cmd, sizeof(fw_cmd), IXGBE_HI_COMMAND_TIMEOUT, true); @@ -3628,7 +3632,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, break; } - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); return ret_val; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 2b9563137..6d4c260d0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -81,6 +81,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *); bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); @@ -105,13 +106,13 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver); -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, - u32 length, u32 timeout, bool return_data); +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, + u32 timeout, bool return_data); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); bool ixgbe_mng_present(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 02c7333a9..072ef3b5f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -186,7 +186,7 @@ void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { if (tc_config[tc].dcb_pfc != pfc_disabled) - *pfc_en |= 1 << tc; + *pfc_en |= BIT(tc); } } @@ -232,7 +232,7 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) { struct tc_configuration *tc_config = &cfg->tc_config[0]; - u8 prio_mask = 1 << up; + u8 prio_mask = BIT(up); u8 tc = cfg->num_tcs.pg_tcs; /* If tc is 0 then DCB is likely not enabled or supported */ @@ -293,6 +293,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, bwgid, ptype, prio_tc); default: @@ -311,6 +312,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); default: break; @@ -368,6 +370,7 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, @@ -398,6 +401,7 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: ixgbe_dcb_read_rtrup2tc_82599(hw, map); break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index d3ba63f9a..b79e93a5b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -210,7 +210,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) /* Configure PFC Tx thresholds per TC */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - if (!(pfc_en & (1 << i))) { + if (!(pfc_en & BIT(i))) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); continue; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index b5cc989a3..1011d6449 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -248,7 +248,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) int enabled = 0; for (j = 0; j < MAX_USER_PRIORITY; j++) { - if ((prio_tc[j] == i) && (pfc_en & (1 << j))) { + if ((prio_tc[j] == i) && (pfc_en & BIT(j))) { enabled = 1; break; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 2707bda37..b8fc3cfec 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -62,7 +62,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) }; u8 up = dcb_getapp(adapter->netdev, &app); - if (up && !(up & (1 << adapter->fcoe.up))) + if (up && !(up & BIT(adapter->fcoe.up))) changes |= BIT_APP_UPCHG; #endif @@ -657,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, app->protocol == ETH_P_FCOE) { u8 app_mask = 
dcb_ieee_getapp_mask(dev, app); - if (app_mask & (1 << adapter->fcoe.up)) + if (app_mask & BIT(adapter->fcoe.up)) return 0; adapter->fcoe.up = app->priority; @@ -700,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); - if (app_mask & (1 << adapter->fcoe.up)) + if (app_mask & BIT(adapter->fcoe.up)) return 0; adapter->fcoe.up = app_mask ? diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index b3530e1e3..59b771b9b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -533,10 +533,8 @@ static void ixgbe_get_regs(struct net_device *netdev, /* Flow Control */ regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); - regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); - regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); - regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); - regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); + for (i = 0; i < 4; i++) + regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i)); for (i = 0; i < 8; i++) { switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -547,6 +545,7 @@ static void ixgbe_get_regs(struct net_device *netdev, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); break; @@ -660,6 +659,7 @@ static void ixgbe_get_regs(struct net_device *netdev, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); for (i = 0; i < 8; i++) @@ -718,8 +718,10 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); - regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); - regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); + regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc); + regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32); + regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc); + regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32); for (i = 0; i < 8; i++) regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); @@ -729,7 +731,8 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); - regs_buff[961] = IXGBE_GET_STAT(adapter, tor); + regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor); + regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32); regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); @@ -801,15 +804,11 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); regs_buff[1100] = 
IXGBE_READ_REG(hw, IXGBE_TDPROBE); regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); - regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); - regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); - regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); - regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); + for (i = 0; i < 4; i++) + regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i)); regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); - regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); - regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); - regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); - regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); + for (i = 0; i < 4; i++) + regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i)); for (i = 0; i < 8; i++) regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); @@ -1443,6 +1442,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: toggle = 0x7FFFF30F; test = reg_test_82599; break; @@ -1583,7 +1583,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) /* Test each interrupt */ for (; i < 10; i++) { /* Interrupt to test */ - mask = 1 << i; + mask = BIT(i); if (!shared_int) { /* @@ -1681,6 +1681,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); reg_ctl &= ~IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); @@ -1720,6 +1721,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); @@ -1780,6 +1782,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); reg_data |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); @@ -2991,6 +2994,7 @@ static int ixgbe_get_ts_info(struct net_device *dev, switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = @@ -3007,14 +3011,14 @@ static int ixgbe_get_ts_info(struct net_device *dev, info->phc_index = -1; info->tx_types = - (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); info->rx_filters = - (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); break; default: return ethtool_op_get_ts_info(dev, info); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index e771e764d..bcdc88444 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - 
Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -128,6 +128,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (num_tcs > 4) { /* * TCs : TC0/1 TC2/3 TC4-7 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7df3fe29b..8bebd862a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -53,15 +53,7 @@ #include <net/vxlan.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> - -#ifdef CONFIG_OF -#include <linux/of_net.h> -#endif - -#ifdef CONFIG_SPARC -#include <asm/idprom.h> -#include <asm/prom.h> -#endif +#include <net/tc_act/tc_mirred.h> #include "ixgbe.h" #include "ixgbe_common.h" @@ -79,10 +71,10 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "4.2.1-k" +#define DRV_VERSION "4.4.0-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = - "Copyright (c) 1999-2015 Intel Corporation."; + "Copyright (c) 1999-2016 Intel Corporation."; static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. 
If the problem persists, power off the system and replace the adapter"; @@ -92,6 +84,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_X540] = &ixgbe_X540_info, [board_X550] = &ixgbe_X550_info, [board_X550EM_x] = &ixgbe_X550EM_x_info, + [board_x550em_a] = &ixgbe_x550em_a_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -134,10 +127,17 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, /* required last entry */ {0, } }; @@ -372,6 +372,27 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) if (ixgbe_removed(reg_addr)) return IXGBE_FAILED_READ_REG; + if (unlikely(hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) { + struct ixgbe_adapter *adapter; + int i; + + for (i = 0; i < 200; ++i) { + value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY); + if (likely(!value)) + goto writes_completed; + if (value == IXGBE_FAILED_READ_REG) { + ixgbe_remove_adapter(hw); + return IXGBE_FAILED_READ_REG; + } + udelay(5); + } + + adapter = hw->back; + e_warn(hw, "register writes incomplete %08x\n", value); + } + +writes_completed: value = readl(reg_addr + reg); if (unlikely(value == IXGBE_FAILED_READ_REG)) ixgbe_check_remove(hw, reg); @@ -588,7 +609,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, netdev->state, - netdev->trans_start, + dev_trans_start(netdev), netdev->last_rx); } @@ -869,6 +890,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -907,6 +929,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); @@ -1087,9 +1110,40 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) } /** + * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate + **/ +static int ixgbe_tx_maxrate(struct net_device *netdev, + int queue_index, u32 maxrate) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 bcnrc_val = ixgbe_link_mbps(adapter); + + if (!maxrate) + return 0; + + /* Calculate the rate factor values to set */ + bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; + bcnrc_val /= maxrate; + + /* clear everything but the rate factor */ + bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | + IXGBE_RTTBCNRC_RF_DEC_MASK; + + /* enable the rate scheduler */ + 
bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + + return 0; +} + +/** * ixgbe_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean + * @napi_budget: Used to determine if we are in netpoll **/ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring, int napi_budget) @@ -2192,7 +2246,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) /* Populate MSIX to EITR Select */ if (adapter->num_vfs > 32) { - u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; + u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); } @@ -2222,6 +2276,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; default: @@ -2333,6 +2388,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt @@ -2494,6 +2550,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) return false; case ixgbe_mac_82599EB: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: case ixgbe_media_type_fiber_qsfp: @@ -2568,6 +2625,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); @@ -2596,6 +2654,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); @@ -2631,6 +2690,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask |= IXGBE_EIMS_TS; break; default: @@ -2646,7 +2706,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP) + case ixgbe_mac_x550em_a: + if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || + adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || + adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) mask |= IXGBE_EICR_GPI_SDP0_X540; @@ -2704,6 +2767,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (hw->phy.type == ixgbe_phy_x550em_ext_t && (eicr & IXGBE_EICR_GPI_SDP0_X540)) { adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; @@ -2786,8 +2850,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget) ixgbe_update_dca(q_vector); #endif - ixgbe_for_each_ring(ring, q_vector->tx) - clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget); + ixgbe_for_each_ring(ring, q_vector->tx) { + if (!ixgbe_clean_tx_irq(q_vector, ring, budget)) + clean_complete = 
false; + } /* Exit if we are called by netpoll or busy polling is active */ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) @@ -2805,7 +2871,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget) per_ring_budget); work_done += cleaned; - clean_complete &= (cleaned < per_ring_budget); + if (cleaned >= per_ring_budget) + clean_complete = false; } ixgbe_qv_unlock_napi(q_vector); @@ -2818,9 +2885,9 @@ int ixgbe_poll(struct napi_struct *napi, int budget) if (adapter->rx_itr_setting & 1) ixgbe_set_itr(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); + ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx)); - return 0; + return min(work_done, budget - 1); } /** @@ -2937,6 +3004,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; @@ -3033,6 +3101,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); @@ -3109,15 +3178,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, * currently 40. */ if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) - txdctl |= (1 << 16); /* WTHRESH = 1 */ + txdctl |= 1u << 16; /* WTHRESH = 1 */ else - txdctl |= (8 << 16); /* WTHRESH = 8 */ + txdctl |= 8u << 16; /* WTHRESH = 8 */ /* * Setting PTHRESH to 32 both improves performance * and avoids a TX hang with DFP enabled */ - txdctl |= (1 << 8) | /* HTHRESH = 1 */ + txdctl |= (1u << 8) | /* HTHRESH = 1 */ 32; /* PTHRESH = 32 */ /* reinitialize flowdirector state */ @@ -3669,9 +3738,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) return; if (rss_i > 3) - psrtype |= 2 << 29; + psrtype |= 2u << 29; else if (rss_i > 1) - psrtype |= 1 << 29; + psrtype |= 1u << 29; for_each_set_bit(pool, &adapter->fwd_bitmask, 32) IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); @@ -3698,9 +3767,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0; /* Enable only the PF's pool for Tx/Rx */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift)); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift)); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); if (adapter->bridge_mode == BRIDGE_MODE_VEB) IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); @@ -3729,34 +3798,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); - - /* Enable MAC Anti-Spoofing */ - hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), - adapter->num_vfs); - - /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be - * calling set_ethertype_anti_spoofing for each VF in loop below - */ - if (hw->mac.ops.set_ethertype_anti_spoofing) { - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), - (IXGBE_ETQF_FILTER_EN | - IXGBE_ETQF_TX_ANTISPOOF | - IXGBE_ETH_P_LLDP)); - - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), - (IXGBE_ETQF_FILTER_EN | - IXGBE_ETQF_TX_ANTISPOOF | - ETH_P_PAUSE)); - } - - /* For VFs that have spoof checking turned off */ for (i = 0; i < adapter->num_vfs; i++) { - if (!adapter->vfinfo[i].spoofchk_enabled) - ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); - - /* enable ethertype anti spoofing if hw supports it */ - if (hw->mac.ops.set_ethertype_anti_spoofing) - hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i); + /* configure spoof checking */ + ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, + adapter->vfinfo[i].spoofchk_enabled); /* Enable/Disable RSS query feature */ ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, @@ -3832,6 +3877,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (adapter->num_vfs) rdrxctl |= IXGBE_RDRXCTL_PSP; /* fall through for older HW */ @@ -3908,7 +3954,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* add VID to filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true); + if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); + set_bit(vid, adapter->active_vlans); return 0; @@ -3947,7 +3995,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) * entry other than the PF. */ word = idx * 2 + (VMDQ_P(0) / 32); - bits = ~(1 << (VMDQ_P(0)) % 32); + bits = ~BIT(VMDQ_P(0) % 32); bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); /* Disable the filter so this falls into the default pool. 
*/ @@ -3965,9 +4013,7 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* remove VID from filter table */ - if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) - ixgbe_update_pf_promisc_vlvf(adapter, vid); - else + if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); clear_bit(vid, adapter->active_vlans); @@ -3995,6 +4041,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -4031,6 +4078,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -4057,6 +4105,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: default: if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) break; @@ -4081,7 +4130,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); - vlvfb |= 1 << (VMDQ_P(0) % 32); + vlvfb |= BIT(VMDQ_P(0) % 32); IXGBE_WRITE_REG(hw, reg_offset, vlvfb); } @@ -4111,7 +4160,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) if (vlvf) { /* record VLAN ID in VFTA */ - vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); /* if PF is part of this then continue */ if (test_bit(vid, adapter->active_vlans)) @@ -4120,7 +4169,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) /* remove PF from the pool */ word = i * 2 + VMDQ_P(0) / 32; - bits = ~(1 << (VMDQ_P(0) % 32)); + bits = ~BIT(VMDQ_P(0) % 32); bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); } @@ -4147,6 +4196,7 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: default: if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) break; @@ -4172,11 +4222,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) { - u16 vid; + u16 vid = 1; ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } @@ -4426,6 +4476,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; + netdev_features_t features = netdev->features; int count; /* Check for Promiscuous and All Multicast modes */ @@ -4443,14 +4494,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev) hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); vmolr |= IXGBE_VMOLR_MPE; - ixgbe_vlan_promisc_enable(adapter); + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; } 
hw->addr_ctrl.user_set_promisc = false; - ixgbe_vlan_promisc_disable(adapter); } /* @@ -4483,7 +4533,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) } /* This is useful for sniffing bad packets. */ - if (adapter->netdev->features & NETIF_F_RXALL) { + if (features & NETIF_F_RXALL) { /* UPE and MPE will be handled by normal PROMISC logic * in e1000e_set_rx_mode */ fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ @@ -4496,10 +4546,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev) IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) ixgbe_vlan_strip_enable(adapter); else ixgbe_vlan_strip_disable(adapter); + + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + ixgbe_vlan_promisc_disable(adapter); + else + ixgbe_vlan_promisc_enable(adapter); } static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) @@ -4530,6 +4585,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter) switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); adapter->vxlan_port = 0; break; @@ -4630,6 +4686,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: dv_id = IXGBE_DV_X540(link, tc); break; default: @@ -4690,6 +4747,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: dv_id = IXGBE_LOW_DV_X540(tc); break; default: @@ -4805,9 +4863,9 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) return; if (rss_i > 3) - psrtype |= 2 << 29; + psrtype |= 2u << 29; else if (rss_i > 1) - psrtype |= 1 << 29; + psrtype |= 1u << 29; IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); } @@ -4871,7 +4929,7 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, /* shutdown specific queue receive and wait for dma to settle */ ixgbe_disable_rx_queue(adapter, rx_ring); usleep_range(10000, 20000); - ixgbe_irq_disable_queues(adapter, ((u64)1 << index)); + ixgbe_irq_disable_queues(adapter, BIT_ULL(index)); ixgbe_clean_rx_ring(rx_ring); rx_ring->l2_accel_priv = NULL; } @@ -5106,6 +5164,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: default: IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); @@ -5156,6 +5215,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; break; case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: gpie |= IXGBE_SDP0_GPIEN_X540; break; default: @@ -5228,7 +5288,7 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) { WARN_ON(in_interrupt()); /* put off any impending NetWatchDogTimeout */ - adapter->netdev->trans_start = jiffies; + netif_trans_update(adapter->netdev); while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); @@ -5467,6 +5527,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); @@ -5498,6 +5559,58 @@ static void ixgbe_tx_timeout(struct net_device *netdev) ixgbe_tx_timeout_reset(adapter); } 
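Several conversions in the hunks above are about shift width and signedness rather than readability: 2 << 29 becomes 2u << 29, and (u64)1 << index becomes BIT_ULL(index) so the queue mask stays 64 bits wide. A minimal userspace sketch of the distinction (not driver code; BIT_ULL64() is a simplified stand-in for the kernel's BIT_ULL()):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL64(n) (UINT64_C(1) << (n))

int main(void)
{
        /* unsigned shift literals keep register math out of signed-int
         * territory (e.g. a 31-bit shift of a signed 1 would be undefined) */
        uint32_t psrtype = UINT32_C(2) << 29;

        /* for queue indexes >= 32 only a 64-bit shift gives a usable mask;
         * a plain 1 << index would be evaluated in 32-bit int arithmetic */
        unsigned int index = 40;
        uint64_t eics = BIT_ULL64(index);

        printf("psrtype=0x%08x eics=0x%016llx\n",
               psrtype, (unsigned long long)eics);
        return 0;
}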
+#ifdef CONFIG_IXGBE_DCB +static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct tc_configuration *tc; + int j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + default: + adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; + break; + } + + /* Configure DCB traffic classes */ + for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { + tc = &adapter->dcb_cfg.tc_config[j]; + tc->path[DCB_TX_CONFIG].bwg_id = 0; + tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); + tc->path[DCB_RX_CONFIG].bwg_id = 0; + tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); + tc->dcb_pfc = pfc_disabled; + } + + /* Initialize default user to priority mapping, UPx->TC0 */ + tc = &adapter->dcb_cfg.tc_config[0]; + tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + + adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_set_bitmap = 0x00; + if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, + sizeof(adapter->temp_dcb_cfg)); +} +#endif + /** * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) * @adapter: board private structure to initialize @@ -5512,10 +5625,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) struct pci_dev *pdev = adapter->pdev; unsigned int rss, fdir; u32 fwsm; -#ifdef CONFIG_IXGBE_DCB - int j; - struct tc_configuration *tc; -#endif + u16 device_caps; + int i; /* PCI config space info */ @@ -5537,6 +5648,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCA adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; #endif +#ifdef CONFIG_IXGBE_DCB + adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; +#endif #ifdef IXGBE_FCOE adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; @@ -5547,7 +5662,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #endif /* IXGBE_FCOE */ /* initialize static ixgbe jump table entries */ - adapter->jump_tables[0] = ixgbe_ipv4_fields; + adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]), + GFP_KERNEL); + if (!adapter->jump_tables[0]) + return -ENOMEM; + adapter->jump_tables[0]->mat = ixgbe_ipv4_fields; + + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) + adapter->jump_tables[i] = NULL; adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * hw->mac.num_rar_entries, @@ -5585,6 +5707,17 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: +#ifdef CONFIG_IXGBE_DCB + adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; +#endif +#ifdef IXGBE_FCOE + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; +#ifdef CONFIG_IXGBE_DCB + adapter->fcoe.up = 0; +#endif /* IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + /* Fall Through */ case ixgbe_mac_X550: #ifdef CONFIG_IXGBE_DCA adapter->flags &= 
~IXGBE_FLAG_DCA_CAPABLE; @@ -5606,42 +5739,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) spin_lock_init(&adapter->fdir_perfect_lock); #ifdef CONFIG_IXGBE_DCB - switch (hw->mac.type) { - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; - break; - default: - adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; - break; - } - - /* Configure DCB traffic classes */ - for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { - tc = &adapter->dcb_cfg.tc_config[j]; - tc->path[DCB_TX_CONFIG].bwg_id = 0; - tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); - tc->path[DCB_RX_CONFIG].bwg_id = 0; - tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); - tc->dcb_pfc = pfc_disabled; - } - - /* Initialize default user to priority mapping, UPx->TC0 */ - tc = &adapter->dcb_cfg.tc_config[0]; - tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; - tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; - - adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; - adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; - adapter->dcb_cfg.pfc_mode_enable = false; - adapter->dcb_set_bitmap = 0x00; - adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; - memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, - sizeof(adapter->temp_dcb_cfg)); - + ixgbe_init_dcb(adapter); #endif /* default flow control settings */ @@ -5675,6 +5773,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->tx_ring_count = IXGBE_DEFAULT_TXD; adapter->rx_ring_count = IXGBE_DEFAULT_RXD; + /* Cache bit indicating need for crosstalk fix */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + hw->mac.ops.get_device_caps(hw, &device_caps); + if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) + adapter->need_crosstalk_fix = false; + else + adapter->need_crosstalk_fix = true; + break; + default: + adapter->need_crosstalk_fix = false; + break; + } + /* set default work limits */ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; @@ -6217,6 +6331,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: pci_wake_from_d3(pdev, !!wufc); break; default: @@ -6352,6 +6467,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); break; @@ -6367,7 +6483,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) if ((hw->mac.type == ixgbe_mac_82599EB) || (hw->mac.type == ixgbe_mac_X540) || (hw->mac.type == ixgbe_mac_X550) || - (hw->mac.type == ixgbe_mac_X550EM_x)) { + (hw->mac.type == ixgbe_mac_X550EM_x) || + (hw->mac.type == ixgbe_mac_x550em_a)) { hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); @@ -6392,6 +6509,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* OS2BMC stats are X540 and later */ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); @@ -6562,7 +6680,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) for (i = 0; i < 
adapter->num_q_vectors; i++) { struct ixgbe_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) - eics |= ((u64)1 << i); + eics |= BIT_ULL(i); } } @@ -6593,6 +6711,18 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) link_up = true; } + /* If Crosstalk fix enabled do the sanity check of making sure + * the SFP+ cage is empty. + */ + if (adapter->need_crosstalk_fix) { + u32 sfp_cage_full; + + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full) + link_up = false; + } + if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); @@ -6662,6 +6792,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: case ixgbe_mac_82599EB: { u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); @@ -6938,6 +7069,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; s32 err; + /* If crosstalk fix enabled verify the SFP+ cage is full */ + if (adapter->need_crosstalk_fix) { + u32 sfp_cage_full; + + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + if (!sfp_cage_full) + return; + } + /* not searching for SFP so there is nothing to do here */ if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) @@ -7122,10 +7263,12 @@ static void ixgbe_service_task(struct work_struct *work) return; } #ifdef CONFIG_IXGBE_VXLAN + rtnl_lock(); if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; vxlan_get_rx_port(adapter->netdev); } + rtnl_unlock(); #endif /* CONFIG_IXGBE_VXLAN */ ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); @@ -7148,9 +7291,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, u8 *hdr_len) { + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; - u32 vlan_macip_lens, type_tucmd; - u32 mss_l4len_idx, l4len; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -7163,46 +7315,52 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, if (err < 0) return err; + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_add(lco_csum(skb), + csum_unfold(l4.tcp->check))); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM | IXGBE_TX_FLAGS_IPV4; - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + } else { + ip.v6->payload_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; } - /* compute header lengths */ - l4len = 
tcp_hdrlen(skb); - *hdr_len = skb_transport_offset(skb) + l4len; + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* update gso size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* mss_l4len_id: use 0 as index for TSO */ - mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ - vlan_macip_lens = skb_network_header_len(skb); - vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, @@ -7211,103 +7369,61 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, return 1; } +static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb) +{ + unsigned int offset = 0; + + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); + + return offset == skb_checksum_start_offset(skb); +} + static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; - u32 mss_l4len_idx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && - !(first->tx_flags & IXGBE_TX_FLAGS_CC)) +csum_failed: + if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | + IXGBE_TX_FLAGS_CC))) return; - vlan_macip_lens = skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } else { - u8 l4_hdr = 0; - union { - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - u8 *raw; - } network_hdr; - union { - struct tcphdr *tcphdr; - u8 *raw; - } transport_hdr; - __be16 frag_off; - - if (skb->encapsulation) { - network_hdr.raw = skb_inner_network_header(skb); - transport_hdr.raw = skb_inner_transport_header(skb); - vlan_macip_lens = skb_inner_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } else { - network_hdr.raw = skb_network_header(skb); - transport_hdr.raw = skb_transport_header(skb); - vlan_macip_lens = skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } - - /* use first 4 bits to determine IP version */ - switch (network_hdr.ipv4->version) { - case IPVERSION: - vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; - type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - l4_hdr = network_hdr.ipv4->protocol; - break; - case 6: - vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; - l4_hdr = network_hdr.ipv6->nexthdr; - if (likely((transport_hdr.raw - network_hdr.raw) == - sizeof(struct ipv6hdr))) - break; - ipv6_skip_exthdr(skb, network_hdr.raw - skb->data + - sizeof(struct ipv6hdr), - &l4_hdr, &frag_off); - if (unlikely(frag_off)) - l4_hdr = NEXTHDR_FRAGMENT; - break; - default: - break; - } + goto no_csum; + } - switch (l4_hdr) { - case IPPROTO_TCP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_SCTP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; - mss_l4len_idx = sizeof(struct sctphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; - 
break; - case IPPROTO_UDP: - mss_l4len_idx = sizeof(struct udphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (((first->protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + ((first->protocol == htons(ETH_P_IPV6)) && + ixgbe_ipv6_csum_is_sctp(skb))) { + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; break; - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum, version=%d, l4 proto=%x\n", - network_hdr.ipv4->version, l4_hdr); - } - skb_checksum_help(skb); - goto no_csum; } - - /* update TX checksum flag */ - first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + /* fall through */ + default: + skb_checksum_help(skb); + goto csum_failed; } + /* update TX checksum flag */ + first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, - type_tucmd, mss_l4len_idx); + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); } #define IXGBE_SET_FLAG(_input, _flag, _result) \ @@ -8238,6 +8354,134 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, return 0; } +#ifdef CONFIG_NET_CLS_ACT +static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, + u8 *queue, u64 *action) +{ + unsigned int num_vfs = adapter->num_vfs, vf; + struct net_device *upper; + struct list_head *iter; + + /* redirect to a SRIOV VF */ + for (vf = 0; vf < num_vfs; ++vf) { + upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); + if (upper->ifindex == ifindex) { + if (adapter->num_rx_pools > 1) + *queue = vf * 2; + else + *queue = vf * adapter->num_rx_queues_per_pool; + + *action = vf + 1; + *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + return 0; + } + } + + /* redirect to a offloaded macvlan netdev */ + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + + if (vadapter && vadapter->netdev->ifindex == ifindex) { + *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; + *action = *queue; + return 0; + } + } + } + + return -EINVAL; +} + +static int parse_tc_actions(struct ixgbe_adapter *adapter, + struct tcf_exts *exts, u64 *action, u8 *queue) +{ + const struct tc_action *a; + int err; + + if (tc_no_actions(exts)) + return -EINVAL; + + tc_for_each_action(a, exts) { + + /* Drop action */ + if (is_tcf_gact_shot(a)) { + *action = IXGBE_FDIR_DROP_QUEUE; + *queue = IXGBE_FDIR_DROP_QUEUE; + return 0; + } + + /* Redirect to a VF or a offloaded macvlan */ + if (is_tcf_mirred_redirect(a)) { + int ifindex = tcf_mirred_ifindex(a); + + err = handle_redirect_action(adapter, ifindex, queue, + action); + if (err == 0) + return err; + } + } + + return -EINVAL; +} +#else +static int parse_tc_actions(struct ixgbe_adapter *adapter, + struct tcf_exts *exts, u64 *action, u8 *queue) +{ + return -EINVAL; +} +#endif /* CONFIG_NET_CLS_ACT */ + +static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + 
struct tc_cls_u32_offload *cls, + struct ixgbe_mat_field *field_ptr, + struct ixgbe_nexthdr *nexthdr) +{ + int i, j, off; + __be32 val, m; + bool found_entry = false, found_jump_field = false; + + for (i = 0; i < cls->knode.sel->nkeys; i++) { + off = cls->knode.sel->keys[i].off; + val = cls->knode.sel->keys[i].val; + m = cls->knode.sel->keys[i].mask; + + for (j = 0; field_ptr[j].val; j++) { + if (field_ptr[j].off == off) { + field_ptr[j].val(input, mask, val, m); + input->filter.formatted.flow_type |= + field_ptr[j].type; + found_entry = true; + break; + } + } + if (nexthdr) { + if (nexthdr->off == cls->knode.sel->keys[i].off && + nexthdr->val == cls->knode.sel->keys[i].val && + nexthdr->mask == cls->knode.sel->keys[i].mask) + found_jump_field = true; + else + continue; + } + } + + if (nexthdr && !found_jump_field) + return -EINVAL; + + if (!found_entry) + return 0; + + mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + + return 0; +} + static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, __be16 protocol, struct tc_cls_u32_offload *cls) @@ -8245,16 +8489,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, u32 loc = cls->knode.handle & 0xfffff; struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_mat_field *field_ptr; - struct ixgbe_fdir_filter *input; - union ixgbe_atr_input mask; -#ifdef CONFIG_NET_CLS_ACT - const struct tc_action *a; -#endif - int i, err = 0; + struct ixgbe_fdir_filter *input = NULL; + union ixgbe_atr_input *mask = NULL; + struct ixgbe_jump_table *jump = NULL; + int i, err = -EINVAL; u8 queue; u32 uhtid, link_uhtid; - memset(&mask, 0, sizeof(union ixgbe_atr_input)); uhtid = TC_U32_USERHTID(cls->knode.handle); link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); @@ -8266,38 +8507,11 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, * headers when needed. */ if (protocol != htons(ETH_P_IP)) - return -EINVAL; - - if (link_uhtid) { - struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; - - if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) - return -EINVAL; - - if (!test_bit(link_uhtid - 1, &adapter->tables)) - return -EINVAL; - - for (i = 0; nexthdr[i].jump; i++) { - if (nexthdr->o != cls->knode.sel->offoff || - nexthdr->s != cls->knode.sel->offshift || - nexthdr->m != cls->knode.sel->offmask || - /* do not support multiple key jumps its just mad */ - cls->knode.sel->nkeys > 1) - return -EINVAL; - - if (nexthdr->off != cls->knode.sel->keys[0].off || - nexthdr->val != cls->knode.sel->keys[0].val || - nexthdr->mask != cls->knode.sel->keys[0].mask) - return -EINVAL; - - adapter->jump_tables[link_uhtid] = nexthdr->jump; - } - return 0; - } + return err; if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { e_err(drv, "Location out of range\n"); - return -EINVAL; + return err; } /* cls u32 is a graph starting at root node 0x800. The driver tracks @@ -8308,87 +8522,123 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, * this function _should_ be generic try not to hardcode values here. 
*/ if (uhtid == 0x800) { - field_ptr = adapter->jump_tables[0]; + field_ptr = (adapter->jump_tables[0])->mat; } else { if (uhtid >= IXGBE_MAX_LINK_HANDLE) - return -EINVAL; - - field_ptr = adapter->jump_tables[uhtid]; + return err; + if (!adapter->jump_tables[uhtid]) + return err; + field_ptr = (adapter->jump_tables[uhtid])->mat; } if (!field_ptr) - return -EINVAL; + return err; - input = kzalloc(sizeof(*input), GFP_KERNEL); - if (!input) - return -ENOMEM; + /* At this point we know the field_ptr is valid and need to either + * build cls_u32 link or attach filter. Because adding a link to + * a handle that does not exist is invalid and the same for adding + * rules to handles that don't exist. + */ - for (i = 0; i < cls->knode.sel->nkeys; i++) { - int off = cls->knode.sel->keys[i].off; - __be32 val = cls->knode.sel->keys[i].val; - __be32 m = cls->knode.sel->keys[i].mask; - bool found_entry = false; - int j; + if (link_uhtid) { + struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; - for (j = 0; field_ptr[j].val; j++) { - if (field_ptr[j].off == off) { - field_ptr[j].val(input, &mask, val, m); - input->filter.formatted.flow_type |= - field_ptr[j].type; - found_entry = true; + if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) + return err; + + if (!test_bit(link_uhtid - 1, &adapter->tables)) + return err; + + for (i = 0; nexthdr[i].jump; i++) { + if (nexthdr[i].o != cls->knode.sel->offoff || + nexthdr[i].s != cls->knode.sel->offshift || + nexthdr[i].m != cls->knode.sel->offmask) + return err; + + jump = kzalloc(sizeof(*jump), GFP_KERNEL); + if (!jump) + return -ENOMEM; + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) { + err = -ENOMEM; + goto free_jump; + } + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + err = -ENOMEM; + goto free_input; + } + jump->input = input; + jump->mask = mask; + err = ixgbe_clsu32_build_input(input, mask, cls, + field_ptr, &nexthdr[i]); + if (!err) { + jump->mat = nexthdr[i].jump; + adapter->jump_tables[link_uhtid] = jump; break; } } - - if (!found_entry) - goto err_out; + return 0; } - mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | - IXGBE_ATR_L4TYPE_MASK; - - if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) - mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + err = -ENOMEM; + goto free_input; + } -#ifdef CONFIG_NET_CLS_ACT - if (list_empty(&cls->knode.exts->actions)) + if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { + if ((adapter->jump_tables[uhtid])->input) + memcpy(input, (adapter->jump_tables[uhtid])->input, + sizeof(*input)); + if ((adapter->jump_tables[uhtid])->mask) + memcpy(mask, (adapter->jump_tables[uhtid])->mask, + sizeof(*mask)); + } + err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); + if (err) goto err_out; - list_for_each_entry(a, &cls->knode.exts->actions, list) { - if (!is_tcf_gact_shot(a)) - goto err_out; - } -#endif + err = parse_tc_actions(adapter, cls->knode.exts, &input->action, + &queue); + if (err < 0) + goto err_out; - input->action = IXGBE_FDIR_DROP_QUEUE; - queue = IXGBE_FDIR_DROP_QUEUE; input->sw_idx = loc; spin_lock(&adapter->fdir_perfect_lock); if (hlist_empty(&adapter->fdir_filter_list)) { - memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); - err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, mask); if (err) goto err_out_w_lock; - } 
else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { err = -EINVAL; goto err_out_w_lock; } - ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, input->sw_idx, queue); if (!err) ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); spin_unlock(&adapter->fdir_perfect_lock); + kfree(mask); return err; err_out_w_lock: spin_unlock(&adapter->fdir_perfect_lock); err_out: + kfree(mask); +free_input: kfree(input); - return -EINVAL; +free_jump: + kfree(jump); + return err; } static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, @@ -8515,11 +8765,6 @@ static int ixgbe_set_features(struct net_device *netdev, adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; } - if (features & NETIF_F_HW_VLAN_CTAG_RX) - ixgbe_vlan_strip_enable(adapter); - else - ixgbe_vlan_strip_disable(adapter); - if (changed & NETIF_F_RXALL) need_reset = true; @@ -8536,6 +8781,9 @@ static int ixgbe_set_features(struct net_device *netdev, if (need_reset) ixgbe_do_reset(netdev); + else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER)) + ixgbe_set_rx_mode(netdev); return 0; } @@ -8833,17 +9081,36 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) kfree(fwd_adapter); } -#define IXGBE_MAX_TUNNEL_HDR_LEN 80 +#define IXGBE_MAX_MAC_HDR_LEN 127 +#define IXGBE_MAX_NETWORK_HDR_LEN 511 + static netdev_features_t ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { - if (!skb->encapsulation) - return features; - - if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > - IXGBE_MAX_TUNNEL_HDR_LEN)) - return features & ~NETIF_F_CSUM_MASK; + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. 
+ */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; return features; } @@ -8858,6 +9125,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_mac_address = ixgbe_set_mac, .ndo_change_mtu = ixgbe_change_mtu, .ndo_tx_timeout = ixgbe_tx_timeout, + .ndo_set_tx_maxrate = ixgbe_tx_maxrate, .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, .ndo_do_ioctl = ixgbe_ioctl, @@ -8943,7 +9211,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) /** * ixgbe_wol_supported - Check whether device supports WoL - * @hw: hw specific details + * @adapter: the adapter private structure * @device_id: the device ID * @subdev_id: the subsystem device ID * @@ -8951,19 +9219,33 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) * which devices have WoL support * **/ -int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, - u16 subdevice_id) +bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id) { struct ixgbe_hw *hw = &adapter->hw; u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; - int is_wol_supported = 0; + /* WOL not supported on 82598 */ + if (hw->mac.type == ixgbe_mac_82598EB) + return false; + + /* check eeprom to see if WOL is enabled for X540 and newer */ + if (hw->mac.type >= ixgbe_mac_X540) { + if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || + ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && + (hw->bus.func == 0))) + return true; + } + + /* WOL is determined based on device IDs for 82599 MACs */ switch (device_id) { case IXGBE_DEV_ID_82599_SFP: /* Only these subdevices could supports WOL */ switch (subdevice_id) { - case IXGBE_SUBDEV_ID_82599_SFP_WOL0: case IXGBE_SUBDEV_ID_82599_560FLR: + case IXGBE_SUBDEV_ID_82599_LOM_SNAP6: + case IXGBE_SUBDEV_ID_82599_SFP_WOL0: + case IXGBE_SUBDEV_ID_82599_SFP_2OCP: /* only support first port */ if (hw->bus.func != 0) break; @@ -8971,66 +9253,31 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, case IXGBE_SUBDEV_ID_82599_SFP: case IXGBE_SUBDEV_ID_82599_RNDC: case IXGBE_SUBDEV_ID_82599_ECNA_DP: - case IXGBE_SUBDEV_ID_82599_LOM_SFP: - is_wol_supported = 1; - break; + case IXGBE_SUBDEV_ID_82599_SFP_1OCP: + case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1: + case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2: + return true; } break; case IXGBE_DEV_ID_82599EN_SFP: - /* Only this subdevice supports WOL */ + /* Only these subdevices support WOL */ switch (subdevice_id) { case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: - is_wol_supported = 1; - break; + return true; } break; case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: /* All except this subdevice support WOL */ if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) - is_wol_supported = 1; + return true; break; case IXGBE_DEV_ID_82599_KX4: - is_wol_supported = 1; - break; - case IXGBE_DEV_ID_X540T: - case IXGBE_DEV_ID_X540T1: - case IXGBE_DEV_ID_X550T: - case IXGBE_DEV_ID_X550EM_X_KX4: - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_X_10G_T: - /* check eeprom to see if enabled wol */ - if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || - ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && - (hw->bus.func == 0))) { - is_wol_supported = 1; - } + return true; + default: break; } - return is_wol_supported; -} - -/** - * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM - * @adapter: Pointer to adapter struct - */ -static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter) -{ -#ifdef 
CONFIG_OF - struct device_node *dp = pci_device_to_OF_node(adapter->pdev); - struct ixgbe_hw *hw = &adapter->hw; - const unsigned char *addr; - - addr = of_get_mac_address(dp); - if (addr) { - ether_addr_copy(hw->mac.perm_addr, addr); - return; - } -#endif /* CONFIG_OF */ - -#ifdef CONFIG_SPARC - ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr); -#endif /* CONFIG_SPARC */ + return false; } /** @@ -9136,23 +9383,23 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); /* Setup hw api */ - memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); + hw->mac.ops = *ii->mac_ops; hw->mac.type = ii->mac; hw->mvals = ii->mvals; /* EEPROM */ - memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); + hw->eeprom.ops = *ii->eeprom_ops; eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (ixgbe_removed(hw->hw_addr)) { err = -EIO; goto err_ioremap; } /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ - if (!(eec & (1 << 8))) + if (!(eec & BIT(8))) hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; /* PHY */ - memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); + hw->phy.ops = *ii->phy_ops; hw->phy.sfp_type = ixgbe_sfp_type_unknown; /* ixgbe_identify_phy_generic will set prtad and mmds properly */ hw->phy.mdio.prtad = MDIO_PRTAD_NONE; @@ -9169,12 +9416,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + /* Make sure the SWFW semaphore is in a valid state */ + if (hw->mac.ops.init_swfw_sync) + hw->mac.ops.init_swfw_sync(hw); + /* Make it possible the adapter to be woken up via WOL */ switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); break; default: @@ -9215,54 +9467,62 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto skip_sriov; /* Mailbox */ ixgbe_init_mbx_params_pf(hw); - memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); + hw->mbx.ops = ii->mbx_ops; pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); ixgbe_enable_sriov(adapter); skip_sriov: #endif netdev->features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | - NETIF_F_RXCSUM; + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM; - netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD; +#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: + netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + IXGBE_GSO_PARTIAL_FEATURES; + + if (hw->mac.type >= ixgbe_mac_82599EB) netdev->features |= NETIF_F_SCTP_CRC; - netdev->hw_features |= NETIF_F_SCTP_CRC | - NETIF_F_NTUPLE | + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXALL | + NETIF_F_HW_L2FW_DOFFLOAD; + + if (hw->mac.type >= ixgbe_mac_82599EB) + netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; - break; - default: - break; - } - netdev->hw_features |= NETIF_F_RXALL; - 
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_TSO6; - netdev->vlan_features |= NETIF_F_IP_CSUM; - netdev->vlan_features |= NETIF_F_IPV6_CSUM; - netdev->vlan_features |= NETIF_F_SG; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->hw_enc_features |= netdev->vlan_features; + netdev->mpls_features |= NETIF_F_HW_CSUM; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; #ifdef CONFIG_IXGBE_DCB - netdev->dcbnl_ops = &dcbnl_ops; + if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) + netdev->dcbnl_ops = &dcbnl_ops; #endif #ifdef IXGBE_FCOE @@ -9287,10 +9547,6 @@ skip_sriov: NETIF_F_FCOE_MTU; } #endif /* IXGBE_FCOE */ - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) netdev->hw_features |= NETIF_F_LRO; @@ -9304,7 +9560,8 @@ skip_sriov: goto err_sw_init; } - ixgbe_get_platform_mac_addr(adapter); + eth_platform_get_mac_address(&adapter->pdev->dev, + adapter->hw.mac.perm_addr); memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); @@ -9455,6 +9712,7 @@ err_sw_init: ixgbe_disable_sriov(adapter); adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; iounmap(adapter->io_addr); + kfree(adapter->jump_tables[0]); kfree(adapter->mac_table); err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); @@ -9483,6 +9741,7 @@ static void ixgbe_remove(struct pci_dev *pdev) struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev; bool disable_dev; + int i; /* if !adapter then we already cleaned up in probe */ if (!adapter) @@ -9532,6 +9791,14 @@ static void ixgbe_remove(struct pci_dev *pdev) e_dev_info("complete\n"); + for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) { + if (adapter->jump_tables[i]) { + kfree(adapter->jump_tables[i]->input); + kfree(adapter->jump_tables[i]->mask); + } + kfree(adapter->jump_tables[i]); + } + kfree(adapter->mac_table); disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); @@ -9612,6 +9879,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, case ixgbe_mac_X550EM_x: device_id = IXGBE_DEV_ID_X550EM_X_VF; break; + case ixgbe_mac_x550em_a: + device_id = IXGBE_DEV_ID_X550EM_A_VF; + break; default: device_id = 0; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index 9993a471d..a0cb84381 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -48,10 +48,10 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) if (size > mbx->size) size = mbx->size; - if (!mbx->ops.read) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.read(hw, msg, size, mbx_id); + return mbx->ops->read(hw, msg, size, mbx_id); } /** @@ -70,10 +70,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) if (size > mbx->size) return IXGBE_ERR_MBX; - if (!mbx->ops.write) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.write(hw, msg, size, mbx_id); + return mbx->ops->write(hw, msg, size, mbx_id); } /** @@ -87,10 +87,10 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - if (!mbx->ops.check_for_msg) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.check_for_msg(hw, mbx_id); + return mbx->ops->check_for_msg(hw, mbx_id); } /** @@ -104,10 +104,10 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - if (!mbx->ops.check_for_ack) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.check_for_ack(hw, mbx_id); + return mbx->ops->check_for_ack(hw, mbx_id); } /** @@ -121,10 +121,10 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - if (!mbx->ops.check_for_rst) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.check_for_rst(hw, mbx_id); + return mbx->ops->check_for_rst(hw, mbx_id); } /** @@ -139,10 +139,10 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; - if (!countdown || !mbx->ops.check_for_msg) + if (!countdown || !mbx->ops) return IXGBE_ERR_MBX; - while (mbx->ops.check_for_msg(hw, mbx_id)) { + while (mbx->ops->check_for_msg(hw, mbx_id)) { countdown--; if (!countdown) return IXGBE_ERR_MBX; @@ -164,10 +164,10 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; - if (!countdown || !mbx->ops.check_for_ack) + if (!countdown || !mbx->ops) return IXGBE_ERR_MBX; - while (mbx->ops.check_for_ack(hw, mbx_id)) { + while (mbx->ops->check_for_ack(hw, mbx_id)) { countdown--; if (!countdown) return IXGBE_ERR_MBX; @@ -193,7 +193,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, struct ixgbe_mbx_info *mbx = &hw->mbx; s32 ret_val; - if (!mbx->ops.read) + if (!mbx->ops) return IXGBE_ERR_MBX; ret_val = ixgbe_poll_for_msg(hw, mbx_id); @@ -201,7 +201,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, return ret_val; /* if ack received read message */ - return mbx->ops.read(hw, msg, size, mbx_id); + return mbx->ops->read(hw, msg, size, mbx_id); } /** @@ -221,11 +221,11 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, s32 ret_val; /* exit if either we can't write or there isn't a defined timeout */ - if (!mbx->ops.write || !mbx->timeout) + if (!mbx->ops || !mbx->timeout) return IXGBE_ERR_MBX; /* send msg */ - ret_val = mbx->ops.write(hw, msg, size, mbx_id); + ret_val = mbx->ops->write(hw, msg, size, mbx_id); if (ret_val) return ret_val; @@ -307,14 +307,15 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; 
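The ixgbe_mbx.c hunks above reach the mailbox callbacks through a single const pointer (mbx->ops->read) instead of a copied struct of callbacks (mbx->ops.read), so one NULL test guards every mailbox call. A minimal userspace sketch of that const ops-pointer pattern (not driver code; all names below are illustrative only):

#include <stddef.h>
#include <stdio.h>

struct mbx_ops {
        int (*read)(unsigned int id);
        int (*write)(unsigned int id);
};

struct mbx_info {
        const struct mbx_ops *ops;      /* one shared, read-only table */
};

static int demo_read(unsigned int id)  { printf("read mbx %u\n", id);  return 0; }
static int demo_write(unsigned int id) { printf("write mbx %u\n", id); return 0; }

static const struct mbx_ops demo_ops = {
        .read  = demo_read,
        .write = demo_write,
};

static int mbx_read(struct mbx_info *mbx, unsigned int id)
{
        if (!mbx->ops)                  /* single check replaces per-callback checks */
                return -1;
        return mbx->ops->read(id);
}

int main(void)
{
        struct mbx_info mbx = { .ops = &demo_ops };
        return mbx_read(&mbx, 0);
}

Keeping the table const also lets it live in read-only data and be shared by every adapter instance, which matches the mbx_ops_generic constification shown a little further down in this file's diff.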
default: break; } - if (vflre & (1 << vf_shift)) { - IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); + if (vflre & BIT(vf_shift)) { + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift)); hw->mbx.stats.rsts++; return 0; } @@ -430,6 +431,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) if (hw->mac.type != ixgbe_mac_82599EB && hw->mac.type != ixgbe_mac_X550 && hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_x550em_a && hw->mac.type != ixgbe_mac_X540) return; @@ -446,7 +448,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) } #endif /* CONFIG_PCI_IOV */ -struct ixgbe_mbx_operations mbx_ops_generic = { +const struct ixgbe_mbx_operations mbx_ops_generic = { .read = ixgbe_read_mbx_pf, .write = ixgbe_write_mbx_pf, .read_posted = ixgbe_read_posted_mbx, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index 8daa95f74..01c2667c0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -123,6 +123,6 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); #endif /* CONFIG_PCI_IOV */ -extern struct ixgbe_mbx_operations mbx_ops_generic; +extern const struct ixgbe_mbx_operations mbx_ops_generic; #endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index 74c53ad9d..a8bed3d88 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -38,6 +38,12 @@ struct ixgbe_mat_field { unsigned int type; }; +struct ixgbe_jump_table { + struct ixgbe_mat_field *mat; + struct ixgbe_fdir_filter *input; + union ixgbe_atr_input *mask; +}; + static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) @@ -82,6 +88,12 @@ static struct ixgbe_mat_field ixgbe_tcp_fields[] = { { .val = NULL } /* terminal node */ }; +static struct ixgbe_mat_field ixgbe_udp_fields[] = { + {.off = 0, .val = ixgbe_mat_prgm_ports, + .type = IXGBE_ATR_FLOW_TYPE_UDPV4}, + { .val = NULL } /* terminal node */ +}; + struct ixgbe_nexthdr { /* offset, shift, and mask of position to next header */ unsigned int o; @@ -98,6 +110,8 @@ struct ixgbe_nexthdr { static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = { { .o = 0, .s = 6, .m = 0xf, .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields}, + { .o = 0, .s = 6, .m = 0xf, + .off = 8, .val = 0x1100, .mask = 0xff00, .jump = ixgbe_udp_fields}, { .jump = NULL } /* terminal node */ }; #endif /* _IXGBE_MODEL_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 5abd66c84..cc735ec3e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -81,7 +81,11 @@ #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 #define IXGBE_CS4227 0xBE /* CS4227 address */ +#define IXGBE_CS4227_GLOBAL_ID_LSB 0 +#define IXGBE_CS4227_GLOBAL_ID_MSB 1 #define IXGBE_CS4227_SCRATCH 2 +#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */ +#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */ #define IXGBE_CS4227_RESET_PENDING 0x1357 #define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 #define IXGBE_CS4227_RETRIES 15 @@ -103,7 +107,7 @@ #define IXGBE_PE 0xE0 /* Port expander addr */ #define IXGBE_PE_OUTPUT 1 /* Output reg offset */ #define IXGBE_PE_CONFIG 3 /* Config reg offset */ -#define IXGBE_PE_BIT1 (1 << 1) +#define IXGBE_PE_BIT1 BIT(1) /* Flow control defines */ #define IXGBE_TAF_SYM_PAUSE 0x400 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index ef1504d41..e5431bfe3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -333,6 +333,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, */ case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* Upper 32 bits represent billions of cycles, lower 32 bits * represent cycles. However, we use timespec64_to_ns for the * correct math even though the units haven't been corrected @@ -395,7 +396,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) if (incval > 0x00FFFFFFULL) e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | + BIT(IXGBE_INCPER_SHIFT_82599) | ((u32)incval & 0x00FFFFFFUL)); break; default: @@ -921,6 +922,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* enable timestamping all packets only if at least some * packets were requested. 
Otherwise, play nice and disable * timestamping @@ -1083,6 +1085,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) cc.shift = 2; } /* fallthrough */ + case ixgbe_mac_x550em_a: case ixgbe_mac_X550: cc.read = ixgbe_ptp_read_X550; @@ -1111,7 +1114,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) incval >>= IXGBE_INCVAL_SHIFT_82599; cc.shift -= IXGBE_INCVAL_SHIFT_82599; IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | incval); + BIT(IXGBE_INCPER_SHIFT_82599) | incval); break; default: /* other devices aren't supported */ @@ -1223,6 +1226,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 30000000; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 8025a3f93..c5caacdd1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -406,7 +406,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= (1 << vector_bit); + mta_reg |= BIT(vector_bit); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); } vmolr |= IXGBE_VMOLR_ROMPE; @@ -433,7 +433,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= (1 << vector_bit); + mta_reg |= BIT(vector_bit); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); } @@ -536,9 +536,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) /* enable or disable receive depending on error */ vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); if (err) - vfre &= ~(1 << vf_shift); + vfre &= ~BIT(vf_shift); else - vfre |= 1 << vf_shift; + vfre |= BIT(vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); if (err) { @@ -589,47 +589,47 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - u32 i; + u32 vlvfb_mask, pool_mask, i; + + /* create mask for VF and other pools */ + pool_mask = ~BIT(VMDQ_P(0) % 32); + vlvfb_mask = BIT(vf % 32); /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ for (i = IXGBE_VLVF_ENTRIES; i--;) { u32 bits[2], vlvfb, vid, vfta, vlvf; u32 word = i * 2 + vf / 32; - u32 mask = 1 << (vf % 32); + u32 mask; vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); /* if our bit isn't set we can skip it */ - if (!(vlvfb & mask)) + if (!(vlvfb & vlvfb_mask)) continue; /* clear our bit from vlvfb */ - vlvfb ^= mask; + vlvfb ^= vlvfb_mask; /* create 64b mask to chedk to see if we should clear VLVF */ bits[word % 2] = vlvfb; bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); - /* if promisc is enabled, PF will be present, leave VFTA */ - if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) { - bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32)); - - if (bits[0] || bits[1]) - goto update_vlvfb; - goto update_vlvf; - } - /* if other pools are present, just remove ourselves */ - if (bits[0] || bits[1]) + if (bits[(VMDQ_P(0) / 32) ^ 1] || + (bits[VMDQ_P(0) / 32] 
& pool_mask)) goto update_vlvfb; + /* if PF is present, leave VFTA */ + if (bits[0] || bits[1]) + goto update_vlvf; + /* if we cannot determine VLAN just remove ourselves */ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); if (!vlvf) goto update_vlvfb; vid = vlvf & VLAN_VID_MASK; - mask = 1 << (vid % 32); + mask = BIT(vid % 32); /* clear bit from VFTA */ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); @@ -638,6 +638,9 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) update_vlvf: /* clear POOL selection enable */ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); + + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + vlvfb = 0; update_vlvfb: /* clear pool bits */ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); @@ -810,7 +813,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* enable transmit for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= 1 << vf_shift; + reg |= BIT(vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); /* force drop enable for all VF Rx queues */ @@ -818,7 +821,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= 1 << vf_shift; + reg |= BIT(vf_shift); /* * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. * For more info take a look at ixgbe_set_vf_lpe @@ -834,7 +837,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) #endif /* CONFIG_FCOE */ if (pf_max_frame > ETH_FRAME_LEN) - reg &= ~(1 << vf_shift); + reg &= ~BIT(vf_shift); } IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); @@ -843,7 +846,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* Enable counting of spoofed packets in the SSVPC register */ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); - reg |= (1 << vf_shift); + reg |= BIT(vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); /* @@ -887,7 +890,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, return -1; } - if (adapter->vfinfo[vf].pf_set_mac && + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { e_warn(drv, "VF %d attempted to override administratively set MAC address\n" @@ -905,8 +908,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); u8 tcs = netdev_get_num_tc(adapter->netdev); - struct ixgbe_hw *hw = &adapter->hw; - int err; if (adapter->vfinfo[vf].pf_vlan || tcs) { e_warn(drv, @@ -920,19 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, if (!vid && !add) return 0; - err = ixgbe_set_vf_vlan(adapter, add, vid, vf); - if (err) - return err; - - if (adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - - if (add) - adapter->vfinfo[vf].vlan_count++; - else if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; - - return 0; + return ixgbe_set_vf_vlan(adapter, add, vid, vf); } static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, @@ -961,8 +950,11 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, * If the VF is allowed to set MAC filters then turn off * anti-spoofing to avoid false positives. 
*/ - if (adapter->vfinfo[vf].spoofchk_enabled) - ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) { + struct ixgbe_hw *hw = &adapter->hw; + + hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); + } } err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); @@ -1318,9 +1310,6 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, ixgbe_set_vmvir(adapter, vlan, qos, vf); ixgbe_set_vmolr(hw, vf, false); - if (adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - adapter->vfinfo[vf].vlan_count++; /* enable hide vlan on X550 */ if (hw->mac.type >= ixgbe_mac_X550) @@ -1353,9 +1342,6 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) ixgbe_set_vf_vlan(adapter, true, 0, vf); ixgbe_clear_vmvir(adapter, vf); ixgbe_set_vmolr(hw, vf, true); - hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); - if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; /* disable hide VLAN on X550 */ if (hw->mac.type >= ixgbe_mac_X550) @@ -1395,7 +1381,7 @@ out: return err; } -static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) +int ixgbe_link_mbps(struct ixgbe_adapter *adapter) { switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: @@ -1522,27 +1508,34 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - int vf_target_reg = vf >> 3; - int vf_target_shift = vf % 8; struct ixgbe_hw *hw = &adapter->hw; - u32 regval; if (vf >= adapter->num_vfs) return -EINVAL; adapter->vfinfo[vf].spoofchk_enabled = setting; - regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); - regval &= ~(1 << vf_target_shift); - regval |= (setting << vf_target_shift); - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval); - - if (adapter->vfinfo[vf].vlan_count) { - vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT; - regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); - regval &= ~(1 << vf_target_shift); - regval |= (setting << vf_target_shift); - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval); + /* configure MAC spoofing */ + hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf); + + /* configure VLAN spoofing */ + hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); + + /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be + * calling set_ethertype_anti_spoofing for each VF in loop below + */ + if (hw->mac.ops.set_ethertype_anti_spoofing) { + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + IXGBE_ETH_P_LLDP)); + + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + ETH_P_PAUSE)); + + hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); } return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index dad925706..47e65e2f8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -44,6 +44,7 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, u8 qos); +int ixgbe_link_mbps(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate); 
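The reworked ixgbe_ndo_set_vf_spoofchk() above programs two Ethertype filters (LLDP and pause frames) with the Tx anti-spoof bit set. Here is a self-contained sketch of how such an ETQF value is composed, using the bit positions quoted in the ixgbe_type.h hunk later in this diff (filter enable is bit 31, Tx anti-spoof is bit 29) and the standard LLDP EtherType; the macro names are local stand-ins for the IXGBE_* defines.

#include <stdio.h>
#include <stdint.h>

#define ETQF_FILTER_EN    0x80000000u	/* bit 31 */
#define ETQF_TX_ANTISPOOF 0x20000000u	/* bit 29 */
#define ETH_P_LLDP        0x88CCu	/* standard LLDP EtherType */

int main(void)
{
	uint32_t etqf = ETQF_FILTER_EN | ETQF_TX_ANTISPOOF | ETH_P_LLDP;

	printf("ETQF(LLDP) = 0x%08x\n", etqf);	/* 0xa00088cc */
	return 0;
}

Packing the EtherType into the low 16 bits and the control flags into the high bits lets a single 32-bit register describe the whole filter.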
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index bf7367a08..da3d8358f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -59,8 +59,12 @@ #define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 #define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B +#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 +#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D +#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE #define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 -#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D #define IXGBE_DEV_ID_82599EN_SFP 0x1557 @@ -75,21 +79,25 @@ #define IXGBE_DEV_ID_X540T1 0x1560 #define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550T1 0x15D1 #define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA #define IXGBE_DEV_ID_X550EM_X_KR 0x15AB #define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC #define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD #define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE -#define IXGBE_DEV_ID_X550_VF_HV 0x1564 -#define IXGBE_DEV_ID_X550_VF 0x1565 -#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 -#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 +#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 +#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 +#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 +#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 +#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE /* VF Device IDs */ -#define IXGBE_DEV_ID_82599_VF 0x10ED -#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 #define IXGBE_CAT(r, m) IXGBE_##r##_##m @@ -128,7 +136,7 @@ #define IXGBE_FLA_X540 IXGBE_FLA_8259X #define IXGBE_FLA_X550 IXGBE_FLA_8259X #define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X -#define IXGBE_FLA_X550EM_a 0x15F6C +#define IXGBE_FLA_X550EM_a 0x15F68 #define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA) #define IXGBE_EEMNGCTL 0x10110 #define IXGBE_EEMNGDATA 0x10114 @@ -143,13 +151,6 @@ #define IXGBE_GRC_X550EM_a 0x15F64 #define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC) -#define IXGBE_SRAMREL_8259X 0x10210 -#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X -#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X -#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X -#define IXGBE_SRAMREL_X550EM_a 0x15F6C -#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL) - /* General Receive Control */ #define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ #define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ @@ -375,6 +376,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) #define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) #define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ +#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 
4)) /* 4 total */ #define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ #define IXGBE_RXFECCERR0 0x051B8 #define IXGBE_LLITHRESH 0x0EC90 @@ -446,6 +449,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ #define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ #define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ +#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ #define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ #define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ @@ -543,6 +548,7 @@ struct ixgbe_thermal_sensor_data { /* DCB registers */ #define MAX_TRAFFIC_CLASS 8 #define X540_TRAFFIC_CLASS 4 +#define DEF_TRAFFIC_CLASS 1 #define IXGBE_RMCS 0x03D00 #define IXGBE_DPMCS 0x07F40 #define IXGBE_PDPMCS 0x0CD00 @@ -554,7 +560,6 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ - /* Security Control Registers */ #define IXGBE_SECTXCTRL 0x08800 #define IXGBE_SECTXSTAT 0x08804 @@ -693,16 +698,16 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ #define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ #define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4)) -#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ -#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ -#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_VALID BIT(0) /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE (3u << 3) /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX BIT(7) /* 0: Initiator, 1: Target */ #define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ #define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ #define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 #define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 #define IXGBE_FCBUFF_OFFSET_SHIFT 16 -#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ -#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ +#define IXGBE_FCDMARW_WE BIT(14) /* Write enable */ +#define IXGBE_FCDMARW_RE BIT(15) /* Read enable */ #define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ #define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ #define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 @@ -719,23 +724,23 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FCFLT 0x05108 /* FC FLT Context */ #define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ #define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ -#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ -#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ +#define IXGBE_FCFLT_VALID BIT(0) /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST BIT(1) /* Filter First */ #define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ #define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ -#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ -#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ -#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ +#define IXGBE_FCFLTRW_RVALDT BIT(13) /* Fast Re-Validation */ +#define IXGBE_FCFLTRW_WE BIT(14) /* Write Enable */ +#define IXGBE_FCFLTRW_RE BIT(15) /* Read Enable */ /* FCoE Receive Control */ #define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ -#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ -#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ -#define 
IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ -#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ -#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ -#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */ -#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ -#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOELLI BIT(0) /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD BIT(1) /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH BIT(2) /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH BIT(3) /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH BIT(4) /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5) /* EN 1st Seq. Header */ +#define IXGBE_FCRXCTRL_ICRC BIT(6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO BIT(7) /* FC CRC Byte Ordering */ #define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ #define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 /* FCoE Redirection */ @@ -1056,15 +1061,9 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) #define IXGBE_TDPROBE 0x07F20 #define IXGBE_TXBUFCTRL 0x0C600 -#define IXGBE_TXBUFDATA0 0x0C610 -#define IXGBE_TXBUFDATA1 0x0C614 -#define IXGBE_TXBUFDATA2 0x0C618 -#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */ #define IXGBE_RXBUFCTRL 0x03600 -#define IXGBE_RXBUFDATA0 0x03610 -#define IXGBE_RXBUFDATA1 0x03614 -#define IXGBE_RXBUFDATA2 0x03618 -#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */ #define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ #define IXGBE_RFVAL 0x050A4 #define IXGBE_MDFTC1 0x042B8 @@ -1127,6 +1126,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_XPCSS 0x04290 #define IXGBE_MFLCN 0x04294 #define IXGBE_SERDESC 0x04298 +#define IXGBE_MAC_SGMII_BUSY 0x04298 #define IXGBE_MACS 0x0429C #define IXGBE_AUTOC 0x042A0 #define IXGBE_LINKS 0x042A4 @@ -1203,6 +1203,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ #define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ #define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ +#define IXGBE_RDRXCTL_MBINTEN 0x10000000 +#define IXGBE_RDRXCTL_MDP_EN 0x20000000 /* RQTC Bit Masks and Shifts */ #define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) @@ -1249,20 +1251,20 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ #define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ #define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ -#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ -#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ -#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ -#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ -#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx 
wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */ #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ #define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ #define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ -#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ -#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ /* MSCA Bit Masks */ @@ -1309,6 +1311,7 @@ struct ixgbe_thermal_sensor_data { /* MDIO definitions */ +#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0 #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 #define IXGBE_MDIO_PCS_DEV_TYPE 0x3 #define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 @@ -1740,7 +1743,7 @@ enum { #define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ #define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ #define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ -#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ +#define IXGBE_ETQF_POOL_ENABLE BIT(26) /* bit 26 */ #define IXGBE_ETQF_POOL_SHIFT 20 #define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ @@ -1866,20 +1869,20 @@ enum { #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 #define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 -#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_XAUI (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) #define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 -#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_KR (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) #define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 #define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 @@ -1957,7 +1960,9 @@ enum { #define IXGBE_GSSR_PHY1_SM 0x0004 #define IXGBE_GSSR_MAC_CSR_SM 0x0008 #define IXGBE_GSSR_FLASH_SM 0x0010 +#define 
IXGBE_GSSR_NVM_UPDATE_SM 0x0200 #define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */ #define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ #define IXGBE_GSSR_I2C_MASK 0x1800 #define IXGBE_GSSR_NVM_PHY_MASK 0xF @@ -1997,6 +2002,9 @@ enum { #define IXGBE_PBANUM_PTR_GUARD 0xFAFA #define IXGBE_EEPROM_CHECKSUM 0x3F #define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_EEPROM_CTRL_4 0x45 +#define IXGBE_EE_CTRL_4_INST_ID 0x10 +#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4 #define IXGBE_PCIE_ANALOG_PTR 0x03 #define IXGBE_ATLAS0_CONFIG_PTR 0x04 #define IXGBE_PHY_PTR 0x04 @@ -2111,6 +2119,7 @@ enum { #define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 #define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 #define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR BIT(7) #define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 #define IXGBE_FW_LESM_STATE_1 0x1 #define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ @@ -2530,6 +2539,10 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 #define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 #define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000 +#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 +#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */ +#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */ #define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 #define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 #define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 @@ -2620,6 +2633,20 @@ enum ixgbe_fdir_pballoc_type { #define FW_MAX_READ_BUFFER_SIZE 1024 #define FW_DISABLE_RXEN_CMD 0xDE #define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 +#define FW_PHY_TOKEN_REQ_CMD 0x0A +#define FW_PHY_TOKEN_REQ_LEN 2 +#define FW_PHY_TOKEN_REQ 0 +#define FW_PHY_TOKEN_REL 1 +#define FW_PHY_TOKEN_OK 1 +#define FW_PHY_TOKEN_RETRY 0x80 +#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */ +#define FW_PHY_TOKEN_WAIT 5 /* seconds */ +#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY) +#define FW_INT_PHY_REQ_CMD 0xB +#define FW_INT_PHY_REQ_LEN 10 +#define FW_INT_PHY_REQ_READ 0 +#define FW_INT_PHY_REQ_WRITE 1 /* Host Interface Command Structures */ struct ixgbe_hic_hdr { @@ -2688,6 +2715,28 @@ struct ixgbe_hic_disable_rxen { u16 pad3; }; +struct ixgbe_hic_phy_token_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + u16 pad; +}; + +struct ixgbe_hic_internal_phy_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + __be16 address; + u16 rsv1; + __be32 write_data; + u16 pad; +} __packed; + +struct ixgbe_hic_internal_phy_resp { + struct ixgbe_hic_hdr hdr; + __be32 read_data; +}; + /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { @@ -2786,15 +2835,15 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ #define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ -#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ -#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ -#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ -#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */ -#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */ -#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ -#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ 
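The new host-interface structures above (ixgbe_hic_phy_token_req, ixgbe_hic_internal_phy_req) are wire formats: multi-byte fields are big-endian (__be16/__be32, filled with cpu_to_be16/32) and the internal-PHY request is __packed so no padding is inserted. A reduced, self-contained sketch of the same idea follows; the 4-byte header placeholder and the use of htons/htonl instead of the kernel helpers are simplifications, not the driver's exact layout.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htons/htonl as user-space stand-ins for cpu_to_be16/32 */

struct hic_internal_phy_req {
	uint8_t  hdr[4];	/* placeholder for the command header */
	uint8_t  port_number;
	uint8_t  command_type;
	uint16_t address;	/* big-endian on the wire */
	uint16_t rsv1;
	uint32_t write_data;	/* big-endian on the wire */
	uint16_t pad;
} __attribute__((packed));

int main(void)
{
	struct hic_internal_phy_req req = { { 0 } };

	req.address = htons(0x4B00);		/* example register address */
	req.write_data = htonl(0x12345678);
	printf("packed request is %zu bytes\n", sizeof(req));	/* 16: no padding */
	return 0;
}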
-#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ -#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_FCOEF_SOF (BIT(2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC (BIT(3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE (BIT(4) << 10) /* Orientation: End */ +#define IXGBE_ADVTXD_FCOEF_ORIS (BIT(5) << 10) /* Orientation: Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0u << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (1u << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (2u << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (3u << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */ #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ @@ -2948,7 +2997,6 @@ union ixgbe_atr_hash_dword { IXGBE_CAT(EEC, m), \ IXGBE_CAT(FLA, m), \ IXGBE_CAT(GRC, m), \ - IXGBE_CAT(SRAMREL, m), \ IXGBE_CAT(FACTPS, m), \ IXGBE_CAT(SWSM, m), \ IXGBE_CAT(SWFW_SYNC, m), \ @@ -2989,6 +3037,7 @@ enum ixgbe_mac_type { ixgbe_mac_X540, ixgbe_mac_X550, ixgbe_mac_X550EM_x, + ixgbe_mac_x550em_a, ixgbe_num_macs }; @@ -3017,6 +3066,7 @@ enum ixgbe_phy_type { ixgbe_phy_qsfp_intel, ixgbe_phy_qsfp_unknown, ixgbe_phy_sfp_unsupported, + ixgbe_phy_sgmii, ixgbe_phy_generic }; @@ -3130,8 +3180,9 @@ struct ixgbe_bus_info { enum ixgbe_bus_width width; enum ixgbe_bus_type type; - u16 func; - u16 lan_id; + u8 func; + u8 lan_id; + u8 instance_id; }; /* Flow control parameters */ @@ -3266,6 +3317,7 @@ struct ixgbe_mac_operations { s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); void (*release_swfw_sync)(struct ixgbe_hw *, u32); + void (*init_swfw_sync)(struct ixgbe_hw *); s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); @@ -3308,6 +3360,7 @@ struct ixgbe_mac_operations { /* Flow Control */ s32 (*fc_enable)(struct ixgbe_hw *); + s32 (*setup_fc)(struct ixgbe_hw *); /* Manageability interface */ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); @@ -3323,6 +3376,8 @@ struct ixgbe_mac_operations { s32 (*dmac_config)(struct ixgbe_hw *hw); s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); + s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); + s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); }; struct ixgbe_phy_operations { @@ -3442,7 +3497,7 @@ struct ixgbe_mbx_stats { }; struct ixgbe_mbx_info { - struct ixgbe_mbx_operations ops; + const struct ixgbe_mbx_operations *ops; struct ixgbe_mbx_stats stats; u32 timeout; u32 usec_delay; @@ -3475,10 +3530,10 @@ struct ixgbe_hw { struct ixgbe_info { enum ixgbe_mac_type mac; s32 (*get_invariants)(struct ixgbe_hw *); - struct ixgbe_mac_operations *mac_ops; - struct ixgbe_eeprom_operations *eeprom_ops; - struct ixgbe_phy_operations *phy_ops; - struct ixgbe_mbx_operations *mbx_ops; + const struct ixgbe_mac_operations *mac_ops; + const struct ixgbe_eeprom_operations *eeprom_ops; + const struct ixgbe_phy_operations *phy_ops; + const struct ixgbe_mbx_operations *mbx_ops; const u32 *mvals; }; @@ -3517,14 +3572,19 @@ struct ixgbe_info { #define IXGBE_ERR_INVALID_ARGUMENT -32 #define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 #define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 +#define IXGBE_ERR_FW_RESP_INVALID -39 +#define IXGBE_ERR_TOKEN_RETRY -40 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF #define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) #define 
IXGBE_FUSES0_300MHZ BIT(5) -#define IXGBE_FUSES0_REV_MASK (3 << 6) +#define IXGBE_FUSES0_REV_MASK (3u << 6) #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) +#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) +#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) +#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) #define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) #define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) @@ -3532,43 +3592,54 @@ struct ixgbe_info { #define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00) -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31) + +#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28) +#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29) + +#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) +#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19) -#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) -#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) -#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2) -#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) -#define 
IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31) #define IXGBE_KX4_LINK_CNTL_1 0x4C -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17) -#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24) -#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29) -#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29) +#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31) #define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 #define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 @@ -3584,12 +3655,17 @@ struct ixgbe_info { #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 #define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 -#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) #define IXGBE_SB_IOSF_TARGET_KR_PHY 0 #define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1 #define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 #define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 #define IXGBE_NW_MNG_IF_SEL 0x00011178 +#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1) +#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23) #define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ + (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) #endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 2358c1b7d..f2b1d48a1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -214,8 +214,8 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); @@ -747,6 +747,25 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) } /** + * ixgbe_init_swfw_sync_X540 - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function reset hardware semaphore bits for a semaphore that may + * have be left locked due to a catastrophic failure. 
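In the ixgbe_init_eeprom_params_X540() hunk above, the EEPROM word count is now derived with BIT(): the SIZE field read out of EEC selects a power of two. A tiny sketch of that computation; the shift value of 6 used here is an assumption (IXGBE_EEPROM_WORD_SIZE_SHIFT is defined elsewhere in the driver, not in this diff).

#include <stdio.h>

#define BIT(n) (1u << (n))
#define EEPROM_WORD_SIZE_SHIFT 6	/* assumed stand-in for IXGBE_EEPROM_WORD_SIZE_SHIFT */

int main(void)
{
	for (unsigned int size_field = 0; size_field <= 4; size_field++)
		printf("EEC size field %u -> %u words\n",
		       size_field, BIT(size_field + EEPROM_WORD_SIZE_SHIFT));
	return 0;
}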
+ **/ +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) +{ + /* First try to grab the semaphore but we don't need to bother + * looking to see whether we got the lock or not since we do + * the same thing regardless of whether we got the lock or not. + * We got the lock - we release it. + * We timeout trying to get the lock - we force its release. + */ + ixgbe_get_swfw_sync_semaphore(hw); + ixgbe_release_swfw_sync_semaphore(hw); +} + +/** * ixgbe_blink_led_start_X540 - Blink LED based on index. * @hw: pointer to hardware structure * @index: led number to blink @@ -810,7 +829,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) return 0; } -static struct ixgbe_mac_operations mac_ops_X540 = { +static const struct ixgbe_mac_operations mac_ops_X540 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_X540, .start_hw = &ixgbe_start_hw_X540, @@ -846,6 +865,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .clear_vfta = &ixgbe_clear_vfta_generic, .set_vfta = &ixgbe_set_vfta_generic, .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, .init_uta_tables = &ixgbe_init_uta_tables_generic, .setup_sfp = NULL, @@ -853,6 +873,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, .release_swfw_sync = &ixgbe_release_swfw_sync_X540, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, .disable_rx_buff = &ixgbe_disable_rx_buff_generic, .enable_rx_buff = &ixgbe_enable_rx_buff_generic, .get_thermal_sensor_data = NULL, @@ -863,7 +884,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .disable_rx = &ixgbe_disable_rx_generic, }; -static struct ixgbe_eeprom_operations eeprom_ops_X540 = { +static const struct ixgbe_eeprom_operations eeprom_ops_X540 = { .init_params = &ixgbe_init_eeprom_params_X540, .read = &ixgbe_read_eerd_X540, .read_buffer = &ixgbe_read_eerd_buffer_X540, @@ -874,7 +895,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_X540 = { .update_checksum = &ixgbe_update_eeprom_checksum_X540, }; -static struct ixgbe_phy_operations phy_ops_X540 = { +static const struct ixgbe_phy_operations phy_ops_X540 = { .identify = &ixgbe_identify_phy_generic, .identify_sfp = &ixgbe_identify_sfp_module_generic, .init = NULL, @@ -897,7 +918,7 @@ static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X540) }; -struct ixgbe_info ixgbe_X540_info = { +const struct ixgbe_info ixgbe_X540_info = { .mac = ixgbe_mac_X540, .get_invariants = &ixgbe_get_invariants_X540, .mac_ops = &mac_ops_X540, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h index a1468b1f4..e21cd4849 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -36,4 +36,5 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 68a9c6464..19b75cd98 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1,7 +1,7 @@ 
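A recurring change in the ixgbe_x540.c hunk above, and again in ixgbe_x550.c below, is that the mac/eeprom/phy operation tables become const, so the function-pointer tables can live in read-only data. A minimal sketch of the pattern with made-up ops names; the real tables are of course the mac_ops_X540/X550* structures in this diff.

#include <stdio.h>

struct demo_ops {
	int (*reset_hw)(void);
	int (*start_hw)(void);
};

static int reset_hw_x540(void)   { return 540; }
static int start_hw_generic(void) { return 0; }

static const struct demo_ops mac_ops_demo = {	/* placed in .rodata, cannot be patched at run time */
	.reset_hw = reset_hw_x540,
	.start_hw = start_hw_generic,
};

struct demo_info {
	const struct demo_ops *ops;	/* pointer to a const table, as ixgbe_mbx_info now holds */
};

int main(void)
{
	struct demo_info info = { .ops = &mac_ops_demo };

	printf("reset_hw() -> %d\n", info.ops->reset_hw());
	return 0;
}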
/******************************************************************************* * * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. + * Copyright(c) 1999 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -27,6 +27,7 @@ #include "ixgbe_phy.h" static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); +static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) { @@ -272,16 +273,26 @@ out: static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) { switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP: + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_SFP: /* set up for CS4227 usage */ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); ixgbe_check_cs4227(hw); + /* Fallthrough */ + case IXGBE_DEV_ID_X550EM_A_SFP_N: return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_KX4: hw->phy.type = ixgbe_phy_x550em_kx4; break; case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: hw->phy.type = ixgbe_phy_x550em_kr; break; case IXGBE_DEV_ID_X550EM_X_1G_T: @@ -324,8 +335,8 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); @@ -412,6 +423,121 @@ out: return ret; } +/** + * ixgbe_get_phy_token - Get the token for shared PHY access + * @hw: Pointer to hardware structure + */ +static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REQ; + token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return 0; + if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) + return IXGBE_ERR_FW_RESP_INVALID; + + return IXGBE_ERR_TOKEN_RETRY; +} + +/** + * ixgbe_put_phy_token - Put the token for shared PHY access + * @hw: Pointer to hardware structure + */ +static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REL; + token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return 0; + return IXGBE_ERR_FW_RESP_INVALID; +} 
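ixgbe_get_phy_token() and ixgbe_put_phy_token() above return IXGBE_ERR_TOKEN_RETRY when firmware asks the driver to try again, and the earlier defines give (FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY = 1000 attempts spaced 5 ms apart. The acquire path that consumes this, ixgbe_acquire_swfw_sync_x550em_a later in this diff, loops on that status; below is a self-contained sketch of that retry shape with a stubbed token call that succeeds on its third attempt.

#include <stdio.h>
#include <unistd.h>

#define ERR_TOKEN_RETRY (-40)	/* mirrors IXGBE_ERR_TOKEN_RETRY above */
#define TOKEN_DELAY_MS  5
#define TOKEN_RETRIES   ((5 * 1000) / TOKEN_DELAY_MS)	/* 1000, as in FW_PHY_TOKEN_RETRIES */

static int get_phy_token(void)
{
	static int calls;

	return (++calls >= 3) ? 0 : ERR_TOKEN_RETRY;	/* stub: succeed on the 3rd try */
}

int main(void)
{
	int retries = TOKEN_RETRIES;
	int status = ERR_TOKEN_RETRY;

	while (--retries) {
		/* 1. take the SWFW hardware semaphore here */
		status = get_phy_token();	/* 2. then ask firmware for the PHY token */
		if (!status)
			break;			/* got both: done */
		/* 3. drop the semaphore before retrying or bailing out */
		if (status != ERR_TOKEN_RETRY)
			break;			/* hard error: give up */
		usleep(TOKEN_DELAY_MS * 1000);
	}
	printf("status %d after %d attempts\n", status, TOKEN_RETRIES - retries);
	return 0;
}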
+ +/** + * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + __always_unused u32 device_type, + u32 data) +{ + struct ixgbe_hic_internal_phy_req write_cmd; + + memset(&write_cmd, 0, sizeof(write_cmd)); + write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + write_cmd.port_number = hw->bus.lan_id; + write_cmd.command_type = FW_INT_PHY_REQ_WRITE; + write_cmd.address = cpu_to_be16(reg_addr); + write_cmd.write_data = cpu_to_be32(data); + + return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd), + IXGBE_HI_COMMAND_TIMEOUT, false); +} + +/** + * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Pointer to read data from the register + **/ +static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + __always_unused u32 device_type, + u32 *data) +{ + union { + struct ixgbe_hic_internal_phy_req cmd; + struct ixgbe_hic_internal_phy_resp rsp; + } hic; + s32 status; + + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.command_type = FW_INT_PHY_REQ_READ; + hic.cmd.address = cpu_to_be16(reg_addr); + + status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* Extract the register value from the response. */ + *data = be32_to_cpu(hic.rsp.read_data); + + return status; +} + /** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface * command assuming that the semaphore is already obtained. 
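ixgbe_read_iosf_sb_reg_x550a() above sends a command and then reads the firmware's reply out of the same buffer, which is why it declares a union of the request and response layouts. A reduced user-space sketch of that in-place request/response pattern; the struct contents are placeholders and the "firmware reply" is simulated.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct phy_req  { uint8_t hdr[4]; uint16_t address; uint32_t write_data; } __attribute__((packed));
struct phy_resp { uint8_t hdr[4]; uint32_t read_data; } __attribute__((packed));

int main(void)
{
	union {
		struct phy_req  cmd;
		struct phy_resp rsp;
	} hic;

	memset(&hic, 0, sizeof(hic));		/* as the driver does before building the command */
	hic.cmd.address = htons(0x4B00);	/* build the request in place */

	/* the firmware would overwrite the same buffer; simulate its reply */
	hic.rsp.read_data = htonl(0xCAFED00D);
	printf("read_data = 0x%08x\n", ntohl(hic.rsp.read_data));
	return 0;
}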
* @hw: pointer to hardware structure @@ -436,8 +562,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, /* one word */ buffer.length = cpu_to_be16(sizeof(u16)); - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); if (status) return status; @@ -487,7 +612,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, buffer.address = cpu_to_be32((offset + current_word) * 2); buffer.length = cpu_to_be16(words_to_read * 2); - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); @@ -770,8 +895,7 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, buffer.data = data; buffer.address = cpu_to_be32(offset * 2); - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); return status; } @@ -813,8 +937,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; buffer.req.checksum = FW_DEFAULT_CHECKSUM; - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); return status; } @@ -861,9 +984,9 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - fw_cmd.port_number = (u8)hw->bus.lan_id; + fw_cmd.port_number = hw->bus.lan_id; - status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + status = ixgbe_host_interface_command(hw, &fw_cmd, sizeof(struct ixgbe_hic_disable_rxen), IXGBE_HI_COMMAND_TIMEOUT, true); @@ -1248,6 +1371,117 @@ i2c_err: } /** + * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP + * @hw: pointer to hardware structure + * + * Configure the the integrated PHY for native SFP support. + */ +static s32 +ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + bool setup_linear = false; + u32 reg_phy_int; + s32 rc; + + /* Check if SFP module is supported and linear */ + rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ + if (rc == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + + if (!rc) + return rc; + + /* Configure internal PHY for native SFI */ + rc = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + ®_phy_int); + if (rc) + return rc; + + if (setup_linear) { + reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING; + reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR; + } else { + reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING; + reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR; + } + + rc = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + reg_phy_int); + if (rc) + return rc; + + /* Setup XFI/SFI internal link */ + return ixgbe_setup_ixfi_x550em(hw, &speed); +} + +/** + * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP + * @hw: pointer to hardware structure + * + * Configure the the integrated PHY for SFP support. 
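The function that follows, ixgbe_setup_mac_link_sfp_x550a(), derives a register slice offset from the LAN id and, for the quad-port CS4223, the MAC instance id, placing the selector in bits 15:12 of the register offset. A quick self-contained sketch of just that arithmetic; the CS4227/CS4223 distinction is taken from the code below, everything else is illustrative.

#include <stdio.h>
#include <stdint.h>

static uint32_t slice_offset(unsigned int lan_id, unsigned int instance_id, int quad_port)
{
	if (quad_port)				/* CS4223: MAC instance supplies bit 1 of the slice */
		return (lan_id + (instance_id << 1)) << 12;
	return lan_id << 12;			/* CS4227: lan_id alone selects the slice */
}

int main(void)
{
	printf("quad port, lan 1, instance 1: 0x%04x\n", slice_offset(1, 1, 1));	/* 0x3000 */
	printf("dual port, lan 1           : 0x%04x\n", slice_offset(1, 0, 0));	/* 0x1000 */
	return 0;
}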
+ */ +static s32 +ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + u32 reg_slice, slice_offset; + bool setup_linear = false; + u16 reg_phy_ext; + s32 rc; + + /* Check if SFP module is supported and linear */ + rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ + if (rc == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + + if (!rc) + return rc; + + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); + + if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF) + return IXGBE_ERR_PHY_ADDR_INVALID; + + /* Get external PHY device id */ + rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB, + IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); + if (rc) + return rc; + + /* When configuring quad port CS4223, the MAC instance is part + * of the slice offset. + */ + if (reg_phy_ext == IXGBE_CS4223_PHY_ID) + slice_offset = (hw->bus.lan_id + + (hw->bus.instance_id << 1)) << 12; + else + slice_offset = hw->bus.lan_id << 12; + + /* Configure CS4227/CS4223 LINE side to proper mode. */ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; + if (setup_linear) + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, + reg_phy_ext); +} + +/** * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed * @hw: pointer to hardware structure * @speed: new link speed @@ -1326,6 +1560,57 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, return 0; } +/** + * ixgbe_setup_sgmii - Set up link for sgmii + * @hw: pointer to hardware structure + */ +static s32 +ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + + return rc; +} + /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers * @hw: pointer to hardware structure **/ @@ -1342,15 +1627,35 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; - mac->ops.setup_mac_link = 
ixgbe_setup_mac_link_sfp_x550em; + mac->ops.setup_fc = ixgbe_setup_fc_x550em; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP_N: + mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n; + break; + case IXGBE_DEV_ID_X550EM_A_SFP: + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550a; + break; + default: + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550em; + break; + } mac->ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed; break; case ixgbe_media_type_copper: mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.setup_fc = ixgbe_setup_fc_generic; mac->ops.check_link = ixgbe_check_link_t_X550em; + return; + case ixgbe_media_type_backplane: + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) + mac->ops.setup_link = ixgbe_setup_sgmii; break; default: + mac->ops.setup_fc = ixgbe_setup_fc_x550em; break; } } @@ -1614,7 +1919,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, s32 status; u32 reg_val; - status = ixgbe_read_iosf_sb_reg_x550(hw, + status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); if (status) @@ -1636,7 +1941,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, /* Restart auto-negotiation. */ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - status = ixgbe_write_iosf_sb_reg_x550(hw, + status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); @@ -1653,9 +1958,9 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) s32 status; u32 reg_val; - status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, - IXGBE_SB_IOSF_TARGET_KX4_PCS0 + - hw->bus.lan_id, ®_val); + status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, ®_val); if (status) return status; @@ -1674,20 +1979,24 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) /* Restart auto-negotiation. */ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; - status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, - IXGBE_SB_IOSF_TARGET_KX4_PCS0 + - hw->bus.lan_id, reg_val); + status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, reg_val); return status; } -/** ixgbe_setup_kr_x550em - Configure the KR PHY. - * @hw: pointer to hardware structure +/** + * ixgbe_setup_kr_x550em - Configure the KR PHY + * @hw: pointer to hardware structure * - * Configures the integrated KR PHY. + * Configures the integrated KR PHY for X550EM_x. **/ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { + if (hw->mac.type != ixgbe_mac_X550EM_x) + return 0; + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); } @@ -1842,6 +2151,86 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, return status; } +/** + * ixgbe_setup_fc_x550em - Set up flow control + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) +{ + bool pause, asm_dir; + u32 reg_val; + s32 rc; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + /* 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. 
+ */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Determine PAUSE and ASM_DIR bits. */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + pause = false; + asm_dir = false; + break; + case ixgbe_fc_tx_pause: + pause = false; + asm_dir = true; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + /* Fallthrough */ + case ixgbe_fc_full: + pause = true; + asm_dir = true; + break; + default: + hw_err(hw, "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR && + hw->device_id != IXGBE_DEV_ID_X550EM_A_KR && + hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L) + return 0; + + rc = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + ®_val); + if (rc) + return rc; + + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + rc = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + reg_val); + + /* This device does not fully support AN. */ + hw->fc.disable_fc_autoneg = true; + + return rc; +} + /** ixgbe_enter_lplu_x550em - Transition to low power states * @hw: pointer to hardware structure * @@ -1939,6 +2328,36 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register + * @hw: pointer to hardware structure + * + * Read NW_MNG_IF_SEL register and save field values. + */ +static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) +{ + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode. + */ + hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set + * PHY address. This register field was has only been used for X552. + */ + if (!hw->phy.nw_mng_if_sel) { + if (hw->mac.type == ixgbe_mac_x550em_a) { + struct ixgbe_adapter *adapter = hw->back; + + e_warn(drv, "nw_mng_if_sel not set\n"); + } + return; + } + + hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; +} + /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init * @hw: pointer to hardware structure * @@ -1953,14 +2372,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) hw->mac.ops.set_lan_id(hw); + ixgbe_read_mng_if_sel_x550em(hw); + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); - - /* Save NW management interface connected on board. This is used - * to determine internal PHY mode. - */ - phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); } /* Identify the PHY or SFP module */ @@ -2023,16 +2439,24 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) /* Detect if there is a copper PHY attached. 
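ixgbe_setup_fc_x550em() above reduces the requested flow-control mode to two advertisement bits, PAUSE and ASM_DIR, and then writes them into KRM_AN_CNTL_1 bits 28/29 (per the defines earlier in this diff). A self-contained sketch of that mapping, including the deliberate "rx_pause advertises the same bits as full" fall-through; the enum and macro names are local stand-ins for the driver's.

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))
#define AN_CNTL_1_SYM_PAUSE BIT(28)
#define AN_CNTL_1_ASM_PAUSE BIT(29)

enum fc_mode { FC_NONE, FC_TX_PAUSE, FC_RX_PAUSE, FC_FULL };

static uint32_t fc_to_an_cntl(enum fc_mode mode)
{
	int pause = (mode == FC_RX_PAUSE || mode == FC_FULL);	/* symmetric PAUSE */
	int asm_dir = (mode != FC_NONE);			/* asymmetric direction */
	uint32_t reg = 0;

	if (pause)
		reg |= AN_CNTL_1_SYM_PAUSE;
	if (asm_dir)
		reg |= AN_CNTL_1_ASM_PAUSE;
	return reg;
}

int main(void)
{
	printf("tx_pause -> 0x%08x\n", fc_to_an_cntl(FC_TX_PAUSE));
	printf("rx_pause -> 0x%08x\n", fc_to_an_cntl(FC_RX_PAUSE));
	printf("full     -> 0x%08x\n", fc_to_an_cntl(FC_FULL));
	return 0;
}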
*/ switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + hw->phy.type = ixgbe_phy_sgmii; + /* Fallthrough */ case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: - media_type = ixgbe_media_type_copper; + media_type = ixgbe_media_type_copper; break; default: media_type = ixgbe_media_type_unknown; @@ -2080,6 +2504,27 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_set_mdio_speed - Set MDIO clock speed + * @hw: pointer to hardware structure + */ +static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) +{ + u32 hlreg0; + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_SFP: + /* Config MDIO clock speed before the first MDIO PHY access */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 &= ~IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + break; + default: + break; + } +} + /** ixgbe_reset_hw_X550em - Perform hardware reset ** @hw: pointer to hardware structure ** @@ -2093,7 +2538,6 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) s32 status; u32 ctrl = 0; u32 i; - u32 hlreg0; bool link_up = false; /* Call adapter stop to disable Tx/Rx and clear interrupts */ @@ -2179,11 +2623,7 @@ mac_reset_top: hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - hlreg0 &= ~IXGBE_HLREG0_MDCSPD; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); - } + ixgbe_set_mdio_speed(hw); if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) ixgbe_setup_mux_ctl(hw); @@ -2206,9 +2646,9 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - pfvfspoof |= (1 << vf_target_shift); + pfvfspoof |= BIT(vf_target_shift); else - pfvfspoof &= ~(1 << vf_target_shift); + pfvfspoof &= ~BIT(vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } @@ -2296,6 +2736,110 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) ixgbe_release_swfw_sync_X540(hw, mask); } +/** + * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and get the shared PHY token as needed + */ +static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +{ + u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + int retries = FW_PHY_TOKEN_RETRIES; + s32 status; + + while (--retries) { + status = 0; + if (hmask) + status = ixgbe_acquire_swfw_sync_X540(hw, hmask); + if (status) + return status; + if (!(mask & IXGBE_GSSR_TOKEN_SM)) + return 0; + + status = ixgbe_get_phy_token(hw); + if (!status) + return 0; + if (hmask) + ixgbe_release_swfw_sync_X540(hw, hmask); + if (status != IXGBE_ERR_TOKEN_RETRY) + return status; + msleep(FW_PHY_TOKEN_DELAY); + } + + return status; +} + +/** + * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Release the SWFW semaphore and puts the shared PHY token as 
+/**
+ * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts the shared PHY token as needed
+ */
+static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+
+ if (mask & IXGBE_GSSR_TOKEN_SM)
+ ixgbe_put_phy_token(hw);
+
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+}
+
+/**
+ * ixgbe_read_phy_reg_x550a - Reads specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
+ * instances.
+ */
+static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_x550a - Writes specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register using the SWFW lock and PHY Token.
+ * The PHY Token is needed since the MDIO is shared between two MAC instances.
+ */
+static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
 #define X550_COMMON_MAC \
 .init_hw = &ixgbe_init_hw_generic, \
 .start_hw = &ixgbe_start_hw_X540, \
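For context on the two accessors above: callers never take the PHY token directly. They go through the phy.ops hooks wired up in the tables below, so the token-aware variants are selected automatically on x550em_a parts where the MDIO bus is shared between the two MAC instances. A minimal illustrative caller might look like the sketch below; the function name is hypothetical and not from this diff, while the ops hook and MDIO constants are ones the driver already uses.

/* Sketch only: reading an autonegotiation register through the ops table. */
static s32 ixgbe_read_an_advertisement(struct ixgbe_hw *hw, u16 *reg)
{
	/* On x550em_a, phy.ops.read_reg points at ixgbe_read_phy_reg_x550a(),
	 * so both the SWFW semaphore and the firmware PHY token are taken
	 * before the shared MDIO bus is touched, and released afterwards.
	 */
	return hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, reg);
}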
@@ -2337,12 +2881,10 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
 .get_thermal_sensor_data = NULL, \
 .init_thermal_sensor_thresh = NULL, \
- .prot_autoc_read = &prot_autoc_read_generic, \
- .prot_autoc_write = &prot_autoc_write_generic, \
 .enable_rx = &ixgbe_enable_rx_generic, \
 .disable_rx = &ixgbe_disable_rx_x550, \
 
-static struct ixgbe_mac_operations mac_ops_X550 = {
+static const struct ixgbe_mac_operations mac_ops_X550 = {
 X550_COMMON_MAC
 .reset_hw = &ixgbe_reset_hw_X540,
 .get_media_type = &ixgbe_get_media_type_X540,
@@ -2354,20 +2896,45 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
 .setup_sfp = NULL,
 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
 .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .prot_autoc_read = prot_autoc_read_generic,
+ .prot_autoc_write = prot_autoc_write_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
 };
 
-static struct ixgbe_mac_operations mac_ops_X550EM_x = {
+static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
 X550_COMMON_MAC
 .reset_hw = &ixgbe_reset_hw_X550em,
 .get_media_type = &ixgbe_get_media_type_X550em,
 .get_san_mac_addr = NULL,
 .get_wwn_prefix = NULL,
- .setup_link = NULL, /* defined later */
+ .setup_link = &ixgbe_setup_mac_link_X540,
 .get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
 .get_bus_info = &ixgbe_get_bus_info_X550em,
 .setup_sfp = ixgbe_setup_sfp_modules_X550em,
 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
 .release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .setup_fc = NULL, /* defined later */
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
+};
+
+static struct ixgbe_mac_operations mac_ops_x550em_a = {
+ X550_COMMON_MAC
+ .reset_hw = ixgbe_reset_hw_X550em,
+ .get_media_type = ixgbe_get_media_type_X550em,
+ .get_san_mac_addr = NULL,
+ .get_wwn_prefix = NULL,
+ .setup_link = NULL, /* defined later */
+ .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
+ .get_bus_info = ixgbe_get_bus_info_X550em,
+ .setup_sfp = ixgbe_setup_sfp_modules_X550em,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
+ .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
+ .setup_fc = ixgbe_setup_fc_x550em,
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
 };
 
 #define X550_COMMON_EEP \
@@ -2379,12 +2946,12 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = {
 .update_checksum = &ixgbe_update_eeprom_checksum_X550, \
 .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
 
-static struct ixgbe_eeprom_operations eeprom_ops_X550 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
 X550_COMMON_EEP
 .init_params = &ixgbe_init_eeprom_params_X550,
 };
 
-static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
 X550_COMMON_EEP
 .init_params = &ixgbe_init_eeprom_params_X540,
 };
@@ -2398,23 +2965,25 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
 .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
- .read_reg = &ixgbe_read_phy_reg_generic, \
- .write_reg = &ixgbe_write_phy_reg_generic, \
 .setup_link = &ixgbe_setup_phy_link_generic, \
 .set_phy_power = NULL, \
 .check_overtemp = &ixgbe_tn_check_overtemp, \
 .get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
 
-static struct ixgbe_phy_operations phy_ops_X550 = {
+static const struct ixgbe_phy_operations phy_ops_X550 = {
 X550_COMMON_PHY
 .init = NULL,
 .identify = &ixgbe_identify_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
 };
 
-static struct ixgbe_phy_operations phy_ops_X550EM_x = {
+static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
 X550_COMMON_PHY
 .init = &ixgbe_init_phy_ops_X550em,
 .identify = &ixgbe_identify_phy_x550em,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
 .read_i2c_combined = &ixgbe_read_i2c_combined_generic,
 .write_i2c_combined = &ixgbe_write_i2c_combined_generic,
 .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
@@ -2422,6 +2991,14 @@ static struct ixgbe_phy_operations phy_ops_X550EM_x = {
 &ixgbe_write_i2c_combined_generic_unlocked,
 };
 
+static const struct ixgbe_phy_operations phy_ops_x550em_a = {
+ X550_COMMON_PHY
+ .init = &ixgbe_init_phy_ops_X550em,
+ .identify = &ixgbe_identify_phy_x550em,
+ .read_reg = &ixgbe_read_phy_reg_x550a,
+ .write_reg = &ixgbe_write_phy_reg_x550a,
+};
+
 static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
 IXGBE_MVALS_INIT(X550)
 };
@@ -2430,7 +3007,11 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
 IXGBE_MVALS_INIT(X550EM_x)
 };
 
-struct ixgbe_info ixgbe_X550_info = {
+static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X550EM_a)
+};
+
+const struct ixgbe_info ixgbe_X550_info = {
 .mac = ixgbe_mac_X550,
 .get_invariants = &ixgbe_get_invariants_X540,
 .mac_ops = &mac_ops_X550,
@@ -2440,7 +3021,7 @@ struct ixgbe_info ixgbe_X550_info = {
 .mvals = ixgbe_mvals_X550,
 };
 
-struct ixgbe_info ixgbe_X550EM_x_info = {
+const struct ixgbe_info ixgbe_X550EM_x_info = {
 .mac = ixgbe_mac_X550EM_x,
 .get_invariants = &ixgbe_get_invariants_X550_x,
 .mac_ops = &mac_ops_X550EM_x,
@@ -2449,3 +3030,13 @@ struct ixgbe_info ixgbe_X550EM_x_info = {
 .mbx_ops = &mbx_ops_generic,
 .mvals = ixgbe_mvals_X550EM_x,
 };
+
+const struct ixgbe_info ixgbe_x550em_a_info = {
+ .mac = ixgbe_mac_x550em_a,
+ .get_invariants = &ixgbe_get_invariants_X550_x,
+ .mac_ops = &mac_ops_x550em_a,
+ .eeprom_ops = &eeprom_ops_X550EM_x,
+ .phy_ops = &phy_ops_x550em_a,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
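For completeness: an ixgbe_info table such as the new ixgbe_x550em_a_info is consumed by the probe path in ixgbe_main.c, which copies the per-MAC operation tables into the live ixgbe_hw before any hardware access. The sketch below shows only the general pattern; the helper name is invented, and the exact probe wiring (PCI id table entry and board enum) is outside this diff.

/* Illustrative sketch, not part of this commit: how an ixgbe_info table is
 * typically applied to a hardware structure at probe time. The function
 * name is hypothetical; the field names follow struct ixgbe_info as used
 * in this file.
 */
static void ixgbe_apply_info(struct ixgbe_hw *hw, const struct ixgbe_info *ii)
{
	hw->mac.type = ii->mac;
	hw->mac.ops = *ii->mac_ops;		/* e.g. mac_ops_x550em_a */
	hw->phy.ops = *ii->phy_ops;		/* e.g. phy_ops_x550em_a */
	hw->eeprom.ops = *ii->eeprom_ops;	/* e.g. eeprom_ops_X550EM_x */
	hw->mbx.ops = *ii->mbx_ops;		/* mbx_ops_generic */
	hw->mvals = ii->mvals;			/* register offset table */
}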