author     André Fabian Silva Delgado <emulatorman@parabola.nu>    2016-09-11 04:34:46 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>    2016-09-11 04:34:46 -0300
commit     863981e96738983919de841ec669e157e6bdaeb0
tree       d6d89a12e7eb8017837c057935a2271290907f76  /drivers/net/ethernet/intel/i40evf
parent     8dec7c70575785729a6a9e6719a955e9c545bcab
Linux-libre 4.7.1-gnupck-4.7.1-gnu
Diffstat (limited to 'drivers/net/ethernet/intel/i40evf')
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h          1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h     70
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c          2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_devids.h          2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c         1039
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h          114
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h           51
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h       45
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h              49
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c     329
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c        503
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c    145
12 files changed, 1132 insertions, 1218 deletions
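
Among the changes below, i40e_txrx.h replaces the old TXD_USE_COUNT(S) macro (DIV_ROUND_UP over 8192) with i40e_txd_use_count(), which computes DIV_ROUND_UP(size, 12288) via a 32-bit reciprocal multiply because 12K is not a power of two and a hardware divide would be costly in the transmit hot path. The following user-space sketch is not part of the patch: the constants and the helper body are copied from the diff, while the uint32_t/uint64_t types and the main() check harness are illustrative substitutions that simply verify the helper against ordinary round-up division.

#include <stdio.h>
#include <stdint.h>

#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))	/* 12288 */

/* Same math as the patched i40e_txd_use_count(): DIV_ROUND_UP(size, 12K)
 * done as a reciprocal multiply; exact for any realistic skb size.
 */
static unsigned int i40e_txd_use_count(unsigned int size)
{
	const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
	unsigned int adjust = ~(uint32_t)0;

	/* if we rounded up on the reciprocal pull down the adjustment */
	if ((max * reciprocal) > adjust)
		adjust = ~(uint32_t)(reciprocal - 1);

	return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
}

int main(void)
{
	unsigned int size;

	for (size = 1; size < 64 * 1024; size++) {
		/* reference result: plain round-up division */
		unsigned int want = (size + I40E_MAX_DATA_PER_TXD_ALIGNED - 1) /
				    I40E_MAX_DATA_PER_TXD_ALIGNED;

		if (i40e_txd_use_count(size) != want) {
			printf("mismatch at size=%u\n", size);
			return 1;
		}
	}
	printf("i40e_txd_use_count() matches DIV_ROUND_UP(size, 12288)\n");
	return 0;
}

Because the divisor is a compile-time constant, the reciprocal folds away and the per-packet cost is one 64-bit multiply and a shift. The full patch follows.
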
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index a3eae5d9a..1f9b3b5d9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -97,7 +97,6 @@ struct i40e_adminq_info { u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_release_on_done; struct mutex asq_mutex; /* Send queue lock */ struct mutex arq_mutex; /* Receive queue lock */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index aad8d6277..3114dcfa1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -78,17 +78,17 @@ struct i40e_aq_desc { #define I40E_AQ_FLAG_EI_SHIFT 14 #define I40E_AQ_FLAG_FE_SHIFT 15 -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { @@ -205,10 +205,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, - /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, - /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, i40e_aqc_opc_set_phy_config = 0x0601, @@ -426,6 +422,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 #define I40E_AQ_CAP_ID_WSR_PROT 0x0064 +#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 @@ -1582,27 +1579,6 @@ struct i40e_aqc_configure_partition_bw_data { I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); -/* Get and set the active HMC resource profile and status. 
- * (direct 0x0500) and (direct 0x0501) - */ -struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); - -enum i40e_aq_hmc_profile { - /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, -}; - -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F - /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ @@ -1649,11 +1625,11 @@ enum i40e_aq_phy_type { enum i40e_aq_link_speed { I40E_LINK_SPEED_UNKNOWN = 0, - I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), - I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), - I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), - I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) + I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT) }; struct i40e_aqc_module_desc { @@ -1924,9 +1900,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) + BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) #define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) +#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 @@ -2195,7 +2171,7 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); struct i40e_aqc_get_set_rss_key { -#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) @@ -2215,14 +2191,14 @@ struct i40e_aqc_get_set_rss_key_data { I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); struct i40e_aqc_get_set_rss_lut { -#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) __le16 vsi_id; #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ + BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 771ac6ad8..8f6420400 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -58,6 +58,8 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: + case 
I40E_DEV_ID_SFP_I_X722: + case I40E_DEV_ID_QSFP_I_X722: hw->mac.type = I40E_MAC_X722; break; case I40E_DEV_ID_X722_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index ca8b58c3d..d34972bab 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -44,6 +44,8 @@ #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_SFP_I_X722 0x37D3 +#define I40E_DEV_ID_QSFP_I_X722 0x37D4 #define I40E_DEV_ID_X722_VF 0x37CD #define I40E_DEV_ID_X722_VF_HV 0x37D9 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index cea97daa8..79d99cd91 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -155,19 +155,21 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) /** * i40e_clean_tx_irq - Reclaim resources after transmit completes - * @tx_ring: tx ring to clean - * @budget: how many cleans we're allowed + * @vsi: the VSI we care about + * @tx_ring: Tx ring to clean + * @napi_budget: Used to determine if we are in netpoll * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) +static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, + struct i40e_ring *tx_ring, int napi_budget) { u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; - unsigned int total_packets = 0; - unsigned int total_bytes = 0; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = vsi->work_limit; tx_buf = &tx_ring->tx_bi[i]; tx_desc = I40E_TX_DESC(tx_ring, i); @@ -197,7 +199,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) total_packets += tx_buf->gso_segs; /* free the skb */ - dev_kfree_skb_any(tx_buf->skb); + napi_consume_skb(tx_buf->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -267,7 +269,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + !test_bit(__I40E_DOWN, &vsi->state) && (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } @@ -285,7 +287,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { + !test_bit(__I40E_DOWN, &vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; @@ -494,7 +496,6 @@ err: void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) { struct device *dev = rx_ring->dev; - struct i40e_rx_buffer *rx_bi; unsigned long bi_size; u16 i; @@ -502,48 +503,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; - if (ring_is_ps_enabled(rx_ring)) { - int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; - - rx_bi = &rx_ring->rx_bi[0]; - if (rx_bi->hdr_buf) { - dma_free_coherent(dev, - bufsz, - rx_bi->hdr_buf, - rx_bi->dma); - for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - rx_bi->dma = 0; - rx_bi->hdr_buf = NULL; - } - } - } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - if (rx_bi->dma) { - 
dma_unmap_single(dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; - } + struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + if (rx_bi->skb) { dev_kfree_skb(rx_bi->skb); rx_bi->skb = NULL; } - if (rx_bi->page) { - if (rx_bi->page_dma) { - dma_unmap_page(dev, - rx_bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - rx_bi->page_dma = 0; - } - __free_page(rx_bi->page); - rx_bi->page = NULL; - rx_bi->page_offset = 0; - } + if (!rx_bi->page) + continue; + + dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE); + __free_pages(rx_bi->page, 0); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; } bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; @@ -552,6 +527,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } @@ -576,37 +552,6 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring) } /** - * i40evf_alloc_rx_headers - allocate rx header buffers - * @rx_ring: ring to alloc buffers - * - * Allocate rx header buffers for the entire ring. As these are static, - * this is only called when setting up a new ring. - **/ -void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - struct i40e_rx_buffer *rx_bi; - dma_addr_t dma; - void *buffer; - int buf_size; - int i; - - if (rx_ring->rx_bi[0].hdr_buf) - return; - /* Make sure the buffers don't cross cache line boundaries. */ - buf_size = ALIGN(rx_ring->rx_hdr_len, 256); - buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, - &dma, GFP_KERNEL); - if (!buffer) - return; - for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - rx_bi->dma = dma + (i * buf_size); - rx_bi->hdr_buf = buffer + (i * buf_size); - } -} - -/** * i40evf_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -627,9 +572,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ - rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) - ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) - : rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); @@ -640,6 +583,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) goto err; } + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -658,6 +602,10 @@ err: static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -668,160 +616,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split - * @rx_ring: ring to place buffers on - * @cleaned_count: number of buffers to replace + * i40e_alloc_mapped_page - recycle or make a new page + * @rx_ring: ring to use + * @bi: rx_buffer struct to modify * - * Returns true if any errors on allocation + * Returns true if the page was successfully allocated or + * reused. 
**/ -bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi) { - u16 i = rx_ring->next_to_use; - union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; - const int current_node = numa_node_id(); + struct page *page = bi->page; + dma_addr_t dma; - /* do nothing if no valid netdev defined */ - if (!rx_ring->netdev || !cleaned_count) - return false; + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } - while (cleaned_count--) { - rx_desc = I40E_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_bi[i]; + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } - if (bi->skb) /* desc is in use */ - goto no_buffers; + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - /* If we've been moved to a different NUMA node, release the - * page so we can get a new one on the current node. + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use */ - if (bi->page && page_to_nid(bi->page) != current_node) { - dma_unmap_page(rx_ring->dev, - bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - __free_page(bi->page); - bi->page = NULL; - bi->page_dma = 0; - rx_ring->rx_stats.realloc_count++; - } else if (bi->page) { - rx_ring->rx_stats.page_reuse_count++; - } - - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - 0, - PAGE_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - __free_page(bi->page); - bi->page = NULL; - bi->page_dma = 0; - bi->page_offset = 0; - goto no_buffers; - } - bi->page_offset = 0; - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. 
- */ - rx_desc->read.pkt_addr = - cpu_to_le64(bi->page_dma + bi->page_offset); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - i++; - if (i == rx_ring->count) - i = 0; + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, 0); + rx_ring->rx_stats.alloc_page_failed++; + return false; } - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; - return false; + return true; +} -no_buffers: - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); +/** + * i40e_receive_skb - Send a completed packet up the stack + * @rx_ring: rx ring in play + * @skb: packet to send up + * @vlan_tag: vlan tag for packet + **/ +static void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) +{ + struct i40e_q_vector *q_vector = rx_ring->q_vector; - /* make sure to come back via polling to try again after - * allocation failure - */ - return true; + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (vlan_tag & VLAN_VID_MASK)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + napi_gro_receive(&q_vector->napi, skb); } /** - * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer + * i40evf_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace * - * Returns true if any errors on allocation + * Returns false if all allocations were successful, true if any fail **/ -bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) { - u16 i = rx_ring->next_to_use; + u16 ntu = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; - struct sk_buff *skb; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; - while (cleaned_count--) { - rx_desc = I40E_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_bi[i]; - skb = bi->skb; - - if (!skb) { - skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len, - GFP_ATOMIC | - __GFP_NOWARN); - if (!skb) { - rx_ring->rx_stats.alloc_buff_failed++; - goto no_buffers; - } - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - bi->skb = skb; - } + rx_desc = I40E_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_bi[ntu]; - if (!bi->dma) { - bi->dma = dma_map_single(rx_ring->dev, - skb->data, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->dma)) { - rx_ring->rx_stats.alloc_buff_failed++; - bi->dma = 0; - dev_kfree_skb(bi->skb); - bi->skb = NULL; - goto no_buffers; - } - } + do { + if (!i40e_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); rx_desc->read.hdr_addr = 0; - i++; - if (i == rx_ring->count) - i = 0; - } - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); + rx_desc++; + bi++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = I40E_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_bi; + ntu = 0; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.qword1.status_error_len = 0; + + cleaned_count--; + } while (cleaned_count); + + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); return false; no_buffers: - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); /* make sure to come back via polling to try again after * allocation failure @@ -830,41 +740,35 @@ no_buffers: } /** - * i40e_receive_skb - Send a completed packet up the stack - * @rx_ring: rx ring in play - * @skb: packet to send up - * @vlan_tag: vlan tag for packet - **/ -static void i40e_receive_skb(struct i40e_ring *rx_ring, - struct sk_buff *skb, u16 vlan_tag) -{ - struct i40e_q_vector *q_vector = rx_ring->q_vector; - - if (vlan_tag & VLAN_VID_MASK) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - - napi_gro_receive(&q_vector->napi, skb); -} - -/** * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum * @vsi: the VSI we care about * @skb: skb currently being received and modified - * @rx_status: status value of last descriptor in packet - * @rx_error: error value of last descriptor in packet - * @rx_ptype: ptype value of last descriptor in packet + * @rx_desc: the receive descriptor + * + * skb->protocol must be set before this function is called **/ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, - u32 rx_status, - u32 rx_error, - u16 rx_ptype) + union i40e_rx_desc *rx_desc) { - struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); - bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; + struct i40e_rx_ptype_decoded decoded; + u32 rx_error, rx_status; + bool ipv4, ipv6; + u8 ptype; + u64 qword; + + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + decoded = decode_rx_desc_ptype(ptype); skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); + /* Rx csum enabled and ip headers found? */ if (!(vsi->netdev->features & NETIF_F_RXCSUM)) return; @@ -904,20 +808,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* The hardware supported by this driver does not validate outer - * checksums for tunneled VXLAN or GENEVE frames. I don't agree - * with it but the specification states that you "MAY validate", it - * doesn't make it a hard requirement so if we have validated the - * inner checksum report CHECKSUM_UNNECESSARY. + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. 
*/ - - ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); - - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = ipv4_tunnel || ipv6_tunnel; + if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT) + skb->csum_level = 1; + + /* Only report checksum unnecessary for TCP, UDP, or SCTP */ + switch (decoded.inner_prot) { + case I40E_RX_PTYPE_INNER_PROT_TCP: + case I40E_RX_PTYPE_INNER_PROT_UDP: + case I40E_RX_PTYPE_INNER_PROT_SCTP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + /* fall though */ + default: + break; + } return; @@ -931,7 +838,7 @@ checksum_fail: * * Returns a hash type to be used by skb_set_hash **/ -static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) +static inline int i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -959,7 +866,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, u8 rx_ptype) { u32 hash; - const __le64 rss_mask = + const __le64 rss_mask = cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); @@ -973,313 +880,411 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, } /** - * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split - * @rx_ring: rx ring to clean - * @budget: how many cleans we're allowed + * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * @rx_ptype: the packet type decoded by hardware * - * Returns true if there's any budget left (e.g. the clean is finished) + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, protocol, and + * other fields within the skb. 
**/ -static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) +static inline +void i40evf_process_skb_fields(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype) { - unsigned int total_rx_bytes = 0, total_rx_packets = 0; - u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; - u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - struct i40e_vsi *vsi = rx_ring->vsi; - u16 i = rx_ring->next_to_clean; - union i40e_rx_desc *rx_desc; - u32 rx_error, rx_status; - bool failure = false; - u8 rx_ptype; - u64 qword; - u32 copysize; + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - do { - struct i40e_rx_buffer *rx_bi; - struct sk_buff *skb; - u16 vlan_tag; - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - failure = failure || - i40evf_alloc_rx_buffers_ps(rx_ring, - cleaned_count); - cleaned_count = 0; - } + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); - i = rx_ring->next_to_clean; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); - if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) - break; + skb_record_rx_queue(skb, rx_ring->queue_index); +} - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * DD bit is set. - */ - dma_rmb(); - /* sync header buffer for reading */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); - rx_bi = &rx_ring->rx_bi[i]; - skb = rx_bi->skb; - if (likely(!skb)) { - skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_hdr_len, - GFP_ATOMIC | - __GFP_NOWARN); - if (!skb) { - rx_ring->rx_stats.alloc_buff_failed++; - failure = true; - break; - } +/** + * i40e_pull_tail - i40e specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an i40e specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. 
+ */ +static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); - } - rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> - I40E_RXD_QW1_LENGTH_HBUF_SHIFT; - rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> - I40E_RXD_QW1_LENGTH_SPH_SHIFT; - - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); - rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; - /* sync half-page for reading */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->page_dma, - rx_bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - prefetch(page_address(rx_bi->page) + rx_bi->page_offset); - rx_bi->skb = NULL; - cleaned_count++; - copysize = 0; - if (rx_hbo || rx_sph) { - int len; - - if (rx_hbo) - len = I40E_RX_HDR_SIZE; - else - len = rx_header_len; - memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); - } else if (skb->len == 0) { - int len; - unsigned char *va = page_address(rx_bi->page) + - rx_bi->page_offset; - - len = min(rx_packet_len, rx_ring->rx_hdr_len); - memcpy(__skb_put(skb, len), va, len); - copysize = len; - rx_packet_len -= len; - } - /* Get the rest of the data if this was a header split */ - if (rx_packet_len) { - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - rx_bi->page, - rx_bi->page_offset + copysize, - rx_packet_len, I40E_RXBUFFER_2048); - - /* If the page count is more than 2, then both halves - * of the page are used and we need to free it. Do it - * here instead of in the alloc code. Otherwise one - * of the half-pages might be released between now and - * then, and we wouldn't know which one to use. - * Don't call get_page and free_page since those are - * both expensive atomic operations that just change - * the refcount in opposite directions. Just give the - * page to the stack; he can have our refcount. - */ - if (page_count(rx_bi->page) > 2) { - dma_unmap_page(rx_ring->dev, - rx_bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - rx_bi->page = NULL; - rx_bi->page_dma = 0; - rx_ring->rx_stats.realloc_count++; - } else { - get_page(rx_bi->page); - /* switch to the other half-page here; the - * allocation code programs the right addr - * into HW. If we haven't used this half-page, - * the address won't be changed, and HW can - * just use it next time through. - */ - rx_bi->page_offset ^= PAGE_SIZE / 2; - } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ + pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); - } - I40E_RX_INCREMENT(rx_ring, i); + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - if (unlikely( - !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { - struct i40e_rx_buffer *next_buffer; + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} - next_buffer = &rx_ring->rx_bi[i]; - next_buffer->skb = skb; - rx_ring->rx_stats.non_eop_descs++; - continue; - } +/** + * i40e_cleanup_headers - Correct empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being fixed + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) +{ + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + i40e_pull_tail(rx_ring, skb); - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { - dev_kfree_skb_any(skb); - continue; - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + return false; +} - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - total_rx_packets++; +/** + * i40e_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *old_buff) +{ + struct i40e_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + new_buff = &rx_ring->rx_bi[nta]; - i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) - ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) - : 0; -#ifdef I40E_FCOE - if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { - dev_kfree_skb_any(skb); - continue; - } + /* transfer page from old buffer to new buffer */ + *new_buff = *old_buff; +} + +/** + * i40e_page_is_reserved - check if reuse is possible + * @page: page struct to check + */ +static inline bool i40e_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. 
+ * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; +#if (PAGE_SIZE < 8192) + unsigned int truesize = I40E_RXBUFFER_2048; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; #endif - i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_desc->wb.qword1.status_error_len = 0; + /* will the data fit in the skb we allocated? if so, just + * copy it as it is pretty small anyway + */ + if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; - } while (likely(total_rx_packets < budget)); + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - rx_ring->q_vector->rx.total_packets += total_rx_packets; - rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!i40e_page_is_reserved(page))) + return true; - return failure ? budget : total_rx_packets; + /* this page cannot be reused so discard it */ + __free_pages(page, 0); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(i40e_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + get_page(rx_buffer->page); + + return true; } /** - * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer - * @rx_ring: rx ring to clean - * @budget: how many cleans we're allowed + * i40evf_fetch_rx_buffer - Allocate skb and populate it + * @rx_ring: rx descriptor ring to transact packets on + * @rx_desc: descriptor containing info written by hardware * - * Returns number of packets cleaned + * This function allocates an skb on the fly, and populates it with the page + * data from the current receive descriptor, taking care to set up the skb + * correctly, as well as handling calling the page recycle function if + * necessary. 
+ */ +static inline +struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc) +{ + struct i40e_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + I40E_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_buff_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } else { + rx_buffer->skb = NULL; + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + I40E_RXBUFFER_2048, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + i40e_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +/** + * i40e_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool i40e_is_non_eop(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(I40E_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ +#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) + if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) + return false; + + /* place skb in next buffer to be received */ + rx_ring->rx_bi[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. 
+ * + * Returns amount of work completed **/ -static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - struct i40e_vsi *vsi = rx_ring->vsi; - union i40e_rx_desc *rx_desc; - u32 rx_error, rx_status; - u16 rx_packet_len; bool failure = false; - u8 rx_ptype; - u64 qword; - u16 i; - do { - struct i40e_rx_buffer *rx_bi; + while (likely(total_rx_packets < budget)) { + union i40e_rx_desc *rx_desc; struct sk_buff *skb; + u32 rx_status; u16 vlan_tag; + u8 rx_ptype; + u64 qword; + /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { failure = failure || - i40evf_alloc_rx_buffers_1buf(rx_ring, - cleaned_count); + i40evf_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } - i = rx_ring->next_to_clean; - rx_desc = I40E_RX_DESC(rx_ring, i); + rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + I40E_RXD_QW1_STATUS_SHIFT; if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; + /* status_error_len will always be zero for unused descriptors + * because it's cleared in cleanup, and overlaps with hdr_addr + * which is always zero because packet split isn't used, if the + * hardware wrote DD then it will be non-zero + */ + if (!rx_desc->wb.qword1.status_error_len) + break; + /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we know the * DD bit is set. 
*/ dma_rmb(); - rx_bi = &rx_ring->rx_bi[i]; - skb = rx_bi->skb; - prefetch(skb->data); - - rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); + skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc); + if (!skb) + break; - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; - rx_bi->skb = NULL; cleaned_count++; - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - skb_put(skb, rx_packet_len); - dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; - - I40E_RX_INCREMENT(rx_ring, i); - - if (unlikely( - !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { - rx_ring->rx_stats.non_eop_descs++; + if (i40e_is_non_eop(rx_ring, rx_desc, skb)) continue; - } - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { + /* ERR_MASK will only have valid bits if EOP set, and + * what we are doing here is actually checking + * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in + * the error field + */ + if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { dev_kfree_skb_any(skb); continue; } - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + if (i40e_cleanup_headers(rx_ring, skb)) + continue; + /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - total_rx_packets++; - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + /* populate checksum, VLAN, and protocol */ + i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + - i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? + le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; - vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) - ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) - : 0; i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_desc->wb.qword1.status_error_len = 0; - } while (likely(total_rx_packets < budget)); + /* update budget accounting */ + total_rx_packets++; + } u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; @@ -1288,6 +1293,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + /* guarantee a trip back through this routine if there was a failure */ return failure ? budget : total_rx_packets; } @@ -1411,9 +1417,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. 
*/ i40e_for_each_ring(ring, q_vector->tx) { - clean_complete = clean_complete && - i40e_clean_tx_irq(ring, vsi->work_limit); - arm_wb = arm_wb || ring->arm_wb; + if (!i40e_clean_tx_irq(vsi, ring, budget)) { + clean_complete = false; + continue; + } + arm_wb |= ring->arm_wb; ring->arm_wb = false; } @@ -1427,16 +1435,12 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { - int cleaned; - - if (ring_is_ps_enabled(ring)) - cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); - else - cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; - /* if we didn't clean as many as budgeted, we must be done */ - clean_complete = clean_complete && (budget_per_ring > cleaned); + /* if we clean as many as budgeted, we must not be done */ + if (cleaned >= budget_per_ring) + clean_complete = false; } /* If work not completed, return budget and polling will return */ @@ -1514,15 +1518,13 @@ out: /** * i40e_tso - set up the tso context descriptor - * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u8 *hdr_len, u64 *cd_type_cmd_tso_mss) +static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u64 cd_cmd, cd_tso_len, cd_mss; union { @@ -1559,16 +1561,22 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, ip.v6->payload_len = 0; } - if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_IPXIP4 | + SKB_GSO_IPXIP6 | + SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + l4.udp->len = 0; + /* determine offset of outer transport header */ l4_offset = l4.hdr - skb->data; /* remove payload length from outer checksum */ - paylen = (__force u16)l4.udp->check; - paylen += ntohs(1) * (u16)~(skb->len - l4_offset); - l4.udp->check = ~csum_fold((__force __wsum)paylen); + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.udp->check, htonl(paylen)); } /* reset pointers to inner headers */ @@ -1588,9 +1596,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, l4_offset = l4.hdr - skb->data; /* remove payload length from inner checksum */ - paylen = (__force u16)l4.tcp->check; - paylen += ntohs(1) * (u16)~(skb->len - l4_offset); - l4.tcp->check = ~csum_fold((__force __wsum)paylen); + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; @@ -1630,7 +1637,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, unsigned char *hdr; } l4; unsigned char *exthdr; - u32 offset, cmd = 0, tunnel = 0; + u32 offset, cmd = 0; __be16 frag_off; u8 l4_proto = 0; @@ -1644,6 +1651,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { + u32 tunnel = 0; /* define outer network header type */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { tunnel |= 
(*tx_flags & I40E_TX_FLAGS_TSO) ? @@ -1661,13 +1669,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, &l4_proto, &frag_off); } - /* compute outer L3 header size */ - tunnel |= ((l4.hdr - ip.hdr) / 4) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; - - /* switch IP header pointer from outer to inner header */ - ip.hdr = skb_inner_network_header(skb); - /* define outer transport */ switch (l4_proto) { case IPPROTO_UDP: @@ -1678,6 +1679,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, tunnel |= I40E_TXD_CTX_GRE_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + l4.hdr = skb_inner_network_header(skb); + break; default: if (*tx_flags & I40E_TX_FLAGS_TSO) return -1; @@ -1686,12 +1692,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, return 0; } + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + /* compute tunnel header size */ tunnel |= ((ip.hdr - l4.hdr) / 2) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; /* indicate if we need to offload outer UDP header */ if ((*tx_flags & I40E_TX_FLAGS_TSO) && + !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; @@ -1935,6 +1949,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = first; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; + if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; @@ -1942,12 +1958,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, dma_unmap_len_set(tx_bi, len, size); dma_unmap_addr_set(tx_bi, dma, dma); + /* align size to end of page */ + max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); tx_desc->buffer_addr = cpu_to_le64(dma); while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, - I40E_MAX_DATA_PER_TXD, td_tag); + max_data, td_tag); tx_desc++; i++; @@ -1958,9 +1976,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, i = 0; } - dma += I40E_MAX_DATA_PER_TXD; - size -= I40E_MAX_DATA_PER_TXD; + dma += max_data; + size -= max_data; + max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; tx_desc->buffer_addr = cpu_to_le64(dma); } @@ -2109,7 +2128,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (i40e_chk_linearize(skb, count)) { if (__skb_linearize(skb)) goto out_drop; - count = TXD_USE_COUNT(skb->len); + count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } @@ -2140,7 +2159,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); + tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 0429553fe..0112277e5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -102,8 +102,8 @@ enum i40e_dyn_idx_t { (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? 
\ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) -/* Supported Rx Buffer Sizes */ -#define I40E_RXBUFFER_512 512 /* Used for packet split */ +/* Supported Rx Buffer Sizes (a multiple of 128) */ +#define I40E_RXBUFFER_256 256 #define I40E_RXBUFFER_2048 2048 #define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ #define I40E_RXBUFFER_4096 4096 @@ -114,9 +114,28 @@ enum i40e_dyn_idx_t { * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, * this adds up to 512 bytes of extra data meaning the smallest allocation * we could have is 1K. - * i.e. RXBUFFER_512 --> size-1024 slab + * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab) + * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab) */ -#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512 +#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 +#define i40e_rx_desc i40e_32byte_rx_desc + +/** + * i40e_test_staterr - tests bits in Rx descriptor status and error fields + * @rx_desc: pointer to receive descriptor (in le64 format) + * @stat_err_bits: value to mask + * + * This function does some fast chicanery in order to return the + * value of the mask which is really only used for boolean tests. + * The status_error_len doesn't need to be shifted because it begins + * at offset zero. + */ +static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, + const u64 stat_err_bits) +{ + return !!(rx_desc->wb.qword1.status_error_len & + cpu_to_le64(stat_err_bits)); +} /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ @@ -142,14 +161,41 @@ enum i40e_dyn_idx_t { prefetch((n)); \ } while (0) -#define i40e_rx_desc i40e_32byte_rx_desc - #define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 -#define I40E_MAX_DATA_PER_TXD 8192 + +/* The size limit for a transmit buffer in a descriptor is (16K - 1). + * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. + */ +#define I40E_MAX_READ_REQ_SIZE 4096 +#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1) +#define I40E_MAX_DATA_PER_TXD_ALIGNED \ + (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1)) + +/* This ugly bit of math is equivalent to DIV_ROUNDUP(size, X) where X is + * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact + * that 12K is not a power of 2 and division is expensive. It is used to + * approximate the number of descriptors used per linear buffer. Note + * that this will overestimate in some cases as it doesn't account for the + * fact that we will add up to 4K - 1 in aligning the 12K buffer, however + * the error should not impact things much as large buffers usually mean + * we will use fewer descriptors then there are frags in an skb. 
+ */ +static inline unsigned int i40e_txd_use_count(unsigned int size) +{ + const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED; + const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max; + unsigned int adjust = ~(u32)0; + + /* if we rounded up on the reciprocal pull down the adjustment */ + if ((max * reciprocal) > adjust) + adjust = ~(u32)(reciprocal - 1); + + return (u32)((((u64)size * reciprocal) + adjust) >> 32); +} /* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 @@ -183,10 +229,8 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; - void *hdr_buf; dma_addr_t dma; struct page *page; - dma_addr_t page_dma; unsigned int page_offset; }; @@ -215,22 +259,18 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, - __I40E_RX_PS_ENABLED, - __I40E_RX_16BYTE_DESC_ENABLED, }; -#define ring_is_ps_enabled(ring) \ - test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define set_ring_ps_enabled(ring) \ - set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define clear_ring_ps_enabled(ring) \ - clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define ring_is_16byte_desc_enabled(ring) \ - test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) -#define set_ring_16byte_desc_enabled(ring) \ - set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) -#define clear_ring_16byte_desc_enabled(ring) \ - clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +/* some useful defines for virtchannel interface, which + * is the only remaining user of header split + */ +#define I40E_RX_DTYPE_NO_SPLIT 0 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 +#define I40E_RX_SPLIT_L2 0x1 +#define I40E_RX_SPLIT_IP 0x2 +#define I40E_RX_SPLIT_TCP_UDP 0x4 +#define I40E_RX_SPLIT_SCTP 0x8 /* struct that defines a descriptor ring, associated with a VSI */ struct i40e_ring { @@ -249,16 +289,7 @@ struct i40e_ring { u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ - u16 rx_hdr_len; u16 rx_buf_len; - u8 dtype; -#define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_HEADER_SPLIT 1 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 -#define I40E_RX_SPLIT_L2 0x1 -#define I40E_RX_SPLIT_IP 0x2 -#define I40E_RX_SPLIT_TCP_UDP 0x4 -#define I40E_RX_SPLIT_SCTP 0x8 /* used in interrupt processing */ u16 next_to_use; @@ -290,6 +321,7 @@ struct i40e_ring { struct i40e_q_vector *q_vector; /* Backreference to associated vector */ struct rcu_head rcu; /* to avoid race on free */ + u16 next_to_alloc; } ____cacheline_internodealigned_in_smp; enum i40e_latency_range { @@ -313,9 +345,7 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); -bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); -void i40evf_alloc_rx_headers(struct i40e_ring *rxr); +bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); @@ -359,7 +389,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) int count = 0, size = skb_headlen(skb); for (;;) { - count += TXD_USE_COUNT(size); + count += i40e_txd_use_count(size); if (!nr_frags--) 
break; @@ -405,4 +435,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) /* we can support up to 8 data buffers for a single send */ return count != I40E_MAX_BUFFER_TXD; } + +/** + * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE + * @ptype: the packet type field from Rx descriptor write-back + **/ +static inline bool i40e_rx_is_fcoe(u16 ptype) +{ + return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) && + (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER); +} #endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 301fe2b6d..97f96e0d9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -36,7 +36,7 @@ #include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) (mask << shift) +#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) #define I40E_MAX_VSI_QP 16 #define I40E_MAX_VF_VSI 3 @@ -258,6 +258,11 @@ struct i40e_hw_capabilities { #define I40E_FLEX10_STATUS_DCC_ERROR 0x1 #define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool sec_rev_disabled; + bool update_disabled; +#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 +#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -522,6 +527,8 @@ struct i40e_hw { enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; struct i40e_virt_mem nvm_buff; + bool nvm_release_on_done; + u16 nvm_wait_opcode; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ @@ -1329,4 +1336,46 @@ enum i40e_reset_type { /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 + +/* INPUT SET MASK for RSS, flow director and flexible payload */ +#define I40E_FD_INSET_L3_SRC_SHIFT 47 +#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_L3_SRC_SHIFT) +#define I40E_FD_INSET_L3_DST_SHIFT 35 +#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_L3_DST_SHIFT) +#define I40E_FD_INSET_L4_SRC_SHIFT 34 +#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \ + I40E_FD_INSET_L4_SRC_SHIFT) +#define I40E_FD_INSET_L4_DST_SHIFT 33 +#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \ + I40E_FD_INSET_L4_DST_SHIFT) +#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31 +#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_VERIFY_TAG_SHIFT) + +#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17 +#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD50_SHIFT) +#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16 +#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD51_SHIFT) +#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15 +#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD52_SHIFT) +#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14 +#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD53_SHIFT) +#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13 +#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD54_SHIFT) +#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12 +#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD55_SHIFT) +#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11 +#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD56_SHIFT) +#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10 +#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD57_SHIFT) #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h 
b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index 3b9d20374..f04ce6cb7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -80,7 +80,12 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, - I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + I40E_VIRTCHNL_OP_SET_RSS_HENA = 26, + }; /* Virtual channel message descriptor. This overlays the admin queue @@ -154,6 +159,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 struct i40e_virtchnl_vf_resource { u16 num_vsis; @@ -162,8 +168,8 @@ struct i40e_virtchnl_vf_resource { u16 max_mtu; u32 vf_offload_flags; - u32 max_fcoe_contexts; - u32 max_fcoe_filters; + u32 rss_key_size; + u32 rss_lut_size; struct i40e_virtchnl_vsi_resource vsi_res[1]; }; @@ -322,6 +328,39 @@ struct i40e_virtchnl_promisc_info { * PF replies with struct i40e_eth_stats in an external buffer. */ +/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY + * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct i40e_virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +struct i40e_virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table*/ +}; + +/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS + * I40E_VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h + */ +struct i40e_virtchnl_rss_hena { + u64 hena; +}; + /* I40E_VIRTCHNL_OP_EVENT * PF sends this message to inform the VF driver of events that may affect it. * No direct response is expected from the VF, though it may generate other diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index e657eccd2..76ed97db2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -67,8 +67,6 @@ struct i40e_vsi { u16 rx_itr_setting; u16 tx_itr_setting; u16 qs_handle; - u8 *rss_hkey_user; /* User configured hash keys */ - u8 *rss_lut_user; /* User configured lookup table entries */ }; /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ @@ -82,9 +80,6 @@ struct i40e_vsi { #define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 /* Supported Rx Buffer Sizes */ -#define I40EVF_RXBUFFER_64 64 /* Used for packet split */ -#define I40EVF_RXBUFFER_128 128 /* Used for packet split */ -#define I40EVF_RXBUFFER_256 256 /* Used for packet split */ #define I40EVF_RXBUFFER_2048 2048 #define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ #define I40EVF_MAX_AQ_BUF_SIZE 4096 @@ -210,9 +205,6 @@ struct i40evf_adapter { u32 flags; #define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1) -#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2) -#define I40EVF_FLAG_RX_PS_ENABLED BIT(3) #define I40EVF_FLAG_IMIR_ENABLED BIT(5) #define I40EVF_FLAG_MQ_CAPABLE BIT(6) #define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) @@ -222,6 +214,8 @@ struct i40evf_adapter { #define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11) #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) #define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) +#define I40EVF_FLAG_PROMISC_ON BIT(15) +#define I40EVF_FLAG_ALLMULTI_ON BIT(16) /* duplicates for common code */ #define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0 @@ -239,8 +233,17 @@ struct i40evf_adapter { #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) #define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) #define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) -#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) +#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ #define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) +/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ +#define I40EVF_FLAG_AQ_GET_HENA BIT(11) +#define I40EVF_FLAG_AQ_SET_HENA BIT(12) +#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13) +#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14) +#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15) +#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16) +#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) +#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) /* OS defined structs */ struct net_device *netdev; @@ -256,10 +259,18 @@ struct i40evf_adapter { bool netdev_registered; bool link_up; enum i40e_virtchnl_ops current_op; -#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \ - I40E_VIRTCHNL_VF_OFFLOAD_IWARP) +#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \ + (_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \ + 0) +/* RSS by the PF should be preferred over RSS via other methods. */ +#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) #define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) +#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \ + (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF))) #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \ I40E_VIRTCHNL_VF_OFFLOAD_VLAN) struct i40e_virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ @@ -271,11 +282,16 @@ struct i40evf_adapter { struct i40e_eth_stats current_stats; struct i40e_vsi vsi; u32 aq_wait_count; + /* RSS stuff */ + u64 hena; + u16 rss_key_size; + u16 rss_lut_size; + u8 *rss_key; + u8 *rss_lut; }; /* Ethtool Private Flags */ -#define I40EVF_PRIV_FLAGS_PS BIT(0) /* needed by i40evf_ethtool.c */ extern char i40evf_driver_name[]; @@ -314,11 +330,12 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter); void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); void i40evf_request_stats(struct i40evf_adapter *adapter); void i40evf_request_reset(struct i40evf_adapter *adapter); +void i40evf_get_hena(struct i40evf_adapter *adapter); +void i40evf_set_hena(struct i40evf_adapter *adapter); +void i40evf_set_rss_key(struct i40evf_adapter *adapter); +void i40evf_set_rss_lut(struct i40evf_adapter *adapter); void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, enum i40e_virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen); -int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, - u16 lut_size); -int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, - u16 lut_size); +int i40evf_config_rss(struct i40evf_adapter *adapter); #endif /* _I40EVF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index dd4430aae..c9c202f6c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -63,12 +63,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { #define I40EVF_STATS_LEN(_dev) \ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) -static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = { - "packet-split", -}; - -#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings) - /** * i40evf_get_settings - Get Link Speed and Duplex settings * @netdev: network interface device structure @@ -103,8 +97,6 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset) { if (sset == ETH_SS_STATS) return I40EVF_STATS_LEN(netdev); - else if (sset == ETH_SS_PRIV_FLAGS) - return I40EVF_PRIV_FLAGS_STR_LEN; else return -EINVAL; } @@ -170,12 +162,6 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); p += ETH_GSTRING_LEN; } - } else if (sset == ETH_SS_PRIV_FLAGS) { - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - memcpy(data, i40evf_priv_flags_strings[i], - ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } } } @@ -225,7 +211,6 @@ static void i40evf_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, i40evf_driver_version, 32); strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN; } /** @@ -378,63 +363,6 @@ static int i40evf_set_coalesce(struct net_device *netdev, } /** - * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type - * @adapter: board private structure - * @cmd: ethtool rxnfc command - * - * Returns Success if the flow is supported, else Invalid Input. 
- **/ -static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, - struct ethtool_rxnfc *cmd) -{ - struct i40e_hw *hw = &adapter->hw; - u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | - ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); - - /* We always hash on IP src and dest addresses */ - cmd->data = RXH_IP_SRC | RXH_IP_DST; - - switch (cmd->flow_type) { - case TCP_V4_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case UDP_V4_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - - case SCTP_V4_FLOW: - case AH_ESP_V4_FLOW: - case AH_V4_FLOW: - case ESP_V4_FLOW: - case IPV4_FLOW: - break; - - case TCP_V6_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case UDP_V6_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - - case SCTP_V6_FLOW: - case AH_ESP_V6_FLOW: - case AH_V6_FLOW: - case ESP_V6_FLOW: - case IPV6_FLOW: - break; - default: - cmd->data = 0; - return -EINVAL; - } - - return 0; -} - -/** * i40evf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -454,145 +382,8 @@ static int i40evf_get_rxnfc(struct net_device *netdev, ret = 0; break; case ETHTOOL_GRXFH: - ret = i40evf_get_rss_hash_opts(adapter, cmd); - break; - default: - break; - } - - return ret; -} - -/** - * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash - * @adapter: board private structure - * @cmd: ethtool rxnfc command - * - * Returns Success if the flow input set is supported. - **/ -static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, - struct ethtool_rxnfc *nfc) -{ - struct i40e_hw *hw = &adapter->hw; - u32 flags = adapter->vf_res->vf_offload_flags; - - u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | - ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); - - /* RSS does not support anything other than hashing - * to queues on src and dst IPs and ports - */ - if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3)) - return -EINVAL; - - /* We need at least the IP SRC and DEST fields for hashing */ - if (!(nfc->data & RXH_IP_SRC) || - !(nfc->data & RXH_IP_DST)) - return -EINVAL; - - switch (nfc->flow_type) { - case TCP_V4_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); - - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - } else { - return -EINVAL; - } - break; - case TCP_V6_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); - - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - } else { - return -EINVAL; - } - break; - case UDP_V4_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - } else { - return -EINVAL; - } - break; - case UDP_V6_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | - 
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - } else { - return -EINVAL; - } - break; - case AH_ESP_V4_FLOW: - case AH_V4_FLOW: - case ESP_V4_FLOW: - case SCTP_V4_FLOW: - if ((nfc->data & RXH_L4_B_0_1) || - (nfc->data & RXH_L4_B_2_3)) - return -EINVAL; - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); - break; - case AH_ESP_V6_FLOW: - case AH_V6_FLOW: - case ESP_V6_FLOW: - case SCTP_V6_FLOW: - if ((nfc->data & RXH_L4_B_0_1) || - (nfc->data & RXH_L4_B_2_3)) - return -EINVAL; - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); - break; - case IPV4_FLOW: - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; - case IPV6_FLOW: - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; - default: - return -EINVAL; - } - - wr32(hw, I40E_VFQF_HENA(0), (u32)hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); - i40e_flush(hw); - - return 0; -} - -/** - * i40evf_set_rxnfc - command to set RX flow classification rules - * @netdev: network interface device structure - * @cmd: ethtool rxnfc command - * - * Returns Success if the command is supported. - **/ -static int i40evf_set_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *cmd) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_SRXFH: - ret = i40evf_set_rss_hash_opt(adapter, cmd); + netdev_info(netdev, + "RSS hash info is not available to vf, use pf.\n"); break; default: break; @@ -600,7 +391,6 @@ static int i40evf_set_rxnfc(struct net_device *netdev, return ret; } - /** * i40evf_get_channels: get the number of channels supported by the device * @netdev: network interface device structure @@ -624,6 +414,19 @@ static void i40evf_get_channels(struct net_device *netdev, } /** + * i40evf_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the table size. 
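 * (more precisely: the RSS hash key size in bytes, adapter->rss_key_size;
 * the lookup-table size is reported separately by
 * i40evf_get_rxfh_indir_size() below)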
+ **/ +static u32 i40evf_get_rxfh_key_size(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_key_size; +} + +/** * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size * @netdev: network interface device structure * @@ -631,7 +434,9 @@ static void i40evf_get_channels(struct net_device *netdev, **/ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) { - return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; + struct i40evf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_lut_size; } /** @@ -646,9 +451,6 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - u8 *seed = NULL, *lut; - int ret; u16 i; if (hfunc) @@ -656,24 +458,13 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (!indir) return 0; - seed = key; - - lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); - if (!lut) - return -ENOMEM; - - ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); - if (ret) - goto out; + memcpy(key, adapter->rss_key, adapter->rss_key_size); /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) - indir[i] = (u32)lut[i]; + for (i = 0; i < adapter->rss_lut_size; i++) + indir[i] = (u32)adapter->rss_lut[i]; -out: - kfree(lut); - - return ret; + return 0; } /** @@ -689,8 +480,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - u8 *seed = NULL; u16 i; /* We do not allow change in unsupported parameters */ @@ -701,76 +490,14 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, return 0; if (key) { - if (!vsi->rss_hkey_user) { - vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE, - GFP_KERNEL); - if (!vsi->rss_hkey_user) - return -ENOMEM; - } - memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE); - seed = vsi->rss_hkey_user; - } - if (!vsi->rss_lut_user) { - vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE, - GFP_KERNEL); - if (!vsi->rss_lut_user) - return -ENOMEM; + memcpy(adapter->rss_key, key, adapter->rss_key_size); } /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) - vsi->rss_lut_user[i] = (u8)(indir[i]); - - return i40evf_config_rss(vsi, seed, vsi->rss_lut_user, - I40EVF_HLUT_ARRAY_SIZE); -} - -/** - * i40evf_get_priv_flags - report device private flags - * @dev: network interface device structure - * - * The get string set count and the string set should be matched for each - * flag returned. Add new strings for each flag to the i40e_priv_flags_strings - * array. - * - * Returns a u32 bitmap of flags. - **/ -static u32 i40evf_get_priv_flags(struct net_device *dev) -{ - struct i40evf_adapter *adapter = netdev_priv(dev); - u32 ret_flags = 0; - - ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ? 
- I40EVF_PRIV_FLAGS_PS : 0; - - return ret_flags; -} + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = (u8)(indir[i]); -/** - * i40evf_set_priv_flags - set private flags - * @dev: network interface device structure - * @flags: bit flags to be set - **/ -static int i40evf_set_priv_flags(struct net_device *dev, u32 flags) -{ - struct i40evf_adapter *adapter = netdev_priv(dev); - bool reset_required = false; - - if ((flags & I40EVF_PRIV_FLAGS_PS) && - !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { - adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; - reset_required = true; - } else if (!(flags & I40EVF_PRIV_FLAGS_PS) && - (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; - reset_required = true; - } - - /* if needed, issue reset to cause things to take effect */ - if (reset_required) - i40evf_schedule_reset(adapter); - - return 0; + return i40evf_config_rss(adapter); } static const struct ethtool_ops i40evf_ethtool_ops = { @@ -782,18 +509,16 @@ static const struct ethtool_ops i40evf_ethtool_ops = { .get_strings = i40evf_get_strings, .get_ethtool_stats = i40evf_get_ethtool_stats, .get_sset_count = i40evf_get_sset_count, - .get_priv_flags = i40evf_get_priv_flags, - .set_priv_flags = i40evf_set_priv_flags, .get_msglevel = i40evf_get_msglevel, .set_msglevel = i40evf_set_msglevel, .get_coalesce = i40evf_get_coalesce, .set_coalesce = i40evf_set_coalesce, .get_rxnfc = i40evf_get_rxnfc, - .set_rxnfc = i40evf_set_rxnfc, .get_rxfh_indir_size = i40evf_get_rxfh_indir_size, .get_rxfh = i40evf_get_rxfh, .set_rxfh = i40evf_set_rxfh, .get_channels = i40evf_get_channels, + .get_rxfh_key_size = i40evf_get_rxfh_key_size, }; /** diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 4b70aae2f..16c552952 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -37,8 +37,8 @@ static const char i40evf_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 15 +#define DRV_VERSION_MINOR 5 +#define DRV_VERSION_BUILD 10 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) \ @@ -641,28 +641,11 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) static void i40evf_configure_rx(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; int i; - int rx_buf_len; - - - /* Set the RX buffer length according to the mode */ - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED || - netdev->mtu <= ETH_DATA_LEN) - rx_buf_len = I40EVF_RXBUFFER_2048; - else - rx_buf_len = ALIGN(max_frame, 1024); for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); - adapter->rx_rings[i].rx_buf_len = rx_buf_len; - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - set_ring_ps_enabled(&adapter->rx_rings[i]); - adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE; - } else { - clear_ring_ps_enabled(&adapter->rx_rings[i]); - } + adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048; } } @@ -943,6 +926,21 @@ static void i40evf_set_rx_mode(struct net_device *netdev) bottom_of_search_loop: continue; } + + if (netdev->flags & IFF_PROMISC && + !(adapter->flags & I40EVF_FLAG_PROMISC_ON)) + adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC; + else if (!(netdev->flags & IFF_PROMISC) && + adapter->flags & I40EVF_FLAG_PROMISC_ON) + adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC; + + if (netdev->flags & IFF_ALLMULTI && + !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON)) + adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + else if (!(netdev->flags & IFF_ALLMULTI) && + adapter->flags & I40EVF_FLAG_ALLMULTI_ON) + adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI; + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); } @@ -999,14 +997,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = &adapter->rx_rings[i]; - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - i40evf_alloc_rx_headers(ring); - i40evf_alloc_rx_buffers_ps(ring, ring->count); - } else { - i40evf_alloc_rx_buffers_1buf(ring, ring->count); - } - ring->next_to_use = ring->count - 1; - writel(ring->next_to_use, ring->tail); + i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); } } @@ -1224,24 +1215,18 @@ out: } /** - * i40e_config_rss_aq - Prepare for RSS using AQ commands - * @vsi: vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size + * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands + * @adapter: board private structure * * Return 0 on success, negative on failure **/ -static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, - u8 *lut, u16 lut_size) +static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) { - struct i40evf_adapter *adapter = vsi->back; + struct i40e_aqc_get_set_rss_key_data *rss_key = + (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key; struct i40e_hw *hw = &adapter->hw; int ret = 0; - if (!vsi->id) - return -EINVAL; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", @@ -1249,198 +1234,82 @@ static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, return -EBUSY; } - if (seed) { - struct i40e_aqc_get_set_rss_key_data *rss_key = - (struct i40e_aqc_get_set_rss_key_data *)seed; - ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key); - if (ret) { - 
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } + ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); + if (ret) { + dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } - if (lut) { - ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size); - if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } + ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false, + adapter->rss_lut, adapter->rss_lut_size); + if (ret) { + dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); } return ret; + } /** * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size + * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, - const u8 *lut, u16 lut_size) +static int i40evf_config_rss_reg(struct i40evf_adapter *adapter) { - struct i40evf_adapter *adapter = vsi->back; struct i40e_hw *hw = &adapter->hw; + u32 *dw; u16 i; - if (seed) { - u32 *seed_dw = (u32 *)seed; - - for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]); - } - - if (lut) { - u32 *lut_dw = (u32 *)lut; + dw = (u32 *)adapter->rss_key; + for (i = 0; i <= adapter->rss_key_size / 4; i++) + wr32(hw, I40E_VFQF_HKEY(i), dw[i]); - if (lut_size != I40EVF_HLUT_ARRAY_SIZE) - return -EINVAL; + dw = (u32 *)adapter->rss_lut; + for (i = 0; i <= adapter->rss_lut_size / 4; i++) + wr32(hw, I40E_VFQF_HLUT(i), dw[i]); - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]); - } i40e_flush(hw); return 0; } /** - * * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size - * - * Return 0 on success, negative on failure - **/ -static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, - u8 *lut, u16 lut_size) -{ - struct i40evf_adapter *adapter = vsi->back; - struct i40e_hw *hw = &adapter->hw; - int ret = 0; - - if (seed) { - ret = i40evf_aq_get_rss_key(hw, vsi->id, - (struct i40e_aqc_get_set_rss_key_data *)seed); - if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot get RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } - } - - if (lut) { - ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size); - if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot get RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } - } - - return ret; -} - -/** - * * i40evf_get_rss_reg - Get RSS keys and lut by reading registers - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size - * - * Returns 0 on success, negative on failure - **/ -static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed, - const u8 *lut, u16 lut_size) -{ - struct i40evf_adapter *adapter = vsi->back; - struct i40e_hw *hw = &adapter->hw; - 
u16 i; - - if (seed) { - u32 *seed_dw = (u32 *)seed; - - for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i)); - } - - if (lut) { - u32 *lut_dw = (u32 *)lut; - - if (lut_size != I40EVF_HLUT_ARRAY_SIZE) - return -EINVAL; - - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) - lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i)); - } - - return 0; -} - -/** * i40evf_config_rss - Configure RSS keys and lut - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size - * - * Returns 0 on success, negative on failure - **/ -int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, - u8 *lut, u16 lut_size) -{ - struct i40evf_adapter *adapter = vsi->back; - - if (RSS_AQ(adapter)) - return i40evf_config_rss_aq(vsi, seed, lut, lut_size); - else - return i40evf_config_rss_reg(vsi, seed, lut, lut_size); -} - -/** - * i40evf_get_rss - Get RSS keys and lut - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size + * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) +int i40evf_config_rss(struct i40evf_adapter *adapter) { - struct i40evf_adapter *adapter = vsi->back; - if (RSS_AQ(adapter)) - return i40evf_get_rss_aq(vsi, seed, lut, lut_size); - else - return i40evf_get_rss_reg(vsi, seed, lut, lut_size); + if (RSS_PF(adapter)) { + adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT | + I40EVF_FLAG_AQ_SET_RSS_KEY; + return 0; + } else if (RSS_AQ(adapter)) { + return i40evf_config_rss_aq(adapter); + } else { + return i40evf_config_rss_reg(adapter); + } } /** * i40evf_fill_rss_lut - Fill the lut with default values - * @lut: Lookup table to be filled with - * @rss_table_size: Lookup table size - * @rss_size: Range of queue number for hashing + * @adapter: board private structure **/ -static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) +static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter) { u16 i; - for (i = 0; i < rss_table_size; i++) - lut[i] = i % rss_size; + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = i % adapter->num_active_queues; } /** @@ -1451,42 +1320,25 @@ static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) **/ static int i40evf_init_rss(struct i40evf_adapter *adapter) { - struct i40e_vsi *vsi = &adapter->vsi; struct i40e_hw *hw = &adapter->hw; - u8 seed[I40EVF_HKEY_ARRAY_SIZE]; - u64 hena; - u8 *lut; int ret; - /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ - if (adapter->vf_res->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena = I40E_DEFAULT_RSS_HENA_EXPANDED; - else - hena = I40E_DEFAULT_RSS_HENA; - wr32(hw, I40E_VFQF_HENA(0), (u32)hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); + if (!RSS_PF(adapter)) { + /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ + if (adapter->vf_res->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED; + else + adapter->hena = I40E_DEFAULT_RSS_HENA; - lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); - if (!lut) - return -ENOMEM; + wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32)); + } - /* Use user configured lut if there is one, otherwise use default */ - if (vsi->rss_lut_user) - memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE); - else - i40evf_fill_rss_lut(lut, 
I40EVF_HLUT_ARRAY_SIZE, - adapter->num_active_queues); + i40evf_fill_rss_lut(adapter); - /* Use user configured hash key if there is one, otherwise - * user default. - */ - if (vsi->rss_hkey_user) - memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE); - else - netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE); - ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); - kfree(lut); + netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); + ret = i40evf_config_rss(adapter); return ret; } @@ -1507,7 +1359,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), GFP_KERNEL); if (!adapter->q_vectors) - goto err_out; + return -ENOMEM; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { q_vector = &adapter->q_vectors[q_idx]; @@ -1519,15 +1371,6 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) } return 0; - -err_out: - while (q_idx) { - q_idx--; - q_vector = &adapter->q_vectors[q_idx]; - netif_napi_del(&q_vector->napi); - } - kfree(adapter->q_vectors); - return -ENOMEM; } /** @@ -1610,19 +1453,16 @@ err_set_interrupt: } /** - * i40evf_clear_rss_config_user - Clear user configurations of RSS - * @vsi: Pointer to VSI structure + * i40evf_free_rss - Free memory used by RSS structs + * @adapter: board private structure **/ -static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi) +static void i40evf_free_rss(struct i40evf_adapter *adapter) { - if (!vsi) - return; - - kfree(vsi->rss_hkey_user); - vsi->rss_hkey_user = NULL; + kfree(adapter->rss_key); + adapter->rss_key = NULL; - kfree(vsi->rss_lut_user); - vsi->rss_lut_user = NULL; + kfree(adapter->rss_lut); + adapter->rss_lut = NULL; } /** @@ -1756,6 +1596,39 @@ static void i40evf_watchdog_task(struct work_struct *work) adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; goto watchdog_done; } + if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) { + i40evf_get_hena(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) { + i40evf_set_hena(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) { + i40evf_set_rss_key(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) { + i40evf_set_rss_lut(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) { + i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC | + I40E_FLAG_VF_MULTICAST_PROMISC); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) { + i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC); + goto watchdog_done; + } + + if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) && + (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) { + i40evf_set_promiscuous(adapter, 0); + goto watchdog_done; + } if (adapter->state == __I40EVF_RUNNING) i40evf_request_stats(adapter); @@ -2003,6 +1876,8 @@ static void i40evf_adminq_task(struct work_struct *work) /* check for error indications */ val = rd32(hw, hw->aq.arq.len); + if (val == 0xdeadbeef) /* indicates device in reset */ + goto freedom; oldval = val; if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) { dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); @@ -2259,6 +2134,28 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) return 0; } +#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\ + NETIF_F_HW_VLAN_CTAG_RX |\ + NETIF_F_HW_VLAN_CTAG_FILTER) + +/** + * i40evf_fix_features - 
fix up the netdev feature bits + * @netdev: our net device + * @features: desired feature bits + * + * Returns fixed-up features bits + **/ +static netdev_features_t i40evf_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + features &= ~I40EVF_VLAN_FEATURES; + if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) + features |= I40EVF_VLAN_FEATURES; + return features; +} + static const struct net_device_ops i40evf_netdev_ops = { .ndo_open = i40evf_open, .ndo_stop = i40evf_close, @@ -2271,6 +2168,7 @@ static const struct net_device_ops i40evf_netdev_ops = { .ndo_tx_timeout = i40evf_tx_timeout, .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, + .ndo_fix_features = i40evf_fix_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40evf_netpoll, #endif @@ -2307,57 +2205,61 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) **/ int i40evf_process_config(struct i40evf_adapter *adapter) { + struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; + struct i40e_vsi *vsi = &adapter->vsi; int i; /* got VF config message back from PF, now we can parse it */ - for (i = 0; i < adapter->vf_res->num_vsis; i++) { - if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) - adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + for (i = 0; i < vfres->num_vsis; i++) { + if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + adapter->vsi_res = &vfres->vsi_res[i]; } if (!adapter->vsi_res) { dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); return -ENODEV; } - if (adapter->vf_res->vf_offload_flags - & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { - netdev->vlan_features = netdev->features & - ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER); - netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; - } - netdev->features |= NETIF_F_HIGHDMA | - NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_SCTP_CRC | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_TSO_ECN | - NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_RXCSUM | - NETIF_F_GRO; - - netdev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_TSO_ECN | - NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE) - netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; - - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features; - netdev->hw_features &= ~NETIF_F_RXCSUM; + netdev->hw_enc_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_HIGHDMA | + NETIF_F_SOFT_FEATURES | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_SCTP_CRC | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + 0; + + if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)) + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + + /* record features VLANs can make use of */ + netdev->vlan_features |= netdev->hw_enc_features | + NETIF_F_TSO_MANGLEID; + + /* Write features and hw_features separately to avoid polluting + * with, or dropping, features that 
are set when we registgered. + */ + netdev->hw_features |= netdev->hw_enc_features; + + netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES; + netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; + + /* disable VLAN features if not supported */ + if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)) + netdev->features ^= I40EVF_VLAN_FEATURES; adapter->vsi.id = adapter->vsi_res->vsi_id; @@ -2368,8 +2270,16 @@ int i40evf_process_config(struct i40evf_adapter *adapter) ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC | ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); - adapter->vsi.netdev = adapter->netdev; - adapter->vsi.qs_handle = adapter->vsi_res->qset_handle; + vsi->netdev = adapter->netdev; + vsi->qs_handle = adapter->vsi_res->qset_handle; + if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { + adapter->rss_key_size = vfres->rss_key_size; + adapter->rss_lut_size = vfres->rss_lut_size; + } else { + adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE; + adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE; + } + return 0; } @@ -2502,11 +2412,6 @@ static void i40evf_init_task(struct work_struct *work) adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; - adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; - adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE; - - /* Default to single buffer rx, can be changed through ethtool. */ - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; netdev->netdev_ops = &i40evf_netdev_ops; i40evf_set_ethtool_ops(netdev); @@ -2565,6 +2470,11 @@ static void i40evf_init_task(struct work_struct *work) set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_misc_irq_enable(adapter); + adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); + adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); + if (!adapter->rss_key || !adapter->rss_lut) + goto err_mem; + if (RSS_AQ(adapter)) { adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); @@ -2575,7 +2485,8 @@ static void i40evf_init_task(struct work_struct *work) restart: schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30)); return; - +err_mem: + i40evf_free_rss(adapter); err_register: i40evf_free_misc_irq(adapter); err_sw_init: @@ -2838,11 +2749,11 @@ static void i40evf_remove(struct pci_dev *pdev) adapter->state = __I40EVF_REMOVE; adapter->aq_required = 0; i40evf_request_reset(adapter); - msleep(20); + msleep(50); /* If the FW isn't responding, kick it once, but only once. 
*/ if (!i40evf_asq_done(hw)) { i40evf_request_reset(adapter); - msleep(20); + msleep(50); } if (adapter->msix_entries) { @@ -2857,8 +2768,7 @@ static void i40evf_remove(struct pci_dev *pdev) flush_scheduled_work(); - /* Clear user configurations for RSS */ - i40evf_clear_rss_config_user(&adapter->vsi); + i40evf_free_rss(adapter); if (hw->aq.asq.count) i40evf_shutdown_adminq(hw); @@ -2869,7 +2779,6 @@ static void i40evf_remove(struct pci_dev *pdev) iounmap(hw->hw_addr); pci_release_regions(pdev); - i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); i40evf_free_queues(adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 488e738f7..f13445691 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -270,10 +270,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) vqpi->rxq.max_pkt_size = adapter->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - vqpi->rxq.splithdr_enabled = true; - vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE; - } vqpi++; } @@ -645,6 +641,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) { struct i40e_virtchnl_promisc_info vpi; + int promisc_all; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -652,6 +649,27 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) adapter->current_op); return; } + + promisc_all = I40E_FLAG_VF_UNICAST_PROMISC | + I40E_FLAG_VF_MULTICAST_PROMISC; + if ((flags & promisc_all) == promisc_all) { + adapter->flags |= I40EVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC; + dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); + } + + if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) { + adapter->flags |= I40EVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); + } + + if (!flags) { + adapter->flags &= ~I40EVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC; + dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + } + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; vpi.vsi_id = adapter->vsi_res->vsi_id; vpi.flags = flags; @@ -681,6 +699,115 @@ void i40evf_request_stats(struct i40evf_adapter *adapter) /* if the request failed, don't lock out others */ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; } + +/** + * i40evf_get_hena + * @adapter: adapter structure + * + * Request hash enable capabilities from PF + **/ +void i40evf_get_hena(struct i40evf_adapter *adapter) +{ + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", + adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS; + adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, + NULL, 0); +} + +/** + * i40evf_set_hena + * @adapter: adapter structure + * + * Request the PF to set our RSS hash capabilities + **/ +void i40evf_set_hena(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_rss_hena vrh; + + if 
(adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n", + adapter->current_op); + return; + } + vrh.hena = adapter->hena; + adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA; + adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA, + (u8 *)&vrh, sizeof(vrh)); +} + +/** + * i40evf_set_rss_key + * @adapter: adapter structure + * + * Request the PF to set our RSS hash key + **/ +void i40evf_set_rss_key(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_rss_key *vrk; + int len; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n", + adapter->current_op); + return; + } + len = sizeof(struct i40e_virtchnl_rss_key) + + (adapter->rss_key_size * sizeof(u8)) - 1; + vrk = kzalloc(len, GFP_KERNEL); + if (!vrk) + return; + vrk->vsi_id = adapter->vsi.id; + vrk->key_len = adapter->rss_key_size; + memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); + + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY; + adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, + (u8 *)vrk, len); + kfree(vrk); +} + +/** + * i40evf_set_rss_lut + * @adapter: adapter structure + * + * Request the PF to set our RSS lookup table + **/ +void i40evf_set_rss_lut(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_rss_lut *vrl; + int len; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n", + adapter->current_op); + return; + } + len = sizeof(struct i40e_virtchnl_rss_lut) + + (adapter->rss_lut_size * sizeof(u8)) - 1; + vrl = kzalloc(len, GFP_KERNEL); + if (!vrl) + return; + vrl->vsi_id = adapter->vsi.id; + vrl->lut_entries = adapter->rss_lut_size; + memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT; + adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, + (u8 *)vrl, len); + kfree(vrl); +} + /** * i40evf_request_reset * @adapter: adapter structure @@ -820,6 +947,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, if (v_opcode != adapter->current_op) return; break; + case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: { + struct i40e_virtchnl_rss_hena *vrh = + (struct i40e_virtchnl_rss_hena *)msg; + if (msglen == sizeof(*vrh)) + adapter->hena = vrh->hena; + else + dev_warn(&adapter->pdev->dev, + "Invalid message %d from PF\n", v_opcode); + } + break; default: if (v_opcode != adapter->current_op) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", |
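Note on the RSS virtchnl messages added above: i40e_virtchnl_rss_key and i40e_virtchnl_rss_lut are variable-length, declaring a one-byte trailing array (key[1] / lut[1]), so the allocation length is sizeof(struct) + n - 1, where the "- 1" accounts for the array element already counted inside the struct. The following is a minimal standalone sketch of that sizing rule in plain C (illustration only, not driver code; the struct name, the 40-byte key and the VSI id are hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Mirror of the wire layout used by I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
 * a fixed header followed by key_len bytes, declared as key[1].
 */
struct rss_key_msg {
	uint16_t vsi_id;
	uint16_t key_len;
	uint8_t  key[1];	/* first byte of the variable-length key */
};

int main(void)
{
	uint8_t key[40] = { 0 };	/* example 40-byte hash key */
	/* sizeof() already covers key[0], hence the "- 1" */
	size_t len = sizeof(struct rss_key_msg) + sizeof(key) - 1;
	struct rss_key_msg *msg = calloc(1, len);

	if (!msg)
		return 1;
	msg->vsi_id = 3;		/* hypothetical VSI id */
	msg->key_len = sizeof(key);
	memcpy(msg->key, key, sizeof(key));
	printf("message length = %zu bytes\n", len);
	free(msg);
	return 0;
}

The same arithmetic appears in i40evf_set_rss_key() and i40evf_set_rss_lut() above, with adapter->rss_key_size and adapter->rss_lut_size supplying the element counts.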