| author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-09-08 01:01:14 -0300 |
|---|---|---|
| committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-09-08 01:01:14 -0300 |
| commit | e5fd91f1ef340da553f7a79da9540c3db711c937 (patch) | |
| tree | b11842027dc6641da63f4bcc524f8678263304a3 /drivers/net/ethernet/intel/i40evf | |
| parent | 2a9b0348e685a63d97486f6749622b61e9e3292f (diff) | |
Linux-libre 4.2-gnu
Diffstat (limited to 'drivers/net/ethernet/intel/i40evf')
| -rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 164 |
| -rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 2 |
| -rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_type.h | 1 |
| -rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40evf.h | 1 |
| -rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 6 |
| -rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40evf_main.c | 110 |
6 files changed, 131 insertions, 153 deletions
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 458fbb421..395f32f22 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 			 tx_ring->vsi->seid,
 			 tx_ring->queue_index,
 			 tx_ring->next_to_use, i);
-		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-			 "  time_stamp           <%lx>\n"
-			 "  jiffies              <%lx>\n",
-			 tx_ring->tx_bi[i].time_stamp, jiffies);
 
 		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -488,6 +484,8 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
 	if (!dev)
 		return -ENOMEM;
 
+	/* warn if we are about to overwrite the pointer */
+	WARN_ON(tx_ring->tx_bi);
 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
 	if (!tx_ring->tx_bi)
@@ -648,6 +646,8 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
 	struct device *dev = rx_ring->dev;
 	int bi_size;
 
+	/* warn if we are about to overwrite the pointer */
+	WARN_ON(rx_ring->rx_bi);
 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
 	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
 	if (!rx_ring->rx_bi)
@@ -1128,9 +1128,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		/* ERR_MASK will only have valid bits if EOP set */
 		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
-			/* TODO: shouldn't we increment a counter indicating the
-			 * drop?
-			 */
 			continue;
 		}
 
@@ -1156,7 +1153,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-		rx_ring->netdev->last_rx = jiffies;
 		rx_desc->wb.qword1.status_error_len = 0;
 	} while (likely(total_rx_packets < budget));
 
@@ -1271,7 +1267,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 			 : 0;
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-		rx_ring->netdev->last_rx = jiffies;
 		rx_desc->wb.qword1.status_error_len = 0;
 	} while (likely(total_rx_packets < budget));
 
@@ -1352,7 +1347,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 }
 
 /**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 * @flags: the tx flags to be set
@@ -1363,9 +1358,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 * Returns error code indicate the frame should be dropped upon error and the
 * otherwise returns 0 to indicate the flags has been set properly.
 **/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-				      struct i40e_ring *tx_ring,
-				      u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+					       struct i40e_ring *tx_ring,
+					       u32 *flags)
 {
 	__be16 protocol = skb->protocol;
 	u32 tx_flags = 0;
@@ -1408,16 +1403,14 @@ out:
 * i40e_tso - set up the tso context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
 * @hdr_len:  ptr to the size of the packet header
 * @cd_tunneling: ptr to context descriptor bits
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+		    u32 *cd_tunneling)
 {
 	u32 cd_cmd, cd_tso_len, cd_mss;
 	struct ipv6hdr *ipv6h;
@@ -1468,12 +1461,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @cd_tunneling: ptr to context desc bits
 **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 				u32 *td_cmd, u32 *td_offset,
 				struct i40e_ring *tx_ring,
 				u32 *cd_tunneling)
@@ -1489,6 +1482,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
 		default:
 			return;
 		}
@@ -1498,18 +1492,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		this_ipv6_hdr = inner_ipv6_hdr(skb);
 		this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-		if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-			if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+			if (*tx_flags & I40E_TX_FLAGS_TSO) {
 				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
 				ip_hdr(skb)->check = 0;
 			} else {
 				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
 			}
-		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-			if (tx_flags & I40E_TX_FLAGS_TSO)
+			if (*tx_flags & I40E_TX_FLAGS_TSO)
 				ip_hdr(skb)->check = 0;
 		}
 
@@ -1521,8 +1514,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 					skb_transport_offset(skb)) >> 1) <<
 				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 		if (this_ip_hdr->version == 6) {
-			tx_flags &= ~I40E_TX_FLAGS_IPV4;
-			tx_flags |= I40E_TX_FLAGS_IPV6;
+			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
+			*tx_flags |= I40E_TX_FLAGS_IPV6;
 		}
 
@@ -1534,12 +1527,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 	}
 
 	/* Enable IP checksum offloads */
-	if (tx_flags & I40E_TX_FLAGS_IPV4) {
+	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 		l4_hdr = this_ip_hdr->protocol;
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
 		 */
-		if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_TSO) {
 			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
 			this_ip_hdr->check = 0;
 		} else {
@@ -1548,7 +1541,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		/* Now set the td_offset for IP header length */
 		*td_offset = (network_hdr_len >> 2) <<
 			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 		l4_hdr = this_ipv6_hdr->nexthdr;
 		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
 		/* Now set the td_offset for IP header length */
@@ -1672,7 +1665,44 @@ linearize_chk_done:
 }
 
 /**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
+	smp_mb();
+
+	/* Check again in a case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
@@ -1681,9 +1711,9 @@ linearize_chk_done:
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-			struct i40e_tx_buffer *first, u32 tx_flags,
-			const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+				 struct i40e_tx_buffer *first, u32 tx_flags,
+				 const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
 	unsigned int data_len = skb->data_len;
 	unsigned int size = skb_headlen(skb);
@@ -1789,9 +1819,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 						 tx_ring->queue_index),
 			     first->bytecount);
 
-	/* set the timestamp */
-	first->time_stamp = jiffies;
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -1808,8 +1835,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	tx_ring->next_to_use = i;
 
+	i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 	/* notify HW of packet */
-	writel(i, tx_ring->tail);
+	if (!skb->xmit_more ||
+	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						   tx_ring->queue_index)))
+		writel(i, tx_ring->tail);
 
 	return;
 
@@ -1831,44 +1862,7 @@ dma_error:
 }
 
 /**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	/* Memory barrier before checking head and tail */
-	smp_mb();
-
-	/* Check again in a case another CPU has just made room available. */
-	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-		return -EBUSY;
-
-	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++tx_ring->tx_stats.restart_queue;
-	return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-		return 0;
-	return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
@@ -1876,8 +1870,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 * there is not enough descriptors available in this ring since we need at least
 * one descriptor.
 **/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
-				      struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+					       struct i40e_ring *tx_ring)
 {
 	unsigned int f;
 	int count = 0;
@@ -1892,7 +1886,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
 	count += TXD_USE_COUNT(skb_headlen(skb));
-	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+	if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 		tx_ring->tx_stats.tx_busy++;
 		return 0;
 	}
@@ -1918,11 +1912,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	u32 td_cmd = 0;
 	u8 hdr_len = 0;
 	int tso;
-	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+	if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
 		return NETDEV_TX_BUSY;
 
 	/* prepare the xmit flags */
-	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+	if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
 		goto out_drop;
 
 	/* obtain protocol of skb */
@@ -1937,7 +1931,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	else if (protocol == htons(ETH_P_IPV6))
 		tx_flags |= I40E_TX_FLAGS_IPV6;
 
-	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+	tso = i40e_tso(tx_ring, skb, &hdr_len,
 		       &cd_type_cmd_tso_mss, &cd_tunneling);
 
 	if (tso < 0)
@@ -1958,17 +1952,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		tx_flags |= I40E_TX_FLAGS_CSUM;
 
-		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
 				    tx_ring, &cd_tunneling);
 	}
 
 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
 			   cd_tunneling, cd_l2tag2);
 
-	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-		    td_cmd, td_offset);
-
-	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+	i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+		      td_cmd, td_offset);
 
 	return NETDEV_TX_OK;
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 1e49bb1fb..e7a34f899 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
 #define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -145,7 +146,6 @@ enum i40e_dyn_idx_t {
 
 struct i40e_tx_buffer {
 	struct i40e_tx_desc *next_to_watch;
-	unsigned long time_stamp;
 	union {
 		struct sk_buff *skb;
 		void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index ec9d83a93..c463ec415 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats {
 	/* flow director stats */
 	u64 fd_atr_match;
 	u64 fd_sb_match;
+	u64 fd_atr_tunnel_match;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 1b98c25b3..fea3b75a9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -264,7 +264,6 @@ extern const char i40evf_driver_version[];
 
 int i40evf_up(struct i40evf_adapter *adapter);
 void i40evf_down(struct i40evf_adapter *adapter);
-void i40evf_reinit_locked(struct i40evf_adapter *adapter);
 void i40evf_reset(struct i40evf_adapter *adapter);
 void i40evf_set_ethtool_ops(struct net_device *netdev);
 void i40evf_update_stats(struct i40evf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index f4e77665b..2b53c870e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -267,8 +267,10 @@ static int i40evf_set_ringparam(struct net_device *netdev,
 	adapter->tx_desc_count = new_tx_count;
 	adapter->rx_desc_count = new_rx_count;
 
-	if (netif_running(netdev))
-		i40evf_reinit_locked(adapter);
+	if (netif_running(netdev)) {
+		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+		schedule_work(&adapter->reset_task);
+	}
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 7c53aca4b..4ab4ebba0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -170,7 +170,8 @@ static void i40evf_tx_timeout(struct net_device *netdev)
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 
 	adapter->tx_timeout_count++;
-	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+	if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING |
+				I40EVF_FLAG_RESET_NEEDED))) {
 		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
 		schedule_work(&adapter->reset_task);
 	}
@@ -1460,7 +1461,7 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
 	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
 		lut = 0;
 		for (j = 0; j < 4; j++) {
-			if (cqueue == adapter->vsi_res->num_queue_pairs)
+			if (cqueue == adapter->num_active_queues)
 				cqueue = 0;
 			lut |= ((cqueue) << (8 * j));
 			cqueue++;
@@ -1470,8 +1471,8 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
 	i40e_flush(hw);
 }
 
-#define I40EVF_RESET_WAIT_MS 100
-#define I40EVF_RESET_WAIT_COUNT 200
+#define I40EVF_RESET_WAIT_MS 10
+#define I40EVF_RESET_WAIT_COUNT 500
 /**
 * i40evf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
@@ -1495,10 +1496,17 @@ static void i40evf_reset_task(struct work_struct *work)
 				&adapter->crit_section))
 		usleep_range(500, 1000);
 
+	i40evf_misc_irq_disable(adapter);
 	if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
-		dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+		adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
+		/* Restart the AQ here. If we have been reset but didn't
+		 * detect it, or if the PF had to reinit, our AQ will be hosed.
+		 */
+		i40evf_shutdown_adminq(hw);
+		i40evf_init_adminq(hw);
 		i40evf_request_reset(adapter);
 	}
+	adapter->flags |= I40EVF_FLAG_RESET_PENDING;
 
 	/* poll until we see the reset actually happen */
@@ -1507,10 +1515,10 @@ static void i40evf_reset_task(struct work_struct *work)
 		if ((rstat_val != I40E_VFR_VFACTIVE) &&
 		    (rstat_val != I40E_VFR_COMPLETED))
 			break;
-		msleep(I40EVF_RESET_WAIT_MS);
+		usleep_range(500, 1000);
 	}
 	if (i == I40EVF_RESET_WAIT_COUNT) {
-		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+		dev_info(&adapter->pdev->dev, "Never saw reset\n");
 		goto continue_reset; /* act like the reset happened */
 	}
 
@@ -1518,11 +1526,12 @@ static void i40evf_reset_task(struct work_struct *work)
 	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
 		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
 			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if ((rstat_val == I40E_VFR_VFACTIVE) ||
-		    (rstat_val == I40E_VFR_COMPLETED))
+		if (rstat_val == I40E_VFR_VFACTIVE)
 			break;
 		msleep(I40EVF_RESET_WAIT_MS);
 	}
+	/* extra wait to make sure minimum wait is met */
+	msleep(I40EVF_RESET_WAIT_MS);
 	if (i == I40EVF_RESET_WAIT_COUNT) {
 		struct i40evf_mac_filter *f, *ftmp;
 		struct i40evf_vlan_filter *fv, *fvtmp;
@@ -1534,11 +1543,10 @@ static void i40evf_reset_task(struct work_struct *work)
 		if (netif_running(adapter->netdev)) {
 			set_bit(__I40E_DOWN, &adapter->vsi.state);
-			i40evf_irq_disable(adapter);
-			i40evf_napi_disable_all(adapter);
-			netif_tx_disable(netdev);
-			netif_tx_stop_all_queues(netdev);
 			netif_carrier_off(netdev);
+			netif_tx_disable(netdev);
+			i40evf_napi_disable_all(adapter);
+			i40evf_irq_disable(adapter);
 			i40evf_free_traffic_irqs(adapter);
 			i40evf_free_all_tx_resources(adapter);
 			i40evf_free_all_rx_resources(adapter);
@@ -1550,6 +1558,7 @@ static void i40evf_reset_task(struct work_struct *work)
 			list_del(&f->list);
 			kfree(f);
 		}
+
 		list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list,
 					 list) {
 			list_del(&fv->list);
@@ -1564,22 +1573,27 @@ static void i40evf_reset_task(struct work_struct *work)
 		i40evf_shutdown_adminq(hw);
 		adapter->netdev->flags &= ~IFF_UP;
 		clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+		dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
 		return; /* Do not attempt to reinit. It's dead, Jim. */
 	}
 
 continue_reset:
-	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
-
-	i40evf_irq_disable(adapter);
-
 	if (netif_running(adapter->netdev)) {
-		i40evf_napi_disable_all(adapter);
-		netif_tx_disable(netdev);
-		netif_tx_stop_all_queues(netdev);
 		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+		i40evf_napi_disable_all(adapter);
 	}
+	i40evf_irq_disable(adapter);
 
 	adapter->state = __I40EVF_RESETTING;
+	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
+	/* free the Tx/Rx rings and descriptors, might be better to just
+	 * re-use them sometime in the future
+	 */
+	i40evf_free_all_rx_resources(adapter);
+	i40evf_free_all_tx_resources(adapter);
 
 	/* kill and reinit the admin queue */
 	if (i40evf_shutdown_adminq(hw))
@@ -1603,6 +1617,7 @@ continue_reset:
 	adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
 	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	i40evf_misc_irq_enable(adapter);
 
 	mod_timer(&adapter->watchdog_timer, jiffies + 2);
 
@@ -1624,7 +1639,10 @@ continue_reset:
 			goto reset_err;
 
 		i40evf_irq_enable(adapter, true);
+	} else {
+		adapter->state = __I40EVF_DOWN;
 	}
+
 	return;
 reset_err:
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
@@ -1667,6 +1685,11 @@ static void i40evf_adminq_task(struct work_struct *work)
 		memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
 	} while (pending);
 
+	if ((adapter->flags &
+	     (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
+	    adapter->state == __I40EVF_RESETTING)
+		goto freedom;
+
 	/* check for error indications */
 	val = rd32(hw, hw->aq.arq.len);
 	oldval = val;
@@ -1702,6 +1725,7 @@ static void i40evf_adminq_task(struct work_struct *work)
 	if (oldval != val)
 		wr32(hw, hw->aq.asq.len, val);
 
+freedom:
 	kfree(event.msg_buf);
 out:
 	/* re-enable Admin queue interrupt cause */
@@ -1897,47 +1921,6 @@ static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
 }
 
 /**
- * i40evf_reinit_locked - Software reinit
- * @adapter: board private structure
- *
- * Reinititalizes the ring structures in response to a software configuration
- * change. Roughly the same as close followed by open, but skips releasing
- * and reallocating the interrupts.
- **/
-void i40evf_reinit_locked(struct i40evf_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	int err;
-
-	WARN_ON(in_interrupt());
-
-	i40evf_down(adapter);
-
-	/* allocate transmit descriptors */
-	err = i40evf_setup_all_tx_resources(adapter);
-	if (err)
-		goto err_reinit;
-
-	/* allocate receive descriptors */
-	err = i40evf_setup_all_rx_resources(adapter);
-	if (err)
-		goto err_reinit;
-
-	i40evf_configure(adapter);
-
-	err = i40evf_up_complete(adapter);
-	if (err)
-		goto err_reinit;
-
-	i40evf_irq_enable(adapter, true);
-	return;
-
-err_reinit:
-	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
-	i40evf_close(netdev);
-}
-
-/**
 * i40evf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
@@ -1952,9 +1935,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
 	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
 		return -EINVAL;
 
-	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;
-	i40evf_reinit_locked(adapter);
+	adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+	schedule_work(&adapter->reset_task);
+
 	return 0;
 }
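The Tx-path hunks above make three coordinated changes: the two-level queue-stop helpers move into the VF driver as __i40evf_maybe_stop_tx()/i40evf_maybe_stop_tx(), the stop check runs before the doorbell inside i40evf_tx_map() instead of after it, and the tail write is skipped when skb->xmit_more says more frames are coming and the subqueue is still running, so doorbells are batched across a burst. The stop helpers implement a classic lost-wakeup guard: stop the queue first, issue a full memory barrier (smp_mb()), then re-check the free-descriptor count, so a completion running on another CPU cannot free descriptors and restart the queue in the window between the check and the stop. Below is a minimal userspace sketch of just that pattern, not the driver's actual code: the struct, function names, and ring bookkeeping are hypothetical stand-ins, with a C11 seq_cst fence modeling smp_mb().

```c
/* Userspace model of the stop-then-recheck pattern in
 * __i40evf_maybe_stop_tx()/i40evf_maybe_stop_tx() above.
 * All names and the ring bookkeeping are hypothetical;
 * atomic_thread_fence() stands in for the kernel's smp_mb().
 * Build: cc -std=c11 -o stop_tx stop_tx.c
 */
#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 512U

struct tx_ring {
	atomic_uint next_to_use;    /* producer index */
	atomic_uint next_to_clean;  /* consumer index */
	atomic_bool stopped;        /* models netif_stop_subqueue() state */
	unsigned int restart_queue; /* stat, like tx_stats.restart_queue */
};

/* same arithmetic as the driver's I40E_DESC_UNUSED() macro */
static unsigned int desc_unused(struct tx_ring *r)
{
	unsigned int use = atomic_load(&r->next_to_use);
	unsigned int clean = atomic_load(&r->next_to_clean);

	return ((clean > use) ? 0 : RING_SIZE) + clean - use - 1;
}

/* 2nd level: stop first, full barrier, then re-check, so a concurrent
 * cleanup cannot slip in between the check and the stop and leave the
 * queue stopped forever.
 */
static int maybe_stop_tx_slow(struct tx_ring *r, unsigned int size)
{
	atomic_store(&r->stopped, 1);
	atomic_thread_fence(memory_order_seq_cst); /* kernel: smp_mb() */

	if (desc_unused(r) < size)
		return -1; /* -EBUSY: stay stopped; cleanup restarts us */

	/* a reprieve: room appeared while we were stopping */
	atomic_store(&r->stopped, 0);
	r->restart_queue++;
	return 0;
}

/* 1st level: cheap fast path, no barrier when there is clearly room */
static int maybe_stop_tx(struct tx_ring *r, unsigned int size)
{
	if (desc_unused(r) >= size)
		return 0;
	return maybe_stop_tx_slow(r, size);
}

int main(void)
{
	/* next_to_use = 90, next_to_clean = 100 -> 9 descriptors free */
	struct tx_ring ring = { .next_to_use = 90, .next_to_clean = 100 };
	int rc;

	rc = maybe_stop_tx(&ring, 8); /* fits: fast path, queue stays up */
	printf("want 8:  rc=%d stopped=%d\n", rc, (int)atomic_load(&ring.stopped));

	rc = maybe_stop_tx(&ring, 16); /* does not fit: queue stops */
	printf("want 16: rc=%d stopped=%d\n", rc, (int)atomic_load(&ring.stopped));
	return 0;
}
```

In the sketch, nine descriptors are free, so a request for eight takes the barrier-free fast path while a request for sixteen returns busy and leaves the queue stopped; in the driver, the completion path is then responsible for waking the subqueue once enough descriptors have been reclaimed.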