author     André Fabian Silva Delgado <emulatorman@parabola.nu>   2016-03-25 03:53:42 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>   2016-03-25 03:53:42 -0300
commit     03dd4cb26d967f9588437b0fc9cc0e8353322bb7 (patch)
tree       fa581f6dc1c0596391690d1f67eceef3af8246dc /drivers/net/ethernet/sfc
parent     d4e493caf788ef44982e131ff9c786546904d934 (diff)
Linux-libre 4.5-gnu
Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c         |  96
-rw-r--r--  drivers/net/ethernet/sfc/efx.c          |  18
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c         | 250
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h         |  10
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h   |   5
-rw-r--r--  drivers/net/ethernet/sfc/rx.c           |   1
-rw-r--r--  drivers/net/ethernet/sfc/tx.c           |   8
7 files changed, 325 insertions, 63 deletions
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e6a084a6b..98d33d462 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -181,13 +181,6 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
if (!(nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
- netif_err(efx, drv, efx->net_dev,
- "current firmware does not support TSO\n");
- return -ENODEV;
- }
-
- if (!(nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
netif_err(efx, probe, efx->net_dev,
"current firmware does not support an RX prefix\n");
@@ -493,10 +486,17 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
for (i = 0; i < n; i++) {
- rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc) {
+ /* Don't display the MC error if we didn't have space
+ * for a VF.
+ */
+ if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
+ efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
+ 0, outbuf, outlen, rc);
break;
+ }
if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
rc = -EIO;
break;
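
The hunk above illustrates the pattern this patch adopts in several places: issue the command with efx_mcdi_rpc_quiet() so nothing is logged automatically, then let the caller decide which error codes are expected and report only the rest via efx_mcdi_display_error(). A minimal sketch of that pattern, with a hypothetical wrapper name and otherwise only helpers already used in this file:

static int example_alloc_one_piobuf(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	size_t outlen;
	int rc;

	/* Quiet variant: no automatic error message on failure. */
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
				outbuf, sizeof(outbuf), &outlen);

	/* Report only unexpected errors; a VF running out of PIO
	 * buffers (-ENOSPC) is a normal condition here.
	 */
	if (rc && !(efx_ef10_is_vf(efx) && rc == -ENOSPC))
		efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF, 0,
				       outbuf, outlen, rc);
	return rc;
}
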
@@ -1797,6 +1797,12 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
tx_queue->write_count = 1;
+
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
+ tx_queue->tso_version = 1;
+ }
+
wmb();
efx_ef10_push_tx_desc(tx_queue, txd);
@@ -2375,8 +2381,19 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
netif_info(efx, drv, efx->net_dev,
"other functions on NIC have been reset\n");
- /* MC's boot count has incremented */
- ++nic_data->warm_boot_count;
+
+ /* With MCFW v4.6.x and earlier, the
+ * boot count will have incremented,
+ * so re-read the warm_boot_count
+ * value now to ensure this function
+ * doesn't think it has changed next
+ * time it checks.
+ */
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc >= 0) {
+ nic_data->warm_boot_count = rc;
+ rc = 0;
+ }
}
nic_data->workaround_26807 = true;
} else if (rc == -EPERM) {
@@ -3823,13 +3840,12 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
table->entry[filter_idx].handle);
- rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
if (rc)
- netdev_WARN(efx->net_dev,
- "filter_idx=%#x handle=%#llx\n",
- filter_idx,
- table->entry[filter_idx].handle);
+ netif_info(efx, drv, efx->net_dev,
+ "%s: filter %04x remove failed\n",
+ __func__, filter_idx);
kfree(spec);
}
@@ -3838,11 +3854,14 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
}
#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
- if (id != EFX_EF10_FILTER_ID_INVALID) { \
- filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
- WARN_ON(!table->entry[filter_idx].spec); \
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \
- }
+ if (id != EFX_EF10_FILTER_ID_INVALID) { \
+ filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
+ if (!table->entry[filter_idx].spec) \
+ netif_dbg(efx, drv, efx->net_dev, \
+ "%s: marked null spec old %04x:%04x\n", \
+ __func__, id, filter_idx); \
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\
+ }
static void efx_ef10_filter_mark_old(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -4016,9 +4035,10 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
- netif_warn(efx, drv, efx->net_dev,
- "%scast mismatch filter insert failed rc=%d\n",
- multicast ? "Multi" : "Uni", rc);
+ netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
+ efx->net_dev,
+ "%scast mismatch filter insert failed rc=%d\n",
+ multicast ? "Multi" : "Uni", rc);
} else if (multicast) {
table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
if (!nic_data->workaround_26807) {
@@ -4060,19 +4080,31 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
static void efx_ef10_filter_remove_old(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
- bool remove_failed = false;
+ int remove_failed = 0;
+ int remove_noent = 0;
+ int rc;
int i;
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
if (ACCESS_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
- if (efx_ef10_filter_remove_internal(
- efx, 1U << EFX_FILTER_PRI_AUTO,
- i, true) < 0)
- remove_failed = true;
+ rc = efx_ef10_filter_remove_internal(efx,
+ 1U << EFX_FILTER_PRI_AUTO, i, true);
+ if (rc == -ENOENT)
+ remove_noent++;
+ else if (rc)
+ remove_failed++;
}
}
- WARN_ON(remove_failed);
+
+ if (remove_failed)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: failed to remove %d filters\n",
+ __func__, remove_failed);
+ if (remove_noent)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: failed to remove %d non-existent filters\n",
+ __func__, remove_noent);
}
static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index a3c42a376..0705ec869 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2059,7 +2059,6 @@ static void efx_init_napi_channel(struct efx_channel *channel)
channel->napi_dev = efx->net_dev;
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
- napi_hash_add(&channel->napi_str);
efx_channel_busy_poll_init(channel);
}
@@ -2785,6 +2784,12 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */
+ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */
+ .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */
+ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{0} /* end of list */
};
@@ -3123,10 +3128,10 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO |
NETIF_F_RXCSUM);
- if (efx->type->offload_features & NETIF_F_V6_CSUM)
+ if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
net_dev->features |= NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */
- net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
+ net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
/* All offloads can be toggled */
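
For reference (recollected from pre-4.5 netdev_features.h, not part of this patch): the composite flags dropped here were defined approximately as below, which is why the TSO6 test above is expanded into its constituent bits.

#define NETIF_F_GEN_CSUM	NETIF_F_HW_CSUM
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

With that expansion, the TSO6 condition keeps its old meaning: it is true whenever the NIC type advertises either generic hardware checksumming or IPv6 checksum offload.
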
@@ -3169,14 +3174,15 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
rtnl_lock();
rc = efx_mtd_probe(efx);
rtnl_unlock();
- if (rc)
+ if (rc && rc != -EPERM)
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
rc = pci_enable_pcie_error_reporting(pci_dev);
if (rc && rc != -EINVAL)
- netif_warn(efx, probe, efx->net_dev,
- "pci_enable_pcie_error_reporting failed (%d)\n", rc);
+ netif_notice(efx, probe, efx->net_dev,
+ "PCIE error reporting unavailable (%d).\n",
+ rc);
return 0;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 41fb6b60a..d28e7dd8f 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -82,6 +82,7 @@ int efx_mcdi_init(struct efx_nic *efx)
mcdi->logging_enabled = mcdi_logging_default;
#endif
init_waitqueue_head(&mcdi->wq);
+ init_waitqueue_head(&mcdi->proxy_rx_wq);
spin_lock_init(&mcdi->iface_lock);
mcdi->state = MCDI_STATE_QUIESCENT;
mcdi->mode = MCDI_MODE_POLL;
@@ -315,6 +316,7 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
}
#endif
+ mcdi->resprc_raw = 0;
if (error && mcdi->resp_data_len == 0) {
netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
mcdi->resprc = -EIO;
@@ -325,8 +327,8 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
mcdi->resprc = -EIO;
} else if (error) {
efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
- mcdi->resprc =
- efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
+ mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
+ mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
} else {
mcdi->resprc = 0;
}
@@ -621,9 +623,30 @@ efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
return 0;
}
-static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx,
+ size_t hdr_len, size_t data_len,
+ u32 *proxy_handle)
+{
+ MCDI_DECLARE_BUF_ERR(testbuf);
+ const size_t buflen = sizeof(testbuf);
+
+ if (!proxy_handle || data_len < buflen)
+ return false;
+
+ efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen);
+ if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) {
+ *proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE);
+ return true;
+ }
+
+ return false;
+}
+
+static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
+ size_t inlen,
efx_dword_t *outbuf, size_t outlen,
- size_t *outlen_actual, bool quiet)
+ size_t *outlen_actual, bool quiet,
+ u32 *proxy_handle, int *raw_rc)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
MCDI_DECLARE_BUF_ERR(errbuf);
@@ -657,6 +680,9 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
spin_unlock_bh(&mcdi->iface_lock);
}
+ if (proxy_handle)
+ *proxy_handle = 0;
+
if (rc != 0) {
if (outlen_actual)
*outlen_actual = 0;
@@ -669,6 +695,8 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
* acquiring the iface_lock. */
spin_lock_bh(&mcdi->iface_lock);
rc = mcdi->resprc;
+ if (raw_rc)
+ *raw_rc = mcdi->resprc_raw;
hdr_len = mcdi->resp_hdr_len;
data_len = mcdi->resp_data_len;
err_len = min(sizeof(errbuf), data_len);
@@ -689,6 +717,12 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
-rc);
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ } else if (proxy_handle && (rc == -EPROTO) &&
+ efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
+ proxy_handle)) {
+ mcdi->proxy_rx_status = 0;
+ mcdi->proxy_rx_handle = 0;
+ mcdi->state = MCDI_STATE_PROXY_WAIT;
} else if (rc && !quiet) {
efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
rc);
@@ -701,34 +735,195 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
}
}
- efx_mcdi_release(mcdi);
+ if (!proxy_handle || !*proxy_handle)
+ efx_mcdi_release(mcdi);
return rc;
}
-static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi)
+{
+ if (mcdi->state == MCDI_STATE_PROXY_WAIT) {
+ /* Interrupt the proxy wait. */
+ mcdi->proxy_rx_status = -EINTR;
+ wake_up(&mcdi->proxy_rx_wq);
+ }
+}
+
+static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
+ u32 handle, int status)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT);
+
+ mcdi->proxy_rx_status = efx_mcdi_errno(status);
+ /* Ensure the status is written before we update the handle, since the
+ * latter is used to check if we've finished.
+ */
+ wmb();
+ mcdi->proxy_rx_handle = handle;
+ wake_up(&mcdi->proxy_rx_wq);
+}
+
+static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
+
+ /* Wait for a proxy event, or timeout. */
+ rc = wait_event_timeout(mcdi->proxy_rx_wq,
+ mcdi->proxy_rx_handle != 0 ||
+ mcdi->proxy_rx_status == -EINTR,
+ MCDI_RPC_TIMEOUT);
+
+ if (rc <= 0) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "MCDI proxy timeout %d\n", handle);
+ return -ETIMEDOUT;
+ } else if (mcdi->proxy_rx_handle != handle) {
+ netif_warn(efx, hw, efx->net_dev,
+ "MCDI proxy unexpected handle %d (expected %d)\n",
+ mcdi->proxy_rx_handle, handle);
+ return -EINVAL;
+ }
+
+ return mcdi->proxy_rx_status;
+}
+
+static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
- size_t *outlen_actual, bool quiet)
+ size_t *outlen_actual, bool quiet, int *raw_rc)
{
+ u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */
int rc;
+ if (inbuf && inlen && (inbuf == outbuf)) {
+ /* The input buffer can't be aliased with the output. */
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
- if (rc) {
- if (outlen_actual)
- *outlen_actual = 0;
+ if (rc)
return rc;
+
+ rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, quiet, &proxy_handle, raw_rc);
+
+ if (proxy_handle) {
+ /* Handle proxy authorisation. This allows approval of MCDI
+ * operations to be delegated to the admin function, allowing
+ * fine control over (e.g.) multicast subscriptions.
+ */
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "MCDI waiting for proxy auth %d\n",
+ proxy_handle);
+ rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet);
+
+ if (rc == 0) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "MCDI proxy retry %d\n", proxy_handle);
+
+ /* We now retry the original request. */
+ mcdi->state = MCDI_STATE_RUNNING_SYNC;
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+
+ rc = _efx_mcdi_rpc_finish(efx, cmd, inlen,
+ outbuf, outlen, outlen_actual,
+ quiet, NULL, raw_rc);
+ } else {
+ netif_printk(efx, hw,
+ rc == -EPERM ? KERN_DEBUG : KERN_ERR,
+ efx->net_dev,
+ "MC command 0x%x failed after proxy auth rc=%d\n",
+ cmd, rc);
+
+ if (rc == -EINTR || rc == -EIO)
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ efx_mcdi_release(mcdi);
+ }
}
- return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
- outlen_actual, quiet);
+
+ return rc;
}
+static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ int raw_rc = 0;
+ int rc;
+
+ rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
+ outbuf, outlen, outlen_actual, true, &raw_rc);
+
+ if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
+ efx->type->is_vf) {
+ /* If the EVB port isn't available within a VF this may
+ * mean the PF is still bringing the switch up. We should
+ * retry our request shortly.
+ */
+ unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT;
+ unsigned int delay_us = 10000;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "%s: NO_EVB_PORT; will retry request\n",
+ __func__);
+
+ do {
+ usleep_range(delay_us, delay_us + 10000);
+ rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
+ outbuf, outlen, outlen_actual,
+ true, &raw_rc);
+ if (delay_us < 100000)
+ delay_us <<= 1;
+ } while ((rc == -EPROTO) &&
+ (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
+ time_before(jiffies, abort_time));
+ }
+
+ if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO))
+ efx_mcdi_display_error(efx, cmd, inlen,
+ outbuf, outlen, rc);
+
+ return rc;
+}
+
+/**
+ * efx_mcdi_rpc - Issue an MCDI command and wait for completion
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes. Must be a multiple
+ * of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
+ * @outbuf: Response buffer. May be %NULL if @outlen is 0.
+ * @outlen: Length of response buffer, in bytes. If the actual
+ * response is longer than @outlen & ~3, it will be truncated
+ * to that length.
+ * @outlen_actual: Pointer through which to return the actual response
+ * length. May be %NULL if this is not needed.
+ *
+ * This function may sleep and therefore must be called in an appropriate
+ * context.
+ *
+ * Return: A negative error code, or zero if successful. The error
+ * code may come from the MCDI response or may indicate a failure
+ * to communicate with the MC. In the former case, the response
+ * will still be copied to @outbuf and *@outlen_actual will be
+ * set accordingly. In the latter case, *@outlen_actual will be
+ * set to zero.
+ */
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
- outlen_actual, false);
+ return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, false);
}
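
The kernel-doc added above spells out the efx_mcdi_rpc() contract (sleeping context, response truncation, *outlen_actual semantics). A minimal usage sketch that follows it; the helper name is hypothetical, while the command and buffer macros are ones this driver already uses:

static int example_query_fw(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;	/* MC error or MCDI transport failure */
	if (outlen < MC_CMD_GET_VERSION_OUT_LEN)
		return -EIO;	/* response shorter than expected */

	/* Individual fields would be read here with the MCDI_DWORD()
	 * or MCDI_PTR() accessors.
	 */
	return 0;
}
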
/* Normally, on receiving an error code in the MCDI response,
@@ -744,8 +939,8 @@ int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
- outlen_actual, true);
+ return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, true);
}
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
@@ -866,7 +1061,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
size_t *outlen_actual)
{
return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
- outlen_actual, false);
+ outlen_actual, false, NULL, NULL);
}
int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
@@ -874,7 +1069,7 @@ int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
size_t *outlen_actual)
{
return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
- outlen_actual, true);
+ outlen_actual, true, NULL, NULL);
}
void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
@@ -887,9 +1082,10 @@ void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
code = MCDI_DWORD(outbuf, ERR_CODE);
if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
err_arg = MCDI_DWORD(outbuf, ERR_ARG);
- netif_err(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n",
- cmd, (int)inlen, rc, code, err_arg);
+ netif_printk(efx, hw, rc == -EPERM ? KERN_DEBUG : KERN_ERR,
+ efx->net_dev,
+ "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
+ cmd, inlen, rc, code, err_arg);
}
/* Switch to polled MCDI completions. This can be called in various
@@ -1014,8 +1210,13 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
* receiving a REBOOT event after posting the MCDI
* request. Did the mc reboot before or after the copyout? The
* best we can do always is just return failure.
+ *
+ * If there is an outstanding proxy response expected it is not going
+ * to arrive. We should thus abort it.
*/
spin_lock(&mcdi->iface_lock);
+ efx_mcdi_proxy_abort(mcdi);
+
if (efx_mcdi_complete_sync(mcdi)) {
if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = rc;
@@ -1063,6 +1264,8 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx)
spin_lock(&mcdi->iface_lock);
efx->mc_bist_for_other_fn = true;
+ efx_mcdi_proxy_abort(mcdi);
+
if (efx_mcdi_complete_sync(mcdi)) {
if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = -EIO;
@@ -1171,6 +1374,11 @@ void efx_mcdi_process_event(struct efx_channel *channel,
EFX_QWORD_VAL(*event));
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
break;
+ case MCDI_EVENT_CODE_PROXY_RESPONSE:
+ efx_mcdi_ev_proxy_response(efx,
+ MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
+ MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
+ break;
default:
netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
code);
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 025d504c4..c9aeb0701 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -17,6 +17,8 @@
* @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
* Only the thread that moved into this state is allowed to move out of it.
* @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
+ * @MCDI_STATE_PROXY_WAIT: An MCDI request has completed with a response that
+ * indicates we must wait for a proxy try-again message.
* @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
* has not yet consumed the result. For all other threads, equivalent to
* %MCDI_STATE_RUNNING.
@@ -25,6 +27,7 @@ enum efx_mcdi_state {
MCDI_STATE_QUIESCENT,
MCDI_STATE_RUNNING_SYNC,
MCDI_STATE_RUNNING_ASYNC,
+ MCDI_STATE_PROXY_WAIT,
MCDI_STATE_COMPLETED,
};
@@ -60,6 +63,9 @@ enum efx_mcdi_mode {
* @async_timer: Timer for asynchronous request timeout
* @logging_buffer: buffer that may be used to build MCDI tracing messages
* @logging_enabled: whether to trace MCDI
+ * @proxy_rx_handle: Most recently received proxy authorisation handle
+ * @proxy_rx_status: Status of most recent proxy authorisation
+ * @proxy_rx_wq: Wait queue for updates to proxy_rx_handle
*/
struct efx_mcdi_iface {
struct efx_nic *efx;
@@ -71,6 +77,7 @@ struct efx_mcdi_iface {
unsigned int credits;
unsigned int seqno;
int resprc;
+ int resprc_raw;
size_t resp_hdr_len;
size_t resp_data_len;
spinlock_t async_lock;
@@ -80,6 +87,9 @@ struct efx_mcdi_iface {
char *logging_buffer;
bool logging_enabled;
#endif
+ unsigned int proxy_rx_handle;
+ int proxy_rx_status;
+ wait_queue_head_t proxy_rx_wq;
};
struct efx_mcdi_mon {
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index a8ddd122f..38c422321 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -182,6 +182,7 @@ struct efx_tx_buffer {
*
* @efx: The associated Efx NIC
* @queue: DMA queue number
+ * @tso_version: Version of TSO in use for this queue.
* @channel: The associated channel
* @core_txq: The networking core TX queue structure
* @buffer: The software buffer ring
@@ -228,6 +229,7 @@ struct efx_tx_queue {
/* Members which don't change on the fast path */
struct efx_nic *efx ____cacheline_aligned_in_smp;
unsigned queue;
+ unsigned int tso_version;
struct efx_channel *channel;
struct netdev_queue *core_txq;
struct efx_tx_buffer *buffer;
@@ -1502,8 +1504,9 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
* same cycle, the XMAC can miss the IPG altogether. We work around
* this by adding a further 16 bytes.
*/
+#define EFX_FRAME_PAD 16
#define EFX_MAX_FRAME_LEN(mtu) \
- ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
+ (ALIGN(((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + EFX_FRAME_PAD), 8))
static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
{
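
As a quick check that the rewritten macro matches the old arithmetic, worked for a 1500-byte MTU (ETH_HLEN = 14, VLAN_HLEN = 4, ETH_FCS_LEN = 4): the old form gives ((1500 + 14 + 4 + 4 + 7) & ~7) + 16 = 1528 + 16 = 1544, and the new form gives ALIGN(1500 + 14 + 4 + 4 + 16, 8) = ALIGN(1538, 8) = 1544. The two agree in general because the 16-byte pad is itself a multiple of 8, so ALIGN(x + 16, 8) == ALIGN(x, 8) + 16.
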
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 809ea4610..8956995b2 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -463,7 +463,6 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
skb_record_rx_queue(skb, channel->rx_queue.core_index);
- skb_mark_napi_id(skb, &channel->napi_str);
gro_result = napi_gro_frags(napi);
if (gro_result != GRO_DROP)
channel->irq_mod_score += 2;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 67f6afaa0..f7a0ec1bc 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -1010,13 +1010,17 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
/* Parse the SKB header and initialise state. */
static int tso_start(struct tso_state *st, struct efx_nic *efx,
+ struct efx_tx_queue *tx_queue,
const struct sk_buff *skb)
{
- bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
struct device *dma_dev = &efx->pci_dev->dev;
unsigned int header_len, in_len;
+ bool use_opt_desc = false;
dma_addr_t dma_addr;
+ if (tx_queue->tso_version == 1)
+ use_opt_desc = true;
+
st->ip_off = skb_network_header(skb) - skb->data;
st->tcp_off = skb_transport_header(skb) - skb->data;
header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
@@ -1271,7 +1275,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
/* Find the packet protocol and sanity-check it */
state.protocol = efx_tso_check_protocol(skb);
- rc = tso_start(&state, efx, skb);
+ rc = tso_start(&state, efx, tx_queue, skb);
if (rc)
goto mem_err;
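
Read together with the ef10.c and net_driver.h hunks earlier in this patch, TSO descriptor selection now keys off a per-queue capability rather than the NIC revision. A condensed restatement of the producer/consumer relationship (lines drawn from the patch itself, shown here only to juxtapose them):

/* ef10.c, efx_ef10_tx_init(): advertise TSO support per TX queue. */
if (nic_data->datapath_caps &
    (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
	tx_queue->tso_version = 1;

/* tx.c, tso_start(): opt into TSO option descriptors per queue. */
bool use_opt_desc = false;
if (tx_queue->tso_version == 1)
	use_opt_desc = true;
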