Diffstat (limited to 'drivers/net/ethernet/qlogic/qede')
-rw-r--r--  drivers/net/ethernet/qlogic/qede/Makefile        |   1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h          |  54
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_dcbnl.c    | 348
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c  | 732
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c     | 698
5 files changed, 1670 insertions(+), 163 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 06ff90d87..74a49850d 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
+qede-$(CONFIG_DCB) += qede_dcbnl.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index d02325154..02b06d4e4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -24,16 +24,14 @@
#include <linux/qed/qed_eth_if.h>
#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 7
-#define QEDE_REVISION_VERSION 0
-#define QEDE_ENGINEERING_VERSION 0
+#define QEDE_MINOR_VERSION 10
+#define QEDE_REVISION_VERSION 1
+#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
__stringify(QEDE_ENGINEERING_VERSION)
-#define QEDE_ETH_INTERFACE_VERSION 300
-
#define DRV_MODULE_SYM qede
struct qede_stats {
@@ -61,16 +59,16 @@ struct qede_stats {
/* port */
u64 rx_64_byte_packets;
- u64 rx_127_byte_packets;
- u64 rx_255_byte_packets;
- u64 rx_511_byte_packets;
- u64 rx_1023_byte_packets;
- u64 rx_1518_byte_packets;
- u64 rx_1522_byte_packets;
- u64 rx_2047_byte_packets;
- u64 rx_4095_byte_packets;
- u64 rx_9216_byte_packets;
- u64 rx_16383_byte_packets;
+ u64 rx_65_to_127_byte_packets;
+ u64 rx_128_to_255_byte_packets;
+ u64 rx_256_to_511_byte_packets;
+ u64 rx_512_to_1023_byte_packets;
+ u64 rx_1024_to_1518_byte_packets;
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -114,6 +112,10 @@ struct qede_dev {
u32 dp_module;
u8 dp_level;
+ u32 flags;
+#define QEDE_FLAG_IS_VF BIT(0)
+#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
+
const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info;
@@ -141,6 +143,8 @@ struct qede_dev {
struct mutex qede_lock;
u32 state; /* Protected by qede_lock */
u16 rx_buf_size;
+ u32 rx_copybreak;
+
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
/* Max supported alignment is 256 (8 shift)
@@ -156,6 +160,10 @@ struct qede_dev {
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
struct qede_stats stats;
+#define QEDE_RSS_INDIR_INITED BIT(0)
+#define QEDE_RSS_KEY_INITED BIT(1)
+#define QEDE_RSS_CAPS_INITED BIT(2)
+ u32 rss_params_inited; /* bit-field to track initialized rss params */
struct qed_update_vport_rss_params rss_params;
u16 q_num_rx_buffers; /* Must be a power of two */
u16 q_num_tx_buffers; /* Must be a power of two */
@@ -167,6 +175,8 @@ struct qede_dev {
bool accept_any_vlan;
struct delayed_work sp_task;
unsigned long sp_flags;
+ u16 vxlan_dst_port;
+ u16 geneve_dst_port;
};
enum QEDE_STATE {
@@ -227,6 +237,7 @@ struct qede_rx_queue {
u64 rx_hw_errors;
u64 rx_alloc_errors;
+ u64 rx_ip_frags;
};
union db_prod {
@@ -286,13 +297,19 @@ struct qede_fastpath {
#define QEDE_CSUM_ERROR BIT(0)
#define QEDE_CSUM_UNNECESSARY BIT(1)
+#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
-#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_VXLAN_PORT_CONFIG 2
+#define QEDE_SP_GENEVE_PORT_CONFIG 3
union qede_reload_args {
u16 mtu;
};
+#ifdef CONFIG_DCB
+void qede_set_dcbnl_ops(struct net_device *ndev);
+#endif
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
@@ -301,6 +318,10 @@ void qede_reload(struct qede_dev *edev,
union qede_reload_args *args);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
+bool qede_has_rx_work(struct qede_rx_queue *rxq);
+int qede_txq_has_work(struct qede_tx_queue *txq);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
+ u8 count);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
@@ -314,6 +335,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev);
#define NUM_TX_BDS_MIN 128
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+#define QEDE_MIN_PKT_LEN 64
#define QEDE_RX_HDR_SIZE 256
#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
new file mode 100644
index 000000000..03e8c0212
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -0,0 +1,348 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+#include "qede.h"
+
+static u8 qede_dcbnl_getstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getstate(edev->cdev);
+}
+
+static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setstate(edev->cdev, state);
+}
+
+static void qede_dcbnl_getpermhwaddr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type,
+ pgid, bw_pct, up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct,
+ up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpfccfg(edev->cdev, prio, setting);
+}
+
+static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->setpfccfg(edev->cdev, prio, setting);
+}
+
+static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getcap(edev->cdev, capid, cap);
+}
+
+static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num);
+}
+
+static u8 qede_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getpfcstate(edev->cdev);
+}
+
+static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getapp(edev->cdev, idtype, id);
+}
+
+static u8 qede_dcbnl_getdcbx(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getdcbx(edev->cdev);
+}
+
+static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static u8 qede_dcbnl_setall(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setall(edev->cdev);
+}
+
+static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num);
+}
+
+static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpfcstate(edev->cdev, state);
+}
+
+static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval,
+ u8 up)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up);
+}
+
+static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setdcbx(edev->cdev, state);
+}
+
+static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid,
+ u8 *flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags);
+}
+
+static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags);
+}
+
+static int qede_dcbnl_peer_getappinfo(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *count)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count);
+}
+
+static int qede_dcbnl_peer_getapptable(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getapptable(edev->cdev, app);
+}
+
+static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg);
+}
+
+static int qede_dcbnl_ieee_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_getapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets);
+}
+
+static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
+ .ieee_getpfc = qede_dcbnl_ieee_getpfc,
+ .ieee_setpfc = qede_dcbnl_ieee_setpfc,
+ .ieee_getets = qede_dcbnl_ieee_getets,
+ .ieee_setets = qede_dcbnl_ieee_setets,
+ .ieee_getapp = qede_dcbnl_ieee_getapp,
+ .ieee_setapp = qede_dcbnl_ieee_setapp,
+ .getdcbx = qede_dcbnl_getdcbx,
+ .ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
+ .ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
+ .getstate = qede_dcbnl_getstate,
+ .setstate = qede_dcbnl_setstate,
+ .getpermhwaddr = qede_dcbnl_getpermhwaddr,
+ .getpgtccfgtx = qede_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx,
+ .getpgtccfgrx = qede_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx,
+ .getpfccfg = qede_dcbnl_getpfccfg,
+ .setpfccfg = qede_dcbnl_setpfccfg,
+ .getcap = qede_dcbnl_getcap,
+ .getnumtcs = qede_dcbnl_getnumtcs,
+ .getpfcstate = qede_dcbnl_getpfcstate,
+ .getapp = qede_dcbnl_getapp,
+ .setpgtccfgtx = qede_dcbnl_setpgtccfgtx,
+ .setpgtccfgrx = qede_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx,
+ .setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx,
+ .setall = qede_dcbnl_setall,
+ .setnumtcs = qede_dcbnl_setnumtcs,
+ .setpfcstate = qede_dcbnl_setpfcstate,
+ .setapp = qede_dcbnl_setapp,
+ .setdcbx = qede_dcbnl_setdcbx,
+ .setfeatcfg = qede_dcbnl_setfeatcfg,
+ .getfeatcfg = qede_dcbnl_getfeatcfg,
+ .peer_getappinfo = qede_dcbnl_peer_getappinfo,
+ .peer_getapptable = qede_dcbnl_peer_getapptable,
+ .cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc,
+ .cee_peer_getpg = qede_dcbnl_cee_peer_getpg,
+};
+
+void qede_set_dcbnl_ops(struct net_device *dev)
+{
+ dev->dcbnl_ops = &qede_dcbnl_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index c49dc10ce..f8492cac9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -9,6 +9,7 @@
#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/pci.h>
@@ -27,12 +28,16 @@
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_RQSTAT(stat_name) \
{QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+
+#define QEDE_SELFTEST_POLL_COUNT 100
+
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
} qede_rqstats_arr[] = {
QEDE_RQSTAT(rx_hw_errors),
QEDE_RQSTAT(rx_alloc_errors),
+ QEDE_RQSTAT(rx_ip_frags),
};
#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
@@ -59,16 +64,16 @@ static const struct {
QEDE_STAT(tx_bcast_pkts),
QEDE_PF_STAT(rx_64_byte_packets),
- QEDE_PF_STAT(rx_127_byte_packets),
- QEDE_PF_STAT(rx_255_byte_packets),
- QEDE_PF_STAT(rx_511_byte_packets),
- QEDE_PF_STAT(rx_1023_byte_packets),
- QEDE_PF_STAT(rx_1518_byte_packets),
- QEDE_PF_STAT(rx_1522_byte_packets),
- QEDE_PF_STAT(rx_2047_byte_packets),
- QEDE_PF_STAT(rx_4095_byte_packets),
- QEDE_PF_STAT(rx_9216_byte_packets),
- QEDE_PF_STAT(rx_16383_byte_packets),
+ QEDE_PF_STAT(rx_65_to_127_byte_packets),
+ QEDE_PF_STAT(rx_128_to_255_byte_packets),
+ QEDE_PF_STAT(rx_256_to_511_byte_packets),
+ QEDE_PF_STAT(rx_512_to_1023_byte_packets),
+ QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
+ QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
+ QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
+ QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
+ QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
+ QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
QEDE_PF_STAT(tx_64_byte_packets),
QEDE_PF_STAT(tx_65_to_127_byte_packets),
QEDE_PF_STAT(tx_128_to_255_byte_packets),
@@ -116,11 +121,39 @@ static const struct {
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+enum {
+ QEDE_PRI_FLAG_CMT,
+ QEDE_PRI_FLAG_LEN,
+};
+
+static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+ "Coupled-Function",
+};
+
+enum qede_ethtool_tests {
+ QEDE_ETHTOOL_INT_LOOPBACK,
+ QEDE_ETHTOOL_INTERRUPT_TEST,
+ QEDE_ETHTOOL_MEMORY_TEST,
+ QEDE_ETHTOOL_REGISTER_TEST,
+ QEDE_ETHTOOL_CLOCK_TEST,
+ QEDE_ETHTOOL_TEST_MAX
+};
+
+static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
+ "Internal loopback (offline)",
+ "Interrupt (online)\t",
+ "Memory (online)\t\t",
+ "Register (online)\t",
+ "Clock (online)\t\t",
+};
+
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
int i, j, k;
for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+ if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ continue;
strcpy(buf + j * ETH_GSTRING_LEN,
qede_stats_arr[i].string);
j++;
@@ -139,6 +172,14 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
case ETH_SS_STATS:
qede_get_strings_stats(edev, buf);
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(buf, qede_private_arr,
+ ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
+ break;
+ case ETH_SS_TEST:
+ memcpy(buf, qede_tests_str_arr,
+ ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
+ break;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
@@ -156,8 +197,11 @@ static void qede_get_ethtool_stats(struct net_device *dev,
mutex_lock(&edev->qede_lock);
- for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+ for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
+ if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
+ continue;
buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+ }
for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
buf[cnt] = 0;
@@ -176,8 +220,21 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
- return num_stats + QEDE_NUM_RQSTATS;
+ if (IS_VF(edev)) {
+ int i;
+ for (i = 0; i < QEDE_NUM_STATS; i++)
+ if (qede_stats_arr[i].pf_only)
+ num_stats--;
+ }
+ return num_stats + QEDE_NUM_RQSTATS;
+ case ETH_SS_PRIV_FLAGS:
+ return QEDE_PRI_FLAG_LEN;
+ case ETH_SS_TEST:
+ if (!IS_VF(edev))
+ return QEDE_ETHTOOL_TEST_MAX;
+ else
+ return 0;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
@@ -185,6 +242,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
}
}
+static u32 qede_get_priv_flags(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
+}
+
static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -217,9 +281,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct qed_link_params params;
u32 speed;
- if (!edev->dev_info.common.is_mf_default) {
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev,
- "Link parameters can not be changed in non-default mode\n");
+ "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -328,6 +392,12 @@ static int qede_nway_reset(struct net_device *dev)
struct qed_link_output current_link;
struct qed_link_params link_params;
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+ DP_INFO(edev,
+ "Link settings are not allowed to be changed\n");
+ return -EOPNOTSUPP;
+ }
+
if (!netif_running(dev))
return 0;
@@ -357,6 +427,59 @@ static u32 qede_get_link(struct net_device *dev)
return current_link.link_up;
}
+static int qede_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 rxc, txc;
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+ edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
+
+ coal->rx_coalesce_usecs = rxc;
+ coal->tx_coalesce_usecs = txc;
+
+ return 0;
+}
+
+static int qede_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i, rc = 0;
+ u16 rxc, txc;
+ u8 sb_id;
+
+ if (!netif_running(dev)) {
+ DP_INFO(edev, "Interface is down\n");
+ return -EINVAL;
+ }
+
+ if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+ coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+ DP_INFO(edev,
+ "Can't support requested %s coalesce value [max supported value %d]\n",
+ coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
+ : "tx",
+ QED_COALESCE_MAX);
+ return -EINVAL;
+ }
+
+ rxc = (u16)coal->rx_coalesce_usecs;
+ txc = (u16)coal->tx_coalesce_usecs;
+ for_each_rss(i) {
+ sb_id = edev->fp_array[i].sb_info->igu_sb_id;
+ rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
+ (u8)i, sb_id);
+ if (rc) {
+ DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
static void qede_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
@@ -428,9 +551,9 @@ static int qede_set_pauseparam(struct net_device *dev,
struct qed_link_params params;
struct qed_link_output current_link;
- if (!edev->dev_info.common.is_mf_default) {
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev,
- "Pause parameters can not be updated in non-default mode\n");
+ "Pause settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -569,6 +692,541 @@ static int qede_set_phys_id(struct net_device *dev,
return 0;
}
+static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = edev->num_rss;
+ return 0;
+ case ETHTOOL_GRXFH:
+ return qede_get_rss_flags(edev, info);
+ default:
+ DP_ERR(edev, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ struct qed_update_vport_params vport_update_params;
+ u8 set_caps = 0, clr_caps = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* For TCP only 4-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case UDP_V4_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ /* For IP only 2-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ /* No action is needed if there is no change in the rss capability */
+ if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
+ ~clr_caps) | set_caps))
+ return 0;
+
+ /* Update internal configuration */
+ edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
+ set_caps;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+
+ /* Re-configure if possible */
+ if (netif_running(edev->ndev)) {
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ return edev->ops->vport_update(edev->cdev,
+ &vport_update_params);
+ }
+
+ return 0;
+}
+
+static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return qede_set_rss_flags(edev, info);
+ default:
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 qede_get_rxfh_indir_size(struct net_device *dev)
+{
+ return QED_RSS_IND_TABLE_SIZE;
+}
+
+static u32 qede_get_rxfh_key_size(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return sizeof(edev->rss_params.rss_key);
+}
+
+static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ indir[i] = edev->rss_params.rss_ind_table[i];
+
+ if (key)
+ memcpy(key, edev->rss_params.rss_key,
+ qede_get_rxfh_key_size(dev));
+
+ return 0;
+}
+
+static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!indir && !key)
+ return 0;
+
+ if (indir) {
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ edev->rss_params.rss_ind_table[i] = indir[i];
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ if (key) {
+ memcpy(&edev->rss_params.rss_key, key,
+ qede_get_rxfh_key_size(dev));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+
+ if (netif_running(edev->ndev)) {
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ return edev->ops->vport_update(edev->cdev,
+ &vport_update_params);
+ }
+
+ return 0;
+}
+
+/* This function enables the interrupt generation and the NAPI on the device */
+static void qede_netif_start(struct qede_dev *edev)
+{
+ int i;
+
+ if (!netif_running(edev->ndev))
+ return;
+
+ for_each_rss(i) {
+ /* Update and reenable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
+ napi_enable(&edev->fp_array[i].napi);
+ }
+}
+
+/* This function disables the NAPI and the interrupt generation on the device */
+static void qede_netif_stop(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ napi_disable(&edev->fp_array[i].napi);
+ /* Disable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
+ }
+}
+
+static int qede_selftest_transmit_traffic(struct qede_dev *edev,
+ struct sk_buff *skb)
+{
+ struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+ struct eth_tx_1st_bd *first_bd;
+ dma_addr_t mapping;
+ int i, idx, val;
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ txq->sw_tx_ring[idx].skb = skb;
+ first_bd = qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+ first_bd->data.bd_flags.bitfields = val;
+ val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
+ first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ return -ENOMEM;
+ }
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = 1;
+ txq->sw_tx_prod++;
+ /* 'next page' entries are counted in the producer value */
+ val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+ txq->tx_db.data.bd_prod = val;
+
+ /* wmb makes sure that the BDs data is updated before updating the
+ * producer, otherwise FW may read old data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the queue lock is released and another start_xmit is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+
+ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ if (qede_txq_has_work(txq))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!qede_txq_has_work(txq)) {
+ DP_NOTICE(edev, "Tx completion didn't happen\n");
+ return -1;
+ }
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ txq->sw_tx_cons++;
+ txq->sw_tx_ring[idx].skb = NULL;
+
+ return 0;
+}
+
+static int qede_selftest_receive_traffic(struct qede_dev *edev)
+{
+ struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
+ u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct sw_rx_data *sw_rx_data;
+ union eth_rx_cqe *cqe;
+ u8 *data_ptr;
+ int i;
+
+ /* The packet is expected to be received on rx-queue 0 even though RSS is
+ * enabled. This is because queue 0 is configured as the default
+ * queue and the loopback traffic is not IP.
+ */
+ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ if (qede_has_rx_work(rxq))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!qede_has_rx_work(rxq)) {
+ DP_NOTICE(edev, "Failed to receive the traffic\n");
+ return -1;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from doing speculative reads of CQE
+ * / BD before reading hw_comp_cons. If the CQE is read before it is
+ * written by FW, then FW writes CQE and SB, and then the CPU reads the
+ * hw_comp_cons, it will use an old CQE.
+ */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset + sw_rx_data->page_offset);
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ DP_NOTICE(edev, "Loopback test failed\n");
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ return -1;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+
+ return 0;
+}
+
+static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
+{
+ struct qed_link_params link_params;
+ struct sk_buff *skb = NULL;
+ int rc = 0, i;
+ u32 pkt_size;
+ u8 *packet;
+
+ if (!netif_running(edev->ndev)) {
+ DP_NOTICE(edev, "Interface is down\n");
+ return -EINVAL;
+ }
+
+ qede_netif_stop(edev);
+
+ /* Bring up the link in Loopback mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = loopback_mode;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ /* prepare the loopback packet */
+ pkt_size = edev->ndev->mtu + ETH_HLEN;
+
+ skb = netdev_alloc_skb(edev->ndev, pkt_size);
+ if (!skb) {
+ DP_INFO(edev, "Can't allocate skb\n");
+ rc = -ENOMEM;
+ goto test_loopback_exit;
+ }
+ packet = skb_put(skb, pkt_size);
+ ether_addr_copy(packet, edev->ndev->dev_addr);
+ ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
+ memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ packet[i] = (unsigned char)(i & 0xff);
+
+ rc = qede_selftest_transmit_traffic(edev, skb);
+ if (rc)
+ goto test_loopback_exit;
+
+ rc = qede_selftest_receive_traffic(edev);
+ if (rc)
+ goto test_loopback_exit;
+
+ DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");
+
+test_loopback_exit:
+ dev_kfree_skb(skb);
+
+ /* Bring up the link in Normal mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ qede_netif_start(edev);
+
+ return rc;
+}
+
+static void qede_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+ (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
+
+ memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ if (qede_selftest_run_loopback(edev,
+ QED_LINK_LOOPBACK_INT_PHY)) {
+ buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+
+ if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
+ buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
+ buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
+ buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
+ buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+}
+
+static int qede_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 val;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ val = *(u32 *)data;
+ if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Invalid rx copy break value, range is [%u, %u]",
+ QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
+ return -EINVAL;
+ }
+
+ edev->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qede_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = edev->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops qede_ethtool_ops = {
.get_settings = qede_get_settings,
.set_settings = qede_set_settings,
@@ -577,6 +1235,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.set_msglevel = qede_set_msglevel,
.nway_reset = qede_nway_reset,
.get_link = qede_get_link,
+ .get_coalesce = qede_get_coalesce,
+ .set_coalesce = qede_set_coalesce,
.get_ringparam = qede_get_ringparam,
.set_ringparam = qede_set_ringparam,
.get_pauseparam = qede_get_pauseparam,
@@ -584,13 +1244,51 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_strings = qede_get_strings,
.set_phys_id = qede_set_phys_id,
.get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
.get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
+ .get_channels = qede_get_channels,
+ .set_channels = qede_set_channels,
+ .self_test = qede_self_test,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
+};
+static const struct ethtool_ops qede_vf_ethtool_ops = {
+ .get_settings = qede_get_settings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .get_link = qede_get_link,
+ .get_ringparam = qede_get_ringparam,
+ .set_ringparam = qede_set_ringparam,
+ .get_strings = qede_get_strings,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
+ .get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
};
void qede_set_ethtool_ops(struct net_device *dev)
{
- dev->ethtool_ops = &qede_ethtool_ops;
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (IS_VF(edev))
+ dev->ethtool_ops = &qede_vf_ethtool_ops;
+ else
+ dev->ethtool_ops = &qede_ethtool_ops;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 12f661579..9544e4c41 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,7 +24,7 @@
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
-#include <net/vxlan.h>
+#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -58,6 +58,7 @@ static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_100 0x1644
#define CHIP_NUM_57980S_50 0x1654
#define CHIP_NUM_57980S_25 0x1656
+#define CHIP_NUM_57980S_IOV 0x1664
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
@@ -66,15 +67,24 @@ static const struct qed_eth_ops *qed_ops;
#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
#endif
+enum qede_pci_private {
+ QEDE_PRIVATE_PF,
+ QEDE_PRIVATE_VF
+};
+
static const struct pci_device_id qede_pci_tbl[] = {
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
@@ -89,17 +99,87 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);
+#ifdef CONFIG_QED_SRIOV
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (vlan > 4095) {
+ DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
+ vlan, vf);
+
+ return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
+}
+
+static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ DP_VERBOSE(edev, QED_MSG_IOV,
+ "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+
+ if (!is_valid_ether_addr(mac)) {
+ DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
+ return -EINVAL;
+ }
+
+ return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
+}
+
+static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+{
+ struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
+ int rc;
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
+
+ rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
+
+ /* Enable/Disable Tx switching for PF */
+ if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
+ qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+ struct qed_update_vport_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = num_vfs_param ? 1 : 0;
+ edev->ops->vport_update(edev->cdev, &params);
+ }
+
+ return rc;
+}
+#endif
+
static struct pci_driver qede_pci_driver = {
.name = "qede",
.id_table = qede_pci_tbl,
.probe = qede_probe,
.remove = qede_remove,
+#ifdef CONFIG_QED_SRIOV
+ .sriov_configure = qede_sriov_configure,
+#endif
};
+static void qede_force_mac(void *dev, u8 *mac)
+{
+ struct qede_dev *edev = dev;
+
+ ether_addr_copy(edev->ndev->dev_addr, mac);
+ ether_addr_copy(edev->primary_mac, mac);
+}
+
static struct qed_eth_cb_ops qede_ll_ops = {
{
.link_update = qede_link_update,
},
+ .force_mac = qede_force_mac,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -141,19 +221,10 @@ static
int __init qede_init(void)
{
int ret;
- u32 qed_ver;
pr_notice("qede_init: %s\n", version);
- qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
- if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
- pr_notice("Version mismatch [%08x != %08x]\n",
- qed_ver,
- QEDE_ETH_INTERFACE_VERSION);
- return -EINVAL;
- }
-
- qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+ qed_ops = qed_get_eth_ops();
if (!qed_ops) {
pr_notice("Failed to get qed ethtool operations\n");
return -EINVAL;
@@ -319,6 +390,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
*ipv6_ext = 1;
+ if (skb->encapsulation)
+ rc |= XMIT_ENC;
+
if (skb_is_gso(skb))
rc |= XMIT_LSO;
@@ -380,6 +454,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
return 0;
}
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+ if (is_encap_pkt)
+ return (skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data);
+ else
+ return (skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data);
+}
+
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -390,8 +474,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
if (xmit_type & XMIT_LSO) {
int hlen;
- hlen = skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data;
+ hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
/* linear payload would require its own BD */
if (skb_headlen(skb) > hlen)
@@ -402,6 +485,24 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
}
#endif
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+ /* wmb makes sure that the BDs data is updated before updating the
+ * producer, otherwise FW may read old data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the queue lock is released and another start_xmit is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+}
+
/* Main transmit function */
static
netdev_tx_t qede_start_xmit(struct sk_buff *skb,
@@ -460,6 +561,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
DP_NOTICE(edev, "SKB mapping failed\n");
qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
nbd++;
@@ -491,15 +593,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Fill the parsing flags & params according to the requested offload */
if (xmit_type & XMIT_L4_CSUM) {
- u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
-
/* We don't re-calculate IP checksum as it is already done by
* the upper stack
*/
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
- first_bd->data.bitfields |= cpu_to_le16(temp);
+ if (xmit_type & XMIT_ENC) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ first_bd->data.bitfields |=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ }
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass few params, since the device cracker doesn't
@@ -515,10 +620,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
third_bd->data.lso_mss =
cpu_to_le16(skb_shinfo(skb)->gso_size);
- first_bd->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- hlen = skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data;
+ if (unlikely(xmit_type & XMIT_ENC)) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, true);
+ } else {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, false);
+ }
/* @@@TBD - if will not be removed need to check */
third_bd->data.bitfields |=
@@ -551,6 +661,10 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
tx_data_bd = (struct eth_tx_bd *)third_bd;
data_split = true;
}
+ } else {
+ first_bd->data.bitfields |=
+ (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
}
/* Handle fragmented skb */
@@ -562,6 +676,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (rc) {
qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
data_split);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
@@ -586,6 +701,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (rc) {
qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
data_split);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
}
@@ -606,23 +722,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
txq->tx_db.data.bd_prod =
cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- /* wmb makes sure that the BDs data is updated before updating the
- * producer, otherwise FW may read old data from the BDs.
- */
- wmb();
- barrier();
- writel(txq->tx_db.raw, txq->doorbell_addr);
-
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the queue lock is released and another start_xmit is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
+ if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+ qede_update_tx_producer(txq);
if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
< (MAX_SKB_FRAGS + 1))) {
+ if (skb->xmit_more)
+ qede_update_tx_producer(txq);
+
netif_tx_stop_queue(netdev_txq);
DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
"Stop queue was called\n");
@@ -644,7 +751,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static int qede_txq_has_work(struct qede_tx_queue *txq)
+int qede_txq_has_work(struct qede_tx_queue *txq)
{
u16 hw_bd_cons;
@@ -727,7 +834,7 @@ static int qede_tx_int(struct qede_dev *edev,
return 0;
}
-static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
u16 hw_comp_cons, sw_comp_cons;
@@ -782,8 +889,8 @@ static inline void qede_reuse_page(struct qede_dev *edev,
/* In case of allocation failures reuse buffers
* from consumer index to produce buffers for firmware
*/
-static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
- struct qede_dev *edev, u8 count)
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *edev, u8 count)
{
struct sw_rx_data *curr_cons;
@@ -818,7 +925,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
* network stack to take the ownership of the page
* which can be recycled multiple times by the driver.
*/
- atomic_inc(&curr_cons->data->_count);
+ page_ref_inc(curr_cons->data);
qede_reuse_page(edev, rxq, curr_cons);
}
@@ -879,6 +986,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
if (csum_flag & QEDE_CSUM_UNNECESSARY)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
+ skb->csum_level = 1;
}
static inline void qede_skb_receive(struct qede_dev *edev,
@@ -931,7 +1041,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
/* Incr page ref count to reuse on allocation failure
* so that it doesn't get freed while freeing SKB.
*/
- atomic_inc(&current_bd->data->_count);
+ page_ref_inc(current_bd->data);
goto out;
}
@@ -971,8 +1081,7 @@ static void qede_tpa_start(struct qede_dev *edev,
* start until its over and we don't want to risk allocation failing
* here, so re-allocate when aggregation will be over.
*/
- dma_unmap_addr_set(sw_rx_data_prod, mapping,
- dma_unmap_addr(replace_buf, mapping));
+ sw_rx_data_prod->mapping = replace_buf->mapping;
sw_rx_data_prod->data = replace_buf->data;
rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
@@ -1188,13 +1297,47 @@ err:
tpa_info->skb = NULL;
}
-static u8 qede_check_csum(u16 flag)
+static bool qede_tunn_exist(u16 flag)
+{
+ return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 tcsum = 0;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
{
u16 csum_flag = 0;
u8 csum = 0;
- if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
csum = QEDE_CSUM_UNNECESSARY;
@@ -1209,6 +1352,28 @@ static u8 qede_check_csum(u16 flag)
return csum;
}
+static u8 qede_check_csum(u16 flag)
+{
+ if (!qede_tunn_exist(flag))
+ return qede_check_notunn_csum(flag);
+ else
+ return qede_check_tunn_csum(flag);
+}
+
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 flag)
+{
+ u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+ if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+ (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+ PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+ return true;
+
+ return false;
+}
+
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
struct qede_dev *edev = fp->edev;
@@ -1287,6 +1452,12 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
csum_flag = qede_check_csum(parse_flag);
if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
+ parse_flag)) {
+ rxq->rx_ip_frags++;
+ goto alloc_skb;
+ }
+
DP_NOTICE(edev,
"CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
sw_comp_cons, parse_flag);
@@ -1295,6 +1466,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
goto next_cqe;
}
+alloc_skb:
skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
if (unlikely(!skb)) {
DP_NOTICE(edev,
@@ -1305,7 +1477,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
}
/* Copy data into SKB */
- if (len + pad <= QEDE_RX_HDR_SIZE) {
+ if (len + pad <= edev->rx_copybreak) {
memcpy(skb_put(skb, len),
page_address(data) + pad +
sw_rx_data->page_offset, len);
@@ -1340,7 +1512,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
* freeing SKB.
*/
- atomic_inc(&sw_rx_data->data->_count);
+ page_ref_inc(sw_rx_data->data);
rxq->rx_alloc_errors++;
qede_recycle_rx_bd_ring(rxq, edev,
fp_cqe->bd_num);
@@ -1437,56 +1609,49 @@ next_cqe: /* don't consume bd rx buffer */
static int qede_poll(struct napi_struct *napi, int budget)
{
- int work_done = 0;
struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
- napi);
+ napi);
struct qede_dev *edev = fp->edev;
+ int rx_work_done = 0;
+ u8 tc;
- while (1) {
- u8 tc;
-
- for (tc = 0; tc < edev->num_tc; tc++)
- if (qede_txq_has_work(&fp->txqs[tc]))
- qede_tx_int(edev, &fp->txqs[tc]);
-
- if (qede_has_rx_work(fp->rxq)) {
- work_done += qede_rx_int(fp, budget - work_done);
-
- /* must not complete if we consumed full budget */
- if (work_done >= budget)
- break;
- }
+ for (tc = 0; tc < edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ qede_tx_int(edev, &fp->txqs[tc]);
+
+ rx_work_done = qede_has_rx_work(fp->rxq) ?
+ qede_rx_int(fp, budget) : 0;
+ if (rx_work_done < budget) {
+ qed_sb_update_sb_idx(fp->sb_info);
+ /* *_has_*_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (qed_sb_update_sb_idx)
+ * prior to this check (*_has_*_work) so that
+ * we won't write the "newer" value of the status block
+ * to HW (if there was a DMA right after
+ * qede_has_rx_work and if there is no rmb, the memory
+ * reading (qed_sb_update_sb_idx) may be postponed
+ * to right before *_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
+ rmb();
/* Fall out from the NAPI loop if needed */
- if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
- qed_sb_update_sb_idx(fp->sb_info);
- /* *_has_*_work() reads the status block,
- * thus we need to ensure that status block indices
- * have been actually read (qed_sb_update_sb_idx)
- * prior to this check (*_has_*_work) so that
- * we won't write the "newer" value of the status block
- * to HW (if there was a DMA right after
- * qede_has_rx_work and if there is no rmb, the memory
- * reading (qed_sb_update_sb_idx) may be postponed
- * to right before *_ack_sb). In this case there
- * will never be another interrupt until there is
- * another update of the status block, while there
- * is still unhandled work.
- */
- rmb();
-
- if (!(qede_has_rx_work(fp->rxq) ||
- qede_has_tx_work(fp))) {
- napi_complete(napi);
- /* Update and reenable interrupts */
- qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
- 1 /*update*/);
- break;
- }
+ if (!(qede_has_rx_work(fp->rxq) ||
+ qede_has_tx_work(fp))) {
+ napi_complete(napi);
+
+ /* Update and reenable interrupts */
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+ 1 /*update*/);
+ } else {
+ rx_work_done = budget;
}
}
- return work_done;
+ return rx_work_done;
}
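
The rewritten poll routine keeps the usual NAPI contract: TX completions are always serviced, RX work is bounded by the budget, and a value below the budget is returned only once the status block confirms there is no further work, after napi_complete() and re-arming the interrupt via qed_sb_ack(); if work remains, the full budget is reported so the core polls again. A toy user-space model of that contract, under those assumptions:

#include <stdio.h>

/* Simplified stand-in for the poll contract: a return value below the budget
 * means "done, interrupts re-enabled"; returning the budget means
 * "more work pending, poll me again". */
static int demo_poll(int pending, int budget, int *completed)
{
	if (pending < budget) {
		*completed = 1;	/* stands in for napi_complete() + qed_sb_ack() */
		return pending;
	}

	*completed = 0;	/* leftover work: report the full budget, expect another poll */
	return budget;
}

int main(void)
{
	int completed, done;

	done = demo_poll(10, 64, &completed);
	printf("pending=10  budget=64 -> done=%d completed=%d\n", done, completed);

	done = demo_poll(200, 64, &completed);
	printf("pending=200 budget=64 -> done=%d completed=%d\n", done, completed);
	return 0;
}
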
static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
@@ -1569,16 +1734,25 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
- edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
- edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
- edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
- edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
- edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
- edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
- edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
- edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
- edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
- edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+ edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
+ edev->stats.rx_128_to_255_byte_packets =
+ stats.rx_128_to_255_byte_packets;
+ edev->stats.rx_256_to_511_byte_packets =
+ stats.rx_256_to_511_byte_packets;
+ edev->stats.rx_512_to_1023_byte_packets =
+ stats.rx_512_to_1023_byte_packets;
+ edev->stats.rx_1024_to_1518_byte_packets =
+ stats.rx_1024_to_1518_byte_packets;
+ edev->stats.rx_1519_to_1522_byte_packets =
+ stats.rx_1519_to_1522_byte_packets;
+ edev->stats.rx_1519_to_2047_byte_packets =
+ stats.rx_1519_to_2047_byte_packets;
+ edev->stats.rx_2048_to_4095_byte_packets =
+ stats.rx_2048_to_4095_byte_packets;
+ edev->stats.rx_4096_to_9216_byte_packets =
+ stats.rx_4096_to_9216_byte_packets;
+ edev->stats.rx_9217_to_16383_byte_packets =
+ stats.rx_9217_to_16383_byte_packets;
edev->stats.rx_crc_errors = stats.rx_crc_errors;
edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
edev->stats.rx_pause_frames = stats.rx_pause_frames;
@@ -1652,6 +1826,49 @@ static struct rtnl_link_stats64 *qede_get_stats64(
return stats;
}
+#ifdef CONFIG_QED_SRIOV
+static int qede_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
+}
+
+static int qede_set_vf_rate(struct net_device *dev, int vfidx,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
+ max_tx_rate);
+}
+
+static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
+}
+
+static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
+ int link_state)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
+}
+#endif
+
static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
struct qed_update_vport_params params;
@@ -1850,10 +2067,13 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
}
/* Remove vlan */
- rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
- if (rc) {
- DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
- return -EINVAL;
+ if (vlan->configured) {
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
+ vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+ return -EINVAL;
+ }
}
qede_del_vlan_from_list(edev, vlan);
@@ -1893,6 +2113,99 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ netdev_features_t changes = features ^ dev->features;
+ bool need_reload = false;
+
+ /* No action needed if hardware GRO is disabled during driver load */
+ if (changes & NETIF_F_GRO) {
+ if (dev->features & NETIF_F_GRO)
+ need_reload = !edev->gro_disable;
+ else
+ need_reload = edev->gro_disable;
+ }
+
+ if (need_reload && netif_running(edev->ndev)) {
+ dev->features = features;
+ qede_reload(edev, NULL, NULL);
+ return 1;
+ }
+
+ return 0;
+}
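
qede_set_features() only cares about which feature bits actually changed, which the XOR of the requested and current masks yields directly; a reload is scheduled only when the GRO bit flips relative to the hardware-GRO state recorded at load time. The XOR idiom in isolation, with made-up bit names:

#include <stdint.h>
#include <stdio.h>

#define DEMO_F_GRO	(1u << 0)	/* illustrative bits, not real NETIF_F_* values */
#define DEMO_F_RXCSUM	(1u << 1)

int main(void)
{
	uint32_t cur = DEMO_F_GRO | DEMO_F_RXCSUM;	/* currently enabled */
	uint32_t req = DEMO_F_RXCSUM;			/* requested by the stack */
	uint32_t changed = cur ^ req;

	if (changed & DEMO_F_GRO)
		printf("GRO toggled, now %s\n", (req & DEMO_F_GRO) ? "on" : "off");
	else
		printf("GRO unchanged\n");
	return 0;
}
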
+
+static void qede_udp_tunnel_add(struct net_device *dev,
+ struct udp_tunnel_info *ti)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(ti->port);
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = t_port;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
+ t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = t_port;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
+ t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ break;
+ default:
+ return;
+ }
+
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_udp_tunnel_del(struct net_device *dev,
+ struct udp_tunnel_info *ti)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(ti->port);
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (t_port != edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
+ t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (t_port != edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
+ t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ break;
+ default:
+ return;
+ }
+
+ schedule_delayed_work(&edev->sp_task, 0);
+}
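
Both tunnel-port handlers follow the same pattern: convert the port from the network byte order used by struct udp_tunnel_info into host order, cache it in the qede_dev, raise a slow-path flag, and let the delayed work actually program the device. The byte-order step on its own:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	/* The UDP tunnel core hands the port over in network byte order;
	 * the driver stores and programs it in host order, hence ntohs(). */
	unsigned short wire_port = htons(4789);	/* 4789 is the IANA VXLAN port */

	printf("on the wire: 0x%04x, host order: %u\n",
	       (unsigned int)wire_port, (unsigned int)ntohs(wire_port));
	return 0;
}
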
+
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -1901,9 +2214,22 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_mac = qede_set_vf_mac,
+ .ndo_set_vf_vlan = qede_set_vf_vlan,
+#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_link_state = qede_set_vf_link_state,
+ .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+ .ndo_get_vf_config = qede_get_vf_config,
+ .ndo_set_vf_rate = qede_set_vf_rate,
+#endif
+ .ndo_udp_tunnel_add = qede_udp_tunnel_add,
+ .ndo_udp_tunnel_del = qede_udp_tunnel_del,
};
/* -------------------------------------------------------------------------
@@ -1974,6 +2300,14 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
+ /* Encap features */
+ hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO_ECN;
+ ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
@@ -2074,6 +2408,8 @@ static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
+ struct qed_dev *cdev = edev->cdev;
+
mutex_lock(&edev->qede_lock);
if (edev->state == QEDE_STATE_OPEN) {
@@ -2081,6 +2417,24 @@ static void qede_sp_task(struct work_struct *work)
qede_config_rx_mode(edev->ndev);
}
+ if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
+ struct qed_tunn_params tunn_params;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = edev->vxlan_dst_port;
+ qed_ops->tunn_config(cdev, &tunn_params);
+ }
+
+ if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
+ struct qed_tunn_params tunn_params;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = edev->geneve_dst_port;
+ qed_ops->tunn_config(cdev, &tunn_params);
+ }
+
mutex_unlock(&edev->qede_lock);
}
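
The worker consumes each request with test_and_clear_bit(), so a port update raised several times before the work runs is still programmed only once, and nothing is re-sent on the next pass. A standalone C11 sketch of that consume-once flag pattern (the bit names here are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_SP_VXLAN_PORT_CONFIG	0
#define DEMO_SP_GENEVE_PORT_CONFIG	1

static atomic_ulong sp_flags;

/* Rough analogues of set_bit() / test_and_clear_bit() on a flags word. */
static void demo_set_bit(int nr)
{
	atomic_fetch_or(&sp_flags, 1UL << nr);
}

static bool demo_test_and_clear_bit(int nr)
{
	unsigned long old = atomic_fetch_and(&sp_flags, ~(1UL << nr));

	return old & (1UL << nr);
}

int main(void)
{
	demo_set_bit(DEMO_SP_VXLAN_PORT_CONFIG);
	demo_set_bit(DEMO_SP_VXLAN_PORT_CONFIG);	/* raised twice before the worker runs */

	printf("worker pass 1: vxlan=%d geneve=%d\n",
	       demo_test_and_clear_bit(DEMO_SP_VXLAN_PORT_CONFIG),
	       demo_test_and_clear_bit(DEMO_SP_GENEVE_PORT_CONFIG));
	printf("worker pass 2: vxlan=%d\n",
	       demo_test_and_clear_bit(DEMO_SP_VXLAN_PORT_CONFIG));
	return 0;
}
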
@@ -2099,8 +2453,9 @@ enum qede_probe_mode {
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
- enum qede_probe_mode mode)
+ bool is_vf, enum qede_probe_mode mode)
{
+ struct qed_probe_params probe_params;
struct qed_slowpath_params params;
struct qed_dev_eth_info dev_info;
struct qede_dev *edev;
@@ -2110,8 +2465,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (unlikely(dp_level & QED_LEVEL_INFO))
pr_notice("Starting qede probe\n");
- cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
- dp_module, dp_level);
+ memset(&probe_params, 0, sizeof(probe_params));
+ probe_params.protocol = QED_PROTOCOL_ETH;
+ probe_params.dp_module = dp_module;
+ probe_params.dp_level = dp_level;
+ probe_params.is_vf = is_vf;
+ cdev = qed_ops->common->probe(pdev, &probe_params);
if (!cdev) {
rc = -ENODEV;
goto err0;
@@ -2145,6 +2504,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
goto err2;
}
+ if (is_vf)
+ edev->flags |= QEDE_FLAG_IS_VF;
+
qede_init_ndev(edev);
rc = register_netdev(edev->ndev);
@@ -2157,8 +2519,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+#ifdef CONFIG_DCB
+ if (!IS_VF(edev))
+ qede_set_dcbnl_ops(edev->ndev);
+#endif
+
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
+ edev->rx_copybreak = QEDE_RX_HDR_SIZE;
DP_INFO(edev, "Ending successfully qede probe\n");
@@ -2176,12 +2544,24 @@ err0:
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ bool is_vf = false;
u32 dp_module = 0;
u8 dp_level = 0;
+ switch ((enum qede_pci_private)id->driver_data) {
+ case QEDE_PRIVATE_VF:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a VF\n");
+ is_vf = true;
+ break;
+ default:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a PF\n");
+ }
+
qede_config_debug(debug, &dp_module, &dp_level);
- return __qede_probe(pdev, dp_module, dp_level,
+ return __qede_probe(pdev, dp_module, dp_level, is_vf,
QEDE_PROBE_NORMAL);
}
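
Whether __qede_probe() runs in PF or VF mode is decided purely by the driver_data field of the matching pci_device_id entry, cast back to the qede_pci_private enum. A table-driven dispatch of the same shape, with placeholder device IDs and enum names invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

enum demo_pci_private {		/* stand-in for the driver's private enum */
	DEMO_PRIVATE_PF,
	DEMO_PRIVATE_VF,
};

struct demo_pci_id {
	unsigned int device;	/* placeholder IDs, not real qede devices */
	enum demo_pci_private driver_data;
};

static const struct demo_pci_id demo_id_table[] = {
	{ 0x1000, DEMO_PRIVATE_PF },
	{ 0x1001, DEMO_PRIVATE_VF },
};

int main(void)
{
	for (unsigned int i = 0;
	     i < sizeof(demo_id_table) / sizeof(demo_id_table[0]); i++) {
		bool is_vf = demo_id_table[i].driver_data == DEMO_PRIVATE_VF;

		printf("device 0x%04x -> probe as %s\n",
		       demo_id_table[i].device, is_vf ? "VF" : "PF");
	}
	return 0;
}
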
@@ -2320,7 +2700,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
if (replace_buf->data) {
dma_unmap_page(&edev->pdev->dev,
- dma_unmap_addr(replace_buf, mapping),
+ replace_buf->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(replace_buf->data);
}
@@ -2420,7 +2800,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev,
goto err;
}
- dma_unmap_addr_set(replace_buf, mapping, mapping);
+ replace_buf->mapping = mapping;
tpa_info->replace_buf.page_offset = 0;
tpa_info->replace_buf_mapping = mapping;
@@ -2463,6 +2843,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_NEXT_PTR,
+ QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(struct eth_rx_bd),
&rxq->rx_bd_ring);
@@ -2474,6 +2855,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(union eth_rx_cqe),
&rxq->rx_comp_ring);
@@ -2525,9 +2907,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
NUM_TX_BDS_MAX,
- sizeof(*p_virt),
- &txq->tx_pbl);
+ sizeof(*p_virt), &txq->tx_pbl);
if (rc)
goto err;
@@ -2871,15 +3253,16 @@ static int qede_stop_queues(struct qede_dev *edev)
return rc;
}
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
int rc, tc, i;
int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
- struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
struct qed_update_vport_params vport_update_params;
struct qed_queue_start_common_params q_params;
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
struct qed_start_vport_params start = {0};
+ bool reset_rss_indir = false;
if (!edev->num_rss) {
DP_ERR(edev,
@@ -2892,6 +3275,7 @@ static int qede_start_queues(struct qede_dev *edev)
start.vport_id = 0;
start.drop_ttl0 = true;
start.remove_inner_vlan = vlan_removal_en;
+ start.clear_stats = clear_stats;
rc = edev->ops->vport_start(cdev, &start);
@@ -2971,19 +3355,59 @@ static int qede_start_queues(struct qede_dev *edev)
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1;
+ if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+ qed_info->tx_switching) {
+ vport_update_params.update_tx_switching_flg = 1;
+ vport_update_params.tx_switching_flg = 1;
+ }
+
/* Fill struct with RSS params */
if (QEDE_RSS_CNT(edev) > 1) {
vport_update_params.update_rss_flg = 1;
- for (i = 0; i < 128; i++)
- rss_params->rss_ind_table[i] =
- ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
- netdev_rss_key_fill(rss_params->rss_key,
- sizeof(rss_params->rss_key));
+
+ /* Need to validate current RSS config uses valid entries */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ if (edev->rss_params.rss_ind_table[i] >=
+ edev->num_rss) {
+ reset_rss_indir = true;
+ break;
+ }
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
+ reset_rss_indir) {
+ u16 val;
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ u16 indir_val;
+
+ val = QEDE_RSS_CNT(edev);
+ indir_val = ethtool_rxfh_indir_default(i, val);
+ edev->rss_params.rss_ind_table[i] = indir_val;
+ }
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+ netdev_rss_key_fill(edev->rss_params.rss_key,
+ sizeof(edev->rss_params.rss_key));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+ edev->rss_params.rss_caps = QED_RSS_IPV4 |
+ QED_RSS_IPV6 |
+ QED_RSS_IPV4_TCP |
+ QED_RSS_IPV6_TCP;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+ }
+
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
} else {
- memset(rss_params, 0, sizeof(*rss_params));
+ memset(&vport_update_params.rss_params, 0,
+ sizeof(vport_update_params.rss_params));
}
- memcpy(&vport_update_params.rss_params, rss_params,
- sizeof(*rss_params));
rc = edev->ops->vport_update(cdev, &vport_update_params);
if (rc) {
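
The RSS setup above validates the cached indirection table against the current queue count and, if it was never initialized or now points at missing queues, refills it with the ethtool_rxfh_indir_default() spread, which is simply the entry index modulo the number of RX queues. That default fill on its own:

#include <stdint.h>
#include <stdio.h>

#define DEMO_IND_TABLE_SIZE	128	/* same length as QED_RSS_IND_TABLE_SIZE */

int main(void)
{
	uint16_t ind_table[DEMO_IND_TABLE_SIZE];
	unsigned int num_queues = 4;	/* example RX queue count */
	unsigned int i;

	/* ethtool_rxfh_indir_default(i, n) reduces to i % n, spreading the
	 * table entries round-robin across the available RX queues. */
	for (i = 0; i < DEMO_IND_TABLE_SIZE; i++)
		ind_table[i] = i % num_queues;

	printf("entries 0..7:");
	for (i = 0; i < 8; i++)
		printf(" %u", ind_table[i]);
	printf("\n");
	return 0;
}
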
@@ -3061,6 +3485,7 @@ out:
enum qede_load_mode {
QEDE_LOAD_NORMAL,
+ QEDE_LOAD_RELOAD,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@@ -3099,7 +3524,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
goto err3;
DP_INFO(edev, "Setup IRQs succeeded\n");
- rc = qede_start_queues(edev);
+ rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
if (rc)
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
@@ -3154,7 +3579,7 @@ void qede_reload(struct qede_dev *edev,
if (func)
func(edev, args);
- qede_load(edev, QEDE_LOAD_NORMAL);
+ qede_load(edev, QEDE_LOAD_RELOAD);
mutex_lock(&edev->qede_lock);
qede_config_rx_mode(edev->ndev);
@@ -3165,12 +3590,20 @@ void qede_reload(struct qede_dev *edev,
static int qede_open(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
+ int rc;
netif_carrier_off(ndev);
edev->ops->common->set_power_state(edev->cdev, PCI_D0);
- return qede_load(edev, QEDE_LOAD_NORMAL);
+ rc = qede_load(edev, QEDE_LOAD_NORMAL);
+
+ if (rc)
+ return rc;
+
+ udp_tunnel_get_rx_info(ndev);
+
+ return 0;
}
static int qede_close(struct net_device *ndev)
@@ -3221,6 +3654,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
return -EFAULT;
}
+ if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+ DP_NOTICE(edev, "qed prevents setting MAC\n");
+ return -EINVAL;
+ }
+
ether_addr_copy(ndev->dev_addr, addr->sa_data);
if (!netif_running(ndev)) {