author:    André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-03-25 03:53:42 -0300
committer: André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-03-25 03:53:42 -0300
commit:    03dd4cb26d967f9588437b0fc9cc0e8353322bb7 (patch)
tree:      fa581f6dc1c0596391690d1f67eceef3af8246dc /drivers/net/ethernet/chelsio
parent:    d4e493caf788ef44982e131ff9c786546904d934 (diff)
Linux-libre 4.5-gnu
Diffstat (limited to 'drivers/net/ethernet/chelsio')
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig                    17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cphy.h                 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c            2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/mv88x201x.c            2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/my3126.c               2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/pm3393.c               2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/vsc7326.c              2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/ael1002.c            12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/aq100x.c              2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/common.h              2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c         14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_hw.c              48
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/vsc8211.c             4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c           11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h              19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c     144
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c     148
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c        232
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c               179
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h                10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c               187
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c             162
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h               1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h       4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h            31
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_values.h           3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c              11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h         1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c          11
29 files changed, 901 insertions, 364 deletions
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index a79813a17..4d187f22c 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -65,13 +65,14 @@ config CHELSIO_T3
will be called cxgb3.
config CHELSIO_T4
- tristate "Chelsio Communications T4/T5 Ethernet support"
+ tristate "Chelsio Communications T4/T5/T6 Ethernet support"
depends on PCI && (IPV6 || IPV6=n)
select FW_LOADER
select MDIO
---help---
- This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
- adapter and T5 based 40Gb Ethernet adapter.
+ This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
+ adapters and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
+ Ethernet adapters.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
@@ -85,7 +86,7 @@ config CHELSIO_T4
will be called cxgb4.
config CHELSIO_T4_DCB
- bool "Data Center Bridging (DCB) Support for Chelsio T4/T5 cards"
+ bool "Data Center Bridging (DCB) Support for Chelsio T4/T5/T6 cards"
default n
depends on CHELSIO_T4 && DCB
---help---
@@ -107,12 +108,12 @@ config CHELSIO_T4_FCOE
If unsure, say N.
config CHELSIO_T4VF
- tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
+ tristate "Chelsio Communications T4/T5/T6 Virtual Function Ethernet support"
depends on PCI
---help---
- This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
- adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual
- Functions.
+ This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
+ adapters and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
+ Ethernet adapters with PCI-E SR-IOV Virtual Functions.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
index a4d2a4c08..bf43da6c6 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cphy.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -137,7 +137,7 @@ static inline int simple_mdio_write(struct cphy *cphy, int reg,
/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, struct net_device *dev,
- int phy_addr, struct cphy_ops *phy_ops,
+ int phy_addr, const struct cphy_ops *phy_ops,
const struct mdio_ops *mdio_ops)
{
struct adapter *adapter = netdev_priv(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
index 71018a4fd..76ce6e538 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
@@ -337,7 +337,7 @@ static void mv88e1xxx_destroy(struct cphy *cphy)
kfree(cphy);
}
-static struct cphy_ops mv88e1xxx_ops = {
+static const struct cphy_ops mv88e1xxx_ops = {
.destroy = mv88e1xxx_destroy,
.reset = mv88e1xxx_reset,
.interrupt_enable = mv88e1xxx_interrupt_enable,
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
index d0cf61155..7ddb301bc 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -195,7 +195,7 @@ static void mv88x201x_destroy(struct cphy *cphy)
kfree(cphy);
}
-static struct cphy_ops mv88x201x_ops = {
+static const struct cphy_ops mv88x201x_ops = {
.destroy = mv88x201x_destroy,
.reset = mv88x201x_reset,
.interrupt_enable = mv88x201x_interrupt_enable,
diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c
index a683fd3bb..d546f46c8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/my3126.c
+++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c
@@ -154,7 +154,7 @@ static void my3126_destroy(struct cphy *cphy)
kfree(cphy);
}
-static struct cphy_ops my3126_ops = {
+static const struct cphy_ops my3126_ops = {
.destroy = my3126_destroy,
.reset = my3126_reset,
.interrupt_enable = my3126_interrupt_enable,
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index ec5e05052..eb462d7db 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -570,7 +570,7 @@ static void pm3393_destroy(struct cmac *cmac)
kfree(cmac);
}
-static struct cmac_ops pm3393_ops = {
+static const struct cmac_ops pm3393_ops = {
.destroy = pm3393_destroy,
.reset = pm3393_reset,
.interrupt_enable = pm3393_interrupt_enable,
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
index b0cb388f5..6f30b6f78 100644
--- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
@@ -666,7 +666,7 @@ static void mac_destroy(struct cmac *mac)
kfree(mac);
}
-static struct cmac_ops vsc7326_ops = {
+static const struct cmac_ops vsc7326_ops = {
.destroy = mac_destroy,
.reset = mac_reset,
.interrupt_handler = mac_intr_handler,
diff --git a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
index 2028da95a..dadf11e3d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
@@ -198,7 +198,7 @@ static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed,
return 0;
}
-static struct cphy_ops ael1002_ops = {
+static const struct cphy_ops ael1002_ops = {
.reset = ael1002_reset,
.intr_enable = ael1002_intr_noop,
.intr_disable = ael1002_intr_noop,
@@ -224,7 +224,7 @@ static int ael1006_reset(struct cphy *phy, int wait)
return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait);
}
-static struct cphy_ops ael1006_ops = {
+static const struct cphy_ops ael1006_ops = {
.reset = ael1006_reset,
.intr_enable = t3_phy_lasi_intr_enable,
.intr_disable = t3_phy_lasi_intr_disable,
@@ -495,7 +495,7 @@ static int ael2005_intr_handler(struct cphy *phy)
return ret ? ret : cphy_cause_link_change;
}
-static struct cphy_ops ael2005_ops = {
+static const struct cphy_ops ael2005_ops = {
.reset = ael2005_reset,
.intr_enable = ael2005_intr_enable,
.intr_disable = ael2005_intr_disable,
@@ -801,7 +801,7 @@ static int ael2020_intr_handler(struct cphy *phy)
return ret ? ret : cphy_cause_link_change;
}
-static struct cphy_ops ael2020_ops = {
+static const struct cphy_ops ael2020_ops = {
.reset = ael2020_reset,
.intr_enable = ael2020_intr_enable,
.intr_disable = ael2020_intr_disable,
@@ -856,7 +856,7 @@ static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed,
return 0;
}
-static struct cphy_ops qt2045_ops = {
+static const struct cphy_ops qt2045_ops = {
.reset = ael1006_reset,
.intr_enable = t3_phy_lasi_intr_enable,
.intr_disable = t3_phy_lasi_intr_disable,
@@ -921,7 +921,7 @@ static int xaui_direct_power_down(struct cphy *phy, int enable)
return 0;
}
-static struct cphy_ops xaui_direct_ops = {
+static const struct cphy_ops xaui_direct_ops = {
.reset = xaui_direct_reset,
.intr_enable = ael1002_intr_noop,
.intr_disable = ael1002_intr_noop,
diff --git a/drivers/net/ethernet/chelsio/cxgb3/aq100x.c b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c
index 341b7ef15..6af5d200e 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/aq100x.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c
@@ -247,7 +247,7 @@ static int aq100x_get_link_status(struct cphy *phy, int *link_ok,
return 0;
}
-static struct cphy_ops aq100x_ops = {
+static const struct cphy_ops aq100x_ops = {
.reset = aq100x_reset,
.intr_enable = aq100x_intr_enable,
.intr_disable = aq100x_intr_disable,
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index 442480982..1bd7d8966 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -575,7 +575,7 @@ static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg,
/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
- int phy_addr, struct cphy_ops *phy_ops,
+ int phy_addr, const struct cphy_ops *phy_ops,
const struct mdio_ops *mdio_ops,
unsigned int caps, const char *desc)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index e68c5e4f3..6cdd9adcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -701,15 +701,16 @@ static ssize_t attr_store(struct device *d,
ssize_t(*set) (struct net_device *, unsigned int),
unsigned int min_val, unsigned int max_val)
{
- char *endp;
ssize_t ret;
unsigned int val;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- val = simple_strtoul(buf, &endp, 0);
- if (endp == buf || val < min_val || val > max_val)
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+ if (val < min_val || val > max_val)
return -EINVAL;
rtnl_lock();
@@ -829,14 +830,15 @@ static ssize_t tm_attr_store(struct device *d,
struct port_info *pi = netdev_priv(to_net_dev(d));
struct adapter *adap = pi->adapter;
unsigned int val;
- char *endp;
ssize_t ret;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- val = simple_strtoul(buf, &endp, 0);
- if (endp == buf || val > 10000000)
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+ if (val > 10000000)
return -EINVAL;
rtnl_lock();
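A minimal sketch (not part of the commit; the helper name is illustrative) of the conversion pattern both hunks above apply. Unlike simple_strtoul(), kstrtouint() fails on trailing garbage and on overflow, so the explicit range check is no longer the only guard against bad sysfs input:

static int parse_bounded_uint(const char *buf, unsigned int min_val,
			      unsigned int max_val, unsigned int *val)
{
	int ret = kstrtouint(buf, 0, val);	/* base 0 honours 0x/0 prefixes */

	if (ret)
		return ret;	/* -EINVAL on junk, -ERANGE on overflow */
	if (*val < min_val || *val > max_val)
		return -EINVAL;
	return 0;
}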
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index a22768c94..a89721fad 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -681,6 +681,24 @@ int t3_seeprom_wp(struct adapter *adapter, int enable)
return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
+static int vpdstrtouint(char *s, int len, unsigned int base, unsigned int *val)
+{
+ char tok[len + 1];
+
+ memcpy(tok, s, len);
+ tok[len] = 0;
+ return kstrtouint(strim(tok), base, val);
+}
+
+static int vpdstrtou16(char *s, int len, unsigned int base, u16 *val)
+{
+ char tok[len + 1];
+
+ memcpy(tok, s, len);
+ tok[len] = 0;
+ return kstrtou16(strim(tok), base, val);
+}
+
/**
* get_vpd_params - read VPD parameters from VPD EEPROM
* @adapter: adapter to read
@@ -709,11 +727,21 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
return ret;
}
- p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
- p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
- p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
- p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
- p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
+ ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
+ if (ret)
+ return ret;
+ ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
+ if (ret)
+ return ret;
+ ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
+ if (ret)
+ return ret;
+ ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
+ if (ret)
+ return ret;
+ ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
+ if (ret)
+ return ret;
memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
/* Old eeproms didn't have port information */
@@ -723,8 +751,14 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
} else {
p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
- p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
- p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
+ ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
+ &p->xauicfg[0]);
+ if (ret)
+ return ret;
+ ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
+ &p->xauicfg[1]);
+ if (ret)
+ return ret;
}
ret = hex2bin(p->eth_base, vpd.na_data, 6);
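Hedged sketch (field contents assumed) of why the vpdstrtouint()/vpdstrtou16() helpers above copy and trim before converting: VPD fields are fixed-width and space-padded rather than NUL-terminated, and the strict kstrto*() parsers reject trailing whitespace:

/* A 6-byte core-clock field holding "200" plus three pad spaces. */
char cclk_data[6] = { '2', '0', '0', ' ', ' ', ' ' };
unsigned int cclk;
int ret = vpdstrtouint(cclk_data, sizeof(cclk_data), 10, &cclk);
/* tok[] gains the missing NUL, strim() drops the padding -> cclk == 200 */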
diff --git a/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
index 4f9a1c272..8638ad42b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
@@ -336,7 +336,7 @@ static int vsc8211_intr_handler(struct cphy *cphy)
return cphy_cause;
}
-static struct cphy_ops vsc8211_ops = {
+static const struct cphy_ops vsc8211_ops = {
.reset = vsc8211_reset,
.intr_enable = vsc8211_intr_enable,
.intr_disable = vsc8211_intr_disable,
@@ -350,7 +350,7 @@ static struct cphy_ops vsc8211_ops = {
.power_down = vsc8211_power_down,
};
-static struct cphy_ops vsc8211_fiber_ops = {
+static const struct cphy_ops vsc8211_fiber_ops = {
.reset = vsc8211_reset,
.intr_enable = vsc8211_intr_enable,
.intr_disable = vsc8211_intr_disable,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 11dd91e4d..7ad43af6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -118,6 +118,11 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
if (ret) {
write_unlock_bh(&ctbl->lock);
+ dev_err(adap->pdev_dev,
+ "CLIP FW cmd failed with error %d, "
+ "Connections using %pI6c wont be "
+ "offloaded",
+ ret, ce->addr6.sin6_addr.s6_addr);
return ret;
}
} else {
@@ -127,6 +132,9 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
}
} else {
write_unlock_bh(&ctbl->lock);
+ dev_info(adap->pdev_dev, "CLIP table overflow, "
+ "Connections using %pI6c wont be offloaded",
+ (void *)lip);
return -ENOMEM;
}
write_unlock_bh(&ctbl->lock);
@@ -146,6 +154,9 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
int hash;
int ret = -1;
+ if (!ctbl)
+ return;
+
hash = clip_addr_hash(ctbl, addr, v6);
read_lock_bh(&ctbl->lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 55a47de54..ec6e84967 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -301,6 +301,8 @@ struct devlog_params {
/* Stores chip specific parameters */
struct arch_specific_params {
u8 nchan;
+ u8 pm_stats_cnt;
+ u8 cng_ch_bits_log; /* congestion channel map bits width */
u16 mps_rplc_size;
u16 vfcount;
u32 sge_fl_db;
@@ -398,11 +400,10 @@ struct link_config {
enum {
MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
- MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
+ MAX_OFLD_QSETS = 16, /* # of offload Tx, iscsi Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */
- MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
};
enum {
@@ -420,7 +421,7 @@ enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
- + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
+ + MAX_RDMA_CIQS + INGQ_EXTRAS,
};
struct adapter;
@@ -483,6 +484,8 @@ struct sge_fl { /* SGE free-buffer queue state */
unsigned int pidx; /* producer index */
unsigned long alloc_failed; /* # of times buffer allocation failed */
unsigned long large_alloc_failed;
+ unsigned long mapping_err; /* # of RX Buffer DMA Mapping failures */
+ unsigned long low; /* # of times momentarily starving */
unsigned long starving;
/* RO fields */
unsigned int cntxt_id; /* SGE context id for the free list */
@@ -618,6 +621,7 @@ struct sge_ofld_txq { /* state for an SGE offload Tx queue */
struct adapter *adap;
struct sk_buff_head sendq; /* list of backpressured packets */
struct tasklet_struct qresume_tsk; /* restarts the queue */
+ bool service_ofldq_running; /* service_ofldq() is processing sendq */
u8 full; /* the Tx ring is full */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;
@@ -636,7 +640,7 @@ struct sge {
struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
- struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
+ struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
@@ -647,10 +651,10 @@ struct sge {
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
- u16 ofldqsets; /* # of active offload queue sets */
+ u16 iscsiqsets; /* # of active iSCSI queue sets */
u16 rdmaqs; /* # of available RDMA Rx queues */
u16 rdmaciqs; /* # of available RDMA concentrator IQs */
- u16 ofld_rxq[MAX_OFLD_QSETS];
+ u16 iscsi_rxq[MAX_OFLD_QSETS];
u16 rdma_rxq[MAX_RDMA_QUEUES];
u16 rdma_ciq[MAX_RDMA_CIQS];
u16 timer_val[SGE_NTIMERS];
@@ -676,7 +680,7 @@ struct sge {
};
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
-#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
+#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
@@ -1253,6 +1257,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
const u8 *fw_data, unsigned int size, int force);
+int t4_fl_pkt_align(struct adapter *adap);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_check_fw_version(struct adapter *adap);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 4269944c5..e6a4072b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -757,8 +757,8 @@ static int pm_stats_show(struct seq_file *seq, void *v)
};
int i;
- u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
- u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
+ u32 tx_cnt[T6_PM_NSTATS], rx_cnt[T6_PM_NSTATS];
+ u64 tx_cyc[T6_PM_NSTATS], rx_cyc[T6_PM_NSTATS];
struct adapter *adap = seq->private;
t4_pmtx_get_stats(adap, tx_cnt, tx_cyc);
@@ -773,6 +773,32 @@ static int pm_stats_show(struct seq_file *seq, void *v)
for (i = 0; i < PM_NSTATS - 1; i++)
seq_printf(seq, "%-13s %10u %20llu\n",
rx_pm_stats[i], rx_cnt[i], rx_cyc[i]);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ /* In T5 the granularity of the total wait is too fine.
+ * It is not useful as it reaches the max value too fast.
+ * Hence display this Input FIFO wait for T6 onwards.
+ */
+ seq_printf(seq, "%13s %10s %20s\n",
+ " ", "Total wait", "Total Occupancy");
+ seq_printf(seq, "Tx FIFO wait %10u %20llu\n",
+ tx_cnt[i], tx_cyc[i]);
+ seq_printf(seq, "Rx FIFO wait %10u %20llu\n",
+ rx_cnt[i], rx_cyc[i]);
+
+ /* Skip index 6 as there is nothing useful here */
+ i += 2;
+
+ /* At index 7, a new stat for read latency (count, total wait)
+ * is added.
+ */
+ seq_printf(seq, "%13s %10s %20s\n",
+ " ", "Reads", "Total wait");
+ seq_printf(seq, "Tx latency %10u %20llu\n",
+ tx_cnt[i], tx_cyc[i]);
+ seq_printf(seq, "Rx latency %10u %20llu\n",
+ rx_cnt[i], rx_cyc[i]);
+ }
return 0;
}
@@ -1559,25 +1585,35 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
-
if (v == SEQ_START_TOKEN) {
- if (adap->params.arch.mps_rplc_size > 128)
+ if (chip_ver > CHELSIO_T5) {
seq_puts(seq, "Idx Ethernet address Mask "
+ " VNI Mask IVLAN Vld "
+ "DIP_Hit Lookup Port "
"Vld Ports PF VF "
"Replication "
" P0 P1 P2 P3 ML\n");
- else
- seq_puts(seq, "Idx Ethernet address Mask "
- "Vld Ports PF VF Replication"
- " P0 P1 P2 P3 ML\n");
+ } else {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF "
+ "Replication "
+ " P0 P1 P2 P3 ML\n");
+ else
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF Replication"
+ " P0 P1 P2 P3 ML\n");
+ }
} else {
u64 mask;
u8 addr[ETH_ALEN];
- bool replicate;
+ bool replicate, dip_hit = false, vlan_vld = false;
unsigned int idx = (uintptr_t)v - 2;
u64 tcamy, tcamx, val;
- u32 cls_lo, cls_hi, ctl;
+ u32 cls_lo, cls_hi, ctl, data2, vnix = 0, vniy = 0;
u32 rplc[8] = {0};
+ u8 lookup_type = 0, port_num = 0;
+ u16 ivlan = 0;
if (chip_ver > CHELSIO_T5) {
/* CtlCmdType - 0: Read, 1: Write
@@ -1596,6 +1632,22 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
tcamy = DMACH_G(val) << 32;
tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+ data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
+ lookup_type = DATALKPTYPE_G(data2);
+ /* 0 - Outer header, 1 - Inner header
+ * [71:48] bit locations are overloaded for
+ * outer vs. inner lookup types.
+ */
+ if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
+ /* Inner header VNI */
+ vniy = ((data2 & DATAVIDH2_F) << 23) |
+ (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
+ dip_hit = data2 & DATADIPHIT_F;
+ } else {
+ vlan_vld = data2 & DATAVIDH2_F;
+ ivlan = VIDL_G(val);
+ }
+ port_num = DATAPORTNUM_G(data2);
/* Read tcamx. Change the control param */
ctl |= CTLXYBITSEL_V(1);
@@ -1603,6 +1655,12 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
tcamx = DMACH_G(val) << 32;
tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+ data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
+ if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
+ /* Inner header VNI mask */
+ vnix = ((data2 & DATAVIDH2_F) << 23) |
+ (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
+ }
} else {
tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
@@ -1662,17 +1720,47 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
}
tcamxy2valmask(tcamx, tcamy, addr, &mask);
- if (chip_ver > CHELSIO_T5)
- seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
- "%012llx%3c %#x%4u%4d",
- idx, addr[0], addr[1], addr[2], addr[3],
- addr[4], addr[5], (unsigned long long)mask,
- (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
- PORTMAP_G(cls_hi),
- T6_PF_G(cls_lo),
- (cls_lo & T6_VF_VALID_F) ?
- T6_VF_G(cls_lo) : -1);
- else
+ if (chip_ver > CHELSIO_T5) {
+ /* Inner header lookup */
+ if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
+ seq_printf(seq,
+ "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx %06x %06x - - %3c"
+ " 'I' %4x "
+ "%3c %#x%4u%4d", idx, addr[0],
+ addr[1], addr[2], addr[3],
+ addr[4], addr[5],
+ (unsigned long long)mask,
+ vniy, vnix, dip_hit ? 'Y' : 'N',
+ port_num,
+ (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ T6_PF_G(cls_lo),
+ (cls_lo & T6_VF_VALID_F) ?
+ T6_VF_G(cls_lo) : -1);
+ } else {
+ seq_printf(seq,
+ "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx - - ",
+ idx, addr[0], addr[1], addr[2],
+ addr[3], addr[4], addr[5],
+ (unsigned long long)mask);
+
+ if (vlan_vld)
+ seq_printf(seq, "%4u Y ", ivlan);
+ else
+ seq_puts(seq, " - N ");
+
+ seq_printf(seq,
+ "- %3c %4x %3c %#x%4u%4d",
+ lookup_type ? 'I' : 'O', port_num,
+ (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ T6_PF_G(cls_lo),
+ (cls_lo & T6_VF_VALID_F) ?
+ T6_VF_G(cls_lo) : -1);
+ }
+ } else
seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
"%012llx%3c %#x%4u%4d",
idx, addr[0], addr[1], addr[2], addr[3],
@@ -2245,7 +2333,7 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+ int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
@@ -2325,14 +2413,16 @@ do { \
TL("TxMapErr:", mapping_err);
RL("FLAllocErr:", fl.alloc_failed);
RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLMapErr:", fl.mapping_err);
+ RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
} else if (iscsi_idx < iscsi_entries) {
const struct sge_ofld_rxq *rx =
- &adap->sge.ofldrxq[iscsi_idx * 4];
+ &adap->sge.iscsirxq[iscsi_idx * 4];
const struct sge_ofld_txq *tx =
&adap->sge.ofldtxq[iscsi_idx * 4];
- int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx);
+ int n = min(4, adap->sge.iscsiqsets - 4 * iscsi_idx);
S("QType:", "iSCSI");
T("TxQ ID:", q.cntxt_id);
@@ -2359,6 +2449,8 @@ do { \
RL("RxNoMem:", stats.nomem);
RL("FLAllocErr:", fl.alloc_failed);
RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLMapErr:", fl.mapping_err);
+ RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
} else if (rdma_idx < rdma_entries) {
@@ -2388,6 +2480,8 @@ do { \
RL("RxNoMem:", stats.nomem);
RL("FLAllocErr:", fl.alloc_failed);
RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLMapErr:", fl.mapping_err);
+ RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
} else if (ciq_idx < ciq_entries) {
@@ -2448,7 +2542,7 @@ do { \
static int sge_queue_entries(const struct adapter *adap)
{
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
+ DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index a077f9476..7a0b92b2f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -35,79 +35,79 @@ static void set_msglevel(struct net_device *dev, u32 val)
}
static const char stats_strings[][ETH_GSTRING_LEN] = {
- "tx_octets_ok ",
- "tx_frames_ok ",
- "tx_broadcast_frames ",
- "tx_multicast_frames ",
- "tx_unicast_frames ",
- "tx_error_frames ",
-
- "tx_frames_64 ",
- "tx_frames_65_to_127 ",
- "tx_frames_128_to_255 ",
- "tx_frames_256_to_511 ",
- "tx_frames_512_to_1023 ",
- "tx_frames_1024_to_1518 ",
- "tx_frames_1519_to_max ",
-
- "tx_frames_dropped ",
- "tx_pause_frames ",
- "tx_ppp0_frames ",
- "tx_ppp1_frames ",
- "tx_ppp2_frames ",
- "tx_ppp3_frames ",
- "tx_ppp4_frames ",
- "tx_ppp5_frames ",
- "tx_ppp6_frames ",
- "tx_ppp7_frames ",
-
- "rx_octets_ok ",
- "rx_frames_ok ",
- "rx_broadcast_frames ",
- "rx_multicast_frames ",
- "rx_unicast_frames ",
-
- "rx_frames_too_long ",
- "rx_jabber_errors ",
- "rx_fcs_errors ",
- "rx_length_errors ",
- "rx_symbol_errors ",
- "rx_runt_frames ",
-
- "rx_frames_64 ",
- "rx_frames_65_to_127 ",
- "rx_frames_128_to_255 ",
- "rx_frames_256_to_511 ",
- "rx_frames_512_to_1023 ",
- "rx_frames_1024_to_1518 ",
- "rx_frames_1519_to_max ",
-
- "rx_pause_frames ",
- "rx_ppp0_frames ",
- "rx_ppp1_frames ",
- "rx_ppp2_frames ",
- "rx_ppp3_frames ",
- "rx_ppp4_frames ",
- "rx_ppp5_frames ",
- "rx_ppp6_frames ",
- "rx_ppp7_frames ",
-
- "rx_bg0_frames_dropped ",
- "rx_bg1_frames_dropped ",
- "rx_bg2_frames_dropped ",
- "rx_bg3_frames_dropped ",
- "rx_bg0_frames_trunc ",
- "rx_bg1_frames_trunc ",
- "rx_bg2_frames_trunc ",
- "rx_bg3_frames_trunc ",
-
- "tso ",
- "tx_csum_offload ",
- "rx_csum_good ",
- "vlan_extractions ",
- "vlan_insertions ",
- "gro_packets ",
- "gro_merged ",
+ "tx_octets_ok ",
+ "tx_frames_ok ",
+ "tx_broadcast_frames ",
+ "tx_multicast_frames ",
+ "tx_unicast_frames ",
+ "tx_error_frames ",
+
+ "tx_frames_64 ",
+ "tx_frames_65_to_127 ",
+ "tx_frames_128_to_255 ",
+ "tx_frames_256_to_511 ",
+ "tx_frames_512_to_1023 ",
+ "tx_frames_1024_to_1518 ",
+ "tx_frames_1519_to_max ",
+
+ "tx_frames_dropped ",
+ "tx_pause_frames ",
+ "tx_ppp0_frames ",
+ "tx_ppp1_frames ",
+ "tx_ppp2_frames ",
+ "tx_ppp3_frames ",
+ "tx_ppp4_frames ",
+ "tx_ppp5_frames ",
+ "tx_ppp6_frames ",
+ "tx_ppp7_frames ",
+
+ "rx_octets_ok ",
+ "rx_frames_ok ",
+ "rx_broadcast_frames ",
+ "rx_multicast_frames ",
+ "rx_unicast_frames ",
+
+ "rx_frames_too_long ",
+ "rx_jabber_errors ",
+ "rx_fcs_errors ",
+ "rx_length_errors ",
+ "rx_symbol_errors ",
+ "rx_runt_frames ",
+
+ "rx_frames_64 ",
+ "rx_frames_65_to_127 ",
+ "rx_frames_128_to_255 ",
+ "rx_frames_256_to_511 ",
+ "rx_frames_512_to_1023 ",
+ "rx_frames_1024_to_1518 ",
+ "rx_frames_1519_to_max ",
+
+ "rx_pause_frames ",
+ "rx_ppp0_frames ",
+ "rx_ppp1_frames ",
+ "rx_ppp2_frames ",
+ "rx_ppp3_frames ",
+ "rx_ppp4_frames ",
+ "rx_ppp5_frames ",
+ "rx_ppp6_frames ",
+ "rx_ppp7_frames ",
+
+ "rx_bg0_frames_dropped ",
+ "rx_bg1_frames_dropped ",
+ "rx_bg2_frames_dropped ",
+ "rx_bg3_frames_dropped ",
+ "rx_bg0_frames_trunc ",
+ "rx_bg1_frames_trunc ",
+ "rx_bg2_frames_trunc ",
+ "rx_bg3_frames_trunc ",
+
+ "tso ",
+ "tx_csum_offload ",
+ "rx_csum_good ",
+ "vlan_extractions ",
+ "vlan_insertions ",
+ "gro_packets ",
+ "gro_merged ",
};
static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
@@ -686,7 +686,7 @@ static int set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
lc->requested_fc |= PAUSE_TX;
if (netif_running(dev))
- return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
+ return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan,
lc);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3f573e214..6c2aaa3b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -160,19 +160,8 @@ MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
static uint force_init;
module_param(force_init, uint, 0644);
-MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
-
-/*
- * Normally if the firmware we connect to has Configuration File support, we
- * use that and only fall back to the old Driver-based initialization if the
- * Configuration File fails for some reason. If force_old_init is set, then
- * we'll always use the old Driver-based initialization sequence.
- */
-static uint force_old_init;
-
-module_param(force_old_init, uint, 0644);
-MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
- " parameter");
+MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter, "
+"deprecated parameter");
static int dflt_msg_enable = DFLT_MSG_ENABLE;
@@ -194,23 +183,6 @@ module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
- * Queue interrupt hold-off timer values. Queues default to the first of these
- * upon creation.
- */
-static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
-
-module_param_array(intr_holdoff, uint, NULL, 0644);
-MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
- "0..4 in microseconds, deprecated parameter");
-
-static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
-
-module_param_array(intr_cnt, uint, NULL, 0644);
-MODULE_PARM_DESC(intr_cnt,
- "thresholds 1..3 for queue interrupt packet counters, "
- "deprecated parameter");
-
-/*
* Normally we tell the chip to deliver Ingress Packets into our DMA buffers
* offset by 2 bytes in order to have the IP headers line up on 4-byte
* boundaries. This is a requirement for many architectures which will throw
@@ -224,13 +196,7 @@ MODULE_PARM_DESC(intr_cnt,
*/
static int rx_dma_offset = 2;
-static bool vf_acls;
-
#ifdef CONFIG_PCI_IOV
-module_param(vf_acls, bool, 0644);
-MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
- "deprecated parameter");
-
/* Configure the number of PCI-E Virtual Function which are to be instantiated
* on SR-IOV Capable Physical Functions.
*/
@@ -251,12 +217,6 @@ module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
"Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
-static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
-
-module_param(tp_vlan_pri_map, uint, 0644);
-MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
- "deprecated parameter");
-
static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list);
@@ -764,8 +724,8 @@ static void name_msix_vecs(struct adapter *adap)
}
/* offload queues */
- for_each_ofldrxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
+ for_each_iscsirxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
adap->port[0]->name, i);
for_each_rdmarxq(&adap->sge, i)
@@ -780,7 +740,7 @@ static void name_msix_vecs(struct adapter *adap)
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+ int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -797,11 +757,11 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
- for_each_ofldrxq(s, ofldqidx) {
+ for_each_iscsirxq(s, iscsiqidx) {
err = request_irq(adap->msix_info[msi_index].vec,
t4_sge_intr_msix, 0,
adap->msix_info[msi_index].desc,
- &s->ofldrxq[ofldqidx].rspq);
+ &s->iscsirxq[iscsiqidx].rspq);
if (err)
goto unwind;
msi_index++;
@@ -833,9 +793,9 @@ unwind:
while (--rdmaqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->rdmarxq[rdmaqidx].rspq);
- while (--ofldqidx >= 0)
+ while (--iscsiqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
- &s->ofldrxq[ofldqidx].rspq);
+ &s->iscsirxq[iscsiqidx].rspq);
while (--ethqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->ethrxq[ethqidx].rspq);
@@ -851,8 +811,9 @@ static void free_msix_queue_irqs(struct adapter *adap)
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
for_each_ethrxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
- for_each_ofldrxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
+ for_each_iscsirxq(s, i)
+ free_irq(adap->msix_info[msi_index++].vec,
+ &s->iscsirxq[i].rspq);
for_each_rdmarxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
for_each_rdmaciq(s, i)
@@ -1091,8 +1052,8 @@ freeout: t4_free_sge_resources(adap);
}
}
- j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
- for_each_ofldrxq(s, i) {
+ j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
+ for_each_iscsirxq(s, i) {
err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
adap->port[i / j],
s->fw_evtq.cntxt_id);
@@ -1108,7 +1069,7 @@ freeout: t4_free_sge_resources(adap);
msi_idx += nq; \
} while (0)
- ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
+ ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq);
ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
@@ -1179,16 +1140,10 @@ static int set_filter_wr(struct adapter *adapter, int fidx)
*/
if (f->fs.newdmac || f->fs.newvlan) {
/* allocate L2T entry for new filter */
- f->l2t = t4_l2t_alloc_switching(adapter->l2t);
+ f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
+ f->fs.eport, f->fs.dmac);
if (f->l2t == NULL) {
kfree_skb(skb);
- return -EAGAIN;
- }
- if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
- f->fs.eport, f->fs.dmac)) {
- cxgb4_l2t_release(f->l2t);
- f->l2t = NULL;
- kfree_skb(skb);
return -ENOMEM;
}
}
@@ -1509,7 +1464,7 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
else
stid = -1;
} else {
- stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
+ stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
if (stid < 0)
stid = -1;
}
@@ -1523,7 +1478,7 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
if (family == PF_INET)
t->stids_in_use++;
else
- t->stids_in_use += 4;
+ t->stids_in_use += 2;
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -1574,13 +1529,13 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
if (family == PF_INET)
__clear_bit(stid, t->stid_bmap);
else
- bitmap_release_region(t->stid_bmap, stid, 2);
+ bitmap_release_region(t->stid_bmap, stid, 1);
t->stid_tab[stid].data = NULL;
if (stid < t->nstids) {
if (family == PF_INET)
t->stids_in_use--;
else
- t->stids_in_use -= 4;
+ t->stids_in_use -= 2;
} else {
t->sftids_in_use--;
}
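The order argument in the two bitmap hunks above follows the kernel's region semantics; a short standalone sketch (array size chosen arbitrarily):

/* bitmap_find_free_region(bmap, nbits, order) reserves an aligned run
 * of 2^order bits.  Order 1 -> 2 stids per IPv6 server (previously
 * order 2 -> 4), matching the stids_in_use adjustments of 2 above.
 */
DECLARE_BITMAP(stid_bmap, 64);
int stid = bitmap_find_free_region(stid_bmap, 64, 1);	/* claims 2 bits */
if (stid >= 0)
	bitmap_release_region(stid_bmap, stid, 1);	/* frees both */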
@@ -2281,7 +2236,7 @@ static void disable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
disable_txq_db(&adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
+ for_each_iscsirxq(&adap->sge, i)
disable_txq_db(&adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
disable_txq_db(&adap->sge.ctrlq[i].q);
@@ -2293,7 +2248,7 @@ static void enable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
+ for_each_iscsirxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
@@ -2363,7 +2318,7 @@ static void recover_all_queues(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
+ for_each_iscsirxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
@@ -2447,10 +2402,10 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.nrxq = adap->sge.rdmaqs;
lli.nciq = adap->sge.rdmaciqs;
} else if (uld == CXGB4_ULD_ISCSI) {
- lli.rxq_ids = adap->sge.ofld_rxq;
- lli.nrxq = adap->sge.ofldqsets;
+ lli.rxq_ids = adap->sge.iscsi_rxq;
+ lli.nrxq = adap->sge.iscsiqsets;
}
- lli.ntxq = adap->sge.ofldqsets;
+ lli.ntxq = adap->sge.iscsiqsets;
lli.nchan = adap->params.nports;
lli.nports = adap->params.nports;
lli.wr_cred = adap->params.ofldq_wr_cred;
@@ -3144,16 +3099,6 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
if (ret < 0)
return ret;
- /* select capabilities we'll be using */
- if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
- if (!vf_acls)
- c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
- else
- c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
- } else if (vf_acls) {
- dev_err(adap->pdev_dev, "virtualization ACLs not supported");
- return ret;
- }
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
@@ -4346,11 +4291,11 @@ static void cfg_queues(struct adapter *adap)
* capped by the number of available cores.
*/
if (n10g) {
- i = min_t(int, ARRAY_SIZE(s->ofldrxq),
+ i = min_t(int, ARRAY_SIZE(s->iscsirxq),
num_online_cpus());
- s->ofldqsets = roundup(i, adap->params.nports);
+ s->iscsiqsets = roundup(i, adap->params.nports);
} else
- s->ofldqsets = adap->params.nports;
+ s->iscsiqsets = adap->params.nports;
/* For RDMA one Rx queue per channel suffices */
s->rdmaqs = adap->params.nports;
/* Try and allow at least 1 CIQ per cpu rounding down
@@ -4381,8 +4326,8 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
s->ofldtxq[i].q.size = 1024;
- for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
- struct sge_ofld_rxq *r = &s->ofldrxq[i];
+ for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
+ struct sge_ofld_rxq *r = &s->iscsirxq[i];
init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
r->rspq.uld = CXGB4_ULD_ISCSI;
@@ -4463,7 +4408,7 @@ static int enable_msix(struct adapter *adap)
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
+ want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets;
/* need nchan for each possible ULD */
ofld_need = 3 * nchan;
}
@@ -4502,13 +4447,13 @@ static int enable_msix(struct adapter *adap)
/* leftovers go to OFLD */
i = allocated - EXTRA_VECS - s->max_ethqsets -
s->rdmaqs - s->rdmaciqs;
- s->ofldqsets = (i / nchan) * nchan; /* round down */
+ s->iscsiqsets = (i / nchan) * nchan; /* round down */
}
for (i = 0; i < allocated; ++i)
adap->msix_info[i].vec = entries[i].vector;
dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
"nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
- allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs,
+ allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
s->rdmaciqs);
kfree(entries);
@@ -4536,6 +4481,79 @@ static int init_rss(struct adapter *adap)
return 0;
}
+static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ u32 lnkcap1, lnkcap2;
+ int err1, err2;
+
+#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
+ &lnkcap1);
+ err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
+ &lnkcap2);
+ if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ *speed = PCIE_SPEED_8_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ if (!err1) {
+ *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
+ if (!lnkcap2) { /* pre-r3.0 */
+ if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ }
+
+ if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
+ return err1 ? err1 : err2 ? err2 : -EINVAL;
+ return 0;
+}
+
+static void cxgb4_check_pcie_caps(struct adapter *adap)
+{
+ enum pcie_link_width width, width_cap;
+ enum pci_bus_speed speed, speed_cap;
+
+#define PCIE_SPEED_STR(speed) \
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
+ "Unknown")
+
+ if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
+ dev_warn(adap->pdev_dev,
+ "Unable to determine PCIe device BW capabilities\n");
+ return;
+ }
+
+ if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
+ speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+ dev_warn(adap->pdev_dev,
+ "Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
+ PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
+ dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
+ width, width_cap);
+ if (speed < speed_cap || width < width_cap)
+ dev_info(adap->pdev_dev,
+ "A slot with more lanes and/or higher speed is "
+ "suggested for optimal performance.\n");
+}
+
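A worked decode (register value assumed) of the Link Capabilities parsing in cxgb4_get_pcie_dev_link_caps() above:

/* PCI_EXP_LNKCAP: bits 3:0 carry the max link speed, bits 9:4 the max
 * link width -- hence PCIE_MLW_CAP_SHIFT of 4.
 */
u32 lnkcap1 = 0x00000042;		/* hypothetical: x4 link, 5.0GT/s */
unsigned int max_width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> 4;	/* -> 4 */
/* speed code 2 == PCI_EXP_LNKCAP2_SLS... pre-r3.0 path: lnkcap1 bit
 * PCI_EXP_LNKCAP_SLS_5_0GB set -> PCIE_SPEED_5_0GT
 */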
static void print_port_info(const struct net_device *dev)
{
char buf[80];
@@ -4563,10 +4581,10 @@ static void print_port_info(const struct net_device *dev)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
+ netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n",
adap->params.vpd.id,
CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
- is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
+ is_offload(adap) ? "R" : "",
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
netdev_info(dev, "S/N: %s, P/N: %s\n",
@@ -4785,8 +4803,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure SGE_STAT_CFG_A to read WC stats */
if (!is_t4(adapter->params.chip))
- t4_write_reg(adapter, SGE_STAT_CFG_A,
- STATSOURCE_T5_V(7) | STATMODE_V(0));
+ t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
+ (is_t5(adapter->params.chip) ? STATMODE_V(0) :
+ T6_STATMODE_V(0)));
for_each_port(adapter, i) {
struct net_device *netdev;
@@ -4863,15 +4882,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#if IS_ENABLED(CONFIG_IPV6)
- adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
- adapter->clipt_end);
- if (!adapter->clipt) {
- /* We tolerate a lack of clip_table, giving up
- * some functionality
+ if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
+ (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
+ /* CLIP functionality is not present in hardware,
+ * hence disable all offload features
*/
dev_warn(&pdev->dev,
- "could not allocate Clip table, continuing\n");
+ "CLIP not enabled in hardware, continuing\n");
adapter->params.offload = 0;
+ } else {
+ adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+ adapter->clipt_end);
+ if (!adapter->clipt) {
+ /* We tolerate a lack of clip_table, giving up
+ * some functionality
+ */
+ dev_warn(&pdev->dev,
+ "could not allocate Clip table, continuing\n");
+ adapter->params.offload = 0;
+ }
}
#endif
if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
@@ -4902,6 +4931,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (msi > 0 && pci_enable_msi(pdev) == 0)
adapter->flags |= USING_MSI;
+ /* check for PCI Express bandwidth capabilities */
+ cxgb4_check_pcie_caps(adapter);
+
err = init_rss(adapter);
if (err)
goto out_free_dev;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index ac27898c6..5b0f3ef34 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -66,7 +66,7 @@ struct l2t_data {
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
- return e->vlan >> 13;
+ return e->vlan >> VLAN_PRIO_SHIFT;
}
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
@@ -161,8 +161,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
- set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- t4_ofld_send(adap, skb);
+ t4_mgmt_tx(adap, skb);
if (sync && e->state != L2T_STATE_SWITCHING)
e->state = L2T_STATE_SYNC_WRITE;
@@ -175,14 +174,10 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
*/
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
- while (e->arpq_head) {
- struct sk_buff *skb = e->arpq_head;
+ struct sk_buff *skb;
- e->arpq_head = skb->next;
- skb->next = NULL;
+ while ((skb = __skb_dequeue(&e->arpq)) != NULL)
t4_ofld_send(adap, skb);
- }
- e->arpq_tail = NULL;
}
/*
@@ -222,12 +217,7 @@ void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
*/
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
- skb->next = NULL;
- if (e->arpq_head)
- e->arpq_tail->next = skb;
- else
- e->arpq_head = skb;
- e->arpq_tail = skb;
+ __skb_queue_tail(&e->arpq, skb);
}
int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
@@ -259,7 +249,8 @@ again:
if (e->state == L2T_STATE_RESOLVING &&
!neigh_event_send(e->neigh, NULL)) {
spin_lock_bh(&e->lock);
- if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
+ if (e->state == L2T_STATE_RESOLVING &&
+ !skb_queue_empty(&e->arpq))
write_l2e(adap, e, 1);
spin_unlock_bh(&e->lock);
}
@@ -305,12 +296,82 @@ found:
return e;
}
-/*
- * Called when an L2T entry has no more users.
+static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
+ u8 port, u8 *dmac)
+{
+ struct l2t_entry *end, *e, **p;
+ struct l2t_entry *first_free = NULL;
+
+ for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
+ if (atomic_read(&e->refcnt) == 0) {
+ if (!first_free)
+ first_free = e;
+ } else {
+ if (e->state == L2T_STATE_SWITCHING) {
+ if (ether_addr_equal(e->dmac, dmac) &&
+ (e->vlan == vlan) && (e->lport == port))
+ goto exists;
+ }
+ }
+ }
+
+ if (first_free) {
+ e = first_free;
+ goto found;
+ }
+
+ return NULL;
+
+found:
+ /* The entry we found may be an inactive entry that is
+ * presently in the hash table. We need to remove it.
+ */
+ if (e->state < L2T_STATE_SWITCHING)
+ for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
+ if (*p == e) {
+ *p = e->next;
+ e->next = NULL;
+ break;
+ }
+ e->state = L2T_STATE_UNUSED;
+
+exists:
+ return e;
+}
+
+/* Called when an L2T entry has no more users. The entry is left in the hash
+ * table since it is likely to be reused but we also bump nfree to indicate
+ * that the entry can be reallocated for a different neighbor. We also drop
+ * the existing neighbor reference in case the neighbor is going away and is
+ * waiting on our reference.
+ *
+ * Because entries can be reallocated to other neighbors once their ref count
+ * drops to 0 we need to take the entry's lock to avoid races with a new
+ * incarnation.
*/
+static void _t4_l2e_free(struct l2t_entry *e)
+{
+ struct l2t_data *d;
+ struct sk_buff *skb;
+
+ if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (e->neigh) {
+ neigh_release(e->neigh);
+ e->neigh = NULL;
+ }
+ while ((skb = __skb_dequeue(&e->arpq)) != NULL)
+ kfree_skb(skb);
+ }
+
+ d = container_of(e, struct l2t_data, l2tab[e->idx]);
+ atomic_inc(&d->nfree);
+}
+
+/* Locked version of _t4_l2e_free */
static void t4_l2e_free(struct l2t_entry *e)
{
struct l2t_data *d;
+ struct sk_buff *skb;
spin_lock_bh(&e->lock);
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
@@ -318,13 +379,8 @@ static void t4_l2e_free(struct l2t_entry *e)
neigh_release(e->neigh);
e->neigh = NULL;
}
- while (e->arpq_head) {
- struct sk_buff *skb = e->arpq_head;
-
- e->arpq_head = skb->next;
+ while ((skb = __skb_dequeue(&e->arpq)) != NULL)
kfree_skb(skb);
- }
- e->arpq_tail = NULL;
}
spin_unlock_bh(&e->lock);
@@ -457,18 +513,19 @@ EXPORT_SYMBOL(cxgb4_select_ntuple);
* on the arpq head. If a packet specifies a failure handler it is invoked,
* otherwise the packet is sent to the device.
*/
-static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
+static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
{
- while (arpq) {
- struct sk_buff *skb = arpq;
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
- arpq = skb->next;
- skb->next = NULL;
+ spin_unlock(&e->lock);
if (cb->arp_err_handler)
cb->arp_err_handler(cb->handle, skb);
else
t4_ofld_send(adap, skb);
+ spin_lock(&e->lock);
}
}
@@ -479,7 +536,7 @@ static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
struct l2t_entry *e;
- struct sk_buff *arpq = NULL;
+ struct sk_buff_head *arpq = NULL;
struct l2t_data *d = adap->l2t;
int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *) neigh->primary_key;
@@ -506,10 +563,9 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
if (e->state == L2T_STATE_RESOLVING) {
if (neigh->nud_state & NUD_FAILED) {
- arpq = e->arpq_head;
- e->arpq_head = e->arpq_tail = NULL;
+ arpq = &e->arpq;
} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
- e->arpq_head) {
+ !skb_queue_empty(&e->arpq)) {
write_l2e(adap, e, 1);
}
} else {
@@ -519,43 +575,66 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
write_l2e(adap, e, 0);
}
- spin_unlock_bh(&e->lock);
-
if (arpq)
- handle_failed_resolution(adap, arpq);
+ handle_failed_resolution(adap, e);
+ spin_unlock_bh(&e->lock);
}
/* Allocate an L2T entry for use by a switching rule. Such need to be
* explicitly freed and while busy they are not on any hash chain, so normal
* address resolution updates do not see them.
*/
-struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
+struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
+ u8 port, u8 *eth_addr)
{
+ struct l2t_data *d = adap->l2t;
struct l2t_entry *e;
+ int ret;
write_lock_bh(&d->lock);
- e = alloc_l2e(d);
+ e = find_or_alloc_l2e(d, vlan, port, eth_addr);
if (e) {
spin_lock(&e->lock); /* avoid race with t4_l2t_free */
- e->state = L2T_STATE_SWITCHING;
- atomic_set(&e->refcnt, 1);
+ if (!atomic_read(&e->refcnt)) {
+ e->state = L2T_STATE_SWITCHING;
+ e->vlan = vlan;
+ e->lport = port;
+ ether_addr_copy(e->dmac, eth_addr);
+ atomic_set(&e->refcnt, 1);
+ ret = write_l2e(adap, e, 0);
+ if (ret < 0) {
+ _t4_l2e_free(e);
+ spin_unlock(&e->lock);
+ write_unlock_bh(&d->lock);
+ return NULL;
+ }
+ } else {
+ atomic_inc(&e->refcnt);
+ }
+
spin_unlock(&e->lock);
}
write_unlock_bh(&d->lock);
return e;
}
-/* Sets/updates the contents of a switching L2T entry that has been allocated
- * with an earlier call to @t4_l2t_alloc_switching.
+/**
+ * cxgb4_l2t_alloc_switching - Allocate a switching L2T entry
+ * @dev: net_device pointer
+ * @vlan: VLAN Id
+ * @port: Associated port
+ * @dmac: Destination MAC address to add to L2T
+ * Returns pointer to the allocated l2t entry
+ *
+ * Allocates an L2T entry for use by switching rule of a filter
*/
-int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
- u8 port, u8 *eth_addr)
+struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
+ u8 port, u8 *dmac)
{
- e->vlan = vlan;
- e->lport = port;
- memcpy(e->dmac, eth_addr, ETH_ALEN);
- return write_l2e(adap, e, 0);
+ struct adapter *adap = netdev2adap(dev);
+
+ return t4_l2t_alloc_switching(adap, vlan, port, dmac);
}
+EXPORT_SYMBOL(cxgb4_l2t_alloc_switching);
struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
@@ -585,6 +664,7 @@ struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
d->l2tab[i].state = L2T_STATE_UNUSED;
spin_lock_init(&d->l2tab[i].lock);
atomic_set(&d->l2tab[i].refcnt, 0);
+ skb_queue_head_init(&d->l2tab[i].arpq);
}
return d;
}
@@ -619,7 +699,8 @@ static char l2e_state(const struct l2t_entry *e)
case L2T_STATE_VALID: return 'V';
case L2T_STATE_STALE: return 'S';
case L2T_STATE_SYNC_WRITE: return 'W';
- case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
+ case L2T_STATE_RESOLVING:
+ return skb_queue_empty(&e->arpq) ? 'R' : 'A';
case L2T_STATE_SWITCHING: return 'X';
default:
return 'U';
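The arpq rework throughout this file replaces a hand-rolled skb list with struct sk_buff_head; a brief sketch (arpq_drain() is a hypothetical name) of the unlocked helpers it leans on -- the double-underscore variants do no locking of their own, which is why every call site above holds e->lock:

static void arpq_drain(struct sk_buff_head *arpq)
{
	struct sk_buff *skb;

	/* __skb_dequeue() returns NULL once the queue is empty. */
	while ((skb = __skb_dequeue(arpq)) != NULL)
		kfree_skb(skb);
}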
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index b38dc526a..4e2d47ac1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -76,8 +76,7 @@ struct l2t_entry {
struct neighbour *neigh; /* associated neighbour */
struct l2t_entry *first; /* start of hash chain */
struct l2t_entry *next; /* next l2t_entry on chain */
- struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
- struct sk_buff *arpq_tail;
+ struct sk_buff_head arpq; /* packet queue awaiting resolution */
spinlock_t lock;
atomic_t refcnt; /* entry reference count */
u16 hash; /* hash bucket the entry is on */
@@ -114,10 +113,11 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
unsigned int priority);
u64 cxgb4_select_ntuple(struct net_device *dev,
const struct l2t_entry *l2t);
+struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
+ u8 port, u8 *dmac);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
-struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
-int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
- u8 port, u8 *eth_addr);
+struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
+ u8 port, u8 *dmac);
struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
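The switch from the open-coded arpq_head/arpq_tail pair to a struct sk_buff_head lets the driver lean on the standard skb queue helpers (see the skb_queue_head_init() and skb_queue_empty() uses in the l2t.c hunks above). A minimal sketch of the resulting idiom — not part of the patch, and assuming callers hold the entry's lock, hence the unlocked __skb_* variants:

#include <linux/skbuff.h>

/* Drop every packet still parked on an ARP-pending queue. */
static void arpq_flush(struct sk_buff_head *arpq)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(arpq)) != NULL)
		kfree_skb(skb);
	/* skb_queue_empty(arpq) is now true, cf. l2e_state() earlier */
}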
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b7b93e7a6..b4eb4680a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -406,7 +406,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
*/
static inline int reclaimable(const struct sge_txq *q)
{
- int hw_cidx = ntohs(q->stat->cidx);
+ int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
hw_cidx -= q->cidx;
return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
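The new ACCESS_ONCE() matters because q->stat points at the SGE status page, which the hardware updates via DMA behind the compiler's back; a plain load could legally be re-fetched or reordered. A minimal sketch of the pattern with a hypothetical ring structure (READ_ONCE() being the modern spelling):

#include <linux/compiler.h>
#include <asm/byteorder.h>

struct hw_ring {
	unsigned int size;	/* ring capacity in descriptors */
	unsigned int cidx;	/* software consumer index */
	__be16 *hw_cidx;	/* consumer index DMA'd by the device */
};

static int ring_reclaimable(const struct hw_ring *r)
{
	int hw = be16_to_cpu(READ_ONCE(*r->hw_cidx));	/* exactly one load */
	int n = hw - r->cidx;

	return n < 0 ? n + r->size : n;
}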
@@ -613,6 +613,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
__free_pages(pg, s->fl_pg_order);
+ q->mapping_err++;
goto out; /* do not try small pages for this error */
}
mapping |= RX_LARGE_PG_BUF;
@@ -642,6 +643,7 @@ alloc_small_pages:
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
put_page(pg);
+ q->mapping_err++;
goto out;
}
*d++ = cpu_to_be64(mapping);
@@ -663,6 +665,7 @@ out: cred = q->avail - cred;
if (unlikely(fl_starving(adap, q))) {
smp_wmb();
+ q->low++;
set_bit(q->cntxt_id - adap->sge.egr_start,
adap->sge.starving_fl);
}
@@ -1029,6 +1032,30 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
*p = 0;
}
+static void *inline_tx_skb_header(const struct sk_buff *skb,
+ const struct sge_txq *q, void *pos,
+ int length)
+{
+ u64 *p;
+ int left = (void *)q->stat - pos;
+
+ if (likely(length <= left)) {
+ memcpy(pos, skb->data, length);
+ pos += length;
+ } else {
+ memcpy(pos, skb->data, left);
+ memcpy(q->desc, skb->data + left, length - left);
+ pos = (void *)q->desc + (length - left);
+ }
+	/* 0-pad to a multiple of 16 bytes */
+ p = PTR_ALIGN(pos, 8);
+ if ((uintptr_t)p & 8) {
+ *p = 0;
+ return p + 1;
+ }
+ return p;
+}
+
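The wrap-around copy in inline_tx_skb_header() is easier to see in isolation. A small user-space model of the same idea (names hypothetical; the kernel helper additionally zero-pads its result to a 16-byte multiple):

#include <string.h>
#include <stddef.h>

/* Copy len bytes into a size-byte ring starting at offset pos, wrapping
 * to offset 0 if the copy would run past the end.  Returns the offset
 * just past the copied data.
 */
static size_t ring_copy(char *ring, size_t size, size_t pos,
			const char *src, size_t len)
{
	size_t left = size - pos;

	if (len <= left) {
		memcpy(ring + pos, src, len);
		return pos + len;
	}
	memcpy(ring + pos, src, left);		/* fill up to the end ... */
	memcpy(ring, src + left, len - left);	/* ... continue at the start */
	return len - left;
}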
/*
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
@@ -1320,7 +1347,7 @@ out_free: dev_kfree_skb_any(skb);
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
- int hw_cidx = ntohs(q->stat->cidx);
+ int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
int reclaim = hw_cidx - q->cidx;
if (reclaim < 0)
@@ -1542,24 +1569,50 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
}
/**
- * service_ofldq - restart a suspended offload queue
+ * service_ofldq - service/restart a suspended offload queue
* @q: the offload queue
*
- * Services an offload Tx queue by moving packets from its packet queue
- * to the HW Tx ring. The function starts and ends with the queue locked.
+ * Services an offload Tx queue by moving packets from its Pending Send
+ * Queue to the Hardware TX ring. The function starts and ends with the
+ * Send Queue locked, but drops the lock while putting the skb at the
+ * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
+ * allows more skbs to be added to the Send Queue by other threads.
+ * The packet being processed at the head of the Pending Send Queue is
+ * left on the queue in case we experience DMA Mapping errors, etc.
+ * and need to give up and restart later.
+ *
+ * service_ofldq() can be thought of as a task which opportunistically
+ * uses other threads' execution contexts.  We use the Offload Queue
+ * boolean "service_ofldq_running" to make sure that only one instance
+ * is ever running at a time.
*/
static void service_ofldq(struct sge_ofld_txq *q)
{
- u64 *pos;
+ u64 *pos, *before, *end;
int credits;
struct sk_buff *skb;
+ struct sge_txq *txq;
+ unsigned int left;
unsigned int written = 0;
unsigned int flits, ndesc;
+ /* If another thread is currently in service_ofldq() processing the
+ * Pending Send Queue then there's nothing to do. Otherwise, flag
+ * that we're doing the work and continue. Examining/modifying
+ * the Offload Queue boolean "service_ofldq_running" must be done
+ * while holding the Pending Send Queue Lock.
+ */
+ if (q->service_ofldq_running)
+ return;
+ q->service_ofldq_running = true;
+
while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
- /*
- * We drop the lock but leave skb on sendq, thus retaining
- * exclusive access to the state of the queue.
+ /* We drop the lock while we're working with the skb at the
+ * head of the Pending Send Queue. This allows more skbs to
+ * be added to the Pending Send Queue while we're working on
+ * this one. We don't need to lock to guard the TX Ring
+ * updates because only one thread of execution is ever
+ * allowed into service_ofldq() at a time.
*/
spin_unlock(&q->sendq.lock);
@@ -1583,9 +1636,32 @@ static void service_ofldq(struct sge_ofld_txq *q)
} else {
int last_desc, hdr_len = skb_transport_offset(skb);
- memcpy(pos, skb->data, hdr_len);
- write_sgl(skb, &q->q, (void *)pos + hdr_len,
- pos + flits, hdr_len,
+ /* The WR headers may not fit within one descriptor.
+ * So we need to deal with wrap-around here.
+ */
+ before = (u64 *)pos;
+ end = (u64 *)pos + flits;
+ txq = &q->q;
+ pos = (void *)inline_tx_skb_header(skb, &q->q,
+ (void *)pos,
+ hdr_len);
+ if (before > (u64 *)pos) {
+ left = (u8 *)end - (u8 *)txq->stat;
+ end = (void *)txq->desc + left;
+ }
+
+			/* If the current position is already at the end of
+			 * the ofld queue, reset it to the start of the
+			 * queue and update the end pointer as well.
+			 */
+ if (pos == (u64 *)txq->stat) {
+ left = (u8 *)end - (u8 *)txq->stat;
+ end = (void *)txq->desc + left;
+ pos = (void *)txq->desc;
+ }
+
+ write_sgl(skb, &q->q, (void *)pos,
+ end, hdr_len,
(dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
skb->dev = q->adap->port[0];
@@ -1604,6 +1680,11 @@ static void service_ofldq(struct sge_ofld_txq *q)
written = 0;
}
+ /* Reacquire the Pending Send Queue Lock so we can unlink the
+ * skb we've just successfully transferred to the TX Ring and
+ * loop for the next skb which may be at the head of the
+ * Pending Send Queue.
+ */
spin_lock(&q->sendq.lock);
__skb_unlink(skb, &q->sendq);
if (is_ofld_imm(skb))
@@ -1611,6 +1692,11 @@ static void service_ofldq(struct sge_ofld_txq *q)
}
if (likely(written))
ring_tx_db(q->adap, &q->q, written);
+
+	/* Indicate that no thread is currently servicing the Pending
+	 * Send Queue.
+	 */
+ q->service_ofldq_running = false;
}
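The "service_ofldq_running" flag is a lock-protected single-runner pattern: it is only tested and set while holding the sendq lock, so at most one thread ever drains the queue even though the lock is dropped around the actual TX work. Stripped of the ring details, the shape is roughly this (struct and helper names are hypothetical):

/* Called with q->lock held; drops and retakes it around the real work. */
static void drain_queue(struct pending_queue *q)
{
	struct work_item *item;

	if (q->draining)		/* another thread already owns it */
		return;
	q->draining = true;

	while ((item = peek_head(q)) != NULL) {
		spin_unlock(&q->lock);	/* producers may enqueue meanwhile */
		do_work(item);		/* single drainer: no ring races */
		spin_lock(&q->lock);
		unlink_head(q, item);
	}
	q->draining = false;
}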
/**
@@ -1624,9 +1710,19 @@ static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
{
skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
spin_lock(&q->sendq.lock);
+
+ /* Queue the new skb onto the Offload Queue's Pending Send Queue. If
+ * that results in this new skb being the only one on the queue, start
+ * servicing it. If there are other skbs already on the list, then
+ * either the queue is currently being processed or it's been stopped
+ * for some reason and it'll be restarted at a later time. Restart
+ * paths are triggered by events like experiencing a DMA Mapping Error
+ * or filling the Hardware TX Ring.
+ */
__skb_queue_tail(&q->sendq, skb);
if (q->sendq.qlen == 1)
service_ofldq(q);
+
spin_unlock(&q->sendq.lock);
return NET_XMIT_SUCCESS;
}
@@ -1864,7 +1960,6 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
- skb_mark_napi_id(skb, &rxq->rspq.napi);
pi = netdev_priv(skb->dev);
if (pi->rxtstamp)
cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
@@ -2193,7 +2288,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
if (likely(work_done < budget)) {
int timer_index;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
if (q->adaptive_rx) {
@@ -2460,7 +2555,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->size = roundup(iq->size, 16);
iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
- &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
+ &iq->phys_addr, NULL, 0,
+ dev_to_node(adap->pdev_dev));
if (!iq->desc)
return -ENOMEM;
@@ -2500,7 +2596,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->size = roundup(fl->size, 8);
fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
sizeof(struct rx_sw_desc), &fl->addr,
- &fl->sdesc, s->stat_len, NUMA_NO_NODE);
+ &fl->sdesc, s->stat_len,
+ dev_to_node(adap->pdev_dev));
if (!fl->desc)
goto fl_nomem;
@@ -2528,7 +2625,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
goto err;
netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
- napi_hash_add(&iq->napi);
iq->cur_desc = iq->desc;
iq->cidx = 0;
iq->gen = 1;
@@ -2574,8 +2670,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
* simple (and hopefully less wrong).
*/
if (!is_t4(adap->params.chip) && cong >= 0) {
- u32 param, val;
+ u32 param, val, ch_map = 0;
int i;
+ u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
@@ -2587,9 +2684,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
for (i = 0; i < 4; i++) {
if (cong & (1 << i))
- val |=
- CONMCTXT_CNGCHMAP_V(1 << (i << 2));
+ ch_map |= 1 << (i << cng_ch_bits_log);
}
+ val |= CONMCTXT_CNGCHMAP_V(ch_map);
}
ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
&param, &val);
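Together with the cng_ch_bits_log values assigned in t4_prep_adapter() later in this patch, the channel-map packing works out as follows for, say, cong = 0x3 (channels 0 and 1 congested):

/*   T4/T5 (cng_ch_bits_log = 2): ch_map = BIT(0) | BIT(4) = 0x11
 *   T6    (cng_ch_bits_log = 3): ch_map = BIT(0) | BIT(8) = 0x101
 * i.e. each channel's bit is spaced 4 (T4/T5) or 8 (T6) positions
 * apart, leaving room for that many per-channel priorities.
 */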
@@ -2884,7 +2981,7 @@ void t4_free_sge_resources(struct adapter *adap)
}
/* clean up RDMA and iSCSI Rx queues */
- t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq);
+ t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
@@ -3077,8 +3174,7 @@ static int t4_sge_init_soft(struct adapter *adap)
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
- u32 sge_control, sge_control2, sge_conm_ctrl;
- unsigned int ingpadboundary, ingpackboundary;
+ u32 sge_control, sge_conm_ctrl;
int ret, egress_threshold;
/*
@@ -3089,35 +3185,7 @@ int t4_sge_init(struct adapter *adap)
s->pktshift = PKTSHIFT_G(sge_control);
s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
- /* T4 uses a single control field to specify both the PCIe Padding and
- * Packing Boundary. T5 introduced the ability to specify these
- * separately. The actual Ingress Packet Data alignment boundary
- * within Packed Buffer Mode is the maximum of these two
- * specifications. (Note that it makes no real practical sense to
- * have the Pading Boudary be larger than the Packing Boundary but you
- * could set the chip up that way and, in fact, legacy T4 code would
- * end doing this because it would initialize the Padding Boundary and
- * leave the Packing Boundary initialized to 0 (16 bytes).)
- */
- ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
- INGPADBOUNDARY_SHIFT_X);
- if (is_t4(adap->params.chip)) {
- s->fl_align = ingpadboundary;
- } else {
- /* T5 has a different interpretation of one of the PCIe Packing
- * Boundary values.
- */
- sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
- ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
- if (ingpackboundary == INGPACKBOUNDARY_16B_X)
- ingpackboundary = 16;
- else
- ingpackboundary = 1 << (ingpackboundary +
- INGPACKBOUNDARY_SHIFT_X);
-
- s->fl_align = max(ingpadboundary, ingpackboundary);
- }
-
+ s->fl_align = t4_fl_pkt_align(adap);
ret = t4_sge_init_soft(adap);
if (ret < 0)
return ret;
@@ -3135,10 +3203,21 @@ int t4_sge_init(struct adapter *adap)
* buffers.
*/
sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
- if (is_t4(adap->params.chip))
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T4:
egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
- else
+ break;
+ case CHELSIO_T5:
egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
+ break;
+ case CHELSIO_T6:
+ egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
+ break;
+ default:
+ dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
+ CHELSIO_CHIP_VERSION(adap->params.chip));
+ return -EINVAL;
+ }
s->fl_starve_thres = 2*egress_threshold + 1;
t4_idma_monitor_init(adap, &s->idma_monitor);
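As a worked example of the threshold above (value hypothetical): an egress threshold of 4 descriptors gives fl_starve_thres = 2 * 4 + 1 = 9, so once a Free List drops below 9 free entries it is flagged on the starving_fl bitmap by the fl_starving() test in refill_fl() and queued for later replenishment.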
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cf61a5869..636b4691f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1942,8 +1942,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1190, 0x1194,
0x11a0, 0x11a4,
0x11b0, 0x11b4,
- 0x11fc, 0x1254,
- 0x1280, 0x133c,
+ 0x11fc, 0x1258,
+ 0x1280, 0x12d4,
+ 0x12d9, 0x12d9,
+ 0x12de, 0x12de,
+ 0x12e3, 0x12e3,
+ 0x12e8, 0x133c,
0x1800, 0x18fc,
0x3000, 0x302c,
0x3060, 0x30b0,
@@ -1973,7 +1977,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x5e50, 0x5e94,
0x5ea0, 0x5eb0,
0x5ec0, 0x5ec0,
- 0x5ec8, 0x5ecc,
+ 0x5ec8, 0x5ed0,
0x6000, 0x6020,
0x6028, 0x6040,
0x6058, 0x609c,
@@ -2048,7 +2052,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x19150, 0x19194,
0x1919c, 0x191b0,
0x191d0, 0x191e8,
- 0x19238, 0x192b0,
+ 0x19238, 0x19290,
+ 0x192a4, 0x192b0,
0x192bc, 0x192bc,
0x19348, 0x1934c,
0x193f8, 0x19418,
@@ -2442,7 +2447,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x40280, 0x40280,
0x40304, 0x40304,
0x40330, 0x4033c,
- 0x41304, 0x413c8,
+ 0x41304, 0x413b8,
+ 0x413c0, 0x413c8,
0x413d0, 0x413dc,
0x413f0, 0x413f0,
0x41400, 0x4140c,
@@ -5254,7 +5260,7 @@ void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
int i;
u32 data[2];
- for (i = 0; i < PM_NSTATS; i++) {
+ for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
if (is_t4(adap->params.chip)) {
@@ -5281,7 +5287,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
int i;
u32 data[2];
- for (i = 0; i < PM_NSTATS; i++) {
+ for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
if (is_t4(adap->params.chip)) {
@@ -5310,7 +5316,14 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
if (n == 0)
return idx == 0 ? 0xf : 0;
- if (n == 1)
+	/* In T6 (a 2-port card), port 0 maps to channel 0 and port 1 maps
+	 * to channel 1.  On a 2-port T4/T5 adapter, port 0 maps to
+	 * channels 0 and 1, and port 1 maps to channels 2 and 3.
+	 */
+ if ((n == 1) &&
+ (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
return idx < 2 ? (3 << (2 * idx)) : 0;
return 1 << idx;
}
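Concretely, for a 2-port adapter (n == 1) the returned buffer-group maps are:

/*   T4/T5: idx 0 -> 0x3 (channels 0 and 1), idx 1 -> 0xc (channels 2 and 3)
 *   T6:    idx 0 -> 0x1 (channel 0),        idx 1 -> 0x2 (channel 1)
 */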
@@ -5689,6 +5702,39 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
"IDMA_FL_SEND_PADDING",
"IDMA_FL_SEND_COMPLETION_TO_IMSG",
};
+ static const char * const t6_decode[] = {
+ "IDMA_IDLE",
+ "IDMA_PUSH_MORE_CPL_FIFO",
+ "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+ "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+ "IDMA_PHYSADDR_SEND_PAYLOAD",
+ "IDMA_FL_REQ_DATA_FL",
+ "IDMA_FL_DROP",
+ "IDMA_FL_DROP_SEND_INC",
+ "IDMA_FL_H_REQ_HEADER_FL",
+ "IDMA_FL_H_SEND_PCIEHDR",
+ "IDMA_FL_H_PUSH_CPL_FIFO",
+ "IDMA_FL_H_SEND_CPL",
+ "IDMA_FL_H_SEND_IP_HDR_FIRST",
+ "IDMA_FL_H_SEND_IP_HDR",
+ "IDMA_FL_H_REQ_NEXT_HEADER_FL",
+ "IDMA_FL_H_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_H_SEND_IP_HDR_PADDING",
+ "IDMA_FL_D_SEND_PCIEHDR",
+ "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+ "IDMA_FL_D_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_PCIEHDR",
+ "IDMA_FL_PUSH_CPL_FIFO",
+ "IDMA_FL_SEND_CPL",
+ "IDMA_FL_SEND_PAYLOAD_FIRST",
+ "IDMA_FL_SEND_PAYLOAD",
+ "IDMA_FL_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_SEND_PADDING",
+ "IDMA_FL_SEND_COMPLETION_TO_IMSG",
+ };
static const u32 sge_regs[] = {
SGE_DEBUG_DATA_LOW_INDEX_2_A,
SGE_DEBUG_DATA_LOW_INDEX_3_A,
@@ -5697,6 +5743,32 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
const char **sge_idma_decode;
int sge_idma_decode_nstates;
int i;
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ /* Select the right set of decode strings to dump depending on the
+ * adapter chip type.
+ */
+ switch (chip_version) {
+ case CHELSIO_T4:
+ sge_idma_decode = (const char **)t4_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
+ break;
+
+ case CHELSIO_T5:
+ sge_idma_decode = (const char **)t5_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
+ break;
+
+ case CHELSIO_T6:
+ sge_idma_decode = (const char **)t6_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
+ break;
+
+ default:
+ dev_err(adapter->pdev_dev,
+ "Unsupported chip version %d\n", chip_version);
+ return;
+ }
if (is_t4(adapter->params.chip)) {
sge_idma_decode = (const char **)t4_decode;
@@ -6097,6 +6169,59 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
}
/**
+ * t4_fl_pkt_align - return the fl packet alignment
+ * @adap: the adapter
+ *
+ *	T4 has a single field to specify both the packing and padding
+ *	boundary.  T5 onwards have separate fields, so the alignment for
+ *	the next packet offset is the maximum of the two.
+ */
+int t4_fl_pkt_align(struct adapter *adap)
+{
+ u32 sge_control, sge_control2;
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ sge_control = t4_read_reg(adap, SGE_CONTROL_A);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications. (Note that it makes no real practical sense to
+	 * have the Padding Boundary be larger than the Packing Boundary, but
+	 * you could set the chip up that way and, in fact, legacy T4 code
+	 * would end up doing this because it would initialize the Padding
+	 * Boundary and leave the Packing Boundary initialized to 0 (16
+	 * bytes).)  Padding Boundary values on T6 start at 8B,
+	 * whereas they start at 32B on T4 and T5.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ ingpad_shift = INGPADBOUNDARY_SHIFT_X;
+ else
+ ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
+
+ ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adap->params.chip)) {
+		/* T5 has a different interpretation of one of the PCIe
+		 * Packing Boundary values.
+		 */
+ sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+ ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+ if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ INGPACKBOUNDARY_SHIFT_X);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
+
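A worked example of t4_fl_pkt_align() with hypothetical register contents, using the shift constants added to t4_values.h later in this patch: suppose the Padding Boundary field reads 1 and the Packing Boundary field reads INGPACKBOUNDARY_16B_X.

/*   T5: ingpadboundary  = 1 << (1 + 5) = 64
 *       ingpackboundary = 16
 *       fl_align        = max(64, 16) = 64
 *   T6: ingpadboundary  = 1 << (1 + 3) = 16  (pad boundaries start at 8B)
 *       fl_align        = max(16, 16) = 16
 */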
+/**
* t4_fixup_host_params - fix up host-dependent parameters
* @adap: the adapter
* @page_size: the host's Base Page Size
@@ -6114,6 +6239,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
+ unsigned int ingpad;
t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
HOSTPAGESIZEPF0_V(sge_hps) |
@@ -6161,10 +6287,16 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
fl_align = 64;
fl_align_log = 6;
}
+
+ if (is_t5(adap->params.chip))
+ ingpad = INGPCIEBOUNDARY_32B_X;
+ else
+ ingpad = T6_INGPADBOUNDARY_32B_X;
+
t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
EGRSTATUSPAGESIZE_F,
- INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
+ INGPADBOUNDARY_V(ingpad) |
EGRSTATUSPAGESIZE_V(stat_len != 64));
t4_set_reg_field(adap, SGE_CONTROL2_A,
INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
@@ -7060,7 +7192,12 @@ int t4_prep_adapter(struct adapter *adapter)
NUM_MPS_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 128;
adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.pm_stats_cnt = PM_NSTATS;
adapter->params.arch.vfcount = 128;
+		/* The congestion map spans 4 channels so that
+		 * MPS can have 4 priorities per port.
+		 */
+ adapter->params.arch.cng_ch_bits_log = 2;
break;
case CHELSIO_T5:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
@@ -7069,7 +7206,9 @@ int t4_prep_adapter(struct adapter *adapter)
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 128;
adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.pm_stats_cnt = PM_NSTATS;
adapter->params.arch.vfcount = 128;
+ adapter->params.arch.cng_ch_bits_log = 2;
break;
case CHELSIO_T6:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
@@ -7078,7 +7217,12 @@ int t4_prep_adapter(struct adapter *adapter)
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 256;
adapter->params.arch.nchan = 2;
+ adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
adapter->params.arch.vfcount = 256;
+		/* The congestion map spans 2 channels so that
+		 * MPS can have 8 priorities per port.
+		 */
+ adapter->params.arch.cng_ch_bits_log = 3;
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 13708fde1..2fc60e83a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -48,6 +48,7 @@ enum {
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
PM_NSTATS = 5, /* # of PM stats */
+ T6_PM_NSTATS = 7, /* # of PM stats in T6 */
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 03ed00c49..06bc2d2e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -162,6 +162,10 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */
CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */
+ CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
+ CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index fc3044c8a..9fea255c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -492,6 +492,9 @@
#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
#define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M)
+#define T6_STATMODE_S 0
+#define T6_STATMODE_V(x) ((x) << T6_STATMODE_S)
+
#define SGE_DBFIFO_STATUS2_A 0x1118
#define HP_INT_THRESH_T5_S 10
@@ -2395,6 +2398,30 @@
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
+#define VIDL_S 16
+#define VIDL_M 0xffffU
+#define VIDL_G(x) (((x) >> VIDL_S) & VIDL_M)
+
+#define DATALKPTYPE_S 10
+#define DATALKPTYPE_M 0x3U
+#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M)
+
+#define DATAPORTNUM_S 12
+#define DATAPORTNUM_M 0xfU
+#define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M)
+
+#define DATADIPHIT_S 8
+#define DATADIPHIT_V(x) ((x) << DATADIPHIT_S)
+#define DATADIPHIT_F DATADIPHIT_V(1U)
+
+#define DATAVIDH2_S 7
+#define DATAVIDH2_V(x) ((x) << DATAVIDH2_S)
+#define DATAVIDH2_F DATAVIDH2_V(1U)
+
+#define DATAVIDH1_S 0
+#define DATAVIDH1_M 0x7fU
+#define DATAVIDH1_G(x) (((x) >> DATAVIDH1_S) & DATAVIDH1_M)
+
#define USED_S 16
#define USED_M 0x7ffU
#define USED_G(x) (((x) >> USED_S) & USED_M)
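These new defines follow the driver's usual _S/_M/_V/_G suffix convention (shift, mask, insert, extract). An illustrative use, assuming val holds a TCAM data word read from one of the MPS_CLS_TCAM_DATA registers above:

u32 vidl = VIDL_G(val);		/* VLAN id low bits, word bits 31:16 */
u32 port = DATAPORTNUM_G(val);	/* port number, bits 15:12 */
u32 type = DATALKPTYPE_G(val);	/* lookup type, bits 11:10 */
bool dip = val & DATADIPHIT_F;	/* single-bit flag at bit 8 */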
@@ -2802,6 +2829,10 @@
#define HASHEN_V(x) ((x) << HASHEN_S)
#define HASHEN_F HASHEN_V(1U)
+#define ASLIPCOMPEN_S 17
+#define ASLIPCOMPEN_V(x) ((x) << ASLIPCOMPEN_S)
+#define ASLIPCOMPEN_F ASLIPCOMPEN_V(1U)
+
#define REQQPARERR_S 16
#define REQQPARERR_V(x) ((x) << REQQPARERR_S)
#define REQQPARERR_F REQQPARERR_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index 7bdee3bf7..a5231fa77 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -53,6 +53,9 @@
#define INGPADBOUNDARY_SHIFT_X 5
+#define T6_INGPADBOUNDARY_SHIFT_X 3
+#define T6_INGPADBOUNDARY_32B_X 2
+
/* CONTROL2 register */
#define INGPACKBOUNDARY_SHIFT_X 5
#define INGPACKBOUNDARY_16B_X 0
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index fa3786a9d..6528231d8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2607,7 +2607,7 @@ int t4vf_sge_init(struct adapter *adapter)
u32 fl0 = sge_params->sge_fl_buffer_size[0];
u32 fl1 = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
- unsigned int ingpadboundary, ingpackboundary;
+ unsigned int ingpadboundary, ingpackboundary, ingpad_shift;
/*
* Start by vetting the basic SGE parameters which have been set up by
@@ -2642,9 +2642,16 @@ int t4vf_sge_init(struct adapter *adapter)
* could set the chip up that way and, in fact, legacy T4 code would
* end doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).)
+	 * Padding Boundary values on T6 start at 8B,
+	 * whereas they start at 32B on T4 and T5.
*/
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ ingpad_shift = INGPADBOUNDARY_SHIFT_X;
+ else
+ ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
+
ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
- INGPADBOUNDARY_SHIFT_X);
+ ingpad_shift);
if (is_t4(adapter->params.chip)) {
s->fl_align = ingpadboundary;
} else {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
index b516b12b1..f859db3d2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
@@ -54,6 +54,7 @@
#define T4VF_MPS_BASE_ADDR 0x0100
#define T4VF_PL_BASE_ADDR 0x0200
#define T4VF_MBDATA_BASE_ADDR 0x0240
+#define T6VF_MBDATA_BASE_ADDR 0x0280
#define T4VF_CIM_BASE_ADDR 0x0300
#define T4VF_REGMAP_START 0x0000
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 63dd5fdac..b6fa74aaf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -120,12 +120,19 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
1, 1, 3, 5, 10, 10, 20, 50, 100
};
- u32 v;
+ u32 v, mbox_data;
int i, ms, delay_idx;
const __be64 *p;
- u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
+	/* On T6 the mailbox size is increased to 128 bytes to avoid
+	 * invalidating the entire prefetch buffer.
+	 */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ mbox_data = T4VF_MBDATA_BASE_ADDR;
+ else
+ mbox_data = T6VF_MBDATA_BASE_ADDR;
+
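With the base addresses defined in t4vf_defs.h earlier in this patch, the selection amounts to (the 64-byte figure being the MBOX_LEN constant from t4_hw.h):

/*   T4/T5: mbox_data = 0x0240, a 64-byte mailbox
 *   T6:    mbox_data = 0x0280, a 128-byte mailbox, so that writing a
 *          command no longer invalidates the entire prefetch buffer
 */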
/*
* Commands must be multiples of 16 bytes in length and may not be
* larger than the size of the Mailbox Data register array.