author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
commit     8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be (patch)
tree       e9891aa6c295060d065adffd610c4f49ecf884f3 /drivers/net/ethernet/mellanox
parent     a71852147516bc1cb5b0b3cbd13639bfd4022dc8 (diff)
Linux-libre 4.3.2-gnu
Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--  drivers/net/ethernet/mellanox/Kconfig  1
-rw-r--r--  drivers/net/ethernet/mellanox/Makefile  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c  51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c  36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c  36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c  82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/intf.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c  45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c  48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  172
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c  259
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c  387
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  973
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c  36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c  58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c  53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.h  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig  32
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/cmd.h  1090
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c  1300
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h  207
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/emad.h  127
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/item.h  407
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c  1826
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.h  227
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/port.h  75
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h  1349
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c  1568
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h  66
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/txheader.h  80
46 files changed, 10125 insertions, 676 deletions
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index 52a6665b7..d54701047 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -18,5 +18,6 @@ if NET_VENDOR_MELLANOX
source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 38fe32ef5..2e2a5ec50 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_MLX5_CORE) += mlx5/core/
+obj-$(CONFIG_MLXSW_CORE) += mlxsw/
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 0a3202047..2177e56ed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2398,7 +2398,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
}
}
- memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
+ memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
INIT_WORK(&priv->mfunc.master.comm_work,
mlx4_master_comm_channel);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 63769df87..eb8a4988d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -100,7 +100,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
{
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
- char name[25];
int timestamp_en = 0;
bool assigned_eq = false;
@@ -119,8 +118,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
err = mlx4_assign_eq(mdev->dev, priv->port,
&cq->vector);
if (err) {
- mlx4_err(mdev, "Failed assigning an EQ to %s\n",
- name);
+ mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
+ cq->vector);
goto free_eq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 99ba1c50e..f79d81243 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -102,6 +102,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
"blueflame",
+ "phv-bit"
};
static const char main_strings[][ETH_GSTRING_LEN] = {
@@ -1797,35 +1798,49 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+ bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
+ bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
int i;
+ int ret = 0;
- if (bf_enabled_new == bf_enabled_old)
- return 0; /* Nothing to do */
+ if (bf_enabled_new != bf_enabled_old) {
+ if (bf_enabled_new) {
+ bool bf_supported = true;
- if (bf_enabled_new) {
- bool bf_supported = true;
+ for (i = 0; i < priv->tx_ring_num; i++)
+ bf_supported &= priv->tx_ring[i]->bf_alloced;
- for (i = 0; i < priv->tx_ring_num; i++)
- bf_supported &= priv->tx_ring[i]->bf_alloced;
+ if (!bf_supported) {
+ en_err(priv, "BlueFlame is not supported\n");
+ return -EINVAL;
+ }
- if (!bf_supported) {
- en_err(priv, "BlueFlame is not supported\n");
- return -EINVAL;
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+ } else {
+ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
}
- priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
- } else {
- priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
- }
-
- for (i = 0; i < priv->tx_ring_num; i++)
- priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+ for (i = 0; i < priv->tx_ring_num; i++)
+ priv->tx_ring[i]->bf_enabled = bf_enabled_new;
- en_info(priv, "BlueFlame %s\n",
- bf_enabled_new ? "Enabled" : "Disabled");
+ en_info(priv, "BlueFlame %s\n",
+ bf_enabled_new ? "Enabled" : "Disabled");
+ }
+ if (phv_enabled_new != phv_enabled_old) {
+ ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
+ if (ret)
+ return ret;
+ else if (phv_enabled_new)
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+ else
+ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
+ en_info(priv, "PHV bit %s\n",
+ phv_enabled_new ? "Enabled" : "Disabled");
+ }
return 0;
}
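For reference, the new "phv-bit" private flag added above is toggled from userspace through the generic ethtool private-flags ioctls. The sketch below is illustrative only and is not part of this patch; it assumes an AF_INET datagram socket and that "phv-bit" remains the second entry in mlx4_en_priv_flags[], i.e. bit 1 of the flag word.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Illustrative sketch: read-modify-write of the driver's ethtool
 * private flags. Returns 0 on success, -1 on ioctl failure.
 */
static int set_phv_priv_flag(int sock, const char *ifname, int enable)
{
	struct ethtool_value eval = { .cmd = ETHTOOL_GPFLAGS };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eval;

	if (ioctl(sock, SIOCETHTOOL, &ifr))	/* read current flags */
		return -1;

	if (enable)
		eval.data |= 0x2;	/* bit 1 == "phv-bit" (MLX4_EN_PRIV_FLAGS_PHV) */
	else
		eval.data &= ~0x2;

	eval.cmd = ETHTOOL_SPFLAGS;
	return ioctl(sock, SIOCETHTOOL, &ifr);	/* write flags back */
}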
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 913b716ed..a946e4bf7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -224,6 +224,26 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
kfree(mdev);
}
+static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx)
+{
+ int i;
+ struct mlx4_en_dev *mdev = ctx;
+
+ /* Create a netdev for each port */
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ mlx4_info(mdev, "Activating port:%d\n", i);
+ if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
+ mdev->pndev[i] = NULL;
+ }
+
+ /* register notifier */
+ mdev->nb.notifier_call = mlx4_en_netdev_event;
+ if (register_netdevice_notifier(&mdev->nb)) {
+ mdev->nb.notifier_call = NULL;
+ mlx4_err(mdev, "Failed to create notifier\n");
+ }
+}
+
static void *mlx4_en_add(struct mlx4_dev *dev)
{
struct mlx4_en_dev *mdev;
@@ -297,21 +317,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mutex_init(&mdev->state_lock);
mdev->device_up = true;
- /* Setup ports */
-
- /* Create a netdev for each port */
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- mlx4_info(mdev, "Activating port:%d\n", i);
- if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
- mdev->pndev[i] = NULL;
- }
- /* register notifier */
- mdev->nb.notifier_call = mlx4_en_netdev_event;
- if (register_netdevice_notifier(&mdev->nb)) {
- mdev->nb.notifier_call = NULL;
- mlx4_err(mdev, "Failed to create notifier\n");
- }
-
return mdev;
err_mr:
@@ -335,6 +340,7 @@ static struct mlx4_interface mlx4_en_interface = {
.event = mlx4_en_event,
.get_dev = mlx4_en_get_netdev,
.protocol = MLX4_PROT_ETH,
+ .activate = mlx4_en_activate,
};
static void mlx4_en_verify_params(void)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e0de2fd1c..4726122ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2184,6 +2184,25 @@ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
+static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *en_priv = netdev_priv(netdev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
+
+ /* Since there is no support for separate RX C-TAG/S-TAG vlan accel
+ * enable/disable make sure S-TAG flag is always in same state as
+ * C-TAG.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX &&
+ !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ else
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ return features;
+}
+
static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2218,6 +2237,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
en_info(priv, "Turn %s TX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+ en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+ (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
+
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
en_info(priv, "Turn %s loopback\n",
(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
@@ -2460,6 +2483,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_poll_controller = mlx4_en_netpoll,
#endif
.ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
@@ -2500,6 +2524,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_poll_controller = mlx4_en_netpoll,
#endif
.ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
@@ -2931,6 +2956,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_LOOPBACK |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+ dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ }
+
+ if (mlx4_is_slave(mdev->dev)) {
+ int phv;
+
+ err = get_phv_bit(mdev->dev, port, &phv);
+ if (!err && phv) {
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+ }
+ } else {
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+ !(mdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
+
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
dev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4f95fa7b5..e7a5000aa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -725,7 +725,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
- if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+ if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
!(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
hdr += sizeof(struct vlan_hdr);
@@ -906,17 +906,25 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->csum_level = 1;
if ((cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+ cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vid = be16_to_cpu(cqe->sl_vid);
__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+ } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+ __vlan_hwaccel_put_tag(gro_skb,
+ htons(ETH_P_8021AD),
+ be16_to_cpu(cqe->sl_vid));
}
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(gro_skb,
be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ (ip_summed == CHECKSUM_UNNECESSARY) ?
+ PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
skb_mark_napi_id(gro_skb, &cq->napi);
@@ -962,12 +970,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ (ip_summed == CHECKSUM_UNNECESSARY) ?
+ PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_L3);
if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_VLAN_PRESENT_MASK) &&
+ MLX4_CQE_CVLAN_PRESENT_MASK) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+ else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_STAG_RX))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+ be16_to_cpu(cqe->sl_vid));
if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -1032,13 +1047,15 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done == budget) {
- int cpu_curr;
const struct cpumask *aff;
+ struct irq_data *idata;
+ int cpu_curr;
INC_PERF_COUNTER(priv->pstats.napi_quota);
cpu_curr = smp_processor_id();
- aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+ idata = irq_desc_get_irq_data(cq->irq_desc);
+ aff = irq_data_get_affinity_mask(idata);
if (likely(cpumask_test_cpu(cpu_curr, aff)))
return budget;
@@ -1065,7 +1082,10 @@ static const int frag_sizes[] = {
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
+ /* VLAN_HLEN is added twice, to support skb vlan tagged with multiple
+ * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
+ */
+ int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
int buf_size = 0;
int i = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10d98f6a..4421bf546 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -718,6 +718,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
u32 index, bf_index;
__be32 op_own;
u16 vlan_tag = 0;
+ u16 vlan_proto = 0;
int i_frag;
int lso_header_size;
void *fragptr = NULL;
@@ -750,9 +751,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
}
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
vlan_tag = skb_vlan_tag_get(skb);
-
+ vlan_proto = be16_to_cpu(skb->vlan_proto);
+ }
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -958,8 +960,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->bf.offset ^= ring->bf.buf_size;
} else {
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- !!skb_vlan_tag_present(skb);
+ if (vlan_proto == ETH_P_8021AD)
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+ else if (vlan_proto == ETH_P_8021Q)
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+ else
+ tx_desc->ctrl.ins_vlan = 0;
+
tx_desc->ctrl.fence_size = real_size;
/* Ensure new descriptor hits memory
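The TX path above selects the hardware VLAN insertion type from skb->vlan_proto (converted to host order). The helper below is a condensed, illustrative restatement of that selection; its name is hypothetical and the MLX4_WQE_CTRL_INS_* values are assumed to come from the mlx4 WQE control segment definitions.

/* Map the skb's VLAN protocol (host order) to the WQE insertion type:
 * 802.1ad outer tags use S-VLAN insertion, 802.1Q tags use C-VLAN
 * insertion, anything else disables hardware insertion.
 */
static u8 mlx4_en_vlan_ins_type(u16 vlan_proto)
{
	if (vlan_proto == ETH_P_8021AD)
		return MLX4_WQE_CTRL_INS_SVLAN;
	if (vlan_proto == ETH_P_8021Q)
		return MLX4_WQE_CTRL_INS_CVLAN;
	return 0;
}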
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8e81e53c3..603d1c3d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -196,7 +196,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
return;
}
- memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
+ memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
s_eqe->slave_id = slave;
/* ensure all information is written before setting the ownership bit */
dma_wmb();
@@ -1364,6 +1364,10 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
* and performing a NOP command
*/
for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+ /* Make sure request_irq was called */
+ if (!priv->eq_table.eq[i].have_irq)
+ continue;
+
/* Temporary use polling for command completions */
mlx4_cmd_use_polling(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e30bf57ad..e8ec1dec5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -154,6 +154,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[26] = "Port ETS Scheduler support",
[27] = "Port beacon support",
[28] = "RX-ALL support",
+ [29] = "802.1ad offload support",
};
int i;
@@ -307,6 +308,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
+#define QUERY_FUNC_CAP_PHV_BIT 0x40
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
@@ -351,6 +353,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
QUERY_FUNC_CAP_PHYS_PORT_ID);
+ if (dev->caps.phv_bit[port]) {
+ field = QUERY_FUNC_CAP_PHV_BIT;
+ MLX4_PUT(outbox->buf, field,
+ QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ }
+
} else if (vhcr->op_modifier == 0) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
@@ -600,6 +608,9 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(func_cap->phys_port_id, outbox,
QUERY_FUNC_CAP_PHYS_PORT_ID);
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
* - num remains the maximum resource index
@@ -700,6 +711,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94
+#define QUERY_DEV_CAP_PHV_EN_OFFSET 0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
@@ -898,6 +910,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
if (field & (1 << 2))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
+ if (field & 0x40)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
+
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -1992,6 +2010,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ /* phv_check enable */
+ MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+ if (byte_field & 0x2)
+ param->phv_check_en = 1;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -2758,3 +2780,63 @@ int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
}
+
+static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
+{
+#define SET_PORT_GEN_PHV_VALID 0x10
+#define SET_PORT_GEN_PHV_EN 0x80
+
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+
+ context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID;
+ if (phv_bit)
+ context->phv_en |= SET_PORT_GEN_PHV_EN;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
+{
+ int err;
+ struct mlx4_func_cap func_cap;
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+ if (!err)
+ *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+ return err;
+}
+EXPORT_SYMBOL(get_phv_bit);
+
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
+{
+ int ret;
+
+ if (mlx4_is_slave(dev))
+ return -EPERM;
+
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+ ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
+ if (!ret)
+ dev->caps.phv_bit[port] = new_val;
+ return ret;
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(set_phv_bit);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 07cb7c246..08de5555c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -204,6 +204,7 @@ struct mlx4_init_hca_param {
u16 cqe_size; /* For use only when CQE stride feature enabled */
u16 eqe_size; /* For use only when EQE stride feature enabled */
u8 rss_ip_frags;
+ u8 phv_check_en; /* for QUERY_HCA */
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0d80aed59..0472941af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -63,8 +63,11 @@ static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
spin_unlock_irq(&priv->ctx_lock);
+ if (intf->activate)
+ intf->activate(&priv->dev, dev_ctx->context);
} else
kfree(dev_ctx);
+
}
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a408977a5..cc3a98975 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -405,6 +405,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
+ struct mlx4_init_hca_param hca_param;
+
+ memset(&hca_param, 0, sizeof(hca_param));
+ err = mlx4_QUERY_HCA(dev, &hca_param);
+ /* Turn off the PHV_EN flag in case phv_check_en is set.
+ * phv_check_en is a HW check that parses the packet and verifies
+ * the phv bit was reported correctly in the wqe. To allow QinQ,
+ * PHV_EN should be set and phv_check_en must be cleared,
+ * otherwise QinQ packets will be dropped by the HW.
+ */
+ if (err || hca_param.phv_check_en)
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
+ }
+
/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -2654,14 +2669,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
if (msi_x) {
int nreq = dev->caps.num_ports * num_online_cpus() + 1;
- bool shared_ports = false;
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
nreq);
- if (nreq > MAX_MSIX) {
+ if (nreq > MAX_MSIX)
nreq = MAX_MSIX;
- shared_ports = true;
- }
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
@@ -2684,9 +2696,6 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
dev->caps.num_ports);
- if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
- shared_ports = true;
-
for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
if (i == MLX4_EQ_ASYNC)
continue;
@@ -2694,7 +2703,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
priv->eq_table.eq[i].irq =
entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
- if (shared_ports) {
+ if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
dev->caps.num_ports);
/* We don't set affinity hint when there
@@ -2920,6 +2929,8 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
{
u64 dev_flags = dev->flags;
int err = 0;
+ int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
+ MLX4_MAX_NUM_VF);
if (reset_flow) {
dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
@@ -2945,6 +2956,12 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
}
if (!(dev->flags & MLX4_FLAG_SRIOV)) {
+ if (total_vfs > fw_enabled_sriov_vfs) {
+ mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
+ total_vfs, fw_enabled_sriov_vfs);
+ err = -ENOMEM;
+ goto disable_sriov;
+ }
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
err = pci_enable_sriov(pdev, total_vfs);
}
@@ -3426,20 +3443,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
goto err_disable_pdev;
}
}
- if (total_vfs >= MLX4_MAX_NUM_VF) {
+ if (total_vfs > MLX4_MAX_NUM_VF) {
dev_err(&pdev->dev,
- "Requested more VF's (%d) than allowed (%d)\n",
- total_vfs, MLX4_MAX_NUM_VF - 1);
+ "Requested more VF's (%d) than allowed by hw (%d)\n",
+ total_vfs, MLX4_MAX_NUM_VF);
err = -EINVAL;
goto err_disable_pdev;
}
for (i = 0; i < MLX4_MAX_PORTS; i++) {
- if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+ if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
dev_err(&pdev->dev,
- "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+ "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
nvfs[i] + nvfs[2], i + 1,
- MLX4_MAX_NUM_VF_P_PORT - 1);
+ MLX4_MAX_NUM_VF_P_PORT);
err = -EINVAL;
goto err_disable_pdev;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index bd9ea0d01..1d4e2e054 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1184,10 +1184,11 @@ out:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
- new_steering_entry(dev, port, steer, index, qp->qpn);
+ err = new_steering_entry(dev, port, steer,
+ index, qp->qpn);
else
- existing_steering_entry(dev, port, steer,
- index, qp->qpn);
+ err = existing_steering_entry(dev, port, steer,
+ index, qp->qpn);
}
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index a092c5c34..232b2b55f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -787,6 +787,9 @@ struct mlx4_set_port_general_context {
u8 pprx;
u8 pfcrx;
u16 reserved4;
+ u32 reserved5;
+ u8 phv_en;
+ u8 reserved6[3];
};
struct mlx4_set_port_rqp_calc_context {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 666d1669e..defcf8c39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -95,6 +95,7 @@
*/
#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+#define MLX4_EN_PRIV_FLAGS_PHV 2
#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 0715b4975..6cb383046 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -45,15 +45,34 @@
* register it in a memory region at HCA virtual address 0.
*/
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
+ size_t size, dma_addr_t *dma_handle,
+ int node)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ int original_node;
+ void *cpu_handle;
+
+ mutex_lock(&priv->alloc_mutex);
+ original_node = dev_to_node(&dev->pdev->dev);
+ set_dev_node(&dev->pdev->dev, node);
+ cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
+ dma_handle, GFP_KERNEL);
+ set_dev_node(&dev->pdev->dev, original_node);
+ mutex_unlock(&priv->alloc_mutex);
+ return cpu_handle;
+}
+
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_buf *buf, int node)
{
dma_addr_t t;
buf->size = size;
buf->npages = 1;
buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
+ buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
+ &t, node);
if (!buf->direct.buf)
return -ENOMEM;
@@ -66,6 +85,11 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
return 0;
}
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+{
+ return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
+}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@@ -75,7 +99,8 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
-static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+ int node)
{
struct mlx5_db_pgdir *pgdir;
@@ -84,8 +109,9 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
return NULL;
bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
- pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
- &pgdir->db_dma, GFP_KERNEL);
+
+ pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
+ &pgdir->db_dma, node);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
@@ -118,7 +144,7 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
return 0;
}
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
struct mlx5_db_pgdir *pgdir;
int ret = 0;
@@ -129,7 +155,7 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
if (!mlx5_alloc_db_from_pgdir(pgdir, db))
goto out;
- pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
+ pgdir = mlx5_alloc_db_pgdir(dev, node);
if (!pgdir) {
ret = -ENOMEM;
goto out;
@@ -145,6 +171,12 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3d23bd657..0983a208b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -42,24 +42,27 @@
#define MLX5E_MAX_NUM_TC 8
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
-#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
+#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
+#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
+#define MLX5E_SQ_BF_BUDGET 16
static const char vport_strings[][ETH_GSTRING_LEN] = {
/* vport statistics */
@@ -91,6 +94,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = {
"lro_bytes",
"rx_csum_good",
"rx_csum_none",
+ "rx_csum_sw",
"tx_csum_offload",
"tx_queue_stopped",
"tx_queue_wake",
@@ -128,18 +132,94 @@ struct mlx5e_vport_stats {
u64 lro_bytes;
u64 rx_csum_good;
u64 rx_csum_none;
+ u64 rx_csum_sw;
u64 tx_csum_offload;
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 rx_wqe_err;
-#define NUM_VPORT_COUNTERS 31
+#define NUM_VPORT_COUNTERS 32
+};
+
+static const char pport_strings[][ETH_GSTRING_LEN] = {
+ /* IEEE802.3 counters */
+ "frames_tx",
+ "frames_rx",
+ "check_seq_err",
+ "alignment_err",
+ "octets_tx",
+ "octets_received",
+ "multicast_xmitted",
+ "broadcast_xmitted",
+ "multicast_rx",
+ "broadcast_rx",
+ "in_range_len_errors",
+ "out_of_range_len",
+ "too_long_errors",
+ "symbol_err",
+ "mac_control_tx",
+ "mac_control_rx",
+ "unsupported_op_rx",
+ "pause_ctrl_rx",
+ "pause_ctrl_tx",
+
+ /* RFC2863 counters */
+ "in_octets",
+ "in_ucast_pkts",
+ "in_discards",
+ "in_errors",
+ "in_unknown_protos",
+ "out_octets",
+ "out_ucast_pkts",
+ "out_discards",
+ "out_errors",
+ "in_multicast_pkts",
+ "in_broadcast_pkts",
+ "out_multicast_pkts",
+ "out_broadcast_pkts",
+
+ /* RFC2819 counters */
+ "drop_events",
+ "octets",
+ "pkts",
+ "broadcast_pkts",
+ "multicast_pkts",
+ "crc_align_errors",
+ "undersize_pkts",
+ "oversize_pkts",
+ "fragments",
+ "jabbers",
+ "collisions",
+ "p64octets",
+ "p65to127octets",
+ "p128to255octets",
+ "p256to511octets",
+ "p512to1023octets",
+ "p1024to1518octets",
+ "p1519to2047octets",
+ "p2048to4095octets",
+ "p4096to8191octets",
+ "p8192to10239octets",
+};
+
+#define NUM_IEEE_802_3_COUNTERS 19
+#define NUM_RFC_2863_COUNTERS 13
+#define NUM_RFC_2819_COUNTERS 21
+#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \
+ NUM_RFC_2863_COUNTERS + \
+ NUM_RFC_2819_COUNTERS)
+
+struct mlx5e_pport_stats {
+ __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
+ __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
+ __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
};
static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
"packets",
"csum_none",
+ "csum_sw",
"lro_packets",
"lro_bytes",
"wqe_err"
@@ -148,10 +228,11 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
struct mlx5e_rq_stats {
u64 packets;
u64 csum_none;
+ u64 csum_sw;
u64 lro_packets;
u64 lro_bytes;
u64 wqe_err;
-#define NUM_RQ_STATS 5
+#define NUM_RQ_STATS 6
};
static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
@@ -179,6 +260,7 @@ struct mlx5e_sq_stats {
struct mlx5e_stats {
struct mlx5e_vport_stats vport;
+ struct mlx5e_pport_stats pport;
};
struct mlx5e_params {
@@ -192,9 +274,12 @@ struct mlx5e_params {
u16 tx_cq_moderation_usec;
u16 tx_cq_moderation_pkts;
u16 min_rx_wqes;
- u16 rx_hash_log_tbl_sz;
bool lro_en;
u32 lro_wqe_sz;
+ u16 tx_max_inline;
+ u8 rss_hfunc;
+ u8 toeplitz_hash_key[40];
+ u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
};
enum {
@@ -214,6 +299,7 @@ struct mlx5e_cq {
struct napi_struct *napi;
struct mlx5_core_cq mcq;
struct mlx5e_channel *channel;
+ struct mlx5e_priv *priv;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -237,6 +323,7 @@ struct mlx5e_rq {
struct mlx5_wq_ctrl wq_ctrl;
u32 rqn;
struct mlx5e_channel *channel;
+ struct mlx5e_priv *priv;
} ____cacheline_aligned_in_smp;
struct mlx5e_tx_skb_cb {
@@ -266,7 +353,9 @@ struct mlx5e_sq {
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
u32 dma_fifo_pc;
- u32 bf_offset;
+ u16 bf_offset;
+ u16 prev_cc;
+ u8 bf_budget;
struct mlx5e_sq_stats stats;
struct mlx5e_cq cq;
@@ -279,9 +368,10 @@ struct mlx5e_sq {
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
void __iomem *uar_map;
+ void __iomem *uar_bf_map;
struct netdev_queue *txq;
u32 sqn;
- u32 bf_buf_size;
+ u16 bf_buf_size;
u16 max_inline;
u16 edge;
struct device *pdev;
@@ -315,7 +405,6 @@ struct mlx5e_channel {
__be32 mkey_be;
u8 num_tc;
unsigned long flags;
- int tc_to_txq_map[MLX5E_MAX_NUM_TC];
/* control */
struct mlx5e_priv *priv;
@@ -324,20 +413,24 @@ struct mlx5e_channel {
};
enum mlx5e_traffic_types {
- MLX5E_TT_IPV4_TCP = 0,
- MLX5E_TT_IPV6_TCP = 1,
- MLX5E_TT_IPV4_UDP = 2,
- MLX5E_TT_IPV6_UDP = 3,
- MLX5E_TT_IPV4 = 4,
- MLX5E_TT_IPV6 = 5,
- MLX5E_TT_ANY = 6,
- MLX5E_NUM_TT = 7,
+ MLX5E_TT_IPV4_TCP,
+ MLX5E_TT_IPV6_TCP,
+ MLX5E_TT_IPV4_UDP,
+ MLX5E_TT_IPV6_UDP,
+ MLX5E_TT_IPV4_IPSEC_AH,
+ MLX5E_TT_IPV6_IPSEC_AH,
+ MLX5E_TT_IPV4_IPSEC_ESP,
+ MLX5E_TT_IPV6_IPSEC_ESP,
+ MLX5E_TT_IPV4,
+ MLX5E_TT_IPV6,
+ MLX5E_TT_ANY,
+ MLX5E_NUM_TT,
};
-enum {
- MLX5E_RQT_SPREADING = 0,
- MLX5E_RQT_DEFAULT_RQ = 1,
- MLX5E_NUM_RQT = 2,
+enum mlx5e_rqt_ix {
+ MLX5E_INDIRECTION_RQT,
+ MLX5E_SINGLE_RQ_RQT,
+ MLX5E_NUM_RQT,
};
struct mlx5e_eth_addr_info {
@@ -362,10 +455,10 @@ struct mlx5e_eth_addr_db {
enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLE,
MLX5E_STATE_OPENED,
+ MLX5E_STATE_DESTROYING,
};
struct mlx5e_vlan_db {
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 active_vlans_ft_ix[VLAN_N_VID];
u32 untagged_rule_ft_ix;
u32 any_vlan_rule_ft_ix;
@@ -379,9 +472,9 @@ struct mlx5e_flow_table {
struct mlx5e_priv {
/* priv data path fields - start */
- int num_tc;
int default_vlan_prio;
struct mlx5e_sq **txq_to_sq_map;
+ int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
/* priv data path fields - end */
unsigned long state;
@@ -390,10 +483,11 @@ struct mlx5e_priv {
u32 pdn;
u32 tdn;
struct mlx5_core_mr mr;
+ struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
u32 tisn[MLX5E_MAX_NUM_TC];
- u32 rqtn;
+ u32 rqtn[MLX5E_NUM_RQT];
u32 tirn[MLX5E_NUM_TT];
struct mlx5e_flow_table ft;
@@ -470,10 +564,9 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_update_stats(struct mlx5e_priv *priv);
-int mlx5e_open_flow_table(struct mlx5e_priv *priv);
-void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -482,17 +575,17 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
- struct mlx5e_params *new_params);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
- struct mlx5e_tx_wqe *wqe)
+ struct mlx5e_tx_wqe *wqe, int bf_sz)
{
+ u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
@@ -503,9 +596,15 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
*/
wmb();
- mlx5_write64((__be32 *)&wqe->ctrl,
- sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
- NULL);
+ if (bf_sz) {
+ __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+
+ /* flush the write-combining mapped buffer */
+ wmb();
+
+ } else {
+ mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+ }
sq->bf_offset ^= sq->bf_buf_size;
}
@@ -519,3 +618,4 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
}
extern const struct ethtool_ops mlx5e_ethtool_ops;
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 388938482..bce912688 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -171,9 +171,9 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_COUNTERS +
+ return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
priv->params.num_channels * NUM_RQ_STATS +
- priv->params.num_channels * priv->num_tc *
+ priv->params.num_channels * priv->params.num_tc *
NUM_SQ_STATS;
/* fallthrough */
default:
@@ -200,6 +200,11 @@ static void mlx5e_get_strings(struct net_device *dev,
strcpy(data + (idx++) * ETH_GSTRING_LEN,
vport_strings[i]);
+ /* PPORT counters */
+ for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_strings[i]);
+
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
@@ -207,7 +212,7 @@ static void mlx5e_get_strings(struct net_device *dev,
"rx%d_%s", i, rq_stats_strings[j]);
for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data +
(idx++) * ETH_GSTRING_LEN,
@@ -234,6 +239,9 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
data[idx++] = ((u64 *)&priv->stats.vport)[i];
+ for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+ data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
@@ -242,7 +250,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
((u64 *)&priv->channel[i]->rq.stats)[j];
for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] = !test_bit(MLX5E_STATE_OPENED,
&priv->state) ? 0 :
@@ -264,7 +272,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_params new_params;
+ bool was_opened;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
@@ -316,11 +324,18 @@ static int mlx5e_set_ringparam(struct net_device *dev,
return 0;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
- new_params.log_rq_size = log_rq_size;
- new_params.log_sq_size = log_sq_size;
- new_params.min_rx_wqes = min_rx_wqes;
- err = mlx5e_update_priv_params(priv, &new_params);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.log_rq_size = log_rq_size;
+ priv->params.log_sq_size = log_sq_size;
+ priv->params.min_rx_wqes = min_rx_wqes;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
mutex_unlock(&priv->state_lock);
return err;
@@ -342,7 +357,7 @@ static int mlx5e_set_channels(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
unsigned int count = ch->combined_count;
- struct mlx5e_params new_params;
+ bool was_opened;
int err = 0;
if (!count) {
@@ -365,9 +380,16 @@ static int mlx5e_set_channels(struct net_device *dev,
return 0;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
- new_params.num_channels = count;
- err = mlx5e_update_priv_params(priv, &new_params);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.num_channels = count;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
mutex_unlock(&priv->state_lock);
return err;
@@ -606,7 +628,7 @@ static int mlx5e_set_settings(struct net_device *netdev,
u32 link_modes;
u32 speed;
u32 eth_proto_cap, eth_proto_admin;
- u8 port_status;
+ enum mlx5_port_status ps;
int err;
speed = ethtool_cmd_speed(cmd);
@@ -640,25 +662,197 @@ static int mlx5e_set_settings(struct net_device *netdev,
if (link_modes == eth_proto_admin)
goto out;
- err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+ mlx5_query_port_admin_status(mdev, &ps);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+ mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+
+out:
+ return err;
+}
+
+static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return sizeof(priv->params.toeplitz_hash_key);
+}
+
+static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return MLX5E_INDIR_RQT_SIZE;
+}
+
+static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (indir)
+ memcpy(indir, priv->params.indirection_rqt,
+ sizeof(priv->params.indirection_rqt));
+
+ if (key)
+ memcpy(key, priv->params.toeplitz_hash_key,
+ sizeof(priv->params.toeplitz_hash_key));
+
+ if (hfunc)
+ *hfunc = priv->params.rss_hfunc;
+
+ return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ bool close_open;
+ int err = 0;
+
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
+ (hfunc != ETH_RSS_HASH_XOR) &&
+ (hfunc != ETH_RSS_HASH_TOP))
+ return -EINVAL;
+
+ mutex_lock(&priv->state_lock);
+
+ if (indir) {
+ memcpy(priv->params.indirection_rqt, indir,
+ sizeof(priv->params.indirection_rqt));
+ mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+ }
+
+ close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
+ test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (close_open)
+ mlx5e_close_locked(dev);
+
+ if (key)
+ memcpy(priv->params.toeplitz_hash_key, key,
+ sizeof(priv->params.toeplitz_hash_key));
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+ priv->params.rss_hfunc = hfunc;
+
+ if (close_open)
+ err = mlx5e_open_locked(priv->netdev);
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static int mlx5e_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = priv->params.num_channels;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlx5e_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ const struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ *(u32 *)data = priv->params.tx_max_inline;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool was_opened;
+ u32 val;
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ val = *(u32 *)data;
+ if (val > mlx5e_get_max_inline_cap(mdev)) {
+ err = -EINVAL;
+ break;
+ }
+
+ mutex_lock(&priv->state_lock);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.tx_max_inline = val;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
+ mutex_unlock(&priv->state_lock);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static void mlx5e_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
+ &pauseparam->tx_pause);
if (err) {
- netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+ netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
__func__, err);
- goto out;
}
+}
- err = mlx5_query_port_status(mdev, &port_status);
- if (err)
- goto out;
+static int mlx5e_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
- if (port_status == MLX5_PORT_DOWN)
- return 0;
+ if (pauseparam->autoneg)
+ return -EINVAL;
+
+ err = mlx5_set_port_pause(mdev,
+ pauseparam->rx_pause ? 1 : 0,
+ pauseparam->tx_pause ? 1 : 0);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
+ __func__, err);
+ }
- err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
- if (err)
- goto out;
- err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
-out:
return err;
}
@@ -676,4 +870,13 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_coalesce = mlx5e_set_coalesce,
.get_settings = mlx5e_get_settings,
.set_settings = mlx5e_set_settings,
+ .get_rxfh_key_size = mlx5e_get_rxfh_key_size,
+ .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
+ .get_rxfh = mlx5e_get_rxfh,
+ .set_rxfh = mlx5e_set_rxfh,
+ .get_rxnfc = mlx5e_get_rxnfc,
+ .get_tunable = mlx5e_get_tunable,
+ .set_tunable = mlx5e_set_tunable,
+ .get_pauseparam = mlx5e_get_pauseparam,
+ .set_pauseparam = mlx5e_set_pauseparam,
};
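The get_rxfh/set_rxfh hooks added above expose a 40-byte Toeplitz key and an indirection table of MLX5E_INDIR_RQT_SIZE entries. The helper below is a minimal sketch of how such a table could be filled with a default round-robin spread over the active channels; the function name is hypothetical and may not match how the driver actually initializes priv->params.indirection_rqt.

/* Spread the indirection entries evenly across the channels so RSS
 * traffic lands on every RQ; callers would pass
 * priv->params.indirection_rqt and priv->params.num_channels.
 */
static void mlx5e_fill_indir_rqt_rr(u32 *indirection_rqt, int rqt_size,
				    int num_channels)
{
	int i;

	for (i = 0; i < rqt_size; i++)
		indirection_rqt[i] = i % num_channels;
}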
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index 120db80c4..22d603f78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -105,25 +105,41 @@ static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
{
void *ft = priv->ft.main;
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
- if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+ if (ai->tt_vec & BIT(MLX5E_TT_ANY))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
}
@@ -156,33 +172,37 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
switch (eth_addr_type) {
case MLX5E_UC:
ret =
- (1 << MLX5E_TT_IPV4_TCP) |
- (1 << MLX5E_TT_IPV6_TCP) |
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_TCP) |
+ BIT(MLX5E_TT_IPV6_TCP) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV6_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
case MLX5E_MC_IPV4:
ret =
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV4) |
0;
break;
case MLX5E_MC_IPV6:
ret =
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV6) |
0;
break;
case MLX5E_MC_OTHER:
ret =
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
}
@@ -191,23 +211,27 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
case MLX5E_ALLMULTI:
ret =
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
default: /* MLX5E_PROMISC */
ret =
- (1 << MLX5E_TT_IPV4_TCP) |
- (1 << MLX5E_TT_IPV6_TCP) |
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_TCP) |
+ BIT(MLX5E_TT_IPV6_TCP) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV6_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
}
@@ -226,6 +250,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
u8 *match_criteria_dmac;
void *ft = priv->ft.main;
u32 *tirn = priv->tirn;
+ u32 *ft_ix;
u32 tt_vec;
int err;
@@ -261,51 +286,51 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
tt_vec = mlx5e_get_tt_vec(ai, type);
- if (tt_vec & (1 << MLX5E_TT_ANY)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
+ if (tt_vec & BIT(MLX5E_TT_ANY)) {
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_ANY]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_ANY]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_ANY);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_ANY);
}
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.ethertype);
- if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
+ if (tt_vec & BIT(MLX5E_TT_IPV4)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
+ if (tt_vec & BIT(MLX5E_TT_IPV6)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6);
}
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@@ -313,70 +338,141 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_UDP);
- if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_UDP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_UDP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
}
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_TCP);
- if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_TCP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_TCP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_AH);
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_IPSEC_AH]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
+ }
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_IPSEC_AH]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_ESP);
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
+ }
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
}
return 0;
+
+err_del_ai:
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+
+ return err;
}
static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
@@ -498,44 +594,32 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ if (!priv->vlan.filter_disabled)
+ return;
- if (priv->vlan.filter_disabled) {
- priv->vlan.filter_disabled = false;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- }
+ priv->vlan.filter_disabled = false;
+ if (priv->netdev->flags & IFF_PROMISC)
+ return;
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ if (priv->vlan.filter_disabled)
+ return;
- if (!priv->vlan.filter_disabled) {
- priv->vlan.filter_disabled = true;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- }
+ priv->vlan.filter_disabled = true;
+ if (priv->netdev->flags & IFF_PROMISC)
+ return;
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int err = 0;
- mutex_lock(&priv->state_lock);
-
- set_bit(vid, priv->vlan.active_vlans);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
- vid);
-
- mutex_unlock(&priv->state_lock);
-
- return err;
+ return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -543,56 +627,11 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mutex_lock(&priv->state_lock);
-
- clear_bit(vid, priv->vlan.active_vlans);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-
- mutex_unlock(&priv->state_lock);
-
- return 0;
-}
-
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
-{
- u16 vid;
- int err;
-
- for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
- vid);
- if (err)
- return err;
- }
-
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- if (err)
- return err;
-
- if (priv->vlan.filter_disabled) {
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- if (err)
- return err;
- }
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
return 0;
}
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
-{
- u16 vid;
-
- if (priv->vlan.filter_disabled)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-
- for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-}
-
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
@@ -656,18 +695,21 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
mlx5e_sync_netdev_addr(priv);
mlx5e_apply_netdev_addr(priv);
}
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+void mlx5e_set_rx_mode_work(struct work_struct *work)
{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
struct net_device *ndev = priv->netdev;
- bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
bool broadcast_enabled = rx_mode_enable;
@@ -679,8 +721,12 @@ void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
- if (enable_promisc)
+ if (enable_promisc) {
mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+ if (!priv->vlan.filter_disabled)
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
if (enable_allmulti)
mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
if (enable_broadcast)
@@ -692,25 +738,18 @@ void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
if (disable_allmulti)
mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
- if (disable_promisc)
+ if (disable_promisc) {
+ if (!priv->vlan.filter_disabled)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+ }
ea->promisc_enabled = promisc_enabled;
ea->allmulti_enabled = allmulti_enabled;
ea->broadcast_enabled = broadcast_enabled;
}
-void mlx5e_set_rx_mode_work(struct work_struct *work)
-{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- set_rx_mode_work);
-
- mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_set_rx_mode_core(priv);
- mutex_unlock(&priv->state_lock);
-}
-
void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
@@ -725,7 +764,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
if (!g)
return -ENOMEM;
- g[0].log_sz = 2;
+ g[0].log_sz = 3;
g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
outer_headers.ethertype);
@@ -833,7 +872,7 @@ static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
mlx5_destroy_flow_table(priv->ft.vlan);
}
-int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
{
int err;
@@ -845,16 +884,24 @@ int mlx5e_open_flow_table(struct mlx5e_priv *priv)
if (err)
goto err_destroy_main_flow_table;
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ goto err_destroy_vlan_flow_table;
+
return 0;
+err_destroy_vlan_flow_table:
+ mlx5e_destroy_vlan_flow_table(priv);
+
err_destroy_main_flow_table:
mlx5e_destroy_main_flow_table(priv);
return err;
}
-void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
mlx5e_destroy_vlan_flow_table(priv);
mlx5e_destroy_main_flow_table(priv);
}
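
Note on the open-coded shifts replaced above: BIT() is the standard helper from <linux/bitops.h>, which expands to the same shift the old code spelled out by hand (with an unsigned-long type). A minimal user-space sketch of the equivalence the membership tests like "tt_vec & BIT(MLX5E_TT_ANY)" rely on — the enum names here are hypothetical stand-ins, not the driver's MLX5E_TT_* values:

	#include <stdio.h>

	#define BIT(nr) (1UL << (nr))		/* same shape as the kernel definition */

	enum { TT_IPV4_TCP, TT_IPV6_TCP, TT_ANY };	/* illustrative stand-ins */

	int main(void)
	{
		unsigned long tt_vec = BIT(TT_IPV4_TCP) | BIT(TT_ANY);

		/* membership test, same shape as "tt_vec & BIT(MLX5E_TT_ANY)" above */
		printf("ANY enabled: %d\n", !!(tt_vec & BIT(TT_ANY)));
		return 0;
	}
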
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 40206da1f..59874d666 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -41,6 +41,7 @@ struct mlx5e_rq_param {
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
+ u16 max_inline;
};
struct mlx5e_cq_param {
@@ -81,6 +82,47 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_pport_stats *s = &priv->stats.pport;
+ u32 *in;
+ u32 *out;
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ in = mlx5_vzalloc(sz);
+ out = mlx5_vzalloc(sz);
+ if (!in || !out)
+ goto free_out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->IEEE_802_3_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->IEEE_802_3_counters));
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->RFC_2863_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->RFC_2863_counters));
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->RFC_2819_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->RFC_2819_counters));
+
+free_out:
+ kvfree(in);
+ kvfree(out);
+}
+
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -107,6 +149,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->lro_packets = 0;
s->lro_bytes = 0;
s->rx_csum_none = 0;
+ s->rx_csum_sw = 0;
s->rx_wqe_err = 0;
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
@@ -114,9 +157,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->lro_packets += rq_stats->lro_packets;
s->lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
+ s->rx_csum_sw += rq_stats->csum_sw;
s->rx_wqe_err += rq_stats->wqe_err;
- for (j = 0; j < priv->num_tc; j++) {
+ for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
s->tso_packets += sq_stats->tso_packets;
@@ -199,8 +243,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
/* Update calculated offload counters */
s->tx_csum_offload = s->tx_packets - tx_offload_none;
- s->rx_csum_good = s->rx_packets - s->rx_csum_none;
+ s->rx_csum_good = s->rx_packets - s->rx_csum_none -
+ s->rx_csum_sw;
+ mlx5e_update_pport_counters(priv);
free_out:
kvfree(out);
}
@@ -272,6 +318,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
int err;
int i;
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+
err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
if (err)
@@ -304,6 +352,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->netdev = c->netdev;
rq->channel = c;
rq->ix = c->ix;
+ rq->priv = c->priv;
return 0;
@@ -321,8 +370,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = rq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *in;
@@ -342,11 +390,11 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
memcpy(rqc, param->rqc, sizeof(param->rqc));
- MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
+ MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
mlx5_fill_page_array(&rq->wq_ctrl.buf,
@@ -389,11 +437,7 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
-
- mlx5_core_destroy_rq(mdev, rq->rqn);
+ mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
@@ -502,6 +546,8 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
return err;
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
&sq->wq_ctrl);
if (err)
@@ -509,7 +555,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
sq->uar_map = sq->uar.map;
+ sq->uar_bf_map = sq->uar.bf_map;
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+ sq->max_inline = param->max_inline;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
if (err)
@@ -518,11 +566,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
txq_ix = c->ix + tc * priv->params.num_channels;
sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
- sq->pdev = c->pdev;
- sq->mkey_be = c->mkey_be;
- sq->channel = c;
- sq->tc = tc;
- sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->bf_budget = MLX5E_SQ_BF_BUDGET;
priv->txq_to_sq_map[txq_ix] = sq;
return 0;
@@ -569,7 +618,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
memcpy(sqc, param->sqc, sizeof(param->sqc));
- MLX5_SET(sqc, sqc, user_index, sq->tc);
MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
@@ -579,7 +627,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, sq->uar.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
mlx5_fill_page_array(&sq->wq_ctrl.buf,
@@ -702,7 +750,8 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
int err;
u32 i;
- param->wq.numa = cpu_to_node(c->cpu);
+ param->wq.buf_numa_node = cpu_to_node(c->cpu);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
param->eq_ix = c->ix;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@@ -732,6 +781,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
}
cq->channel = c;
+ cq->priv = priv;
return 0;
}
@@ -743,8 +793,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
- struct mlx5e_channel *c = cq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = cq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
@@ -773,7 +822,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
err = mlx5_core_create_cq(mdev, mcq, in, inlen);
@@ -790,8 +839,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
- struct mlx5e_channel *c = cq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = cq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_cq(mdev, &cq->mcq);
@@ -901,13 +949,13 @@ static void mlx5e_close_sqs(struct mlx5e_channel *c)
mlx5e_close_sq(&c->sq[tc]);
}
-static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
- int num_channels)
+static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
int i;
for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
- c->tc_to_txq_map[i] = c->ix + i * num_channels;
+ priv->channeltc_to_txq_map[ix][i] =
+ ix + i * priv->params.num_channels;
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
@@ -929,9 +977,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mr.key);
- c->num_tc = priv->num_tc;
+ c->num_tc = priv->params.num_tc;
- mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
+ mlx5e_build_channeltc_to_txq_map(priv, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -1000,7 +1048,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
MLX5_SET(wq, wq, pd, priv->pdn);
- param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
}
@@ -1014,7 +1062,8 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->pdn);
- param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+ param->max_inline = priv->params.tx_max_inline;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1059,27 +1108,28 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
struct mlx5e_channel_param cparam;
+ int nch = priv->params.num_channels;
int err = -ENOMEM;
int i;
int j;
- priv->channel = kcalloc(priv->params.num_channels,
- sizeof(struct mlx5e_channel *), GFP_KERNEL);
+ priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
+ GFP_KERNEL);
- priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+ priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
sizeof(struct mlx5e_sq *), GFP_KERNEL);
if (!priv->channel || !priv->txq_to_sq_map)
goto err_free_txq_to_sq_map;
mlx5e_build_channel_param(priv, &cparam);
- for (i = 0; i < priv->params.num_channels; i++) {
+ for (i = 0; i < nch; i++) {
err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
if (err)
goto err_close_channels;
}
- for (j = 0; j < priv->params.num_channels; j++) {
+ for (j = 0; j < nch; j++) {
err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
if (err)
goto err_close_channels;
@@ -1109,67 +1159,73 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
kfree(priv->channel);
}
-static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+static int mlx5e_rx_hash_fn(int hfunc)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(create_tis_in)];
- void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ return (hfunc == ETH_RSS_HASH_TOP) ?
+ MLX5_RX_HASH_FN_TOEPLITZ :
+ MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
- memset(in, 0, sizeof(in));
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+ int inv = 0;
+ int i;
- MLX5_SET(tisc, tisc, prio, tc);
- MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+ for (i = 0; i < size; i++)
+ inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
- return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+ return inv;
}
-static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
- mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
-}
+ int i;
-static int mlx5e_open_tises(struct mlx5e_priv *priv)
-{
- int num_tc = priv->num_tc;
- int err;
- int tc;
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+ int ix = i;
- for (tc = 0; tc < num_tc; tc++) {
- err = mlx5e_open_tis(priv, tc);
- if (err)
- goto err_close_tises;
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
+
+ ix = priv->params.indirection_rqt[ix];
+ ix = ix % priv->params.num_channels;
+ MLX5_SET(rqtc, rqtc, rq_num[i],
+ test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[ix]->rq.rqn :
+ priv->drop_rq.rqn);
}
+}
- return 0;
+static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
+ enum mlx5e_rqt_ix rqt_ix)
+{
-err_close_tises:
- for (tc--; tc >= 0; tc--)
- mlx5e_close_tis(priv, tc);
+ switch (rqt_ix) {
+ case MLX5E_INDIRECTION_RQT:
+ mlx5e_fill_indir_rqt_rqns(priv, rqtc);
- return err;
-}
+ break;
-static void mlx5e_close_tises(struct mlx5e_priv *priv)
-{
- int num_tc = priv->num_tc;
- int tc;
+ default: /* MLX5E_SINGLE_RQ_RQT */
+ MLX5_SET(rqtc, rqtc, rq_num[0],
+ test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[0]->rq.rqn :
+ priv->drop_rq.rqn);
- for (tc = 0; tc < num_tc; tc++)
- mlx5e_close_tis(priv, tc);
+ break;
+ }
}
-static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 *in;
- u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
void *rqtc;
int inlen;
- int err;
int sz;
- int i;
+ int err;
- sz = 1 << priv->params.rx_hash_log_tbl_sz;
+ sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
@@ -1181,198 +1237,101 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- for (i = 0; i < sz; i++) {
- int ix = i % priv->params.num_channels;
-
- MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
- }
-
- MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+ mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
- if (!err)
- priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+ err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
kvfree(in);
return err;
}
-static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *rqtc;
+ int inlen;
+ int sz;
+ int err;
- memset(in, 0, sizeof(in));
+ sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
- MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
- MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
- mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
- sizeof(out));
-}
+ rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
-{
- void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+ mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
+ MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
-#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_L4_SPORT |\
- MLX5_HASH_FIELD_SEL_L4_DPORT)
-
- if (priv->params.lro_en) {
- MLX5_SET(tirc, tirc, lro_enable_mask,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
- MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (priv->params.lro_wqe_sz -
- ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
- MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[3]));
- }
+ err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
- switch (tt) {
- case MLX5E_TT_ANY:
- MLX5_SET(tirc, tirc, disp_type,
- MLX5_TIRC_DISP_TYPE_DIRECT);
- MLX5_SET(tirc, tirc, inline_rqn,
- priv->channel[0]->rq.rqn);
- break;
- default:
- MLX5_SET(tirc, tirc, disp_type,
- MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table,
- priv->rqtn);
- MLX5_SET(tirc, tirc, rx_hash_fn,
- MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
- rx_hash_toeplitz_key),
- MLX5_FLD_SZ_BYTES(tirc,
- rx_hash_toeplitz_key));
- break;
- }
+ kvfree(in);
- switch (tt) {
- case MLX5E_TT_IPV4_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+ return err;
+}
- case MLX5E_TT_IPV6_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+{
+ mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
+}
- case MLX5E_TT_IPV4_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
+{
+ mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+}
- case MLX5E_TT_IPV6_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+{
+ if (!priv->params.lro_en)
+ return;
- case MLX5E_TT_IPV4:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
- case MLX5E_TT_IPV6:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
- }
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (priv->params.lro_wqe_sz -
+ ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+ MLX5_CAP_ETH(priv->mdev,
+ lro_timer_supported_periods[2]));
}
-static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
+
+ void *in;
void *tirc;
int inlen;
int err;
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
+ tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- mlx5e_build_tir_ctx(priv, tirc, tt);
+ mlx5e_build_tir_ctx_lro(tirc, priv);
- err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+ err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
kvfree(in);
return err;
}
-static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
-{
- mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
-}
-
-static int mlx5e_open_tirs(struct mlx5e_priv *priv)
-{
- int err;
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++) {
- err = mlx5e_open_tir(priv, i);
- if (err)
- goto err_close_tirs;
- }
-
- return 0;
-
-err_close_tirs:
- for (i--; i >= 0; i--)
- mlx5e_close_tir(priv, i);
-
- return err;
-}
-
-static void mlx5e_close_tirs(struct mlx5e_priv *priv)
-{
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++)
- mlx5e_close_tir(priv, i);
-}
-
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1400,6 +1359,8 @@ int mlx5e_open_locked(struct net_device *netdev)
int num_txqs;
int err;
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
num_txqs = priv->params.num_channels * priv->params.num_tc;
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
@@ -1408,74 +1369,19 @@ int mlx5e_open_locked(struct net_device *netdev)
if (err)
return err;
- err = mlx5e_open_tises(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
- __func__, err);
- return err;
- }
-
err = mlx5e_open_channels(priv);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
__func__, err);
- goto err_close_tises;
- }
-
- err = mlx5e_open_rqt(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
- __func__, err);
- goto err_close_channels;
- }
-
- err = mlx5e_open_tirs(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
- __func__, err);
- goto err_close_rqls;
- }
-
- err = mlx5e_open_flow_table(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
- __func__, err);
- goto err_close_tirs;
- }
-
- err = mlx5e_add_all_vlan_rules(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
- __func__, err);
- goto err_close_flow_table;
+ return err;
}
- mlx5e_init_eth_addr(priv);
-
- set_bit(MLX5E_STATE_OPENED, &priv->state);
-
mlx5e_update_carrier(priv);
- mlx5e_set_rx_mode_core(priv);
+ mlx5e_redirect_rqts(priv);
schedule_delayed_work(&priv->update_stats_work, 0);
- return 0;
-
-err_close_flow_table:
- mlx5e_close_flow_table(priv);
-
-err_close_tirs:
- mlx5e_close_tirs(priv);
-
-err_close_rqls:
- mlx5e_close_rqt(priv);
-err_close_channels:
- mlx5e_close_channels(priv);
-
-err_close_tises:
- mlx5e_close_tises(priv);
-
- return err;
+ return 0;
}
static int mlx5e_open(struct net_device *netdev)
@@ -1496,14 +1402,9 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
- mlx5e_set_rx_mode_core(priv);
- mlx5e_del_all_vlan_rules(priv);
+ mlx5e_redirect_rqts(priv);
netif_carrier_off(priv->netdev);
- mlx5e_close_flow_table(priv);
- mlx5e_close_tirs(priv);
- mlx5e_close_rqt(priv);
mlx5e_close_channels(priv);
- mlx5e_close_tises(priv);
return 0;
}
@@ -1520,26 +1421,341 @@ static int mlx5e_close(struct net_device *netdev)
return err;
}
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
- struct mlx5e_params *new_params)
+static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
+ struct mlx5e_rq *rq,
+ struct mlx5e_rq_param *param)
{
- int err = 0;
- int was_opened;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqc = param->rqc;
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int err;
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ param->wq.db_numa_node = param->wq.buf_numa_node;
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(priv->netdev);
+ err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ &rq->wq_ctrl);
+ if (err)
+ return err;
- priv->params = *new_params;
+ rq->priv = priv;
- if (was_opened)
- err = mlx5e_open_locked(priv->netdev);
+ return 0;
+}
+
+static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
+ struct mlx5e_cq *cq,
+ struct mlx5e_cq_param *param)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int eqn_not_used;
+ int irqn;
+ int err;
+
+ err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ *mcq->set_ci_db = 0;
+ *mcq->arm_db = 0;
+ mcq->vector = param->eq_ix;
+ mcq->comp = mlx5e_completion_event;
+ mcq->event = mlx5e_cq_error_event;
+ mcq->irqn = irqn;
+ mcq->uar = &priv->cq_uar;
+
+ cq->priv = priv;
+
+ return 0;
+}
+
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
+{
+ struct mlx5e_cq_param cq_param;
+ struct mlx5e_rq_param rq_param;
+ struct mlx5e_rq *rq = &priv->drop_rq;
+ struct mlx5e_cq *cq = &priv->drop_rq.cq;
+ int err;
+
+ memset(&cq_param, 0, sizeof(cq_param));
+ memset(&rq_param, 0, sizeof(rq_param));
+ mlx5e_build_rx_cq_param(priv, &cq_param);
+ mlx5e_build_rq_param(priv, &rq_param);
+
+ err = mlx5e_create_drop_cq(priv, cq, &cq_param);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_cq(cq, &cq_param);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5e_create_drop_rq(priv, rq, &rq_param);
+ if (err)
+ goto err_disable_cq;
+
+ err = mlx5e_enable_rq(rq, &rq_param);
+ if (err)
+ goto err_destroy_rq;
+
+ return 0;
+
+err_destroy_rq:
+ mlx5e_destroy_rq(&priv->drop_rq);
+
+err_disable_cq:
+ mlx5e_disable_cq(&priv->drop_rq.cq);
+
+err_destroy_cq:
+ mlx5e_destroy_cq(&priv->drop_rq.cq);
+
+ return err;
+}
+
+static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
+{
+ mlx5e_disable_rq(&priv->drop_rq);
+ mlx5e_destroy_rq(&priv->drop_rq);
+ mlx5e_disable_cq(&priv->drop_rq.cq);
+ mlx5e_destroy_cq(&priv->drop_rq.cq);
+}
+
+static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(tisc, tisc, prio, tc);
+ MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+ return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
+{
+ mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_create_tises(struct mlx5e_priv *priv)
+{
+ int err;
+ int tc;
+
+ for (tc = 0; tc < priv->params.num_tc; tc++) {
+ err = mlx5e_create_tis(priv, tc);
+ if (err)
+ goto err_close_tises;
+ }
+
+ return 0;
+
+err_close_tises:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_destroy_tis(priv, tc);
+
+ return err;
+}
+
+static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+{
+ int tc;
+
+ for (tc = 0; tc < priv->params.num_tc; tc++)
+ mlx5e_destroy_tis(priv, tc);
+}
+
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
+ mlx5e_build_tir_ctx_lro(tirc, priv);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+
+ switch (tt) {
+ case MLX5E_TT_ANY:
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+ break;
+ default:
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn[MLX5E_INDIRECTION_RQT]);
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+ rx_hash_toeplitz_key);
+ size_t len = MLX5_FLD_SZ_BYTES(tirc,
+ rx_hash_toeplitz_key);
+
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+ }
+ break;
+ }
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ }
+}
+
+static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ mlx5e_build_tir_ctx(priv, tirc, tt);
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
+{
+ mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ err = mlx5e_create_tir(priv, i);
+ if (err)
+ goto err_destroy_tirs;
+ }
+
+ return 0;
+
+err_destroy_tirs:
+ for (i--; i >= 0; i--)
+ mlx5e_destroy_tir(priv, i);
return err;
}
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ mlx5e_destroy_tir(priv, i);
+}
+
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
@@ -1589,20 +1805,26 @@ static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
netdev_features_t changes = features ^ netdev->features;
- struct mlx5e_params new_params;
- bool update_params = false;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
if (changes & NETIF_F_LRO) {
- new_params.lro_en = !!(features & NETIF_F_LRO);
- update_params = true;
+ bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params.lro_en = !!(features & NETIF_F_LRO);
+ mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
+ mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+
+ if (was_opened)
+ err = mlx5e_open_locked(priv->netdev);
}
- if (update_params)
- mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1611,8 +1833,6 @@ static int mlx5e_set_features(struct net_device *netdev,
mlx5e_disable_vlan_filter(priv);
}
- mutex_unlock(&priv->state_lock);
-
return 0;
}
@@ -1620,8 +1840,9 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ bool was_opened;
int max_mtu;
- int err;
+ int err = 0;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
@@ -1633,8 +1854,16 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
}
mutex_lock(&priv->state_lock);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(netdev);
+
netdev->mtu = new_mtu;
- err = mlx5e_update_priv_params(priv, &priv->params);
+
+ if (was_opened)
+ err = mlx5e_open_locked(netdev);
+
mutex_unlock(&priv->state_lock);
return err;
@@ -1673,11 +1902,21 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
return 0;
}
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+ int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ return bf_buf_size -
+ sizeof(struct mlx5e_tx_wqe) +
+ 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
+}
+
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
- int num_comp_vectors)
+ int num_channels)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int i;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -1691,24 +1930,25 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
priv->params.tx_cq_moderation_pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
priv->params.min_rx_wqes =
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
- priv->params.rx_hash_log_tbl_sz =
- (order_base_2(num_comp_vectors) >
- MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
- order_base_2(num_comp_vectors) :
- MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
priv->params.num_tc = 1;
priv->params.default_vlan_prio = 0;
+ priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
+
+ netdev_rss_key_fill(priv->params.toeplitz_hash_key,
+ sizeof(priv->params.toeplitz_hash_key));
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ priv->params.indirection_rqt[i] = i % num_channels;
- priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
priv->params.lro_wqe_sz =
MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
priv->mdev = mdev;
priv->netdev = netdev;
- priv->params.num_channels = num_comp_vectors;
- priv->num_tc = priv->params.num_tc;
+ priv->params.num_channels = num_channels;
priv->default_vlan_prio = priv->params.default_vlan_prio;
spin_lock_init(&priv->async_events_spinlock);
@@ -1733,9 +1973,8 @@ static void mlx5e_build_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
- if (priv->num_tc > 1) {
+ if (priv->params.num_tc > 1)
mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
- }
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->watchdog_timeo = 15 * HZ;
@@ -1798,19 +2037,20 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
struct net_device *netdev;
struct mlx5e_priv *priv;
- int ncv = mdev->priv.eq_table.num_comp_vectors;
+ int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
+ MLX5E_MAX_NUM_CHANNELS);
int err;
if (mlx5e_check_required_hca_cap(mdev))
return NULL;
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
}
- mlx5e_build_netdev_priv(mdev, netdev, ncv);
+ mlx5e_build_netdev_priv(mdev, netdev, nch);
mlx5e_build_netdev(netdev);
netif_carrier_off(netdev);
@@ -1819,43 +2059,95 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
if (err) {
- netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
goto err_free_netdev;
}
err = mlx5_core_alloc_pd(mdev, &priv->pdn);
if (err) {
- netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
goto err_unmap_free_uar;
}
err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
if (err) {
- netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc td failed, %d\n", err);
goto err_dealloc_pd;
}
err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
if (err) {
- netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "create mkey failed, %d\n", err);
goto err_dealloc_transport_domain;
}
- err = register_netdev(netdev);
+ err = mlx5e_create_tises(priv);
if (err) {
- netdev_err(netdev, "%s: register_netdev failed, %d\n",
- __func__, err);
+ mlx5_core_warn(mdev, "create tises failed, %d\n", err);
goto err_destroy_mkey;
}
+ err = mlx5e_open_drop_rq(priv);
+ if (err) {
+ mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+ goto err_destroy_tises;
+ }
+
+ err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+ if (err) {
+ mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+ goto err_close_drop_rq;
+ }
+
+ err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+ if (err) {
+ mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
+ goto err_destroy_rqt_indir;
+ }
+
+ err = mlx5e_create_tirs(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
+ goto err_destroy_rqt_single;
+ }
+
+ err = mlx5e_create_flow_tables(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+ goto err_destroy_tirs;
+ }
+
+ mlx5e_init_eth_addr(priv);
+
+ err = register_netdev(netdev);
+ if (err) {
+ mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+ goto err_destroy_flow_tables;
+ }
+
mlx5e_enable_async_events(priv);
+ schedule_work(&priv->set_rx_mode_work);
return priv;
+err_destroy_flow_tables:
+ mlx5e_destroy_flow_tables(priv);
+
+err_destroy_tirs:
+ mlx5e_destroy_tirs(priv);
+
+err_destroy_rqt_single:
+ mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+
+err_destroy_rqt_indir:
+ mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+
+err_close_drop_rq:
+ mlx5e_close_drop_rq(priv);
+
+err_destroy_tises:
+ mlx5e_destroy_tises(priv);
+
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, &priv->mr);
@@ -1879,13 +2171,22 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
struct mlx5e_priv *priv = vpriv;
struct net_device *netdev = priv->netdev;
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+
+ schedule_work(&priv->set_rx_mode_work);
+ mlx5e_disable_async_events(priv);
+ flush_scheduled_work();
unregister_netdev(netdev);
+ mlx5e_destroy_flow_tables(priv);
+ mlx5e_destroy_tirs(priv);
+ mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+ mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_close_drop_rq(priv);
+ mlx5e_destroy_tises(priv);
mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
- mlx5e_disable_async_events(priv);
- flush_scheduled_work();
free_netdev(netdev);
}
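
On the mlx5e_bits_invert() helper introduced in this file: mlx5e_fill_indir_rqt_rqns() applies it to the table index only when rss_hfunc is ETH_RSS_HASH_XOR, reversing the low "size" bits of the index before looking up indirection_rqt[]. A stand-alone sketch of the same bit reversal, rewritten with plain shifts for user space (the kernel version uses test_bit() on an unsigned long):

	#include <stdio.h>

	/* reverse the low "size" bits of a */
	static int bits_invert(unsigned long a, int size)
	{
		int inv = 0;
		int i;

		for (i = 0; i < size; i++)
			inv |= ((a >> (size - i - 1)) & 1) << i;

		return inv;
	}

	int main(void)
	{
		/* with size = 3: 0b001 -> 0b100, 0b110 -> 0b011 */
		printf("%d %d\n", bits_invert(1, 3), bits_invert(6, 3));
		return 0;
	}

With size = 3 this maps 1 to 4 and 6 to 3, matching what the loop in the hunk computes for each indirection-table slot.
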
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9a9374131..cf0098596 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -111,10 +111,12 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
sizeof(struct iphdr));
ipv6 = NULL;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
} else {
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
sizeof(struct ipv6hdr));
ipv4 = NULL;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
}
if (get_cqe_lro_tcppsh(cqe))
@@ -149,6 +151,38 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}
+static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+{
+ __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+
+ return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+}
+
+static inline void mlx5e_handle_csum(struct net_device *netdev,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+ struct sk_buff *skb)
+{
+ if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+ goto csum_none;
+
+ if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if (is_first_ethertype_ip(skb)) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ rq->stats.csum_sw++;
+ } else {
+ goto csum_none;
+ }
+
+ return;
+
+csum_none:
+ skb->ip_summed = CHECKSUM_NONE;
+ rq->stats.csum_none++;
+}
+
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
struct sk_buff *skb)
@@ -162,20 +196,12 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe);
- skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
rq->stats.lro_packets++;
rq->stats.lro_bytes += cqe_bcnt;
}
- if (likely(netdev->features & NETIF_F_RXCSUM) &&
- (cqe->hds_ip_ext & CQE_L2_OK) &&
- (cqe->hds_ip_ext & CQE_L3_OK) &&
- (cqe->hds_ip_ext & CQE_L4_OK)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- skb->ip_summed = CHECKSUM_NONE;
- rq->stats.csum_none++;
- }
+ mlx5e_handle_csum(netdev, cqe, rq, skb);
skb->protocol = eth_type_trans(skb, netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 03f28f438..b73672f32 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -57,7 +57,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
if (notify_hw) {
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe);
+ mlx5e_tx_notify_hw(sq, wqe, 0);
}
}
@@ -106,13 +106,21 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
priv->default_vlan_prio;
int tc = netdev_get_prio_tc_map(dev, up);
- return priv->channel[channel_ix]->tc_to_txq_map[tc];
+ return priv->channeltc_to_txq_map[channel_ix][tc];
}
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool bf)
{
-#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+ /* Some NIC TX decisions, e.g loopback, are based on the packet
+ * headers and occur before the data gather.
+ * Therefore these headers must be copied into the WQE
+ */
+#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+
+ if (bf && (skb_headlen(skb) <= sq->max_inline))
+ return skb_headlen(skb);
+
return MLX5E_MIN_INLINE;
}
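
For reference, the reworked MLX5E_MIN_INLINE comes out to the same value the removed literal encoded: ETH_HLEN + 2 = 14 + 2 = 16 bytes, i.e. an Ethernet header plus room for a VLAN tag, which is what the old "16 /* eth header with vlan (w/o next ethertype) */" comment described. The functional change in this hunk is the bf path, which may inline up to sq->max_inline bytes of the linear header instead.
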
@@ -129,6 +137,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
u8 opcode = MLX5_OPCODE_SEND;
dma_addr_t dma_addr = 0;
+ bool bf = false;
u16 headlen;
u16 ds_cnt;
u16 ihs;
@@ -141,6 +150,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
else
sq->stats.csum_offload_none++;
+ if (sq->cc != sq->prev_cc) {
+ sq->prev_cc = sq->cc;
+ sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
+ }
+
if (skb_is_gso(skb)) {
u32 payload_len;
@@ -153,7 +167,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.tso_packets++;
sq->stats.tso_bytes += payload_len;
} else {
- ihs = mlx5e_get_inline_hdr_size(sq, skb);
+ bf = sq->bf_budget &&
+ !skb->xmit_more &&
+ !skb_shinfo(skb)->nr_frags;
+ ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
ETH_ZLEN);
}
@@ -225,14 +242,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+ int bf_sz = 0;
+
+ if (bf && sq->uar_bf_map)
+ bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe);
+ mlx5e_tx_notify_hw(sq, wqe, bf_sz);
}
/* fill sq edge with nops to avoid wqe wrap around */
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
+ sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+
sq->stats.packets++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 06e3e1e54..03aabdd79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -457,7 +457,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
struct mlx5_priv *priv = &mdev->priv;
struct msix_entry *msix = priv->msix_arr;
int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
- int numa_node = dev_to_node(&mdev->pdev->dev);
+ int numa_node = priv->numa_node;
int err;
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -656,6 +656,22 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
}
#endif
+static int map_bf_area(struct mlx5_core_dev *dev)
+{
+ resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
+ resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
+
+ dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+
+ return dev->priv.bf_mapping ? 0 : -ENOMEM;
+}
+
+static void unmap_bf_area(struct mlx5_core_dev *dev)
+{
+ if (dev->priv.bf_mapping)
+ io_mapping_free(dev->priv.bf_mapping);
+}
+
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -670,6 +686,10 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
INIT_LIST_HEAD(&priv->pgdir_list);
spin_lock_init(&priv->mkey_lock);
+ mutex_init(&priv->alloc_mutex);
+
+ priv->numa_node = dev_to_node(&dev->pdev->dev);
+
priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
if (!priv->dbg_root)
return -ENOMEM;
@@ -806,10 +826,13 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_eqs;
}
+ if (map_bf_area(dev))
+ dev_err(&pdev->dev, "Failed to map blue flame area\n");
+
err = mlx5_irq_set_affinity_hints(dev);
if (err) {
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
- goto err_free_comp_eqs;
+ goto err_unmap_bf_area;
}
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -821,7 +844,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
return 0;
-err_free_comp_eqs:
+err_unmap_bf_area:
+ unmap_bf_area(dev);
+
free_comp_eqs(dev);
err_stop_eqs:
@@ -879,6 +904,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
+ unmap_bf_area(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fc88ecaec..566a70488 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -73,7 +73,12 @@ static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
int in_size, u32 *out,
int out_size)
{
- mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ int err;
+
+ err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ if (err)
+ return err;
+
return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
}
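
With this change a transport-level failure inside mlx5_cmd_exec() is reported to the caller instead of being masked by a stale (all-zero) outbox header. A hedged caller sketch, where example_exec is a hypothetical name:

static int example_exec(struct mlx5_core_dev *dev, u32 *in, int inlen,
			u32 *out, int outlen)
{
	/* returns the mailbox/transport error first, then the command status */
	int err = mlx5_cmd_exec_check_status(dev, in, inlen, out, outlen);

	if (err)
		mlx5_core_warn(dev, "command failed, err %d\n", err);
	return err;
}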
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 70147999f..3b9480fa3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -216,22 +216,25 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
}
EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status)
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status)
{
u32 in[MLX5_ST_SZ_DW(paos_reg)];
u32 out[MLX5_ST_SZ_DW(paos_reg)];
memset(in, 0, sizeof(in));
+ MLX5_SET(paos_reg, in, local_port, 1);
MLX5_SET(paos_reg, in, admin_status, status);
MLX5_SET(paos_reg, in, ase, 1);
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
+EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status)
{
u32 in[MLX5_ST_SZ_DW(paos_reg)];
u32 out[MLX5_ST_SZ_DW(paos_reg)];
@@ -239,14 +242,17 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
memset(in, 0, sizeof(in));
+ MLX5_SET(paos_reg, in, local_port, 1);
+
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 0);
if (err)
return err;
- *status = MLX5_GET(paos_reg, out, oper_status);
+ *status = MLX5_GET(paos_reg, out, admin_status);
return err;
}
+EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
int *max_mtu, int *oper_mtu, u8 port)
@@ -305,7 +311,7 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int err;
memset(in, 0, sizeof(in));
- MLX5_SET(ptys_reg, in, local_port, local_port);
+ MLX5_SET(pvlc_reg, in, local_port, local_port);
err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
pvlc_size, MLX5_REG_PVLC, 0, 0);
@@ -328,3 +334,45 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+ MLX5_SET(pfcc_reg, in, pptx, tx_pause);
+ MLX5_SET(pfcc_reg, in, pprx, rx_pause);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
+
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 0);
+ if (err)
+ return err;
+
+ if (rx_pause)
+ *rx_pause = MLX5_GET(pfcc_reg, out, pprx);
+
+ if (tx_pause)
+ *tx_pause = MLX5_GET(pfcc_reg, out, pptx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
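
A sketch of how an ethtool pause handler could sit on top of the new PFCC helpers; the mlx5e_priv/netdev plumbing and the *_sketch names are assumptions, not the driver's actual en_ethtool.c code:

static int mlx5e_set_pauseparam_sketch(struct net_device *netdev,
				       struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (pauseparam->autoneg)
		return -EINVAL;	/* pause autoneg is not handled here */

	return mlx5_set_port_pause(priv->mdev,
				   pauseparam->rx_pause ? 1 : 0,
				   pauseparam->tx_pause ? 1 : 0);
}

static void mlx5e_get_pauseparam_sketch(struct net_device *netdev,
					struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* errors are ignored here; a real handler may want to log them */
	mlx5_query_port_pause(priv->mdev,
			      &pauseparam->rx_pause, &pauseparam->tx_pause);
}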
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 8d98b0302..b4c87c7b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -163,6 +163,18 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
}
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+ int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
+
+ MLX5_SET(modify_tir_in, in, tirn, tirn);
+ MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
@@ -358,3 +370,44 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
}
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqtn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ int err;
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ return err;
+}
+
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+ int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+
+ MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
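
A minimal sketch of the RQT (receive queue table) lifecycle exported above; building the rqt_context inside the `in` mailbox (RQ list and sizes) is left to the caller and deliberately not spelled out, and example_rqt_roundtrip is a hypothetical name:

static int example_rqt_roundtrip(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
	u32 rqtn;
	int err;

	err = mlx5_core_create_rqt(dev, in, inlen, &rqtn);
	if (err)
		return err;

	/* ... the indirection table can later be re-pointed at a different
	 * set of RQs with mlx5_core_modify_rqt(), e.g. on RSS changes ...
	 */

	mlx5_core_destroy_rqt(dev, rqtn);
	return 0;
}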
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
index f9ef24471..74cae5143 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -45,6 +45,8 @@ int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+ int inlen);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tisn);
@@ -61,4 +63,10 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+ int inlen);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 9ef85873c..eb05c845e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
@@ -246,6 +247,10 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
goto err_free_uar;
}
+ if (mdev->priv.bf_mapping)
+ uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
+ uar->index << PAGE_SHIFT);
+
return 0;
err_free_uar:
@@ -257,6 +262,7 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
{
+ io_mapping_unmap(uar->bf_map);
iounmap(uar->map);
mlx5_cmd_free_uar(mdev, uar->index);
}
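
A hedged sketch of the consumer side: after mlx5_alloc_map_uar() the UAR may now carry a write-combining blue flame mapping in addition to the regular one, and callers should treat bf_map as optional (example_uar_setup is a hypothetical name):

static int example_uar_setup(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
{
	int err = mlx5_alloc_map_uar(mdev, uar);

	if (err)
		return err;

	if (!uar->bf_map)
		/* BF area not mapped; fall back to doorbells via uar->map */
		pr_debug("blue flame not available for uar %u\n", uar->index);

	return 0;
}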
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 838841158..ce21ee5b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -73,13 +73,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
goto err_db_free;
@@ -108,13 +109,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
wq->sz_m1 = (1 << wq->log_sz) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
goto err_db_free;
@@ -144,7 +146,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
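
With db_numa_node and buf_numa_node split, callers can now steer the doorbell record and the queue buffer to different NUMA nodes. A small sketch of filling the parameter block; example_fill_wq_param and the per-CPU choice are assumptions, not the driver's code:

static void example_fill_wq_param(struct mlx5_core_dev *mdev,
				  struct mlx5_wq_param *param, int cpu)
{
	/* buffer close to the device, doorbell close to the servicing CPU */
	param->buf_numa_node = dev_to_node(&mdev->pdev->dev);
	param->db_numa_node  = cpu_to_node(cpu);
}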
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e0ddd69fb..6c2a8f950 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -37,7 +37,8 @@
struct mlx5_wq_param {
int linear;
- int numa;
+ int buf_numa_node;
+ int db_numa_node;
};
struct mlx5_wq_ctrl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
new file mode 100644
index 000000000..2941d9c5a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -0,0 +1,32 @@
+#
+# Mellanox switch drivers configuration
+#
+
+config MLXSW_CORE
+ tristate "Mellanox Technologies Switch ASICs support"
+ ---help---
+	  This driver supports the Mellanox Technologies Switch ASIC family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_core.
+
+config MLXSW_PCI
+ tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
+ depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+ default m
+ ---help---
+	  This is the PCI bus implementation for Mellanox Technologies Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_pci.
+
+config MLXSW_SWITCHX2
+ tristate "Mellanox Technologies SwitchX-2 support"
+ depends on MLXSW_CORE && NET_SWITCHDEV
+ default m
+ ---help---
+ This driver supports Mellanox Technologies SwitchX-2 Ethernet
+ Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_switchx2.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
new file mode 100644
index 000000000..0a05f65ee
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
+mlxsw_core-objs := core.o
+obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
+mlxsw_pci-objs := pci.o
+obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
+mlxsw_switchx2-objs := switchx2.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
new file mode 100644
index 000000000..770db17eb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -0,0 +1,1090 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/cmd.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CMD_H
+#define _MLXSW_CMD_H
+
+#include "item.h"
+
+#define MLXSW_CMD_MBOX_SIZE 4096
+
+static inline char *mlxsw_cmd_mbox_alloc(void)
+{
+ return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
+}
+
+static inline void mlxsw_cmd_mbox_free(char *mbox)
+{
+ kfree(mbox);
+}
+
+static inline void mlxsw_cmd_mbox_zero(char *mbox)
+{
+ memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+}
+
+struct mlxsw_core;
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size);
+
+static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod, char *in_mbox,
+ size_t in_mbox_size)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+ in_mbox, in_mbox_size, NULL, 0);
+}
+
+static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod,
+ bool out_mbox_direct,
+ char *out_mbox, size_t out_mbox_size)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
+ out_mbox_direct, NULL, 0,
+ out_mbox, out_mbox_size);
+}
+
+static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+ NULL, 0, NULL, 0);
+}
+
+enum mlxsw_cmd_opcode {
+ MLXSW_CMD_OPCODE_QUERY_FW = 0x004,
+ MLXSW_CMD_OPCODE_QUERY_BOARDINFO = 0x006,
+ MLXSW_CMD_OPCODE_QUERY_AQ_CAP = 0x003,
+ MLXSW_CMD_OPCODE_MAP_FA = 0xFFF,
+ MLXSW_CMD_OPCODE_UNMAP_FA = 0xFFE,
+ MLXSW_CMD_OPCODE_CONFIG_PROFILE = 0x100,
+ MLXSW_CMD_OPCODE_ACCESS_REG = 0x040,
+ MLXSW_CMD_OPCODE_SW2HW_DQ = 0x201,
+ MLXSW_CMD_OPCODE_HW2SW_DQ = 0x202,
+ MLXSW_CMD_OPCODE_2ERR_DQ = 0x01E,
+ MLXSW_CMD_OPCODE_QUERY_DQ = 0x022,
+ MLXSW_CMD_OPCODE_SW2HW_CQ = 0x016,
+ MLXSW_CMD_OPCODE_HW2SW_CQ = 0x017,
+ MLXSW_CMD_OPCODE_QUERY_CQ = 0x018,
+ MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013,
+ MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014,
+ MLXSW_CMD_OPCODE_QUERY_EQ = 0x015,
+};
+
+static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
+{
+ switch (opcode) {
+ case MLXSW_CMD_OPCODE_QUERY_FW:
+ return "QUERY_FW";
+ case MLXSW_CMD_OPCODE_QUERY_BOARDINFO:
+ return "QUERY_BOARDINFO";
+ case MLXSW_CMD_OPCODE_QUERY_AQ_CAP:
+ return "QUERY_AQ_CAP";
+ case MLXSW_CMD_OPCODE_MAP_FA:
+ return "MAP_FA";
+ case MLXSW_CMD_OPCODE_UNMAP_FA:
+ return "UNMAP_FA";
+ case MLXSW_CMD_OPCODE_CONFIG_PROFILE:
+ return "CONFIG_PROFILE";
+ case MLXSW_CMD_OPCODE_ACCESS_REG:
+ return "ACCESS_REG";
+ case MLXSW_CMD_OPCODE_SW2HW_DQ:
+ return "SW2HW_DQ";
+ case MLXSW_CMD_OPCODE_HW2SW_DQ:
+ return "HW2SW_DQ";
+ case MLXSW_CMD_OPCODE_2ERR_DQ:
+ return "2ERR_DQ";
+ case MLXSW_CMD_OPCODE_QUERY_DQ:
+ return "QUERY_DQ";
+ case MLXSW_CMD_OPCODE_SW2HW_CQ:
+ return "SW2HW_CQ";
+ case MLXSW_CMD_OPCODE_HW2SW_CQ:
+ return "HW2SW_CQ";
+ case MLXSW_CMD_OPCODE_QUERY_CQ:
+ return "QUERY_CQ";
+ case MLXSW_CMD_OPCODE_SW2HW_EQ:
+ return "SW2HW_EQ";
+ case MLXSW_CMD_OPCODE_HW2SW_EQ:
+ return "HW2SW_EQ";
+ case MLXSW_CMD_OPCODE_QUERY_EQ:
+ return "QUERY_EQ";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+enum mlxsw_cmd_status {
+ /* Command execution succeeded. */
+ MLXSW_CMD_STATUS_OK = 0x00,
+ /* Internal error (e.g. bus error) occurred while processing command. */
+ MLXSW_CMD_STATUS_INTERNAL_ERR = 0x01,
+ /* Operation/command not supported or opcode modifier not supported. */
+ MLXSW_CMD_STATUS_BAD_OP = 0x02,
+ /* Parameter not supported, parameter out of range. */
+ MLXSW_CMD_STATUS_BAD_PARAM = 0x03,
+ /* System was not enabled or bad system state. */
+ MLXSW_CMD_STATUS_BAD_SYS_STATE = 0x04,
+ /* Attempt to access reserved or unallocated resource, or resource in
+ * inappropriate ownership.
+ */
+ MLXSW_CMD_STATUS_BAD_RESOURCE = 0x05,
+ /* Requested resource is currently executing a command. */
+ MLXSW_CMD_STATUS_RESOURCE_BUSY = 0x06,
+ /* Required capability exceeds device limits. */
+ MLXSW_CMD_STATUS_EXCEED_LIM = 0x08,
+ /* Resource is not in the appropriate state or ownership. */
+ MLXSW_CMD_STATUS_BAD_RES_STATE = 0x09,
+ /* Index out of range (might be beyond table size or attempt to
+ * access a reserved resource).
+ */
+ MLXSW_CMD_STATUS_BAD_INDEX = 0x0A,
+ /* NVMEM checksum/CRC failed. */
+ MLXSW_CMD_STATUS_BAD_NVMEM = 0x0B,
+ /* Bad management packet (silently discarded). */
+ MLXSW_CMD_STATUS_BAD_PKT = 0x30,
+};
+
+static inline const char *mlxsw_cmd_status_str(u8 status)
+{
+ switch (status) {
+ case MLXSW_CMD_STATUS_OK:
+ return "OK";
+ case MLXSW_CMD_STATUS_INTERNAL_ERR:
+ return "INTERNAL_ERR";
+ case MLXSW_CMD_STATUS_BAD_OP:
+ return "BAD_OP";
+ case MLXSW_CMD_STATUS_BAD_PARAM:
+ return "BAD_PARAM";
+ case MLXSW_CMD_STATUS_BAD_SYS_STATE:
+ return "BAD_SYS_STATE";
+ case MLXSW_CMD_STATUS_BAD_RESOURCE:
+ return "BAD_RESOURCE";
+ case MLXSW_CMD_STATUS_RESOURCE_BUSY:
+ return "RESOURCE_BUSY";
+ case MLXSW_CMD_STATUS_EXCEED_LIM:
+ return "EXCEED_LIM";
+ case MLXSW_CMD_STATUS_BAD_RES_STATE:
+ return "BAD_RES_STATE";
+ case MLXSW_CMD_STATUS_BAD_INDEX:
+ return "BAD_INDEX";
+ case MLXSW_CMD_STATUS_BAD_NVMEM:
+ return "BAD_NVMEM";
+ case MLXSW_CMD_STATUS_BAD_PKT:
+ return "BAD_PKT";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+/* QUERY_FW - Query Firmware
+ * -------------------------
+ * OpMod == 0, INMmod == 0
+ * -----------------------
+ * The QUERY_FW command retrieves information related to firmware, command
+ * interface version and the amount of resources that should be allocated to
+ * the firmware.
+ */
+
+static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_fw_fw_pages
+ * Amount of physical memory to be allocated for firmware usage in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_major
+ * Firmware Revision - Major
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);
+
+/* cmd_mbox_query_fw_fw_rev_subminor
+ * Firmware Sub-minor version (Patch level)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_minor
+ * Firmware Revision - Minor
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);
+
+/* cmd_mbox_query_fw_core_clk
+ * Internal Clock Frequency (in MHz)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);
+
+/* cmd_mbox_query_fw_cmd_interface_rev
+ * Command Interface Interpreter Revision ID. This number is bumped up
+ * every time a non-backward-compatible change is done for the command
+ * interface. The current cmd_interface_rev is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);
+
+/* cmd_mbox_query_fw_dt
+ * If set, Debug Trace is supported
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);
+
+/* cmd_mbox_query_fw_api_version
+ * Indicates the version of the API, to enable software querying
+ * for compatibility. The current api_version is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);
+
+/* cmd_mbox_query_fw_fw_hour
+ * Firmware timestamp - hour
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);
+
+/* cmd_mbox_query_fw_fw_minutes
+ * Firmware timestamp - minutes
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);
+
+/* cmd_mbox_query_fw_fw_seconds
+ * Firmware timestamp - seconds
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);
+
+/* cmd_mbox_query_fw_fw_year
+ * Firmware timestamp - year
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);
+
+/* cmd_mbox_query_fw_fw_month
+ * Firmware timestamp - month
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
+
+/* cmd_mbox_query_fw_fw_day
+ * Firmware timestamp - day
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+
+/* cmd_mbox_query_fw_clr_int_base_offset
+ * Clear Interrupt register's offset from clr_int_bar register
+ * in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);
+
+/* cmd_mbox_query_fw_clr_int_bar
+ * PCI base address register (BAR) where clr_int register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);
+
+/* cmd_mbox_query_fw_error_buf_offset
+ * Read-only buffer for internal error reports (offset
+ * from error_buf_bar register in PCI address space).
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);
+
+/* cmd_mbox_query_fw_error_buf_size
+ * Internal error buffer size in DWORDs
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);
+
+/* cmd_mbox_query_fw_error_int_bar
+ * PCI base address register (BAR) where error buffer
+ * register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2);
+
+/* cmd_mbox_query_fw_doorbell_page_offset
+ * Offset of the doorbell page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);
+
+/* cmd_mbox_query_fw_doorbell_page_bar
+ * PCI base address register (BAR) of the doorbell page
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);
+
+/* QUERY_BOARDINFO - Query Board Information
+ * -----------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_BOARDINFO command retrieves adapter specific parameters.
+ */
+
+static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_boardinfo_intapin
+ * When PCIe interrupt messages are being used, this value is used for clearing
+ * an interrupt. When using MSI-X, this register is not used.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8);
+
+/* cmd_mbox_boardinfo_vsd_vendor_id
+ * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor
+ * specifying/formatting the VSD. The vsd_vendor_id identifies the management
+ * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID
+ * format and encoding as long as they use their assigned vsd_vendor_id.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16);
+
+/* cmd_mbox_boardinfo_vsd
+ * Vendor Specific Data. The VSD string that is burnt to the Flash
+ * with the firmware.
+ */
+#define MLXSW_CMD_BOARDINFO_VSD_LEN 208
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN);
+
+/* cmd_mbox_boardinfo_psid
+ * The PSID field is a 16-byte ASCII character string which acts as
+ * the board ID. The PSID format is used in conjunction with
+ * Mellanox vsd_vendor_id (15B3h).
+ */
+#define MLXSW_CMD_BOARDINFO_PSID_LEN 16
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN);
+
+/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
+ * -----------------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_AQ_CAP command returns the device asynchronous queues
+ * capabilities supported.
+ */
+
+static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_aq_cap_log_max_sdq_sz
+ * Log (base 2) of max WQEs allowed on SDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_sdqs
+ * Maximum number of SDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_rdq_sz
+ * Log (base 2) of max WQEs allowed on RDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_rdqs
+ * Maximum number of RDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cq_sz
+ * Log (base 2) of max CQEs allowed on CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_cqs
+ * Maximum number of CQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_eq_sz
+ * Log (base 2) of max EQEs allowed on EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_eqs
+ * Maximum number of EQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_sq
+ * The maximum number of S/G list elements in an SDQ. An SDQ must not
+ * contain more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_rq
+ * The maximum number of S/G list elements in an RDQ. An RDQ must not
+ * contain more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
+
+/* MAP_FA - Map Firmware Area
+ * --------------------------
+ * OpMod == 0 (N/A), INMmod == Number of VPM entries
+ * -------------------------------------------------
+ * The MAP_FA command passes physical pages to the switch. These pages
+ * are used to store the device firmware. MAP_FA can be executed multiple
+ * times until all the firmware area is mapped (the size that should be
+ * mapped is retrieved through the QUERY_FW command). All required pages
+ * must be mapped to finish the initialization phase. Physical memory
+ * passed in this command must be pinned.
+ */
+
+static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 vpm_entries_count)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
+ 0, vpm_entries_count,
+ in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_map_fa_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);
+
+/* cmd_mbox_map_fa_log2size
+ * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
+ * that starts at PA_L/H.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
+
+/* UNMAP_FA - Unmap Firmware Area
+ * ------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The UNMAP_FA command unloads the firmware and unmaps all of the
+ * firmware area. After this command completes, the device will not access
+ * the pages that were mapped to the firmware area. After executing the
+ * UNMAP_FA command, a software reset must be done prior to executing MAP_FA.
+ */
+
+static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
+}
+
+/* CONFIG_PROFILE (Set) - Configure Switch Profile
+ * ------------------------------
+ * OpMod == 1 (Set), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The CONFIG_PROFILE command sets the switch profile. The command can be
+ * executed on the device only once at startup in order to allocate and
+ * configure all switch resources and prepare it for operational mode.
+ * It is not possible to change the device profile after the chip is
+ * in operational mode.
+ * Failure of the CONFIG_PROFILE command leaves the hardware in an indeterminate
+ * state; it is therefore required to perform a software reset of the device
+ * following an unsuccessful completion of the command. A software reset of
+ * the device is also required in order to change an existing profile.
+ */
+
+static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
+ char *in_mbox)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
+ 1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_config_profile_set_max_vepa_channels
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);
+
+/* cmd_mbox_config_profile_set_max_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);
+
+/* cmd_mbox_config_profile_set_max_port_per_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);
+
+/* cmd_mbox_config_profile_set_max_mid
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);
+
+/* cmd_mbox_config_profile_set_max_pgt
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);
+
+/* cmd_mbox_config_profile_set_max_system_port
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);
+
+/* cmd_mbox_config_profile_set_max_vlan_groups
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
+
+/* cmd_mbox_config_profile_set_max_regions
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
+
+/* cmd_mbox_config_profile_set_flood_mode
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);
+
+/* cmd_mbox_config_profile_set_flood_tables
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);
+
+/* cmd_mbox_config_profile_set_max_ib_mc
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);
+
+/* cmd_mbox_config_profile_set_max_pkey
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);
+
+/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ set_adaptive_routing_group_cap, 0x0C, 14, 1);
+
+/* cmd_mbox_config_profile_set_ar_sec
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+
+/* cmd_mbox_config_profile_max_vepa_channels
+ * Maximum number of VEPA channels per port (0 through 16)
+ * 0 - multi-channel VEPA is disabled
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
+
+/* cmd_mbox_config_profile_max_lag
+ * Maximum number of LAG IDs requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
+
+/* cmd_mbox_config_profile_max_port_per_lag
+ * Maximum number of ports per LAG requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);
+
+/* cmd_mbox_config_profile_max_mid
+ * Maximum Multicast IDs.
+ * Multicast IDs are allocated from 0 to max_mid-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);
+
+/* cmd_mbox_config_profile_max_pgt
+ * Maximum records in the Port Group Table per Switch Partition.
+ * Port Group Table indexes are from 0 to max_pgt-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);
+
+/* cmd_mbox_config_profile_max_system_port
+ * The maximum number of system ports that can be allocated.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);
+
+/* cmd_mbox_config_profile_max_vlan_groups
+ * Maximum number of VLAN Groups for VLAN binding.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
+
+/* cmd_mbox_config_profile_max_regions
+ * Maximum number of TCAM Regions.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
+
+/* cmd_mbox_config_profile_max_flood_tables
+ * Maximum number of Flooding Tables. Flooding Tables are associated to
+ * the different packet types for the different switch partitions.
+ * Note that the table size depends on the fid_based mode.
+ * In SwitchX silicon, tables are split equally between the switch
+ * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
+ * with swid-1 and the last 4 are associated with swid-2.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
+
+/* cmd_mbox_config_profile_max_vid_flood_tables
+ * Maximum number of per-vid flooding tables. Flooding tables are associated
+ * to the different packet types for the different switch partitions.
+ * Table size is 4K entries covering all VID space.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+
+/* cmd_mbox_config_profile_fid_based
+ * FID Based Flood Mode
+ * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
+ * 01 Use FID to offset the index to the Port Group Table (pgi)
+ * 10 Use FID to offset the index to the Port Group Table (pgi) and
+ * the Multicast ID
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+
+/* cmd_mbox_config_profile_max_ib_mc
+ * Maximum number of multicast FDB records for InfiniBand
+ * FDB (in 512 chunks) per InfiniBand switch partition.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);
+
+/* cmd_mbox_config_profile_max_pkey
+ * Maximum per port PKEY table size (for PKEY enforcement)
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);
+
+/* cmd_mbox_config_profile_ar_sec
+ * Primary/secondary capability
+ * Describes the number of adaptive routing sub-groups
+ * 0 - disable primary/secondary (single group)
+ * 1 - enable primary/secondary (2 sub-groups)
+ * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
+ * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);
+
+/* cmd_mbox_config_profile_adaptive_routing_group_cap
+ * Adaptive Routing Group Capability. Indicates the number of AR groups
+ * supported. Note that when Primary/secondary is enabled, each
+ * primary/secondary couple consumes 2 adaptive routing entries.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
+
+/* cmd_mbox_config_profile_arn
+ * Adaptive Routing Notification Enable
+ * Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+
+/* cmd_mbox_config_profile_swid_config_mask
+ * Modify Switch Partition Configuration mask. When set, the configuration
+ * values for the Switch Partition are taken from the mailbox.
+ * When clear, the current configuration values are used.
+ * Bit 0 - set type
+ * Bit 1 - properties
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
+ 0x60, 24, 8, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_type
+ * Switch Partition type.
+ * 0000 - disabled (Switch Partition does not exist)
+ * 0001 - InfiniBand
+ * 0010 - Ethernet
+ * 1000 - router port (SwitchX-2 only)
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
+ 0x60, 20, 4, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_properties
+ * Switch Partition properties.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
+ 0x60, 0, 8, 0x08, 0x00, false);
+
+/* ACCESS_REG - Access EMAD Supported Register
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -------------------------------------
+ * The ACCESS_REG command supports accessing device registers. This access
+ * is mainly used for bootstrapping.
+ */
+
+static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, char *out_mbox)
+{
+ return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
+ 0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_DQ - Software to Hardware DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The SW2HW_DQ command transitions a descriptor queue from software to
+ * hardware ownership. The command enables posting WQEs and ringing DoorBells
+ * on the descriptor queue.
+ */
+
+static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number,
+ u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
+ opcode_mod, dq_number,
+ in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum {
+ MLXSW_CMD_OPCODE_MOD_SDQ = 0,
+ MLXSW_CMD_OPCODE_MOD_RDQ = 1,
+};
+
+static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* cmd_mbox_sw2hw_dq_cq
+ * Number of the CQ that this Descriptor Queue reports completions to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+
+/* cmd_mbox_sw2hw_dq_sdq_tclass
+ * SDQ: CPU Egress TClass
+ * RDQ: Reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);
+
+/* cmd_mbox_sw2hw_dq_log2_dq_sz
+ * Log (base 2) of the Descriptor Queue size in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);
+
+/* cmd_mbox_sw2hw_dq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
+
+/* HW2SW_DQ - Hardware to Software DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The HW2SW_DQ command transitions a descriptor queue from hardware to
+ * software ownership. Incoming packets on the DQ are silently discarded;
+ * SW should not post descriptors on non-operational DQs.
+ */
+
+static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number, u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
+ opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* 2ERR_DQ - To Error DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The 2ERR_DQ command transitions the DQ into the error state from whatever
+ * state it was in. While the command is executing, some in-process
+ * descriptors may complete. Once the DQ transitions into the error state,
+ * if there are posted descriptors on the RDQ/SDQ, the hardware writes
+ * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ.
+ * When the command is completed successfully, the DQ is already in
+ * the error state.
+ */
+
+static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number, u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
+ opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* QUERY_DQ - Query DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware.
+ *
+ * Note: Output mailbox has the same format as SW2HW_DQ.
+ */
+
+static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number,
+ u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
+ opcode_mod, dq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* SW2HW_CQ - Software to Hardware CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The SW2HW_CQ command transfers ownership of a CQ context entry from software
+ * to hardware. The command takes the CQ context entry from the input mailbox
+ * and stores it in the CQC in the ownership of the hardware. The command fails
+ * if the requested CQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 cq_number)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
+ 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_cq_cv
+ * CQE Version.
+ * 0 - CQE Version 0, 1 - CQE Version 1
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+
+/* cmd_mbox_sw2hw_cq_c_eqn
+ * Event Queue this CQ reports completion events to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_cq_oi
+ * When set, overrun ignore is enabled: DoorBells for updating the
+ * CQ consumer counter (poll for completion) or requesting completion
+ * notifications (Arm CQ) should not be rung on that CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_cq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);
+
+/* cmd_mbox_sw2hw_cq_log_cq_size
+ * Log (base 2) of the CQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_cq_producer_counter
+ * Producer Counter. The counter is incremented for each CQE that is
+ * written by the HW to the CQ.
+ * Maintained by HW (valid for the QUERY_CQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_cq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_CQ - Hardware to Software CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
+ * to software. The CQC entry is invalidated as a result of this command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
+ u32 cq_number)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
+ 0, cq_number);
+}
+
+/* QUERY_CQ - Query CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
+ * The command stores the snapshot in the output mailbox in the software format.
+ * Note that the CQ context state and values are not affected by the QUERY_CQ
+ * command. The QUERY_CQ command is for debug purposes only.
+ *
+ * Note: Output mailbox has the same format as SW2HW_CQ.
+ */
+
+static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 cq_number)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
+ 0, cq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_EQ - Software to Hardware EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The SW2HW_EQ command transfers ownership of an EQ context entry from software
+ * to hardware. The command takes the EQ context entry from the input mailbox
+ * and stores it in the EQC in the ownership of the hardware. The command fails
+ * if the requested EQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 eq_number)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
+ 0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_eq_int_msix
+ * When set, MSI-X cycles will be generated by this EQ.
+ * When cleared, an interrupt will be generated by this EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_eq_oi
+ * When set, overrun ignore is enabled.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_eq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ * 0x11 - Always ARMED
+ * other - reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
+
+/* cmd_mbox_sw2hw_eq_log_eq_size
+ * Log (base 2) of the EQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_eq_producer_counter
+ * Producer Counter. The counter is incremented for each EQE that is written
+ * by the HW to the EQ.
+ * Maintained by HW (valid for the QUERY_EQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_eq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_EQ - Hardware to Software EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ */
+
+static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core,
+ u32 eq_number)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ,
+ 0, eq_number);
+}
+
+/* QUERY_EQ - Query EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ *
+ * Note: Output mailbox has the same format as SW2HW_EQ.
+ */
+
+static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 eq_number)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ,
+ 0, eq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+#endif
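
A hedged sketch of driving the command interface defined above: allocate a mailbox, run QUERY_FW and read back a few fields through the accessors generated by the MLXSW_ITEM*() macros (example_query_fw is a hypothetical name, not part of the driver):

static int example_query_fw(struct mlxsw_core *mlxsw_core)
{
	char *mbox = mlxsw_cmd_mbox_alloc();
	int err;

	if (!mbox)
		return -ENOMEM;

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (!err)
		pr_info("fw rev %u.%u.%u, %u 4KB pages requested\n",
			mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox),
			mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox),
			mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox),
			mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox));

	mlxsw_cmd_mbox_free(mbox);
	return err;
}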
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
new file mode 100644
index 000000000..28c19cc1a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -0,0 +1,1300 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/if_link.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/netdevice.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "item.h"
+#include "cmd.h"
+#include "port.h"
+#include "trap.h"
+#include "emad.h"
+#include "reg.h"
+
+static LIST_HEAD(mlxsw_core_driver_list);
+static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
+
+static const char mlxsw_core_driver_name[] = "mlxsw_core";
+
+static struct dentry *mlxsw_core_dbg_root;
+
+struct mlxsw_core_pcpu_stats {
+ u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
+ u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
+ u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
+ u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
+ struct u64_stats_sync syncp;
+ u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
+ u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
+ u32 trap_rx_invalid;
+ u32 port_rx_invalid;
+};
+
+struct mlxsw_core {
+ struct mlxsw_driver *driver;
+ const struct mlxsw_bus *bus;
+ void *bus_priv;
+ const struct mlxsw_bus_info *bus_info;
+ struct list_head rx_listener_list;
+ struct list_head event_listener_list;
+ struct {
+ struct sk_buff *resp_skb;
+ u64 tid;
+ wait_queue_head_t wait;
+ bool trans_active;
+ struct mutex lock; /* One EMAD transaction at a time. */
+ bool use_emad;
+ } emad;
+ struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
+ struct dentry *dbg_dir;
+ struct {
+ struct debugfs_blob_wrapper vsd_blob;
+ struct debugfs_blob_wrapper psid_blob;
+ } dbg;
+ unsigned long driver_priv[0];
+ /* driver_priv has to be always the last item */
+};
+
+struct mlxsw_rx_listener_item {
+ struct list_head list;
+ struct mlxsw_rx_listener rxl;
+ void *priv;
+};
+
+struct mlxsw_event_listener_item {
+ struct list_head list;
+ struct mlxsw_event_listener el;
+ void *priv;
+};
+
+/******************
+ * EMAD processing
+ ******************/
+
+/* emad_eth_hdr_dmac
+ * Destination MAC in EMAD's Ethernet header.
+ * Must be set to 01:02:c9:00:00:01
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
+
+/* emad_eth_hdr_smac
+ * Source MAC in EMAD's Ethernet header.
+ * Must be set to 00:02:c9:01:02:03
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
+
+/* emad_eth_hdr_ethertype
+ * Ethertype in EMAD's Ethernet header.
+ * Must be set to 0x8932
+ */
+MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
+
+/* emad_eth_hdr_mlx_proto
+ * Mellanox protocol.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
+
+/* emad_eth_hdr_ver
+ * Mellanox protocol version.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
+
+/* emad_op_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x1 (operation TLV).
+ */
+MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+
+/* emad_op_tlv_len
+ * Length of the operation TLV in u32.
+ * Must be set to 0x4.
+ */
+MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
+
+/* emad_op_tlv_dr
+ * Direct route bit. Setting to 1 indicates the EMAD is a direct route
+ * EMAD. DR TLV must follow.
+ *
+ * Note: Currently not supported and must not be set.
+ */
+MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
+
+/* emad_op_tlv_status
+ * Returned status in case of EMAD response. Must be set to 0 in case
+ * of EMAD request.
+ * 0x0 - success
+ * 0x1 - device is busy. Requester should retry
+ * 0x2 - Mellanox protocol version not supported
+ * 0x3 - unknown TLV
+ * 0x4 - register not supported
+ * 0x5 - operation class not supported
+ * 0x6 - EMAD method not supported
+ * 0x7 - bad parameter (e.g. port out of range)
+ * 0x8 - resource not available
+ * 0x9 - message receipt acknowledgment. Requester should retry
+ * 0x70 - internal error
+ */
+MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
+
+/* emad_op_tlv_register_id
+ * Register ID of register within register TLV.
+ */
+MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
+
+/* emad_op_tlv_r
+ * Response bit. Set to 1 for a response EMAD, 0 for a request.
+ */
+MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
+
+/* emad_op_tlv_method
+ * EMAD method type.
+ * 0x1 - query
+ * 0x2 - write
+ * 0x3 - send (currently not supported)
+ * 0x4 - event
+ */
+MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
+
+/* emad_op_tlv_class
+ * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
+ */
+MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
+
+/* emad_op_tlv_tid
+ * EMAD transaction ID. Used for pairing request and response EMADs.
+ */
+MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+
+/* emad_reg_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x3 (register TLV).
+ */
+MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
+
+/* emad_reg_tlv_len
+ * Length of the register TLV in u32.
+ */
+MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
+
+/* emad_end_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x0 (end TLV).
+ */
+MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
+
+/* emad_end_tlv_len
+ * Length of the end TLV in u32.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
+
+enum mlxsw_core_reg_access_type {
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+};
+
+static inline const char *
+mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
+{
+ switch (type) {
+ case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
+ return "query";
+ case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
+ return "write";
+ }
+ BUG();
+}
+
+static void mlxsw_emad_pack_end_tlv(char *end_tlv)
+{
+ mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
+ mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
+}
+
+static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
+ const struct mlxsw_reg_info *reg,
+ char *payload)
+{
+ mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
+ mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
+ memcpy(reg_tlv + sizeof(u32), payload, reg->len);
+}
+
+static void mlxsw_emad_pack_op_tlv(char *op_tlv,
+ const struct mlxsw_reg_info *reg,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
+ mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
+ mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
+ mlxsw_emad_op_tlv_status_set(op_tlv, 0);
+ mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
+ mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
+	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
+ mlxsw_emad_op_tlv_method_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_METHOD_QUERY);
+ else
+ mlxsw_emad_op_tlv_method_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_METHOD_WRITE);
+ mlxsw_emad_op_tlv_class_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
+ mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+}
+
+static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
+{
+ char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
+
+ mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
+ mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
+ mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
+ mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
+ mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
+
+ skb_reset_mac_header(skb);
+
+ return 0;
+}
+
+static void mlxsw_emad_construct(struct sk_buff *skb,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_core *mlxsw_core)
+{
+ char *buf;
+
+ buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_end_tlv(buf);
+
+ buf = skb_push(skb, reg->len + sizeof(u32));
+ mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+
+ buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+
+ mlxsw_emad_construct_eth_hdr(skb);
+}
+
+static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
+{
+ return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+}
+
+static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
+{
+ return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
+ MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload(const char *op_tlv)
+{
+ return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+}
+
+static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
+{
+ char *op_tlv;
+
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ return mlxsw_emad_op_tlv_tid_get(op_tlv);
+}
+
+static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
+{
+ char *op_tlv;
+
+ op_tlv = mlxsw_emad_op_tlv(skb);
+	return mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE;
+}
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ int err;
+ int ret;
+
+ mlxsw_core->emad.trans_active = true;
+
+ err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
+ if (err) {
+ dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ dev_kfree_skb(skb);
+ goto trans_inactive_out;
+ }
+
+ ret = wait_event_timeout(mlxsw_core->emad.wait,
+ !(mlxsw_core->emad.trans_active),
+ msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
+ if (!ret) {
+ dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ err = -EIO;
+ goto trans_inactive_out;
+ }
+
+ return 0;
+
+trans_inactive_out:
+ mlxsw_core->emad.trans_active = false;
+ return err;
+}
+
+static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
+ char *op_tlv)
+{
+ enum mlxsw_emad_op_tlv_status status;
+ u64 tid;
+
+ status = mlxsw_emad_op_tlv_status_get(op_tlv);
+ tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+
+ switch (status) {
+ case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+ return 0;
+ case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+ case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+		dev_warn(mlxsw_core->bus_info->dev, "Reg access status requires retry (tid=%llx,status=%x(%s))\n",
+ tid, status, mlxsw_emad_op_tlv_status_str(status));
+ return -EAGAIN;
+ case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+ case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+ case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+ case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+ default:
+ dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
+ tid, status, mlxsw_emad_op_tlv_status_str(status));
+ return -EIO;
+ }
+}
+
+static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb)
+{
+ return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+}
+
+static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct sk_buff *trans_skb;
+ int n_retry;
+ int err;
+
+ n_retry = 0;
+retry:
+ /* We copy the EMAD to a new skb, since we might need
+ * to retransmit it in case of failure.
+ */
+ trans_skb = skb_copy(skb, GFP_KERNEL);
+ if (!trans_skb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
+ if (!err) {
+ struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+
+ err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
+ if (err)
+ dev_kfree_skb(resp_skb);
+ if (!err || err != -EAGAIN)
+ goto out;
+ }
+ if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+
+out:
+ dev_kfree_skb(skb);
+ mlxsw_core->emad.tid++;
+ return err;
+}
+
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_core *mlxsw_core = priv;
+
+ if (mlxsw_emad_is_resp(skb) &&
+ mlxsw_core->emad.trans_active &&
+ mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
+ mlxsw_core->emad.resp_skb = skb;
+ mlxsw_core->emad.trans_active = false;
+ wake_up(&mlxsw_core->emad.wait);
+ } else {
+ dev_kfree_skb(skb);
+ }
+}
+
+static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
+ .func = mlxsw_emad_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_ETHEMAD,
+};
+
+static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_TRAP_ID_ETHEMAD);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+
+static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+{
+ int err;
+
+ /* Set the upper 32 bits of the transaction ID field to a random
+ * number. This allows us to discard EMADs addressed to other
+ * devices.
+ */
+ get_random_bytes(&mlxsw_core->emad.tid, 4);
+ mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+
+ init_waitqueue_head(&mlxsw_core->emad.wait);
+ mlxsw_core->emad.trans_active = false;
+ mutex_init(&mlxsw_core->emad.lock);
+
+ err = mlxsw_core_rx_listener_register(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_emad_traps_set(mlxsw_core);
+ if (err)
+ goto err_emad_trap_set;
+
+ mlxsw_core->emad.use_emad = true;
+
+ return 0;
+
+err_emad_trap_set:
+ mlxsw_core_rx_listener_unregister(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+ return err;
+}
+
+static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_TRAP_ID_ETHEMAD);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+}
+
+static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
+ u16 reg_len)
+{
+ struct sk_buff *skb;
+ u16 emad_len;
+
+ emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
+ (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
+ sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
+ return NULL;
+
+ skb = netdev_alloc_skb(NULL, emad_len);
+ if (!skb)
+ return NULL;
+ memset(skb->data, 0, emad_len);
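+	/* Reserve the whole frame as headroom; mlxsw_emad_construct() and
+	 * the driver's txhdr_construct() fill it in with skb_push().
+	 */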
+ skb_reserve(skb, emad_len);
+
+ return skb;
+}
+
+/*****************
+ * Core functions
+ *****************/
+
+static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_core *mlxsw_core = file->private;
+ struct mlxsw_core_pcpu_stats *p;
+ u64 rx_packets, rx_bytes;
+ u64 tmp_rx_packets, tmp_rx_bytes;
+ u32 rx_dropped, rx_invalid;
+ unsigned int start;
+ int i;
+ int j;
+ static const char hdr[] =
+ " NUM RX_PACKETS RX_BYTES RX_DROPPED\n";
+
+	seq_puts(file, hdr);
+ for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
+ rx_packets = 0;
+ rx_bytes = 0;
+ rx_dropped = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ tmp_rx_packets = p->trap_rx_packets[i];
+ tmp_rx_bytes = p->trap_rx_bytes[i];
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+
+ rx_packets += tmp_rx_packets;
+ rx_bytes += tmp_rx_bytes;
+ rx_dropped += p->trap_rx_dropped[i];
+ }
+ seq_printf(file, "trap %3d %12llu %12llu %10u\n",
+ i, rx_packets, rx_bytes, rx_dropped);
+ }
+ rx_invalid = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ rx_invalid += p->trap_rx_invalid;
+ }
+ seq_printf(file, "trap INV %10u\n",
+ rx_invalid);
+
+ for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
+ rx_packets = 0;
+ rx_bytes = 0;
+ rx_dropped = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ tmp_rx_packets = p->port_rx_packets[i];
+ tmp_rx_bytes = p->port_rx_bytes[i];
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+
+ rx_packets += tmp_rx_packets;
+ rx_bytes += tmp_rx_bytes;
+ rx_dropped += p->port_rx_dropped[i];
+ }
+ seq_printf(file, "port %3d %12llu %12llu %10u\n",
+ i, rx_packets, rx_bytes, rx_dropped);
+ }
+ rx_invalid = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ rx_invalid += p->port_rx_invalid;
+ }
+ seq_printf(file, "port INV %10u\n",
+ rx_invalid);
+ return 0;
+}
+
+static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
+{
+ struct mlxsw_core *mlxsw_core = inode->i_private;
+
+ return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
+}
+
+static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
+ .owner = THIS_MODULE,
+ .open = mlxsw_core_rx_stats_dbg_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+ const char *buf, size_t size)
+{
+ __be32 *m = (__be32 *) buf;
+ int i;
+ int count = size / sizeof(__be32);
+
+ for (i = count - 1; i >= 0; i--)
+ if (m[i])
+ break;
+ i++;
+ count = i ? i : 1;
+ for (i = 0; i < count; i += 4)
+ dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+ i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+ be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
+{
+ spin_lock(&mlxsw_core_driver_list_lock);
+ list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_register);
+
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
+{
+ spin_lock(&mlxsw_core_driver_list_lock);
+ list_del(&mlxsw_driver->list);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_driver_unregister);
+
+static struct mlxsw_driver *__driver_find(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
+ if (strcmp(mlxsw_driver->kind, kind) == 0)
+ return mlxsw_driver;
+ }
+ return NULL;
+}
+
+static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ if (!mlxsw_driver) {
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ }
+ if (mlxsw_driver) {
+ if (!try_module_get(mlxsw_driver->owner))
+ mlxsw_driver = NULL;
+ }
+
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ return mlxsw_driver;
+}
+
+static void mlxsw_core_driver_put(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ if (!mlxsw_driver)
+ return;
+ module_put(mlxsw_driver->owner);
+}
+
+static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
+{
+ const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
+
+ mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
+ mlxsw_core_dbg_root);
+ if (!mlxsw_core->dbg_dir)
+ return -ENOMEM;
+ debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
+ mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
+ mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
+ mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
+ debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
+ &mlxsw_core->dbg.vsd_blob);
+ mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
+ mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
+ debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
+ &mlxsw_core->dbg.psid_blob);
+ return 0;
+}
+
+static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
+{
+ debugfs_remove_recursive(mlxsw_core->dbg_dir);
+}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv)
+{
+ const char *device_kind = mlxsw_bus_info->device_kind;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_driver *mlxsw_driver;
+ size_t alloc_size;
+ int err;
+
+ mlxsw_driver = mlxsw_core_driver_get(device_kind);
+ if (!mlxsw_driver)
+ return -EINVAL;
+ alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+ mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_core) {
+ err = -ENOMEM;
+ goto err_core_alloc;
+ }
+
+ INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
+ INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
+ mlxsw_core->driver = mlxsw_driver;
+ mlxsw_core->bus = mlxsw_bus;
+ mlxsw_core->bus_priv = bus_priv;
+ mlxsw_core->bus_info = mlxsw_bus_info;
+
+ mlxsw_core->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
+ if (!mlxsw_core->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
+ if (err)
+ goto err_bus_init;
+
+ err = mlxsw_emad_init(mlxsw_core);
+ if (err)
+ goto err_emad_init;
+
+ err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
+ mlxsw_bus_info);
+ if (err)
+ goto err_driver_init;
+
+ err = mlxsw_core_debugfs_init(mlxsw_core);
+ if (err)
+ goto err_debugfs_init;
+
+ return 0;
+
+err_debugfs_init:
+ mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+err_driver_init:
+ mlxsw_emad_fini(mlxsw_core);
+err_emad_init:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
+ free_percpu(mlxsw_core->pcpu_stats);
+err_alloc_stats:
+ kfree(mlxsw_core);
+err_core_alloc:
+ mlxsw_core_driver_put(device_kind);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_register);
+
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+{
+ const char *device_kind = mlxsw_core->bus_info->device_kind;
+
+ mlxsw_core_debugfs_fini(mlxsw_core);
+ mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ mlxsw_emad_fini(mlxsw_core);
+ mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ free_percpu(mlxsw_core->pcpu_stats);
+ kfree(mlxsw_core);
+ mlxsw_core_driver_put(device_kind);
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
+
+static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
+{
+ return container_of(driver_priv, struct mlxsw_core, driver_priv);
+}
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+ return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
+ tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+ return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
+ tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit);
+
+static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
+ const struct mlxsw_rx_listener *rxl_b)
+{
+ return (rxl_a->func == rxl_b->func &&
+ rxl_a->local_port == rxl_b->local_port &&
+ rxl_a->trap_id == rxl_b->trap_id);
+}
+
+static struct mlxsw_rx_listener_item *
+__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
+ if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
+ rxl_item->priv == priv)
+ return rxl_item;
+ }
+ return NULL;
+}
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ if (rxl_item)
+ return -EEXIST;
+ rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
+ if (!rxl_item)
+ return -ENOMEM;
+ rxl_item->rxl = *rxl;
+ rxl_item->priv = priv;
+
+ list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ if (!rxl_item)
+ return;
+ list_del_rcu(&rxl_item->list);
+ synchronize_rcu();
+ kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *event_listener_item = priv;
+ struct mlxsw_reg_info reg;
+ char *payload;
+ char *op_tlv = mlxsw_emad_op_tlv(skb);
+ char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+ reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+ reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+ payload = mlxsw_emad_reg_payload(op_tlv);
+ event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+ dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+ const struct mlxsw_event_listener *el_b)
+{
+ return (el_a->func == el_b->func &&
+ el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *el_item;
+
+ list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+ if (__is_event_listener_equal(&el_item->el, el) &&
+ el_item->priv == priv)
+ return el_item;
+ }
+ return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ int err;
+ struct mlxsw_event_listener_item *el_item;
+ const struct mlxsw_rx_listener rxl = {
+ .func = mlxsw_core_event_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = el->trap_id,
+ };
+
+ el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ if (el_item)
+ return -EEXIST;
+ el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+ if (!el_item)
+ return -ENOMEM;
+ el_item->el = *el;
+ el_item->priv = priv;
+
+ err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+ if (err)
+ goto err_rx_listener_register;
+
+ /* No reason to save item if we did not manage to register an RX
+ * listener for it.
+ */
+ list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
+
+ return 0;
+
+err_rx_listener_register:
+ kfree(el_item);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_register);
+
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *el_item;
+ const struct mlxsw_rx_listener rxl = {
+ .func = mlxsw_core_event_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = el->trap_id,
+ };
+
+ el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ if (!el_item)
+ return;
+ mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+ list_del(&el_item->list);
+ kfree(el_item);
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+
+static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ int err;
+ char *op_tlv;
+ struct sk_buff *skb;
+ struct mlxsw_tx_info tx_info = {
+ .local_port = MLXSW_PORT_CPU_PORT,
+ .is_emad = true,
+ };
+
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+ if (!skb)
+ return -ENOMEM;
+
+ mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
+ mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+
+ err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
+ if (!err) {
+ op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
+ memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
+ reg->len);
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
+ mlxsw_core->emad.tid - 1);
+ mlxsw_core_buf_dump_dbg(mlxsw_core,
+ mlxsw_core->emad.resp_skb->data,
+ mlxsw_core->emad.resp_skb->len);
+
+ dev_kfree_skb(mlxsw_core->emad.resp_skb);
+ }
+
+ return err;
+}
+
+static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ int err, n_retry;
+ char *in_mbox, *out_mbox, *tmp;
+
+ in_mbox = mlxsw_cmd_mbox_alloc();
+ if (!in_mbox)
+ return -ENOMEM;
+
+ out_mbox = mlxsw_cmd_mbox_alloc();
+ if (!out_mbox) {
+ err = -ENOMEM;
+ goto free_in_mbox;
+ }
+
+ mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+ tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+ mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+
+ n_retry = 0;
+retry:
+ err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+ if (!err) {
+ err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
+ if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+ }
+
+ if (!err)
+ memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+ reg->len);
+
+ mlxsw_core->emad.tid++;
+ mlxsw_cmd_mbox_free(out_mbox);
+free_in_mbox:
+ mlxsw_cmd_mbox_free(in_mbox);
+ return err;
+}
+
+static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ u64 cur_tid;
+ int err;
+
+ if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
+ dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+ return -EINTR;
+ }
+
+ cur_tid = mlxsw_core->emad.tid;
+ dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ /* During initialization EMAD interface is not available to us,
+ * so we default to command interface. We switch to EMAD interface
+ * after setting the appropriate traps.
+ */
+ if (!mlxsw_core->emad.use_emad)
+ err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
+ payload, type);
+ else
+ err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+ payload, type);
+
+ if (err)
+ dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ mutex_unlock(&mlxsw_core->emad.lock);
+ return err;
+}
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload)
+{
+ return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
+}
+EXPORT_SYMBOL(mlxsw_reg_query);
+
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload)
+{
+ return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
+}
+EXPORT_SYMBOL(mlxsw_reg_write);
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ struct mlxsw_rx_info *rx_info)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+ const struct mlxsw_rx_listener *rxl;
+ struct mlxsw_core_pcpu_stats *pcpu_stats;
+ u8 local_port = rx_info->sys_port;
+ bool found = false;
+
+ dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
+ __func__, rx_info->sys_port, rx_info->trap_id);
+
+ if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
+ (local_port >= MLXSW_PORT_MAX_PORTS))
+ goto drop;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
+ rxl = &rxl_item->rxl;
+ if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
+ rxl->local_port == local_port) &&
+ rxl->trap_id == rx_info->trap_id) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!found)
+ goto drop;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->port_rx_packets[local_port]++;
+ pcpu_stats->port_rx_bytes[local_port] += skb->len;
+ pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
+ pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ rxl->func(skb, local_port, rxl_item->priv);
+ return;
+
+drop:
+ if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
+ this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
+ else
+ this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
+ if (local_port >= MLXSW_PORT_MAX_PORTS)
+ this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
+ else
+ this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_receive);
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size)
+{
+ u8 status;
+ int err;
+
+ BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+ if (!mlxsw_core->bus->cmd_exec)
+ return -EOPNOTSUPP;
+
+ dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
+ if (in_mbox) {
+ dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
+ mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
+ }
+
+ err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
+ opcode_mod, in_mod, out_mbox_direct,
+ in_mbox, in_mbox_size,
+ out_mbox, out_mbox_size, &status);
+
+ if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+ dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+ in_mod, status, mlxsw_cmd_status_str(status));
+ } else if (err == -ETIMEDOUT) {
+ dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+ in_mod);
+ }
+
+ if (!err && out_mbox) {
+ dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+ mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_cmd_exec);
+
+static int __init mlxsw_core_module_init(void)
+{
+ mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+ if (!mlxsw_core_dbg_root)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __exit mlxsw_core_module_exit(void)
+{
+ debugfs_remove_recursive(mlxsw_core_dbg_root);
+}
+
+module_init(mlxsw_core_module_init);
+module_exit(mlxsw_core_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch device core driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
new file mode 100644
index 000000000..165808471
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -0,0 +1,207 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_H
+#define _MLXSW_CORE_H
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "trap.h"
+#include "reg.h"
+
+#include "cmd.h"
+
+#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-"
+#define MODULE_MLXSW_DRIVER_ALIAS(kind) \
+ MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
+
+#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+
+struct mlxsw_core;
+struct mlxsw_driver;
+struct mlxsw_bus;
+struct mlxsw_bus_info;
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+
+struct mlxsw_tx_info {
+ u8 local_port;
+ bool is_emad;
+};
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+ const struct mlxsw_tx_info *tx_info);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_rx_listener {
+ void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
+ u8 local_port;
+ u16 trap_id;
+};
+
+struct mlxsw_event_listener {
+ void (*func)(const struct mlxsw_reg_info *reg,
+ char *payload, void *priv);
+ enum mlxsw_event_trap_id trap_id;
+};
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv);
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv);
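+
+/* A minimal listener registration, mirroring the EMAD listener in core.c
+ * (priv is handed back to func for every matching packet); my_rx_func is
+ * a placeholder for the caller's handler:
+ *
+ *	static const struct mlxsw_rx_listener rxl = {
+ *		.func = my_rx_func,
+ *		.local_port = MLXSW_PORT_DONT_CARE,
+ *		.trap_id = MLXSW_TRAP_ID_ETHEMAD,
+ *	};
+ *	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, priv);
+ */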
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv);
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv);
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload);
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload);
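+
+/* Register access works on packed payload buffers, e.g. (taken from
+ * mlxsw_emad_traps_set() in core.c):
+ *
+ *	char htgt_pl[MLXSW_REG_HTGT_LEN];
+ *
+ *	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ *
+ * On a query, the device's reply is copied back into the payload buffer.
+ */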
+
+struct mlxsw_rx_info {
+ u16 sys_port;
+ int trap_id;
+};
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ struct mlxsw_rx_info *rx_info);
+
+#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
+
+struct mlxsw_swid_config {
+ u8 used_type:1,
+ used_properties:1;
+ u8 type;
+ u8 properties;
+};
+
+struct mlxsw_config_profile {
+ u16 used_max_vepa_channels:1,
+ used_max_lag:1,
+ used_max_port_per_lag:1,
+ used_max_mid:1,
+ used_max_pgt:1,
+ used_max_system_port:1,
+ used_max_vlan_groups:1,
+ used_max_regions:1,
+ used_flood_tables:1,
+ used_flood_mode:1,
+ used_max_ib_mc:1,
+ used_max_pkey:1,
+ used_ar_sec:1,
+ used_adaptive_routing_group_cap:1;
+ u8 max_vepa_channels;
+ u16 max_lag;
+ u16 max_port_per_lag;
+ u16 max_mid;
+ u16 max_pgt;
+ u16 max_system_port;
+ u16 max_vlan_groups;
+ u16 max_regions;
+ u8 max_flood_tables;
+ u8 max_vid_flood_tables;
+ u8 flood_mode;
+ u16 max_ib_mc;
+ u16 max_pkey;
+ u8 ar_sec;
+ u16 adaptive_routing_group_cap;
+ u8 arn;
+ struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
+};
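+
+/* The used_* bits above tell the bus driver which of the optional profile
+ * fields were actually filled in by the device driver; fields whose
+ * used_* bit is clear are presumably left at the device's defaults.
+ */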
+
+struct mlxsw_driver {
+ struct list_head list;
+ const char *kind;
+ struct module *owner;
+ size_t priv_size;
+ int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info);
+ void (*fini)(void *driver_priv);
+ void (*txhdr_construct)(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+ u8 txhdr_len;
+ const struct mlxsw_config_profile *profile;
+};
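+
+/* A device driver fills in a struct mlxsw_driver and registers it from its
+ * module init, roughly (the my_* names are placeholders for a real device
+ * driver such as switchx2):
+ *
+ *	static struct mlxsw_driver my_driver = {
+ *		.kind		= MLXSW_DEVICE_KIND_SWITCHX2,
+ *		.owner		= THIS_MODULE,
+ *		.priv_size	= sizeof(struct my_priv),
+ *		.init		= my_init,
+ *		.fini		= my_fini,
+ *		.txhdr_construct = my_txhdr_construct,
+ *		.txhdr_len	= MY_TXHDR_LEN,
+ *		.profile	= &my_profile,
+ *	};
+ *
+ *	mlxsw_core_driver_register(&my_driver);
+ */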
+
+struct mlxsw_bus {
+ const char *kind;
+ int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile);
+ void (*fini)(void *bus_priv);
+ bool (*skb_transmit_busy)(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info);
+ int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+ int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *p_status);
+};
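+
+/* The bus abstraction is implemented by the PCI code in pci.c, which owns
+ * command interface execution and packet transmission on behalf of the core.
+ */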
+
+struct mlxsw_bus_info {
+ const char *device_kind;
+ const char *device_name;
+ struct device *dev;
+ struct {
+ u16 major;
+ u16 minor;
+ u16 subminor;
+ } fw_rev;
+ u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
+ u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+};
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
new file mode 100644
index 000000000..97b6bb5d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -0,0 +1,127 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/emad.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_EMAD_H
+#define _MLXSW_EMAD_H
+
+#define MLXSW_EMAD_MAX_FRAME_LEN 1518 /* Length in u8 */
+#define MLXSW_EMAD_MAX_RETRY 5
+
+/* EMAD Ethernet header */
+#define MLXSW_EMAD_ETH_HDR_LEN 0x10 /* Length in u8 */
+#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01"
+#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03"
+#define MLXSW_EMAD_EH_ETHERTYPE 0x8932
+#define MLXSW_EMAD_EH_MLX_PROTO 0
+#define MLXSW_EMAD_EH_PROTO_VERSION 0
+
+/* EMAD TLV Types */
+enum {
+ MLXSW_EMAD_TLV_TYPE_END,
+ MLXSW_EMAD_TLV_TYPE_OP,
+ MLXSW_EMAD_TLV_TYPE_DR,
+ MLXSW_EMAD_TLV_TYPE_REG,
+ MLXSW_EMAD_TLV_TYPE_USERDATA,
+ MLXSW_EMAD_TLV_TYPE_OOBETH,
+};
+
+/* OP TLV */
+#define MLXSW_EMAD_OP_TLV_LEN 4 /* Length in u32 */
+
+enum {
+ MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1,
+ MLXSW_EMAD_OP_TLV_CLASS_IPC = 2,
+};
+
+enum mlxsw_emad_op_tlv_status {
+ MLXSW_EMAD_OP_TLV_STATUS_SUCCESS,
+ MLXSW_EMAD_OP_TLV_STATUS_BUSY,
+ MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV,
+ MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER,
+ MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE,
+ MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK,
+ MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70,
+};
+
+static inline char *mlxsw_emad_op_tlv_status_str(u8 status)
+{
+ switch (status) {
+ case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+ return "operation performed";
+ case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+ return "device is busy";
+ case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+ return "version not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+ return "unknown TLV";
+ case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+ return "register not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+ return "class not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+ return "method not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+ return "bad parameter";
+ case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+ return "resource not available";
+ case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+ return "acknowledged. retransmit";
+ case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+ return "internal error";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+enum {
+ MLXSW_EMAD_OP_TLV_REQUEST,
+ MLXSW_EMAD_OP_TLV_RESPONSE
+};
+
+enum {
+ MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1,
+ MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2,
+ MLXSW_EMAD_OP_TLV_METHOD_SEND = 3,
+ MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
+};
+
+/* END TLV */
+#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 */
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
new file mode 100644
index 000000000..36fb1cec5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -0,0 +1,407 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/item.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ITEM_H
+#define _MLXSW_ITEM_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+struct mlxsw_item {
+ unsigned short offset; /* bytes in container */
+ unsigned short step; /* step in bytes for indexed items */
+ unsigned short in_step_offset; /* offset within one step */
+ unsigned char shift; /* shift in bits */
+ unsigned char element_size; /* size of element in bit array */
+ bool no_real_shift;
+ union {
+ unsigned char bits;
+ unsigned short bytes;
+ } size;
+ const char *name;
+};
+
+static inline unsigned int
+__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
+ size_t typesize)
+{
+ BUG_ON(index && !item->step);
+ if (item->offset % typesize != 0 ||
+ item->step % typesize != 0 ||
+ item->in_step_offset % typesize != 0) {
+ pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
+ item->name, item->offset, item->step,
+ item->in_step_offset, typesize);
+ BUG();
+ }
+
+ return ((item->offset + item->step * index + item->in_step_offset) /
+ typesize);
+}
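+
+/* For instance (hypothetical numbers), an indexed item with offset=0x10,
+ * step=0x4 and in_step_offset=0 accessed at index 2 through a u32 getter
+ * resolves to 32-bit word (0x10 + 0x4 * 2 + 0) / 4 = 6 of buf.
+ */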
+
+static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
+ __be16 *b = (__be16 *) buf;
+ u16 tmp;
+
+ tmp = be16_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
+ unsigned short index, u16 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u16));
+ __be16 *b = (__be16 *) buf;
+ u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u16 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be16_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be16(tmp);
+}
+
+static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
+ __be32 *b = (__be32 *) buf;
+ u32 tmp;
+
+ tmp = be32_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
+ unsigned short index, u32 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u32));
+ __be32 *b = (__be32 *) buf;
+ u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u32 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be32_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be32(tmp);
+}
+
+static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+ __be64 *b = (__be64 *) buf;
+ u64 tmp;
+
+ tmp = be64_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK_ULL(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
+ unsigned short index, u64 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+ __be64 *b = (__be64 *) buf;
+ u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
+ u64 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be64_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be64(tmp);
+}
+
+static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
+ struct mlxsw_item *item)
+{
+ memcpy(dst, &buf[item->offset], item->size.bytes);
+}
+
+static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
+ struct mlxsw_item *item)
+{
+ memcpy(&buf[item->offset], src, item->size.bytes);
+}
+
+static inline u16
+__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
+{
+ u16 max_index, be_index;
+ u16 offset; /* byte offset inside the array */
+ u8 in_byte_index;
+
+ BUG_ON(index && !item->element_size);
+ if (item->offset % sizeof(u32) != 0 ||
+ BITS_PER_BYTE % item->element_size != 0) {
+ pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
+ item->name, item->offset, item->element_size);
+ BUG();
+ }
+
+ max_index = (item->size.bytes << 3) / item->element_size - 1;
+ be_index = max_index - index;
+ offset = be_index * item->element_size >> 3;
+ in_byte_index = index % (BITS_PER_BYTE / item->element_size);
+ *shift = in_byte_index * item->element_size;
+
+ return item->offset + offset;
+}
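+
+/* Elements are numbered from the end of the array (big-endian): for a
+ * hypothetical 4-byte array of 2-bit elements, element 0 maps to bits
+ * [1:0] of the last byte (offset + 3, shift 0) and element 15 maps to
+ * bits [7:6] of the first byte (offset + 0, shift 6).
+ */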
+
+static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
+ u16 index)
+{
+ u8 shift, tmp;
+ u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+
+ tmp = buf[offset];
+ tmp >>= shift;
+ tmp &= GENMASK(item->element_size - 1, 0);
+ return tmp;
+}
+
+static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
+ u16 index, u8 val)
+{
+ u8 shift, tmp;
+ u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+ u8 mask = GENMASK(item->element_size - 1, 0) << shift;
+
+ val <<= shift;
+ val &= mask;
+ tmp = buf[offset];
+ tmp &= ~mask;
+ tmp |= val;
+ buf[offset] = tmp;
+}
+
+#define __ITEM_NAME(_type, _cname, _iname) \
+ mlxsw_##_type##_##_cname##_##_iname##_item
+
+/* _type: cmd_mbox, reg, etc.
+ * _cname: container name (e.g. command name, register name)
+ * _iname: item name within the container
+ */
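+
+/* For example, core.c defines
+ *
+ *	MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+ *
+ * which generates mlxsw_emad_op_tlv_type_get(buf) and
+ * mlxsw_emad_op_tlv_type_set(buf, val) accessors for a 5-bit field at
+ * bit offset 27 of the 32-bit word at byte offset 0x00.
+ */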
+
+#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+{ \
+ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u16 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u16 val) \
+{ \
+ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u32 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u32 val) \
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+{ \
+ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift, \
+ _sizebits, _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u64 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u64 val) \
+{ \
+ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \
+{ \
+ __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src) \
+{ \
+ __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname)); \
+}
+
+#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
+ _element_size) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .element_size = _element_size, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u8 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index) \
+{ \
+ return __mlxsw_item_bit_array_get(buf, \
+ &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
+{ \
+	__mlxsw_item_bit_array_set(buf, \
+				   &__ITEM_NAME(_type, _cname, _iname), \
+				   index, val); \
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
new file mode 100644
index 000000000..cef866c37
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -0,0 +1,1826 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/log2.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+
+#include "pci.h"
+#include "core.h"
+#include "cmd.h"
+#include "port.h"
+
+static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
+
+static const struct pci_device_id mlxsw_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+ {0, }
+};
+
+static struct dentry *mlxsw_pci_dbg_root;
+
+static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
+{
+ switch (id->device) {
+ case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
+ return MLXSW_DEVICE_KIND_SWITCHX2;
+ default:
+ BUG();
+ }
+}
+
+#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
+ iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+#define mlxsw_pci_read32(mlxsw_pci, reg) \
+ ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
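+
+/* For illustration: a call such as mlxsw_pci_write32(mlxsw_pci, CIR_CTRL, val)
+ * pastes the register name onto the MLXSW_PCI_ prefix and so expands to
+ * iowrite32be(val, mlxsw_pci->hw_addr + MLXSW_PCI_CIR_CTRL).
+ */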
+
+enum mlxsw_pci_queue_type {
+ MLXSW_PCI_QUEUE_TYPE_SDQ,
+ MLXSW_PCI_QUEUE_TYPE_RDQ,
+ MLXSW_PCI_QUEUE_TYPE_CQ,
+ MLXSW_PCI_QUEUE_TYPE_EQ,
+};
+
+static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
+{
+ switch (q_type) {
+ case MLXSW_PCI_QUEUE_TYPE_SDQ:
+ return "sdq";
+ case MLXSW_PCI_QUEUE_TYPE_RDQ:
+ return "rdq";
+ case MLXSW_PCI_QUEUE_TYPE_CQ:
+ return "cq";
+ case MLXSW_PCI_QUEUE_TYPE_EQ:
+ return "eq";
+ }
+ BUG();
+}
+
+#define MLXSW_PCI_QUEUE_TYPE_COUNT 4
+
+static const u16 mlxsw_pci_doorbell_type_offset[] = {
+ MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
+ MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
+ MLXSW_PCI_DOORBELL_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+ MLXSW_PCI_DOORBELL_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
+ 0, /* unused */
+ 0, /* unused */
+ MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+ MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+struct mlxsw_pci_mem_item {
+ char *buf;
+ dma_addr_t mapaddr;
+ size_t size;
+};
+
+struct mlxsw_pci_queue_elem_info {
+ char *elem; /* pointer to actual dma mapped element mem chunk */
+ union {
+ struct {
+ struct sk_buff *skb;
+ } sdq;
+ struct {
+ struct sk_buff *skb;
+ } rdq;
+ } u;
+};
+
+struct mlxsw_pci_queue {
+ spinlock_t lock; /* for queue accesses */
+ struct mlxsw_pci_mem_item mem_item;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ u16 producer_counter;
+ u16 consumer_counter;
+ u16 count; /* number of elements in queue */
+ u8 num; /* queue number */
+ u8 elem_size; /* size of one element */
+ enum mlxsw_pci_queue_type type;
+ struct tasklet_struct tasklet; /* queue processing tasklet */
+ struct mlxsw_pci *pci;
+ union {
+ struct {
+ u32 comp_sdq_count;
+ u32 comp_rdq_count;
+ } cq;
+ struct {
+ u32 ev_cmd_count;
+ u32 ev_comp_count;
+ u32 ev_other_count;
+ } eq;
+ } u;
+};
+
+struct mlxsw_pci_queue_type_group {
+ struct mlxsw_pci_queue *q;
+ u8 count; /* number of queues in group */
+};
+
+struct mlxsw_pci {
+ struct pci_dev *pdev;
+ u8 __iomem *hw_addr;
+ struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
+ u32 doorbell_offset;
+ struct msix_entry msix_entry;
+ struct mlxsw_core *core;
+ struct {
+ u16 num_pages;
+ struct mlxsw_pci_mem_item *items;
+ } fw_area;
+ struct {
+ struct mlxsw_pci_mem_item out_mbox;
+ struct mlxsw_pci_mem_item in_mbox;
+ struct mutex lock; /* Lock access to command registers */
+ bool nopoll;
+ wait_queue_head_t wait;
+ bool wait_done;
+ struct {
+ u8 status;
+ u64 out_param;
+ } comp;
+ } cmd;
+ struct mlxsw_bus_info bus_info;
+ struct dentry *dbg_dir;
+};
+
+static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+{
+ tasklet_schedule(&q->tasklet);
+}
+
+static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
+ size_t elem_size, int elem_index)
+{
+ return q->mem_item.buf + (elem_size * elem_index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+ return &q->elem_info[elem_index];
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
+{
+ int index = q->producer_counter & (q->count - 1);
+
+ if ((q->producer_counter - q->consumer_counter) == q->count)
+ return NULL;
+ return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
+{
+ int index = q->consumer_counter & (q->count - 1);
+
+ return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+ return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
+}
+
+static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
+{
+ return owner_bit != !!(q->consumer_counter & q->count);
+}
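+
+/* Illustrative reading of the check above: q->count is a power of two, so
+ * (consumer_counter & q->count) isolates the wrap-parity bit: 0 on the first
+ * pass over the ring, 1 on the second, and so on. Elements are initialized
+ * with owner == 1 (see the CQ/EQ init below), and the comparison assumes the
+ * hardware stamps each completed element with the parity of the current pass,
+ * so an element is software-owned only when its owner bit equals that parity.
+ */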
+
+static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
+ u32 (*get_elem_owner_func)(char *))
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *elem;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ elem = elem_info->elem;
+ owner_bit = get_elem_owner_func(elem);
+ if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+ return NULL;
+ q->consumer_counter++;
+ rmb(); /* make sure we read owned bit before the rest of elem */
+ return elem;
+}
+
+static struct mlxsw_pci_queue_type_group *
+mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type)
+{
+ return &mlxsw_pci->queues[q_type];
+}
+
+static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type)
+{
+ struct mlxsw_pci_queue_type_group *queue_group;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
+ return queue_group->count;
+}
+
+static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
+}
+
+static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
+}
+
+static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
+}
+
+static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
+}
+
+static struct mlxsw_pci_queue *
+__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type, u8 q_num)
+{
+ return &mlxsw_pci->queues[q_type].q[q_num];
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci,
+ MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci,
+ MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+}
+
+static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 val)
+{
+ mlxsw_pci_write32(mlxsw_pci,
+ DOORBELL(mlxsw_pci->doorbell_offset,
+ mlxsw_pci_doorbell_type_offset[q->type],
+ q->num), val);
+}
+
+static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 val)
+{
+ mlxsw_pci_write32(mlxsw_pci,
+ DOORBELL(mlxsw_pci->doorbell_offset,
+ mlxsw_pci_doorbell_arm_type_offset[q->type],
+ q->num), val);
+}
+
+static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
+}
+
+static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
+ q->consumer_counter + q->count);
+}
+
+static void
+mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
+}
+
+static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
+ int page_index)
+{
+ return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
+}
+
+static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->producer_counter = 0;
+ q->consumer_counter = 0;
+
+ /* Set the CQ with the same number as this SDQ. */
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+ mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
+ mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+ }
+
+ err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+ seq_printf(file, hdr);
+ for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_sdq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %5d\n",
+ i, q->producer_counter, q->consumer_counter,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
+ int index, char *frag_data, size_t frag_len,
+ int direction)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ dma_addr_t mapaddr;
+
+ mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
+ if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+ if (net_ratelimit())
+ dev_err(&pdev->dev, "failed to dma map tx frag\n");
+ return -EIO;
+ }
+ mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+ mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+ return 0;
+}
+
+static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
+ int index, int direction)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
+ dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
+
+ if (!frag_len)
+ return;
+ pci_unmap_single(pdev, mapaddr, frag_len, direction);
+}
+
+static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue_elem_info *elem_info)
+{
+ size_t buf_len = MLXSW_PORT_MAX_MTU;
+ char *wqe = elem_info->elem;
+ struct sk_buff *skb;
+ int err;
+
+ elem_info->u.rdq.skb = NULL;
+ skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Assume that wqe was previously zeroed. */
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ buf_len, DMA_FROM_DEVICE);
+ if (err)
+ goto err_frag_map;
+
+ elem_info->u.rdq.skb = skb;
+ return 0;
+
+err_frag_map:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue_elem_info *elem_info)
+{
+ struct sk_buff *skb;
+ char *wqe;
+
+ skb = elem_info->u.rdq.skb;
+ wqe = elem_info->elem;
+
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+}
+
+static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ int i;
+ int err;
+
+ q->producer_counter = 0;
+ q->consumer_counter = 0;
+
+ /* Set the CQ with the same number as this RDQ, offset by
+ * MLXSW_PCI_SDQS_COUNT, as the lower CQ numbers are assigned to the SDQs.
+ */
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+ mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+ }
+
+ err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+ for (i = 0; i < q->count; i++) {
+ elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+ BUG_ON(!elem_info);
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err)
+ goto rollback;
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ }
+
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--) {
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ }
+ mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+
+ return err;
+}
+
+static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ int i;
+
+ mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+ for (i = 0; i < q->count; i++) {
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ }
+}
+
+static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+ seq_printf(file, hdr);
+ for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_rdq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %5d\n",
+ i, q->producer_counter, q->consumer_counter,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_cqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+ mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+ mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n";
+
+ seq_printf(file, hdr);
+ for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_cq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %10d %5d\n",
+ i, q->consumer_counter, q->u.cq.comp_sdq_count,
+ q->u.cq.comp_rdq_count, q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 consumer_counter_limit,
+ char *cqe)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ struct sk_buff *skb;
+ int i;
+
+ spin_lock(&q->lock);
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ skb = elem_info->u.sdq.skb;
+ wqe = elem_info->elem;
+ for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ elem_info->u.sdq.skb = NULL;
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
+ spin_unlock(&q->lock);
+}
+
+static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 consumer_counter_limit,
+ char *cqe)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ struct sk_buff *skb;
+ struct mlxsw_rx_info rx_info;
+ u16 byte_count;
+ int err;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ skb = elem_info->u.rdq.skb;
+ if (!skb)
+ return;
+ wqe = elem_info->elem;
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
+ /* We do not support lag now */
+ if (mlxsw_pci_cqe_lag_get(cqe))
+ goto drop;
+
+ rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
+ rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+
+ byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+ if (mlxsw_pci_cqe_crc_get(cqe))
+ byte_count -= ETH_FCS_LEN;
+ skb_put(skb, byte_count);
+ mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+put_new_skb:
+ memset(wqe, 0, q->elem_size);
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err && net_ratelimit())
+ dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ return;
+
+drop:
+ dev_kfree_skb_any(skb);
+ goto put_new_skb;
+}
+
+static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
+{
+ return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+}
+
+static void mlxsw_pci_cq_tasklet(unsigned long data)
+{
+ struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ char *cqe;
+ int items = 0;
+ int credits = q->count >> 1;
+
+ while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+ u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+ u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
+ u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+
+ if (sendq) {
+ struct mlxsw_pci_queue *sdq;
+
+ sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
+ mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+ wqe_counter, cqe);
+ q->u.cq.comp_sdq_count++;
+ } else {
+ struct mlxsw_pci_queue *rdq;
+
+ rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ wqe_counter, cqe);
+ q->u.cq.comp_rdq_count++;
+ }
+ if (++items == credits)
+ break;
+ }
+ if (items) {
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ }
+}
+
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_eqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+ mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+ mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n";
+
+ seq_printf(file, hdr);
+ for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_eq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
+ i, q->consumer_counter, q->u.eq.ev_cmd_count,
+ q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+{
+ mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
+ mlxsw_pci->cmd.comp.out_param =
+ ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
+ mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
+ mlxsw_pci->cmd.wait_done = true;
+ wake_up(&mlxsw_pci->cmd.wait);
+}
+
+static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
+{
+ return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+}
+
+static void mlxsw_pci_eq_tasklet(unsigned long data)
+{
+ struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+ char *eqe;
+ u8 cqn;
+ bool cq_handle = false;
+ int items = 0;
+ int credits = q->count >> 1;
+
+ memset(&active_cqns, 0, sizeof(active_cqns));
+
+ while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+ u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
+
+ switch (event_type) {
+ case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+ mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
+ q->u.eq.ev_cmd_count++;
+ break;
+ case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+ cqn = mlxsw_pci_eqe_cqn_get(eqe);
+ set_bit(cqn, active_cqns);
+ cq_handle = true;
+ q->u.eq.ev_comp_count++;
+ break;
+ default:
+ q->u.eq.ev_other_count++;
+ }
+ if (++items == credits)
+ break;
+ }
+ if (items) {
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ }
+
+ if (!cq_handle)
+ return;
+ for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+ q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
+ mlxsw_pci_queue_tasklet_schedule(q);
+ }
+}
+
+struct mlxsw_pci_queue_ops {
+ const char *name;
+ enum mlxsw_pci_queue_type type;
+ int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q);
+ void (*fini)(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q);
+ void (*tasklet)(unsigned long data);
+ int (*dbg_read)(struct seq_file *s, void *data);
+ u16 elem_count;
+ u8 elem_size;
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_SDQ,
+ .init = mlxsw_pci_sdq_init,
+ .fini = mlxsw_pci_sdq_fini,
+ .dbg_read = mlxsw_pci_sdq_dbg_read,
+ .elem_count = MLXSW_PCI_WQE_COUNT,
+ .elem_size = MLXSW_PCI_WQE_SIZE,
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_RDQ,
+ .init = mlxsw_pci_rdq_init,
+ .fini = mlxsw_pci_rdq_fini,
+ .dbg_read = mlxsw_pci_rdq_dbg_read,
+ .elem_count = MLXSW_PCI_WQE_COUNT,
+ .elem_size = MLXSW_PCI_WQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_CQ,
+ .init = mlxsw_pci_cq_init,
+ .fini = mlxsw_pci_cq_fini,
+ .tasklet = mlxsw_pci_cq_tasklet,
+ .dbg_read = mlxsw_pci_cq_dbg_read,
+ .elem_count = MLXSW_PCI_CQE_COUNT,
+ .elem_size = MLXSW_PCI_CQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_EQ,
+ .init = mlxsw_pci_eq_init,
+ .fini = mlxsw_pci_eq_fini,
+ .tasklet = mlxsw_pci_eq_tasklet,
+ .dbg_read = mlxsw_pci_eq_dbg_read,
+ .elem_count = MLXSW_PCI_EQE_COUNT,
+ .elem_size = MLXSW_PCI_EQE_SIZE
+};
+
+static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ struct mlxsw_pci_queue *q, u8 q_num)
+{
+ struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+ int i;
+ int err;
+
+ spin_lock_init(&q->lock);
+ q->num = q_num;
+ q->count = q_ops->elem_count;
+ q->elem_size = q_ops->elem_size;
+ q->type = q_ops->type;
+ q->pci = mlxsw_pci;
+
+ if (q_ops->tasklet)
+ tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+
+ mem_item->size = MLXSW_PCI_AQ_SIZE;
+ mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+ mem_item->size,
+ &mem_item->mapaddr);
+ if (!mem_item->buf)
+ return -ENOMEM;
+ memset(mem_item->buf, 0, mem_item->size);
+
+ q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
+ if (!q->elem_info) {
+ err = -ENOMEM;
+ goto err_elem_info_alloc;
+ }
+
+ /* Initialize dma mapped elements info elem_info for
+ * future easy access.
+ */
+ for (i = 0; i < q->count; i++) {
+ struct mlxsw_pci_queue_elem_info *elem_info;
+
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ elem_info->elem =
+ __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+ }
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = q_ops->init(mlxsw_pci, mbox, q);
+ if (err)
+ goto err_q_ops_init;
+ return 0;
+
+err_q_ops_init:
+ kfree(q->elem_info);
+err_elem_info_alloc:
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ return err;
+}
+
+static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+
+ q_ops->fini(mlxsw_pci, q);
+ kfree(q->elem_info);
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+}
+
+static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ u8 num_qs)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_type_group *queue_group;
+ char tmp[16];
+ int i;
+ int err;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+ queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
+ if (!queue_group->q)
+ return -ENOMEM;
+
+ for (i = 0; i < num_qs; i++) {
+ err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
+ &queue_group->q[i], i);
+ if (err)
+ goto err_queue_init;
+ }
+ queue_group->count = num_qs;
+
+ sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
+ debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
+ q_ops->dbg_read);
+
+ return 0;
+
+err_queue_init:
+ for (i--; i >= 0; i--)
+ mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+ kfree(queue_group->q);
+ return err;
+}
+
+static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue_ops *q_ops)
+{
+ struct mlxsw_pci_queue_type_group *queue_group;
+ int i;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+ for (i = 0; i < queue_group->count; i++)
+ mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+ kfree(queue_group->q);
+}
+
+static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ u8 num_sdqs;
+ u8 sdq_log2sz;
+ u8 num_rdqs;
+ u8 rdq_log2sz;
+ u8 num_cqs;
+ u8 cq_log2sz;
+ u8 num_eqs;
+ u8 eq_log2sz;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
+ if (err)
+ return err;
+
+ num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
+ sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
+ num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
+ rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
+ num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
+ cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+ num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
+ eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
+
+ if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
+ (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
+ (num_cqs != MLXSW_PCI_CQS_COUNT) ||
+ (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+ dev_err(&pdev->dev, "Unsupported number of queues\n");
+ return -EINVAL;
+ }
+
+ if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+ (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+ (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+ (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
+ dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
+ num_eqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize event queues\n");
+ return err;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
+ num_cqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize completion queues\n");
+ goto err_cqs_init;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
+ num_sdqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
+ goto err_sdqs_init;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
+ num_rdqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
+ goto err_rdqs_init;
+ }
+
+ /* Event queues are now usable; the command interface no longer has to poll */
+ mlxsw_pci->cmd.nopoll = true;
+ return 0;
+
+err_rdqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+err_sdqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+err_cqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+ return err;
+}
+
+static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci->cmd.nopoll = false;
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+}
+
+static void
+mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
+ char *mbox, int index,
+ const struct mlxsw_swid_config *swid)
+{
+ u8 mask = 0;
+
+ if (swid->used_type) {
+ mlxsw_cmd_mbox_config_profile_swid_config_type_set(
+ mbox, index, swid->type);
+ mask |= 1;
+ }
+ if (swid->used_properties) {
+ mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
+ mbox, index, swid->properties);
+ mask |= 2;
+ }
+ mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
+}
+
+static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_config_profile *profile)
+{
+ int i;
+
+ mlxsw_cmd_mbox_zero(mbox);
+
+ if (profile->used_max_vepa_channels) {
+ mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
+ mbox, profile->max_vepa_channels);
+ }
+ if (profile->used_max_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_lag_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_lag_set(
+ mbox, profile->max_lag);
+ }
+ if (profile->used_max_port_per_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
+ mbox, profile->max_port_per_lag);
+ }
+ if (profile->used_max_mid) {
+ mlxsw_cmd_mbox_config_profile_set_max_mid_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_mid_set(
+ mbox, profile->max_mid);
+ }
+ if (profile->used_max_pgt) {
+ mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_pgt_set(
+ mbox, profile->max_pgt);
+ }
+ if (profile->used_max_system_port) {
+ mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_system_port_set(
+ mbox, profile->max_system_port);
+ }
+ if (profile->used_max_vlan_groups) {
+ mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
+ mbox, profile->max_vlan_groups);
+ }
+ if (profile->used_max_regions) {
+ mlxsw_cmd_mbox_config_profile_set_max_regions_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_regions_set(
+ mbox, profile->max_regions);
+ }
+ if (profile->used_flood_tables) {
+ mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
+ mbox, profile->max_flood_tables);
+ mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
+ mbox, profile->max_vid_flood_tables);
+ }
+ if (profile->used_flood_mode) {
+ mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_flood_mode_set(
+ mbox, profile->flood_mode);
+ }
+ if (profile->used_max_ib_mc) {
+ mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
+ mbox, profile->max_ib_mc);
+ }
+ if (profile->used_max_pkey) {
+ mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_pkey_set(
+ mbox, profile->max_pkey);
+ }
+ if (profile->used_ar_sec) {
+ mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_ar_sec_set(
+ mbox, profile->ar_sec);
+ }
+ if (profile->used_adaptive_routing_group_cap) {
+ mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
+ mbox, profile->adaptive_routing_group_cap);
+ }
+
+ for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
+ mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
+ &profile->swid_config[i]);
+
+ return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
+}
+
+static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+ struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
+ if (err)
+ return err;
+ mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
+ mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
+ return 0;
+}
+
+static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ u16 num_pages)
+{
+ struct mlxsw_pci_mem_item *mem_item;
+ int i;
+ int err;
+
+ mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
+ GFP_KERNEL);
+ if (!mlxsw_pci->fw_area.items)
+ return -ENOMEM;
+ mlxsw_pci->fw_area.num_pages = num_pages;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ for (i = 0; i < num_pages; i++) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ mem_item->size = MLXSW_PCI_PAGE_SIZE;
+ mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+ mem_item->size,
+ &mem_item->mapaddr);
+ if (!mem_item->buf) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
+ mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+ }
+
+ err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
+ if (err)
+ goto err_cmd_map_fa;
+
+ return 0;
+
+err_cmd_map_fa:
+err_alloc:
+ for (i--; i >= 0; i--) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ }
+ kfree(mlxsw_pci->fw_area.items);
+ return err;
+}
+
+static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ struct mlxsw_pci_mem_item *mem_item;
+ int i;
+
+ mlxsw_cmd_unmap_fa(mlxsw_pci->core);
+
+ for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ }
+ kfree(mlxsw_pci->fw_area.items);
+}
+
+static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_id;
+ struct mlxsw_pci_queue *q;
+ int i;
+
+ for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
+ q = mlxsw_pci_eq_get(mlxsw_pci, i);
+ mlxsw_pci_queue_tasklet_schedule(q);
+ }
+ return IRQ_HANDLED;
+}
+
+static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_mem_item *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ int err = 0;
+
+ mbox->size = MLXSW_CMD_MBOX_SIZE;
+ mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
+ &mbox->mapaddr);
+ if (!mbox->buf) {
+ dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
+ err = -ENOMEM;
+ }
+
+ return err;
+}
+
+static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_mem_item *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+
+ pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
+ mbox->mapaddr);
+}
+
+static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ char *mbox;
+ u16 num_pages;
+ int err;
+
+ mutex_init(&mlxsw_pci->cmd.lock);
+ init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+ mlxsw_pci->core = mlxsw_core;
+
+ mbox = mlxsw_cmd_mbox_alloc();
+ if (!mbox)
+ return -ENOMEM;
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+ if (err)
+ goto mbox_put;
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ if (err)
+ goto err_out_mbox_alloc;
+
+ err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+ if (err)
+ goto err_query_fw;
+
+ mlxsw_pci->bus_info.fw_rev.major =
+ mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+ mlxsw_pci->bus_info.fw_rev.minor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+ mlxsw_pci->bus_info.fw_rev.subminor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
+ dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
+ err = -EINVAL;
+ goto err_iface_rev;
+ }
+ if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
+ err = -EINVAL;
+ goto err_doorbell_page_bar;
+ }
+
+ mlxsw_pci->doorbell_offset =
+ mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
+
+ num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
+ err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
+ if (err)
+ goto err_fw_area_init;
+
+ err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
+ if (err)
+ goto err_boardinfo;
+
+ err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+ if (err)
+ goto err_config_profile;
+
+ err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
+ if (err)
+ goto err_aqs_init;
+
+ err = request_irq(mlxsw_pci->msix_entry.vector,
+ mlxsw_pci_eq_irq_handler, 0,
+ mlxsw_pci_driver_name, mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "IRQ request failed\n");
+ goto err_request_eq_irq;
+ }
+
+ goto mbox_put;
+
+err_request_eq_irq:
+ mlxsw_pci_aqs_fini(mlxsw_pci);
+err_aqs_init:
+err_config_profile:
+err_boardinfo:
+ mlxsw_pci_fw_area_fini(mlxsw_pci);
+err_fw_area_init:
+err_doorbell_page_bar:
+err_iface_rev:
+err_query_fw:
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+err_out_mbox_alloc:
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+mbox_put:
+ mlxsw_cmd_mbox_free(mbox);
+ return err;
+}
+
+static void mlxsw_pci_fini(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+ mlxsw_pci_aqs_fini(mlxsw_pci);
+ mlxsw_pci_fw_area_fini(mlxsw_pci);
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+}
+
+static struct mlxsw_pci_queue *
+mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_tx_info *tx_info)
+{
+ u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+
+ return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
+}
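+
+/* For example, with MLXSW_PCI_SDQS_COUNT == 24 send queues, local port 1 is
+ * served by SDQ 1 and local port 25 wraps around to SDQ 1 as well, spreading
+ * the ports across the available SDQs.
+ */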
+
+static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+
+ return !mlxsw_pci_queue_elem_info_producer_get(q);
+}
+
+static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct mlxsw_pci_queue *q;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ int i;
+ int err;
+
+ if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+ }
+
+ q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+ spin_lock_bh(&q->lock);
+ elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+ if (!elem_info) {
+ /* queue is full */
+ err = -EAGAIN;
+ goto unlock;
+ }
+ elem_info->u.sdq.skb = skb;
+
+ wqe = elem_info->elem;
+ mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
+ mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+ mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (err)
+ goto unmap_frags;
+ }
+
+ /* Set unused sq entries byte count to zero. */
+ for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+ mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+
+ /* Everything is set up, ring producer doorbell to get HW going */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+ goto unlock;
+
+unmap_frags:
+ for (; i >= 0; i--)
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+unlock:
+ spin_unlock_bh(&q->lock);
+ return err;
+}
+
+static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *p_status)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
+ dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
+ bool evreq = mlxsw_pci->cmd.nopoll;
+ unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
+ bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+ int err;
+
+ *p_status = MLXSW_CMD_STATUS_OK;
+
+ err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
+ if (err)
+ return err;
+
+ if (in_mbox)
+ memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
+
+ mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
+ mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
+
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
+ mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
+
+ *p_wait_done = false;
+
+ wmb(); /* all needs to be written before we write control register */
+ mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
+ MLXSW_PCI_CIR_CTRL_GO_BIT |
+ (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
+ (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
+ opcode);
+
+ if (!evreq) {
+ unsigned long end;
+
+ end = jiffies + timeout;
+ do {
+ u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+
+ if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+ *p_wait_done = true;
+ *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, end));
+ } else {
+ wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
+ *p_status = mlxsw_pci->cmd.comp.status;
+ }
+
+ err = 0;
+ if (*p_wait_done) {
+ if (*p_status)
+ err = -EIO;
+ } else {
+ err = -ETIMEDOUT;
+ }
+
+ if (!err && out_mbox && out_mbox_direct) {
+ /* Some commands don't use output param as address to mailbox
+ * but they store output directly into registers. In that case,
+ * copy registers into mbox buffer.
+ */
+ __be32 tmp;
+
+ if (!evreq) {
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_HI));
+ memcpy(out_mbox, &tmp, sizeof(tmp));
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_LO));
+ memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
+ }
+ } else if (!err && out_mbox)
+ memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+
+ mutex_unlock(&mlxsw_pci->cmd.lock);
+
+ return err;
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+ .kind = "pci",
+ .init = mlxsw_pci_init,
+ .fini = mlxsw_pci_fini,
+ .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
+ .skb_transmit = mlxsw_pci_skb_transmit,
+ .cmd_exec = mlxsw_pci_cmd_exec,
+};
+
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+ /* Current firmware does not let us know when the reset is done.
+ * So we just wait here for a constant time and hope for the best.
+ */
+ msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ return 0;
+}
+
+static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mlxsw_pci *mlxsw_pci;
+ int err;
+
+ mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
+ if (!mlxsw_pci)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto err_pci_enable_device;
+ }
+
+ err = pci_request_regions(pdev, mlxsw_pci_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_pci_request_regions;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ }
+
+ if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
+ dev_err(&pdev->dev, "invalid PCI region size\n");
+ err = -EINVAL;
+ goto err_pci_resource_len_check;
+ }
+
+ mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!mlxsw_pci->hw_addr) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+ pci_set_master(pdev);
+
+ mlxsw_pci->pdev = pdev;
+ pci_set_drvdata(pdev, mlxsw_pci);
+
+ err = mlxsw_pci_sw_reset(mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "Software reset failed\n");
+ goto err_sw_reset;
+ }
+
+ err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
+ if (err) {
+ dev_err(&pdev->dev, "MSI-X init failed\n");
+ goto err_msix_init;
+ }
+
+ mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
+ mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
+ mlxsw_pci->bus_info.dev = &pdev->dev;
+
+ mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
+ mlxsw_pci_dbg_root);
+ if (!mlxsw_pci->dbg_dir) {
+ dev_err(&pdev->dev, "Failed to create debugfs dir\n");
+ err = -ENOMEM;
+ goto err_dbg_create_dir;
+ }
+
+ err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
+ &mlxsw_pci_bus, mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register bus device\n");
+ goto err_bus_device_register;
+ }
+
+ return 0;
+
+err_bus_device_register:
+ debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+err_dbg_create_dir:
+ pci_disable_msix(mlxsw_pci->pdev);
+err_msix_init:
+err_sw_reset:
+ iounmap(mlxsw_pci->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+ pci_release_regions(pdev);
+err_pci_request_regions:
+ pci_disable_device(pdev);
+err_pci_enable_device:
+ kfree(mlxsw_pci);
+ return err;
+}
+
+static void mlxsw_pci_remove(struct pci_dev *pdev)
+{
+ struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core);
+ debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+ pci_disable_msix(mlxsw_pci->pdev);
+ iounmap(mlxsw_pci->hw_addr);
+ pci_release_regions(mlxsw_pci->pdev);
+ pci_disable_device(mlxsw_pci->pdev);
+ kfree(mlxsw_pci);
+}
+
+static struct pci_driver mlxsw_pci_driver = {
+ .name = mlxsw_pci_driver_name,
+ .id_table = mlxsw_pci_id_table,
+ .probe = mlxsw_pci_probe,
+ .remove = mlxsw_pci_remove,
+};
+
+static int __init mlxsw_pci_module_init(void)
+{
+ int err;
+
+ mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
+ if (!mlxsw_pci_dbg_root)
+ return -ENOMEM;
+ err = pci_register_driver(&mlxsw_pci_driver);
+ if (err)
+ goto err_register_driver;
+ return 0;
+
+err_register_driver:
+ debugfs_remove_recursive(mlxsw_pci_dbg_root);
+ return err;
+}
+
+static void __exit mlxsw_pci_module_exit(void)
+{
+ pci_unregister_driver(&mlxsw_pci_driver);
+ debugfs_remove_recursive(mlxsw_pci_dbg_root);
+}
+
+module_init(mlxsw_pci_module_init);
+module_exit(mlxsw_pci_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
+MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
new file mode 100644
index 000000000..1ef9664b4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -0,0 +1,227 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PCI_H
+#define _MLXSW_PCI_H
+
+#include <linux/bitops.h>
+
+#include "item.h"
+
+#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
+#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */
+#define MLXSW_PCI_PAGE_SIZE 4096
+
+#define MLXSW_PCI_CIR_BASE 0x71000
+#define MLXSW_PCI_CIR_IN_PARAM_HI MLXSW_PCI_CIR_BASE
+#define MLXSW_PCI_CIR_IN_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x04)
+#define MLXSW_PCI_CIR_IN_MODIFIER (MLXSW_PCI_CIR_BASE + 0x08)
+#define MLXSW_PCI_CIR_OUT_PARAM_HI (MLXSW_PCI_CIR_BASE + 0x0C)
+#define MLXSW_PCI_CIR_OUT_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x10)
+#define MLXSW_PCI_CIR_TOKEN (MLXSW_PCI_CIR_BASE + 0x14)
+#define MLXSW_PCI_CIR_CTRL (MLXSW_PCI_CIR_BASE + 0x18)
+#define MLXSW_PCI_CIR_CTRL_GO_BIT BIT(23)
+#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT BIT(22)
+#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT 12
+#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24
+#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
+
+#define MLXSW_PCI_SW_RESET 0xF0010
+#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+
+#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000
+#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200
+#define MLXSW_PCI_DOORBELL_CQ_OFFSET 0x400
+#define MLXSW_PCI_DOORBELL_EQ_OFFSET 0x600
+#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET 0x800
+#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET 0xA00
+
+#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
+ ((offset) + (type_offset) + (num) * 4)
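+
+/* Worked example (illustrative): using the doorbell page offset reported by
+ * QUERY_FW, the doorbell of CQ number 5 is written at
+ * doorbell_offset + MLXSW_PCI_DOORBELL_CQ_OFFSET + 5 * 4, i.e.
+ * doorbell_offset + 0x414.
+ */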
+
+#define MLXSW_PCI_RDQS_COUNT 24
+#define MLXSW_PCI_SDQS_COUNT 24
+#define MLXSW_PCI_CQS_COUNT (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_EQS_COUNT 2
+#define MLXSW_PCI_EQ_ASYNC_NUM 0
+#define MLXSW_PCI_EQ_COMP_NUM 1
+
+#define MLXSW_PCI_AQ_PAGES 8
+#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
+#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
+#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
+#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
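+
+/* For reference: MLXSW_PCI_AQ_SIZE is 8 * 4096 = 32768 bytes per queue, which
+ * yields 1024 WQEs (32 bytes each) per descriptor queue and 2048 CQEs/EQEs
+ * (16 bytes each) per completion/event queue.
+ */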
+
+#define MLXSW_PCI_WQE_SG_ENTRIES 3
+#define MLXSW_PCI_WQE_TYPE_ETHERNET 0xA
+
+/* pci_wqe_c
+ * If set it indicates that a completion should be reported upon
+ * execution of this descriptor.
+ */
+MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
+
+/* pci_wqe_lp
+ * Local Processing, set if packet should be processed by the local
+ * switch hardware:
+ * For Ethernet EMAD (Direct Route and non Direct Route) -
+ * must be set if packet destination is local device
+ * For InfiniBand CTL - must be set if packet destination is local device
+ * Otherwise it must be clear
+ * Local Process packets must not exceed the size of 2K (including payload
+ * and headers).
+ */
+MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
+
+/* pci_wqe_type
+ * Packet type.
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather entries must be 2-byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets, including the additional two
+ * reserved bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_crc
+ * Length includes CRC. Indicates that the length field includes
+ * the packet's CRC.
+ */
+MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP 0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD 0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644
index 000000000..726f5435b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -0,0 +1,75 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/port.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_PORT_H
+#define _MLXSW_PORT_H
+
+#include <linux/types.h>
+
+#define MLXSW_PORT_MAX_MTU 10000
+
+#define MLXSW_PORT_DEFAULT_VID 1
+
+#define MLXSW_PORT_SWID_DISABLED_PORT 255
+#define MLXSW_PORT_SWID_ALL_SWIDS 254
+#define MLXSW_PORT_SWID_TYPE_ETH 2
+
+#define MLXSW_PORT_MID 0xd000
+
+#define MLXSW_PORT_MAX_PHY_PORTS 0x40
+#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS
+
+#define MLXSW_PORT_DEVID_BITS_OFFSET 10
+#define MLXSW_PORT_PHY_BITS_OFFSET 4
+#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
+
+#define MLXSW_PORT_CPU_PORT 0x0
+
+#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
+
+enum mlxsw_port_admin_status {
+ MLXSW_PORT_ADMIN_STATUS_UP = 1,
+ MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
+ MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3,
+ MLXSW_PORT_ADMIN_STATUS_DISABLED = 4,
+};
+
+enum mlxsw_reg_pude_oper_status {
+ MLXSW_PORT_OPER_STATUS_UP = 1,
+ MLXSW_PORT_OPER_STATUS_DOWN = 2,
+ MLXSW_PORT_OPER_STATUS_FAILURE = 4, /* Can be set to up again. */
+};
+
+#endif /* _MLXSW_PORT_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
new file mode 100644
index 000000000..096e1c121
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -0,0 +1,1349 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/reg.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+ u16 id;
+ u16 len; /* In u8 */
+};
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
+
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
+ .id = MLXSW_REG_SGCR_ID,
+ .len = MLXSW_REG_SGCR_LEN,
+};
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
+ * packets and ignore the IGMP snooping entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+ MLXSW_REG_ZERO(sgcr, payload);
+ mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
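
Every register in this file follows the same pattern: a payload buffer sized by MLXSW_REG_LEN(), a *_pack() helper that zeroes the buffer and fills the index/RW fields, and a write or query through the core layer (mlxsw_reg_write()/mlxsw_reg_query() from core.h, used later in switchx2.c). A minimal sketch, with a hypothetical caller name:

/* Illustrative sketch only: enable link-local broadcast handling via SGCR. */
static int example_sgcr_set_llb(struct mlxsw_core *core, bool llb)
{
	char sgcr_pl[MLXSW_REG_SGCR_LEN];

	mlxsw_reg_sgcr_pack(sgcr_pl, llb);
	return mlxsw_reg_write(core, MLXSW_REG(sgcr), sgcr_pl);
}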
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_spad = {
+ .id = MLXSW_REG_SPAD_ID,
+ .len = MLXSW_REG_SPAD_LEN,
+};
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
+
+/* SMID - Switch Multicast ID
+ * --------------------------
+ * In multi-chip configuration, each device should maintain mapping between
+ * Multicast ID (MID) into a list of local ports. This mapping is used in all
+ * the devices other than the ingress device, and is implemented as part of the
+ * FDB. The MID record maps from a MID, which is a unique identifier of the
+ * multicast group within the stacking domain, into a list of local ports into
+ * which the packet is replicated.
+ */
+#define MLXSW_REG_SMID_ID 0x2007
+#define MLXSW_REG_SMID_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_smid = {
+ .id = MLXSW_REG_SMID_ID,
+ .len = MLXSW_REG_SMID_LEN,
+};
+
+/* reg_smid_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
+
+/* reg_smid_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
+
+/* reg_smid_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
+
+/* reg_smid_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
+{
+ MLXSW_REG_ZERO(smid, payload);
+ mlxsw_reg_smid_swid_set(payload, 0);
+ mlxsw_reg_smid_mid_set(payload, mid);
+ mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+ mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SSPR - Switch System Port Record Register
+ * -----------------------------------------
+ * Configures the system port to local port mapping.
+ */
+#define MLXSW_REG_SSPR_ID 0x2008
+#define MLXSW_REG_SSPR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sspr = {
+ .id = MLXSW_REG_SSPR_ID,
+ .len = MLXSW_REG_SSPR_LEN,
+};
+
+/* reg_sspr_m
+ * Master - if set, then the record describes the master system port.
+ * This is needed in case a local port is mapped into several system ports
+ * (for multipathing). That number will be reported as the source system
+ * port when packets are forwarded to the CPU. Only one master port is allowed
+ * per local port.
+ *
+ * Note: Must be set for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
+
+/* reg_sspr_local_port
+ * Local port number.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
+
+/* reg_sspr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
+
+/* reg_sspr_system_port
+ * Unique identifier within the stacking domain that represents all the ports
+ * that are available in the system (external ports).
+ *
+ * Currently, only single-ASIC configurations are supported, so we default to
+ * 1:1 mapping between system ports and local ports.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
+
+static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(sspr, payload);
+ mlxsw_reg_sspr_m_set(payload, 1);
+ mlxsw_reg_sspr_local_port_set(payload, local_port);
+ mlxsw_reg_sspr_sub_port_set(payload, 0);
+ mlxsw_reg_sspr_system_port_set(payload, local_port);
+}
+
+/* SPMS - Switch Port MSTP/RSTP State Register
+ * -------------------------------------------
+ * Configures the spanning tree state of a physical port.
+ */
+#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_LEN 0x404
+
+static const struct mlxsw_reg_info mlxsw_reg_spms = {
+ .id = MLXSW_REG_SPMS_ID,
+ .len = MLXSW_REG_SPMS_LEN,
+};
+
+/* reg_spms_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_spms_state {
+ MLXSW_REG_SPMS_STATE_NO_CHANGE,
+ MLXSW_REG_SPMS_STATE_DISCARDING,
+ MLXSW_REG_SPMS_STATE_LEARNING,
+ MLXSW_REG_SPMS_STATE_FORWARDING,
+};
+
+/* reg_spms_state
+ * Spanning tree state of each VLAN ID (VID) of the local port.
+ * 0 - Do not change spanning tree state (used only when writing).
+ * 1 - Discarding. No learning or forwarding to/from this port (default).
+ * 2 - Learning. Port is learning, but not forwarding.
+ * 3 - Forwarding. Port is learning and forwarding.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
+
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
+ enum mlxsw_reg_spms_state state)
+{
+ MLXSW_REG_ZERO(spms, payload);
+ mlxsw_reg_spms_local_port_set(payload, local_port);
+ mlxsw_reg_spms_state_set(payload, vid, state);
+}
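
A sketch of how SPMS might be driven (the function name is hypothetical; SPMS payloads are 0x404 bytes, so they are allocated with kmalloc() from <linux/slab.h> rather than placed on the stack):

/* Illustrative sketch only: move a VID on a port to the forwarding state. */
static int example_spms_set_forwarding(struct mlxsw_core *core, u8 local_port,
				       u16 vid)
{
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, local_port, vid,
			    MLXSW_REG_SPMS_STATE_FORWARDING);
	err = mlxsw_reg_write(core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}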
+
+/* SFGC - Switch Flooding Group Configuration
+ * ------------------------------------------
+ * The following register controls the association of flooding tables and MIDs
+ * to packet types used for flooding.
+ */
+#define MLXSW_REG_SFGC_ID 0x2011
+#define MLXSW_REG_SFGC_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
+ .id = MLXSW_REG_SFGC_ID,
+ .len = MLXSW_REG_SFGC_LEN,
+};
+
+enum mlxsw_reg_sfgc_type {
+ MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
+ MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
+ MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
+ MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+};
+
+/* reg_sfgc_type
+ * The traffic type to reach the flooding table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
+
+enum mlxsw_reg_sfgc_bridge_type {
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+};
+
+/* reg_sfgc_bridge_type
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports 802.1Q mode.
+ */
+MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
+
+enum mlxsw_flood_table_type {
+ MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
+ MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
+};
+
+/* reg_sfgc_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ *
+ * Note: FID offset and FID types are not supported in SwitchX-2.
+ */
+MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
+
+/* reg_sfgc_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
+
+/* reg_sfgc_mid
+ * The multicast ID for the swid. Not supported for Spectrum
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
+
+/* reg_sfgc_counter_set_type
+ * Counter Set Type for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
+
+/* reg_sfgc_counter_index
+ * Counter Index for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
+ enum mlxsw_reg_sfgc_bridge_type bridge_type,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int flood_table)
+{
+ MLXSW_REG_ZERO(sfgc, payload);
+ mlxsw_reg_sfgc_type_set(payload, type);
+ mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
+ mlxsw_reg_sfgc_table_type_set(payload, table_type);
+ mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
+ mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+}
+
+/* SFTR - Switch Flooding Table Register
+ * -------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR_ID 0x2012
+#define MLXSW_REG_SFTR_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_sftr = {
+ .id = MLXSW_REG_SFTR_ID,
+ .len = MLXSW_REG_SFTR_LEN,
+};
+
+/* reg_sftr_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
+
+/* reg_sftr_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
+
+/* reg_sftr_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
+
+/* reg_sftr_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
+
+/* reg_sftr_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
+
+/* reg_sftr_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
+
+/* reg_sftr_cpu_port_mask
+ * CPU port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_sftr_pack(char *payload,
+ unsigned int flood_table,
+ unsigned int index,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int range)
+{
+ MLXSW_REG_ZERO(sftr, payload);
+ mlxsw_reg_sftr_swid_set(payload, 0);
+ mlxsw_reg_sftr_flood_table_set(payload, flood_table);
+ mlxsw_reg_sftr_index_set(payload, index);
+ mlxsw_reg_sftr_table_type_set(payload, table_type);
+ mlxsw_reg_sftr_range_set(payload, range);
+ mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+ mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
+ .id = MLXSW_REG_SPMLR_ID,
+ .len = MLXSW_REG_SPMLR_LEN,
+};
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+ MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+ MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+ MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_spmlr_learn_mode mode)
+{
+ MLXSW_REG_ZERO(spmlr, payload);
+ mlxsw_reg_spmlr_local_port_set(payload, local_port);
+ mlxsw_reg_spmlr_sub_port_set(payload, 0);
+ mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
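
As with the other per-port registers, enabling learning is a pack-and-write; a minimal sketch with a hypothetical helper name:

/* Illustrative sketch only: enable MAC learning on a local port. */
static int example_spmlr_enable_learning(struct mlxsw_core *core, u8 local_port)
{
	char spmlr_pl[MLXSW_REG_SPMLR_LEN];

	mlxsw_reg_spmlr_pack(spmlr_pl, local_port,
			     MLXSW_REG_SPMLR_LEARN_MODE_ENABLE);
	return mlxsw_reg_write(core, MLXSW_REG(spmlr), spmlr_pl);
}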
+
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
+ .id = MLXSW_REG_PMLP_ID,
+ .len = MLXSW_REG_PMLP_LEN,
+};
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
+ * equal to Tx lane.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+
+static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(pmlp, payload);
+ mlxsw_reg_pmlp_local_port_set(payload, local_port);
+}
+
+/* PMTU - Port MTU Register
+ * ------------------------
+ * Configures and reports the port MTU.
+ */
+#define MLXSW_REG_PMTU_ID 0x5003
+#define MLXSW_REG_PMTU_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
+ .id = MLXSW_REG_PMTU_ID,
+ .len = MLXSW_REG_PMTU_LEN,
+};
+
+/* reg_pmtu_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+
+/* reg_pmtu_max_mtu
+ * Maximum MTU.
+ * When port type (e.g. Ethernet) is configured, the relevant MTU is
+ * reported, otherwise the minimum between the max_mtu of the different
+ * types is reported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
+
+/* reg_pmtu_admin_mtu
+ * MTU value to set port to. Must be smaller or equal to max_mtu.
+ * Note: If the port type is InfiniBand, the port must be disabled when its
+ * MTU is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
+
+/* reg_pmtu_oper_mtu
+ * The actual MTU configured on the port. Packets exceeding this size
+ * will be dropped.
+ * Note: In Ethernet and FC oper_mtu == admin_mtu, however, in Infiniband
+ * oper_mtu might be smaller than admin_mtu.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
+
+static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+ u16 new_mtu)
+{
+ MLXSW_REG_ZERO(pmtu, payload);
+ mlxsw_reg_pmtu_local_port_set(payload, local_port);
+ mlxsw_reg_pmtu_max_mtu_set(payload, 0);
+ mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
+ mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
+}
+
+/* PTYS - Port Type and Speed Register
+ * -----------------------------------
+ * Configures and reports the port speed type.
+ *
+ * Note: When set while the link is up, the changes will not take effect
+ * until the port transitions from down to up state.
+ */
+#define MLXSW_REG_PTYS_ID 0x5004
+#define MLXSW_REG_PTYS_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ptys = {
+ .id = MLXSW_REG_PTYS_ID,
+ .len = MLXSW_REG_PTYS_LEN,
+};
+
+/* reg_ptys_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+
+#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2)
+
+/* reg_ptys_proto_mask
+ * Protocol mask. Indicates which protocol is used.
+ * 0 - Infiniband.
+ * 1 - Fibre Channel.
+ * 2 - Ethernet.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+
+#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 BIT(3)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR BIT(4)
+#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
+#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4 BIT(8)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR BIT(13)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(25)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T BIT(26)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 BIT(30)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2 BIT(31)
+
+/* reg_ptys_eth_proto_cap
+ * Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+
+/* reg_ptys_eth_proto_admin
+ * Speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+
+/* reg_ptys_eth_proto_oper
+ * The current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+
+static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
+ u32 proto_admin)
+{
+ MLXSW_REG_ZERO(ptys, payload);
+ mlxsw_reg_ptys_local_port_set(payload, local_port);
+ mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+ mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+}
+
+static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
+ u32 *p_eth_proto_adm,
+ u32 *p_eth_proto_oper)
+{
+ if (p_eth_proto_cap)
+ *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
+ if (p_eth_proto_adm)
+ *p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
+ if (p_eth_proto_oper)
+ *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
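
PTYS is typically read back and unpacked to report link capabilities (for example in the ethtool path). A minimal sketch, assuming only the helpers defined above plus mlxsw_reg_query() from core.h; the function name is hypothetical:

/* Illustrative sketch only: read a port's Ethernet capability and
 * operational speed masks.
 */
static int example_ptys_query(struct mlxsw_core *core, u8 local_port,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_oper)
{
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, local_port, 0);
	err = mlxsw_reg_query(core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_unpack(ptys_pl, p_eth_proto_cap, NULL, p_eth_proto_oper);
	return 0;
}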
+
+/* PPAD - Port Physical Address Register
+ * -------------------------------------
+ * The PPAD register configures the per port physical MAC address.
+ */
+#define MLXSW_REG_PPAD_ID 0x5005
+#define MLXSW_REG_PPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_ppad = {
+ .id = MLXSW_REG_PPAD_ID,
+ .len = MLXSW_REG_PPAD_LEN,
+};
+
+/* reg_ppad_single_base_mac
+ * 0 - base_mac: local_port should be 0 and mac[7:0] is reserved;
+ *     the hardware assigns incremental per-port MAC addresses.
+ * 1 - single_mac: the MAC address of the local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
+
+/* reg_ppad_local_port
+ * port number, if single_base_mac = 0 then local_port is reserved
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+
+/* reg_ppad_mac
+ * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
+ * If single_base_mac = 1 - the per port MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+ u8 local_port)
+{
+ MLXSW_REG_ZERO(ppad, payload);
+ mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+ mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_paos = {
+ .id = MLXSW_REG_PAOS_ID,
+ .len = MLXSW_REG_PAOS_LEN,
+};
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid is
+ * redundant), router ports use the same local port number where swid is the
+ * only indication for the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ * into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ * port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+ enum mlxsw_port_admin_status status)
+{
+ MLXSW_REG_ZERO(paos, payload);
+ mlxsw_reg_paos_swid_set(payload, 0);
+ mlxsw_reg_paos_local_port_set(payload, local_port);
+ mlxsw_reg_paos_admin_status_set(payload, status);
+ mlxsw_reg_paos_oper_status_set(payload, 0);
+ mlxsw_reg_paos_ase_set(payload, 1);
+ mlxsw_reg_paos_ee_set(payload, 1);
+ mlxsw_reg_paos_e_set(payload, 1);
+}
+
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
+ .id = MLXSW_REG_PPCNT_ID,
+ .len = MLXSW_REG_PPCNT_LEN,
+};
+
+/* reg_ppcnt_swid
+ * For HCA: must always be 0.
+ * Switch partition ID to associate port with.
+ * Switch partitions are numbered from 0 to 7 inclusively.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * 255 indicates all ports on the device, and is only allowed
+ * for Set() operation.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority for counter sets that support per-priority counters;
+ * valid values: 0-7.
+ * Traffic class for counter sets that support per-traffic-class counters;
+ * valid values: 0 to cap_max_tclass-1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+ 0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+ 0x08 + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+ 0x08 + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+ 0x08 + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+ 0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+ 0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+ 0x08 + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+ 0x08 + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+ 0x08 + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+ 0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+ 0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+ 0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+ 0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+ 0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
+ 0x08 + 0x70, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
+ 0x08 + 0x78, 0, 64);
+
+/* reg_ppcnt_a_unsupported_opcodes_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
+ 0x08 + 0x80, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
+ 0x08 + 0x88, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
+ 0x08 + 0x90, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(ppcnt, payload);
+ mlxsw_reg_ppcnt_swid_set(payload, 0);
+ mlxsw_reg_ppcnt_local_port_set(payload, local_port);
+ mlxsw_reg_ppcnt_pnat_set(payload, 0);
+ mlxsw_reg_ppcnt_grp_set(payload, 0);
+ mlxsw_reg_ppcnt_clr_set(payload, 0);
+ mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+}
+
+/* PSPA - Port Switch Partition Allocation
+ * ---------------------------------------
+ * Controls the association of a port with a switch partition and enables
+ * configuring ports as stacking ports.
+ */
+#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_pspa = {
+ .id = MLXSW_REG_PSPA_ID,
+ .len = MLXSW_REG_PSPA_LEN,
+};
+
+/* reg_pspa_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
+
+/* reg_pspa_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+
+/* reg_pspa_sub_port
+ * Virtual port within the local port. Set to 0 when virtual ports are
+ * disabled on the local port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
+
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+{
+ MLXSW_REG_ZERO(pspa, payload);
+ mlxsw_reg_pspa_swid_set(payload, swid);
+ mlxsw_reg_pspa_local_port_set(payload, local_port);
+ mlxsw_reg_pspa_sub_port_set(payload, 0);
+}
+
+/* HTGT - Host Trap Group Table
+ * ----------------------------
+ * Configures the properties for forwarding to CPU.
+ */
+#define MLXSW_REG_HTGT_ID 0x7002
+#define MLXSW_REG_HTGT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_htgt = {
+ .id = MLXSW_REG_HTGT_ID,
+ .len = MLXSW_REG_HTGT_LEN,
+};
+
+/* reg_htgt_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
+
+#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0 /* For locally attached CPU */
+
+/* reg_htgt_type
+ * CPU path type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
+
+#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0
+#define MLXSW_REG_HTGT_TRAP_GROUP_RX 0x1
+
+/* reg_htgt_trap_group
+ * Trap group number. User defined number specifying which trap groups
+ * should be forwarded to the CPU. The mapping between trap IDs and trap
+ * groups is configured using HPKT register.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
+
+enum {
+ MLXSW_REG_HTGT_POLICER_DISABLE,
+ MLXSW_REG_HTGT_POLICER_ENABLE,
+};
+
+/* reg_htgt_pide
+ * Enable policer ID specified using 'pid' field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+
+/* reg_htgt_pid
+ * Policer ID for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
+
+#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
+
+/* reg_htgt_mirror_action
+ * Mirror action to use.
+ * 0 - Trap to CPU.
+ * 1 - Trap to CPU and mirror to a mirroring agent.
+ * 2 - Mirror to a mirroring agent and do not trap to CPU.
+ * Access: RW
+ *
+ * Note: Mirroring to a mirroring agent is only supported in Spectrum.
+ */
+MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
+
+/* reg_htgt_mirroring_agent
+ * Mirroring agent.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+
+/* reg_htgt_priority
+ * Trap group priority.
+ * In case a packet matches multiple classification rules, the packet will
+ * only be trapped once, based on the trap ID associated with the group (via
+ * register HPKT) with the highest priority.
+ * Supported values are 0-7, with 7 representing the highest priority.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field is ignored and the priority value is replaced
+ * by the 'trap_group' field.
+ */
+MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+
+/* reg_htgt_local_path_cpu_tclass
+ * CPU ingress traffic class for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
+
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14
+
+/* reg_htgt_local_path_rdq
+ * Receive descriptor queue (RDQ) to use for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
+
+static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+{
+ u8 swid, rdq;
+
+ MLXSW_REG_ZERO(htgt, payload);
+ if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) {
+ swid = MLXSW_PORT_SWID_ALL_SWIDS;
+ rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
+ } else {
+ swid = 0;
+ rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+ }
+ mlxsw_reg_htgt_swid_set(payload, swid);
+ mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
+ mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+ mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
+ mlxsw_reg_htgt_pid_set(payload, 0);
+ mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
+ mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
+ mlxsw_reg_htgt_priority_set(payload, 0);
+ mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
+ mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
+}
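
Trap groups are created by packing HTGT once per group; trap IDs are then bound to a group via HPKT (the next register). A minimal sketch with a hypothetical function name:

/* Illustrative sketch only: create the RX trap group on the local CPU path. */
static int example_htgt_create_rx_group(struct mlxsw_core *core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	return mlxsw_reg_write(core, MLXSW_REG(htgt), htgt_pl);
}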
+
+/* HPKT - Host Packet Trap
+ * -----------------------
+ * Configures trap IDs inside trap groups.
+ */
+#define MLXSW_REG_HPKT_ID 0x7003
+#define MLXSW_REG_HPKT_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
+ .id = MLXSW_REG_HPKT_ID,
+ .len = MLXSW_REG_HPKT_LEN,
+};
+
+enum {
+ MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
+ MLXSW_REG_HPKT_ACK_REQUIRED,
+};
+
+/* reg_hpkt_ack
+ * Require acknowledgements from the host for events.
+ * If set, then the device will wait for the event it sent to be acknowledged
+ * by the host. This option is only relevant for event trap IDs.
+ * Access: RW
+ *
+ * Note: Currently not supported by firmware.
+ */
+MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
+
+enum mlxsw_reg_hpkt_action {
+ MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_DISCARD,
+ MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+};
+
+/* reg_hpkt_action
+ * Action to perform on packet when trapped.
+ * 0 - No action. Forward to CPU based on switching rules.
+ * 1 - Trap to CPU (CPU receives sole copy).
+ * 2 - Mirror to CPU (CPU receives a replica of the packet).
+ * 3 - Discard.
+ * 4 - Soft discard (allow other traps to act on the packet).
+ * 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * Access: RW
+ *
+ * Note: Must be set to 0 (forward) for event trap IDs, as they are already
+ * addressed to the CPU.
+ */
+MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
+
+/* reg_hpkt_trap_group
+ * Trap group to associate the trap with.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
+
+/* reg_hpkt_trap_id
+ * Trap ID.
+ * Access: Index
+ *
+ * Note: A trap ID can only be associated with a single trap group. The device
+ * will associate the trap ID with the last trap group configured.
+ */
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
+
+enum {
+ MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
+ MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
+ MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
+};
+
+/* reg_hpkt_ctrl
+ * Configure dedicated buffer resources for control packets.
+ * 0 - Keep factory defaults.
+ * 1 - Do not use control buffer for this trap ID.
+ * 2 - Use control buffer for this trap ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
+
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
+ u8 trap_group, u16 trap_id)
+{
+ MLXSW_REG_ZERO(hpkt, payload);
+ mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
+ mlxsw_reg_hpkt_action_set(payload, action);
+ mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
+ mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
+ mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
+}
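
Putting HTGT and HPKT together, trapping a packet type to the CPU means binding its trap ID to a previously created trap group. Another minimal sketch, again with a hypothetical name:

/* Illustrative sketch only: trap a given trap ID to the CPU via the RX group. */
static int example_hpkt_trap_to_cpu(struct mlxsw_core *core, u16 trap_id)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_REG_HTGT_TRAP_GROUP_RX, trap_id);
	return mlxsw_reg_write(core, MLXSW_REG(hpkt), hpkt_pl);
}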
+
+static inline const char *mlxsw_reg_id_str(u16 reg_id)
+{
+ switch (reg_id) {
+ case MLXSW_REG_SGCR_ID:
+ return "SGCR";
+ case MLXSW_REG_SPAD_ID:
+ return "SPAD";
+ case MLXSW_REG_SMID_ID:
+ return "SMID";
+ case MLXSW_REG_SSPR_ID:
+ return "SSPR";
+ case MLXSW_REG_SPMS_ID:
+ return "SPMS";
+ case MLXSW_REG_SFGC_ID:
+ return "SFGC";
+ case MLXSW_REG_SFTR_ID:
+ return "SFTR";
+ case MLXSW_REG_SPMLR_ID:
+ return "SPMLR";
+ case MLXSW_REG_PMLP_ID:
+ return "PMLP";
+ case MLXSW_REG_PMTU_ID:
+ return "PMTU";
+ case MLXSW_REG_PTYS_ID:
+ return "PTYS";
+ case MLXSW_REG_PPAD_ID:
+ return "PPAD";
+ case MLXSW_REG_PAOS_ID:
+ return "PAOS";
+ case MLXSW_REG_PPCNT_ID:
+ return "PPCNT";
+ case MLXSW_REG_PSPA_ID:
+ return "PSPA";
+ case MLXSW_REG_HTGT_ID:
+ return "HTGT";
+ case MLXSW_REG_HPKT_ID:
+ return "HPKT";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+/* PUDE - Port Up / Down Event
+ * ---------------------------
+ * Reports the operational state change of a port.
+ */
+#define MLXSW_REG_PUDE_LEN 0x10
+
+/* reg_pude_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
+
+/* reg_pude_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+
+/* reg_pude_admin_status
+ * Port administrative state (the desired state).
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ * into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
+
+/* reg_pude_oper_status
+ * Port operational state.
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ * port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
new file mode 100644
index 000000000..62cbbd1ad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -0,0 +1,1568 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
+static const char mlxsw_sx_driver_version[] = "1.0";
+
+struct mlxsw_sx_port;
+
+#define MLXSW_SW_HW_ID_LEN 6
+
+struct mlxsw_sx {
+ struct mlxsw_sx_port **ports;
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 hw_id[MLXSW_SW_HW_ID_LEN];
+};
+
+struct mlxsw_sx_port_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 tx_dropped;
+};
+
+struct mlxsw_sx_port {
+ struct net_device *dev;
+ struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
+ struct mlxsw_sx *mlxsw_sx;
+ u8 local_port;
+};
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ * The MSB is specified in the 'ctclass3' field.
+ * Range is 0-15, where 15 is the highest priority.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
+
+/* tx_hdr_swid
+ * Switch partition ID.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_ctclass3
+ * See field 'etclass'.
+ */
+MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
+
+/* tx_hdr_rdq
+ * RDQ for control packets sent to remote CPU.
+ * Must be set to 0x1F for EMADs, otherwise 0.
+ */
+MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
+
+/* tx_hdr_cpu_sig
+ * Signature control for packets going to CPU. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
+
+/* tx_hdr_sig
+ * Stacking protocol signature. Must be set to 0xE0E0.
+ */
+MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
+
+/* tx_hdr_stclass
+ * Stacking TClass.
+ */
+MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
+
+/* tx_hdr_emad
+ * EMAD bit. Must be set for EMADs.
+ */
+MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ bool is_emad = tx_info->is_emad;
+
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ /* We currently set default values for the egress tclass (QoS). */
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
+ MLXSW_TXHDR_ETCLASS_5);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+ mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
+ mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
+ MLXSW_TXHDR_RDQ_OTHER);
+ mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
+ mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
+ mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
+ mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
+ MLXSW_TXHDR_NOT_EMAD);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool is_up)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
+ is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+ MLXSW_PORT_ADMIN_STATUS_DOWN);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool *p_is_up)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+ u8 oper_status;
+ int err;
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+ if (err)
+ return err;
+ oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+ *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
+ return 0;
+}
+
+static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ int max_mtu;
+ int err;
+
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+ if (err)
+ return err;
+ max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+ if (mtu > max_mtu)
+ return -EINVAL;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+ mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int
+mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+ mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool *p_usable)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
+ return 0;
+}
+
+static int mlxsw_sx_port_open(struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+ if (err)
+ return err;
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int mlxsw_sx_port_stop(struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+}
+
+static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+ const struct mlxsw_tx_info tx_info = {
+ .local_port = mlxsw_sx_port->local_port,
+ .is_emad = false,
+ };
+ u64 len;
+ int err;
+
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+ return NETDEV_TX_BUSY;
+
+ if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+ struct sk_buff *skb_orig = skb;
+
+ skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb_orig);
+ return NETDEV_TX_OK;
+ }
+ }
+ mlxsw_sx_txhdr_construct(skb, &tx_info);
+ len = skb->len;
+ /* Due to a race we might fail here because of a full queue. In that
+ * unlikely case we simply drop the packet.
+ */
+ err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+
+ if (!err) {
+ pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+ if (err)
+ return err;
+ dev->mtu = mtu;
+ return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sx_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx_port_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ u32 tx_dropped = 0;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /* tx_dropped is u32, updated without syncp protection. */
+ tx_dropped += p->tx_dropped;
+ }
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
+
+static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
+ .ndo_open = mlxsw_sx_port_open,
+ .ndo_stop = mlxsw_sx_port_stop,
+ .ndo_start_xmit = mlxsw_sx_port_xmit,
+ .ndo_change_mtu = mlxsw_sx_port_change_mtu,
+ .ndo_get_stats64 = mlxsw_sx_port_get_stats64,
+};
+
+static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+ strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, mlxsw_sx_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ mlxsw_sx->bus_info->fw_rev.major,
+ mlxsw_sx->bus_info->fw_rev.minor,
+ mlxsw_sx->bus_info->fw_rev.subminor);
+ strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
+ sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sx_port_hw_stats {
+ char str[ETH_GSTRING_LEN];
+ u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
+ {
+ .str = "a_frames_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+ },
+ {
+ .str = "a_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+ },
+ {
+ .str = "a_frame_check_sequence_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+ },
+ {
+ .str = "a_alignment_errors",
+ .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+ },
+ {
+ .str = "a_octets_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+ },
+ {
+ .str = "a_octets_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+ },
+ {
+ .str = "a_in_range_length_errors",
+ .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+ },
+ {
+ .str = "a_out_of_range_length_field",
+ .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+ },
+ {
+ .str = "a_frame_too_long_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+ },
+ {
+ .str = "a_symbol_error_during_carrier",
+ .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+ },
+ {
+ .str = "a_mac_control_frames_transmitted",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+ },
+ {
+ .str = "a_mac_control_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+ },
+ {
+ .str = "a_unsupported_opcodes_received",
+ .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_xmitted",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+ },
+};
+
+#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
+
+static void mlxsw_sx_port_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sx_port_hw_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void mlxsw_sx_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
+ data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return MLXSW_SX_PORT_HW_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+struct mlxsw_sx_port_link_mode {
+ u32 mask;
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+};
+
+static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .speed = 25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .speed = 50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .speed = 100000,
+ },
+};
+
+#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
+
+static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return SUPPORTED_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+ return SUPPORTED_Backplane;
+ return 0;
+}
+
+static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+ modes |= mlxsw_sx_port_link_mode[i].supported;
+ }
+ return modes;
+}
+
+static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+ modes |= mlxsw_sx_port_link_mode[i].advertised;
+ }
+ return modes;
+}
+
+static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+ struct ethtool_cmd *cmd)
+{
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+ int i;
+
+ if (!carrier_ok)
+ goto out;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
+ speed = mlxsw_sx_port_link_mode[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return PORT_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+ return PORT_DA;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+ return PORT_NONE;
+
+ return PORT_OTHER;
+}
+
+static int mlxsw_sx_port_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_oper;
+ int err;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
+
+ cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+ mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+ mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
+ eth_proto_oper, cmd);
+
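+	/* When there is no operational speed (link down), fall back to the
+	 * port capabilities for connector type and link partner reporting.
+	 */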
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+ cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
+ cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+ cmd->transceiver = XCVR_INTERNAL;
+ return 0;
+}
+
+static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (advertising & mlxsw_sx_port_link_mode[i].advertised)
+ ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static u32 mlxsw_sx_to_ptys_speed(u32 speed)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (speed == mlxsw_sx_port_link_mode[i].speed)
+ ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static int mlxsw_sx_port_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 speed;
+ u32 eth_proto_new;
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ bool is_up;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+ mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+ mlxsw_sx_to_ptys_speed(speed);
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+ eth_proto_new = eth_proto_new & eth_proto_cap;
+ if (!eth_proto_new) {
+		netdev_err(dev, "Unsupported proto admin requested");
+ return -EINVAL;
+ }
+ if (eth_proto_new == eth_proto_admin)
+ return 0;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to set proto admin");
+ return err;
+ }
+
+ err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
+ if (err) {
+ netdev_err(dev, "Failed to get oper status");
+ return err;
+ }
+ if (!is_up)
+ return 0;
+
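+	/* The port is currently up; toggle it administratively down and back
+	 * up so that the new proto admin takes effect.
+	 */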
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
+ .get_drvinfo = mlxsw_sx_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlxsw_sx_port_get_strings,
+ .get_ethtool_stats = mlxsw_sx_port_get_stats,
+ .get_sset_count = mlxsw_sx_port_get_sset_count,
+ .get_settings = mlxsw_sx_port_get_settings,
+ .set_settings = mlxsw_sx_port_set_settings,
+};
+
+static int mlxsw_sx_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
+ memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
+ .switchdev_port_attr_get = mlxsw_sx_port_attr_get,
+};
+
+static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN];
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
+ return 0;
+}
+
+static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ struct net_device *dev = mlxsw_sx_port->dev;
+ char ppad_pl[MLXSW_REG_PPAD_LEN];
+ int err;
+
+ mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
+	/* The last byte value in the base MAC address is guaranteed
+	 * to be such that it does not overflow when the local_port
+	 * value is added to it.
+	 */
+ dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
+ return 0;
+}
+
+static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u16 vid, enum mlxsw_reg_spms_state state)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char *spms_pl;
+ int err;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u32 speed)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int
+mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ enum mlxsw_reg_spmlr_learn_mode mode)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+
+ mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
+}
+
+static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port;
+ struct net_device *dev;
+ bool usable;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
+ if (!dev)
+ return -ENOMEM;
+ mlxsw_sx_port = netdev_priv(dev);
+ mlxsw_sx_port->dev = dev;
+ mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
+ mlxsw_sx_port->local_port = local_port;
+
+ mlxsw_sx_port->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
+ if (!mlxsw_sx_port->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
+ dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
+ dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
+
+ err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
+ mlxsw_sx_port->local_port);
+ goto err_dev_addr_get;
+ }
+
+ netif_carrier_off(dev);
+
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+ NETIF_F_VLAN_CHALLENGED;
+
+	/* Each packet needs to have a Tx header (metadata) on top of all
+	 * other headers.
+ */
+ dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+ err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_module_check;
+ }
+
+ if (!usable) {
+ dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+ mlxsw_sx_port->local_port);
+ goto port_not_usable;
+ }
+
+ err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_system_port_mapping_set;
+ }
+
+ err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_swid_set;
+ }
+
+ err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_speed_set;
+ }
+
+ err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_mtu_set;
+ }
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+ if (err)
+ goto err_port_admin_status_set;
+
+ err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
+ MLXSW_PORT_DEFAULT_VID,
+ MLXSW_REG_SPMS_STATE_FORWARDING);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_stp_state_set;
+ }
+
+ err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
+ MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_mac_learning_mode_set;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
+ mlxsw_sx_port->local_port);
+ goto err_register_netdev;
+ }
+
+ mlxsw_sx->ports[local_port] = mlxsw_sx_port;
+ return 0;
+
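+	/* The error labels below are empty fall-throughs; only the per-CPU
+	 * stats and the netdev itself need explicit cleanup.
+	 */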
+err_register_netdev:
+err_port_mac_learning_mode_set:
+err_port_stp_state_set:
+err_port_admin_status_set:
+err_port_mtu_set:
+err_port_speed_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_get:
+ free_percpu(mlxsw_sx_port->pcpu_stats);
+err_alloc_stats:
+ free_netdev(dev);
+ return err;
+}
+
+static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+
+ if (!mlxsw_sx_port)
+ return;
+ unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+ free_percpu(mlxsw_sx_port->pcpu_stats);
+ free_netdev(mlxsw_sx_port->dev);
+}
+
+static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
+{
+ int i;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ mlxsw_sx_port_remove(mlxsw_sx, i);
+ kfree(mlxsw_sx->ports);
+}
+
+static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
+{
+ size_t alloc_size;
+ int i;
+ int err;
+
+ alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+ mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_sx->ports)
+ return -ENOMEM;
+
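+	/* Local port 0 is the CPU port, so front panel ports start at 1. */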
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+ err = mlxsw_sx_port_create(mlxsw_sx, i);
+ if (err)
+ goto err_port_create;
+ }
+ return 0;
+
+err_port_create:
+ for (i--; i >= 1; i--)
+ mlxsw_sx_port_remove(mlxsw_sx, i);
+ kfree(mlxsw_sx->ports);
+ return err;
+}
+
+static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
+ char *pude_pl, void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx_port *mlxsw_sx_port;
+ enum mlxsw_reg_pude_oper_status status;
+ u8 local_port;
+
+ local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+ mlxsw_sx_port = mlxsw_sx->ports[local_port];
+ if (!mlxsw_sx_port) {
+ dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ status = mlxsw_reg_pude_oper_status_get(pude_pl);
+	if (status == MLXSW_PORT_OPER_STATUS_UP) {
+ netdev_info(mlxsw_sx_port->dev, "link up\n");
+ netif_carrier_on(mlxsw_sx_port->dev);
+ } else {
+ netdev_info(mlxsw_sx_port->dev, "link down\n");
+ netif_carrier_off(mlxsw_sx_port->dev);
+ }
+}
+
+static struct mlxsw_event_listener mlxsw_sx_pude_event = {
+ .func = mlxsw_sx_pude_event_func,
+ .trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sx_pude_event;
+ break;
+ }
+ err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_event_trap_set;
+
+ return 0;
+
+err_event_trap_set:
+ mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+ return err;
+}
+
+static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sx_pude_event;
+ break;
+ }
+ mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+}
+
+static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+ struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sx_port)) {
+ if (net_ratelimit())
+ dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ skb->dev = mlxsw_sx_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ netif_receive_skb(skb);
+}
+
+static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_FDB_MC,
+ },
+ /* Traps for specific L2 packet types, not trapped as FDB MC */
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_STP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LACP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_EAPOL,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LLDP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MMRP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MVRP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_RPVST,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_DHCP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+ },
+};
+
+static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+ err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ if (err)
+ goto err_rx_listener_register;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ mlxsw_sx_rx_listener[i].trap_id);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_rx_trap_set;
+ }
+ return 0;
+
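+	/* Roll back partial setup: unregister the listeners registered so
+	 * far and restore the forward action for their trap IDs.
+	 */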
+err_rx_trap_set:
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+err_rx_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ mlxsw_sx_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ }
+ return err;
+}
+
+static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ mlxsw_sx_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ }
+}
+
+static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
+{
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+ char sgcr_pl[MLXSW_REG_SGCR_LEN];
+ char *smid_pl;
+ char *sftr_pl;
+ int err;
+
+	/* Due to a FW bug, we must configure SMID. */
+ smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+ if (!smid_pl)
+ return -ENOMEM;
+ mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
+ kfree(smid_pl);
+ if (err)
+ return err;
+
+	/* Configure a flooding table, which includes only the CPU port. */
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+ mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
+ kfree(sftr_pl);
+ if (err)
+ return err;
+
+ /* Flood different packet types using the flooding table. */
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_BROADCAST,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sgcr_pack(sgcr_pl, true);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
+}
+
+static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ int err;
+
+ mlxsw_sx->core = mlxsw_core;
+ mlxsw_sx->bus_info = mlxsw_bus_info;
+
+ err = mlxsw_sx_hw_id_get(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
+ return err;
+ }
+
+ err = mlxsw_sx_ports_create(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
+ return err;
+ }
+
+ err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
+ goto err_event_register;
+ }
+
+ err = mlxsw_sx_traps_init(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
+ goto err_rx_listener_register;
+ }
+
+ err = mlxsw_sx_flood_init(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
+ goto err_flood_init;
+ }
+
+ return 0;
+
+err_flood_init:
+ mlxsw_sx_traps_fini(mlxsw_sx);
+err_rx_listener_register:
+ mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+ mlxsw_sx_ports_remove(mlxsw_sx);
+ return err;
+}
+
+static void mlxsw_sx_fini(void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+
+ mlxsw_sx_traps_fini(mlxsw_sx);
+ mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+ mlxsw_sx_ports_remove(mlxsw_sx);
+}
+
+static struct mlxsw_config_profile mlxsw_sx_config_profile = {
+ .used_max_vepa_channels = 1,
+ .max_vepa_channels = 0,
+ .used_max_lag = 1,
+ .max_lag = 64,
+ .used_max_port_per_lag = 1,
+ .max_port_per_lag = 16,
+ .used_max_mid = 1,
+ .max_mid = 7000,
+ .used_max_pgt = 1,
+ .max_pgt = 0,
+ .used_max_system_port = 1,
+ .max_system_port = 48000,
+ .used_max_vlan_groups = 1,
+ .max_vlan_groups = 127,
+ .used_max_regions = 1,
+ .max_regions = 400,
+ .used_flood_tables = 1,
+ .max_flood_tables = 2,
+ .max_vid_flood_tables = 1,
+ .used_flood_mode = 1,
+ .flood_mode = 3,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
+
+static struct mlxsw_driver mlxsw_sx_driver = {
+ .kind = MLXSW_DEVICE_KIND_SWITCHX2,
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct mlxsw_sx),
+ .init = mlxsw_sx_init,
+ .fini = mlxsw_sx_fini,
+ .txhdr_construct = mlxsw_sx_txhdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sx_config_profile,
+};
+
+static int __init mlxsw_sx_module_init(void)
+{
+ return mlxsw_core_driver_register(&mlxsw_sx_driver);
+}
+
+static void __exit mlxsw_sx_module_exit(void)
+{
+ mlxsw_core_driver_unregister(&mlxsw_sx_driver);
+}
+
+module_init(mlxsw_sx_module_init);
+module_exit(mlxsw_sx_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
new file mode 100644
index 000000000..53a9550be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/trap.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_TRAP_H
+#define _MLXSW_TRAP_H
+
+enum {
+ /* Ethernet EMAD and FDB miss */
+ MLXSW_TRAP_ID_FDB_MC = 0x01,
+ MLXSW_TRAP_ID_ETHEMAD = 0x05,
+ /* L2 traps for specific packet types */
+ MLXSW_TRAP_ID_STP = 0x10,
+ MLXSW_TRAP_ID_LACP = 0x11,
+ MLXSW_TRAP_ID_EAPOL = 0x12,
+ MLXSW_TRAP_ID_LLDP = 0x13,
+ MLXSW_TRAP_ID_MMRP = 0x14,
+ MLXSW_TRAP_ID_MVRP = 0x15,
+ MLXSW_TRAP_ID_RPVST = 0x16,
+ MLXSW_TRAP_ID_DHCP = 0x19,
+ MLXSW_TRAP_ID_IGMP_QUERY = 0x30,
+ MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31,
+ MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
+ MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
+ MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+
+ MLXSW_TRAP_ID_MAX = 0x1FF
+};
+
+enum mlxsw_event_trap_id {
+ /* Port Up/Down event generated by hardware */
+ MLXSW_TRAP_ID_PUDE = 0x8,
+};
+
+#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
new file mode 100644
index 000000000..06fc46c78
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -0,0 +1,80 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/txheader.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_TXHEADER_H
+#define _MLXSW_TXHEADER_H
+
+#define MLXSW_TXHDR_LEN 0x10
+#define MLXSW_TXHDR_VERSION_0 0
+
+enum {
+ MLXSW_TXHDR_ETH_CTL,
+ MLXSW_TXHDR_ETH_DATA,
+};
+
+#define MLXSW_TXHDR_PROTO_ETH 1
+
+enum {
+ MLXSW_TXHDR_ETCLASS_0,
+ MLXSW_TXHDR_ETCLASS_1,
+ MLXSW_TXHDR_ETCLASS_2,
+ MLXSW_TXHDR_ETCLASS_3,
+ MLXSW_TXHDR_ETCLASS_4,
+ MLXSW_TXHDR_ETCLASS_5,
+ MLXSW_TXHDR_ETCLASS_6,
+ MLXSW_TXHDR_ETCLASS_7,
+};
+
+enum {
+ MLXSW_TXHDR_RDQ_OTHER,
+ MLXSW_TXHDR_RDQ_EMAD = 0x1f,
+};
+
+#define MLXSW_TXHDR_CTCLASS3 0
+#define MLXSW_TXHDR_CPU_SIG 0
+#define MLXSW_TXHDR_SIG 0xE0E0
+#define MLXSW_TXHDR_STCLASS_NONE 0
+
+enum {
+ MLXSW_TXHDR_NOT_EMAD,
+ MLXSW_TXHDR_EMAD,
+};
+
+enum {
+ MLXSW_TXHDR_TYPE_DATA,
+ MLXSW_TXHDR_TYPE_CONTROL = 6,
+};
+
+#endif