Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 96
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 521
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 679
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c | 860
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1915
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 252
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 350
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 105
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/flow_table.c | 422
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 146
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mad.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 345
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mcg.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 226
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c | 444
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 360
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.h | 64
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 345
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 183
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 171
28 files changed, 7296 insertions, 360 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 8ff57e8e3..158c88c69 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -3,6 +3,18 @@
#
config MLX5_CORE
- tristate
+ tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
depends on PCI
default n
+ ---help---
+ Core driver for low level functionality of the ConnectX-4 and
+ Connect-IB cards by Mellanox Technologies.
+
+config MLX5_CORE_EN
+ bool "Mellanox Technologies ConnectX-4 Ethernet support"
+ depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ default n
+ ---help---
+ Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+ Ethernet and Infiniband support in ConnectX-4 are currently mutually
+ exclusive.
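
For reference, a configuration fragment exercising the new option might look like the sketch below; the CONFIG_PCI/CONFIG_NETDEVICES/CONFIG_ETHERNET values are the prerequisites named in the depends lines, and the exact choices shown are an assumption, not part of this patch:

# ConnectX-4 core driver as a module, with the new Ethernet support enabled
CONFIG_PCI=y
CONFIG_NETDEVICES=y
CONFIG_ETHERNET=y
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y

Since MLX5_CORE_EN is a bool, the Ethernet objects are linked into mlx5_core itself (see the Makefile hunk below) rather than built as a separate module.
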
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 105780bb9..26a68b8af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -2,4 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o
+ mad.o transobj.o vport.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \
+ en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
+ en_txrx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index ac0f7bf4b..0715b4975 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -42,95 +42,36 @@
#include "mlx5_core.h"
/* Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0. If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
+ * register it in a memory region at HCA virtual address 0.
*/
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
- struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
{
dma_addr_t t;
buf->size = size;
- if (size <= max_direct) {
- buf->nbufs = 1;
- buf->npages = 1;
- buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
- if (!buf->direct.buf)
- return -ENOMEM;
-
- buf->direct.map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
- } else {
- int i;
-
- buf->direct.buf = NULL;
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- buf->npages = buf->nbufs;
- buf->page_shift = PAGE_SHIFT;
- buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- GFP_KERNEL);
- if (!buf->page_list)
- return -ENOMEM;
-
- for (i = 0; i < buf->nbufs; i++) {
- buf->page_list[i].buf =
- dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
- &t, GFP_KERNEL);
- if (!buf->page_list[i].buf)
- goto err_free;
-
- buf->page_list[i].map = t;
- }
-
- if (BITS_PER_LONG == 64) {
- struct page **pages;
- pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
- if (!pages)
- goto err_free;
- for (i = 0; i < buf->nbufs; i++)
- pages[i] = virt_to_page(buf->page_list[i].buf);
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- goto err_free;
- }
- }
+ buf->npages = 1;
+ buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
+ buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
+ size, &t, GFP_KERNEL);
+ if (!buf->direct.buf)
+ return -ENOMEM;
- return 0;
+ buf->direct.map = t;
-err_free:
- mlx5_buf_free(dev, buf);
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
- return -ENOMEM;
+ return 0;
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
- int i;
-
- if (buf->nbufs == 1)
- dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
- buf->direct.map);
- else {
- if (BITS_PER_LONG == 64)
- vunmap(buf->direct.buf);
-
- for (i = 0; i < buf->nbufs; i++)
- if (buf->page_list[i].buf)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
- buf->page_list[i].buf,
- buf->page_list[i].map);
- kfree(buf->page_list);
- }
+ dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
+ buf->direct.map);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
@@ -230,10 +171,7 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
int i;
for (i = 0; i < buf->npages; i++) {
- if (buf->nbufs == 1)
- addr = buf->direct.map + (i << buf->page_shift);
- else
- addr = buf->page_list[i].map;
+ addr = buf->direct.map + (i << buf->page_shift);
pas[i] = cpu_to_be64(addr);
}
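
With the multi-page path removed, the buffer API reduces to a single contiguous DMA allocation whose pages are exported through mlx5_fill_page_array(). A minimal sketch of a hypothetical caller, using only the functions touched in this hunk (error handling trimmed; not part of the patch):

static int example_alloc_queue_buf(struct mlx5_core_dev *mdev, int size)
{
	struct mlx5_buf buf;
	__be64 *pas;
	int err;

	err = mlx5_buf_alloc(mdev, size, &buf);	/* one dma_zalloc_coherent() region */
	if (err)
		return err;

	pas = kcalloc(buf.npages, sizeof(*pas), GFP_KERNEL);
	if (!pas) {
		mlx5_buf_free(mdev, &buf);
		return -ENOMEM;
	}

	mlx5_fill_page_array(&buf, pas);	/* one big-endian page address per entry */
	/* ... hand 'pas' to the firmware command that creates the queue ... */

	kfree(pas);
	mlx5_buf_free(mdev, &buf);
	return 0;
}
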
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e3273faf4..75ff58dc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -75,25 +75,6 @@ enum {
MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};
-enum {
- MLX5_CMD_STAT_OK = 0x0,
- MLX5_CMD_STAT_INT_ERR = 0x1,
- MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
- MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
- MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
- MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
- MLX5_CMD_STAT_RES_BUSY = 0x6,
- MLX5_CMD_STAT_LIM_ERR = 0x8,
- MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
- MLX5_CMD_STAT_IX_ERR = 0xa,
- MLX5_CMD_STAT_NO_RES_ERR = 0xf,
- MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
- MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
- MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
- MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
- MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
-};
-
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out,
@@ -390,8 +371,17 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ARM_RQ:
return "ARM_RQ";
- case MLX5_CMD_OP_RESIZE_SRQ:
- return "RESIZE_SRQ";
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return "CREATE_XRC_SRQ";
+
+ case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ return "DESTROY_XRC_SRQ";
+
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ return "QUERY_XRC_SRQ";
+
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ return "ARM_XRC_SRQ";
case MLX5_CMD_OP_ALLOC_PD:
return "ALLOC_PD";
@@ -408,8 +398,8 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ATTACH_TO_MCG:
return "ATTACH_TO_MCG";
- case MLX5_CMD_OP_DETACH_FROM_MCG:
- return "DETACH_FROM_MCG";
+ case MLX5_CMD_OP_DETTACH_FROM_MCG:
+ return "DETTACH_FROM_MCG";
case MLX5_CMD_OP_ALLOC_XRCD:
return "ALLOC_XRCD";
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index eb0cf81f5..04ab7e445 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -219,6 +219,24 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq,
+ u16 cq_period,
+ u16 cq_max_count)
+{
+ struct mlx5_modify_cq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.cqn = cpu_to_be32(cq->cqn);
+ in.ctx.cq_period = cpu_to_be16(cq_period);
+ in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
+ in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
+ MLX5_CQ_MODIFY_COUNT);
+
+ return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+}
+
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
new file mode 100644
index 000000000..3d23bd657
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/vport.h>
+#include "wq.h"
+#include "transobj.h"
+#include "mlx5_core.h"
+
+#define MLX5E_MAX_NUM_TC 8
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
+#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
+
+#define MLX5E_TX_CQ_POLL_BUDGET 128
+#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
+
+static const char vport_strings[][ETH_GSTRING_LEN] = {
+ /* vport statistics */
+ "rx_packets",
+ "rx_bytes",
+ "tx_packets",
+ "tx_bytes",
+ "rx_error_packets",
+ "rx_error_bytes",
+ "tx_error_packets",
+ "tx_error_bytes",
+ "rx_unicast_packets",
+ "rx_unicast_bytes",
+ "tx_unicast_packets",
+ "tx_unicast_bytes",
+ "rx_multicast_packets",
+ "rx_multicast_bytes",
+ "tx_multicast_packets",
+ "tx_multicast_bytes",
+ "rx_broadcast_packets",
+ "rx_broadcast_bytes",
+ "tx_broadcast_packets",
+ "tx_broadcast_bytes",
+
+ /* SW counters */
+ "tso_packets",
+ "tso_bytes",
+ "lro_packets",
+ "lro_bytes",
+ "rx_csum_good",
+ "rx_csum_none",
+ "tx_csum_offload",
+ "tx_queue_stopped",
+ "tx_queue_wake",
+ "tx_queue_dropped",
+ "rx_wqe_err",
+};
+
+struct mlx5e_vport_stats {
+ /* HW counters */
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_error_packets;
+ u64 rx_error_bytes;
+ u64 tx_error_packets;
+ u64 tx_error_bytes;
+ u64 rx_unicast_packets;
+ u64 rx_unicast_bytes;
+ u64 tx_unicast_packets;
+ u64 tx_unicast_bytes;
+ u64 rx_multicast_packets;
+ u64 rx_multicast_bytes;
+ u64 tx_multicast_packets;
+ u64 tx_multicast_bytes;
+ u64 rx_broadcast_packets;
+ u64 rx_broadcast_bytes;
+ u64 tx_broadcast_packets;
+ u64 tx_broadcast_bytes;
+
+ /* SW counters */
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 rx_csum_good;
+ u64 rx_csum_none;
+ u64 tx_csum_offload;
+ u64 tx_queue_stopped;
+ u64 tx_queue_wake;
+ u64 tx_queue_dropped;
+ u64 rx_wqe_err;
+
+#define NUM_VPORT_COUNTERS 31
+};
+
+static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "csum_none",
+ "lro_packets",
+ "lro_bytes",
+ "wqe_err"
+};
+
+struct mlx5e_rq_stats {
+ u64 packets;
+ u64 csum_none;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 wqe_err;
+#define NUM_RQ_STATS 5
+};
+
+static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "tso_packets",
+ "tso_bytes",
+ "csum_offload_none",
+ "stopped",
+ "wake",
+ "dropped",
+ "nop"
+};
+
+struct mlx5e_sq_stats {
+ u64 packets;
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 csum_offload_none;
+ u64 stopped;
+ u64 wake;
+ u64 dropped;
+ u64 nop;
+#define NUM_SQ_STATS 8
+};
+
+struct mlx5e_stats {
+ struct mlx5e_vport_stats vport;
+};
+
+struct mlx5e_params {
+ u8 log_sq_size;
+ u8 log_rq_size;
+ u16 num_channels;
+ u8 default_vlan_prio;
+ u8 num_tc;
+ u16 rx_cq_moderation_usec;
+ u16 rx_cq_moderation_pkts;
+ u16 tx_cq_moderation_usec;
+ u16 tx_cq_moderation_pkts;
+ u16 min_rx_wqes;
+ u16 rx_hash_log_tbl_sz;
+ bool lro_en;
+ u32 lro_wqe_sz;
+};
+
+enum {
+ MLX5E_RQ_STATE_POST_WQES_ENABLE,
+};
+
+enum cq_flags {
+ MLX5E_CQ_HAS_CQES = 1,
+};
+
+struct mlx5e_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+ unsigned long flags;
+
+ /* data path - accessed per napi poll */
+ struct napi_struct *napi;
+ struct mlx5_core_cq mcq;
+ struct mlx5e_channel *channel;
+
+ /* control */
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_rq {
+ /* data path */
+ struct mlx5_wq_ll wq;
+ u32 wqe_sz;
+ struct sk_buff **skb;
+
+ struct device *pdev;
+ struct net_device *netdev;
+ struct mlx5e_rq_stats stats;
+ struct mlx5e_cq cq;
+
+ unsigned long state;
+ int ix;
+
+ /* control */
+ struct mlx5_wq_ctrl wq_ctrl;
+ u32 rqn;
+ struct mlx5e_channel *channel;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_tx_skb_cb {
+ u32 num_bytes;
+ u8 num_wqebbs;
+ u8 num_dma;
+};
+
+#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
+
+struct mlx5e_sq_dma {
+ dma_addr_t addr;
+ u32 size;
+};
+
+enum {
+ MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+};
+
+struct mlx5e_sq {
+ /* data path */
+
+ /* dirtied @completion */
+ u16 cc;
+ u32 dma_fifo_cc;
+
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ u32 dma_fifo_pc;
+ u32 bf_offset;
+ struct mlx5e_sq_stats stats;
+
+ struct mlx5e_cq cq;
+
+ /* pointers to per packet info: write@xmit, read@completion */
+ struct sk_buff **skb;
+ struct mlx5e_sq_dma *dma_fifo;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ u32 dma_fifo_mask;
+ void __iomem *uar_map;
+ struct netdev_queue *txq;
+ u32 sqn;
+ u32 bf_buf_size;
+ u16 max_inline;
+ u16 edge;
+ struct device *pdev;
+ __be32 mkey_be;
+ unsigned long state;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_uar uar;
+ struct mlx5e_channel *channel;
+ int tc;
+} ____cacheline_aligned_in_smp;
+
+static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
+{
+ return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
+ (sq->cc == sq->pc));
+}
+
+enum channel_flags {
+ MLX5E_CHANNEL_NAPI_SCHED = 1,
+};
+
+struct mlx5e_channel {
+ /* data path */
+ struct mlx5e_rq rq;
+ struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
+ struct napi_struct napi;
+ struct device *pdev;
+ struct net_device *netdev;
+ __be32 mkey_be;
+ u8 num_tc;
+ unsigned long flags;
+ int tc_to_txq_map[MLX5E_MAX_NUM_TC];
+
+ /* control */
+ struct mlx5e_priv *priv;
+ int ix;
+ int cpu;
+};
+
+enum mlx5e_traffic_types {
+ MLX5E_TT_IPV4_TCP = 0,
+ MLX5E_TT_IPV6_TCP = 1,
+ MLX5E_TT_IPV4_UDP = 2,
+ MLX5E_TT_IPV6_UDP = 3,
+ MLX5E_TT_IPV4 = 4,
+ MLX5E_TT_IPV6 = 5,
+ MLX5E_TT_ANY = 6,
+ MLX5E_NUM_TT = 7,
+};
+
+enum {
+ MLX5E_RQT_SPREADING = 0,
+ MLX5E_RQT_DEFAULT_RQ = 1,
+ MLX5E_NUM_RQT = 2,
+};
+
+struct mlx5e_eth_addr_info {
+ u8 addr[ETH_ALEN + 2];
+ u32 tt_vec;
+ u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+};
+
+#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
+
+struct mlx5e_eth_addr_db {
+ struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct mlx5e_eth_addr_info broadcast;
+ struct mlx5e_eth_addr_info allmulti;
+ struct mlx5e_eth_addr_info promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
+};
+
+enum {
+ MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+ MLX5E_STATE_OPENED,
+};
+
+struct mlx5e_vlan_db {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u32 active_vlans_ft_ix[VLAN_N_VID];
+ u32 untagged_rule_ft_ix;
+ u32 any_vlan_rule_ft_ix;
+ bool filter_disabled;
+};
+
+struct mlx5e_flow_table {
+ void *vlan;
+ void *main;
+};
+
+struct mlx5e_priv {
+ /* priv data path fields - start */
+ int num_tc;
+ int default_vlan_prio;
+ struct mlx5e_sq **txq_to_sq_map;
+ /* priv data path fields - end */
+
+ unsigned long state;
+ struct mutex state_lock; /* Protects Interface state */
+ struct mlx5_uar cq_uar;
+ u32 pdn;
+ u32 tdn;
+ struct mlx5_core_mr mr;
+
+ struct mlx5e_channel **channel;
+ u32 tisn[MLX5E_MAX_NUM_TC];
+ u32 rqtn;
+ u32 tirn[MLX5E_NUM_TT];
+
+ struct mlx5e_flow_table ft;
+ struct mlx5e_eth_addr_db eth_addr;
+ struct mlx5e_vlan_db vlan;
+
+ struct mlx5e_params params;
+ spinlock_t async_events_spinlock; /* sync hw events */
+ struct work_struct update_carrier_work;
+ struct work_struct set_rx_mode_work;
+ struct delayed_work update_stats_work;
+
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_stats stats;
+};
+
+#define MLX5E_NET_IP_ALIGN 2
+
+struct mlx5e_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_eth_seg eth;
+};
+
+struct mlx5e_rx_wqe {
+ struct mlx5_wqe_srq_next_seg next;
+ struct mlx5_wqe_data_seg data;
+};
+
+enum mlx5e_link_mode {
+ MLX5E_1000BASE_CX_SGMII = 0,
+ MLX5E_1000BASE_KX = 1,
+ MLX5E_10GBASE_CX4 = 2,
+ MLX5E_10GBASE_KX4 = 3,
+ MLX5E_10GBASE_KR = 4,
+ MLX5E_20GBASE_KR2 = 5,
+ MLX5E_40GBASE_CR4 = 6,
+ MLX5E_40GBASE_KR4 = 7,
+ MLX5E_56GBASE_R4 = 8,
+ MLX5E_10GBASE_CR = 12,
+ MLX5E_10GBASE_SR = 13,
+ MLX5E_10GBASE_ER = 14,
+ MLX5E_40GBASE_SR4 = 15,
+ MLX5E_40GBASE_LR4 = 16,
+ MLX5E_100GBASE_CR4 = 20,
+ MLX5E_100GBASE_SR4 = 21,
+ MLX5E_100GBASE_KR4 = 22,
+ MLX5E_100GBASE_LR4 = 23,
+ MLX5E_100BASE_TX = 24,
+ MLX5E_100BASE_T = 25,
+ MLX5E_10GBASE_T = 26,
+ MLX5E_25GBASE_CR = 27,
+ MLX5E_25GBASE_KR = 28,
+ MLX5E_25GBASE_SR = 29,
+ MLX5E_50GBASE_CR2 = 30,
+ MLX5E_50GBASE_KR2 = 31,
+ MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq);
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+
+void mlx5e_update_stats(struct mlx5e_priv *priv);
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv);
+void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_work(struct work_struct *work);
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid);
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid);
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_open_locked(struct net_device *netdev);
+int mlx5e_close_locked(struct net_device *netdev);
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params);
+
+static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
+ struct mlx5e_tx_wqe *wqe)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ *sq->wq.db = cpu_to_be32(sq->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)&wqe->ctrl,
+ sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
+ NULL);
+
+ sq->bf_offset ^= sq->bf_buf_size;
+}
+
+static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
+{
+ struct mlx5_core_cq *mcq;
+
+ mcq = &cq->mcq;
+ mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
+}
+
+extern const struct ethtool_ops mlx5e_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
new file mode 100644
index 000000000..388938482
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+static void mlx5e_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+ strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static const struct {
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = {
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ [MLX5E_1000BASE_KX] = {
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ [MLX5E_10GBASE_CX4] = {
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_KX4] = {
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_KR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_20GBASE_KR2] = {
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ [MLX5E_40GBASE_CR4] = {
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_40GBASE_KR4] = {
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_56GBASE_R4] = {
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ [MLX5E_10GBASE_CR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_SR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_ER] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_40GBASE_SR4] = {
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_40GBASE_LR4] = {
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_100GBASE_CR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_SR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_KR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_LR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100BASE_TX] = {
+ .speed = 100,
+ },
+ [MLX5E_100BASE_T] = {
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ [MLX5E_10GBASE_T] = {
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 1000,
+ },
+ [MLX5E_25GBASE_CR] = {
+ .speed = 25000,
+ },
+ [MLX5E_25GBASE_KR] = {
+ .speed = 25000,
+ },
+ [MLX5E_25GBASE_SR] = {
+ .speed = 25000,
+ },
+ [MLX5E_50GBASE_CR2] = {
+ .speed = 50000,
+ },
+ [MLX5E_50GBASE_KR2] = {
+ .speed = 50000,
+ },
+};
+
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return NUM_VPORT_COUNTERS +
+ priv->params.num_channels * NUM_RQ_STATS +
+ priv->params.num_channels * priv->num_tc *
+ NUM_SQ_STATS;
+ /* fallthrough */
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlx5e_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ int i, j, tc, idx = 0;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ break;
+
+ case ETH_SS_TEST:
+ break;
+
+ case ETH_SS_STATS:
+ /* VPORT counters */
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_strings[i]);
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ "rx%d_%s", i, rq_stats_strings[j]);
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->num_tc; tc++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data +
+ (idx++) * ETH_GSTRING_LEN,
+ "tx%d_%d_%s", i, tc,
+ sq_stats_strings[j]);
+ break;
+ }
+}
+
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int i, j, tc, idx = 0;
+
+ if (!data)
+ return;
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_stats(priv);
+ mutex_unlock(&priv->state_lock);
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ data[idx++] = ((u64 *)&priv->stats.vport)[i];
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+ &priv->state) ? 0 :
+ ((u64 *)&priv->channel[i]->rq.stats)[j];
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->num_tc; tc++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+ &priv->state) ? 0 :
+ ((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+}
+
+static void mlx5e_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
+ param->rx_pending = 1 << priv->params.log_rq_size;
+ param->tx_pending = 1 << priv->params.log_sq_size;
+}
+
+static int mlx5e_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_params new_params;
+ u16 min_rx_wqes;
+ u8 log_rq_size;
+ u8 log_sq_size;
+ int err = 0;
+
+ if (param->rx_jumbo_pending) {
+ netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (param->rx_mini_pending) {
+ netdev_info(dev, "%s: rx_mini_pending not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+ netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+ __func__, param->rx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+ netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+ __func__, param->rx_pending,
+ 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+ netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+ __func__, param->tx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
+ netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+ __func__, param->tx_pending,
+ 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
+ return -EINVAL;
+ }
+
+ log_rq_size = order_base_2(param->rx_pending);
+ log_sq_size = order_base_2(param->tx_pending);
+ min_rx_wqes = min_t(u16, param->rx_pending - 1,
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+
+ if (log_rq_size == priv->params.log_rq_size &&
+ log_sq_size == priv->params.log_sq_size &&
+ min_rx_wqes == priv->params.min_rx_wqes)
+ return 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+ new_params.log_rq_size = log_rq_size;
+ new_params.log_sq_size = log_sq_size;
+ new_params.min_rx_wqes = min_rx_wqes;
+ err = mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static void mlx5e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+
+ ch->max_combined = ncv;
+ ch->combined_count = priv->params.num_channels;
+}
+
+static int mlx5e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+ unsigned int count = ch->combined_count;
+ struct mlx5e_params new_params;
+ int err = 0;
+
+ if (!count) {
+ netdev_info(dev, "%s: combined_count=0 not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (ch->rx_count || ch->tx_count) {
+ netdev_info(dev, "%s: separate rx/tx count not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (count > ncv) {
+ netdev_info(dev, "%s: count (%d) > max (%d)\n",
+ __func__, count, ncv);
+ return -EINVAL;
+ }
+
+ if (priv->params.num_channels == count)
+ return 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+ new_params.num_channels = count;
+ err = mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static int mlx5e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
+ coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
+ coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
+ coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts;
+
+ return 0;
+}
+
+static int mlx5e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channel *c;
+ int tc;
+ int i;
+
+ priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
+ priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
+ priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
+ priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+
+ for (i = 0; i < priv->params.num_channels; ++i) {
+ c = priv->channel[i];
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ mlx5_core_modify_cq_moderation(mdev,
+ &c->sq[tc].cq.mcq,
+ coal->tx_coalesce_usecs,
+ coal->tx_max_coalesced_frames);
+ }
+
+ mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+ coal->rx_coalesce_usecs,
+ coal->rx_max_coalesced_frames);
+ }
+
+ return 0;
+}
+
+static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
+{
+ int i;
+ u32 supported_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_cap & MLX5E_PROT_MASK(i))
+ supported_modes |= ptys2ethtool_table[i].supported;
+ }
+ return supported_modes;
+}
+
+static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
+{
+ int i;
+ u32 advertising_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_cap & MLX5E_PROT_MASK(i))
+ advertising_modes |= ptys2ethtool_table[i].advertised;
+ }
+ return advertising_modes;
+}
+
+static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
+{
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ return SUPPORTED_FIBRE;
+ }
+
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
+ return SUPPORTED_Backplane;
+ }
+ return 0;
+}
+
+static void get_speed_duplex(struct net_device *netdev,
+ u32 eth_proto_oper,
+ struct ethtool_cmd *cmd)
+{
+ int i;
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+
+ if (!netif_carrier_ok(netdev))
+ goto out;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
+ speed = ptys2ethtool_table[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static void get_supported(u32 eth_proto_cap, u32 *supported)
+{
+ *supported |= ptys2ethtool_supported_port(eth_proto_cap);
+ *supported |= ptys2ethtool_supported_link(eth_proto_cap);
+ *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+}
+
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
+ u8 rx_pause, u32 *advertising)
+{
+ *advertising |= ptys2ethtool_adver_link(eth_proto_cap);
+ *advertising |= tx_pause ? ADVERTISED_Pause : 0;
+ *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
+}
+
+static u8 get_connector_port(u32 eth_proto)
+{
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ return PORT_FIBRE;
+ }
+
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
+ return PORT_DA;
+ }
+
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
+ return PORT_NONE;
+ }
+
+ return PORT_OTHER;
+}
+
+static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
+{
+ *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
+}
+
+static int mlx5e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_lp;
+ u32 eth_proto_oper;
+ int err;
+
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
+
+ if (err) {
+ netdev_err(netdev, "%s: query port ptys failed: %d\n",
+ __func__, err);
+ goto err_query_ptys;
+ }
+
+ eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+
+ cmd->supported = 0;
+ cmd->advertising = 0;
+
+ get_supported(eth_proto_cap, &cmd->supported);
+ get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
+ get_speed_duplex(netdev, eth_proto_oper, cmd);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+
+ cmd->port = get_connector_port(eth_proto_oper);
+ get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
+
+ cmd->transceiver = XCVR_INTERNAL;
+
+err_query_ptys:
+ return err;
+}
+
+static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
+{
+ u32 i, ptys_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (ptys2ethtool_table[i].advertised & link_modes)
+ ptys_modes |= MLX5E_PROT_MASK(i);
+ }
+
+ return ptys_modes;
+}
+
+static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
+{
+ u32 i, speed_links = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (ptys2ethtool_table[i].speed == speed)
+ speed_links |= MLX5E_PROT_MASK(i);
+ }
+
+ return speed_links;
+}
+
+static int mlx5e_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 link_modes;
+ u32 speed;
+ u32 eth_proto_cap, eth_proto_admin;
+ u8 port_status;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ link_modes = cmd->autoneg == AUTONEG_ENABLE ?
+ mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+ mlx5e_ethtool2ptys_speed_link(speed);
+
+ err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ link_modes = link_modes & eth_proto_cap;
+ if (!link_modes) {
+ netdev_err(netdev, "%s: Not supported link mode(s) requested",
+ __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ if (link_modes == eth_proto_admin)
+ goto out;
+
+ err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = mlx5_query_port_status(mdev, &port_status);
+ if (err)
+ goto out;
+
+ if (port_status == MLX5_PORT_DOWN)
+ return 0;
+
+ err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
+ if (err)
+ goto out;
+ err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
+out:
+ return err;
+}
+
+const struct ethtool_ops mlx5e_ethtool_ops = {
+ .get_drvinfo = mlx5e_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx5e_get_strings,
+ .get_sset_count = mlx5e_get_sset_count,
+ .get_ethtool_stats = mlx5e_get_ethtool_stats,
+ .get_ringparam = mlx5e_get_ringparam,
+ .set_ringparam = mlx5e_set_ringparam,
+ .get_channels = mlx5e_get_channels,
+ .set_channels = mlx5e_set_channels,
+ .get_coalesce = mlx5e_get_coalesce,
+ .set_coalesce = mlx5e_set_coalesce,
+ .get_settings = mlx5e_get_settings,
+ .set_settings = mlx5e_set_settings,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
new file mode 100644
index 000000000..120db80c4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -0,0 +1,860 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+enum {
+ MLX5E_FULLMATCH = 0,
+ MLX5E_ALLMULTI = 1,
+ MLX5E_PROMISC = 2,
+};
+
+enum {
+ MLX5E_UC = 0,
+ MLX5E_MC_IPV4 = 1,
+ MLX5E_MC_IPV6 = 2,
+ MLX5E_MC_OTHER = 3,
+};
+
+enum {
+ MLX5E_ACTION_NONE = 0,
+ MLX5E_ACTION_ADD = 1,
+ MLX5E_ACTION_DEL = 2,
+};
+
+struct mlx5e_eth_addr_hash_node {
+ struct hlist_node hlist;
+ u8 action;
+ struct mlx5e_eth_addr_info ai;
+};
+
+static inline int mlx5e_hash_eth_addr(u8 *addr)
+{
+ return addr[5];
+}
+
+static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ int ix = mlx5e_hash_eth_addr(addr);
+ int found = 0;
+
+ hlist_for_each_entry(hn, &hash[ix], hlist)
+ if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
+ found = 1;
+ break;
+ }
+
+ if (found) {
+ hn->action = MLX5E_ACTION_NONE;
+ return;
+ }
+
+ hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
+ if (!hn)
+ return;
+
+ ether_addr_copy(hn->ai.addr, addr);
+ hn->action = MLX5E_ACTION_ADD;
+
+ hlist_add_head(&hn->hlist, &hash[ix]);
+}
+
+static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+{
+ hlist_del(&hn->hlist);
+ kfree(hn);
+}
+
+static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai)
+{
+ void *ft = priv->ft.main;
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
+}
+
+static int mlx5e_get_eth_addr_type(u8 *addr)
+{
+ if (is_unicast_ether_addr(addr))
+ return MLX5E_UC;
+
+ if ((addr[0] == 0x01) &&
+ (addr[1] == 0x00) &&
+ (addr[2] == 0x5e) &&
+ !(addr[3] & 0x80))
+ return MLX5E_MC_IPV4;
+
+ if ((addr[0] == 0x33) &&
+ (addr[1] == 0x33))
+ return MLX5E_MC_IPV6;
+
+ return MLX5E_MC_OTHER;
+}
+
+static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
+{
+ int eth_addr_type;
+ u32 ret;
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
+ switch (eth_addr_type) {
+ case MLX5E_UC:
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV4:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV6:
+ ret =
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV6) |
+ 0;
+ break;
+
+ case MLX5E_MC_OTHER:
+ ret =
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+
+ break;
+
+ case MLX5E_ALLMULTI:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ default: /* MLX5E_PROMISC */
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+
+ return ret;
+}
+
+static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type,
+ void *flow_context, void *match_criteria)
+{
+ u8 match_criteria_enable = 0;
+ void *match_value;
+ void *dest;
+ u8 *dmac;
+ u8 *match_criteria_dmac;
+ void *ft = priv->ft.main;
+ u32 *tirn = priv->tirn;
+ u32 tt_vec;
+ int err;
+
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dmac_47_16);
+ match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ memset(match_criteria_dmac, 0xff, ETH_ALEN);
+ ether_addr_copy(dmac, ai->addr);
+ break;
+
+ case MLX5E_ALLMULTI:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ match_criteria_dmac[0] = 0x01;
+ dmac[0] = 0x01;
+ break;
+
+ case MLX5E_PROMISC:
+ break;
+ }
+
+ tt_vec = mlx5e_get_tt_vec(ai, type);
+
+ if (tt_vec & (1 << MLX5E_TT_ANY)) {
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_ANY]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_ANY]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_ANY);
+ }
+
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ethertype);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_UDP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_TCP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+ }
+
+ return 0;
+}
+
+static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type)
+{
+ u32 *flow_context;
+ u32 *match_criteria;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_eth_addr_rule_out;
+ }
+
+ err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
+ match_criteria);
+ if (err)
+ netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_eth_addr_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return err;
+}
+
+enum mlx5e_vlan_rule_type {
+ MLX5E_VLAN_RULE_TYPE_UNTAGGED,
+ MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+};
+
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ u8 match_criteria_enable = 0;
+ u32 *flow_context;
+ void *match_value;
+ void *dest;
+ u32 *match_criteria;
+ u32 *ft_ix;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_vlan_rule_out;
+ }
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ mlx5_get_flow_table_id(priv->ft.main));
+
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.vlan_tag);
+
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ ft_ix = &priv->vlan.untagged_rule_ft_ix;
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
+ MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ 1);
+ break;
+ default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
+ ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
+ MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ 1);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
+ vid);
+ break;
+ }
+
+ err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
+ match_criteria, flow_context, ft_ix);
+ if (err)
+ netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_vlan_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return err;
+}
+
+static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.untagged_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.any_vlan_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.active_vlans_ft_ix[vid]);
+ break;
+ }
+}
+
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ if (priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = false;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ if (!priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = true;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ set_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ vid);
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mutex_lock(&priv->state_lock);
+
+ clear_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+ int err;
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ vid);
+ if (err)
+ return err;
+ }
+
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ return err;
+
+ if (priv->vlan.filter_disabled) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+
+ if (priv->vlan.filter_disabled)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+}
+
+#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
+ for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+ hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
+
+static void mlx5e_execute_action(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_hash_node *hn)
+{
+ switch (hn->action) {
+ case MLX5E_ACTION_ADD:
+ mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ hn->action = MLX5E_ACTION_NONE;
+ break;
+
+ case MLX5E_ACTION_DEL:
+ mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
+ mlx5e_del_eth_addr_from_hash(hn);
+ break;
+ }
+}
+
+static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(netdev);
+
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
+ priv->netdev->dev_addr);
+
+ netdev_for_each_uc_addr(ha, netdev)
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+
+ netdev_for_each_mc_addr(ha, netdev)
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+
+ netif_addr_unlock_bh(netdev);
+}
+
+static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct hlist_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ mlx5e_execute_action(priv, hn);
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ mlx5e_execute_action(priv, hn);
+}
+
+static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct hlist_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ hn->action = MLX5E_ACTION_DEL;
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ hn->action = MLX5E_ACTION_DEL;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_sync_netdev_addr(priv);
+
+ mlx5e_apply_netdev_addr(priv);
+}
+
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct net_device *ndev = priv->netdev;
+
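+	/* Derive the desired RX mode from the netdev flags and compare it with
+	 * the currently programmed state to decide which promisc/allmulti/
+	 * broadcast rules to add or remove below.
+	 */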
+ bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
+ bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+ bool broadcast_enabled = rx_mode_enable;
+
+ bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
+ bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
+ bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
+ bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
+ bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
+ bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
+
+ if (enable_promisc)
+ mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+ if (enable_allmulti)
+ mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ if (enable_broadcast)
+ mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+
+ mlx5e_handle_netdev_addr(priv);
+
+ if (disable_broadcast)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+ if (disable_allmulti)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+ if (disable_promisc)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+
+ ea->promisc_enabled = promisc_enabled;
+ ea->allmulti_enabled = allmulti_enabled;
+ ea->broadcast_enabled = broadcast_enabled;
+}
+
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_set_rx_mode_core(priv);
+ mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+{
+ ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+}
+
+static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+ u8 *dmac;
+
+ g = kcalloc(9, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
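+	/* Groups 0-2: no DMAC in the match criteria; most specific first
+	 * (ethertype + ip_protocol, then ethertype only, then wildcard).
+	 */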
+ g[0].log_sz = 2;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.ethertype);
+
+ g[2].log_sz = 0;
+
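+	/* Groups 3-5: same layering, plus an exact-match destination MAC */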
+ g[3].log_sz = 14;
+ g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[4].log_sz = 13;
+ g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
+ outer_headers.ethertype);
+
+ g[5].log_sz = 11;
+ g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+
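+	/* Groups 6-8: match only the multicast bit of the destination MAC */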
+ g[6].log_sz = 2;
+ g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[7].log_sz = 1;
+ g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
+ outer_headers.ethertype);
+
+ g[8].log_sz = 0;
+ g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 9, g);
+ kfree(g);
+
+ return priv->ft.main ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.main);
+}
+
+static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+
+ g = kcalloc(2, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
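+	/* per-VID rules: vlan_tag + first_vid, one entry per possible VID */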
+ g[0].log_sz = 12;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.first_vid);
+
+ /* untagged + any vlan id */
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.vlan_tag);
+
+ priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 2, g);
+
+ kfree(g);
+ return priv->ft.vlan ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.vlan);
+}
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_create_main_flow_table(priv);
+ if (err)
+ return err;
+
+ err = mlx5e_create_vlan_flow_table(priv);
+ if (err)
+ goto err_destroy_main_flow_table;
+
+ return 0;
+
+err_destroy_main_flow_table:
+ mlx5e_destroy_main_flow_table(priv);
+
+ return err;
+}
+
+void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5e_destroy_vlan_flow_table(priv);
+ mlx5e_destroy_main_flow_table(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
new file mode 100644
index 000000000..40206da1f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -0,0 +1,1915 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+struct mlx5e_rq_param {
+ u32 rqc[MLX5_ST_SZ_DW(rqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_sq_param {
+ u32 sqc[MLX5_ST_SZ_DW(sqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_cq_param {
+ u32 cqc[MLX5_ST_SZ_DW(cqc)];
+ struct mlx5_wq_param wq;
+ u16 eq_ix;
+};
+
+struct mlx5e_channel_param {
+ struct mlx5e_rq_param rq;
+ struct mlx5e_sq_param sq;
+ struct mlx5e_cq_param rx_cq;
+ struct mlx5e_cq_param tx_cq;
+};
+
+static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 port_state;
+
+ port_state = mlx5_query_vport_state(mdev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
+
+ if (port_state == VPORT_STATE_UP)
+ netif_carrier_on(priv->netdev);
+ else
+ netif_carrier_off(priv->netdev);
+}
+
+static void mlx5e_update_carrier_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ update_carrier_work);
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_carrier(priv);
+ mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_vport_stats *s = &priv->stats.vport;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u64 tx_offload_none;
+ int i, j;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return;
+
+	/* Collect the SW counters first and then the HW counters, for consistency */
+ s->tso_packets = 0;
+ s->tso_bytes = 0;
+ s->tx_queue_stopped = 0;
+ s->tx_queue_wake = 0;
+ s->tx_queue_dropped = 0;
+ tx_offload_none = 0;
+ s->lro_packets = 0;
+ s->lro_bytes = 0;
+ s->rx_csum_none = 0;
+ s->rx_wqe_err = 0;
+ for (i = 0; i < priv->params.num_channels; i++) {
+ rq_stats = &priv->channel[i]->rq.stats;
+
+ s->lro_packets += rq_stats->lro_packets;
+ s->lro_bytes += rq_stats->lro_bytes;
+ s->rx_csum_none += rq_stats->csum_none;
+ s->rx_wqe_err += rq_stats->wqe_err;
+
+ for (j = 0; j < priv->num_tc; j++) {
+ sq_stats = &priv->channel[i]->sq[j].stats;
+
+ s->tso_packets += sq_stats->tso_packets;
+ s->tso_bytes += sq_stats->tso_bytes;
+ s->tx_queue_stopped += sq_stats->stopped;
+ s->tx_queue_wake += sq_stats->wake;
+ s->tx_queue_dropped += sq_stats->dropped;
+ tx_offload_none += sq_stats->csum_offload_none;
+ }
+ }
+
+ /* HW counters */
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_vport_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+ MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+
+ memset(out, 0, outlen);
+
+ if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+ goto free_out;
+
+#define MLX5_GET_CTR(p, x) \
+ MLX5_GET64(query_vport_counter_out, p, x)
+
+ s->rx_error_packets =
+ MLX5_GET_CTR(out, received_errors.packets);
+ s->rx_error_bytes =
+ MLX5_GET_CTR(out, received_errors.octets);
+ s->tx_error_packets =
+ MLX5_GET_CTR(out, transmit_errors.packets);
+ s->tx_error_bytes =
+ MLX5_GET_CTR(out, transmit_errors.octets);
+
+ s->rx_unicast_packets =
+ MLX5_GET_CTR(out, received_eth_unicast.packets);
+ s->rx_unicast_bytes =
+ MLX5_GET_CTR(out, received_eth_unicast.octets);
+ s->tx_unicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
+ s->tx_unicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
+
+ s->rx_multicast_packets =
+ MLX5_GET_CTR(out, received_eth_multicast.packets);
+ s->rx_multicast_bytes =
+ MLX5_GET_CTR(out, received_eth_multicast.octets);
+ s->tx_multicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
+ s->tx_multicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
+
+ s->rx_broadcast_packets =
+ MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ s->rx_broadcast_bytes =
+ MLX5_GET_CTR(out, received_eth_broadcast.octets);
+ s->tx_broadcast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+ s->tx_broadcast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+ s->rx_packets =
+ s->rx_unicast_packets +
+ s->rx_multicast_packets +
+ s->rx_broadcast_packets;
+ s->rx_bytes =
+ s->rx_unicast_bytes +
+ s->rx_multicast_bytes +
+ s->rx_broadcast_bytes;
+ s->tx_packets =
+ s->tx_unicast_packets +
+ s->tx_multicast_packets +
+ s->tx_broadcast_packets;
+ s->tx_bytes =
+ s->tx_unicast_bytes +
+ s->tx_multicast_bytes +
+ s->tx_broadcast_bytes;
+
+ /* Update calculated offload counters */
+ s->tx_csum_offload = s->tx_packets - tx_offload_none;
+ s->rx_csum_good = s->rx_packets - s->rx_csum_none;
+
+free_out:
+ kvfree(out);
+}
+
+static void mlx5e_update_stats_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+ update_stats_work);
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_update_stats(priv);
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(
+ MLX5E_UPDATE_STATS_INTERVAL));
+ }
+ mutex_unlock(&priv->state_lock);
+}
+
+static void __mlx5e_async_event(struct mlx5e_priv *priv,
+ enum mlx5_dev_event event)
+{
+ switch (event) {
+ case MLX5_DEV_EVENT_PORT_UP:
+ case MLX5_DEV_EVENT_PORT_DOWN:
+ schedule_work(&priv->update_carrier_work);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+ enum mlx5_dev_event event, unsigned long param)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ spin_lock(&priv->async_events_spinlock);
+ if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ __mlx5e_async_event(priv, event);
+ spin_unlock(&priv->async_events_spinlock);
+}
+
+static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
+{
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+}
+
+static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
+{
+ spin_lock_irq(&priv->async_events_spinlock);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ spin_unlock_irq(&priv->async_events_spinlock);
+}
+
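+/* The SW (netdev) MTU excludes the Ethernet header, one VLAN tag and the FCS,
+ * all of which are counted in the HW port MTU.
+ */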
+#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
+static int mlx5e_create_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqc = param->rqc;
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int wq_sz;
+ int err;
+ int i;
+
+ err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ &rq->wq_ctrl);
+ if (err)
+ return err;
+
+ rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
+
+ wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!rq->skb) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+
+ rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
+ MLX5E_SW2HW_MTU(priv->netdev->mtu);
+ rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+ u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
+
+ wqe->data.lkey = c->mkey_be;
+ wqe->data.byte_count =
+ cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+ }
+
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->channel = c;
+ rq->ix = c->ix;
+
+ return 0;
+
+err_rq_wq_destroy:
+ mlx5_wq_destroy(&rq->wq_ctrl);
+
+ return err;
+}
+
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+{
+ kfree(rq->skb);
+ mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+ sizeof(u64) * rq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ memcpy(rqc, param->rqc, sizeof(param->rqc));
+
+ MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&rq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_rq(mdev, rq->rqn);
+}
+
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_wq_ll *wq = &rq->wq;
+ int i;
+
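+	/* poll for up to ~20 seconds (1000 * 20ms) for the RQ to be filled
+	 * with at least min_rx_wqes receive descriptors
+	 */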
+ for (i = 0; i < 1000; i++) {
+ if (wq->cur_sz >= priv->params.min_rx_wqes)
+ return 0;
+
+ msleep(20);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mlx5e_open_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ int err;
+
+ err = mlx5e_create_rq(c, param, rq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_rq(rq, param);
+ if (err)
+ goto err_destroy_rq;
+
+ err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ goto err_disable_rq;
+
+ set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
+
+ return 0;
+
+err_disable_rq:
+ mlx5e_disable_rq(rq);
+err_destroy_rq:
+ mlx5e_destroy_rq(rq);
+
+ return err;
+}
+
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+ clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+
+ mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+ while (!mlx5_wq_ll_is_empty(&rq->wq))
+ msleep(20);
+
+ /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
+ napi_synchronize(&rq->channel->napi);
+
+ mlx5e_disable_rq(rq);
+ mlx5e_destroy_rq(rq);
+}
+
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->dma_fifo);
+ kfree(sq->skb);
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
+ sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
+ numa);
+
+ if (!sq->skb || !sq->dma_fifo) {
+ mlx5e_free_sq_db(sq);
+ return -ENOMEM;
+ }
+
+ sq->dma_fifo_mask = df_sz - 1;
+
+ return 0;
+}
+
+static int mlx5e_create_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *sqc = param->sqc;
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ int txq_ix;
+ int err;
+
+ err = mlx5_alloc_map_uar(mdev, &sq->uar);
+ if (err)
+ return err;
+
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
+ &sq->wq_ctrl);
+ if (err)
+ goto err_unmap_free_uar;
+
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+ sq->uar_map = sq->uar.map;
+ sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
+ if (err)
+ goto err_sq_wq_destroy;
+
+ txq_ix = c->ix + tc * priv->params.num_channels;
+ sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
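+	/* highest producer index at which a maximum-size WQE still fits
+	 * without wrapping the work queue
+	 */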
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ priv->txq_to_sq_map[txq_ix] = sq;
+
+ return 0;
+
+err_sq_wq_destroy:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &sq->uar);
+
+ return err;
+}
+
+static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+
+ mlx5e_free_sq_db(sq);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+}
+
+static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *sqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, param->sqc, sizeof(param->sqc));
+
+ MLX5_SET(sqc, sqc, user_index, sq->tc);
+ MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, sq->uar.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *sqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+}
+
+static int mlx5e_open_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ int err;
+
+ err = mlx5e_create_sq(c, tc, param, sq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_sq(sq, param);
+ if (err)
+ goto err_destroy_sq;
+
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+ if (err)
+ goto err_disable_sq;
+
+ set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+
+ return 0;
+
+err_disable_sq:
+ mlx5e_disable_sq(sq);
+err_destroy_sq:
+ mlx5e_destroy_sq(sq);
+
+ return err;
+}
+
+static inline void netif_tx_disable_queue(struct netdev_queue *txq)
+{
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+}
+
+static void mlx5e_close_sq(struct mlx5e_sq *sq)
+{
+ clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
+ netif_tx_disable_queue(sq->txq);
+
+ /* ensure hw is notified of all pending wqes */
+ if (mlx5e_sq_has_room_for(sq, 1))
+ mlx5e_send_nop(sq, true);
+
+ mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ while (sq->cc != sq->pc) /* wait till sq is empty */
+ msleep(20);
+
+ /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
+ napi_synchronize(&sq->channel->napi);
+
+ mlx5e_disable_sq(sq);
+ mlx5e_destroy_sq(sq);
+}
+
+static int mlx5e_create_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int eqn_not_used;
+ int irqn;
+ int err;
+ u32 i;
+
+ param->wq.numa = cpu_to_node(c->cpu);
+ param->eq_ix = c->ix;
+
+ err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+ cq->napi = &c->napi;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ *mcq->set_ci_db = 0;
+ *mcq->arm_db = 0;
+ mcq->vector = param->eq_ix;
+ mcq->comp = mlx5e_completion_event;
+ mcq->event = mlx5e_cq_error_event;
+ mcq->irqn = irqn;
+ mcq->uar = &priv->cq_uar;
+
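+	/* Mark every CQE as invalid/unowned so the poll loop does not treat
+	 * stale ring contents as completions before HW writes them.
+	 */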
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->channel = c;
+
+ return 0;
+}
+
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+{
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+
+ void *in;
+ void *cqc;
+ int inlen;
+ int irqn_not_used;
+ int eqn;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, param->cqc, sizeof(param->cqc));
+
+ mlx5_fill_page_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+
+ kvfree(in);
+
+ if (err)
+ return err;
+
+ mlx5e_cq_arm(cq);
+
+ return 0;
+}
+
+static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_cq(mdev, &cq->mcq);
+}
+
+static int mlx5e_open_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq,
+ u16 moderation_usecs,
+ u16 moderation_frames)
+{
+ int err;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ err = mlx5e_create_cq(c, param, cq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_cq(cq, param);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+ moderation_usecs,
+ moderation_frames);
+ if (err)
+ goto err_destroy_cq;
+
+ return 0;
+
+err_destroy_cq:
+ mlx5e_destroy_cq(cq);
+
+ return err;
+}
+
+static void mlx5e_close_cq(struct mlx5e_cq *cq)
+{
+ mlx5e_disable_cq(cq);
+ mlx5e_destroy_cq(cq);
+}
+
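+/* A channel runs on the first CPU in the affinity mask of the completion
+ * vector assigned to its index.
+ */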
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+ return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
+static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ struct mlx5e_priv *priv = c->priv;
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
+ priv->params.tx_cq_moderation_usec,
+ priv->params.tx_cq_moderation_pkts);
+ if (err)
+ goto err_close_tx_cqs;
+ }
+
+ return 0;
+
+err_close_tx_cqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_cq(&c->sq[tc].cq);
+
+ return err;
+}
+
+static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_cq(&c->sq[tc].cq);
+}
+
+static int mlx5e_open_sqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+ if (err)
+ goto err_close_sqs;
+ }
+
+ return 0;
+
+err_close_sqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_sq(&c->sq[tc]);
+
+ return err;
+}
+
+static void mlx5e_close_sqs(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_sq(&c->sq[tc]);
+}
+
+static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
+ int num_channels)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
+ c->tc_to_txq_map[i] = c->ix + i * num_channels;
+}
+
+static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_channel_param *cparam,
+ struct mlx5e_channel **cp)
+{
+ struct net_device *netdev = priv->netdev;
+ int cpu = mlx5e_get_cpu(priv, ix);
+ struct mlx5e_channel *c;
+ int err;
+
+ c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+ if (!c)
+ return -ENOMEM;
+
+ c->priv = priv;
+ c->ix = ix;
+ c->cpu = cpu;
+ c->pdev = &priv->mdev->pdev->dev;
+ c->netdev = priv->netdev;
+ c->mkey_be = cpu_to_be32(priv->mr.key);
+ c->num_tc = priv->num_tc;
+
+ mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
+
+ netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+
+ err = mlx5e_open_tx_cqs(c, cparam);
+ if (err)
+ goto err_napi_del;
+
+ err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
+ priv->params.rx_cq_moderation_usec,
+ priv->params.rx_cq_moderation_pkts);
+ if (err)
+ goto err_close_tx_cqs;
+
+ napi_enable(&c->napi);
+
+ err = mlx5e_open_sqs(c, cparam);
+ if (err)
+ goto err_disable_napi;
+
+ err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+ if (err)
+ goto err_close_sqs;
+
+ netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
+ *cp = c;
+
+ return 0;
+
+err_close_sqs:
+ mlx5e_close_sqs(c);
+
+err_disable_napi:
+ napi_disable(&c->napi);
+ mlx5e_close_cq(&c->rq.cq);
+
+err_close_tx_cqs:
+ mlx5e_close_tx_cqs(c);
+
+err_napi_del:
+ netif_napi_del(&c->napi);
+ kfree(c);
+
+ return err;
+}
+
+static void mlx5e_close_channel(struct mlx5e_channel *c)
+{
+ mlx5e_close_rq(&c->rq);
+ mlx5e_close_sqs(c);
+ napi_disable(&c->napi);
+ mlx5e_close_cq(&c->rq.cq);
+ mlx5e_close_tx_cqs(c);
+ netif_napi_del(&c->napi);
+ kfree(c);
+}
+
+static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.linear = 1;
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+}
+
+static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+ struct mlx5e_channel_param *cparam)
+{
+ memset(cparam, 0, sizeof(*cparam));
+
+ mlx5e_build_rq_param(priv, &cparam->rq);
+ mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
+ mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+}
+
+static int mlx5e_open_channels(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channel_param cparam;
+ int err = -ENOMEM;
+ int i;
+ int j;
+
+ priv->channel = kcalloc(priv->params.num_channels,
+ sizeof(struct mlx5e_channel *), GFP_KERNEL);
+
+ priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+ sizeof(struct mlx5e_sq *), GFP_KERNEL);
+
+ if (!priv->channel || !priv->txq_to_sq_map)
+ goto err_free_txq_to_sq_map;
+
+ mlx5e_build_channel_param(priv, &cparam);
+ for (i = 0; i < priv->params.num_channels; i++) {
+ err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+ if (err)
+ goto err_close_channels;
+ }
+
+ for (j = 0; j < priv->params.num_channels; j++) {
+ err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+ if (err)
+ goto err_close_channels;
+ }
+
+ return 0;
+
+err_close_channels:
+ for (i--; i >= 0; i--)
+ mlx5e_close_channel(priv->channel[i]);
+
+err_free_txq_to_sq_map:
+ kfree(priv->txq_to_sq_map);
+ kfree(priv->channel);
+
+ return err;
+}
+
+static void mlx5e_close_channels(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_close_channel(priv->channel[i]);
+
+ kfree(priv->txq_to_sq_map);
+ kfree(priv->channel);
+}
+
+static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(tisc, tisc, prio, tc);
+ MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+ return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+{
+ mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_open_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int err;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ err = mlx5e_open_tis(priv, tc);
+ if (err)
+ goto err_close_tises;
+ }
+
+ return 0;
+
+err_close_tises:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_tis(priv, tc);
+
+ return err;
+}
+
+static void mlx5e_close_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++)
+ mlx5e_close_tis(priv, tc);
+}
+
+static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ void *rqtc;
+ int inlen;
+ int err;
+ int sz;
+ int i;
+
+ sz = 1 << priv->params.rx_hash_log_tbl_sz;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
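+	/* spread the indirection table entries across the open RQs
+	 * in round-robin order
+	 */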
+ for (i = 0; i < sz; i++) {
+ int ix = i % priv->params.num_channels;
+
+ MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
+ }
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+ if (!err)
+ priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+
+ mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+ if (priv->params.lro_en) {
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (priv->params.lro_wqe_sz -
+ ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+ MLX5_CAP_ETH(priv->mdev,
+ lro_timer_supported_periods[3]));
+ }
+
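+	/* MLX5E_TT_ANY dispatches directly to a single RQ; every other traffic
+	 * type goes through the RQT using Toeplitz RSS hashing.
+	 */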
+ switch (tt) {
+ case MLX5E_TT_ANY:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn,
+ priv->channel[0]->rq.rqn);
+ break;
+ default:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn);
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
+ rx_hash_toeplitz_key),
+ MLX5_FLD_SZ_BYTES(tirc,
+ rx_hash_toeplitz_key));
+ break;
+ }
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ }
+}
+
+static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ mlx5e_build_tir_ctx(priv, tirc, tt);
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+{
+ mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_open_tirs(struct mlx5e_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ err = mlx5e_open_tir(priv, i);
+ if (err)
+ goto err_close_tirs;
+ }
+
+ return 0;
+
+err_close_tirs:
+ for (i--; i >= 0; i--)
+ mlx5e_close_tir(priv, i);
+
+ return err;
+}
+
+static void mlx5e_close_tirs(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ mlx5e_close_tir(priv, i);
+}
+
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int hw_mtu;
+ int err;
+
+ err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+ if (err)
+ return err;
+
+ mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+ if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
+ netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
+ __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+
+ netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+ return 0;
+}
+
+int mlx5e_open_locked(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int num_txqs;
+ int err;
+
+ num_txqs = priv->params.num_channels * priv->params.num_tc;
+ netif_set_real_num_tx_queues(netdev, num_txqs);
+ netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
+
+ err = mlx5e_set_dev_port_mtu(netdev);
+ if (err)
+ return err;
+
+ err = mlx5e_open_tises(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
+ __func__, err);
+ return err;
+ }
+
+ err = mlx5e_open_channels(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+ __func__, err);
+ goto err_close_tises;
+ }
+
+ err = mlx5e_open_rqt(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
+ __func__, err);
+ goto err_close_channels;
+ }
+
+ err = mlx5e_open_tirs(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
+ __func__, err);
+ goto err_close_rqls;
+ }
+
+ err = mlx5e_open_flow_table(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
+ __func__, err);
+ goto err_close_tirs;
+ }
+
+ err = mlx5e_add_all_vlan_rules(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
+ __func__, err);
+ goto err_close_flow_table;
+ }
+
+ mlx5e_init_eth_addr(priv);
+
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_update_carrier(priv);
+ mlx5e_set_rx_mode_core(priv);
+
+ schedule_delayed_work(&priv->update_stats_work, 0);
+ return 0;
+
+err_close_flow_table:
+ mlx5e_close_flow_table(priv);
+
+err_close_tirs:
+ mlx5e_close_tirs(priv);
+
+err_close_rqls:
+ mlx5e_close_rqt(priv);
+
+err_close_channels:
+ mlx5e_close_channels(priv);
+
+err_close_tises:
+ mlx5e_close_tises(priv);
+
+ return err;
+}
+
+static int mlx5e_open(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_open_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_close_locked(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_set_rx_mode_core(priv);
+ mlx5e_del_all_vlan_rules(priv);
+ netif_carrier_off(priv->netdev);
+ mlx5e_close_flow_table(priv);
+ mlx5e_close_tirs(priv);
+ mlx5e_close_rqt(priv);
+ mlx5e_close_channels(priv);
+ mlx5e_close_tises(priv);
+
+ return 0;
+}
+
+static int mlx5e_close(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_close_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params)
+{
+ int err = 0;
+ int was_opened;
+
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params = *new_params;
+
+ if (was_opened)
+ err = mlx5e_open_locked(priv->netdev);
+
+ return err;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
+ stats->rx_packets = vstats->rx_packets;
+ stats->rx_bytes = vstats->rx_bytes;
+ stats->tx_packets = vstats->tx_packets;
+ stats->tx_bytes = vstats->tx_bytes;
+ stats->multicast = vstats->rx_multicast_packets +
+ vstats->tx_multicast_packets;
+ stats->tx_errors = vstats->tx_error_packets;
+ stats->rx_errors = vstats->rx_error_packets;
+ stats->tx_dropped = vstats->tx_queue_dropped;
+ stats->rx_crc_errors = 0;
+ stats->rx_length_errors = 0;
+
+ return stats;
+}
+
+static void mlx5e_set_rx_mode(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ schedule_work(&priv->set_rx_mode_work);
+}
+
+static int mlx5e_set_mac(struct net_device *netdev, void *addr)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ netif_addr_lock_bh(netdev);
+ ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ netif_addr_unlock_bh(netdev);
+
+ schedule_work(&priv->set_rx_mode_work);
+
+ return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ netdev_features_t changes = features ^ netdev->features;
+ struct mlx5e_params new_params;
+ bool update_params = false;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+
+ if (changes & NETIF_F_LRO) {
+ new_params.lro_en = !!(features & NETIF_F_LRO);
+ update_params = true;
+ }
+
+ if (update_params)
+ mlx5e_update_priv_params(priv, &new_params);
+
+ if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ mlx5e_enable_vlan_filter(priv);
+ else
+ mlx5e_disable_vlan_filter(priv);
+ }
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int max_mtu;
+ int err;
+
+ mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+
+ if (new_mtu > max_mtu) {
+ netdev_err(netdev,
+ "%s: Bad MTU (%d) > (%d) Max\n",
+ __func__, new_mtu, max_mtu);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->state_lock);
+ netdev->mtu = new_mtu;
+ err = mlx5e_update_priv_params(priv, &priv->params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static struct net_device_ops mlx5e_netdev_ops = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
+ .ndo_start_xmit = mlx5e_xmit,
+ .ndo_get_stats64 = mlx5e_get_stats,
+ .ndo_set_rx_mode = mlx5e_set_rx_mode,
+ .ndo_set_mac_address = mlx5e_set_mac,
+ .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
+ .ndo_set_features = mlx5e_set_features,
+ .ndo_change_mtu = mlx5e_change_mtu,
+};
+
+static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return -ENOTSUPP;
+ if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
+ !MLX5_CAP_GEN(mdev, nic_flow_table) ||
+ !MLX5_CAP_ETH(mdev, csum_cap) ||
+ !MLX5_CAP_ETH(mdev, max_lso_cap) ||
+ !MLX5_CAP_ETH(mdev, vlan_cap) ||
+ !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
+ MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.max_ft_level)
+ < 3) {
+ mlx5_core_warn(mdev,
+ "Not creating net device, some required device capabilities are missing\n");
+ return -ENOTSUPP;
+ }
+ return 0;
+}
+
+static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ int num_comp_vectors)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ priv->params.log_sq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ priv->params.log_rq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ priv->params.rx_cq_moderation_usec =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+ priv->params.rx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ priv->params.tx_cq_moderation_usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ priv->params.tx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ priv->params.min_rx_wqes =
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
+ priv->params.rx_hash_log_tbl_sz =
+ (order_base_2(num_comp_vectors) >
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
+ order_base_2(num_comp_vectors) :
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
+ priv->params.num_tc = 1;
+ priv->params.default_vlan_prio = 0;
+
+ priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = num_comp_vectors;
+ priv->num_tc = priv->params.num_tc;
+ priv->default_vlan_prio = priv->params.default_vlan_prio;
+
+ spin_lock_init(&priv->async_events_spinlock);
+ mutex_init(&priv->state_lock);
+
+ INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+ INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
+}
+
+static void mlx5e_build_netdev(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+
+	if (priv->num_tc > 1)
+		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
+
+ netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->watchdog_timeo = 15 * HZ;
+
+ netdev->ethtool_ops = &mlx5e_ethtool_ops;
+
+ netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_GRO;
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_RXCSUM;
+ netdev->vlan_features |= NETIF_F_RXHASH;
+
+ if (!!MLX5_CAP_ETH(mdev, lro_cap))
+ netdev->vlan_features |= NETIF_F_LRO;
+
+ netdev->hw_features = netdev->vlan_features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->features = netdev->hw_features;
+ if (!priv->params.lro_en)
+ netdev->features &= ~NETIF_F_LRO;
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ mlx5e_set_netdev_dev_addr(netdev);
+}
+
+static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
+ struct mlx5_core_mr *mr)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_create_mkey_mbox_in *in;
+ int err;
+
+ in = mlx5_vzalloc(sizeof(*in));
+ if (!in)
+ return -ENOMEM;
+
+ in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_ACCESS_MODE_PA;
+ in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+ err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
+ NULL);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int ncv = mdev->priv.eq_table.num_comp_vectors;
+ int err;
+
+ if (mlx5e_check_required_hca_cap(mdev))
+ return NULL;
+
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
+ if (!netdev) {
+ mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+ return NULL;
+ }
+
+ mlx5e_build_netdev_priv(mdev, netdev, ncv);
+ mlx5e_build_netdev(netdev);
+
+ netif_carrier_off(netdev);
+
+ priv = netdev_priv(netdev);
+
+ err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
+ __func__, err);
+ goto err_free_netdev;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
+ __func__, err);
+ goto err_unmap_free_uar;
+ }
+
+ err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
+ __func__, err);
+ goto err_dealloc_pd;
+ }
+
+ err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
+ __func__, err);
+ goto err_dealloc_transport_domain;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ netdev_err(netdev, "%s: register_netdev failed, %d\n",
+ __func__, err);
+ goto err_destroy_mkey;
+ }
+
+ mlx5e_enable_async_events(priv);
+
+ return priv;
+
+err_destroy_mkey:
+ mlx5_core_destroy_mkey(mdev, &priv->mr);
+
+err_dealloc_transport_domain:
+ mlx5_dealloc_transport_domain(mdev, priv->tdn);
+
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, priv->pdn);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+
+err_free_netdev:
+ free_netdev(netdev);
+
+ return NULL;
+}
+
+static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct net_device *netdev = priv->netdev;
+
+ unregister_netdev(netdev);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+ mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
+ mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
+ mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+ mlx5e_disable_async_events(priv);
+ flush_scheduled_work();
+ free_netdev(netdev);
+}
+
+static void *mlx5e_get_netdev(void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ return priv->netdev;
+}
+
+static struct mlx5_interface mlx5e_interface = {
+ .add = mlx5e_create_netdev,
+ .remove = mlx5e_destroy_netdev,
+ .event = mlx5e_async_event,
+ .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
+ .get_dev = mlx5e_get_netdev,
+};
+
+void mlx5e_init(void)
+{
+ mlx5_register_interface(&mlx5e_interface);
+}
+
+void mlx5e_cleanup(void)
+{
+ mlx5_unregister_interface(&mlx5e_interface);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
new file mode 100644
index 000000000..9a9374131
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include "en.h"
+
+static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(rq->pdev,
+ /* hw start padding */
+ skb->data,
+ /* hw end padding */
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
+ goto err_free_skb;
+
+ skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
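+	/* stash the mapping in skb->cb so the completion path can unmap it */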
+ *((dma_addr_t *)skb->cb) = dma_addr;
+ wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+
+ rq->skb[ix] = skb;
+
+ return 0;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+
+ return -ENOMEM;
+}
+
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+ return false;
+
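+	/* refill: one skb per free wqe until the ring is full or allocation fails */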
+ while (!mlx5_wq_ll_is_full(wq)) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+ if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+ break;
+
+ mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+ }
+
+ /* ensure wqes are visible to device before updating doorbell record */
+ dma_wmb();
+
+ mlx5_wq_ll_update_db_record(wq);
+
+ return !mlx5_wq_ll_is_full(wq);
+}
+
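+/* Fix up the headers of an LRO-aggregated skb from CQE fields: IP total
+ * length and TTL (or IPv6 payload length and hop limit), the IPv4 header
+ * checksum and the TCP PSH/ACK/window bits, so the merged skb looks like a
+ * single large packet to the stack.
+ */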
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ struct tcphdr *tcp;
+
+ u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+ int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
+ (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+
+ u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ sizeof(struct iphdr));
+ ipv6 = NULL;
+ } else {
+ tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ sizeof(struct ipv6hdr));
+ ipv4 = NULL;
+ }
+
+ if (get_cqe_lro_tcppsh(cqe))
+ tcp->psh = 1;
+
+ if (tcp_ack) {
+ tcp->ack = 1;
+ tcp->ack_seq = cqe->lro_ack_seq_num;
+ tcp->window = cqe->lro_tcp_win;
+ }
+
+ if (ipv4) {
+ ipv4->ttl = cqe->lro_min_ttl;
+ ipv4->tot_len = cpu_to_be16(tot_len);
+ ipv4->check = 0;
+ ipv4->check = ip_fast_csum((unsigned char *)ipv4,
+ ipv4->ihl);
+ } else {
+ ipv6->hop_limit = cqe->lro_min_ttl;
+ ipv6->payload_len = cpu_to_be16(tot_len -
+ sizeof(struct ipv6hdr));
+ }
+}
+
+static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb)
+{
+ u8 cht = cqe->rss_hash_type;
+ int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
+ (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
+ PKT_HASH_TYPE_NONE;
+ skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
+}
+
+static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rq->netdev;
+ u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ int lro_num_seg;
+
+ skb_put(skb, cqe_bcnt);
+
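+	/* for RX CQEs the LRO segment count is carried in the top byte of srqn */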
+ lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+ if (lro_num_seg > 1) {
+ mlx5e_lro_update_hdr(skb, cqe);
+ skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ rq->stats.lro_packets++;
+ rq->stats.lro_bytes += cqe_bcnt;
+ }
+
+ if (likely(netdev->features & NETIF_F_RXCSUM) &&
+ (cqe->hds_ip_ext & CQE_L2_OK) &&
+ (cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ rq->stats.csum_none++;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ skb_record_rx_queue(skb, rq->ix);
+
+ if (likely(netdev->features & NETIF_F_RXHASH))
+ mlx5e_skb_set_hash(cqe, skb);
+
+ if (cqe_has_vlan(cqe))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(cqe->vlan_info));
+}
+
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+ struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+ int i;
+
+ /* avoid accessing cq (dma coherent memory) if not needed */
+ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+ return false;
+
+ for (i = 0; i < budget; i++) {
+ struct mlx5e_rx_wqe *wqe;
+ struct mlx5_cqe64 *cqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ skb = rq->skb[wqe_counter];
+ prefetch(skb->data);
+ rq->skb[wqe_counter] = NULL;
+
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+
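+		/* completions other than RESP_SEND indicate an error; drop the skb but still recycle the wqe */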
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ dev_kfree_skb(skb);
+ goto wq_ll_pop;
+ }
+
+ mlx5e_build_rx_skb(cqe, rq, skb);
+ rq->stats.packets++;
+ napi_gro_receive(cq->napi, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ if (i == budget) {
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
new file mode 100644
index 000000000..03f28f438
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include "en.h"
+
+#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
+ MLX5E_SQ_NOPS_ROOM)
+
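+/* Post a NOP WQE; used to pad the SQ up to the ring edge so data WQEs never wrap around. */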
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+ memset(cseg, 0, sizeof(*cseg));
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
+
+ sq->skb[pi] = NULL;
+ sq->pc++;
+
+ if (notify_hw) {
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ mlx5e_tx_notify_hw(sq, wqe);
+ }
+}
+
+static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
+ u32 *size)
+{
+ sq->dma_fifo_pc--;
+ *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
+ *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+ dma_addr_t addr;
+ u32 size;
+ int i;
+
+ for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+ mlx5e_dma_pop_last_pushed(sq, &addr, &size);
+ dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+ }
+}
+
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
+ u32 size)
+{
+ sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
+ sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+ sq->dma_fifo_pc++;
+}
+
+static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
+ u32 *size)
+{
+ *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
+ *size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+}
+
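+/* Select the channel via the stack's fallback hash, then map the packet's
+ * VLAN priority to that channel's per-TC SQ.
+ */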
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int channel_ix = fallback(dev, skb);
+ int up = skb_vlan_tag_present(skb) ?
+ skb->vlan_tci >> VLAN_PRIO_SHIFT :
+ priv->default_vlan_prio;
+ int tc = netdev_get_prio_tc_map(dev, up);
+
+ return priv->channel[channel_ix]->tc_to_txq_map[tc];
+}
+
+static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
+ struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+ return MLX5E_MIN_INLINE;
+}
+
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ u8 opcode = MLX5_OPCODE_SEND;
+ dma_addr_t dma_addr = 0;
+ u16 headlen;
+ u16 ds_cnt;
+ u16 ihs;
+ int i;
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ else
+ sq->stats.csum_offload_none++;
+
+ if (skb_is_gso(skb)) {
+ u32 payload_len;
+
+ eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ opcode = MLX5_OPCODE_LSO;
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ payload_len = skb->len - ihs;
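+		/* for BQL accounting, count the headers once per wire segment */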
+ MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
+ (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ sq->stats.tso_packets++;
+ sq->stats.tso_bytes += payload_len;
+ } else {
+ ihs = mlx5e_get_inline_hdr_size(sq, skb);
+ MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
+ ETH_ZLEN);
+ }
+
+ skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+ skb_pull_inline(skb, ihs);
+
+ eseg->inline_hdr_sz = cpu_to_be16(ihs);
+
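+	/* data segments: the fixed ctrl/eth part plus extra 16-byte slots used by inline headers beyond inline_hdr_start */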
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
+ MLX5_SEND_WQE_DS);
+ dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+
+ MLX5E_TX_SKB_CB(skb)->num_dma = 0;
+
+ headlen = skb_headlen(skb);
+ if (headlen) {
+ dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ goto dma_unmap_wqe_err;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(headlen);
+
+ mlx5e_dma_push(sq, dma_addr, headlen);
+ MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+ dseg++;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ int fsz = skb_frag_size(frag);
+
+ dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ goto dma_unmap_wqe_err;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(fsz);
+
+ mlx5e_dma_push(sq, dma_addr, fsz);
+ MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+ dseg++;
+ }
+
+ ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+
+ sq->skb[pi] = skb;
+
+ MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
+ MLX5_SEND_WQEBB_NUM_DS);
+ sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+ netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
+
+ if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
+ netif_tx_stop_queue(sq->txq);
+ sq->stats.stopped++;
+ }
+
+ if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ mlx5e_tx_notify_hw(sq, wqe);
+ }
+
+ /* fill sq edge with nops to avoid wqe wrap around */
+ while ((sq->pc & wq->sz_m1) > sq->edge)
+ mlx5e_send_nop(sq, false);
+
+ sq->stats.packets++;
+ return NETDEV_TX_OK;
+
+dma_unmap_wqe_err:
+ sq->stats.dropped++;
+ mlx5e_dma_unmap_wqe_err(sq, skb);
+
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
+
+ return mlx5e_sq_xmit(sq, skb);
+}
+
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_sq *sq;
+ u32 dma_fifo_cc;
+ u32 nbytes;
+ u16 npkts;
+ u16 sqcc;
+ int i;
+
+ /* avoid accessing cq (dma coherent memory) if not needed */
+ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+ return false;
+
+ sq = container_of(cq, struct mlx5e_sq, cq);
+
+ npkts = 0;
+ nbytes = 0;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ /* avoid dirtying sq cache line every cqe */
+ dma_fifo_cc = sq->dma_fifo_cc;
+
+ for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+ struct mlx5_cqe64 *cqe;
+ u16 wqe_counter;
+ bool last_wqe;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
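+		/* a single CQE may complete several wqes; free everything up to and including wqe_counter */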
+ do {
+ struct sk_buff *skb;
+ u16 ci;
+ int j;
+
+ last_wqe = (sqcc == wqe_counter);
+
+ ci = sqcc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+
+ if (unlikely(!skb)) { /* nop */
+ sq->stats.nop++;
+ sqcc++;
+ continue;
+ }
+
+ for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
+ dma_addr_t addr;
+ u32 size;
+
+ mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
+ dma_fifo_cc++;
+ dma_unmap_single(sq->pdev, addr, size,
+ DMA_TO_DEVICE);
+ }
+
+ npkts++;
+ nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
+ sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+ dev_kfree_skb(skb);
+ } while (!last_wqe);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->dma_fifo_cc = dma_fifo_cc;
+ sq->cc = sqcc;
+
+ netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+
+ if (netif_tx_queue_stopped(sq->txq) &&
+ mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
+ likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats.wake++;
+ }
+ if (i == MLX5E_TX_CQ_POLL_BUDGET) {
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
new file mode 100644
index 000000000..2c7cb6755
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
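+/* Return the CQE at the consumer index if its ownership bit matches the
+ * software wrap count, i.e. hardware has written it; otherwise NULL.
+ */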
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
+{
+ struct mlx5_cqwq *wq = &cq->wq;
+ u32 ci = mlx5_cqwq_get_ci(wq);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+ int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+ int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
+ if (cqe_ownership_bit != sw_ownership_val)
+ return NULL;
+
+ /* ensure cqe content is read after cqe ownership bit */
+ rmb();
+
+ return cqe;
+}
+
+int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
+ napi);
+ bool busy = false;
+ int i;
+
+ clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+
+ for (i = 0; i < c->num_tc; i++)
+ busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);
+
+ busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);
+
+ busy |= mlx5e_post_rx_wqes(&c->rq);
+
+ if (busy)
+ return budget;
+
+ napi_complete(napi);
+
+ /* avoid losing completion event during/after polling cqs */
+ if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
+ napi_schedule(napi);
+ return 0;
+ }
+
+ for (i = 0; i < c->num_tc; i++)
+ mlx5e_cq_arm(&c->sq[i].cq);
+ mlx5e_cq_arm(&c->rq.cq);
+
+ return 0;
+}
+
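+/* Completion event handler: mark the CQ and its channel as having work and schedule NAPI. */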
+void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+{
+ struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
+ barrier();
+ napi_schedule(cq->napi);
+}
+
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
+{
+ struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct net_device *netdev = priv->netdev;
+
+ netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
+ __func__, mcq->cqn, event);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 58800e4f3..a40b96d4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -339,15 +339,14 @@ static void init_eq_buf(struct mlx5_eq *eq)
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_priv *priv = &dev->priv;
struct mlx5_create_eq_mbox_in *in;
struct mlx5_create_eq_mbox_out out;
int err;
int inlen;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
- err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
- &eq->buf);
+ err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
if (err)
return err;
@@ -378,14 +377,15 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
goto err_in;
}
- snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+ snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
+
eq->eqn = out.eq_number;
eq->irqn = vecidx;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
- eq->name, eq);
+ err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+ priv->irq_info[vecidx].name, eq);
if (err)
goto err_eq;
@@ -401,7 +401,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
return 0;
err_irq:
- free_irq(table->msix_arr[vecidx].vector, eq);
+ free_irq(priv->msix_arr[vecidx].vector, eq);
err_eq:
mlx5_cmd_destroy_eq(dev, eq->eqn);
@@ -417,16 +417,15 @@ EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
mlx5_debug_eq_remove(dev, eq);
- free_irq(table->msix_arr[eq->irqn].vector, eq);
+ free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- synchronize_irq(table->msix_arr[eq->irqn].vector);
+ synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
mlx5_buf_free(dev, &eq->buf);
return err;
@@ -456,7 +455,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
- if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+ if (MLX5_CAP_GEN(dev, pg))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
@@ -479,7 +478,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
err = mlx5_create_map_eq(dev, &table->pages_eq,
MLX5_EQ_VEC_PAGES,
- dev->caps.gen.max_vf + 1,
+ /* TODO: sriov max_vf + */ 1,
1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
&dev->priv.uuari.uars[0]);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
new file mode 100644
index 000000000..ca90b9bc3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/flow_table.h>
+#include "mlx5_core.h"
+
+struct mlx5_ftg {
+ struct mlx5_flow_table_group g;
+ u32 id;
+ u32 start_ix;
+};
+
+struct mlx5_flow_table {
+ struct mlx5_core_dev *dev;
+ u8 level;
+ u8 type;
+ u32 id;
+ struct mutex mutex; /* sync bitmap alloc */
+ u16 num_groups;
+ struct mlx5_ftg *group;
+ unsigned long *bitmap;
+ u32 size;
+};
+
+static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
+ u32 flow_index, void *flow_context)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+ u32 *in;
+ void *in_flow_context;
+ int fcdls =
+ MLX5_GET(flow_context, flow_context, destination_list_size) *
+ MLX5_ST_SZ_BYTES(dest_format_struct);
+ int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(set_fte_in, in, table_type, ft->type);
+ MLX5_SET(set_fte_in, in, table_id, ft->id);
+ MLX5_SET(set_fte_in, in, flow_index, flow_index);
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ memcpy(in_flow_context, flow_context,
+ MLX5_ST_SZ_BYTES(flow_context) + fcdls);
+
+ MLX5_SET(flow_context, in_flow_context, group_id,
+ ft->group[group_ix].id);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
+ MLX5_SET_DFTEI(in, table_type, ft->type);
+ MLX5_SET_DFTEI(in, table_id, ft->id);
+ MLX5_SET_DFTEI(in, flow_index, flow_index);
+ MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
+ MLX5_SET_DFGI(in, table_type, ft->type);
+ MLX5_SET_DFGI(in, table_id, ft->id);
+ MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET_DFGI(in, group_id, ft->group[i].id);
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
+ u32 *in;
+ void *in_match_criteria;
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ u32 start_ix = ft->group[i].start_ix;
+ u32 end_ix = start_ix + (1 << g->log_sz) - 1;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+ in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
+ match_criteria);
+
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
+ MLX5_SET_CFGI(in, table_type, ft->type);
+ MLX5_SET_CFGI(in, table_id, ft->id);
+ MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET_CFGI(in, start_flow_index, start_ix);
+ MLX5_SET_CFGI(in, end_flow_index, end_ix);
+ MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
+
+ memcpy(in_match_criteria, g->match_criteria,
+ MLX5_ST_SZ_BYTES(fte_match_param));
+
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ if (!err)
+ ft->group[i].id = MLX5_GET(create_flow_group_out, out,
+ group_id);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++)
+ mlx5_destroy_flow_group_cmd(ft, i);
+}
+
+static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ err = mlx5_create_flow_group_cmd(ft, i);
+ if (err)
+ goto err_destroy_flow_table_groups;
+ }
+
+ return 0;
+
+err_destroy_flow_table_groups:
+ for (i--; i >= 0; i--)
+ mlx5_destroy_flow_group_cmd(ft, i);
+
+ return err;
+}
+
+static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(create_flow_table_in, in, level, ft->level);
+ MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size));
+
+ MLX5_SET(create_flow_table_in, in, opcode,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ ft->id = MLX5_GET(create_flow_table_out, out, table_id);
+
+ return 0;
+}
+
+static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
+ MLX5_SET_DFTI(in, table_type, ft->type);
+ MLX5_SET_DFTI(in, table_id, ft->id);
+ MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
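+/* Find an existing group whose enabled match criteria (outer, misc and
+ * inner headers) are identical to the requested ones.
+ */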
+static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
+ u32 *match_criteria, int *group_ix)
+{
+ void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers);
+ void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ misc_parameters);
+ void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ inner_headers);
+ int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc);
+ int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ outer_headers);
+ void *gmc_misc = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ misc_parameters);
+ void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ inner_headers);
+
+ if (g->match_criteria_enable != match_criteria_enable)
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
+ if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
+ if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
+ if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
+ continue;
+
+ *group_ix = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
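+/* Allocate a free flow index within the group's range; the table mutex guards the bitmap. */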
+static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
+{
+ struct mlx5_ftg *g = &ft->group[group_ix];
+ int err = 0;
+
+ mutex_lock(&ft->mutex);
+
+ *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
+ if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
+ err = -ENOSPC;
+ else
+ __set_bit(*ix, ft->bitmap);
+
+ mutex_unlock(&ft->mutex);
+
+ return err;
+}
+
+static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
+{
+ __clear_bit(ix, ft->bitmap);
+}
+
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+ void *match_criteria, void *flow_context,
+ u32 *flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+ int group_ix;
+ int err;
+
+ err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
+ &group_ix);
+ if (err) {
+ mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
+ return err;
+ }
+
+ err = alloc_flow_index(ft, group_ix, flow_index);
+ if (err) {
+ mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
+ return err;
+ }
+
+ return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
+}
+EXPORT_SYMBOL(mlx5_add_flow_table_entry);
+
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ mlx5_del_flow_entry_cmd(ft, flow_index);
+ mlx5_free_flow_index(ft, flow_index);
+}
+EXPORT_SYMBOL(mlx5_del_flow_table_entry);
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+ u16 num_groups,
+ struct mlx5_flow_table_group *group)
+{
+ struct mlx5_flow_table *ft;
+ u32 start_ix = 0;
+ u32 ft_size = 0;
+ void *gr;
+ void *bm;
+ int err;
+ int i;
+
+ for (i = 0; i < num_groups; i++)
+ ft_size += (1 << group[i].log_sz);
+
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
+ bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
+ if (!ft || !gr || !bm)
+ goto err_free_ft;
+
+ ft->group = gr;
+ ft->bitmap = bm;
+ ft->num_groups = num_groups;
+ ft->level = level;
+ ft->type = table_type;
+ ft->size = ft_size;
+ ft->dev = dev;
+ mutex_init(&ft->mutex);
+
+ for (i = 0; i < ft->num_groups; i++) {
+ memcpy(&ft->group[i].g, &group[i], sizeof(*group));
+ ft->group[i].start_ix = start_ix;
+ start_ix += 1 << group[i].log_sz;
+ }
+
+ err = mlx5_create_flow_table_cmd(ft);
+ if (err)
+ goto err_free_ft;
+
+ err = mlx5_create_flow_table_groups(ft);
+ if (err)
+ goto err_destroy_flow_table_cmd;
+
+ return ft;
+
+err_destroy_flow_table_cmd:
+ mlx5_destroy_flow_table_cmd(ft);
+
+err_free_ft:
+ mlx5_core_warn(dev, "failed to alloc flow table\n");
+ kfree(bm);
+ kfree(gr);
+ kfree(ft);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mlx5_create_flow_table);
+
+void mlx5_destroy_flow_table(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ mlx5_destroy_flow_table_groups(ft);
+ mlx5_destroy_flow_table_cmd(ft);
+ kfree(ft->bitmap);
+ kfree(ft->group);
+ kfree(ft);
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_table);
+
+u32 mlx5_get_flow_table_id(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ return ft->id;
+}
+EXPORT_SYMBOL(mlx5_get_flow_table_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 4b4cda3bc..9335e5ae1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,79 +35,133 @@
#include <linux/module.h>
#include "mlx5_core.h"
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev)
+static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
+ int outlen)
{
- struct mlx5_cmd_query_adapter_mbox_out *out;
- struct mlx5_cmd_query_adapter_mbox_in in;
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_query_board_id(struct mlx5_core_dev *dev)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
int err;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- memset(&in, 0, sizeof(in));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+ err = mlx5_cmd_query_adapter(dev, out, outlen);
if (err)
- goto out_out;
-
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
- goto out_out;
- }
+ goto out;
- memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid));
+ memcpy(dev->board_id,
+ MLX5_ADDR_OF(query_adapter_out, out,
+ query_adapter_struct.vsd_contd_psid),
+ MLX5_FLD_SZ_BYTES(query_adapter_out,
+ query_adapter_struct.vsd_contd_psid));
-out_out:
+out:
kfree(out);
-
return err;
}
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
- return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_cmd_query_adapter(mdev, out, outlen);
+ if (err)
+ goto out;
+
+ *vendor_id = MLX5_GET(query_adapter_out, out,
+ query_adapter_struct.ieee_vendor_id);
+out:
+ kfree(out);
+ return err;
}
+EXPORT_SYMBOL(mlx5_core_query_vendor_id);
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
- u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
- int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *out;
int err;
- if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
- return -ENOTSUPP;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
- memset(in, 0, sizeof(in));
- out = kzalloc(out_sz, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
if (err)
- goto out;
+ return err;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err) {
- mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
- goto out;
+ if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
}
- memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
- sizeof(*caps));
+ if (MLX5_CAP_GEN(dev, pg)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
- mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
- be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
- be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
- be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+ if (MLX5_CAP_GEN(dev, atomic)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
-out:
- kfree(out);
- return err;
+ if (MLX5_CAP_GEN(dev, roce)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+ return 0;
}
-EXPORT_SYMBOL(mlx5_query_odp_caps);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
index ee1b0b965..1368dac00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
@@ -36,7 +36,7 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
+int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port)
{
struct mlx5_mad_ifc_mbox_in *in = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 28425e5ea..06e3e1e54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -38,6 +38,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#include <linux/interrupt.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
@@ -47,10 +48,6 @@
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
-#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0"
-#define DRIVER_RELDATE "January 2015"
-
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
@@ -208,24 +205,28 @@ static void release_bar(struct pci_dev *pdev)
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- int num_eqs = 1 << dev->caps.gen.log_max_eq;
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_eq_table *table = &priv->eq_table;
+ int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
int i;
- nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
+ nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+ MLX5_EQ_VEC_COMP_BASE;
nvec = min_t(int, nvec, num_eqs);
if (nvec <= MLX5_EQ_VEC_COMP_BASE)
return -ENOMEM;
- table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
- if (!table->msix_arr)
- return -ENOMEM;
+ priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
+
+ priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
+ if (!priv->msix_arr || !priv->irq_info)
+ goto err_free_msix;
for (i = 0; i < nvec; i++)
- table->msix_arr[i].entry = i;
+ priv->msix_arr[i].entry = i;
- nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+ nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
MLX5_EQ_VEC_COMP_BASE + 1, nvec);
if (nvec < 0)
return nvec;
@@ -233,14 +234,20 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
+
+err_free_msix:
+ kfree(priv->irq_info);
+ kfree(priv->msix_arr);
+ return -ENOMEM;
}
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_priv *priv = &dev->priv;
pci_disable_msix(dev->pdev);
- kfree(table->msix_arr);
+ kfree(priv->irq_info);
+ kfree(priv->msix_arr);
}
struct mlx5_reg_host_endianess {
@@ -277,98 +284,20 @@ static u16 to_fw_pkey_sz(u32 size)
}
}
-/* selectively copy writable fields clearing any reserved area
- */
-static void copy_rw_fields(void *to, struct mlx5_caps *from)
-{
- __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
- u64 v64;
-
- MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
- MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
- MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
- MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
- v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
- *flags_off = cpu_to_be64(v64);
-}
-
-static u16 get_pkey_table_size(int pkey)
-{
- if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
- return 0;
-
- return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
-}
-
-static void fw2drv_caps(struct mlx5_caps *caps, void *out)
-{
- struct mlx5_general_caps *gen = &caps->gen;
-
- gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
- gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
- gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
- gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
- gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
- gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
- gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
- gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
- gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
- gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
- gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
- gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
- gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
- gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
- gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
- gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
- gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
- gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
- gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
- gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
- gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
- gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
- gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
- gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
- gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
- pr_debug("flags = 0x%llx\n", gen->flags);
- gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
- gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
- gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
- gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
- gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
- gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
- gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
- gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
- gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
- gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
- gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
-}
-
-static const char *caps_opmod_str(u16 opmod)
-{
- switch (opmod) {
- case HCA_CAP_OPMOD_GET_MAX:
- return "GET_MAX";
- case HCA_CAP_OPMOD_GET_CUR:
- return "GET_CUR";
- default:
- return "Invalid";
- }
-}
-
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
- u16 opmod)
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode)
{
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *out;
+ void *out, *hca_caps;
+ u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
int err;
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
+
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
@@ -377,12 +306,30 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
err = mlx5_cmd_status_to_err_v2(out);
if (err) {
- mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
+ mlx5_core_warn(dev,
+ "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
+ cap_type, cap_mode, err);
goto query_ex;
}
- mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
- fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+ switch (cap_mode) {
+ case HCA_CAP_OPMOD_GET_MAX:
+ memcpy(dev->hca_caps_max[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ case HCA_CAP_OPMOD_GET_CUR:
+ memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ default:
+ mlx5_core_warn(dev,
+ "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
+ cap_type, cap_mode);
+ err = -EINVAL;
+ break;
+ }
query_ex:
kfree(out);
return err;
@@ -409,49 +356,47 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
{
void *set_ctx = NULL;
struct mlx5_profile *prof = dev->profile;
- struct mlx5_caps *cur_caps = NULL;
- struct mlx5_caps *max_caps = NULL;
int err = -ENOMEM;
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *set_hca_cap;
set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
goto query_ex;
- max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL);
- if (!max_caps)
- goto query_ex;
-
- cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL);
- if (!cur_caps)
- goto query_ex;
-
- err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
if (err)
goto query_ex;
- err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
if (err)
goto query_ex;
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+ capability);
+ memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+ MLX5_ST_SZ_BYTES(cmd_hca_cap));
+
+ mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
+ mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
+ 128);
/* we limit the size of the pkey table to 128 entries for now */
- cur_caps->gen.pkey_table_size = 128;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
+ to_fw_pkey_sz(128));
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
- cur_caps->gen.log_max_qp = prof->log_max_qp;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+ prof->log_max_qp);
+
+ /* disable cmdif checksum */
+ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
- /* disable checksum */
- cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
- copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
- cur_caps);
err = set_caps(dev, set_ctx, set_sz);
query_ex:
- kfree(cur_caps);
- kfree(max_caps);
kfree(set_ctx);
-
return err;
}
@@ -507,6 +452,74 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
return 0;
}
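+/* Set an affinity hint for completion vector i, spreading the vectors over
+ * the CPUs of the device's NUMA node.
+ */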
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct msix_entry *msix = priv->msix_arr;
+ int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+ int numa_node = dev_to_node(&mdev->pdev->dev);
+ int err;
+
+ if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+ mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ priv->irq_info[i].mask);
+
+ err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
+ if (err) {
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
+ irq);
+ goto err_clear_mask;
+ }
+
+ return 0;
+
+err_clear_mask:
+ free_cpumask_var(priv->irq_info[i].mask);
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct msix_entry *msix = priv->msix_arr;
+ int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+
+ irq_set_affinity_hint(irq, NULL);
+ free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+ err = mlx5_irq_set_affinity_hint(mdev, i);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ for (i--; i >= 0; i--)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
@@ -549,7 +562,7 @@ static void free_comp_eqs(struct mlx5_core_dev *dev)
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
- char name[MLX5_MAX_EQ_NAME];
+ char name[MLX5_MAX_IRQ_NAME];
struct mlx5_eq *eq;
int ncomp_vec;
int nent;
@@ -566,7 +579,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
goto clean;
}
- snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(dev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
name, &dev->priv.uuari.uars[0]);
@@ -588,6 +601,61 @@ clean:
return err;
}
+#ifdef CONFIG_MLX5_CORE_EN
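+/* Negotiate the command-interface ISSI (interface step sequence ID): if the
+ * device supports ISSI 1, switch to it; otherwise stay on ISSI 0. The
+ * Ethernet code assumes the ISSI 1 command layouts, hence the
+ * CONFIG_MLX5_CORE_EN guard.
+ */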
+static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
+{
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
+ u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
+ int err;
+ u32 sup_issi;
+
+ memset(query_in, 0, sizeof(query_in));
+ memset(query_out, 0, sizeof(query_out));
+
+ MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
+
+ err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
+ query_out, sizeof(query_out));
+ if (err) {
+ if (((struct mlx5_outbox_hdr *)query_out)->status ==
+ MLX5_CMD_STAT_BAD_OP_ERR) {
+ pr_debug("Only ISSI 0 is supported\n");
+ return 0;
+ }
+
+ pr_err("failed to query ISSI\n");
+ return err;
+ }
+
+ sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
+
+ if (sup_issi & (1 << 1)) {
+ memset(set_in, 0, sizeof(set_in));
+ memset(set_out, 0, sizeof(set_out));
+
+ MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
+ MLX5_SET(set_issi_in, set_in, current_issi, 1);
+
+ err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
+ set_out, sizeof(set_out));
+ if (err) {
+ pr_err("failed to set ISSI=1\n");
+ return err;
+ }
+
+ dev->issi = 1;
+
+ return 0;
+ } else if (sup_issi & (1 << 0) || !sup_issi) {
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+#endif
+
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -650,6 +718,14 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_pagealloc_cleanup;
}
+#ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_core_set_issi(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to set issi\n");
+ goto err_disable_hca;
+ }
+#endif
+
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
dev_err(&pdev->dev, "failed to allocate boot pages\n");
@@ -688,15 +764,15 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
mlx5_start_health_poll(dev);
- err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
+ err = mlx5_query_hca_caps(dev);
if (err) {
dev_err(&pdev->dev, "query hca failed\n");
goto err_stop_poll;
}
- err = mlx5_cmd_query_adapter(dev);
+ err = mlx5_query_board_id(dev);
if (err) {
- dev_err(&pdev->dev, "query adapter failed\n");
+ dev_err(&pdev->dev, "query board id failed\n");
goto err_stop_poll;
}
@@ -730,6 +806,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_eqs;
}
+ err = mlx5_irq_set_affinity_hints(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+ goto err_free_comp_eqs;
+ }
+
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
mlx5_init_cq_table(dev);
@@ -739,6 +821,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
return 0;
+err_free_comp_eqs:
+ free_comp_eqs(dev);
+
err_stop_eqs:
mlx5_stop_eqs(dev);
@@ -793,6 +878,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
+ mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
@@ -1048,6 +1134,10 @@ static int __init init(void)
if (err)
goto err_health;
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5e_init();
+#endif
+
return 0;
err_health:
@@ -1060,6 +1150,9 @@ err_debug:
static void __exit cleanup(void)
{
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5e_cleanup();
+#endif
pci_unregister_driver(&mlx5_core_driver);
mlx5_health_cleanup();
destroy_workqueue(mlx5_core_wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index d79fd85d1..d5a0c2d61 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -91,7 +91,7 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
memcpy(in.gid, mgid, sizeof(*mgid));
in.qpn = cpu_to_be32(qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a051b906a..fc88ecaec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,6 +37,10 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#define DRIVER_NAME "mlx5_core"
+#define DRIVER_VERSION "3.0-1"
+#define DRIVER_RELDATE "January 2015"
+
extern int mlx5_core_debug_mask;
#define mlx5_core_dbg(dev, format, ...) \
@@ -65,11 +69,20 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
+static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
+ int in_size, u32 *out,
+ int out_size)
+{
+ mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
+}
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
- struct mlx5_caps *caps);
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
+int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5e_init(void);
+void mlx5e_cleanup(void);
+
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 49e90f261..70147999f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -102,3 +102,229 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
+
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask, u8 local_port)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(ptys_reg, in, local_port, local_port);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+ ptys_size, MLX5_REG_PTYS, 0, 0);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ else
+ *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
+
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ else
+ *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
+
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ u8 *link_width_oper, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
+ if (err)
+ return err;
+
+ *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
+
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ else
+ *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
+
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+ if (proto_mask == MLX5_PTYS_EN)
+ MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
+ else
+ MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(paos_reg, in, admin_status, status);
+ MLX5_SET(paos_reg, in, ase, 1);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 1);
+}
+
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 0);
+ if (err)
+ return err;
+
+ *status = MLX5_GET(paos_reg, out, oper_status);
+ return err;
+}
+
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+ int *max_mtu, int *oper_mtu, u8 port)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, local_port, port);
+
+ mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 0);
+
+ if (max_mtu)
+ *max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
+ if (oper_mtu)
+ *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
+ if (admin_mtu)
+ *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+}
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
+ MLX5_SET(pmtu_reg, in, local_port, port);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+ u8 port)
+{
+ mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+ u8 port)
+{
+ mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+
+static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
+ int pvlc_size, u8 local_port)
+{
+ u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+	MLX5_SET(pvlc_reg, in, local_port, local_port);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+ pvlc_size, MLX5_REG_PVLC, 0, 0);
+
+ return err;
+}
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(pvlc_reg)];
+ int err;
+
+ err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port);
+ if (err)
+ return err;
+
+ *vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
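
Taken together, the PTYS helpers above give the Ethernet code its ethtool-style link management: read the capability mask, read or adjust the admin mask, and write it back. A rough sketch of that sequence for the Ethernet protocol masks (error handling trimmed; mdev is the usual mlx5_core_dev pointer):

	u32 eth_proto_cap, eth_proto_admin;
	int err;

	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
	if (err)
		return err;

	err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
	if (err)
		return err;

	/* never advertise something the port cannot do */
	eth_proto_admin &= eth_proto_cap;

	err = mlx5_set_port_proto(mdev, eth_proto_admin, MLX5_PTYS_EN);
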
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index dc7dbf7e9..8b494b562 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -187,10 +187,17 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_destroy_qp_mbox_in din;
struct mlx5_destroy_qp_mbox_out dout;
int err;
+ void *qpc;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
+ if (dev->issi) {
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(qpc, qpc, user_index, 0xffffff);
+ }
+
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "ret %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f9d25dcd0..c48f504cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
+#include "transobj.h"
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
@@ -62,6 +63,74 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
complete(&srq->free);
}
+static int get_pas_size(void *srqc)
+{
+ u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
+ u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
+ u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
+ u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
+ u32 po_quanta = 1 << (log_page_size - 6);
+ u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
+ u32 page_size = 1 << log_page_size;
+ u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
+ u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
+
+ return rq_num_pas * sizeof(u64);
+}
+
+static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
+{
+ void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ if (srqc_to_rmpc) {
+ switch (MLX5_GET(srqc, srqc, state)) {
+ case MLX5_SRQC_STATE_GOOD:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ break;
+ case MLX5_SRQC_STATE_ERROR:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
+ break;
+ default:
+ pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
+ __LINE__, MLX5_GET(srqc, srqc, state));
+ MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
+ }
+
+ MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
+ MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
+ MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
+ MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
+ MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
+ MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
+ MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
+ MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr));
+ } else {
+ switch (MLX5_GET(rmpc, rmpc, state)) {
+ case MLX5_RMPC_STATE_RDY:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
+ break;
+ case MLX5_RMPC_STATE_ERR:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
+ break;
+ default:
+ pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
+ __func__, __LINE__,
+ MLX5_GET(rmpc, rmpc, state));
+ MLX5_SET(srqc, srqc, state,
+ MLX5_GET(rmpc, rmpc, state));
+ }
+
+ MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
+ MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
+ MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
+ MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
+ MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
+ MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
+ MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
+ MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr));
+ }
+}
+
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
@@ -79,26 +148,311 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
}
EXPORT_SYMBOL(mlx5_core_get_srq);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen)
+static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen)
{
struct mlx5_create_srq_mbox_out out;
- struct mlx5_srq_table *table = &dev->priv.srq_table;
- struct mlx5_destroy_srq_mbox_in din;
- struct mlx5_destroy_srq_mbox_out dout;
int err;
memset(&out, 0, sizeof(out));
+
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- if (err)
- return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
+ sizeof(out));
srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
+ return err;
+}
+
+static int destroy_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ struct mlx5_destroy_srq_mbox_in in;
+ struct mlx5_destroy_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+ (u32 *)(&out), sizeof(out));
+}
+
+static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+{
+ struct mlx5_arm_srq_mbox_in in;
+ struct mlx5_arm_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
+ in.hdr.opmod = cpu_to_be16(!!is_srq);
+ in.srqn = cpu_to_be32(srq->srqn);
+ in.lwm = cpu_to_be16(lwm);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
+ sizeof(in), (u32 *)(&out),
+ sizeof(out));
+}
+
+static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ struct mlx5_query_srq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+ (u32 *)out, sizeof(*out));
+}
+
+static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in,
+ int srq_inlen)
+{
+ u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ void *create_in;
+ void *srqc;
+ void *xrc_srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
+ xrc_srq_context_entry);
+ pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
+
+ memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
+ memcpy(pas, in->pas, pas_size);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff);
+ MLX5_SET(create_xrc_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(create_out, 0, sizeof(create_out));
+ err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
+ sizeof(create_out));
+ if (err)
+ goto out;
+
+ srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+out:
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+ memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq, u16 lwm)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+ memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
+
+ return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ u32 *xrcsrq_out;
+ void *srqc;
+ void *xrc_srqc;
+ int err;
+
+ xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!xrcsrq_out)
+ return -ENOMEM;
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+
+ MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
+ MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (err)
+ goto out;
+
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
+ xrc_srq_context_entry);
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+
+out:
+ kvfree(xrcsrq_out);
+ return err;
+}
+
+static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int srq_inlen)
+{
+ void *create_in;
+ void *rmpc;
+ void *srqc;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+
+ memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
+ rmpc_srqc_reformat(srqc, rmpc, true);
+
+ err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
+
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ return mlx5_core_destroy_rmp(dev, srq->srqn);
+}
+
+static int arm_rmp_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ u16 lwm)
+{
+ void *in;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int err;
+
+ in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ if (!in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+ err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+ kvfree(in);
+ return err;
+}
+
+static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 *rmp_out;
+ void *rmpc;
+ void *srqc;
+ int err;
+
+ rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+ if (!rmp_out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
+ if (err)
+ goto out;
+
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
+ rmpc_srqc_reformat(srqc, rmpc, false);
+
+out:
+ kvfree(rmp_out);
+ return err;
+}
+
+static int create_srq_split(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in,
+ int inlen, int is_xrc)
+{
+ if (!dev->issi)
+ return create_srq_cmd(dev, srq, in, inlen);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return create_xrc_srq_cmd(dev, srq, in, inlen);
+ else
+ return create_rmp_cmd(dev, srq, in, inlen);
+}
+
+static int destroy_srq_split(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ if (!dev->issi)
+ return destroy_srq_cmd(dev, srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return destroy_xrc_srq_cmd(dev, srq);
+ else
+ return destroy_rmp_cmd(dev, srq);
+}
+
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc)
+{
+ int err;
+ struct mlx5_srq_table *table = &dev->priv.srq_table;
+
+ srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
+
+ err = create_srq_split(dev, srq, in, inlen, is_xrc);
+ if (err)
+ return err;
+
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
@@ -107,25 +461,20 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
spin_unlock_irq(&table->lock);
if (err) {
mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
- goto err_cmd;
+ goto err_destroy_srq_split;
}
return 0;
-err_cmd:
- memset(&din, 0, sizeof(din));
- memset(&dout, 0, sizeof(dout));
- din.srqn = cpu_to_be32(srq->srqn);
- din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+err_destroy_srq_split:
+ destroy_srq_split(dev, srq);
+
return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
- struct mlx5_destroy_srq_mbox_in in;
- struct mlx5_destroy_srq_mbox_out out;
struct mlx5_srq_table *table = &dev->priv.srq_table;
struct mlx5_core_srq *tmp;
int err;
@@ -142,17 +491,10 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
return -EINVAL;
}
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ err = destroy_srq_split(dev, srq);
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
if (atomic_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
@@ -164,48 +506,24 @@ EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
{
- struct mlx5_query_srq_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, sizeof(*out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
- if (err)
- return err;
-
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ if (!dev->issi)
+ return query_srq_cmd(dev, srq, out);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return query_xrc_srq_cmd(dev, srq, out);
+ else
+ return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- struct mlx5_arm_srq_mbox_in in;
- struct mlx5_arm_srq_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
- in.hdr.opmod = cpu_to_be16(!!is_srq);
- in.srqn = cpu_to_be32(srq->srqn);
- in.lwm = cpu_to_be16(lwm);
-
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ if (!dev->issi)
+ return arm_srq_cmd(dev, srq, lwm, is_srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return arm_xrc_srq_cmd(dev, srq, lwm);
+ else
+ return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);
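
With the ISSI-aware split in place, callers keep building the same create-SRQ mailbox but now pass an is_xrc flag, and the core routes the request to a legacy SRQ, an XRC SRQ or an RMP underneath. A hedged caller sketch (the srqc and PAS inside 'in' are assumed to be prepared exactly as before):

static int example_create_srq(struct mlx5_core_dev *dev,
			      struct mlx5_core_srq *srq,
			      struct mlx5_create_srq_mbox_in *in, int inlen)
{
	int err;

	err = mlx5_core_create_srq(dev, srq, in, inlen, 0 /* !is_xrc */);
	if (err)
		return err;

	/* srq->srqn now names an SRQ, XRC SRQ or RMP depending on dev->issi
	 * and srq->common.res; arm/query/destroy dispatch the same way.
	 */
	return mlx5_core_destroy_srq(dev, srq);
}
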
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
new file mode 100644
index 000000000..8d98b0302
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "transobj.h"
+
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+
+ return err;
+}
+
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+ int err;
+
+ MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqn = MLX5_GET(create_rq_out, out, rqn);
+
+ return err;
+}
+
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
+
+ MLX5_SET(modify_rq_in, in, rqn, rqn);
+ MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+ int err;
+
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *sqn = MLX5_GET(create_sq_out, out, sqn);
+
+ return err;
+}
+
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+
+ MLX5_SET(modify_sq_in, in, sqn, sqn);
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, in, sqn, sqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+ int err;
+
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tirn = MLX5_GET(create_tir_out, out, tirn);
+
+ return err;
+}
+
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+ int err;
+
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tisn = MLX5_GET(create_tis_out, out, tisn);
+
+ return err;
+}
+
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+ int err;
+
+ MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rmpn = MLX5_GET(create_rmp_out, out, rmpn);
+
+ return err;
+}
+
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+
+ MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+ int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
+ MLX5_SET(query_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
+{
+ void *in;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int err;
+
+ in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ if (!in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, rmpn);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+ err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *xsrqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ int err;
+
+ MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+
+ return err;
+}
+
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ void *srqc;
+ void *xrc_srqc;
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!err) {
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
+ xrc_srq_context_entry);
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+ }
+
+ return err;
+}
+
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
+ MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, in, op_mod,
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
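
All of these helpers share one shape: the caller fills the object context with MLX5_SET(), the helper stamps the opcode and hands back the object number. A sketch of how the Ethernet side is expected to combine them, allocating a transport domain and creating a TIS inside it (the 'ctx' field name follows the create_tis_in layout in mlx5_ifc.h):

	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
	void *tisc;
	u32 tdn, tisn;
	int err;

	err = mlx5_alloc_transport_domain(mdev, &tdn);
	if (err)
		return err;

	memset(in, 0, sizeof(in));
	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, transport_domain, tdn);

	err = mlx5_core_create_tis(mdev, in, sizeof(in), &tisn);
	if (err) {
		mlx5_dealloc_transport_domain(mdev, tdn);
		return err;
	}

	/* ... point SQs at tisn ... */

	mlx5_core_destroy_tis(mdev, tisn);
	mlx5_dealloc_transport_domain(mdev, tdn);
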
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
new file mode 100644
index 000000000..f9ef24471
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqn);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *sqn);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn);
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn);
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn);
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn);
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+
+#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 5a89bb1d6..9ef85873c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -175,12 +175,13 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
for (i = 0; i < tot_uuars; i++) {
bf = &uuari->bfs[i];
- bf->buf_size = dev->caps.gen.bf_reg_size / 2;
+ bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
bf->reg = NULL; /* Add WC support */
- bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size +
- MLX5_BF_OFFSET;
+ bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
+ (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
+ MLX5_BF_OFFSET;
bf->need_lock = need_uuar_lock(i);
spin_lock_init(&bf->lock);
spin_lock_init(&bf->lock32);
@@ -223,3 +224,40 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
return 0;
}
+
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ phys_addr_t pfn;
+ phys_addr_t uar_bar_start;
+ int err;
+
+ err = mlx5_cmd_alloc_uar(mdev, &uar->index);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+ return err;
+ }
+
+ uar_bar_start = pci_resource_start(mdev->pdev, 0);
+ pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+ uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map) {
+		err = -ENOMEM;
+		mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
+ goto err_free_uar;
+ }
+
+ return 0;
+
+err_free_uar:
+ mlx5_cmd_free_uar(mdev, uar->index);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_map_uar);
+
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ iounmap(uar->map);
+ mlx5_cmd_free_uar(mdev, uar->index);
+}
+EXPORT_SYMBOL(mlx5_unmap_free_uar);
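
mlx5_alloc_map_uar() bundles the ALLOC_UAR command with an ioremap() of the matching page in BAR 0, so a consumer gets a ready-to-use doorbell mapping in one call; mlx5_unmap_free_uar() is its exact inverse. A minimal pairing sketch:

	struct mlx5_uar uar;
	int err;

	err = mlx5_alloc_map_uar(mdev, &uar);
	if (err)
		return err;

	/* uar.index identifies the UAR, uar.map is the ioremapped page;
	 * CQ/EQ doorbells are written through uar.map.
	 */

	mlx5_unmap_free_uar(mdev, &uar);
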
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
new file mode 100644
index 000000000..b94177ebc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
+{
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_vport_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_STATE);
+ MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
+
+ return MLX5_GET(query_vport_state_out, out, state);
+}
+EXPORT_SYMBOL(mlx5_query_vport_state);
+
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
+{
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u8 *out_addr;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return;
+
+ out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+ nic_vport_context.permanent_address);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+
+ memset(out, 0, outlen);
+ mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+
+ ether_addr_copy(addr, &out_addr[2]);
+
+ kvfree(out);
+}
+EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address);
+
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 gid_index,
+ union ib_gid *gid)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ union ib_gid *tmp;
+ int tbsz;
+ int nout;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+ tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
+ mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
+ vf_num, gid_index, tbsz);
+
+ if (gid_index > tbsz && gid_index != 0xffff)
+ return -EINVAL;
+
+ if (gid_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * sizeof(*gid);
+
+ in = kzalloc(in_sz, GFP_KERNEL);
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
+ MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+ MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+ gid->global.subnet_prefix = tmp->global.subnet_prefix;
+ gid->global.interface_id = tmp->global.interface_id;
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
+
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 pkey_index,
+ u16 *pkey)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ void *pkarr;
+ int nout;
+ int tbsz;
+ int err;
+ int i;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
+ if (pkey_index > tbsz && pkey_index != 0xffff)
+ return -EINVAL;
+
+ if (pkey_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
+
+ in = kzalloc(in_sz, GFP_KERNEL);
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
+ MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+ MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
+ for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
+ *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
+
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ u16 vf_num,
+ struct mlx5_hca_vport_context *rep)
+{
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+ int is_group_manager;
+ void *out;
+ void *ctx;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
+
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
+ MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
+ } else {
+ err = -EPERM;
+ goto ex;
+ }
+ }
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto ex;
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto ex;
+
+ ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
+ rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
+ rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
+ rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
+ rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
+ rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
+ rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
+ port_physical_state);
+ rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
+ rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
+ port_physical_state);
+ rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
+ rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
+ rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
+ rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
+ cap_mask1_field_select);
+ rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
+ rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
+ cap_mask2_field_select);
+ rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
+ rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
+ init_type_reply);
+ rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
+ rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
+ subnet_timeout);
+ rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
+ rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
+ rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+ qkey_violation_counter);
+ rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+ pkey_violation_counter);
+ rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
+ rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
+ system_image_guid);
+
+ex:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
+
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid)
+{
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+ if (!err)
+ *sys_image_guid = rep->sys_image_guid;
+
+ kfree(rep);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
+
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+ u64 *node_guid)
+{
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+ if (!err)
+ *node_guid = rep->node_guid;
+
+ kfree(rep);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
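
mlx5_query_nic_vport_mac_address() is how the netdev path is expected to learn the permanent MAC at probe time. A hedged sketch of that use (netdev here is an assumed, already-allocated net_device):

	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(mdev, mac);
	if (is_valid_ether_addr(mac))
		ether_addr_copy(netdev->dev_addr, mac);
	else
		eth_hw_addr_random(netdev);	/* no permanent MAC programmed */
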
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
new file mode 100644
index 000000000..838841158
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "wq.h"
+#include "mlx5_core.h"
+
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+ return wq->sz_m1 + 1;
+}
+
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+{
+ return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+{
+ return mlx5_cqwq_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
+{
+ return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+}
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+ wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+ wq->sz_m1 = (1 << wq->log_sz) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ struct mlx5_wqe_srq_next_seg *next_seg;
+ int err;
+ int i;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ for (i = 0; i < wq->sz_m1; i++) {
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ next_seg->next_wqe_index = cpu_to_be16(i + 1);
+ }
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ wq->tail_next = &next_seg->next_wqe_index;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+{
+ mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+ mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
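
Each create helper pairs one doorbell record with one contiguous buffer and leaves teardown to the shared mlx5_wq_destroy(). A sketch of the cyclic variant, the expected consumer being the send-queue code (wqc points at an already-filled 'wq' layout, e.g. inside an sqc; note that param is not yet consumed by these helpers):

	struct mlx5_wq_param param = { .linear = 1, .numa = 0 };
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_wq_cyc wq;
	int err;

	err = mlx5_wq_cyc_create(mdev, &param, wqc, &wq, &wq_ctrl);
	if (err)
		return err;

	/* wq_ctrl.buf/db back the queue in the CREATE_SQ command; WQEs are
	 * addressed with mlx5_wq_cyc_get_wqe(&wq, mlx5_wq_cyc_ctr2ix(&wq, pc)).
	 */

	mlx5_wq_destroy(&wq_ctrl);
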
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
new file mode 100644
index 000000000..e0ddd69fb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_WQ_H__
+#define __MLX5_WQ_H__
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+struct mlx5_wq_param {
+ int linear;
+ int numa;
+};
+
+struct mlx5_wq_ctrl {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_buf buf;
+ struct mlx5_db db;
+};
+
+struct mlx5_wq_cyc {
+ void *buf;
+ __be32 *db;
+ u16 sz_m1;
+ u8 log_stride;
+};
+
+struct mlx5_cqwq {
+ void *buf;
+ __be32 *db;
+ u32 sz_m1;
+ u32 cc; /* consumer counter */
+ u8 log_sz;
+ u8 log_stride;
+};
+
+struct mlx5_wq_ll {
+ void *buf;
+ __be32 *db;
+ __be16 *tail_next;
+ u16 sz_m1;
+ u16 head;
+ u16 wqe_ctr;
+ u16 cur_sz;
+ u8 log_stride;
+};
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+
+static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+ return ctr & wq->sz_m1;
+}
+
+static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
+{
+ int equal = (cc1 == cc2);
+ int smaller = 0x8000 & (cc1 - cc2);
+
+ return !equal && !smaller;
+}
+
+static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
+{
+ return wq->cc & wq->sz_m1;
+}
+
+static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
+{
+ return wq->cc >> wq->log_sz;
+}
+
+static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
+{
+ wq->cc++;
+}
+
+static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
+{
+ *wq->db = cpu_to_be32(wq->cc & 0xffffff);
+}
+
+static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
+{
+ return wq->cur_sz == wq->sz_m1;
+}
+
+static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
+{
+ return !wq->cur_sz;
+}
+
+static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
+{
+ wq->head = head_next;
+ wq->wqe_ctr++;
+ wq->cur_sz++;
+}
+
+static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
+ __be16 *next_tail_next)
+{
+ *wq->tail_next = ix;
+ wq->tail_next = next_tail_next;
+ wq->cur_sz--;
+}
+
+static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
+{
+ *wq->db = cpu_to_be32(wq->wqe_ctr);
+}
+
+#endif /* __MLX5_WQ_H__ */
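
The mlx5_cqwq helpers split CQ consumption into peek, pop and doorbell update, with the software wrap-count parity standing in for the hardware owner bit. A hedged sketch of how they compose into a polling primitive (mirrors the intended datapath use; MLX5_CQE_OWNER_MASK and struct mlx5_cqe64 come from linux/mlx5/device.h):

static struct mlx5_cqe64 *example_poll_cqe(struct mlx5_cqwq *wq)
{
	u32 ci = mlx5_cqwq_get_ci(wq);
	struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
	int hw_own = cqe->op_own & MLX5_CQE_OWNER_MASK;
	int sw_own = mlx5_cqwq_get_wrap_cnt(wq) & 1;

	if (hw_own != sw_own)
		return NULL;	/* hardware has not written this entry yet */

	rmb();			/* read CQE fields only after the owner bit */

	mlx5_cqwq_pop(wq);
	return cqe;
}

After a batch of such pops, the consumer writes the doorbell record once with mlx5_cqwq_update_db_record() and re-arms the CQ.
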