author     André Fabian Silva Delgado <emulatorman@parabola.nu>   2016-09-11 04:34:46 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>   2016-09-11 04:34:46 -0300
commit     863981e96738983919de841ec669e157e6bdaeb0 (patch)
tree       d6d89a12e7eb8017837c057935a2271290907f76 /drivers/infiniband/ulp
parent     8dec7c70575785729a6a9e6719a955e9c545bcab (diff)
Linux-libre 4.7.1-gnupck-4.7.1-gnu
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 67
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 113
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 149
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 48
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 8
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 4
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 852
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 69
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 226
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 2
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 737
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h | 30
15 files changed, 911 insertions, 1408 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index caec8e9c4..4f7d9b48d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -92,6 +92,9 @@ enum {
IPOIB_FLAG_UMCAST = 10,
IPOIB_STOP_NEIGH_GC = 11,
IPOIB_NEIGH_TBL_FLUSH = 12,
+ IPOIB_FLAG_DEV_ADDR_SET = 13,
+ IPOIB_FLAG_DEV_ADDR_CTRL = 14,
+ IPOIB_FLAG_GOING_DOWN = 15,
IPOIB_MAX_BACKOFF_SECONDS = 16,
@@ -392,6 +395,7 @@ struct ipoib_dev_priv {
struct ipoib_ethtool_st ethtool;
struct timer_list poll_timer;
unsigned max_send_sge;
+ bool sm_fullmember_sendonly_support;
};
struct ipoib_ah {
@@ -476,6 +480,7 @@ void ipoib_reap_ah(struct work_struct *work);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
+int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index c8ed53562..951d9abcc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -766,7 +766,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
} else {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
++tx->tx_head;
if (++priv->tx_outstanding == ipoib_sendq_size) {
@@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
{
struct net_device *dev = to_net_dev(d);
int ret;
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
+ return -EPERM;
if (!rtnl_trylock())
return restart_syscall();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index a53fa5fc0..1502199c8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -36,6 +36,27 @@
#include "ipoib.h"
+struct ipoib_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define IPOIB_NETDEV_STAT(m) { \
+ .stat_string = #m, \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
+
+static const struct ipoib_stats ipoib_gstrings_stats[] = {
+ IPOIB_NETDEV_STAT(rx_packets),
+ IPOIB_NETDEV_STAT(tx_packets),
+ IPOIB_NETDEV_STAT(rx_bytes),
+ IPOIB_NETDEV_STAT(tx_bytes),
+ IPOIB_NETDEV_STAT(tx_errors),
+ IPOIB_NETDEV_STAT(rx_dropped),
+ IPOIB_NETDEV_STAT(tx_dropped)
+};
+
+#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
+
static void ipoib_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
@@ -92,11 +113,57 @@ static int ipoib_set_coalesce(struct net_device *dev,
return 0;
}
+static void ipoib_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ int i;
+ struct net_device_stats *net_stats = &dev->stats;
+ u8 *p = (u8 *)net_stats;
+
+ for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++)
+ data[i] = *(u64 *)(p + ipoib_gstrings_stats[i].stat_offset);
+
+}
+static void ipoib_get_strings(struct net_device __always_unused *dev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ipoib_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_TEST:
+ default:
+ break;
+ }
+}
+static int ipoib_get_sset_count(struct net_device __always_unused *dev,
+ int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IPOIB_GLOBAL_STATS_LEN;
+ case ETH_SS_TEST:
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
static const struct ethtool_ops ipoib_ethtool_ops = {
.get_drvinfo = ipoib_get_drvinfo,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
+ .get_strings = ipoib_get_strings,
+ .get_ethtool_stats = ipoib_get_ethtool_stats,
+ .get_sset_count = ipoib_get_sset_count,
};
void ipoib_set_ethtool_ops(struct net_device *dev)
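[Editorial aside] The ethtool support added above is table-driven: each ipoib_gstrings_stats entry pairs a counter name with its byte offset (computed with offsetof), ipoib_get_strings() exports the names, ipoib_get_sset_count() the count, and ipoib_get_ethtool_stats() copies each counter out of dev->stats by offset, which is what "ethtool -S <ifname>" ends up reporting. A minimal, self-contained userspace sketch of the same offset-table idea follows; the struct and field names in it are invented for illustration and are not part of the patch.

/* Offset-table pattern used by the ipoib ethtool stats above; illustrative names only. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_stats {
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint64_t rx_bytes;
};

struct stat_desc {
	const char *name;
	size_t offset;
};

#define STAT(m) { .name = #m, .offset = offsetof(struct demo_stats, m) }

static const struct stat_desc descs[] = {
	STAT(rx_packets), STAT(tx_packets), STAT(rx_bytes),
};

int main(void)
{
	struct demo_stats s = { .rx_packets = 10, .tx_packets = 7, .rx_bytes = 1500 };
	const uint8_t *base = (const uint8_t *)&s;
	size_t i;

	/* Walk the table and pull each counter out by byte offset, the same
	 * way ipoib_get_ethtool_stats() fills its data[] array. */
	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		printf("%s: %llu\n", descs[i].name,
		       (unsigned long long)*(const uint64_t *)(base + descs[i].offset));
	return 0;
}

Keeping names and offsets in one array means adding a counter later is a one-line change and the three ethtool callbacks cannot drift out of sync.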
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f0e55e47e..dc6d241b9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -51,8 +51,6 @@ MODULE_PARM_DESC(data_debug_level,
"Enable data path debug tracing if > 0");
#endif
-static DEFINE_MUTEX(pkey_mutex);
-
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr)
{
@@ -637,7 +635,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
} else {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
address->last_send = priv->tx_head;
++priv->tx_head;
@@ -999,6 +997,106 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
return 0;
}
+/*
+ * Returns true if the device address of the ipoib interface has changed and
+ * the new address is valid (i.e. it is in the gid table); returns false otherwise.
+ */
+static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
+{
+ union ib_gid search_gid;
+ union ib_gid gid0;
+ union ib_gid *netdev_gid;
+ int err;
+ u16 index;
+ u8 port;
+ bool ret = false;
+
+ netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
+ if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
+ return false;
+
+ netif_addr_lock_bh(priv->dev);
+
+ /* The subnet prefix may have changed; update it now so we won't have
+ * to do it later
+ */
+ priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+ netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
+ search_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+
+ search_gid.global.interface_id = priv->local_gid.global.interface_id;
+
+ netif_addr_unlock_bh(priv->dev);
+
+ err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
+ priv->dev, &port, &index);
+
+ netif_addr_lock_bh(priv->dev);
+
+ if (search_gid.global.interface_id !=
+ priv->local_gid.global.interface_id)
+ /* There was a change while we were looking up the gid, bail
+ * here and let the next work sort this out
+ */
+ goto out;
+
+ /* The next section of code needs some background:
+ * Per the IB spec, the port GUID can't change while the HCA is powered on.
+ * The port GUID is the basis for the GID at index 0, which in turn is the
+ * basis for the default device address of an ipoib interface.
+ *
+ * so it seems the flow should be:
+ * if user_changed_dev_addr && gid in gid tbl
+ * set bit dev_addr_set
+ * return true
+ * else
+ * return false
+ *
+ * The issue is that there are devices that don't follow the spec and
+ * change the port GUID when the HCA is powered, so in order
+ * not to break userspace applications, we need to check whether the
+ * user wanted to control the device address, and we assume that
+ * if the user sets the device address back to be based on GID index 0,
+ * the user no longer wishes to control it.
+ *
+ * If the user doesn't control the device address,
+ * IPOIB_FLAG_DEV_ADDR_SET is set, and ib_find_gid failed, it means
+ * the port GUID has changed and the GID at index 0 has changed,
+ * so we need to change priv->local_gid and priv->dev->dev_addr
+ * to reflect the new GID.
+ */
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ if (!err && port == priv->port) {
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+ if (index == 0)
+ clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
+ &priv->flags);
+ else
+ set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
+ ret = true;
+ } else {
+ ret = false;
+ }
+ } else {
+ if (!err && port == priv->port) {
+ ret = true;
+ } else {
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
+ memcpy(&priv->local_gid, &gid0,
+ sizeof(priv->local_gid));
+ memcpy(priv->dev->dev_addr + 4, &gid0,
+ sizeof(priv->local_gid));
+ ret = true;
+ }
+ }
+ }
+
+out:
+ netif_addr_unlock_bh(priv->dev);
+
+ return ret;
+}
+
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
enum ipoib_flush_level level,
int nesting)
@@ -1020,6 +1118,9 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
level != IPOIB_FLUSH_HEAVY) {
+ /* Make sure the dev_addr is set even if not flushing */
+ if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
return;
}
@@ -1031,7 +1132,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
update_parent_pkey(priv);
else
update_child_pkey(priv);
- }
+ } else if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
return;
}
@@ -1083,7 +1185,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
if (level >= IPOIB_FLUSH_NORMAL)
ipoib_ib_dev_up(dev);
- ipoib_mcast_restart_task(&priv->restart_task);
+ if (ipoib_dev_addr_changed_valid(priv))
+ ipoib_mcast_restart_task(&priv->restart_task);
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 80807d6e5..5f58c41ef 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -99,6 +99,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
struct ib_device *dev, u8 port, u16 pkey,
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data);
+static int ipoib_set_mac(struct net_device *dev, void *addr);
static struct ib_client ipoib_client = {
.name = "ipoib",
@@ -117,6 +118,8 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+ priv->sm_fullmember_sendonly_support = false;
+
if (ipoib_ib_dev_open(dev)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
@@ -629,6 +632,77 @@ void ipoib_mark_paths_invalid(struct net_device *dev)
spin_unlock_irq(&priv->lock);
}
+struct classport_info_context {
+ struct ipoib_dev_priv *priv;
+ struct completion done;
+ struct ib_sa_query *sa_query;
+};
+
+static void classport_info_query_cb(int status, struct ib_class_port_info *rec,
+ void *context)
+{
+ struct classport_info_context *cb_ctx = context;
+ struct ipoib_dev_priv *priv;
+
+ WARN_ON(!context);
+
+ priv = cb_ctx->priv;
+
+ if (status || !rec) {
+ pr_debug("device: %s failed query classport_info status: %d\n",
+ priv->dev->name, status);
+ /* keeps the default, will try next mcast_restart */
+ priv->sm_fullmember_sendonly_support = false;
+ goto out;
+ }
+
+ if (ib_get_cpi_capmask2(rec) &
+ IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT) {
+ pr_debug("device: %s enabled fullmember-sendonly for sendonly MCG\n",
+ priv->dev->name);
+ priv->sm_fullmember_sendonly_support = true;
+ } else {
+ pr_debug("device: %s disabled fullmember-sendonly for sendonly MCG\n",
+ priv->dev->name);
+ priv->sm_fullmember_sendonly_support = false;
+ }
+
+out:
+ complete(&cb_ctx->done);
+}
+
+int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv)
+{
+ struct classport_info_context *callback_context;
+ int ret;
+
+ callback_context = kmalloc(sizeof(*callback_context), GFP_KERNEL);
+ if (!callback_context)
+ return -ENOMEM;
+
+ callback_context->priv = priv;
+ init_completion(&callback_context->done);
+
+ ret = ib_sa_classport_info_rec_query(&ipoib_sa_client,
+ priv->ca, priv->port, 3000,
+ GFP_KERNEL,
+ classport_info_query_cb,
+ callback_context,
+ &callback_context->sa_query);
+ if (ret < 0) {
+ pr_info("%s failed to send ib_sa_classport_info query, ret: %d\n",
+ priv->dev->name, ret);
+ kfree(callback_context);
+ return ret;
+ }
+
+ /* wait for the callback to finish before returning */
+ wait_for_completion(&callback_context->done);
+ kfree(callback_context);
+
+ return ret;
+}
+
void ipoib_flush_paths(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
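[Editorial aside] ipoib_check_sm_sendonly_fullmember_support() above wraps the asynchronous SA ClassPortInfo query in a synchronous call: the callback records the capability bit in priv->sm_fullmember_sendonly_support and completes a struct completion, while the caller blocks in wait_for_completion(); the 3000 ms timeout handed to the SA layer means the callback, and therefore the completion, still fires if the SM never answers. Below is a rough userspace analog of that pattern, with a pthread condition variable standing in for the kernel completion; every name in it is illustrative and none of it is a kernel or verbs API.

/* Userspace sketch of the "async callback + completion" pattern; illustrative only. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct query_ctx {
	pthread_mutex_t lock;
	pthread_cond_t done;
	bool completed;
	bool sendonly_fullmember;	/* result the callback fills in */
};

/* Stands in for the SA query machinery delivering the answer later. */
static void *fake_sa_query(void *arg)
{
	struct query_ctx *ctx = arg;

	sleep(1);				/* pretend the SM answered after 1s */
	pthread_mutex_lock(&ctx->lock);
	ctx->sendonly_fullmember = true;	/* capability bit from ClassPortInfo */
	ctx->completed = true;
	pthread_cond_signal(&ctx->done);
	pthread_mutex_unlock(&ctx->lock);
	return NULL;
}

int main(void)
{
	struct query_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, fake_sa_query, &ctx);

	/* Equivalent of wait_for_completion(&callback_context->done). */
	pthread_mutex_lock(&ctx.lock);
	while (!ctx.completed)
		pthread_cond_wait(&ctx.done, &ctx.lock);
	pthread_mutex_unlock(&ctx.lock);

	printf("sendonly-fullmember supported: %d\n", ctx.sendonly_fullmember);
	pthread_join(tid, NULL);
	return 0;
}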
@@ -1036,7 +1110,7 @@ static void ipoib_timeout(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
- jiffies_to_msecs(jiffies - dev->trans_start));
+ jiffies_to_msecs(jiffies - dev_trans_start(dev)));
ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
netif_queue_stopped(dev),
priv->tx_head, priv->tx_tail);
@@ -1132,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
neigh = NULL;
goto out_unlock;
}
- neigh->alive = jiffies;
+
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
goto out_unlock;
}
}
@@ -1649,6 +1725,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_get_vf_config = ipoib_get_vf_config,
.ndo_get_vf_stats = ipoib_get_vf_stats,
.ndo_set_vf_guid = ipoib_set_vf_guid,
+ .ndo_set_mac_address = ipoib_set_mac,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -1771,6 +1848,70 @@ int ipoib_add_umcast_attr(struct net_device *dev)
return device_create_file(&dev->dev, &dev_attr_umcast);
}
+static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
+{
+ struct ipoib_dev_priv *child_priv;
+ struct net_device *netdev = priv->dev;
+
+ netif_addr_lock_bh(netdev);
+
+ memcpy(&priv->local_gid.global.interface_id,
+ &gid->global.interface_id,
+ sizeof(gid->global.interface_id));
+ memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
+ clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+
+ netif_addr_unlock_bh(netdev);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ down_read(&priv->vlan_rwsem);
+ list_for_each_entry(child_priv, &priv->child_intfs, list)
+ set_base_guid(child_priv, gid);
+ up_read(&priv->vlan_rwsem);
+ }
+}
+
+static int ipoib_check_lladdr(struct net_device *dev,
+ struct sockaddr_storage *ss)
+{
+ union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
+ int ret = 0;
+
+ netif_addr_lock_bh(dev);
+
+ /* Make sure the QPN, reserved and subnet prefix match the current
+ * lladdr; this also makes sure the lladdr is unicast.
+ */
+ if (memcmp(dev->dev_addr, ss->__data,
+ 4 + sizeof(gid->global.subnet_prefix)) ||
+ gid->global.interface_id == 0)
+ ret = -EINVAL;
+
+ netif_addr_unlock_bh(dev);
+
+ return ret;
+}
+
+static int ipoib_set_mac(struct net_device *dev, void *addr)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct sockaddr_storage *ss = addr;
+ int ret;
+
+ if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
+ return -EBUSY;
+
+ ret = ipoib_check_lladdr(dev, ss);
+ if (ret)
+ return ret;
+
+ set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
+
+ queue_work(ipoib_workqueue, &priv->flush_light);
+
+ return 0;
+}
+
static ssize_t create_child(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
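[Editorial aside] The ndo_set_mac_address path added above (ipoib_set_mac, ipoib_check_lladdr, set_base_guid) operates on the 20-byte IPoIB link-layer address: the first 4 bytes hold the QPN/reserved bits and bytes 4-19 hold the port GID, i.e. an 8-byte subnet prefix followed by the 8-byte interface id. ipoib_check_lladdr() rejects any request that changes the first 12 bytes or zeroes the interface id, so only the interface-id half of the GID can be set from userspace. A small standalone sketch of that layout follows; the address bytes are invented for illustration.

/* Layout of the 20-byte IPoIB lladdr the patch manipulates; demo values only. */
#include <stdio.h>
#include <stdint.h>

#define INFINIBAND_ALEN 20	/* length of an IPoIB link-layer address */

static void dump(const char *what, const uint8_t *p, int len)
{
	int i;

	printf("%-15s", what);
	for (i = 0; i < len; i++)
		printf("%02x%c", p[i], i + 1 < len ? ':' : '\n');
}

int main(void)
{
	uint8_t lladdr[INFINIBAND_ALEN] = {
		0x80, 0x00, 0x04, 0x04,				/* QPN + reserved */
		0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* subnet prefix  */
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,	/* interface id   */
	};

	dump("qpn/reserved:", lladdr, 4);
	dump("subnet prefix:", lladdr + 4, 8);
	/* Only this part may change through ipoib_set_mac() in the patch. */
	dump("interface id:", lladdr + 12, 8);
	return 0;
}

set_base_guid() then copies just the interface-id portion into priv->local_gid and clears IPOIB_FLAG_DEV_ADDR_SET, and the queued light flush re-validates the new address against the GID table via ipoib_dev_addr_changed_valid().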
@@ -1894,6 +2035,7 @@ static struct net_device *ipoib_add_port(const char *format,
goto device_init_failed;
} else
memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
result = ipoib_dev_init(priv->dev, hca, port);
if (result < 0) {
@@ -2001,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
ib_unregister_event_handler(&priv->event_handler);
flush_workqueue(ipoib_workqueue);
+ /* mark interface in the middle of destruction */
+ set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+
rtnl_lock();
dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
rtnl_unlock();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 25889311b..d3394b6ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -64,6 +64,9 @@ struct ipoib_mcast_iter {
unsigned int send_only;
};
+/* join state that allows creating an MCG with a send-only member request */
+#define SENDONLY_FULLMEMBER_JOIN 8
+
/*
* This should be called with the priv->lock held
*/
@@ -326,12 +329,23 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
carrier_on_task);
struct ib_port_attr attr;
+ int ret;
if (ib_query_port(priv->ca, priv->port, &attr) ||
attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
+ /*
+ * Check whether we can create send-only MCGs with the sendonly-fullmember
+ * join state. This is done here, after successfully joining the broadcast group,
+ * because the broadcast group must always be joined first and is always
+ * re-joined if the SM changes substantially.
+ */
+ ret = ipoib_check_sm_sendonly_fullmember_support(priv);
+ if (ret < 0)
+ pr_debug("%s failed query sm support for sendonly-fullmember (ret: %d)\n",
+ priv->dev->name, ret);
/*
* Take rtnl_lock to avoid racing with ipoib_stop() and
@@ -515,22 +529,20 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
/*
- * Send-only IB Multicast joins do not work at the core
- * IB layer yet, so we can't use them here. However,
- * we are emulating an Ethernet multicast send, which
- * does not require a multicast subscription and will
- * still send properly. The most appropriate thing to
+ * Send-only IB Multicast joins work at the core IB layer but
+ * require specific SM support.
+ * We can use such joins here only if the current SM supports the feature;
+ * if it does not, we emulate an Ethernet multicast send,
+ * which does not require a multicast subscription and will
+ * still send properly. The most appropriate thing to
* do is to create the group if it doesn't exist as that
* most closely emulates the behavior, from a user space
- * application perspecitive, of Ethernet multicast
- * operation. For now, we do a full join, maybe later
- * when the core IB layers support send only joins we
- * will use them.
+ * application perspective, of Ethernet multicast operation.
*/
-#if 0
- if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- rec.join_state = 4;
-#endif
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+ priv->sm_fullmember_sendonly_support)
+ /* SM supports sendonly-fullmember; otherwise fall back to a full-member join */
+ rec.join_state = SENDONLY_FULLMEMBER_JOIN;
}
spin_unlock_irq(&priv->lock);
@@ -570,11 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
return;
}
priv->local_lid = port_attr.lid;
+ netif_addr_lock_bh(dev);
- if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid, NULL))
- ipoib_warn(priv, "ib_query_gid() failed\n");
- else
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ netif_addr_unlock_bh(dev);
+ return;
+ }
+ netif_addr_unlock_bh(dev);
spin_lock_irq(&priv->lock);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b809c373e..1e7cbbaa1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -307,5 +307,8 @@ void ipoib_event(struct ib_event_handler *handler,
queue_work(ipoib_workqueue, &priv->flush_normal);
} else if (record->event == IB_EVENT_PKEY_CHANGE) {
queue_work(ipoib_workqueue, &priv->flush_heavy);
+ } else if (record->event == IB_EVENT_GID_CHANGE &&
+ !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ queue_work(ipoib_workqueue, &priv->flush_light);
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index fca1a882d..a2f9f29c6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -68,6 +68,8 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
priv->pkey = pkey;
memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
+ memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
priv->dev->broadcast[8] = pkey >> 8;
priv->dev->broadcast[9] = pkey & 0xff;
@@ -129,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+ return -EPERM;
+
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
priv = ipoib_intf_alloc(intf_name);
@@ -181,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+ return -EPERM;
+
if (!rtnl_trylock())
return restart_syscall();
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 9a391cc5b..90be56893 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -236,7 +236,7 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
page_vec->npages = 0;
page_vec->fake_mr.page_size = SIZE_4K;
plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
- mem->size, iser_set_page);
+ mem->size, NULL, iser_set_page);
if (unlikely(plen < mem->size)) {
iser_err("page vec too short to hold this SG\n");
iser_data_buf_dump(mem, device->ib_device);
@@ -446,7 +446,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- n = ib_map_mr_sg(mr, mem->sg, mem->size, SIZE_4K);
+ n = ib_map_mr_sg(mr, mem->sg, mem->size, NULL, SIZE_4K);
if (unlikely(n != mem->size)) {
iser_err("failed to map sg (%d/%d)\n",
n, mem->size);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 411e4464c..a990c0420 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -33,7 +33,8 @@
#define ISERT_MAX_CONN 8
#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
-#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
+#define ISER_MAX_TX_CQ_LEN \
+ ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
ISERT_MAX_CONN)
@@ -46,14 +47,6 @@ static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
-static void
-isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
-static int
-isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
-static void
-isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
-static int
-isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
@@ -142,6 +135,7 @@ isert_create_qp(struct isert_conn *isert_conn,
attr.recv_cq = comp->cq;
attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
+ attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
device->ib_device->attrs.max_sge_rd);
@@ -270,9 +264,9 @@ isert_alloc_comps(struct isert_device *device)
device->ib_device->num_comp_vectors));
isert_info("Using %d CQs, %s supports %d vectors support "
- "Fast registration %d pi_capable %d\n",
+ "pi_capable %d\n",
device->comps_used, device->ib_device->name,
- device->ib_device->num_comp_vectors, device->use_fastreg,
+ device->ib_device->num_comp_vectors,
device->pi_capable);
device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
@@ -313,18 +307,6 @@ isert_create_device_ib_res(struct isert_device *device)
isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
- /* asign function handlers */
- if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
- ib_dev->attrs.device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
- device->use_fastreg = 1;
- device->reg_rdma_mem = isert_reg_rdma;
- device->unreg_rdma_mem = isert_unreg_rdma;
- } else {
- device->use_fastreg = 0;
- device->reg_rdma_mem = isert_map_rdma;
- device->unreg_rdma_mem = isert_unmap_cmd;
- }
-
ret = isert_alloc_comps(device);
if (ret)
goto out;
@@ -417,146 +399,6 @@ isert_device_get(struct rdma_cm_id *cma_id)
}
static void
-isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
-{
- struct fast_reg_descriptor *fr_desc, *tmp;
- int i = 0;
-
- if (list_empty(&isert_conn->fr_pool))
- return;
-
- isert_info("Freeing conn %p fastreg pool", isert_conn);
-
- list_for_each_entry_safe(fr_desc, tmp,
- &isert_conn->fr_pool, list) {
- list_del(&fr_desc->list);
- ib_dereg_mr(fr_desc->data_mr);
- if (fr_desc->pi_ctx) {
- ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
- ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
- kfree(fr_desc->pi_ctx);
- }
- kfree(fr_desc);
- ++i;
- }
-
- if (i < isert_conn->fr_pool_size)
- isert_warn("Pool still has %d regions registered\n",
- isert_conn->fr_pool_size - i);
-}
-
-static int
-isert_create_pi_ctx(struct fast_reg_descriptor *desc,
- struct ib_device *device,
- struct ib_pd *pd)
-{
- struct pi_context *pi_ctx;
- int ret;
-
- pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
- if (!pi_ctx) {
- isert_err("Failed to allocate pi context\n");
- return -ENOMEM;
- }
-
- pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_mr)) {
- isert_err("Failed to allocate prot frmr err=%ld\n",
- PTR_ERR(pi_ctx->prot_mr));
- ret = PTR_ERR(pi_ctx->prot_mr);
- goto err_pi_ctx;
- }
- desc->ind |= ISERT_PROT_KEY_VALID;
-
- pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
- if (IS_ERR(pi_ctx->sig_mr)) {
- isert_err("Failed to allocate signature enabled mr err=%ld\n",
- PTR_ERR(pi_ctx->sig_mr));
- ret = PTR_ERR(pi_ctx->sig_mr);
- goto err_prot_mr;
- }
-
- desc->pi_ctx = pi_ctx;
- desc->ind |= ISERT_SIG_KEY_VALID;
- desc->ind &= ~ISERT_PROTECTED;
-
- return 0;
-
-err_prot_mr:
- ib_dereg_mr(pi_ctx->prot_mr);
-err_pi_ctx:
- kfree(pi_ctx);
-
- return ret;
-}
-
-static int
-isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
- struct fast_reg_descriptor *fr_desc)
-{
- fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(fr_desc->data_mr)) {
- isert_err("Failed to allocate data frmr err=%ld\n",
- PTR_ERR(fr_desc->data_mr));
- return PTR_ERR(fr_desc->data_mr);
- }
- fr_desc->ind |= ISERT_DATA_KEY_VALID;
-
- isert_dbg("Created fr_desc %p\n", fr_desc);
-
- return 0;
-}
-
-static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
-{
- struct fast_reg_descriptor *fr_desc;
- struct isert_device *device = isert_conn->device;
- struct se_session *se_sess = isert_conn->conn->sess->se_sess;
- struct se_node_acl *se_nacl = se_sess->se_node_acl;
- int i, ret, tag_num;
- /*
- * Setup the number of FRMRs based upon the number of tags
- * available to session in iscsi_target_locate_portal().
- */
- tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
- tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
-
- isert_conn->fr_pool_size = 0;
- for (i = 0; i < tag_num; i++) {
- fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
- if (!fr_desc) {
- isert_err("Failed to allocate fast_reg descriptor\n");
- ret = -ENOMEM;
- goto err;
- }
-
- ret = isert_create_fr_desc(device->ib_device,
- device->pd, fr_desc);
- if (ret) {
- isert_err("Failed to create fastreg descriptor err=%d\n",
- ret);
- kfree(fr_desc);
- goto err;
- }
-
- list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
- isert_conn->fr_pool_size++;
- }
-
- isert_dbg("Creating conn %p fastreg pool size=%d",
- isert_conn, isert_conn->fr_pool_size);
-
- return 0;
-
-err:
- isert_conn_free_fastreg_pool(isert_conn);
- return ret;
-}
-
-static void
isert_init_conn(struct isert_conn *isert_conn)
{
isert_conn->state = ISER_CONN_INIT;
@@ -565,8 +407,6 @@ isert_init_conn(struct isert_conn *isert_conn)
init_completion(&isert_conn->login_req_comp);
kref_init(&isert_conn->kref);
mutex_init(&isert_conn->mutex);
- spin_lock_init(&isert_conn->pool_lock);
- INIT_LIST_HEAD(&isert_conn->fr_pool);
INIT_WORK(&isert_conn->release_work, isert_release_work);
}
@@ -739,9 +579,6 @@ isert_connect_release(struct isert_conn *isert_conn)
BUG_ON(!device);
- if (device->use_fastreg)
- isert_conn_free_fastreg_pool(isert_conn);
-
isert_free_rx_descriptors(isert_conn);
if (isert_conn->cm_id)
rdma_destroy_id(isert_conn->cm_id);
@@ -1080,7 +917,6 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
{
struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
- isert_cmd->iser_ib_op = ISER_IB_SEND;
tx_desc->tx_cqe.done = isert_send_done;
send_wr->wr_cqe = &tx_desc->tx_cqe;
@@ -1160,16 +996,6 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
}
if (!login->login_failed) {
if (login->login_complete) {
- if (!conn->sess->sess_ops->SessionType &&
- isert_conn->device->use_fastreg) {
- ret = isert_conn_create_fastreg_pool(isert_conn);
- if (ret) {
- isert_err("Conn: %p failed to create"
- " fastreg pool\n", isert_conn);
- return ret;
- }
- }
-
ret = isert_alloc_rx_descriptors(isert_conn);
if (ret)
return ret;
@@ -1633,97 +1459,26 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
-static int
-isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct scatterlist *sg, u32 nents, u32 length, u32 offset,
- enum iser_ib_op_code op, struct isert_data_buf *data)
-{
- struct ib_device *ib_dev = isert_conn->cm_id->device;
-
- data->dma_dir = op == ISER_IB_RDMA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- data->len = length - offset;
- data->offset = offset;
- data->sg_off = data->offset / PAGE_SIZE;
-
- data->sg = &sg[data->sg_off];
- data->nents = min_t(unsigned int, nents - data->sg_off,
- ISCSI_ISER_SG_TABLESIZE);
- data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
- PAGE_SIZE);
-
- data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
- data->dma_dir);
- if (unlikely(!data->dma_nents)) {
- isert_err("Cmd: unable to dma map SGs %p\n", sg);
- return -EINVAL;
- }
-
- isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
- isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
-
- return 0;
-}
-
-static void
-isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
-{
- struct ib_device *ib_dev = isert_conn->cm_id->device;
-
- ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
- memset(data, 0, sizeof(*data));
-}
-
-
-
static void
-isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
{
- isert_dbg("Cmd %p\n", isert_cmd);
+ struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
+ enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
- if (isert_cmd->data.sg) {
- isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
- isert_unmap_data_buf(isert_conn, &isert_cmd->data);
- }
-
- if (isert_cmd->rdma_wr) {
- isert_dbg("Cmd %p free send_wr\n", isert_cmd);
- kfree(isert_cmd->rdma_wr);
- isert_cmd->rdma_wr = NULL;
- }
-
- if (isert_cmd->ib_sge) {
- isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
- kfree(isert_cmd->ib_sge);
- isert_cmd->ib_sge = NULL;
- }
-}
-
-static void
-isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
-{
- isert_dbg("Cmd %p\n", isert_cmd);
-
- if (isert_cmd->fr_desc) {
- isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc);
- if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
- isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
- isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
- }
- spin_lock_bh(&isert_conn->pool_lock);
- list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool);
- spin_unlock_bh(&isert_conn->pool_lock);
- isert_cmd->fr_desc = NULL;
- }
+ if (!cmd->rw.nr_ops)
+ return;
- if (isert_cmd->data.sg) {
- isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
- isert_unmap_data_buf(isert_conn, &isert_cmd->data);
+ if (isert_prot_cmd(conn, se_cmd)) {
+ rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
+ conn->cm_id->port_num, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, se_cmd->t_prot_sg,
+ se_cmd->t_prot_nents, dir);
+ } else {
+ rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
}
- isert_cmd->ib_sge = NULL;
- isert_cmd->rdma_wr = NULL;
+ cmd->rw.nr_ops = 0;
}
static void
@@ -1732,7 +1487,6 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
struct iscsi_conn *conn = isert_conn->conn;
- struct isert_device *device = isert_conn->device;
struct iscsi_text_rsp *hdr;
isert_dbg("Cmd %p\n", isert_cmd);
@@ -1760,7 +1514,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
}
}
- device->unreg_rdma_mem(isert_cmd, isert_conn);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
case ISCSI_OP_SCSI_TMFUNC:
@@ -1894,14 +1648,9 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
isert_dbg("Cmd %p\n", isert_cmd);
- if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
- ret = isert_check_pi_status(cmd,
- isert_cmd->fr_desc->pi_ctx->sig_mr);
- isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
- }
+ ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
- device->unreg_rdma_mem(isert_cmd, isert_conn);
- isert_cmd->rdma_wr_num = 0;
if (ret)
transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
else
@@ -1929,16 +1678,12 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
isert_dbg("Cmd %p\n", isert_cmd);
- if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
- ret = isert_check_pi_status(se_cmd,
- isert_cmd->fr_desc->pi_ctx->sig_mr);
- isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
- }
-
iscsit_stop_dataout_timer(cmd);
- device->unreg_rdma_mem(isert_cmd, isert_conn);
- cmd->write_data_done = isert_cmd->data.len;
- isert_cmd->rdma_wr_num = 0;
+
+ if (isert_prot_cmd(isert_conn, se_cmd))
+ ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
+ cmd->write_data_done = 0;
isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
@@ -2111,7 +1856,6 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
- struct isert_device *device = isert_conn->device;
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
@@ -2120,8 +1864,7 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
if (cmd->data_direction == DMA_TO_DEVICE)
iscsit_stop_dataout_timer(cmd);
-
- device->unreg_rdma_mem(isert_cmd, isert_conn);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
}
static enum target_prot_op
@@ -2274,234 +2017,6 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
return isert_post_response(isert_conn, isert_cmd);
}
-static int
-isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr,
- u32 data_left, u32 offset)
-{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct scatterlist *sg_start, *tmp_sg;
- struct isert_device *device = isert_conn->device;
- struct ib_device *ib_dev = device->ib_device;
- u32 sg_off, page_off;
- int i = 0, sg_nents;
-
- sg_off = offset / PAGE_SIZE;
- sg_start = &cmd->se_cmd.t_data_sg[sg_off];
- sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
- page_off = offset % PAGE_SIZE;
-
- rdma_wr->wr.sg_list = ib_sge;
- rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
-
- /*
- * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
- */
- for_each_sg(sg_start, tmp_sg, sg_nents, i) {
- isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
- "page_off: %u\n",
- (unsigned long long)tmp_sg->dma_address,
- tmp_sg->length, page_off);
-
- ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
- ib_sge->length = min_t(u32, data_left,
- ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
- ib_sge->lkey = device->pd->local_dma_lkey;
-
- isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
- ib_sge->addr, ib_sge->length, ib_sge->lkey);
- page_off = 0;
- data_left -= ib_sge->length;
- if (!data_left)
- break;
- ib_sge++;
- isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
- }
-
- rdma_wr->wr.num_sge = ++i;
- isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
- rdma_wr->wr.sg_list, rdma_wr->wr.num_sge);
-
- return rdma_wr->wr.num_sge;
-}
-
-static int
-isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
-{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_conn *isert_conn = conn->context;
- struct isert_data_buf *data = &isert_cmd->data;
- struct ib_rdma_wr *rdma_wr;
- struct ib_sge *ib_sge;
- u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
- int ret = 0, i, ib_sge_cnt;
-
- offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
- cmd->write_data_done : 0;
- ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
- se_cmd->t_data_nents, se_cmd->data_length,
- offset, isert_cmd->iser_ib_op,
- &isert_cmd->data);
- if (ret)
- return ret;
-
- data_left = data->len;
- offset = data->offset;
-
- ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
- if (!ib_sge) {
- isert_warn("Unable to allocate ib_sge\n");
- ret = -ENOMEM;
- goto unmap_cmd;
- }
- isert_cmd->ib_sge = ib_sge;
-
- isert_cmd->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
- isert_cmd->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) *
- isert_cmd->rdma_wr_num, GFP_KERNEL);
- if (!isert_cmd->rdma_wr) {
- isert_dbg("Unable to allocate isert_cmd->rdma_wr\n");
- ret = -ENOMEM;
- goto unmap_cmd;
- }
-
- rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
-
- for (i = 0; i < isert_cmd->rdma_wr_num; i++) {
- rdma_wr = &isert_cmd->rdma_wr[i];
- data_len = min(data_left, rdma_write_max);
-
- rdma_wr->wr.send_flags = 0;
- if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
- isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
-
- rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
- rdma_wr->remote_addr = isert_cmd->read_va + offset;
- rdma_wr->rkey = isert_cmd->read_stag;
- if (i + 1 == isert_cmd->rdma_wr_num)
- rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
- else
- rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
- } else {
- isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-
- rdma_wr->wr.opcode = IB_WR_RDMA_READ;
- rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
- rdma_wr->rkey = isert_cmd->write_stag;
- if (i + 1 == isert_cmd->rdma_wr_num)
- rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
- else
- rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
- }
-
- ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
- rdma_wr, data_len, offset);
- ib_sge += ib_sge_cnt;
-
- offset += data_len;
- va_offset += data_len;
- data_left -= data_len;
- }
-
- return 0;
-unmap_cmd:
- isert_unmap_data_buf(isert_conn, data);
-
- return ret;
-}
-
-static inline void
-isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
-{
- u32 rkey;
-
- memset(inv_wr, 0, sizeof(*inv_wr));
- inv_wr->wr_cqe = NULL;
- inv_wr->opcode = IB_WR_LOCAL_INV;
- inv_wr->ex.invalidate_rkey = mr->rkey;
-
- /* Bump the key */
- rkey = ib_inc_rkey(mr->rkey);
- ib_update_fast_reg_key(mr, rkey);
-}
-
-static int
-isert_fast_reg_mr(struct isert_conn *isert_conn,
- struct fast_reg_descriptor *fr_desc,
- struct isert_data_buf *mem,
- enum isert_indicator ind,
- struct ib_sge *sge)
-{
- struct isert_device *device = isert_conn->device;
- struct ib_device *ib_dev = device->ib_device;
- struct ib_mr *mr;
- struct ib_reg_wr reg_wr;
- struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
- int ret, n;
-
- if (mem->dma_nents == 1) {
- sge->lkey = device->pd->local_dma_lkey;
- sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
- sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
- isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
- sge->addr, sge->length, sge->lkey);
- return 0;
- }
-
- if (ind == ISERT_DATA_KEY_VALID)
- /* Registering data buffer */
- mr = fr_desc->data_mr;
- else
- /* Registering protection buffer */
- mr = fr_desc->pi_ctx->prot_mr;
-
- if (!(fr_desc->ind & ind)) {
- isert_inv_rkey(&inv_wr, mr);
- wr = &inv_wr;
- }
-
- n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE);
- if (unlikely(n != mem->nents)) {
- isert_err("failed to map mr sg (%d/%d)\n",
- n, mem->nents);
- return n < 0 ? n : -EINVAL;
- }
-
- isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
- fr_desc, mem->nents, mem->offset);
-
- reg_wr.wr.next = NULL;
- reg_wr.wr.opcode = IB_WR_REG_MR;
- reg_wr.wr.wr_cqe = NULL;
- reg_wr.wr.send_flags = 0;
- reg_wr.wr.num_sge = 0;
- reg_wr.mr = mr;
- reg_wr.key = mr->lkey;
- reg_wr.access = IB_ACCESS_LOCAL_WRITE;
-
- if (!wr)
- wr = &reg_wr.wr;
- else
- wr->next = &reg_wr.wr;
-
- ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
- if (ret) {
- isert_err("fast registration failed, ret:%d\n", ret);
- return ret;
- }
- fr_desc->ind &= ~ind;
-
- sge->lkey = mr->lkey;
- sge->addr = mr->iova;
- sge->length = mr->length;
-
- isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
- sge->addr, sge->length, sge->lkey);
-
- return ret;
-}
-
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
struct ib_sig_domain *domain)
@@ -2526,6 +2041,8 @@ isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+
switch (se_cmd->prot_op) {
case TARGET_PROT_DIN_INSERT:
case TARGET_PROT_DOUT_STRIP:
@@ -2547,228 +2064,59 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
return -EINVAL;
}
+ sig_attrs->check_mask =
+ (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
+ (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
+ (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
return 0;
}
-static inline u8
-isert_set_prot_checks(u8 prot_checks)
-{
- return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
- (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
- (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
-}
-
static int
-isert_reg_sig_mr(struct isert_conn *isert_conn,
- struct isert_cmd *isert_cmd,
- struct fast_reg_descriptor *fr_desc)
-{
- struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
- struct ib_sig_handover_wr sig_wr;
- struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
- struct pi_context *pi_ctx = fr_desc->pi_ctx;
- struct ib_sig_attrs sig_attrs;
+isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
+ struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+{
+ struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
+ enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
+ u8 port_num = conn->cm_id->port_num;
+ u64 addr;
+ u32 rkey, offset;
int ret;
- memset(&sig_attrs, 0, sizeof(sig_attrs));
- ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
- if (ret)
- goto err;
-
- sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
-
- if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
- isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
- wr = &inv_wr;
- }
-
- memset(&sig_wr, 0, sizeof(sig_wr));
- sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
- sig_wr.wr.wr_cqe = NULL;
- sig_wr.wr.sg_list = &isert_cmd->ib_sg[DATA];
- sig_wr.wr.num_sge = 1;
- sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
- sig_wr.sig_attrs = &sig_attrs;
- sig_wr.sig_mr = pi_ctx->sig_mr;
- if (se_cmd->t_prot_sg)
- sig_wr.prot = &isert_cmd->ib_sg[PROT];
-
- if (!wr)
- wr = &sig_wr.wr;
- else
- wr->next = &sig_wr.wr;
-
- ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
- if (ret) {
- isert_err("fast registration failed, ret:%d\n", ret);
- goto err;
- }
- fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
-
- isert_cmd->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
- isert_cmd->ib_sg[SIG].addr = 0;
- isert_cmd->ib_sg[SIG].length = se_cmd->data_length;
- if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
- se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
- /*
- * We have protection guards on the wire
- * so we need to set a larget transfer
- */
- isert_cmd->ib_sg[SIG].length += se_cmd->prot_length;
-
- isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
- isert_cmd->ib_sg[SIG].addr, isert_cmd->ib_sg[SIG].length,
- isert_cmd->ib_sg[SIG].lkey);
-err:
- return ret;
-}
-
-static int
-isert_handle_prot_cmd(struct isert_conn *isert_conn,
- struct isert_cmd *isert_cmd)
-{
- struct isert_device *device = isert_conn->device;
- struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
- int ret;
-
- if (!isert_cmd->fr_desc->pi_ctx) {
- ret = isert_create_pi_ctx(isert_cmd->fr_desc,
- device->ib_device,
- device->pd);
- if (ret) {
- isert_err("conn %p failed to allocate pi_ctx\n",
- isert_conn);
- return ret;
- }
- }
-
- if (se_cmd->t_prot_sg) {
- ret = isert_map_data_buf(isert_conn, isert_cmd,
- se_cmd->t_prot_sg,
- se_cmd->t_prot_nents,
- se_cmd->prot_length,
- 0,
- isert_cmd->iser_ib_op,
- &isert_cmd->prot);
- if (ret) {
- isert_err("conn %p failed to map protection buffer\n",
- isert_conn);
- return ret;
- }
-
- memset(&isert_cmd->ib_sg[PROT], 0, sizeof(isert_cmd->ib_sg[PROT]));
- ret = isert_fast_reg_mr(isert_conn, isert_cmd->fr_desc,
- &isert_cmd->prot,
- ISERT_PROT_KEY_VALID,
- &isert_cmd->ib_sg[PROT]);
- if (ret) {
- isert_err("conn %p failed to fast reg mr\n",
- isert_conn);
- goto unmap_prot_cmd;
- }
- }
-
- ret = isert_reg_sig_mr(isert_conn, isert_cmd, isert_cmd->fr_desc);
- if (ret) {
- isert_err("conn %p failed to fast reg mr\n",
- isert_conn);
- goto unmap_prot_cmd;
- }
- isert_cmd->fr_desc->ind |= ISERT_PROTECTED;
-
- return 0;
-
-unmap_prot_cmd:
- if (se_cmd->t_prot_sg)
- isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
-
- return ret;
-}
-
-static int
-isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
-{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_conn *isert_conn = conn->context;
- struct fast_reg_descriptor *fr_desc = NULL;
- struct ib_rdma_wr *rdma_wr;
- struct ib_sge *ib_sg;
- u32 offset;
- int ret = 0;
- unsigned long flags;
-
- offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
- cmd->write_data_done : 0;
- ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
- se_cmd->t_data_nents, se_cmd->data_length,
- offset, isert_cmd->iser_ib_op,
- &isert_cmd->data);
- if (ret)
- return ret;
-
- if (isert_cmd->data.dma_nents != 1 ||
- isert_prot_cmd(isert_conn, se_cmd)) {
- spin_lock_irqsave(&isert_conn->pool_lock, flags);
- fr_desc = list_first_entry(&isert_conn->fr_pool,
- struct fast_reg_descriptor, list);
- list_del(&fr_desc->list);
- spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
- isert_cmd->fr_desc = fr_desc;
- }
-
- ret = isert_fast_reg_mr(isert_conn, fr_desc, &isert_cmd->data,
- ISERT_DATA_KEY_VALID, &isert_cmd->ib_sg[DATA]);
- if (ret)
- goto unmap_cmd;
-
- if (isert_prot_cmd(isert_conn, se_cmd)) {
- ret = isert_handle_prot_cmd(isert_conn, isert_cmd);
- if (ret)
- goto unmap_cmd;
-
- ib_sg = &isert_cmd->ib_sg[SIG];
+ if (dir == DMA_FROM_DEVICE) {
+ addr = cmd->write_va;
+ rkey = cmd->write_stag;
+ offset = cmd->iscsi_cmd->write_data_done;
} else {
- ib_sg = &isert_cmd->ib_sg[DATA];
+ addr = cmd->read_va;
+ rkey = cmd->read_stag;
+ offset = 0;
}
- memcpy(&isert_cmd->s_ib_sge, ib_sg, sizeof(*ib_sg));
- isert_cmd->ib_sge = &isert_cmd->s_ib_sge;
- isert_cmd->rdma_wr_num = 1;
- memset(&isert_cmd->s_rdma_wr, 0, sizeof(isert_cmd->s_rdma_wr));
- isert_cmd->rdma_wr = &isert_cmd->s_rdma_wr;
+ if (isert_prot_cmd(conn, se_cmd)) {
+ struct ib_sig_attrs sig_attrs;
- rdma_wr = &isert_cmd->s_rdma_wr;
- rdma_wr->wr.sg_list = &isert_cmd->s_ib_sge;
- rdma_wr->wr.num_sge = 1;
- rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
- if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
- isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+ ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+ if (ret)
+ return ret;
- rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
- rdma_wr->remote_addr = isert_cmd->read_va;
- rdma_wr->rkey = isert_cmd->read_stag;
- rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
- 0 : IB_SEND_SIGNALED;
+ WARN_ON_ONCE(offset);
+ ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents,
+ se_cmd->t_prot_sg, se_cmd->t_prot_nents,
+ &sig_attrs, addr, rkey, dir);
} else {
- isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-
- rdma_wr->wr.opcode = IB_WR_RDMA_READ;
- rdma_wr->remote_addr = isert_cmd->write_va;
- rdma_wr->rkey = isert_cmd->write_stag;
- rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
+ ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents,
+ offset, addr, rkey, dir);
}
-
- return 0;
-
-unmap_cmd:
- if (fr_desc) {
- spin_lock_irqsave(&isert_conn->pool_lock, flags);
- list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
- spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
+ if (ret < 0) {
+ isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
+ return ret;
}
- isert_unmap_data_buf(isert_conn, &isert_cmd->data);
+ ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
+ if (ret < 0)
+ isert_err("Cmd: %p failed to post RDMA res\n", cmd);
return ret;
}
@@ -2778,21 +2126,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
- struct isert_device *device = isert_conn->device;
- struct ib_send_wr *wr_failed;
+ struct ib_cqe *cqe = NULL;
+ struct ib_send_wr *chain_wr = NULL;
int rc;
isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
isert_cmd, se_cmd->data_length);
- isert_cmd->iser_ib_op = ISER_IB_RDMA_WRITE;
- rc = device->reg_rdma_mem(isert_cmd, conn);
- if (rc) {
- isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
- return rc;
- }
-
- if (!isert_prot_cmd(isert_conn, se_cmd)) {
+ if (isert_prot_cmd(isert_conn, se_cmd)) {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+ cqe = &isert_cmd->tx_desc.tx_cqe;
+ } else {
/*
* Build isert_conn->tx_desc for iSCSI response PDU and attach
*/
@@ -2803,56 +2147,35 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd,
&isert_cmd->tx_desc.send_wr);
- isert_cmd->s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
- isert_cmd->rdma_wr_num += 1;
rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
if (rc) {
isert_err("ib_post_recv failed with %d\n", rc);
return rc;
}
- }
- rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
- if (rc)
- isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-
- if (!isert_prot_cmd(isert_conn, se_cmd))
- isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
- "READ\n", isert_cmd);
- else
- isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
- isert_cmd);
+ chain_wr = &isert_cmd->tx_desc.send_wr;
+ }
+ isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+ isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
return 1;
}
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
- struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = conn->context;
- struct isert_device *device = isert_conn->device;
- struct ib_send_wr *wr_failed;
- int rc;
isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
- isert_cmd, se_cmd->data_length, cmd->write_data_done);
- isert_cmd->iser_ib_op = ISER_IB_RDMA_READ;
- rc = device->reg_rdma_mem(isert_cmd, conn);
- if (rc) {
- isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
- return rc;
- }
+ isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
- rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
- if (rc)
- isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
+ isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+ &isert_cmd->tx_desc.tx_cqe, NULL);
isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
isert_cmd);
-
return 0;
}
@@ -3273,9 +2596,19 @@ static void isert_free_conn(struct iscsi_conn *conn)
isert_put_conn(isert_conn);
}
+static void isert_get_rx_pdu(struct iscsi_conn *conn)
+{
+ struct completion comp;
+
+ init_completion(&comp);
+
+ wait_for_completion_interruptible(&comp);
+}
+
static struct iscsit_transport iser_target_transport = {
.name = "IB/iSER",
.transport_type = ISCSI_INFINIBAND,
+ .rdma_shutdown = true,
.priv_size = sizeof(struct isert_cmd),
.owner = THIS_MODULE,
.iscsit_setup_np = isert_setup_np,
@@ -3291,6 +2624,7 @@ static struct iscsit_transport iser_target_transport = {
.iscsit_queue_data_in = isert_put_datain,
.iscsit_queue_status = isert_put_response,
.iscsit_aborted_task = isert_aborted_task,
+ .iscsit_get_rx_pdu = isert_get_rx_pdu,
.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 147900cbb..e512ba941 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -3,6 +3,7 @@
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
+#include <rdma/rw.h>
#include <scsi/iser.h>
@@ -53,10 +54,7 @@
#define ISERT_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
-#define ISERT_INFLIGHT_DATAOUTS 8
-
-#define ISERT_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
- (1 + ISERT_INFLIGHT_DATAOUTS) + \
+#define ISERT_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
ISERT_MAX_TX_MISC_PDUS + \
ISERT_MAX_RX_MISC_PDUS)
@@ -71,13 +69,6 @@ enum isert_desc_type {
ISCSI_TX_DATAIN
};
-enum iser_ib_op_code {
- ISER_IB_RECV,
- ISER_IB_SEND,
- ISER_IB_RDMA_WRITE,
- ISER_IB_RDMA_READ,
-};
-
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
@@ -118,42 +109,6 @@ static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
return container_of(cqe, struct iser_tx_desc, tx_cqe);
}
-
-enum isert_indicator {
- ISERT_PROTECTED = 1 << 0,
- ISERT_DATA_KEY_VALID = 1 << 1,
- ISERT_PROT_KEY_VALID = 1 << 2,
- ISERT_SIG_KEY_VALID = 1 << 3,
-};
-
-struct pi_context {
- struct ib_mr *prot_mr;
- struct ib_mr *sig_mr;
-};
-
-struct fast_reg_descriptor {
- struct list_head list;
- struct ib_mr *data_mr;
- u8 ind;
- struct pi_context *pi_ctx;
-};
-
-struct isert_data_buf {
- struct scatterlist *sg;
- int nents;
- u32 sg_off;
- u32 len; /* cur_rdma_length */
- u32 offset;
- unsigned int dma_nents;
- enum dma_data_direction dma_dir;
-};
-
-enum {
- DATA = 0,
- PROT = 1,
- SIG = 2,
-};
-
struct isert_cmd {
uint32_t read_stag;
uint32_t write_stag;
@@ -166,16 +121,7 @@ struct isert_cmd {
struct iscsi_cmd *iscsi_cmd;
struct iser_tx_desc tx_desc;
struct iser_rx_desc *rx_desc;
- enum iser_ib_op_code iser_ib_op;
- struct ib_sge *ib_sge;
- struct ib_sge s_ib_sge;
- int rdma_wr_num;
- struct ib_rdma_wr *rdma_wr;
- struct ib_rdma_wr s_rdma_wr;
- struct ib_sge ib_sg[3];
- struct isert_data_buf data;
- struct isert_data_buf prot;
- struct fast_reg_descriptor *fr_desc;
+ struct rdma_rw_ctx rw;
struct work_struct comp_work;
struct scatterlist sg;
};
@@ -210,10 +156,6 @@ struct isert_conn {
struct isert_device *device;
struct mutex mutex;
struct kref kref;
- struct list_head fr_pool;
- int fr_pool_size;
- /* lock to protect fastreg pool */
- spinlock_t pool_lock;
struct work_struct release_work;
bool logout_posted;
bool snd_w_inv;
@@ -236,7 +178,6 @@ struct isert_comp {
};
struct isert_device {
- int use_fastreg;
bool pi_capable;
int refcount;
struct ib_device *ib_device;
@@ -244,10 +185,6 @@ struct isert_device {
struct isert_comp *comps;
int comps_used;
struct list_head dev_node;
- int (*reg_rdma_mem)(struct isert_cmd *isert_cmd,
- struct iscsi_conn *conn);
- void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
- struct isert_conn *isert_conn);
};
struct isert_np {
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 845ce90c2..3322ed750 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -70,6 +70,7 @@ static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
+static bool never_register;
static int topspin_workarounds = 1;
module_param(srp_sg_tablesize, uint, 0444);
@@ -81,7 +82,7 @@ MODULE_PARM_DESC(cmd_sg_entries,
module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
- "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
+ "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
@@ -99,6 +100,9 @@ module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
"Use memory registration even for contiguous memory regions");
+module_param(never_register, bool, 0444);
+MODULE_PARM_DESC(never_register, "Never register memory");
+
static const struct kernel_param_ops srp_tmo_ops;
static int srp_reconnect_delay = 10;
@@ -316,7 +320,7 @@ static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
struct ib_fmr_pool_param fmr_param;
memset(&fmr_param, 0, sizeof(fmr_param));
- fmr_param.pool_size = target->scsi_host->can_queue;
+ fmr_param.pool_size = target->mr_pool_size;
fmr_param.dirty_watermark = fmr_param.pool_size / 4;
fmr_param.cache = 1;
fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
@@ -441,8 +445,7 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
struct srp_device *dev = target->srp_host->srp_dev;
- return srp_create_fr_pool(dev->dev, dev->pd,
- target->scsi_host->can_queue,
+ return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
dev->max_pages_per_mr);
}
@@ -469,7 +472,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
struct ib_qp *qp;
struct ib_fmr_pool *fmr_pool = NULL;
struct srp_fr_pool *fr_pool = NULL;
- const int m = dev->use_fast_reg ? 3 : 1;
+ const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
int ret;
init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
@@ -850,7 +853,7 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
for (i = 0; i < target->req_ring_size; ++i) {
req = &ch->req_ring[i];
- mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
+ mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
GFP_KERNEL);
if (!mr_list)
goto out;
@@ -1112,7 +1115,7 @@ static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
}
/**
- * srp_free_req() - Unmap data and add request to the free request list.
+ * srp_free_req() - Unmap data and adjust ch->req_lim.
* @ch: SRP RDMA channel.
* @req: Request to be freed.
* @scmnd: SCSI command associated with @req.
@@ -1299,9 +1302,16 @@ static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
srp_handle_qp_err(cq, wc, "FAST REG");
}
+/*
+ * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
+ * where to start in the first element. If sg_offset_p != NULL then
+ * *sg_offset_p is updated to the offset in state->sg[retval] of the first
+ * byte that has not yet been mapped.
+ */
static int srp_map_finish_fr(struct srp_map_state *state,
struct srp_request *req,
- struct srp_rdma_ch *ch, int sg_nents)
+ struct srp_rdma_ch *ch, int sg_nents,
+ unsigned int *sg_offset_p)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
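
The new comment above spells out the return convention of srp_map_finish_fr(): the return value counts scatterlist elements consumed, and *sg_offset_p is left at the first unmapped byte inside element [retval]. A hypothetical toy model of just that convention (it performs no fast registration; the element sizes and byte budget are made up):

#include <stdio.h>

/* Toy "scatterlist": element lengths in bytes (hypothetical values). */
static const unsigned int sg_len[] = { 4096, 4096, 1000 };

/*
 * Consume at most 'budget' bytes starting at *offset_p inside sg_len[0].
 * Returns the number of fully consumed elements; *offset_p is updated to
 * the first unconsumed byte inside element [retval], mirroring the
 * sg_offset_p convention described in the comment above.
 */
static unsigned int toy_map(unsigned int nents, unsigned int *offset_p,
			    unsigned int budget)
{
	unsigned int i, off = offset_p ? *offset_p : 0;

	for (i = 0; i < nents && budget > 0; i++) {
		unsigned int avail = sg_len[i] - off;

		if (budget < avail) {
			off += budget;	/* element i only partly consumed */
			budget = 0;
			break;
		}
		budget -= avail;
		off = 0;		/* next element starts at byte 0 */
	}
	if (offset_p)
		*offset_p = off;
	return i;
}

int main(void)
{
	unsigned int offset = 512;
	unsigned int n = toy_map(3, &offset, 8192);

	/* Elements 0 and 1 fully consumed; first unmapped byte is at
	 * offset 512 of element [2]. */
	printf("consumed %u elements, next offset %u\n", n, offset);
	return 0;
}
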
@@ -1316,13 +1326,14 @@ static int srp_map_finish_fr(struct srp_map_state *state,
WARN_ON_ONCE(!dev->use_fast_reg);
- if (sg_nents == 0)
- return 0;
-
if (sg_nents == 1 && target->global_mr) {
- srp_map_desc(state, sg_dma_address(state->sg),
- sg_dma_len(state->sg),
+ unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
+
+ srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
+ sg_dma_len(state->sg) - sg_offset,
target->global_mr->rkey);
+ if (sg_offset_p)
+ *sg_offset_p = 0;
return 1;
}
@@ -1333,9 +1344,17 @@ static int srp_map_finish_fr(struct srp_map_state *state,
rkey = ib_inc_rkey(desc->mr->rkey);
ib_update_fast_reg_key(desc->mr, rkey);
- n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
- if (unlikely(n < 0))
+ n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
+ dev->mr_page_size);
+ if (unlikely(n < 0)) {
+ srp_fr_pool_put(ch->fr_pool, &desc, 1);
+ pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
+ dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
+ sg_offset_p ? *sg_offset_p : -1, n);
return n;
+ }
+
+ WARN_ON_ONCE(desc->mr->length == 0);
req->reg_cqe.done = srp_reg_mr_err_done;
@@ -1357,8 +1376,10 @@ static int srp_map_finish_fr(struct srp_map_state *state,
desc->mr->length, desc->mr->rkey);
err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
- if (unlikely(err))
+ if (unlikely(err)) {
+ WARN_ON_ONCE(err == -ENOMEM);
return err;
+ }
return n;
}
@@ -1398,7 +1419,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
/*
* If the last entry of the MR wasn't a full page, then we need to
* close it out and start a new one -- we can only merge at page
- * boundries.
+ * boundaries.
*/
ret = 0;
if (len != dev->mr_page_size)
@@ -1413,10 +1434,9 @@ static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct scatterlist *sg;
int i, ret;
- state->desc = req->indirect_desc;
state->pages = req->map_page;
state->fmr.next = req->fmr_list;
- state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
+ state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
for_each_sg(scat, sg, count, i) {
ret = srp_map_sg_entry(state, ch, sg, i);
@@ -1428,8 +1448,6 @@ static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
if (ret)
return ret;
- req->nmdesc = state->nmdesc;
-
return 0;
}
@@ -1437,15 +1455,19 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct srp_request *req, struct scatterlist *scat,
int count)
{
- state->desc = req->indirect_desc;
+ unsigned int sg_offset = 0;
+
state->fr.next = req->fr_list;
- state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
+ state->fr.end = req->fr_list + ch->target->mr_per_cmd;
state->sg = scat;
+ if (count == 0)
+ return 0;
+
while (count) {
int i, n;
- n = srp_map_finish_fr(state, req, ch, count);
+ n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
if (unlikely(n < 0))
return n;
@@ -1454,8 +1476,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
state->sg = sg_next(state->sg);
}
- req->nmdesc = state->nmdesc;
-
return 0;
}
@@ -1468,15 +1488,12 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct scatterlist *sg;
int i;
- state->desc = req->indirect_desc;
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
ib_sg_dma_len(dev->dev, sg),
target->global_mr->rkey);
}
- req->nmdesc = state->nmdesc;
-
return 0;
}
@@ -1514,9 +1531,10 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
#ifdef CONFIG_NEED_SG_DMA_LENGTH
idb_sg->dma_length = idb_sg->length; /* hack^2 */
#endif
- ret = srp_map_finish_fr(&state, req, ch, 1);
+ ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
if (ret < 0)
return ret;
+ WARN_ON_ONCE(ret < 1);
} else if (dev->use_fmr) {
state.pages = idb_pages;
state.pages[0] = (req->indirect_dma_addr &
@@ -1534,6 +1552,41 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
return 0;
}
+#if defined(DYNAMIC_DEBUG)
+static void srp_check_mapping(struct srp_map_state *state,
+ struct srp_rdma_ch *ch, struct srp_request *req,
+ struct scatterlist *scat, int count)
+{
+ struct srp_device *dev = ch->target->srp_host->srp_dev;
+ struct srp_fr_desc **pfr;
+ u64 desc_len = 0, mr_len = 0;
+ int i;
+
+ for (i = 0; i < state->ndesc; i++)
+ desc_len += be32_to_cpu(req->indirect_desc[i].len);
+ if (dev->use_fast_reg)
+ for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
+ mr_len += (*pfr)->mr->length;
+ else if (dev->use_fmr)
+ for (i = 0; i < state->nmdesc; i++)
+ mr_len += be32_to_cpu(req->indirect_desc[i].len);
+ if (desc_len != scsi_bufflen(req->scmnd) ||
+ mr_len > scsi_bufflen(req->scmnd))
+ pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
+ scsi_bufflen(req->scmnd), desc_len, mr_len,
+ state->ndesc, state->nmdesc);
+}
+#endif
+
+/**
+ * srp_map_data() - map SCSI data buffer onto an SRP request
+ * @scmnd: SCSI command to map
+ * @ch: SRP RDMA channel
+ * @req: SRP request
+ *
+ * Returns the length in bytes of the SRP_CMD IU or a negative value if
+ * mapping failed.
+ */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_request *req)
{
@@ -1600,12 +1653,25 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
target->indirect_size, DMA_TO_DEVICE);
memset(&state, 0, sizeof(state));
+ state.desc = req->indirect_desc;
if (dev->use_fast_reg)
- srp_map_sg_fr(&state, ch, req, scat, count);
+ ret = srp_map_sg_fr(&state, ch, req, scat, count);
else if (dev->use_fmr)
- srp_map_sg_fmr(&state, ch, req, scat, count);
+ ret = srp_map_sg_fmr(&state, ch, req, scat, count);
else
- srp_map_sg_dma(&state, ch, req, scat, count);
+ ret = srp_map_sg_dma(&state, ch, req, scat, count);
+ req->nmdesc = state.nmdesc;
+ if (ret < 0)
+ goto unmap;
+
+#if defined(DYNAMIC_DEBUG)
+ {
+ DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
+ "Memory mapping consistency check");
+ if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT))
+ srp_check_mapping(&state, ch, req, scat, count);
+ }
+#endif
/* We've mapped the request, now pull as much of the indirect
* descriptor table as we can into the command buffer. If this
@@ -1628,7 +1694,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
!target->allow_ext_sg)) {
shost_printk(KERN_ERR, target->scsi_host,
"Could not fit S/G list into SRP_CMD\n");
- return -EIO;
+ ret = -EIO;
+ goto unmap;
}
count = min(state.ndesc, target->cmd_sg_cnt);
@@ -1646,7 +1713,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
idb_len, &idb_rkey);
if (ret < 0)
- return ret;
+ goto unmap;
req->nmdesc++;
} else {
idb_rkey = cpu_to_be32(target->global_mr->rkey);
@@ -1672,6 +1739,12 @@ map_complete:
cmd->buf_fmt = fmt;
return len;
+
+unmap:
+ srp_unmap_data(scmnd, ch, req);
+ if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
+ ret = -E2BIG;
+ return ret;
}
/*
@@ -2564,6 +2637,20 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
+static int srp_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct srp_target_port *target = host_to_target(shost);
+ struct srp_device *srp_dev = target->srp_host->srp_dev;
+ struct ib_device *ibdev = srp_dev->dev;
+
+ if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ blk_queue_virt_boundary(sdev->request_queue,
+ ~srp_dev->mr_page_mask);
+
+ return 0;
+}
+
static int srp_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
@@ -2755,6 +2842,7 @@ static struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
+ .slave_alloc = srp_slave_alloc,
.slave_configure = srp_slave_configure,
.info = srp_target_info,
.queuecommand = srp_queuecommand,
@@ -2819,7 +2907,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
spin_unlock(&host->target_lock);
scsi_scan_target(&target->scsi_host->shost_gendev,
- 0, target->scsi_id, SCAN_WILD_CARD, 0);
+ 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
if (srp_connected_ch(target) < target->ch_count ||
target->qp_in_error) {
@@ -2829,7 +2917,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
goto out;
}
- pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
+ pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
dev_name(&target->scsi_host->shost_gendev),
srp_sdev_count(target->scsi_host));
@@ -3097,7 +3185,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
case SRP_OPT_SG_TABLESIZE:
if (match_int(args, &token) || token < 1 ||
- token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
+ token > SG_MAX_SEGMENTS) {
pr_warn("bad max sg_tablesize parameter '%s'\n",
p);
goto out;
@@ -3161,6 +3249,7 @@ static ssize_t srp_create_target(struct device *dev,
struct srp_device *srp_dev = host->srp_dev;
struct ib_device *ibdev = srp_dev->dev;
int ret, node_idx, node, cpu, i;
+ unsigned int max_sectors_per_mr, mr_per_cmd = 0;
bool multich = false;
target_host = scsi_host_alloc(&srp_template,
@@ -3217,7 +3306,33 @@ static ssize_t srp_create_target(struct device *dev,
target->sg_tablesize = target->cmd_sg_cnt;
}
+ if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
+ /*
+ * FR and FMR can only map one HCA page per entry. If the
+ * start address is not aligned on a HCA page boundary two
+ * entries will be used for the head and the tail although
+ * these two entries combined contain at most one HCA page of
+ * data. Hence the "+ 1" in the calculation below.
+ *
+ * The indirect data buffer descriptor is contiguous so the
+ * memory for that buffer will only be registered if
+ * register_always is true. Hence add one to mr_per_cmd if
+ * register_always has been set.
+ */
+ max_sectors_per_mr = srp_dev->max_pages_per_mr <<
+ (ilog2(srp_dev->mr_page_size) - 9);
+ mr_per_cmd = register_always +
+ (target->scsi_host->max_sectors + 1 +
+ max_sectors_per_mr - 1) / max_sectors_per_mr;
+ pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
+ target->scsi_host->max_sectors,
+ srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
+ max_sectors_per_mr, mr_per_cmd);
+ }
+
target_host->sg_tablesize = target->sg_tablesize;
+ target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
+ target->mr_per_cmd = mr_per_cmd;
target->indirect_size = target->sg_tablesize *
sizeof (struct srp_direct_buf);
target->max_iu_len = sizeof (struct srp_cmd) +
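
The mr_per_cmd computation above can be sanity-checked in userspace. The inputs below (512 KiB maximum I/O, 4 KiB MR pages, 512 pages per MR, register_always enabled) are illustrative assumptions, and the shift by ilog2(mr_page_size) - 9 is written as a division by 512, which is equivalent for power-of-two page sizes:

#include <stdio.h>

int main(void)
{
	/* Illustrative inputs (assumed, not taken from the driver). */
	unsigned int max_sectors      = 1024;	/* 512-byte sectors = 512 KiB */
	unsigned int max_pages_per_mr = 512;
	unsigned int mr_page_size     = 4096;
	unsigned int register_always  = 1;

	/* Same arithmetic as the hunk above: one extra sector for the
	 * potentially unaligned head/tail, plus one MR for the indirect
	 * descriptor buffer when register_always is set. */
	unsigned int max_sectors_per_mr = max_pages_per_mr *
					  (mr_page_size / 512);
	unsigned int mr_per_cmd = register_always +
		(max_sectors + 1 + max_sectors_per_mr - 1) / max_sectors_per_mr;

	printf("max_sectors_per_mr = %u\n", max_sectors_per_mr);	/* 4096 */
	printf("mr_per_cmd         = %u\n", mr_per_cmd);		/* 2 */
	return 0;
}
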
@@ -3410,21 +3525,10 @@ static void srp_add_one(struct ib_device *device)
int mr_page_shift, p;
u64 max_pages_per_mr;
- srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
+ srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
return;
- srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
- device->map_phys_fmr && device->unmap_fmr);
- srp_dev->has_fr = (device->attrs.device_cap_flags &
- IB_DEVICE_MEM_MGT_EXTENSIONS);
- if (!srp_dev->has_fmr && !srp_dev->has_fr)
- dev_warn(&device->dev, "neither FMR nor FR is supported\n");
-
- srp_dev->use_fast_reg = (srp_dev->has_fr &&
- (!srp_dev->has_fmr || prefer_fr));
- srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
-
/*
* Use the smallest page size supported by the HCA, down to a
* minimum of 4096 bytes. We're unlikely to build large sglists
@@ -3435,8 +3539,25 @@ static void srp_add_one(struct ib_device *device)
srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
max_pages_per_mr = device->attrs.max_mr_size;
do_div(max_pages_per_mr, srp_dev->mr_page_size);
+ pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
+ device->attrs.max_mr_size, srp_dev->mr_page_size,
+ max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
max_pages_per_mr);
+
+ srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
+ device->map_phys_fmr && device->unmap_fmr);
+ srp_dev->has_fr = (device->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS);
+ if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
+ dev_warn(&device->dev, "neither FMR nor FR is supported\n");
+ } else if (!never_register &&
+ device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) {
+ srp_dev->use_fast_reg = (srp_dev->has_fr &&
+ (!srp_dev->has_fmr || prefer_fr));
+ srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
+ }
+
if (srp_dev->use_fast_reg) {
srp_dev->max_pages_per_mr =
min_t(u32, srp_dev->max_pages_per_mr,
@@ -3456,15 +3577,14 @@ static void srp_add_one(struct ib_device *device)
if (IS_ERR(srp_dev->pd))
goto free_dev;
- if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
+ if (never_register || !register_always ||
+ (!srp_dev->has_fmr && !srp_dev->has_fr)) {
srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE);
if (IS_ERR(srp_dev->global_mr))
goto err_pd;
- } else {
- srp_dev->global_mr = NULL;
}
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
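
To follow the reordered registration-mode selection and the new never_register knob, here is a hypothetical standalone sketch of the same decision tree; the capability flags and sizes are example inputs, not values queried from a real HCA:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	/* Example capability inputs (assumed, not from a real device). */
	bool never_register = false, register_always = true, prefer_fr = true;
	bool has_fmr = true, has_fr = true;
	unsigned long long max_mr_size = 1ULL << 32;
	unsigned int mr_page_size = 4096;

	bool use_fast_reg = false, use_fmr = false;

	/* Mirrors the reordered srp_add_one() logic in the hunk above. */
	if (!never_register && !has_fmr && !has_fr) {
		printf("neither FMR nor FR is supported\n");
	} else if (!never_register &&
		   max_mr_size >= 2ULL * mr_page_size) {
		use_fast_reg = has_fr && (!has_fmr || prefer_fr);
		use_fmr = !use_fast_reg && has_fmr;
	}

	/* A global rkey (DMA MR) is only set up when registration may be
	 * skipped for some or all requests. */
	bool need_global_mr = never_register || !register_always ||
			      (!has_fmr && !has_fr);

	printf("use_fast_reg=%d use_fmr=%d global_mr=%d\n",
	       use_fast_reg, use_fmr, need_global_mr);
	return 0;
}
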
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 9e05ce4a0..26bb9b0a7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -202,6 +202,8 @@ struct srp_target_port {
char target_name[32];
unsigned int scsi_id;
unsigned int sg_tablesize;
+ int mr_pool_size;
+ int mr_per_cmd;
int queue_size;
int req_ring_size;
int comp_vector;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 8b42401d4..4a4155640 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -254,8 +254,8 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
memset(cif, 0, sizeof(*cif));
cif->base_version = 1;
cif->class_version = 1;
- cif->resp_time_value = 20;
+ ib_set_cpi_resp_time(cif, 20);
mad->mad_hdr.status = 0;
}
@@ -765,52 +765,6 @@ static int srpt_post_recv(struct srpt_device *sdev,
}
/**
- * srpt_post_send() - Post an IB send request.
- *
- * Returns zero upon success and a non-zero value upon failure.
- */
-static int srpt_post_send(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx, int len)
-{
- struct ib_sge list;
- struct ib_send_wr wr, *bad_wr;
- struct srpt_device *sdev = ch->sport->sdev;
- int ret;
-
- atomic_inc(&ch->req_lim);
-
- ret = -ENOMEM;
- if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
- pr_warn("IB send queue full (needed 1)\n");
- goto out;
- }
-
- ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
- DMA_TO_DEVICE);
-
- list.addr = ioctx->ioctx.dma;
- list.length = len;
- list.lkey = sdev->pd->local_dma_lkey;
-
- ioctx->ioctx.cqe.done = srpt_send_done;
- wr.next = NULL;
- wr.wr_cqe = &ioctx->ioctx.cqe;
- wr.sg_list = &list;
- wr.num_sge = 1;
- wr.opcode = IB_WR_SEND;
- wr.send_flags = IB_SEND_SIGNALED;
-
- ret = ib_post_send(ch->qp, &wr, &bad_wr);
-
-out:
- if (ret < 0) {
- atomic_inc(&ch->sq_wr_avail);
- atomic_dec(&ch->req_lim);
- }
- return ret;
-}
-
-/**
* srpt_zerolength_write() - Perform a zero-length RDMA write.
*
* A quote from the InfiniBand specification: C9-88: For an HCA responder
@@ -843,6 +797,110 @@ static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
}
}
+static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
+ struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
+ unsigned *sg_cnt)
+{
+ enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+ struct scatterlist *prev = NULL;
+ unsigned prev_nents;
+ int ret, i;
+
+ if (nbufs == 1) {
+ ioctx->rw_ctxs = &ioctx->s_rw_ctx;
+ } else {
+ ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
+ GFP_KERNEL);
+ if (!ioctx->rw_ctxs)
+ return -ENOMEM;
+ }
+
+ for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+ u64 remote_addr = be64_to_cpu(db->va);
+ u32 size = be32_to_cpu(db->len);
+ u32 rkey = be32_to_cpu(db->key);
+
+ ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
+ i < nbufs - 1);
+ if (ret)
+ goto unwind;
+
+ ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
+ ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
+ if (ret < 0) {
+ target_free_sgl(ctx->sg, ctx->nents);
+ goto unwind;
+ }
+
+ ioctx->n_rdma += ret;
+ ioctx->n_rw_ctx++;
+
+ if (prev) {
+ sg_unmark_end(&prev[prev_nents - 1]);
+ sg_chain(prev, prev_nents + 1, ctx->sg);
+ } else {
+ *sg = ctx->sg;
+ }
+
+ prev = ctx->sg;
+ prev_nents = ctx->nents;
+
+ *sg_cnt += ctx->nents;
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
+ ctx->sg, ctx->nents, dir);
+ target_free_sgl(ctx->sg, ctx->nents);
+ }
+ if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
+ kfree(ioctx->rw_ctxs);
+ return ret;
+}
+
+static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
+ int i;
+
+ for (i = 0; i < ioctx->n_rw_ctx; i++) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
+ ctx->sg, ctx->nents, dir);
+ target_free_sgl(ctx->sg, ctx->nents);
+ }
+
+ if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
+ kfree(ioctx->rw_ctxs);
+}
+
+static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
+{
+ /*
+ * The pointer computations below will only be compiled correctly
+ * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
+ * whether srp_cmd::add_data has been declared as a byte pointer.
+ */
+ BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
+ !__same_type(srp_cmd->add_data[0], (u8)0));
+
+ /*
+ * According to the SRP spec, the lower two bits of the 'ADDITIONAL
+ * CDB LENGTH' field are reserved and the size in bytes of this field
+ * is four times the value specified in bits 3..7. Hence the "& ~3".
+ */
+ return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
+}
+
/**
* srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
* @ioctx: Pointer to the I/O context associated with the request.
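
The "& ~3" in srpt_get_desc_buf() above only clears the two reserved low-order bits of the ADDITIONAL CDB LENGTH byte, yielding the byte offset of the descriptor table inside add_data. A tiny illustrative check with made-up sample values:

#include <stdio.h>

int main(void)
{
	/* Example ADDITIONAL CDB LENGTH bytes (assumed sample values). */
	unsigned char samples[] = { 0x00, 0x10, 0x13 };

	for (unsigned int i = 0; i < sizeof(samples); i++) {
		unsigned int add_cdb_len = samples[i];

		/* Same masking as srpt_get_desc_buf(): the descriptor table
		 * starts add_cdb_len & ~3 bytes into add_data. */
		printf("add_cdb_len=0x%02x -> descriptor offset %u bytes\n",
		       add_cdb_len, add_cdb_len & ~3u);
	}
	return 0;
}
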
@@ -858,94 +916,59 @@ static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
* -ENOMEM when memory allocation fails and zero upon success.
*/
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
- struct srp_cmd *srp_cmd,
- enum dma_data_direction *dir, u64 *data_len)
+ struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
+ struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len)
{
- struct srp_indirect_buf *idb;
- struct srp_direct_buf *db;
- unsigned add_cdb_offset;
- int ret;
-
- /*
- * The pointer computations below will only be compiled correctly
- * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
- * whether srp_cmd::add_data has been declared as a byte pointer.
- */
- BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
- && !__same_type(srp_cmd->add_data[0], (u8)0));
-
BUG_ON(!dir);
BUG_ON(!data_len);
- ret = 0;
- *data_len = 0;
-
/*
* The lower four bits of the buffer format field contain the DATA-IN
* buffer descriptor format, and the highest four bits contain the
* DATA-OUT buffer descriptor format.
*/
- *dir = DMA_NONE;
if (srp_cmd->buf_fmt & 0xf)
/* DATA-IN: transfer data from target to initiator (read). */
*dir = DMA_FROM_DEVICE;
else if (srp_cmd->buf_fmt >> 4)
/* DATA-OUT: transfer data from initiator to target (write). */
*dir = DMA_TO_DEVICE;
+ else
+ *dir = DMA_NONE;
+
+ /* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
+ ioctx->cmd.data_direction = *dir;
- /*
- * According to the SRP spec, the lower two bits of the 'ADDITIONAL
- * CDB LENGTH' field are reserved and the size in bytes of this field
- * is four times the value specified in bits 3..7. Hence the "& ~3".
- */
- add_cdb_offset = srp_cmd->add_cdb_len & ~3;
if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
- ioctx->n_rbuf = 1;
- ioctx->rbufs = &ioctx->single_rbuf;
+ struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
- db = (struct srp_direct_buf *)(srp_cmd->add_data
- + add_cdb_offset);
- memcpy(ioctx->rbufs, db, sizeof(*db));
*data_len = be32_to_cpu(db->len);
+ return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
- idb = (struct srp_indirect_buf *)(srp_cmd->add_data
- + add_cdb_offset);
-
- ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof(*db);
+ struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
+ int nbufs = be32_to_cpu(idb->table_desc.len) /
+ sizeof(struct srp_direct_buf);
- if (ioctx->n_rbuf >
+ if (nbufs >
(srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
pr_err("received unsupported SRP_CMD request"
" type (%u out + %u in != %u / %zu)\n",
srp_cmd->data_out_desc_cnt,
srp_cmd->data_in_desc_cnt,
be32_to_cpu(idb->table_desc.len),
- sizeof(*db));
- ioctx->n_rbuf = 0;
- ret = -EINVAL;
- goto out;
+ sizeof(struct srp_direct_buf));
+ return -EINVAL;
}
- if (ioctx->n_rbuf == 1)
- ioctx->rbufs = &ioctx->single_rbuf;
- else {
- ioctx->rbufs =
- kmalloc(ioctx->n_rbuf * sizeof(*db), GFP_ATOMIC);
- if (!ioctx->rbufs) {
- ioctx->n_rbuf = 0;
- ret = -ENOMEM;
- goto out;
- }
- }
-
- db = idb->desc_list;
- memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof(*db));
*data_len = be32_to_cpu(idb->len);
+ return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
+ sg, sg_cnt);
+ } else {
+ *data_len = 0;
+ return 0;
}
-out:
- return ret;
}
/**
@@ -1049,217 +1072,6 @@ static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
}
/**
- * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
- */
-static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- struct scatterlist *sg;
- enum dma_data_direction dir;
-
- BUG_ON(!ch);
- BUG_ON(!ioctx);
- BUG_ON(ioctx->n_rdma && !ioctx->rdma_wrs);
-
- while (ioctx->n_rdma)
- kfree(ioctx->rdma_wrs[--ioctx->n_rdma].wr.sg_list);
-
- kfree(ioctx->rdma_wrs);
- ioctx->rdma_wrs = NULL;
-
- if (ioctx->mapped_sg_count) {
- sg = ioctx->sg;
- WARN_ON(!sg);
- dir = ioctx->cmd.data_direction;
- BUG_ON(dir == DMA_NONE);
- ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
- target_reverse_dma_direction(&ioctx->cmd));
- ioctx->mapped_sg_count = 0;
- }
-}
-
-/**
- * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
- */
-static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- struct ib_device *dev = ch->sport->sdev->device;
- struct se_cmd *cmd;
- struct scatterlist *sg, *sg_orig;
- int sg_cnt;
- enum dma_data_direction dir;
- struct ib_rdma_wr *riu;
- struct srp_direct_buf *db;
- dma_addr_t dma_addr;
- struct ib_sge *sge;
- u64 raddr;
- u32 rsize;
- u32 tsize;
- u32 dma_len;
- int count, nrdma;
- int i, j, k;
-
- BUG_ON(!ch);
- BUG_ON(!ioctx);
- cmd = &ioctx->cmd;
- dir = cmd->data_direction;
- BUG_ON(dir == DMA_NONE);
-
- ioctx->sg = sg = sg_orig = cmd->t_data_sg;
- ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;
-
- count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
- target_reverse_dma_direction(cmd));
- if (unlikely(!count))
- return -EAGAIN;
-
- ioctx->mapped_sg_count = count;
-
- if (ioctx->rdma_wrs && ioctx->n_rdma_wrs)
- nrdma = ioctx->n_rdma_wrs;
- else {
- nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
- + ioctx->n_rbuf;
-
- ioctx->rdma_wrs = kcalloc(nrdma, sizeof(*ioctx->rdma_wrs),
- GFP_KERNEL);
- if (!ioctx->rdma_wrs)
- goto free_mem;
-
- ioctx->n_rdma_wrs = nrdma;
- }
-
- db = ioctx->rbufs;
- tsize = cmd->data_length;
- dma_len = ib_sg_dma_len(dev, &sg[0]);
- riu = ioctx->rdma_wrs;
-
- /*
- * For each remote desc - calculate the #ib_sge.
- * If #ib_sge < SRPT_DEF_SG_PER_WQE per rdma operation then
- * each remote desc rdma_iu is required a rdma wr;
- * else
- * we need to allocate extra rdma_iu to carry extra #ib_sge in
- * another rdma wr
- */
- for (i = 0, j = 0;
- j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
- rsize = be32_to_cpu(db->len);
- raddr = be64_to_cpu(db->va);
- riu->remote_addr = raddr;
- riu->rkey = be32_to_cpu(db->key);
- riu->wr.num_sge = 0;
-
- /* calculate how many sge required for this remote_buf */
- while (rsize > 0 && tsize > 0) {
-
- if (rsize >= dma_len) {
- tsize -= dma_len;
- rsize -= dma_len;
- raddr += dma_len;
-
- if (tsize > 0) {
- ++j;
- if (j < count) {
- sg = sg_next(sg);
- dma_len = ib_sg_dma_len(
- dev, sg);
- }
- }
- } else {
- tsize -= rsize;
- dma_len -= rsize;
- rsize = 0;
- }
-
- ++riu->wr.num_sge;
-
- if (rsize > 0 &&
- riu->wr.num_sge == SRPT_DEF_SG_PER_WQE) {
- ++ioctx->n_rdma;
- riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
- sizeof(*riu->wr.sg_list),
- GFP_KERNEL);
- if (!riu->wr.sg_list)
- goto free_mem;
-
- ++riu;
- riu->wr.num_sge = 0;
- riu->remote_addr = raddr;
- riu->rkey = be32_to_cpu(db->key);
- }
- }
-
- ++ioctx->n_rdma;
- riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
- sizeof(*riu->wr.sg_list),
- GFP_KERNEL);
- if (!riu->wr.sg_list)
- goto free_mem;
- }
-
- db = ioctx->rbufs;
- tsize = cmd->data_length;
- riu = ioctx->rdma_wrs;
- sg = sg_orig;
- dma_len = ib_sg_dma_len(dev, &sg[0]);
- dma_addr = ib_sg_dma_address(dev, &sg[0]);
-
- /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
- for (i = 0, j = 0;
- j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
- rsize = be32_to_cpu(db->len);
- sge = riu->wr.sg_list;
- k = 0;
-
- while (rsize > 0 && tsize > 0) {
- sge->addr = dma_addr;
- sge->lkey = ch->sport->sdev->pd->local_dma_lkey;
-
- if (rsize >= dma_len) {
- sge->length =
- (tsize < dma_len) ? tsize : dma_len;
- tsize -= dma_len;
- rsize -= dma_len;
-
- if (tsize > 0) {
- ++j;
- if (j < count) {
- sg = sg_next(sg);
- dma_len = ib_sg_dma_len(
- dev, sg);
- dma_addr = ib_sg_dma_address(
- dev, sg);
- }
- }
- } else {
- sge->length = (tsize < rsize) ? tsize : rsize;
- tsize -= rsize;
- dma_len -= rsize;
- dma_addr += rsize;
- rsize = 0;
- }
-
- ++k;
- if (k == riu->wr.num_sge && rsize > 0 && tsize > 0) {
- ++riu;
- sge = riu->wr.sg_list;
- k = 0;
- } else if (rsize > 0 && tsize > 0)
- ++sge;
- }
- }
-
- return 0;
-
-free_mem:
- srpt_unmap_sg_to_ib_sge(ch, ioctx);
-
- return -ENOMEM;
-}
-
-/**
* srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
*/
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
@@ -1284,12 +1096,8 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
BUG_ON(ioctx->ch != ch);
spin_lock_init(&ioctx->spinlock);
ioctx->state = SRPT_STATE_NEW;
- ioctx->n_rbuf = 0;
- ioctx->rbufs = NULL;
ioctx->n_rdma = 0;
- ioctx->n_rdma_wrs = 0;
- ioctx->rdma_wrs = NULL;
- ioctx->mapped_sg_count = 0;
+ ioctx->n_rw_ctx = 0;
init_completion(&ioctx->tx_done);
ioctx->queue_status_only = false;
/*
@@ -1359,7 +1167,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* SRP_RSP sending failed or the SRP_RSP send completion has
* not been received in time.
*/
- srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
transport_generic_free_cmd(&ioctx->cmd, 0);
break;
case SRPT_STATE_MGMT_RSP_SENT:
@@ -1387,6 +1194,7 @@ static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(ioctx->n_rdma <= 0);
atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+ ioctx->n_rdma = 0;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
@@ -1403,23 +1211,6 @@ static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
__LINE__, srpt_get_cmd_state(ioctx));
}
-static void srpt_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct srpt_send_ioctx *ioctx =
- container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
-
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
- /*
- * Note: if an RDMA write error completion is received that
- * means that a SEND also has been posted. Defer further
- * processing of the associated command until the send error
- * completion has been received.
- */
- pr_info("RDMA_WRITE for ioctx 0x%p failed with status %d\n",
- ioctx, wc->status);
- }
-}
-
/**
* srpt_build_cmd_rsp() - Build an SRP_RSP response.
* @ch: RDMA channel through which the request has been received.
@@ -1537,6 +1328,8 @@ static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
{
struct se_cmd *cmd;
struct srp_cmd *srp_cmd;
+ struct scatterlist *sg = NULL;
+ unsigned sg_cnt = 0;
u64 data_len;
enum dma_data_direction dir;
int rc;
@@ -1563,16 +1356,21 @@ static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
break;
}
- if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
- pr_err("0x%llx: parsing SRP descriptor table failed.\n",
- srp_cmd->tag);
+ rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt,
+ &data_len);
+ if (rc) {
+ if (rc != -EAGAIN) {
+ pr_err("0x%llx: parsing SRP descriptor table failed.\n",
+ srp_cmd->tag);
+ }
goto release_ioctx;
}
- rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
+ rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
&send_ioctx->sense_data[0],
scsilun_to_int(&srp_cmd->lun), data_len,
- TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+ TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
+ sg, sg_cnt, NULL, 0, NULL, 0);
if (rc != 0) {
pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
srp_cmd->tag);
@@ -1664,23 +1462,21 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
recv_ioctx->ioctx.dma, srp_max_req_size,
DMA_FROM_DEVICE);
- if (unlikely(ch->state == CH_CONNECTING)) {
- list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
- goto out;
- }
+ if (unlikely(ch->state == CH_CONNECTING))
+ goto out_wait;
if (unlikely(ch->state != CH_LIVE))
- goto out;
+ return;
srp_cmd = recv_ioctx->ioctx.buf;
if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
- if (!send_ioctx)
+ if (!send_ioctx) {
+ if (!list_empty(&ch->cmd_wait_list))
+ goto out_wait;
send_ioctx = srpt_get_send_ioctx(ch);
- if (unlikely(!send_ioctx)) {
- list_add_tail(&recv_ioctx->wait_list,
- &ch->cmd_wait_list);
- goto out;
}
+ if (unlikely(!send_ioctx))
+ goto out_wait;
}
switch (srp_cmd->opcode) {
@@ -1709,8 +1505,10 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
}
srpt_post_recv(ch->sport->sdev, recv_ioctx);
-out:
return;
+
+out_wait:
+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
}
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1779,14 +1577,13 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
state != SRPT_STATE_MGMT_RSP_SENT);
- atomic_inc(&ch->sq_wr_avail);
+ atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
if (wc->status != IB_WC_SUCCESS)
pr_info("sending response for ioctx 0x%p failed"
" with status %d\n", ioctx, wc->status);
if (state != SRPT_STATE_DONE) {
- srpt_unmap_sg_to_ib_sge(ch, ioctx);
transport_generic_free_cmd(&ioctx->cmd, 0);
} else {
pr_err("IB completion has been received too late for"
@@ -1832,8 +1629,17 @@ retry:
qp_init->srq = sdev->srq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
qp_init->qp_type = IB_QPT_RC;
- qp_init->cap.max_send_wr = srp_sq_size;
+ /*
+ * We divide up our send queue size into half SEND WRs to send the
+ * completions, and half R/W contexts to actually do the RDMA
+ * READ/WRITE transfers. Note that we need to allocate CQ slots for
+ * both, as RDMA contexts will also post completions for the
+ * RDMA READ case.
+ */
+ qp_init->cap.max_send_wr = srp_sq_size / 2;
+ qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
+ qp_init->port_num = ch->sport->port;
ch->qp = ib_create_qp(sdev->pd, qp_init);
if (IS_ERR(ch->qp)) {
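
As the comment above notes, both halves of the send queue still need CQ slots, so the completion queue must cover the full srp_sq_size. Assuming the default srp_sq_size of 4096 (an assumption; it is a module parameter), the resulting sizes are:

#include <stdio.h>

int main(void)
{
	/* srp_sq_size is a module parameter; 4096 is assumed here. */
	unsigned int srp_sq_size = 4096;

	unsigned int max_send_wr   = srp_sq_size / 2;	/* SEND completions  */
	unsigned int max_rdma_ctxs = srp_sq_size / 2;	/* RDMA READ/WRITE   */
	unsigned int cq_slots      = max_send_wr + max_rdma_ctxs;

	printf("max_send_wr=%u max_rdma_ctxs=%u cq_slots=%u\n",
	       max_send_wr, max_rdma_ctxs, cq_slots);
	return 0;
}
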
@@ -1960,14 +1766,6 @@ static void __srpt_close_all_ch(struct srpt_device *sdev)
}
}
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
- return 1;
-}
-
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
@@ -2386,95 +2184,6 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return ret;
}
-/**
- * srpt_perform_rdmas() - Perform IB RDMA.
- *
- * Returns zero upon success or a negative number upon failure.
- */
-static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- struct ib_send_wr *bad_wr;
- int sq_wr_avail, ret, i;
- enum dma_data_direction dir;
- const int n_rdma = ioctx->n_rdma;
-
- dir = ioctx->cmd.data_direction;
- if (dir == DMA_TO_DEVICE) {
- /* write */
- ret = -ENOMEM;
- sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
- if (sq_wr_avail < 0) {
- pr_warn("IB send queue full (needed %d)\n",
- n_rdma);
- goto out;
- }
- }
-
- for (i = 0; i < n_rdma; i++) {
- struct ib_send_wr *wr = &ioctx->rdma_wrs[i].wr;
-
- wr->opcode = (dir == DMA_FROM_DEVICE) ?
- IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
-
- if (i == n_rdma - 1) {
- /* only get completion event for the last rdma read */
- if (dir == DMA_TO_DEVICE) {
- wr->send_flags = IB_SEND_SIGNALED;
- ioctx->rdma_cqe.done = srpt_rdma_read_done;
- } else {
- ioctx->rdma_cqe.done = srpt_rdma_write_done;
- }
- wr->wr_cqe = &ioctx->rdma_cqe;
- wr->next = NULL;
- } else {
- wr->wr_cqe = NULL;
- wr->next = &ioctx->rdma_wrs[i + 1].wr;
- }
- }
-
- ret = ib_post_send(ch->qp, &ioctx->rdma_wrs->wr, &bad_wr);
- if (ret)
- pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n",
- __func__, __LINE__, ret, i, n_rdma);
-out:
- if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
- atomic_add(n_rdma, &ch->sq_wr_avail);
- return ret;
-}
-
-/**
- * srpt_xfer_data() - Start data transfer from initiator to target.
- */
-static int srpt_xfer_data(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- int ret;
-
- ret = srpt_map_sg_to_ib_sge(ch, ioctx);
- if (ret) {
- pr_err("%s[%d] ret=%d\n", __func__, __LINE__, ret);
- goto out;
- }
-
- ret = srpt_perform_rdmas(ch, ioctx);
- if (ret) {
- if (ret == -EAGAIN || ret == -ENOMEM)
- pr_info("%s[%d] queue full -- ret=%d\n",
- __func__, __LINE__, ret);
- else
- pr_err("%s[%d] fatal error -- ret=%d\n",
- __func__, __LINE__, ret);
- goto out_unmap;
- }
-
-out:
- return ret;
-out_unmap:
- srpt_unmap_sg_to_ib_sge(ch, ioctx);
- goto out;
-}
-
static int srpt_write_pending_status(struct se_cmd *se_cmd)
{
struct srpt_send_ioctx *ioctx;
@@ -2491,11 +2200,42 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx =
container_of(se_cmd, struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
+ struct ib_send_wr *first_wr = NULL, *bad_wr;
+ struct ib_cqe *cqe = &ioctx->rdma_cqe;
enum srpt_command_state new_state;
+ int ret, i;
new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
WARN_ON(new_state == SRPT_STATE_DONE);
- return srpt_xfer_data(ch, ioctx);
+
+ if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
+ pr_warn("%s: IB send queue full (needed %d)\n",
+ __func__, ioctx->n_rdma);
+ ret = -ENOMEM;
+ goto out_undo;
+ }
+
+ cqe->done = srpt_rdma_read_done;
+ for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
+ cqe, first_wr);
+ cqe = NULL;
+ }
+
+ ret = ib_post_send(ch->qp, first_wr, &bad_wr);
+ if (ret) {
+ pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
+ __func__, ret, ioctx->n_rdma,
+ atomic_read(&ch->sq_wr_avail));
+ goto out_undo;
+ }
+
+ return 0;
+out_undo:
+ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+ return ret;
}
static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
@@ -2517,17 +2257,17 @@ static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
*/
static void srpt_queue_response(struct se_cmd *cmd)
{
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *ioctx;
+ struct srpt_send_ioctx *ioctx =
+ container_of(cmd, struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+ struct srpt_device *sdev = ch->sport->sdev;
+ struct ib_send_wr send_wr, *first_wr = NULL, *bad_wr;
+ struct ib_sge sge;
enum srpt_command_state state;
unsigned long flags;
- int ret;
- enum dma_data_direction dir;
- int resp_len;
+ int resp_len, ret, i;
u8 srp_tm_status;
- ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
- ch = ioctx->ch;
BUG_ON(!ch);
spin_lock_irqsave(&ioctx->spinlock, flags);
@@ -2554,17 +2294,19 @@ static void srpt_queue_response(struct se_cmd *cmd)
return;
}
- dir = ioctx->cmd.data_direction;
-
/* For read commands, transfer the data to the initiator. */
- if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
+ if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
+ ioctx->cmd.data_length &&
!ioctx->queue_status_only) {
- ret = srpt_xfer_data(ch, ioctx);
- if (ret) {
- pr_err("xfer_data failed for tag %llu\n",
- ioctx->cmd.tag);
- return;
+ for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
+ ch->sport->port, NULL,
+ first_wr ? first_wr : &send_wr);
}
+ } else {
+ first_wr = &send_wr;
}
if (state != SRPT_STATE_MGMT)
@@ -2576,14 +2318,46 @@ static void srpt_queue_response(struct se_cmd *cmd)
resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
ioctx->cmd.tag);
}
- ret = srpt_post_send(ch, ioctx, resp_len);
- if (ret) {
- pr_err("sending cmd response failed for tag %llu\n",
- ioctx->cmd.tag);
- srpt_unmap_sg_to_ib_sge(ch, ioctx);
- srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- target_put_sess_cmd(&ioctx->cmd);
+
+ atomic_inc(&ch->req_lim);
+
+ if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
+ &ch->sq_wr_avail) < 0)) {
+ pr_warn("%s: IB send queue full (needed %d)\n",
+ __func__, ioctx->n_rdma);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
+ DMA_TO_DEVICE);
+
+ sge.addr = ioctx->ioctx.dma;
+ sge.length = resp_len;
+ sge.lkey = sdev->pd->local_dma_lkey;
+
+ ioctx->ioctx.cqe.done = srpt_send_done;
+ send_wr.next = NULL;
+ send_wr.wr_cqe = &ioctx->ioctx.cqe;
+ send_wr.sg_list = &sge;
+ send_wr.num_sge = 1;
+ send_wr.opcode = IB_WR_SEND;
+ send_wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(ch->qp, first_wr, &bad_wr);
+ if (ret < 0) {
+ pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
+ __func__, ioctx->cmd.tag, ret);
+ goto out;
}
+
+ return;
+
+out:
+ atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
+ atomic_dec(&ch->req_lim);
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+ target_put_sess_cmd(&ioctx->cmd);
}
static int srpt_queue_data_in(struct se_cmd *cmd)
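
Several hunks above rework the ch->sq_wr_avail accounting: srpt_write_pending() reserves n_rdma slots for the RDMA READ, srpt_queue_response() reserves 1 + n_rdma for the SEND plus any RDMA WRITEs, and the read-done and send-done completions give the same amounts back. A hypothetical self-contained model of that credit scheme using C11 atomics (queue depth and n_rdma are example numbers):

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int sq_wr_avail;

/* Try to reserve 'n' send-queue slots; undo and fail if we went negative. */
static bool reserve(int n)
{
	if (atomic_fetch_sub(&sq_wr_avail, n) - n < 0) {
		atomic_fetch_add(&sq_wr_avail, n);	/* roll back, queue full */
		return false;
	}
	return true;
}

static void release(int n)
{
	atomic_fetch_add(&sq_wr_avail, n);
}

int main(void)
{
	int n_rdma = 3;			/* example per-command RDMA WR count */

	atomic_init(&sq_wr_avail, 8);	/* example send queue budget */

	/* Write command: reserve n_rdma for the RDMA READ ... */
	if (reserve(n_rdma))
		printf("RDMA READ posted, avail=%d\n",
		       atomic_load(&sq_wr_avail));
	/* ... returned by the read-done completion. */
	release(n_rdma);

	/* Response path: one SEND plus n_rdma RDMA WRITEs ... */
	if (reserve(1 + n_rdma))
		printf("response posted, avail=%d\n",
		       atomic_load(&sq_wr_avail));
	/* ... returned by the send-done completion. */
	release(1 + n_rdma);

	printf("final avail=%d\n", atomic_load(&sq_wr_avail));
	return 0;
}
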
@@ -2599,10 +2373,6 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
static void srpt_aborted_task(struct se_cmd *cmd)
{
- struct srpt_send_ioctx *ioctx = container_of(cmd,
- struct srpt_send_ioctx, cmd);
-
- srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
}
static int srpt_queue_status(struct se_cmd *cmd)
@@ -2903,12 +2673,10 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
unsigned long flags;
WARN_ON(ioctx->state != SRPT_STATE_DONE);
- WARN_ON(ioctx->mapped_sg_count != 0);
- if (ioctx->n_rbuf > 1) {
- kfree(ioctx->rbufs);
- ioctx->rbufs = NULL;
- ioctx->n_rbuf = 0;
+ if (ioctx->n_rw_ctx) {
+ srpt_free_rw_ctxs(ch, ioctx);
+ ioctx->n_rw_ctx = 0;
}
spin_lock_irqsave(&ch->spinlock, flags);
@@ -3287,7 +3055,6 @@ static const struct target_core_fabric_ops srpt_template = {
.tpg_get_inst_index = srpt_tpg_get_inst_index,
.release_cmd = srpt_release_cmd,
.check_stop_free = srpt_check_stop_free,
- .shutdown_session = srpt_shutdown_session,
.close_session = srpt_close_session,
.sess_get_index = srpt_sess_get_index,
.sess_get_initiator_sid = NULL,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index af9b8b527..389030487 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -42,6 +42,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
+#include <rdma/rw.h>
#include <scsi/srp.h>
@@ -174,21 +175,17 @@ struct srpt_recv_ioctx {
struct srpt_ioctx ioctx;
struct list_head wait_list;
};
+
+struct srpt_rw_ctx {
+ struct rdma_rw_ctx rw;
+ struct scatterlist *sg;
+ unsigned int nents;
+};
/**
* struct srpt_send_ioctx - SRPT send I/O context.
* @ioctx: See above.
* @ch: Channel pointer.
- * @free_list: Node in srpt_rdma_ch.free_list.
- * @n_rbuf: Number of data buffers in the received SRP command.
- * @rbufs: Pointer to SRP data buffer array.
- * @single_rbuf: SRP data buffer if the command has only a single buffer.
- * @sg: Pointer to sg-list associated with this I/O context.
- * @sg_cnt: SG-list size.
- * @mapped_sg_count: ib_dma_map_sg() return value.
- * @n_rdma_wrs: Number of elements in the rdma_wrs array.
- * @rdma_wrs: Array with information about the RDMA mapping.
- * @tag: Tag of the received SRP information unit.
* @spinlock: Protects 'state'.
* @state: I/O context state.
* @cmd: Target core command data structure.
@@ -197,21 +194,18 @@ struct srpt_recv_ioctx {
struct srpt_send_ioctx {
struct srpt_ioctx ioctx;
struct srpt_rdma_ch *ch;
- struct ib_rdma_wr *rdma_wrs;
+
+ struct srpt_rw_ctx s_rw_ctx;
+ struct srpt_rw_ctx *rw_ctxs;
+
struct ib_cqe rdma_cqe;
- struct srp_direct_buf *rbufs;
- struct srp_direct_buf single_rbuf;
- struct scatterlist *sg;
struct list_head free_list;
spinlock_t spinlock;
enum srpt_command_state state;
struct se_cmd cmd;
struct completion tx_done;
- int sg_cnt;
- int mapped_sg_count;
- u16 n_rdma_wrs;
u8 n_rdma;
- u8 n_rbuf;
+ u8 n_rw_ctx;
bool queue_status_only;
u8 sense_data[TRANSPORT_SENSE_BUFFER];
};