author:    André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-09-08 01:01:14 -0300
committer: André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-09-08 01:01:14 -0300
commit:    e5fd91f1ef340da553f7a79da9540c3db711c937 (patch)
tree:      b11842027dc6641da63f4bcc524f8678263304a3 /drivers/infiniband/core
parent:    2a9b0348e685a63d97486f6749622b61e9e3292f (diff)

    Linux-libre 4.2-gnu
Diffstat (limited to 'drivers/infiniband/core')

 -rw-r--r--  drivers/infiniband/core/addr.c         |   4
 -rw-r--r--  drivers/infiniband/core/agent.c        |  27
 -rw-r--r--  drivers/infiniband/core/agent.h        |   6
 -rw-r--r--  drivers/infiniband/core/cache.c        |  69
 -rw-r--r--  drivers/infiniband/core/cm.c           |  87
 -rw-r--r--  drivers/infiniband/core/cma.c          | 287
 -rw-r--r--  drivers/infiniband/core/device.c       |  96
 -rw-r--r--  drivers/infiniband/core/iwpm_msg.c     |  33
 -rw-r--r--  drivers/infiniband/core/iwpm_util.c    |  12
 -rw-r--r--  drivers/infiniband/core/iwpm_util.h    |  28
 -rw-r--r--  drivers/infiniband/core/mad.c          | 650
 -rw-r--r--  drivers/infiniband/core/mad_priv.h     |  15
 -rw-r--r--  drivers/infiniband/core/mad_rmpp.c     |  33
 -rw-r--r--  drivers/infiniband/core/multicast.c    |  20
 -rw-r--r--  drivers/infiniband/core/opa_smi.h      |  78
 -rw-r--r--  drivers/infiniband/core/sa_query.c     |  41
 -rw-r--r--  drivers/infiniband/core/smi.c          | 245
 -rw-r--r--  drivers/infiniband/core/smi.h          |   4
 -rw-r--r--  drivers/infiniband/core/sysfs.c        |  10
 -rw-r--r--  drivers/infiniband/core/ucm.c          |   7
 -rw-r--r--  drivers/infiniband/core/ucma.c         |  30
 -rw-r--r--  drivers/infiniband/core/user_mad.c     |  64
 -rw-r--r--  drivers/infiniband/core/uverbs.h       |   1
 -rw-r--r--  drivers/infiniband/core/uverbs_cmd.c   | 188
 -rw-r--r--  drivers/infiniband/core/uverbs_main.c  |   1
 -rw-r--r--  drivers/infiniband/core/verbs.c        |  85
 26 files changed, 1367 insertions(+), 754 deletions(-)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 38339d220..746cdf56b 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -457,8 +457,8 @@ static void resolve_cb(int status, struct sockaddr *src_addr,
complete(&((struct resolve_cb_context *)context)->comp);
}
-int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
- u16 *vlan_id)
+int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid,
+ u8 *dmac, u16 *vlan_id)
{
int ret = 0;
struct rdma_dev_addr dev_addr;
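
The addr.c hunk above only constifies the GID arguments of rdma_addr_find_dmac_by_grh(). As a rough illustration (a hypothetical caller, not part of this patch), the new prototype lets code that only holds read-only GID pointers call it directly:

#include <linux/if_ether.h>
#include <rdma/ib_addr.h>

/* Hypothetical caller: dmac and vlan_id are filled in on success. */
static int example_resolve_dmac(const union ib_gid *sgid,
				const union ib_gid *dgid)
{
	u8 dmac[ETH_ALEN];
	u16 vlan_id;

	return rdma_addr_find_dmac_by_grh(sgid, dgid, dmac, &vlan_id);
}
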
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index f6d29614c..042904030 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -54,7 +54,7 @@ static DEFINE_SPINLOCK(ib_agent_port_list_lock);
static LIST_HEAD(ib_agent_port_list);
static struct ib_agent_port_private *
-__ib_get_agent_port(struct ib_device *device, int port_num)
+__ib_get_agent_port(const struct ib_device *device, int port_num)
{
struct ib_agent_port_private *entry;
@@ -67,7 +67,7 @@ __ib_get_agent_port(struct ib_device *device, int port_num)
}
static struct ib_agent_port_private *
-ib_get_agent_port(struct ib_device *device, int port_num)
+ib_get_agent_port(const struct ib_device *device, int port_num)
{
struct ib_agent_port_private *entry;
unsigned long flags;
@@ -78,9 +78,9 @@ ib_get_agent_port(struct ib_device *device, int port_num)
return entry;
}
-void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
- struct ib_wc *wc, struct ib_device *device,
- int port_num, int qpn)
+void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh,
+ const struct ib_wc *wc, const struct ib_device *device,
+ int port_num, int qpn, size_t resp_mad_len, bool opa)
{
struct ib_agent_port_private *port_priv;
struct ib_mad_agent *agent;
@@ -88,7 +88,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
struct ib_ah *ah;
struct ib_mad_send_wr_private *mad_send_wr;
- if (device->node_type == RDMA_NODE_IB_SWITCH)
+ if (rdma_cap_ib_switch(device))
port_priv = ib_get_agent_port(device, 0);
else
port_priv = ib_get_agent_port(device, port_num);
@@ -106,18 +106,23 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
return;
}
+ if (opa && mad_hdr->base_version != OPA_MGMT_BASE_VERSION)
+ resp_mad_len = IB_MGMT_MAD_SIZE;
+
send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
- IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
- GFP_KERNEL);
+ IB_MGMT_MAD_HDR,
+ resp_mad_len - IB_MGMT_MAD_HDR,
+ GFP_KERNEL,
+ mad_hdr->base_version);
if (IS_ERR(send_buf)) {
dev_err(&device->dev, "ib_create_send_mad error\n");
goto err1;
}
- memcpy(send_buf->mad, mad, sizeof *mad);
+ memcpy(send_buf->mad, mad_hdr, resp_mad_len);
send_buf->ah = ah;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
+ if (rdma_cap_ib_switch(device)) {
mad_send_wr = container_of(send_buf,
struct ib_mad_send_wr_private,
send_buf);
@@ -156,7 +161,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
goto error1;
}
- if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
+ if (rdma_cap_ib_smi(device, port_num)) {
/* Obtain send only MAD agent for SMI QP */
port_priv->agent[0] = ib_register_mad_agent(device, port_num,
IB_QPT_SMI, NULL, 0,
diff --git a/drivers/infiniband/core/agent.h b/drivers/infiniband/core/agent.h
index 666928700..65f92beda 100644
--- a/drivers/infiniband/core/agent.h
+++ b/drivers/infiniband/core/agent.h
@@ -44,8 +44,8 @@ extern int ib_agent_port_open(struct ib_device *device, int port_num);
extern int ib_agent_port_close(struct ib_device *device, int port_num);
-extern void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
- struct ib_wc *wc, struct ib_device *device,
- int port_num, int qpn);
+extern void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh,
+ const struct ib_wc *wc, const struct ib_device *device,
+ int port_num, int qpn, size_t resp_mad_len, bool opa);
#endif /* __AGENT_H_ */
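
agent_send_response() now takes the actual response length and an OPA flag instead of assuming a fixed-size struct ib_mad. A hedged sketch of a caller (variable names are illustrative, not copied from mad.c) under those assumptions:

#include <rdma/ib_mad.h>
#include "agent.h"

/*
 * Illustrative caller: OPA-capable ports may carry jumbo MADs, IB ports
 * stay at the fixed 256-byte IB_MGMT_MAD_SIZE.
 */
static void example_reply(const struct ib_mad_hdr *resp_hdr,
			  const struct ib_grh *grh, const struct ib_wc *wc,
			  struct ib_device *device, int port_num, int qpn,
			  size_t recv_mad_size)
{
	bool opa = rdma_cap_opa_mad(device, port_num);
	size_t resp_mad_len = opa ? recv_mad_size : IB_MGMT_MAD_SIZE;

	agent_send_response(resp_hdr, grh, wc, device, port_num, qpn,
			    resp_mad_len, opa);
}
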
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 80f6cf244..871da832d 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -58,17 +58,6 @@ struct ib_update_work {
u8 port_num;
};
-static inline int start_port(struct ib_device *device)
-{
- return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
-}
-
-static inline int end_port(struct ib_device *device)
-{
- return (device->node_type == RDMA_NODE_IB_SWITCH) ?
- 0 : device->phys_port_cnt;
-}
-
int ib_get_cached_gid(struct ib_device *device,
u8 port_num,
int index,
@@ -78,12 +67,12 @@ int ib_get_cached_gid(struct ib_device *device,
unsigned long flags;
int ret = 0;
- if (port_num < start_port(device) || port_num > end_port(device))
+ if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;
read_lock_irqsave(&device->cache.lock, flags);
- cache = device->cache.gid_cache[port_num - start_port(device)];
+ cache = device->cache.gid_cache[port_num - rdma_start_port(device)];
if (index < 0 || index >= cache->table_len)
ret = -EINVAL;
@@ -96,10 +85,10 @@ int ib_get_cached_gid(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_gid);
-int ib_find_cached_gid(struct ib_device *device,
- union ib_gid *gid,
- u8 *port_num,
- u16 *index)
+int ib_find_cached_gid(struct ib_device *device,
+ const union ib_gid *gid,
+ u8 *port_num,
+ u16 *index)
{
struct ib_gid_cache *cache;
unsigned long flags;
@@ -112,11 +101,11 @@ int ib_find_cached_gid(struct ib_device *device,
read_lock_irqsave(&device->cache.lock, flags);
- for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+ for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
cache = device->cache.gid_cache[p];
for (i = 0; i < cache->table_len; ++i) {
if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
- *port_num = p + start_port(device);
+ *port_num = p + rdma_start_port(device);
if (index)
*index = i;
ret = 0;
@@ -140,12 +129,12 @@ int ib_get_cached_pkey(struct ib_device *device,
unsigned long flags;
int ret = 0;
- if (port_num < start_port(device) || port_num > end_port(device))
+ if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;
read_lock_irqsave(&device->cache.lock, flags);
- cache = device->cache.pkey_cache[port_num - start_port(device)];
+ cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
if (index < 0 || index >= cache->table_len)
ret = -EINVAL;
@@ -169,12 +158,12 @@ int ib_find_cached_pkey(struct ib_device *device,
int ret = -ENOENT;
int partial_ix = -1;
- if (port_num < start_port(device) || port_num > end_port(device))
+ if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;
read_lock_irqsave(&device->cache.lock, flags);
- cache = device->cache.pkey_cache[port_num - start_port(device)];
+ cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
*index = -1;
@@ -209,12 +198,12 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
int i;
int ret = -ENOENT;
- if (port_num < start_port(device) || port_num > end_port(device))
+ if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;
read_lock_irqsave(&device->cache.lock, flags);
- cache = device->cache.pkey_cache[port_num - start_port(device)];
+ cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
*index = -1;
@@ -238,11 +227,11 @@ int ib_get_cached_lmc(struct ib_device *device,
unsigned long flags;
int ret = 0;
- if (port_num < start_port(device) || port_num > end_port(device))
+ if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;
read_lock_irqsave(&device->cache.lock, flags);
- *lmc = device->cache.lmc_cache[port_num - start_port(device)];
+ *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
read_unlock_irqrestore(&device->cache.lock, flags);
return ret;
@@ -303,13 +292,13 @@ static void ib_cache_update(struct ib_device *device,
write_lock_irq(&device->cache.lock);
- old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
- old_gid_cache = device->cache.gid_cache [port - start_port(device)];
+ old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
+ old_gid_cache = device->cache.gid_cache [port - rdma_start_port(device)];
- device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
- device->cache.gid_cache [port - start_port(device)] = gid_cache;
+ device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
+ device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache;
- device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;
+ device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
write_unlock_irq(&device->cache.lock);
@@ -363,14 +352,14 @@ static void ib_cache_setup_one(struct ib_device *device)
device->cache.pkey_cache =
kmalloc(sizeof *device->cache.pkey_cache *
- (end_port(device) - start_port(device) + 1), GFP_KERNEL);
+ (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
device->cache.gid_cache =
kmalloc(sizeof *device->cache.gid_cache *
- (end_port(device) - start_port(device) + 1), GFP_KERNEL);
+ (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
- (end_port(device) -
- start_port(device) + 1),
+ (rdma_end_port(device) -
+ rdma_start_port(device) + 1),
GFP_KERNEL);
if (!device->cache.pkey_cache || !device->cache.gid_cache ||
@@ -380,10 +369,10 @@ static void ib_cache_setup_one(struct ib_device *device)
goto err;
}
- for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+ for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
device->cache.pkey_cache[p] = NULL;
device->cache.gid_cache [p] = NULL;
- ib_cache_update(device, p + start_port(device));
+ ib_cache_update(device, p + rdma_start_port(device));
}
INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
@@ -394,7 +383,7 @@ static void ib_cache_setup_one(struct ib_device *device)
return;
err_cache:
- for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+ for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
kfree(device->cache.pkey_cache[p]);
kfree(device->cache.gid_cache[p]);
}
@@ -412,7 +401,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
ib_unregister_event_handler(&device->cache.event_handler);
flush_workqueue(ib_wq);
- for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+ for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
kfree(device->cache.pkey_cache[p]);
kfree(device->cache.gid_cache[p]);
}
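
The cache.c hunks replace the file-local start_port()/end_port() helpers with the shared rdma_start_port()/rdma_end_port() accessors. A minimal sketch, using only those two helpers, of the resulting port-iteration idiom (which collapses to the single port 0 on IB switches):

#include <rdma/ib_verbs.h>

/* Walk every physical port of a device using the shared helpers. */
static void example_for_each_port(struct ib_device *device)
{
	u8 p;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p)
		pr_debug("%s: port %u\n", device->name, p);
}
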
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0271608a5..3a972ebf3 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -169,6 +169,7 @@ struct cm_device {
struct ib_device *ib_device;
struct device *device;
u8 ack_delay;
+ int going_down;
struct cm_port *port[0];
};
@@ -267,7 +268,8 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
cm_id_priv->av.pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
- GFP_ATOMIC);
+ GFP_ATOMIC,
+ IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
return PTR_ERR(m);
@@ -297,7 +299,8 @@ static int cm_alloc_response_msg(struct cm_port *port,
m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
- GFP_ATOMIC);
+ GFP_ATOMIC,
+ IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
return PTR_ERR(m);
@@ -803,6 +806,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
int wait_time;
unsigned long flags;
+ struct cm_device *cm_dev;
+
+ cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
+ if (!cm_dev)
+ return;
spin_lock_irqsave(&cm.lock, flags);
cm_cleanup_timewait(cm_id_priv->timewait_info);
@@ -816,8 +824,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
*/
cm_id_priv->id.state = IB_CM_TIMEWAIT;
wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
- queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
- msecs_to_jiffies(wait_time));
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!cm_dev->going_down)
+ queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
+ msecs_to_jiffies(wait_time));
+ spin_unlock_irq(&cm.lock);
+
cm_id_priv->timewait_info = NULL;
}
@@ -3303,6 +3317,11 @@ static int cm_establish(struct ib_cm_id *cm_id)
struct cm_work *work;
unsigned long flags;
int ret = 0;
+ struct cm_device *cm_dev;
+
+ cm_dev = ib_get_client_data(cm_id->device, &cm_client);
+ if (!cm_dev)
+ return -ENODEV;
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (!work)
@@ -3341,7 +3360,17 @@ static int cm_establish(struct ib_cm_id *cm_id)
work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED;
- queue_delayed_work(cm.wq, &work->work, 0);
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!cm_dev->going_down) {
+ queue_delayed_work(cm.wq, &work->work, 0);
+ } else {
+ kfree(work);
+ ret = -ENODEV;
+ }
+ spin_unlock_irq(&cm.lock);
+
out:
return ret;
}
@@ -3392,6 +3421,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
enum ib_cm_event_type event;
u16 attr_id;
int paths = 0;
+ int going_down = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
@@ -3450,7 +3480,19 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = port;
- queue_delayed_work(cm.wq, &work->work, 0);
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!port->cm_dev->going_down)
+ queue_delayed_work(cm.wq, &work->work, 0);
+ else
+ going_down = 1;
+ spin_unlock_irq(&cm.lock);
+
+ if (going_down) {
+ kfree(work);
+ ib_free_recv_mad(mad_recv_wc);
+ }
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3759,11 +3801,9 @@ static void cm_add_one(struct ib_device *ib_device)
};
unsigned long flags;
int ret;
+ int count = 0;
u8 i;
- if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
- return;
-
cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
ib_device->phys_port_cnt, GFP_KERNEL);
if (!cm_dev)
@@ -3771,7 +3811,7 @@ static void cm_add_one(struct ib_device *ib_device)
cm_dev->ib_device = ib_device;
cm_get_ack_delay(cm_dev);
-
+ cm_dev->going_down = 0;
cm_dev->device = device_create(&cm_class, &ib_device->dev,
MKDEV(0, 0), NULL,
"%s", ib_device->name);
@@ -3782,6 +3822,9 @@ static void cm_add_one(struct ib_device *ib_device)
set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+ if (!rdma_cap_ib_cm(ib_device, i))
+ continue;
+
port = kzalloc(sizeof *port, GFP_KERNEL);
if (!port)
goto error1;
@@ -3808,7 +3851,13 @@ static void cm_add_one(struct ib_device *ib_device)
ret = ib_modify_port(ib_device, i, 0, &port_modify);
if (ret)
goto error3;
+
+ count++;
}
+
+ if (!count)
+ goto free;
+
ib_set_client_data(ib_device, &cm_client, cm_dev);
write_lock_irqsave(&cm.device_lock, flags);
@@ -3824,11 +3873,15 @@ error1:
port_modify.set_port_cap_mask = 0;
port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
while (--i) {
+ if (!rdma_cap_ib_cm(ib_device, i))
+ continue;
+
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
}
+free:
device_unregister(cm_dev->device);
kfree(cm_dev);
}
@@ -3851,11 +3904,23 @@ static void cm_remove_one(struct ib_device *ib_device)
list_del(&cm_dev->list);
write_unlock_irqrestore(&cm.device_lock, flags);
+ spin_lock_irq(&cm.lock);
+ cm_dev->going_down = 1;
+ spin_unlock_irq(&cm.lock);
+
for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+ if (!rdma_cap_ib_cm(ib_device, i))
+ continue;
+
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
- ib_unregister_mad_agent(port->mad_agent);
+ /*
+ * We flush the queue here after the going_down set, this
+ * verify that no new works will be queued in the recv handler,
+ * after that we can call the unregister_mad_agent
+ */
flush_workqueue(cm.wq);
+ ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
}
device_unregister(cm_dev->device);
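
The cm.c changes add a going_down flag so no new work is queued once device removal has started. A condensed sketch of that handshake, written as if it sat inside cm.c (cm.lock, cm.wq and struct cm_device come from there) and purely illustrative, not literal kernel code:

#include <linux/workqueue.h>

static bool example_queue_cm_work(struct cm_device *cm_dev,
				  struct delayed_work *work)
{
	bool queued = false;

	spin_lock_irq(&cm.lock);
	if (!cm_dev->going_down) {
		/* device is still alive: safe to hand work to cm.wq */
		queue_delayed_work(cm.wq, work, 0);
		queued = true;
	}
	spin_unlock_irq(&cm.lock);

	return queued;	/* caller frees the work item when this is false */
}

cm_remove_one() sets going_down under the same lock before flushing cm.wq, so once the flag is visible no new work can slip in behind the flush and ib_unregister_mad_agent() can run safely.
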
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 38ffe0981..143ded2bb 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -65,6 +65,34 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
+static const char * const cma_events[] = {
+ [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
+ [RDMA_CM_EVENT_ADDR_ERROR] = "address error",
+ [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ",
+ [RDMA_CM_EVENT_ROUTE_ERROR] = "route error",
+ [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request",
+ [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
+ [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error",
+ [RDMA_CM_EVENT_UNREACHABLE] = "unreachable",
+ [RDMA_CM_EVENT_REJECTED] = "rejected",
+ [RDMA_CM_EVENT_ESTABLISHED] = "established",
+ [RDMA_CM_EVENT_DISCONNECTED] = "disconnected",
+ [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal",
+ [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join",
+ [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error",
+ [RDMA_CM_EVENT_ADDR_CHANGE] = "address change",
+ [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
+};
+
+const char *rdma_event_msg(enum rdma_cm_event_type event)
+{
+ size_t index = event;
+
+ return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
+ cma_events[index] : "unrecognized event";
+}
+EXPORT_SYMBOL(rdma_event_msg);
+
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);
@@ -349,18 +377,35 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
return ret;
}
+static inline int cma_validate_port(struct ib_device *device, u8 port,
+ union ib_gid *gid, int dev_type)
+{
+ u8 found_port;
+ int ret = -ENODEV;
+
+ if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
+ return ret;
+
+ if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
+ return ret;
+
+ ret = ib_find_cached_gid(device, gid, &found_port, NULL);
+ if (port != found_port)
+ return -ENODEV;
+
+ return ret;
+}
+
static int cma_acquire_dev(struct rdma_id_private *id_priv,
struct rdma_id_private *listen_id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
struct cma_device *cma_dev;
- union ib_gid gid, iboe_gid;
+ union ib_gid gid, iboe_gid, *gidp;
int ret = -ENODEV;
- u8 port, found_port;
- enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
- IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+ u8 port;
- if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
+ if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
id_priv->id.ps == RDMA_PS_IPOIB)
return -EINVAL;
@@ -370,41 +415,36 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
memcpy(&gid, dev_addr->src_dev_addr +
rdma_addr_gid_offset(dev_addr), sizeof gid);
- if (listen_id_priv &&
- rdma_port_get_link_layer(listen_id_priv->id.device,
- listen_id_priv->id.port_num) == dev_ll) {
+
+ if (listen_id_priv) {
cma_dev = listen_id_priv->cma_dev;
port = listen_id_priv->id.port_num;
- if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
- rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
- ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
- &found_port, NULL);
- else
- ret = ib_find_cached_gid(cma_dev->device, &gid,
- &found_port, NULL);
+ gidp = rdma_protocol_roce(cma_dev->device, port) ?
+ &iboe_gid : &gid;
- if (!ret && (port == found_port)) {
- id_priv->id.port_num = found_port;
+ ret = cma_validate_port(cma_dev->device, port, gidp,
+ dev_addr->dev_type);
+ if (!ret) {
+ id_priv->id.port_num = port;
goto out;
}
}
+
list_for_each_entry(cma_dev, &dev_list, list) {
for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
if (listen_id_priv &&
listen_id_priv->cma_dev == cma_dev &&
listen_id_priv->id.port_num == port)
continue;
- if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
- if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
- rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
- ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
- else
- ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);
-
- if (!ret && (port == found_port)) {
- id_priv->id.port_num = found_port;
- goto out;
- }
+
+ gidp = rdma_protocol_roce(cma_dev->device, port) ?
+ &iboe_gid : &gid;
+
+ ret = cma_validate_port(cma_dev->device, port, gidp,
+ dev_addr->dev_type);
+ if (!ret) {
+ id_priv->id.port_num = port;
+ goto out;
}
}
}
@@ -435,10 +475,10 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
pkey = ntohs(addr->sib_pkey);
list_for_each_entry(cur_dev, &dev_list, list) {
- if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
- continue;
-
for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+ if (!rdma_cap_af_ib(cur_dev->device, p))
+ continue;
+
if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
continue;
@@ -633,10 +673,9 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
if (ret)
goto out;
- if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
- == RDMA_TRANSPORT_IB &&
- rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
- == IB_LINK_LAYER_ETHERNET) {
+ BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
+
+ if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
if (ret)
@@ -700,11 +739,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
int ret;
u16 pkey;
- if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
- IB_LINK_LAYER_INFINIBAND)
- pkey = ib_addr_get_pkey(dev_addr);
- else
+ if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
pkey = 0xffff;
+ else
+ pkey = ib_addr_get_pkey(dev_addr);
ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
pkey, &qp_attr->pkey_index);
@@ -735,8 +773,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
int ret = 0;
id_priv = container_of(id, struct rdma_id_private, id);
- switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
- case RDMA_TRANSPORT_IB:
+ if (rdma_cap_ib_cm(id->device, id->port_num)) {
if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
else
@@ -745,19 +782,15 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
if (qp_attr->qp_state == IB_QPS_RTR)
qp_attr->rq_psn = id_priv->seq_num;
- break;
- case RDMA_TRANSPORT_IWARP:
+ } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
if (!id_priv->cm_id.iw) {
qp_attr->qp_access_flags = 0;
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
qp_attr_mask);
- break;
- default:
+ } else
ret = -ENOSYS;
- break;
- }
return ret;
}
@@ -945,13 +978,9 @@ static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
- switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
- case IB_LINK_LAYER_INFINIBAND:
+ if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
if (id_priv->query)
ib_sa_cancel_query(id_priv->query_id, id_priv->query);
- break;
- default:
- break;
}
}
@@ -1023,17 +1052,12 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
mc = container_of(id_priv->mc_list.next,
struct cma_multicast, list);
list_del(&mc->list);
- switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
- case IB_LINK_LAYER_INFINIBAND:
+ if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
+ id_priv->id.port_num)) {
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
- break;
- case IB_LINK_LAYER_ETHERNET:
+ } else
kref_put(&mc->mcref, release_mc);
- break;
- default:
- break;
- }
}
}
@@ -1054,17 +1078,12 @@ void rdma_destroy_id(struct rdma_cm_id *id)
mutex_unlock(&id_priv->handler_mutex);
if (id_priv->cma_dev) {
- switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
- case RDMA_TRANSPORT_IB:
+ if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.ib)
ib_destroy_cm_id(id_priv->cm_id.ib);
- break;
- case RDMA_TRANSPORT_IWARP:
+ } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.iw)
iw_destroy_cm_id(id_priv->cm_id.iw);
- break;
- default:
- break;
}
cma_leave_mc_groups(id_priv);
cma_release_dev(id_priv);
@@ -1610,6 +1629,7 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
if (IS_ERR(id))
return PTR_ERR(id);
+ id->tos = id_priv->tos;
id_priv->cm_id.iw = id;
memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
@@ -1642,8 +1662,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
struct rdma_cm_id *id;
int ret;
- if (cma_family(id_priv) == AF_IB &&
- rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
+ if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
return;
id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
@@ -1984,26 +2003,15 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
return -EINVAL;
atomic_inc(&id_priv->refcount);
- switch (rdma_node_get_transport(id->device->node_type)) {
- case RDMA_TRANSPORT_IB:
- switch (rdma_port_get_link_layer(id->device, id->port_num)) {
- case IB_LINK_LAYER_INFINIBAND:
- ret = cma_resolve_ib_route(id_priv, timeout_ms);
- break;
- case IB_LINK_LAYER_ETHERNET:
- ret = cma_resolve_iboe_route(id_priv);
- break;
- default:
- ret = -ENOSYS;
- }
- break;
- case RDMA_TRANSPORT_IWARP:
+ if (rdma_cap_ib_sa(id->device, id->port_num))
+ ret = cma_resolve_ib_route(id_priv, timeout_ms);
+ else if (rdma_protocol_roce(id->device, id->port_num))
+ ret = cma_resolve_iboe_route(id_priv);
+ else if (rdma_protocol_iwarp(id->device, id->port_num))
ret = cma_resolve_iw_route(id_priv, timeout_ms);
- break;
- default:
+ else
ret = -ENOSYS;
- break;
- }
+
if (ret)
goto err;
@@ -2045,7 +2053,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
mutex_lock(&lock);
list_for_each_entry(cur_dev, &dev_list, list) {
if (cma_family(id_priv) == AF_IB &&
- rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
+ !rdma_cap_ib_cm(cur_dev->device, 1))
continue;
if (!cma_dev)
@@ -2077,7 +2085,7 @@ port_found:
goto out;
id_priv->id.route.addr.dev_addr.dev_type =
- (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
+ (rdma_protocol_ib(cma_dev->device, p)) ?
ARPHRD_INFINIBAND : ARPHRD_ETHER;
rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2554,18 +2562,15 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
id_priv->backlog = backlog;
if (id->device) {
- switch (rdma_node_get_transport(id->device->node_type)) {
- case RDMA_TRANSPORT_IB:
+ if (rdma_cap_ib_cm(id->device, 1)) {
ret = cma_ib_listen(id_priv);
if (ret)
goto err;
- break;
- case RDMA_TRANSPORT_IWARP:
+ } else if (rdma_cap_iw_cm(id->device, 1)) {
ret = cma_iw_listen(id_priv, backlog);
if (ret)
goto err;
- break;
- default:
+ } else {
ret = -ENOSYS;
goto err;
}
@@ -2857,6 +2862,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
+ cm_id->tos = id_priv->tos;
id_priv->cm_id.iw = cm_id;
memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
@@ -2901,20 +2907,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
id_priv->srq = conn_param->srq;
}
- switch (rdma_node_get_transport(id->device->node_type)) {
- case RDMA_TRANSPORT_IB:
+ if (rdma_cap_ib_cm(id->device, id->port_num)) {
if (id->qp_type == IB_QPT_UD)
ret = cma_resolve_ib_udp(id_priv, conn_param);
else
ret = cma_connect_ib(id_priv, conn_param);
- break;
- case RDMA_TRANSPORT_IWARP:
+ } else if (rdma_cap_iw_cm(id->device, id->port_num))
ret = cma_connect_iw(id_priv, conn_param);
- break;
- default:
+ else
ret = -ENOSYS;
- break;
- }
if (ret)
goto err;
@@ -3017,8 +3018,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
id_priv->srq = conn_param->srq;
}
- switch (rdma_node_get_transport(id->device->node_type)) {
- case RDMA_TRANSPORT_IB:
+ if (rdma_cap_ib_cm(id->device, id->port_num)) {
if (id->qp_type == IB_QPT_UD) {
if (conn_param)
ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
@@ -3034,14 +3034,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
else
ret = cma_rep_recv(id_priv);
}
- break;
- case RDMA_TRANSPORT_IWARP:
+ } else if (rdma_cap_iw_cm(id->device, id->port_num))
ret = cma_accept_iw(id_priv, conn_param);
- break;
- default:
+ else
ret = -ENOSYS;
- break;
- }
if (ret)
goto reject;
@@ -3085,8 +3081,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
if (!id_priv->cm_id.ib)
return -EINVAL;
- switch (rdma_node_get_transport(id->device->node_type)) {
- case RDMA_TRANSPORT_IB:
+ if (rdma_cap_ib_cm(id->device, id->port_num)) {
if (id->qp_type == IB_QPT_UD)
ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
private_data, private_data_len);
@@ -3094,15 +3089,12 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
ret = ib_send_cm_rej(id_priv->cm_id.ib,
IB_CM_REJ_CONSUMER_DEFINED, NULL,
0, private_data, private_data_len);
- break;
- case RDMA_TRANSPORT_IWARP:
+ } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_reject(id_priv->cm_id.iw,
private_data, private_data_len);
- break;
- default:
+ } else
ret = -ENOSYS;
- break;
- }
+
return ret;
}
EXPORT_SYMBOL(rdma_reject);
@@ -3116,22 +3108,18 @@ int rdma_disconnect(struct rdma_cm_id *id)
if (!id_priv->cm_id.ib)
return -EINVAL;
- switch (rdma_node_get_transport(id->device->node_type)) {
- case RDMA_TRANSPORT_IB:
+ if (rdma_cap_ib_cm(id->device, id->port_num)) {
ret = cma_modify_qp_err(id_priv);
if (ret)
goto out;
/* Initiate or respond to a disconnect. */
if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
- break;
- case RDMA_TRANSPORT_IWARP:
+ } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
- break;
- default:
+ } else
ret = -EINVAL;
- break;
- }
+
out:
return ret;
}
@@ -3377,24 +3365,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
list_add(&mc->list, &id_priv->mc_list);
spin_unlock(&id_priv->lock);
- switch (rdma_node_get_transport(id->device->node_type)) {
- case RDMA_TRANSPORT_IB:
- switch (rdma_port_get_link_layer(id->device, id->port_num)) {
- case IB_LINK_LAYER_INFINIBAND:
- ret = cma_join_ib_multicast(id_priv, mc);
- break;
- case IB_LINK_LAYER_ETHERNET:
- kref_init(&mc->mcref);
- ret = cma_iboe_join_multicast(id_priv, mc);
- break;
- default:
- ret = -EINVAL;
- }
- break;
- default:
+ if (rdma_protocol_roce(id->device, id->port_num)) {
+ kref_init(&mc->mcref);
+ ret = cma_iboe_join_multicast(id_priv, mc);
+ } else if (rdma_cap_ib_mcast(id->device, id->port_num))
+ ret = cma_join_ib_multicast(id_priv, mc);
+ else
ret = -ENOSYS;
- break;
- }
if (ret) {
spin_lock_irq(&id_priv->lock);
@@ -3422,19 +3399,15 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
ib_detach_mcast(id->qp,
&mc->multicast.ib->rec.mgid,
be16_to_cpu(mc->multicast.ib->rec.mlid));
- if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
- switch (rdma_port_get_link_layer(id->device, id->port_num)) {
- case IB_LINK_LAYER_INFINIBAND:
- ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
- break;
- case IB_LINK_LAYER_ETHERNET:
- kref_put(&mc->mcref, release_mc);
- break;
- default:
- break;
- }
- }
+
+ BUG_ON(id_priv->cma_dev->device != id->device);
+
+ if (rdma_cap_ib_mcast(id->device, id->port_num)) {
+ ib_sa_free_multicast(mc->multicast.ib);
+ kfree(mc);
+ } else if (rdma_protocol_roce(id->device, id->port_num))
+ kref_put(&mc->mcref, release_mc);
+
return;
}
}
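
Besides switching cma.c to the rdma_cap_*()/rdma_protocol_*() helpers, the patch exports a new rdma_event_msg() pretty-printer. A minimal usage sketch (the event handler itself is hypothetical):

#include <rdma/rdma_cm.h>

static int example_cma_handler(struct rdma_cm_id *id,
			       struct rdma_cm_event *event)
{
	pr_info("rdma_cm event: %s (status %d)\n",
		rdma_event_msg(event->event), event->status);
	return 0;
}
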
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 18c1ece76..9567756ca 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -92,7 +92,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
IB_MANDATORY_FUNC(poll_cq),
IB_MANDATORY_FUNC(req_notify_cq),
IB_MANDATORY_FUNC(get_dma_mr),
- IB_MANDATORY_FUNC(dereg_mr)
+ IB_MANDATORY_FUNC(dereg_mr),
+ IB_MANDATORY_FUNC(get_port_immutable)
};
int i;
@@ -151,18 +152,6 @@ static int alloc_name(char *name)
return 0;
}
-static int start_port(struct ib_device *device)
-{
- return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
-}
-
-
-static int end_port(struct ib_device *device)
-{
- return (device->node_type == RDMA_NODE_IB_SWITCH) ?
- 0 : device->phys_port_cnt;
-}
-
/**
* ib_alloc_device - allocate an IB device struct
* @size:size of structure to allocate
@@ -222,42 +211,49 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
return 0;
}
-static int read_port_table_lengths(struct ib_device *device)
+static int verify_immutable(const struct ib_device *dev, u8 port)
{
- struct ib_port_attr *tprops = NULL;
- int num_ports, ret = -ENOMEM;
- u8 port_index;
-
- tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
- if (!tprops)
- goto out;
-
- num_ports = end_port(device) - start_port(device) + 1;
+ return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
+ rdma_max_mad_size(dev, port) != 0);
+}
- device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports,
- GFP_KERNEL);
- device->gid_tbl_len = kmalloc(sizeof *device->gid_tbl_len * num_ports,
- GFP_KERNEL);
- if (!device->pkey_tbl_len || !device->gid_tbl_len)
+static int read_port_immutable(struct ib_device *device)
+{
+ int ret = -ENOMEM;
+ u8 start_port = rdma_start_port(device);
+ u8 end_port = rdma_end_port(device);
+ u8 port;
+
+ /**
+ * device->port_immutable is indexed directly by the port number to make
+ * access to this data as efficient as possible.
+ *
+ * Therefore port_immutable is declared as a 1 based array with
+ * potential empty slots at the beginning.
+ */
+ device->port_immutable = kzalloc(sizeof(*device->port_immutable)
+ * (end_port + 1),
+ GFP_KERNEL);
+ if (!device->port_immutable)
goto err;
- for (port_index = 0; port_index < num_ports; ++port_index) {
- ret = ib_query_port(device, port_index + start_port(device),
- tprops);
+ for (port = start_port; port <= end_port; ++port) {
+ ret = device->get_port_immutable(device, port,
+ &device->port_immutable[port]);
if (ret)
goto err;
- device->pkey_tbl_len[port_index] = tprops->pkey_tbl_len;
- device->gid_tbl_len[port_index] = tprops->gid_tbl_len;
+
+ if (verify_immutable(device, port)) {
+ ret = -EINVAL;
+ goto err;
+ }
}
ret = 0;
goto out;
-
err:
- kfree(device->gid_tbl_len);
- kfree(device->pkey_tbl_len);
+ kfree(device->port_immutable);
out:
- kfree(tprops);
return ret;
}
@@ -294,9 +290,9 @@ int ib_register_device(struct ib_device *device,
spin_lock_init(&device->event_handler_lock);
spin_lock_init(&device->client_data_lock);
- ret = read_port_table_lengths(device);
+ ret = read_port_immutable(device);
if (ret) {
- printk(KERN_WARNING "Couldn't create table lengths cache for device %s\n",
+ printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
device->name);
goto out;
}
@@ -305,8 +301,7 @@ int ib_register_device(struct ib_device *device,
if (ret) {
printk(KERN_WARNING "Couldn't register device %s with driver model\n",
device->name);
- kfree(device->gid_tbl_len);
- kfree(device->pkey_tbl_len);
+ kfree(device->port_immutable);
goto out;
}
@@ -348,9 +343,6 @@ void ib_unregister_device(struct ib_device *device)
list_del(&device->core_list);
- kfree(device->gid_tbl_len);
- kfree(device->pkey_tbl_len);
-
mutex_unlock(&device_mutex);
ib_device_unregister_sysfs(device);
@@ -558,7 +550,11 @@ EXPORT_SYMBOL(ib_dispatch_event);
int ib_query_device(struct ib_device *device,
struct ib_device_attr *device_attr)
{
- return device->query_device(device, device_attr);
+ struct ib_udata uhw = {.outlen = 0, .inlen = 0};
+
+ memset(device_attr, 0, sizeof(*device_attr));
+
+ return device->query_device(device, device_attr, &uhw);
}
EXPORT_SYMBOL(ib_query_device);
@@ -575,7 +571,7 @@ int ib_query_port(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr)
{
- if (port_num < start_port(device) || port_num > end_port(device))
+ if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;
return device->query_port(device, port_num, port_attr);
@@ -653,7 +649,7 @@ int ib_modify_port(struct ib_device *device,
if (!device->modify_port)
return -ENOSYS;
- if (port_num < start_port(device) || port_num > end_port(device))
+ if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;
return device->modify_port(device, port_num, port_modify_mask,
@@ -676,8 +672,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
union ib_gid tmp_gid;
int ret, port, i;
- for (port = start_port(device); port <= end_port(device); ++port) {
- for (i = 0; i < device->gid_tbl_len[port - start_port(device)]; ++i) {
+ for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
+ for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
ret = ib_query_gid(device, port, i, &tmp_gid);
if (ret)
return ret;
@@ -709,7 +705,7 @@ int ib_find_pkey(struct ib_device *device,
u16 tmp_pkey;
int partial_ix = -1;
- for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) {
+ for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
if (ret)
return ret;
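
device.c now requires every driver to provide get_port_immutable(), which the core calls from read_port_immutable() to populate the per-port table read by ib_find_gid()/ib_find_pkey(). A hedged sketch of a driver-side callback: pkey_tbl_len and gid_tbl_len are the fields read by the hunks above, while core_cap_flags and max_mad_size are assumptions about the rest of the 4.2 struct ib_port_immutable.

#include <rdma/ib_mad.h>
#include <rdma/ib_verbs.h>

static int example_get_port_immutable(struct ib_device *ibdev, u8 port_num,
				      struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;	/* assumed flag name */
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
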
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index e6ffa2e66..22a3abee2 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -67,7 +67,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client";
goto pid_query_error;
}
- if (iwpm_registered_client(nl_client))
+ if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
+ iwpm_user_pid == IWPM_PID_UNAVAILABLE)
return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
if (!skb) {
@@ -106,7 +107,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
- iwpm_set_registered(nl_client, 1);
iwpm_user_pid = IWPM_PID_UNAVAILABLE;
err_str = "Unable to send a nlmsg";
goto pid_query_error;
@@ -144,12 +144,12 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client";
goto add_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
err_str = "Unregistered port mapper client";
goto add_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
if (!skb) {
err_str = "Unable to create a nlmsg";
@@ -214,12 +214,12 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client";
goto query_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
err_str = "Unregistered port mapper client";
goto query_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
ret = -ENOMEM;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
if (!skb) {
@@ -288,12 +288,12 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
err_str = "Invalid port mapper client";
goto remove_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
err_str = "Unregistered port mapper client";
goto remove_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
if (!skb) {
ret = -ENOMEM;
@@ -388,7 +388,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
__func__, iwpm_user_pid);
if (iwpm_valid_client(nl_client))
- iwpm_set_registered(nl_client, 1);
+ iwpm_set_registration(nl_client, IWPM_REG_VALID);
register_pid_response_exit:
nlmsg_request->request_done = 1;
/* always for found nlmsg_request */
@@ -644,7 +644,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
const char *msg_type = "Mapping Info response";
- int iwpm_pid;
u8 nl_client;
char *iwpm_name;
u16 iwpm_version;
@@ -669,14 +668,14 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
__func__, nl_client);
return ret;
}
- iwpm_set_registered(nl_client, 0);
+ iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+ iwpm_user_pid = cb->nlh->nlmsg_pid;
if (!iwpm_mapinfo_available())
return 0;
- iwpm_pid = cb->nlh->nlmsg_pid;
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
- __func__, iwpm_pid);
- ret = iwpm_send_mapinfo(nl_client, iwpm_pid);
+ __func__, iwpm_user_pid);
+ ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
return ret;
}
EXPORT_SYMBOL(iwpm_mapping_info_cb);
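
The iwpm changes replace the boolean registered/unregistered state with three flags: IWPM_REG_UNDEF after iwpm_init(), IWPM_REG_VALID once userspace answers REG_PID, and IWPM_REG_INCOMPL when the port mapper restarts and asks for mapping info to be resent. A hypothetical helper summarizing when mappings are usable under that scheme:

#include "iwpm_util.h"

static bool example_mappings_usable(u8 nl_client)
{
	return iwpm_valid_pid() &&
	       iwpm_check_registration(nl_client, IWPM_REG_VALID);
}
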
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index a626795bf..5fb089e91 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -78,6 +78,7 @@ init_exit:
mutex_unlock(&iwpm_admin_lock);
if (!ret) {
iwpm_set_valid(nl_client, 1);
+ iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
pr_debug("%s: Mapinfo and reminfo tables are created\n",
__func__);
}
@@ -106,6 +107,7 @@ int iwpm_exit(u8 nl_client)
}
mutex_unlock(&iwpm_admin_lock);
iwpm_set_valid(nl_client, 0);
+ iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
return 0;
}
EXPORT_SYMBOL(iwpm_exit);
@@ -397,17 +399,23 @@ void iwpm_set_valid(u8 nl_client, int valid)
}
/* valid client */
-int iwpm_registered_client(u8 nl_client)
+u32 iwpm_get_registration(u8 nl_client)
{
return iwpm_admin.reg_list[nl_client];
}
/* valid client */
-void iwpm_set_registered(u8 nl_client, int reg)
+void iwpm_set_registration(u8 nl_client, u32 reg)
{
iwpm_admin.reg_list[nl_client] = reg;
}
+/* valid client */
+u32 iwpm_check_registration(u8 nl_client, u32 reg)
+{
+ return (iwpm_get_registration(nl_client) & reg);
+}
+
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
struct sockaddr_storage *b_sockaddr)
{
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index ee2d9ff09..b7b9e194c 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -58,6 +58,10 @@
#define IWPM_PID_UNDEFINED -1
#define IWPM_PID_UNAVAILABLE -2
+#define IWPM_REG_UNDEF 0x01
+#define IWPM_REG_VALID 0x02
+#define IWPM_REG_INCOMPL 0x04
+
struct iwpm_nlmsg_request {
struct list_head inprocess_list;
__u32 nlmsg_seq;
@@ -88,7 +92,7 @@ struct iwpm_admin_data {
atomic_t refcount;
atomic_t nlmsg_seq;
int client_list[RDMA_NL_NUM_CLIENTS];
- int reg_list[RDMA_NL_NUM_CLIENTS];
+ u32 reg_list[RDMA_NL_NUM_CLIENTS];
};
/**
@@ -159,19 +163,31 @@ int iwpm_valid_client(u8 nl_client);
void iwpm_set_valid(u8 nl_client, int valid);
/**
- * iwpm_registered_client - Check if the port mapper client is registered
+ * iwpm_check_registration - Check if the client registration
+ * matches the given one
* @nl_client: The index of the netlink client
+ * @reg: The given registration type to compare with
*
* Call iwpm_register_pid() to register a client
+ * Returns true if the client registration matches reg,
+ * otherwise returns false
+ */
+u32 iwpm_check_registration(u8 nl_client, u32 reg);
+
+/**
+ * iwpm_set_registration - Set the client registration
+ * @nl_client: The index of the netlink client
+ * @reg: Registration type to set
*/
-int iwpm_registered_client(u8 nl_client);
+void iwpm_set_registration(u8 nl_client, u32 reg);
/**
- * iwpm_set_registered - Set the port mapper client to registered or not
+ * iwpm_get_registration
* @nl_client: The index of the netlink client
- * @reg: 1 if registered or 0 if not
+ *
+ * Returns the client registration type
*/
-void iwpm_set_registered(u8 nl_client, int reg);
+u32 iwpm_get_registration(u8 nl_client);
/**
* iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of
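
Because the new registration states are bit flags rather than a boolean, a single iwpm_check_registration() call can match more than one acceptable state. A hypothetical example exploiting that encoding:

#include "iwpm_util.h"

/* True if the client has completed registration at least once. */
static bool example_has_ever_registered(u8 nl_client)
{
	return iwpm_check_registration(nl_client,
				       IWPM_REG_VALID | IWPM_REG_INCOMPL);
}
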
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 74c30f4c5..786fc51bf 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2009 HNR Consulting. All rights reserved.
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -44,6 +45,7 @@
#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
+#include "opa_smi.h"
#include "agent.h"
MODULE_LICENSE("Dual BSD/GPL");
@@ -59,8 +61,6 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
-static struct kmem_cache *ib_mad_cache;
-
static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;
@@ -73,7 +73,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
struct ib_mad_port_private *port_priv,
- struct ib_mad *mad);
+ const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
@@ -179,12 +179,12 @@ static int is_vendor_method_in_use(
return 0;
}
-int ib_response_mad(struct ib_mad *mad)
+int ib_response_mad(const struct ib_mad_hdr *hdr)
{
- return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
- (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
- ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
- (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
+ return ((hdr->method & IB_MGMT_METHOD_RESP) ||
+ (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
+ ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
+ (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
@@ -717,6 +717,32 @@ static void build_smp_wc(struct ib_qp *qp,
wc->port_num = port_num;
}
+static size_t mad_priv_size(const struct ib_mad_private *mp)
+{
+ return sizeof(struct ib_mad_private) + mp->mad_size;
+}
+
+static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
+{
+ size_t size = sizeof(struct ib_mad_private) + mad_size;
+ struct ib_mad_private *ret = kzalloc(size, flags);
+
+ if (ret)
+ ret->mad_size = mad_size;
+
+ return ret;
+}
+
+static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
+{
+ return rdma_max_mad_size(port_priv->device, port_priv->port_num);
+}
+
+static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
+{
+ return sizeof(struct ib_grh) + mp->mad_size;
+}
+
/*
* Return 0 if SMP is to be sent
* Return 1 if SMP was consumed locally (whether or not solicited)
@@ -727,6 +753,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
{
int ret = 0;
struct ib_smp *smp = mad_send_wr->send_buf.mad;
+ struct opa_smp *opa_smp = (struct opa_smp *)smp;
unsigned long flags;
struct ib_mad_local_private *local;
struct ib_mad_private *mad_priv;
@@ -736,8 +763,13 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
u8 port_num;
struct ib_wc mad_wc;
struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
+ size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
+ u16 out_mad_pkey_index = 0;
+ u16 drslid;
+ bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
+ mad_agent_priv->qp_info->port_priv->port_num);
- if (device->node_type == RDMA_NODE_IB_SWITCH &&
+ if (rdma_cap_ib_switch(device) &&
smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
port_num = send_wr->wr.ud.port_num;
else
@@ -749,19 +781,49 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
* If we are at the start of the LID routed part, don't update the
* hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
*/
- if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
- IB_LID_PERMISSIVE &&
- smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
- IB_SMI_DISCARD) {
- ret = -EINVAL;
- dev_err(&device->dev, "Invalid directed route\n");
- goto out;
- }
+ if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
+ u32 opa_drslid;
+
+ if ((opa_get_smp_direction(opa_smp)
+ ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
+ OPA_LID_PERMISSIVE &&
+ opa_smi_handle_dr_smp_send(opa_smp,
+ rdma_cap_ib_switch(device),
+ port_num) == IB_SMI_DISCARD) {
+ ret = -EINVAL;
+ dev_err(&device->dev, "OPA Invalid directed route\n");
+ goto out;
+ }
+ opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
+ if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
+ opa_drslid & 0xffff0000) {
+ ret = -EINVAL;
+ dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
+ opa_drslid);
+ goto out;
+ }
+ drslid = (u16)(opa_drslid & 0x0000ffff);
- /* Check to post send on QP or process locally */
- if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
- smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
- goto out;
+ /* Check to post send on QP or process locally */
+ if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
+ opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
+ goto out;
+ } else {
+ if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
+ IB_LID_PERMISSIVE &&
+ smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
+ IB_SMI_DISCARD) {
+ ret = -EINVAL;
+ dev_err(&device->dev, "Invalid directed route\n");
+ goto out;
+ }
+ drslid = be16_to_cpu(smp->dr_slid);
+
+ /* Check to post send on QP or process locally */
+ if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
+ smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
+ goto out;
+ }
local = kmalloc(sizeof *local, GFP_ATOMIC);
if (!local) {
@@ -771,7 +833,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
}
local->mad_priv = NULL;
local->recv_mad_agent = NULL;
- mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
+ mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
if (!mad_priv) {
ret = -ENOMEM;
dev_err(&device->dev, "No memory for local response MAD\n");
@@ -780,18 +842,25 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
}
build_smp_wc(mad_agent_priv->agent.qp,
- send_wr->wr_id, be16_to_cpu(smp->dr_slid),
+ send_wr->wr_id, drslid,
send_wr->wr.ud.pkey_index,
send_wr->wr.ud.port_num, &mad_wc);
+ if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
+ mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
+ + mad_send_wr->send_buf.data_len
+ + sizeof(struct ib_grh);
+ }
+
/* No GRH for DR SMP */
ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
- (struct ib_mad *)smp,
- (struct ib_mad *)&mad_priv->mad);
+ (const struct ib_mad_hdr *)smp, mad_size,
+ (struct ib_mad_hdr *)mad_priv->mad,
+ &mad_size, &out_mad_pkey_index);
switch (ret)
{
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
- if (ib_response_mad(&mad_priv->mad.mad) &&
+ if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
mad_agent_priv->agent.recv_handler) {
local->mad_priv = mad_priv;
local->recv_mad_agent = mad_agent_priv;
@@ -801,39 +870,43 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
*/
atomic_inc(&mad_agent_priv->refcount);
} else
- kmem_cache_free(ib_mad_cache, mad_priv);
+ kfree(mad_priv);
break;
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
- kmem_cache_free(ib_mad_cache, mad_priv);
+ kfree(mad_priv);
break;
case IB_MAD_RESULT_SUCCESS:
/* Treat like an incoming receive MAD */
port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
mad_agent_priv->agent.port_num);
if (port_priv) {
- memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
+ memcpy(mad_priv->mad, smp, mad_priv->mad_size);
recv_mad_agent = find_mad_agent(port_priv,
- &mad_priv->mad.mad);
+ (const struct ib_mad_hdr *)mad_priv->mad);
}
if (!port_priv || !recv_mad_agent) {
/*
* No receiving agent so drop packet and
* generate send completion.
*/
- kmem_cache_free(ib_mad_cache, mad_priv);
+ kfree(mad_priv);
break;
}
local->mad_priv = mad_priv;
local->recv_mad_agent = recv_mad_agent;
break;
default:
- kmem_cache_free(ib_mad_cache, mad_priv);
+ kfree(mad_priv);
kfree(local);
ret = -EINVAL;
goto out;
}
local->mad_send_wr = mad_send_wr;
+ if (opa) {
+ local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index;
+ local->return_wc_byte_len = mad_size;
+ }
/* Reference MAD agent until send side of local completion handled */
atomic_inc(&mad_agent_priv->refcount);
/* Queue local completion to local list */
@@ -847,11 +920,11 @@ out:
return ret;
}
-static int get_pad_size(int hdr_len, int data_len)
+static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
int seg_size, pad;
- seg_size = sizeof(struct ib_mad) - hdr_len;
+ seg_size = mad_size - hdr_len;
if (data_len && seg_size) {
pad = seg_size - data_len % seg_size;
return pad == seg_size ? 0 : pad;
@@ -870,14 +943,15 @@ static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
}
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
- gfp_t gfp_mask)
+ size_t mad_size, gfp_t gfp_mask)
{
struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
struct ib_rmpp_segment *seg = NULL;
int left, seg_size, pad;
- send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
+ send_buf->seg_size = mad_size - send_buf->hdr_len;
+ send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
seg_size = send_buf->seg_size;
pad = send_wr->pad;
@@ -910,7 +984,7 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
return 0;
}
-int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent)
+int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
@@ -920,26 +994,37 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
u32 remote_qpn, u16 pkey_index,
int rmpp_active,
int hdr_len, int data_len,
- gfp_t gfp_mask)
+ gfp_t gfp_mask,
+ u8 base_version)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
int pad, message_size, ret, size;
void *buf;
+ size_t mad_size;
+ bool opa;
mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
agent);
- pad = get_pad_size(hdr_len, data_len);
+
+ opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
+
+ if (opa && base_version == OPA_MGMT_BASE_VERSION)
+ mad_size = sizeof(struct opa_mad);
+ else
+ mad_size = sizeof(struct ib_mad);
+
+ pad = get_pad_size(hdr_len, data_len, mad_size);
message_size = hdr_len + data_len + pad;
if (ib_mad_kernel_rmpp_agent(mad_agent)) {
- if (!rmpp_active && message_size > sizeof(struct ib_mad))
+ if (!rmpp_active && message_size > mad_size)
return ERR_PTR(-EINVAL);
} else
- if (rmpp_active || message_size > sizeof(struct ib_mad))
+ if (rmpp_active || message_size > mad_size)
return ERR_PTR(-EINVAL);
- size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
+ size = rmpp_active ? hdr_len : mad_size;
buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
if (!buf)
return ERR_PTR(-ENOMEM);
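
ib_create_send_mad() now takes the MAD base version as a trailing argument; the callers updated later in this patch pass IB_MGMT_BASE_VERSION or OPA_MGMT_BASE_VERSION. A hedged caller sketch, not part of the patch: 'agent' and 'ah' are assumed to come from ib_register_mad_agent()/ib_create_ah() elsewhere, and the header/data lengths are placeholders.

/* Sketch only: shows the new trailing base_version argument.  agent, ah
 * and the chosen header/data lengths are assumed to be valid.
 */
static int example_send(struct ib_mad_agent *agent, struct ib_ah *ah,
                        u32 remote_qpn, u16 pkey_index, bool opa)
{
        struct ib_mad_send_buf *msg;
        int ret;

        msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
                                 0 /* rmpp_active */,
                                 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                                 GFP_KERNEL,
                                 opa ? OPA_MGMT_BASE_VERSION :
                                       IB_MGMT_BASE_VERSION);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->ah = ah;
        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                ib_free_send_mad(msg);
        return ret;
}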
@@ -954,7 +1039,14 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
mad_send_wr->mad_agent_priv = mad_agent_priv;
mad_send_wr->sg_list[0].length = hdr_len;
mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
- mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
+
+ /* OPA MADs don't have to be the full 2048 bytes */
+ if (opa && base_version == OPA_MGMT_BASE_VERSION &&
+ data_len < mad_size - hdr_len)
+ mad_send_wr->sg_list[1].length = data_len;
+ else
+ mad_send_wr->sg_list[1].length = mad_size - hdr_len;
+
mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
@@ -967,7 +1059,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
if (rmpp_active) {
- ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
+ ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
if (ret) {
kfree(buf);
return ERR_PTR(ret);
@@ -1237,7 +1329,7 @@ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
recv_wc);
priv = container_of(mad_priv_hdr, struct ib_mad_private,
header);
- kmem_cache_free(ib_mad_cache, priv);
+ kfree(priv);
}
}
EXPORT_SYMBOL(ib_free_recv_mad);
@@ -1324,7 +1416,7 @@ static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
}
static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
- char *oui)
+ const char *oui)
{
int i;
@@ -1622,13 +1714,13 @@ out:
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
- struct ib_mad *mad)
+ const struct ib_mad_hdr *mad_hdr)
{
struct ib_mad_agent_private *mad_agent = NULL;
unsigned long flags;
spin_lock_irqsave(&port_priv->reg_lock, flags);
- if (ib_response_mad(mad)) {
+ if (ib_response_mad(mad_hdr)) {
u32 hi_tid;
struct ib_mad_agent_private *entry;
@@ -1636,7 +1728,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
* Routing is based on high 32 bits of transaction ID
* of MAD.
*/
- hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
+ hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
if (entry->agent.hi_tid == hi_tid) {
mad_agent = entry;
@@ -1648,45 +1740,45 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
struct ib_mad_mgmt_method_table *method;
struct ib_mad_mgmt_vendor_class_table *vendor;
struct ib_mad_mgmt_vendor_class *vendor_class;
- struct ib_vendor_mad *vendor_mad;
+ const struct ib_vendor_mad *vendor_mad;
int index;
/*
* Routing is based on version, class, and method
* For "newer" vendor MADs, also based on OUI
*/
- if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
+ if (mad_hdr->class_version >= MAX_MGMT_VERSION)
goto out;
- if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
+ if (!is_vendor_class(mad_hdr->mgmt_class)) {
class = port_priv->version[
- mad->mad_hdr.class_version].class;
+ mad_hdr->class_version].class;
if (!class)
goto out;
- if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
+ if (convert_mgmt_class(mad_hdr->mgmt_class) >=
IB_MGMT_MAX_METHODS)
goto out;
method = class->method_table[convert_mgmt_class(
- mad->mad_hdr.mgmt_class)];
+ mad_hdr->mgmt_class)];
if (method)
- mad_agent = method->agent[mad->mad_hdr.method &
+ mad_agent = method->agent[mad_hdr->method &
~IB_MGMT_METHOD_RESP];
} else {
vendor = port_priv->version[
- mad->mad_hdr.class_version].vendor;
+ mad_hdr->class_version].vendor;
if (!vendor)
goto out;
vendor_class = vendor->vendor_class[vendor_class_index(
- mad->mad_hdr.mgmt_class)];
+ mad_hdr->mgmt_class)];
if (!vendor_class)
goto out;
/* Find matching OUI */
- vendor_mad = (struct ib_vendor_mad *)mad;
+ vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
index = find_vendor_oui(vendor_class, vendor_mad->oui);
if (index == -1)
goto out;
method = vendor_class->method_table[index];
if (method) {
- mad_agent = method->agent[mad->mad_hdr.method &
+ mad_agent = method->agent[mad_hdr->method &
~IB_MGMT_METHOD_RESP];
}
}
@@ -1708,20 +1800,24 @@ out:
return mad_agent;
}
-static int validate_mad(struct ib_mad *mad, u32 qp_num)
+static int validate_mad(const struct ib_mad_hdr *mad_hdr,
+ const struct ib_mad_qp_info *qp_info,
+ bool opa)
{
int valid = 0;
+ u32 qp_num = qp_info->qp->qp_num;
/* Make sure MAD base version is understood */
- if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
- pr_err("MAD received with unsupported base version %d\n",
- mad->mad_hdr.base_version);
+ if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
+ (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
+ pr_err("MAD received with unsupported base version %d %s\n",
+ mad_hdr->base_version, opa ? "(opa)" : "");
goto out;
}
/* Filter SMI packets sent to other than QP0 */
- if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
- (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
+ if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
+ (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
if (qp_num == 0)
valid = 1;
} else {
@@ -1734,8 +1830,8 @@ out:
return valid;
}
-static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
- struct ib_mad_hdr *mad_hdr)
+static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
+ const struct ib_mad_hdr *mad_hdr)
{
struct ib_rmpp_mad *rmpp_mad;
@@ -1747,16 +1843,16 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}
-static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
- struct ib_mad_recv_wc *rwc)
+static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
+ const struct ib_mad_recv_wc *rwc)
{
- return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
+ return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
rwc->recv_buf.mad->mad_hdr.mgmt_class;
}
-static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
- struct ib_mad_send_wr_private *wr,
- struct ib_mad_recv_wc *rwc )
+static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
+ const struct ib_mad_send_wr_private *wr,
+ const struct ib_mad_recv_wc *rwc )
{
struct ib_ah_attr attr;
u8 send_resp, rcv_resp;
@@ -1765,8 +1861,8 @@ static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
u8 port_num = mad_agent_priv->agent.port_num;
u8 lmc;
- send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
- rcv_resp = ib_response_mad(rwc->recv_buf.mad);
+ send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
+ rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
if (send_resp == rcv_resp)
/* both requests, or both responses. GIDs different */
@@ -1811,22 +1907,22 @@ static inline int is_direct(u8 class)
}
struct ib_mad_send_wr_private*
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
- struct ib_mad_recv_wc *wc)
+ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
+ const struct ib_mad_recv_wc *wc)
{
struct ib_mad_send_wr_private *wr;
- struct ib_mad *mad;
+ const struct ib_mad_hdr *mad_hdr;
- mad = (struct ib_mad *)wc->recv_buf.mad;
+ mad_hdr = &wc->recv_buf.mad->mad_hdr;
list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
- if ((wr->tid == mad->mad_hdr.tid) &&
+ if ((wr->tid == mad_hdr->tid) &&
rcv_has_same_class(wr, wc) &&
/*
* Don't check GID for direct routed MADs.
* These might have permissive LIDs.
*/
- (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
+ (is_direct(mad_hdr->mgmt_class) ||
rcv_has_same_gid(mad_agent_priv, wr, wc)))
return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
}
@@ -1836,15 +1932,15 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
* been notified that the send has completed
*/
list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
- if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
- wr->tid == mad->mad_hdr.tid &&
+ if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
+ wr->tid == mad_hdr->tid &&
wr->timeout &&
rcv_has_same_class(wr, wc) &&
/*
* Don't check GID for direct routed MADs.
* These might have permissive LIDs.
*/
- (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
+ (is_direct(mad_hdr->mgmt_class) ||
rcv_has_same_gid(mad_agent_priv, wr, wc)))
/* Verify request has not been canceled */
return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
@@ -1879,7 +1975,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
}
/* Complete corresponding request */
- if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
+ if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
spin_lock_irqsave(&mad_agent_priv->lock, flags);
mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
if (!mad_send_wr) {
@@ -1924,26 +2020,163 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
}
}
-static bool generate_unmatched_resp(struct ib_mad_private *recv,
- struct ib_mad_private *response)
+static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
+ const struct ib_mad_qp_info *qp_info,
+ const struct ib_wc *wc,
+ int port_num,
+ struct ib_mad_private *recv,
+ struct ib_mad_private *response)
+{
+ enum smi_forward_action retsmi;
+ struct ib_smp *smp = (struct ib_smp *)recv->mad;
+
+ if (smi_handle_dr_smp_recv(smp,
+ rdma_cap_ib_switch(port_priv->device),
+ port_num,
+ port_priv->device->phys_port_cnt) ==
+ IB_SMI_DISCARD)
+ return IB_SMI_DISCARD;
+
+ retsmi = smi_check_forward_dr_smp(smp);
+ if (retsmi == IB_SMI_LOCAL)
+ return IB_SMI_HANDLE;
+
+ if (retsmi == IB_SMI_SEND) { /* don't forward */
+ if (smi_handle_dr_smp_send(smp,
+ rdma_cap_ib_switch(port_priv->device),
+ port_num) == IB_SMI_DISCARD)
+ return IB_SMI_DISCARD;
+
+ if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
+ return IB_SMI_DISCARD;
+ } else if (rdma_cap_ib_switch(port_priv->device)) {
+ /* forward case for switches */
+ memcpy(response, recv, mad_priv_size(response));
+ response->header.recv_wc.wc = &response->header.wc;
+ response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
+ response->header.recv_wc.recv_buf.grh = &response->grh;
+
+ agent_send_response((const struct ib_mad_hdr *)response->mad,
+ &response->grh, wc,
+ port_priv->device,
+ smi_get_fwd_port(smp),
+ qp_info->qp->qp_num,
+ response->mad_size,
+ false);
+
+ return IB_SMI_DISCARD;
+ }
+ return IB_SMI_HANDLE;
+}
+
+static bool generate_unmatched_resp(const struct ib_mad_private *recv,
+ struct ib_mad_private *response,
+ size_t *resp_len, bool opa)
{
- if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
- recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
- memcpy(response, recv, sizeof *response);
+ const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
+ struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
+
+ if (recv_hdr->method == IB_MGMT_METHOD_GET ||
+ recv_hdr->method == IB_MGMT_METHOD_SET) {
+ memcpy(response, recv, mad_priv_size(response));
response->header.recv_wc.wc = &response->header.wc;
- response->header.recv_wc.recv_buf.mad = &response->mad.mad;
+ response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
response->header.recv_wc.recv_buf.grh = &response->grh;
- response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
- response->mad.mad.mad_hdr.status =
- cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
- if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
+ resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
+ resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
+ if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ resp_hdr->status |= IB_SMP_DIRECTION;
+
+ if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
+ if (recv_hdr->mgmt_class ==
+ IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ recv_hdr->mgmt_class ==
+ IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ *resp_len = opa_get_smp_header_size(
+ (struct opa_smp *)recv->mad);
+ else
+ *resp_len = sizeof(struct ib_mad_hdr);
+ }
return true;
} else {
return false;
}
}
+
+static enum smi_action
+handle_opa_smi(struct ib_mad_port_private *port_priv,
+ struct ib_mad_qp_info *qp_info,
+ struct ib_wc *wc,
+ int port_num,
+ struct ib_mad_private *recv,
+ struct ib_mad_private *response)
+{
+ enum smi_forward_action retsmi;
+ struct opa_smp *smp = (struct opa_smp *)recv->mad;
+
+ if (opa_smi_handle_dr_smp_recv(smp,
+ rdma_cap_ib_switch(port_priv->device),
+ port_num,
+ port_priv->device->phys_port_cnt) ==
+ IB_SMI_DISCARD)
+ return IB_SMI_DISCARD;
+
+ retsmi = opa_smi_check_forward_dr_smp(smp);
+ if (retsmi == IB_SMI_LOCAL)
+ return IB_SMI_HANDLE;
+
+ if (retsmi == IB_SMI_SEND) { /* don't forward */
+ if (opa_smi_handle_dr_smp_send(smp,
+ rdma_cap_ib_switch(port_priv->device),
+ port_num) == IB_SMI_DISCARD)
+ return IB_SMI_DISCARD;
+
+ if (opa_smi_check_local_smp(smp, port_priv->device) ==
+ IB_SMI_DISCARD)
+ return IB_SMI_DISCARD;
+
+ } else if (rdma_cap_ib_switch(port_priv->device)) {
+ /* forward case for switches */
+ memcpy(response, recv, mad_priv_size(response));
+ response->header.recv_wc.wc = &response->header.wc;
+ response->header.recv_wc.recv_buf.opa_mad =
+ (struct opa_mad *)response->mad;
+ response->header.recv_wc.recv_buf.grh = &response->grh;
+
+ agent_send_response((const struct ib_mad_hdr *)response->mad,
+ &response->grh, wc,
+ port_priv->device,
+ opa_smi_get_fwd_port(smp),
+ qp_info->qp->qp_num,
+ recv->header.wc.byte_len,
+ true);
+
+ return IB_SMI_DISCARD;
+ }
+
+ return IB_SMI_HANDLE;
+}
+
+static enum smi_action
+handle_smi(struct ib_mad_port_private *port_priv,
+ struct ib_mad_qp_info *qp_info,
+ struct ib_wc *wc,
+ int port_num,
+ struct ib_mad_private *recv,
+ struct ib_mad_private *response,
+ bool opa)
+{
+ struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
+
+ if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
+ mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
+ return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
+ response);
+
+ return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
+}
+
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
struct ib_wc *wc)
{
@@ -1954,109 +2187,97 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
struct ib_mad_agent_private *mad_agent;
int port_num;
int ret = IB_MAD_RESULT_SUCCESS;
+ size_t mad_size;
+ u16 resp_mad_pkey_index = 0;
+ bool opa;
mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
qp_info = mad_list->mad_queue->qp_info;
dequeue_mad(mad_list);
+ opa = rdma_cap_opa_mad(qp_info->port_priv->device,
+ qp_info->port_priv->port_num);
+
mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
mad_list);
recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
ib_dma_unmap_single(port_priv->device,
recv->header.mapping,
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
+ mad_priv_dma_size(recv),
DMA_FROM_DEVICE);
/* Setup MAD receive work completion from "normal" work completion */
recv->header.wc = *wc;
recv->header.recv_wc.wc = &recv->header.wc;
- recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
- recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
+
+ if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
+ recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
+ recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
+ } else {
+ recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
+ recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
+ }
+
+ recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
recv->header.recv_wc.recv_buf.grh = &recv->grh;
if (atomic_read(&qp_info->snoop_count))
snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
/* Validate MAD */
- if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
+ if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
goto out;
- response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
+ mad_size = recv->mad_size;
+ response = alloc_mad_private(mad_size, GFP_KERNEL);
if (!response) {
dev_err(&port_priv->device->dev,
"ib_mad_recv_done_handler no memory for response buffer\n");
goto out;
}
- if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
+ if (rdma_cap_ib_switch(port_priv->device))
port_num = wc->port_num;
else
port_num = port_priv->port_num;
- if (recv->mad.mad.mad_hdr.mgmt_class ==
+ if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- enum smi_forward_action retsmi;
-
- if (smi_handle_dr_smp_recv(&recv->mad.smp,
- port_priv->device->node_type,
- port_num,
- port_priv->device->phys_port_cnt) ==
- IB_SMI_DISCARD)
- goto out;
-
- retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
- if (retsmi == IB_SMI_LOCAL)
- goto local;
-
- if (retsmi == IB_SMI_SEND) { /* don't forward */
- if (smi_handle_dr_smp_send(&recv->mad.smp,
- port_priv->device->node_type,
- port_num) == IB_SMI_DISCARD)
- goto out;
-
- if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
- goto out;
- } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
- /* forward case for switches */
- memcpy(response, recv, sizeof(*response));
- response->header.recv_wc.wc = &response->header.wc;
- response->header.recv_wc.recv_buf.mad = &response->mad.mad;
- response->header.recv_wc.recv_buf.grh = &response->grh;
-
- agent_send_response(&response->mad.mad,
- &response->grh, wc,
- port_priv->device,
- smi_get_fwd_port(&recv->mad.smp),
- qp_info->qp->qp_num);
-
+ if (handle_smi(port_priv, qp_info, wc, port_num, recv,
+ response, opa)
+ == IB_SMI_DISCARD)
goto out;
- }
}
-local:
/* Give driver "right of first refusal" on incoming MAD */
if (port_priv->device->process_mad) {
ret = port_priv->device->process_mad(port_priv->device, 0,
port_priv->port_num,
wc, &recv->grh,
- &recv->mad.mad,
- &response->mad.mad);
+ (const struct ib_mad_hdr *)recv->mad,
+ recv->mad_size,
+ (struct ib_mad_hdr *)response->mad,
+ &mad_size, &resp_mad_pkey_index);
+
+ if (opa)
+ wc->pkey_index = resp_mad_pkey_index;
+
if (ret & IB_MAD_RESULT_SUCCESS) {
if (ret & IB_MAD_RESULT_CONSUMED)
goto out;
if (ret & IB_MAD_RESULT_REPLY) {
- agent_send_response(&response->mad.mad,
+ agent_send_response((const struct ib_mad_hdr *)response->mad,
&recv->grh, wc,
port_priv->device,
port_num,
- qp_info->qp->qp_num);
+ qp_info->qp->qp_num,
+ mad_size, opa);
goto out;
}
}
}
- mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
+ mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
if (mad_agent) {
ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
/*
@@ -2065,17 +2286,17 @@ local:
*/
recv = NULL;
} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
- generate_unmatched_resp(recv, response)) {
- agent_send_response(&response->mad.mad, &recv->grh, wc,
- port_priv->device, port_num, qp_info->qp->qp_num);
+ generate_unmatched_resp(recv, response, &mad_size, opa)) {
+ agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
+ port_priv->device, port_num,
+ qp_info->qp->qp_num, mad_size, opa);
}
out:
/* Post another receive request for this QP */
if (response) {
ib_mad_post_receive_mads(qp_info, response);
- if (recv)
- kmem_cache_free(ib_mad_cache, recv);
+ kfree(recv);
} else
ib_mad_post_receive_mads(qp_info, recv);
}
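
Both process_mad() calls in this file (the local-SMP path earlier and the receive path above) now hand the driver the input MAD length and get the response length and P_Key index back. Reconstructed from those call sites, the updated ib_device callback prototype looks roughly like this; treat it as a sketch, since the header change itself is outside this excerpt:

/* Approximate shape of ib_device->process_mad after this series; the
 * authoritative definition lives in include/rdma/ib_verbs.h (not shown
 * here).  Drivers report the actual response size via *out_mad_size and
 * the P_Key index for the reply via *out_mad_pkey_index.
 */
int (*process_mad)(struct ib_device *device,
                   int process_mad_flags,
                   u8 port_num,
                   const struct ib_wc *in_wc,
                   const struct ib_grh *in_grh,
                   const struct ib_mad_hdr *in_mad,
                   size_t in_mad_size,
                   struct ib_mad_hdr *out_mad,
                   size_t *out_mad_size,
                   u16 *out_mad_pkey_index);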
@@ -2411,7 +2632,8 @@ find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
agent_list) {
- if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
+ if (is_rmpp_data_mad(mad_agent_priv,
+ mad_send_wr->send_buf.mad) &&
&mad_send_wr->send_buf == send_buf)
return mad_send_wr;
}
@@ -2468,10 +2690,14 @@ static void local_completions(struct work_struct *work)
int free_mad;
struct ib_wc wc;
struct ib_mad_send_wc mad_send_wc;
+ bool opa;
mad_agent_priv =
container_of(work, struct ib_mad_agent_private, local_work);
+ opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
+ mad_agent_priv->qp_info->port_priv->port_num);
+
spin_lock_irqsave(&mad_agent_priv->lock, flags);
while (!list_empty(&mad_agent_priv->local_list)) {
local = list_entry(mad_agent_priv->local_list.next,
@@ -2481,6 +2707,7 @@ static void local_completions(struct work_struct *work)
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
free_mad = 0;
if (local->mad_priv) {
+ u8 base_version;
recv_mad_agent = local->recv_mad_agent;
if (!recv_mad_agent) {
dev_err(&mad_agent_priv->agent.device->dev,
@@ -2496,17 +2723,26 @@ static void local_completions(struct work_struct *work)
build_smp_wc(recv_mad_agent->agent.qp,
(unsigned long) local->mad_send_wr,
be16_to_cpu(IB_LID_PERMISSIVE),
- 0, recv_mad_agent->agent.port_num, &wc);
+ local->mad_send_wr->send_wr.wr.ud.pkey_index,
+ recv_mad_agent->agent.port_num, &wc);
local->mad_priv->header.recv_wc.wc = &wc;
- local->mad_priv->header.recv_wc.mad_len =
- sizeof(struct ib_mad);
+
+ base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
+ if (opa && base_version == OPA_MGMT_BASE_VERSION) {
+ local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
+ local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
+ } else {
+ local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
+ local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
+ }
+
INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
&local->mad_priv->header.recv_wc.rmpp_list);
local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
local->mad_priv->header.recv_wc.recv_buf.mad =
- &local->mad_priv->mad.mad;
+ (struct ib_mad *)local->mad_priv->mad;
if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
snoop_recv(recv_mad_agent->qp_info,
&local->mad_priv->header.recv_wc,
@@ -2534,7 +2770,7 @@ local_send_completion:
spin_lock_irqsave(&mad_agent_priv->lock, flags);
atomic_dec(&mad_agent_priv->refcount);
if (free_mad)
- kmem_cache_free(ib_mad_cache, local->mad_priv);
+ kfree(local->mad_priv);
kfree(local);
}
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
@@ -2649,7 +2885,6 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
/* Initialize common scatter list fields */
- sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
sg_list.lkey = (*qp_info->port_priv->mr).lkey;
/* Initialize common receive WR fields */
@@ -2663,7 +2898,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
mad_priv = mad;
mad = NULL;
} else {
- mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
+ mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
+ GFP_ATOMIC);
if (!mad_priv) {
dev_err(&qp_info->port_priv->device->dev,
"No memory for receive buffer\n");
@@ -2671,10 +2907,10 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
break;
}
}
+ sg_list.length = mad_priv_dma_size(mad_priv);
sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
&mad_priv->grh,
- sizeof *mad_priv -
- sizeof mad_priv->header,
+ mad_priv_dma_size(mad_priv),
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
sg_list.addr))) {
@@ -2698,10 +2934,9 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
spin_unlock_irqrestore(&recv_queue->lock, flags);
ib_dma_unmap_single(qp_info->port_priv->device,
mad_priv->header.mapping,
- sizeof *mad_priv -
- sizeof mad_priv->header,
+ mad_priv_dma_size(mad_priv),
DMA_FROM_DEVICE);
- kmem_cache_free(ib_mad_cache, mad_priv);
+ kfree(mad_priv);
dev_err(&qp_info->port_priv->device->dev,
"ib_post_recv failed: %d\n", ret);
break;
@@ -2738,10 +2973,9 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
ib_dma_unmap_single(qp_info->port_priv->device,
recv->header.mapping,
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
+ mad_priv_dma_size(recv),
DMA_FROM_DEVICE);
- kmem_cache_free(ib_mad_cache, recv);
+ kfree(recv);
}
qp_info->recv_queue.count = 0;
@@ -2922,6 +3156,14 @@ static int ib_mad_port_open(struct ib_device *device,
unsigned long flags;
char name[sizeof "ib_mad123"];
int has_smi;
+ struct ib_cq_init_attr cq_attr = {};
+
+ if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
+ return -EFAULT;
+
+ if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
+ rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
+ return -EFAULT;
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
@@ -2938,13 +3180,14 @@ static int ib_mad_port_open(struct ib_device *device,
init_mad_qp(port_priv, &port_priv->qp_info[1]);
cq_size = mad_sendq_size + mad_recvq_size;
- has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+ has_smi = rdma_cap_ib_smi(device, port_num);
if (has_smi)
cq_size *= 2;
+ cq_attr.cqe = cq_size;
port_priv->cq = ib_create_cq(port_priv->device,
ib_mad_thread_completion_handler,
- NULL, port_priv, cq_size, 0);
+ NULL, port_priv, &cq_attr);
if (IS_ERR(port_priv->cq)) {
dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
ret = PTR_ERR(port_priv->cq);
@@ -3055,20 +3298,14 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
static void ib_mad_init_device(struct ib_device *device)
{
- int start, end, i;
+ int start, i;
- if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
- return;
+ start = rdma_start_port(device);
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- start = 0;
- end = 0;
- } else {
- start = 1;
- end = device->phys_port_cnt;
- }
+ for (i = start; i <= rdma_end_port(device); i++) {
+ if (!rdma_cap_ib_mad(device, i))
+ continue;
- for (i = start; i <= end; i++) {
if (ib_mad_port_open(device, i)) {
dev_err(&device->dev, "Couldn't open port %d\n", i);
goto error;
@@ -3086,40 +3323,31 @@ error_agent:
dev_err(&device->dev, "Couldn't close port %d\n", i);
error:
- i--;
+ while (--i >= start) {
+ if (!rdma_cap_ib_mad(device, i))
+ continue;
- while (i >= start) {
if (ib_agent_port_close(device, i))
dev_err(&device->dev,
"Couldn't close port %d for agents\n", i);
if (ib_mad_port_close(device, i))
dev_err(&device->dev, "Couldn't close port %d\n", i);
- i--;
}
}
static void ib_mad_remove_device(struct ib_device *device)
{
- int i, num_ports, cur_port;
+ int i;
- if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
- return;
+ for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
+ if (!rdma_cap_ib_mad(device, i))
+ continue;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- num_ports = 1;
- cur_port = 0;
- } else {
- num_ports = device->phys_port_cnt;
- cur_port = 1;
- }
- for (i = 0; i < num_ports; i++, cur_port++) {
- if (ib_agent_port_close(device, cur_port))
+ if (ib_agent_port_close(device, i))
dev_err(&device->dev,
- "Couldn't close port %d for agents\n",
- cur_port);
- if (ib_mad_port_close(device, cur_port))
- dev_err(&device->dev, "Couldn't close port %d\n",
- cur_port);
+ "Couldn't close port %d for agents\n", i);
+ if (ib_mad_port_close(device, i))
+ dev_err(&device->dev, "Couldn't close port %d\n", i);
}
}
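
ib_mad_init_device() and ib_mad_remove_device() above now walk ports with rdma_start_port()/rdma_end_port() and skip ports without MAD support, instead of special-casing switches by node_type. The same pattern as a small helper sketch; 'setup' is a hypothetical callback standing in for ib_mad_port_open() and friends, not something the patch adds:

/* Sketch of the per-port capability walk used above. */
static void example_for_each_mad_port(struct ib_device *device,
                                      void (*setup)(struct ib_device *, int))
{
        int p;

        for (p = rdma_start_port(device); p <= rdma_end_port(device); p++) {
                if (!rdma_cap_ib_mad(device, p))
                        continue;       /* no MAD services on this port */
                setup(device, p);
        }
}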
@@ -3131,45 +3359,25 @@ static struct ib_client mad_client = {
static int __init ib_mad_init_module(void)
{
- int ret;
-
mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
- ib_mad_cache = kmem_cache_create("ib_mad",
- sizeof(struct ib_mad_private),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!ib_mad_cache) {
- pr_err("Couldn't create ib_mad cache\n");
- ret = -ENOMEM;
- goto error1;
- }
-
INIT_LIST_HEAD(&ib_mad_port_list);
if (ib_register_client(&mad_client)) {
pr_err("Couldn't register ib_mad client\n");
- ret = -EINVAL;
- goto error2;
+ return -EINVAL;
}
return 0;
-
-error2:
- kmem_cache_destroy(ib_mad_cache);
-error1:
- return ret;
}
static void __exit ib_mad_cleanup_module(void)
{
ib_unregister_client(&mad_client);
- kmem_cache_destroy(ib_mad_cache);
}
module_init(ib_mad_init_module);
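
The ib_mad_port_open() hunk above also adopts the reworked ib_create_cq() interface, where the CQ size travels in struct ib_cq_init_attr rather than as a bare cqe argument. A minimal caller sketch, with 'handler' and 'ctx' assumed to exist:

/* Sketch of CQ creation via struct ib_cq_init_attr as used above; only
 * .cqe is needed for this use, the remaining fields stay zeroed.
 */
static struct ib_cq *example_create_cq(struct ib_device *device,
                                       ib_comp_handler handler,
                                       void *ctx, int cq_size)
{
        struct ib_cq_init_attr cq_attr = {};

        cq_attr.cqe = cq_size;
        return ib_create_cq(device, handler, NULL, ctx, &cq_attr);
}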
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d1a0b0ee9..5be89f989 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -41,6 +41,7 @@
#include <linux/workqueue.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
+#include <rdma/opa_smi.h>
#define IB_MAD_QPS_CORE 2 /* Always QP0 and QP1 as a minimum */
@@ -56,7 +57,7 @@
/* Registration table sizes */
#define MAX_MGMT_CLASS 80
-#define MAX_MGMT_VERSION 8
+#define MAX_MGMT_VERSION 0x83
#define MAX_MGMT_OUI 8
#define MAX_MGMT_VENDOR_RANGE2 (IB_MGMT_CLASS_VENDOR_RANGE2_END - \
IB_MGMT_CLASS_VENDOR_RANGE2_START + 1)
@@ -75,12 +76,9 @@ struct ib_mad_private_header {
struct ib_mad_private {
struct ib_mad_private_header header;
+ size_t mad_size;
struct ib_grh grh;
- union {
- struct ib_mad mad;
- struct ib_rmpp_mad rmpp_mad;
- struct ib_smp smp;
- } mad;
+ u8 mad[0];
} __attribute__ ((packed));
struct ib_rmpp_segment {
@@ -150,6 +148,7 @@ struct ib_mad_local_private {
struct ib_mad_private *mad_priv;
struct ib_mad_agent_private *recv_mad_agent;
struct ib_mad_send_wr_private *mad_send_wr;
+ size_t return_wc_byte_len;
};
struct ib_mad_mgmt_method_table {
@@ -213,8 +212,8 @@ struct ib_mad_port_private {
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
struct ib_mad_send_wr_private *
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
- struct ib_mad_recv_wc *mad_recv_wc);
+ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
+ const struct ib_mad_recv_wc *mad_recv_wc);
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
struct ib_mad_send_wc *mad_send_wc);
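
With struct ib_mad_private now ending in a flexible array (u8 mad[0] above), every size in mad.c is derived from the per-buffer mad_size. The helpers used throughout the mad.c hunks, mad_priv_size(), mad_priv_dma_size() and alloc_mad_private(), are introduced earlier in that file, outside this excerpt; their shape is roughly the following reconstruction, not the literal hunk:

/* Reconstruction: sizing helpers for the flexible ib_mad_private layout.
 * The DMA mapping starts at the GRH, hence mad_priv_dma_size() excludes
 * the private header.
 */
static inline size_t mad_priv_size(const struct ib_mad_private *mp)
{
        return sizeof(struct ib_mad_private) + mp->mad_size;
}

static inline size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
        return sizeof(struct ib_grh) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
        struct ib_mad_private *ret = kzalloc(sizeof(*ret) + mad_size, flags);

        if (ret)
                ret->mad_size = mad_size;
        return ret;
}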
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index f37878c9c..382941b46 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2005 Intel Inc. All rights reserved.
* Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -67,6 +68,7 @@ struct mad_rmpp_recv {
u8 mgmt_class;
u8 class_version;
u8 method;
+ u8 base_version;
};
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
@@ -139,7 +141,8 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
recv_wc->wc->pkey_index, 1, hdr_len,
- 0, GFP_KERNEL);
+ 0, GFP_KERNEL,
+ IB_MGMT_BASE_VERSION);
if (IS_ERR(msg))
return;
@@ -165,7 +168,8 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
recv_wc->wc->pkey_index, 1,
- hdr_len, 0, GFP_KERNEL);
+ hdr_len, 0, GFP_KERNEL,
+ IB_MGMT_BASE_VERSION);
if (IS_ERR(msg))
ib_destroy_ah(ah);
else {
@@ -316,6 +320,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
rmpp_recv->class_version = mad_hdr->class_version;
rmpp_recv->method = mad_hdr->method;
+ rmpp_recv->base_version = mad_hdr->base_version;
return rmpp_recv;
error: kfree(rmpp_recv);
@@ -431,14 +436,23 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
struct ib_rmpp_mad *rmpp_mad;
int hdr_size, data_size, pad;
+ bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device,
+ rmpp_recv->agent->qp_info->port_priv->port_num);
rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
- data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
- pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
- if (pad > IB_MGMT_RMPP_DATA || pad < 0)
- pad = 0;
+ if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) {
+ data_size = sizeof(struct opa_rmpp_mad) - hdr_size;
+ pad = OPA_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+ if (pad > OPA_MGMT_RMPP_DATA || pad < 0)
+ pad = 0;
+ } else {
+ data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
+ pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+ if (pad > IB_MGMT_RMPP_DATA || pad < 0)
+ pad = 0;
+ }
return hdr_size + rmpp_recv->seg_num * data_size - pad;
}
@@ -570,13 +584,14 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
if (mad_send_wr->seg_num == 1) {
rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
- paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA -
- mad_send_wr->pad;
+ paylen = (mad_send_wr->send_buf.seg_count *
+ mad_send_wr->send_buf.seg_rmpp_size) -
+ mad_send_wr->pad;
}
if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
- paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
+ paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad;
}
rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index fa17b552f..2cb865c7c 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -780,8 +780,7 @@ static void mcast_event_handler(struct ib_event_handler *handler,
int index;
dev = container_of(handler, struct mcast_device, event_handler);
- if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
- IB_LINK_LAYER_INFINIBAND)
+ if (!rdma_cap_ib_mcast(dev->device, event->element.port_num))
return;
index = event->element.port_num - dev->start_port;
@@ -808,24 +807,16 @@ static void mcast_add_one(struct ib_device *device)
int i;
int count = 0;
- if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
- return;
-
dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
GFP_KERNEL);
if (!dev)
return;
- if (device->node_type == RDMA_NODE_IB_SWITCH)
- dev->start_port = dev->end_port = 0;
- else {
- dev->start_port = 1;
- dev->end_port = device->phys_port_cnt;
- }
+ dev->start_port = rdma_start_port(device);
+ dev->end_port = rdma_end_port(device);
for (i = 0; i <= dev->end_port - dev->start_port; i++) {
- if (rdma_port_get_link_layer(device, dev->start_port + i) !=
- IB_LINK_LAYER_INFINIBAND)
+ if (!rdma_cap_ib_mcast(device, dev->start_port + i))
continue;
port = &dev->port[i];
port->dev = dev;
@@ -863,8 +854,7 @@ static void mcast_remove_one(struct ib_device *device)
flush_workqueue(mcast_wq);
for (i = 0; i <= dev->end_port - dev->start_port; i++) {
- if (rdma_port_get_link_layer(device, dev->start_port + i) ==
- IB_LINK_LAYER_INFINIBAND) {
+ if (rdma_cap_ib_mcast(device, dev->start_port + i)) {
port = &dev->port[i];
deref_port(port);
wait_for_completion(&port->comp);
diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h
new file mode 100644
index 000000000..3bfab3505
--- /dev/null
+++ b/drivers/infiniband/core/opa_smi.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __OPA_SMI_H_
+#define __OPA_SMI_H_
+
+#include <rdma/ib_smi.h>
+#include <rdma/opa_smi.h>
+
+#include "smi.h"
+
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
+ int port_num, int phys_port_cnt);
+int opa_smi_get_fwd_port(struct opa_smp *smp);
+extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
+extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
+ bool is_switch, int port_num);
+
+/*
+ * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
+ * via process_mad
+ */
+static inline enum smi_action opa_smi_check_local_smp(struct opa_smp *smp,
+ struct ib_device *device)
+{
+ /* C14-9:3 -- We're at the end of the DR segment of path */
+ /* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
+ return (device->process_mad &&
+ !opa_get_smp_direction(smp) &&
+ (smp->hop_ptr == smp->hop_cnt + 1)) ?
+ IB_SMI_HANDLE : IB_SMI_DISCARD;
+}
+
+/*
+ * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
+ * via process_mad
+ */
+static inline enum smi_action opa_smi_check_local_returning_smp(struct opa_smp *smp,
+ struct ib_device *device)
+{
+ /* C14-13:3 -- We're at the end of the DR segment of path */
+ /* C14-13:4 -- Hop Pointer == 0 -> give to SM */
+ return (device->process_mad &&
+ opa_get_smp_direction(smp) &&
+ !smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD;
+}
+
+#endif /* __OPA_SMI_H_ */
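
The helpers declared here are chained by handle_opa_smi() in the mad.c hunk earlier in this patch. A compressed sketch of that chain as a single predicate; device, smp, port_num and phys_port_cnt are assumed, and the switch-forwarding leg is only indicated, not implemented:

/* Sketch mirroring handle_opa_smi(): decide whether a received
 * directed-route OPA SMP should be processed on this node.  Forwarding
 * (IB_SMI_FORWARD on a switch) is left to the caller.
 */
static bool example_opa_dr_recv_local(struct ib_device *device,
                                      struct opa_smp *smp,
                                      int port_num, int phys_port_cnt)
{
        bool is_switch = rdma_cap_ib_switch(device);

        if (opa_smi_handle_dr_smp_recv(smp, is_switch, port_num,
                                       phys_port_cnt) == IB_SMI_DISCARD)
                return false;

        switch (opa_smi_check_forward_dr_smp(smp)) {
        case IB_SMI_LOCAL:              /* consume locally */
                return true;
        case IB_SMI_SEND:               /* destined for this node's SMA/SM */
                return opa_smi_handle_dr_smp_send(smp, is_switch,
                                                  port_num) != IB_SMI_DISCARD &&
                       opa_smi_check_local_smp(smp, device) != IB_SMI_DISCARD;
        default:                        /* IB_SMI_FORWARD */
                return false;
        }
}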
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index c38f030f0..ca919f429 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -450,7 +450,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
struct ib_sa_port *port =
&sa_dev->port[event->element.port_num - sa_dev->start_port];
- if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
+ if (!rdma_cap_ib_sa(handler->device, port->port_num))
return;
spin_lock_irqsave(&port->ah_lock, flags);
@@ -540,7 +540,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
ah_attr->port_num = port_num;
ah_attr->static_rate = rec->rate;
- force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;
+ force_grh = rdma_cap_eth_ah(device, port_num);
if (rec->hop_limit > 1 || force_grh) {
ah_attr->ah_flags = IB_AH_GRH;
@@ -583,7 +583,8 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
query->mad_buf = ib_create_send_mad(query->port->agent, 1,
query->sm_ah->pkey_index,
0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
- gfp_mask);
+ gfp_mask,
+ IB_MGMT_BASE_VERSION);
if (IS_ERR(query->mad_buf)) {
kref_put(&query->sm_ah->ref, free_sm_ah);
return -ENOMEM;
@@ -1153,16 +1154,10 @@ static void ib_sa_add_one(struct ib_device *device)
{
struct ib_sa_device *sa_dev;
int s, e, i;
+ int count = 0;
- if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
- return;
-
- if (device->node_type == RDMA_NODE_IB_SWITCH)
- s = e = 0;
- else {
- s = 1;
- e = device->phys_port_cnt;
- }
+ s = rdma_start_port(device);
+ e = rdma_end_port(device);
sa_dev = kzalloc(sizeof *sa_dev +
(e - s + 1) * sizeof (struct ib_sa_port),
@@ -1175,7 +1170,7 @@ static void ib_sa_add_one(struct ib_device *device)
for (i = 0; i <= e - s; ++i) {
spin_lock_init(&sa_dev->port[i].ah_lock);
- if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
+ if (!rdma_cap_ib_sa(device, i + 1))
continue;
sa_dev->port[i].sm_ah = NULL;
@@ -1189,8 +1184,13 @@ static void ib_sa_add_one(struct ib_device *device)
goto err;
INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
+
+ count++;
}
+ if (!count)
+ goto free;
+
ib_set_client_data(device, &sa_client, sa_dev);
/*
@@ -1204,19 +1204,20 @@ static void ib_sa_add_one(struct ib_device *device)
if (ib_register_event_handler(&sa_dev->event_handler))
goto err;
- for (i = 0; i <= e - s; ++i)
- if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+ for (i = 0; i <= e - s; ++i) {
+ if (rdma_cap_ib_sa(device, i + 1))
update_sm_ah(&sa_dev->port[i].update_task);
+ }
return;
err:
- while (--i >= 0)
- if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+ while (--i >= 0) {
+ if (rdma_cap_ib_sa(device, i + 1))
ib_unregister_mad_agent(sa_dev->port[i].agent);
-
+ }
+free:
kfree(sa_dev);
-
return;
}
@@ -1233,7 +1234,7 @@ static void ib_sa_remove_one(struct ib_device *device)
flush_workqueue(ib_wq);
for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
- if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
+ if (rdma_cap_ib_sa(device, i + 1)) {
ib_unregister_mad_agent(sa_dev->port[i].agent);
if (sa_dev->port[i].sm_ah)
kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 5855e4405..f19b23817 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -5,6 +5,7 @@
* Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
* Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -38,85 +39,82 @@
#include <rdma/ib_smi.h>
#include "smi.h"
-
-/*
- * Fixup a directed route SMP for sending
- * Return 0 if the SMP should be discarded
- */
-enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- u8 node_type, int port_num)
+#include "opa_smi.h"
+
+static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
+ u8 *hop_ptr, u8 hop_cnt,
+ const u8 *initial_path,
+ const u8 *return_path,
+ u8 direction,
+ bool dr_dlid_is_permissive,
+ bool dr_slid_is_permissive)
{
- u8 hop_ptr, hop_cnt;
-
- hop_ptr = smp->hop_ptr;
- hop_cnt = smp->hop_cnt;
-
/* See section 14.2.2.2, Vol 1 IB spec */
/* C14-6 -- valid hop_cnt values are from 0 to 63 */
if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
return IB_SMI_DISCARD;
- if (!ib_get_smp_direction(smp)) {
+ if (!direction) {
/* C14-9:1 */
- if (hop_cnt && hop_ptr == 0) {
- smp->hop_ptr++;
- return (smp->initial_path[smp->hop_ptr] ==
+ if (hop_cnt && *hop_ptr == 0) {
+ (*hop_ptr)++;
+ return (initial_path[*hop_ptr] ==
port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-9:2 */
- if (hop_ptr && hop_ptr < hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (*hop_ptr && *hop_ptr < hop_cnt) {
+ if (!is_switch)
return IB_SMI_DISCARD;
- /* smp->return_path set when received */
- smp->hop_ptr++;
- return (smp->initial_path[smp->hop_ptr] ==
+ /* return_path set when received */
+ (*hop_ptr)++;
+ return (initial_path[*hop_ptr] ==
port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-9:3 -- We're at the end of the DR segment of path */
- if (hop_ptr == hop_cnt) {
- /* smp->return_path set when received */
- smp->hop_ptr++;
- return (node_type == RDMA_NODE_IB_SWITCH ||
- smp->dr_dlid == IB_LID_PERMISSIVE ?
+ if (*hop_ptr == hop_cnt) {
+ /* return_path set when received */
+ (*hop_ptr)++;
+ return (is_switch ||
+ dr_dlid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
/* C14-9:5 -- Fail unreasonable hop pointer */
- return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
+ return (*hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
} else {
/* C14-13:1 */
- if (hop_cnt && hop_ptr == hop_cnt + 1) {
- smp->hop_ptr--;
- return (smp->return_path[smp->hop_ptr] ==
+ if (hop_cnt && *hop_ptr == hop_cnt + 1) {
+ (*hop_ptr)--;
+ return (return_path[*hop_ptr] ==
port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:2 */
- if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
+ if (!is_switch)
return IB_SMI_DISCARD;
- smp->hop_ptr--;
- return (smp->return_path[smp->hop_ptr] ==
+ (*hop_ptr)--;
+ return (return_path[*hop_ptr] ==
port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:3 -- at the end of the DR segment of path */
- if (hop_ptr == 1) {
- smp->hop_ptr--;
+ if (*hop_ptr == 1) {
+ (*hop_ptr)--;
/* C14-13:3 -- SMPs destined for SM shouldn't be here */
- return (node_type == RDMA_NODE_IB_SWITCH ||
- smp->dr_slid == IB_LID_PERMISSIVE ?
+ return (is_switch ||
+ dr_slid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:4 -- hop_ptr = 0 -> should have gone to SM */
- if (hop_ptr == 0)
+ if (*hop_ptr == 0)
return IB_SMI_HANDLE;
/* C14-13:5 -- Check for unreasonable hop pointer */
@@ -125,105 +123,163 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
}
/*
- * Adjust information for a received SMP
- * Return 0 if the SMP should be dropped
+ * Fixup a directed route SMP for sending
+ * Return IB_SMI_DISCARD if the SMP should be discarded
*/
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
- int port_num, int phys_port_cnt)
+enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
+ bool is_switch, int port_num)
{
- u8 hop_ptr, hop_cnt;
+ return __smi_handle_dr_smp_send(is_switch, port_num,
+ &smp->hop_ptr, smp->hop_cnt,
+ smp->initial_path,
+ smp->return_path,
+ ib_get_smp_direction(smp),
+ smp->dr_dlid == IB_LID_PERMISSIVE,
+ smp->dr_slid == IB_LID_PERMISSIVE);
+}
- hop_ptr = smp->hop_ptr;
- hop_cnt = smp->hop_cnt;
+enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
+ bool is_switch, int port_num)
+{
+ return __smi_handle_dr_smp_send(is_switch, port_num,
+ &smp->hop_ptr, smp->hop_cnt,
+ smp->route.dr.initial_path,
+ smp->route.dr.return_path,
+ opa_get_smp_direction(smp),
+ smp->route.dr.dr_dlid ==
+ OPA_LID_PERMISSIVE,
+ smp->route.dr.dr_slid ==
+ OPA_LID_PERMISSIVE);
+}
+static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
+ int phys_port_cnt,
+ u8 *hop_ptr, u8 hop_cnt,
+ const u8 *initial_path,
+ u8 *return_path,
+ u8 direction,
+ bool dr_dlid_is_permissive,
+ bool dr_slid_is_permissive)
+{
/* See section 14.2.2.2, Vol 1 IB spec */
/* C14-6 -- valid hop_cnt values are from 0 to 63 */
if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
return IB_SMI_DISCARD;
- if (!ib_get_smp_direction(smp)) {
+ if (!direction) {
/* C14-9:1 -- sender should have incremented hop_ptr */
- if (hop_cnt && hop_ptr == 0)
+ if (hop_cnt && *hop_ptr == 0)
return IB_SMI_DISCARD;
/* C14-9:2 -- intermediate hop */
- if (hop_ptr && hop_ptr < hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (*hop_ptr && *hop_ptr < hop_cnt) {
+ if (!is_switch)
return IB_SMI_DISCARD;
- smp->return_path[hop_ptr] = port_num;
- /* smp->hop_ptr updated when sending */
- return (smp->initial_path[hop_ptr+1] <= phys_port_cnt ?
+ return_path[*hop_ptr] = port_num;
+ /* hop_ptr updated when sending */
+ return (initial_path[*hop_ptr+1] <= phys_port_cnt ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-9:3 -- We're at the end of the DR segment of path */
- if (hop_ptr == hop_cnt) {
+ if (*hop_ptr == hop_cnt) {
if (hop_cnt)
- smp->return_path[hop_ptr] = port_num;
- /* smp->hop_ptr updated when sending */
+ return_path[*hop_ptr] = port_num;
+ /* hop_ptr updated when sending */
- return (node_type == RDMA_NODE_IB_SWITCH ||
- smp->dr_dlid == IB_LID_PERMISSIVE ?
+ return (is_switch ||
+ dr_dlid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
/* C14-9:5 -- fail unreasonable hop pointer */
- return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
+ return (*hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
} else {
/* C14-13:1 */
- if (hop_cnt && hop_ptr == hop_cnt + 1) {
- smp->hop_ptr--;
- return (smp->return_path[smp->hop_ptr] ==
+ if (hop_cnt && *hop_ptr == hop_cnt + 1) {
+ (*hop_ptr)--;
+ return (return_path[*hop_ptr] ==
port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:2 */
- if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
+ if (!is_switch)
return IB_SMI_DISCARD;
- /* smp->hop_ptr updated when sending */
- return (smp->return_path[hop_ptr-1] <= phys_port_cnt ?
+ /* hop_ptr updated when sending */
+ return (return_path[*hop_ptr-1] <= phys_port_cnt ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:3 -- We're at the end of the DR segment of path */
- if (hop_ptr == 1) {
- if (smp->dr_slid == IB_LID_PERMISSIVE) {
+ if (*hop_ptr == 1) {
+ if (dr_slid_is_permissive) {
/* giving SMP to SM - update hop_ptr */
- smp->hop_ptr--;
+ (*hop_ptr)--;
return IB_SMI_HANDLE;
}
- /* smp->hop_ptr updated when sending */
- return (node_type == RDMA_NODE_IB_SWITCH ?
- IB_SMI_HANDLE : IB_SMI_DISCARD);
+ /* hop_ptr updated when sending */
+ return (is_switch ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:4 -- hop_ptr = 0 -> give to SM */
/* C14-13:5 -- Check for unreasonable hop pointer */
- return (hop_ptr == 0 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
+ return (*hop_ptr == 0 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
}
-enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
+/*
+ * Adjust information for a received SMP
+ * Return IB_SMI_DISCARD if the SMP should be dropped
+ */
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
+ int port_num, int phys_port_cnt)
{
- u8 hop_ptr, hop_cnt;
+ return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
+ &smp->hop_ptr, smp->hop_cnt,
+ smp->initial_path,
+ smp->return_path,
+ ib_get_smp_direction(smp),
+ smp->dr_dlid == IB_LID_PERMISSIVE,
+ smp->dr_slid == IB_LID_PERMISSIVE);
+}
- hop_ptr = smp->hop_ptr;
- hop_cnt = smp->hop_cnt;
+/*
+ * Adjust information for a received SMP
+ * Return IB_SMI_DISCARD if the SMP should be dropped
+ */
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
+ int port_num, int phys_port_cnt)
+{
+ return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
+ &smp->hop_ptr, smp->hop_cnt,
+ smp->route.dr.initial_path,
+ smp->route.dr.return_path,
+ opa_get_smp_direction(smp),
+ smp->route.dr.dr_dlid ==
+ OPA_LID_PERMISSIVE,
+ smp->route.dr.dr_slid ==
+ OPA_LID_PERMISSIVE);
+}
- if (!ib_get_smp_direction(smp)) {
+static enum smi_forward_action __smi_check_forward_dr_smp(u8 hop_ptr, u8 hop_cnt,
+ u8 direction,
+ bool dr_dlid_is_permissive,
+ bool dr_slid_is_permissive)
+{
+ if (!direction) {
/* C14-9:2 -- intermediate hop */
if (hop_ptr && hop_ptr < hop_cnt)
return IB_SMI_FORWARD;
/* C14-9:3 -- at the end of the DR segment of path */
if (hop_ptr == hop_cnt)
- return (smp->dr_dlid == IB_LID_PERMISSIVE ?
+ return (dr_dlid_is_permissive ?
IB_SMI_SEND : IB_SMI_LOCAL);
/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
@@ -236,10 +292,29 @@ enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
/* C14-13:3 -- at the end of the DR segment of path */
if (hop_ptr == 1)
- return (smp->dr_slid != IB_LID_PERMISSIVE ?
+ return (!dr_slid_is_permissive ?
IB_SMI_SEND : IB_SMI_LOCAL);
}
return IB_SMI_LOCAL;
+
+}
+
+enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
+{
+ return __smi_check_forward_dr_smp(smp->hop_ptr, smp->hop_cnt,
+ ib_get_smp_direction(smp),
+ smp->dr_dlid == IB_LID_PERMISSIVE,
+ smp->dr_slid == IB_LID_PERMISSIVE);
+}
+
+enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp)
+{
+ return __smi_check_forward_dr_smp(smp->hop_ptr, smp->hop_cnt,
+ opa_get_smp_direction(smp),
+ smp->route.dr.dr_dlid ==
+ OPA_LID_PERMISSIVE,
+ smp->route.dr.dr_slid ==
+ OPA_LID_PERMISSIVE);
}
/*
@@ -251,3 +326,13 @@ int smi_get_fwd_port(struct ib_smp *smp)
return (!ib_get_smp_direction(smp) ? smp->initial_path[smp->hop_ptr+1] :
smp->return_path[smp->hop_ptr-1]);
}
+
+/*
+ * Return the forwarding port number from initial_path for outgoing SMP and
+ * from return_path for returning SMP
+ */
+int opa_smi_get_fwd_port(struct opa_smp *smp)
+{
+ return !opa_get_smp_direction(smp) ? smp->route.dr.initial_path[smp->hop_ptr+1] :
+ smp->route.dr.return_path[smp->hop_ptr-1];
+}
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index aff96bac4..33c91c8a1 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -51,12 +51,12 @@ enum smi_forward_action {
IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */
};
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
int port_num, int phys_port_cnt);
int smi_get_fwd_port(struct ib_smp *smp);
extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- u8 node_type, int port_num);
+ bool is_switch, int port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index cbd0383f6..0b84a9cdf 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -326,6 +326,8 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
int width = (tab_attr->index >> 16) & 0xff;
struct ib_mad *in_mad = NULL;
struct ib_mad *out_mad = NULL;
+ size_t mad_size = sizeof(*out_mad);
+ u16 out_mad_pkey_index = 0;
ssize_t ret;
if (!p->ibdev->process_mad)
@@ -347,7 +349,10 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
in_mad->data[41] = p->port_num; /* PortSelect field */
if ((p->ibdev->process_mad(p->ibdev, IB_MAD_IGNORE_MKEY,
- p->port_num, NULL, NULL, in_mad, out_mad) &
+ p->port_num, NULL, NULL,
+ (const struct ib_mad_hdr *)in_mad, mad_size,
+ (struct ib_mad_hdr *)out_mad, &mad_size,
+ &out_mad_pkey_index) &
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
ret = -EINVAL;
@@ -456,6 +461,7 @@ static void ib_device_release(struct device *device)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
+ kfree(dev->port_immutable);
kfree(dev);
}
@@ -864,7 +870,7 @@ int ib_device_register_sysfs(struct ib_device *device,
goto err_put;
}
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
+ if (rdma_cap_ib_switch(device)) {
ret = add_port(device, 0, port_callback);
if (ret)
goto err_put;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f2f63933e..009481073 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1193,6 +1193,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
return 0;
}
+static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static void ib_ucm_release_dev(struct device *dev)
{
struct ib_ucm_device *ucm_dev;
@@ -1202,7 +1203,7 @@ static void ib_ucm_release_dev(struct device *dev)
if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
clear_bit(ucm_dev->devnum, dev_map);
else
- clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
+ clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
kfree(ucm_dev);
}
@@ -1226,7 +1227,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static int find_overflow_devnum(void)
{
int ret;
@@ -1253,8 +1253,7 @@ static void ib_ucm_add_one(struct ib_device *device)
dev_t base;
struct ib_ucm_device *ucm_dev;
- if (!device->alloc_ucontext ||
- rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+ if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1))
return;
ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 45d67e922..29b21213e 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -722,26 +722,13 @@ static ssize_t ucma_query_route(struct ucma_file *file,
resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
resp.port_num = ctx->cm_id->port_num;
- switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
- case RDMA_TRANSPORT_IB:
- switch (rdma_port_get_link_layer(ctx->cm_id->device,
- ctx->cm_id->port_num)) {
- case IB_LINK_LAYER_INFINIBAND:
- ucma_copy_ib_route(&resp, &ctx->cm_id->route);
- break;
- case IB_LINK_LAYER_ETHERNET:
- ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
- break;
- default:
- break;
- }
- break;
- case RDMA_TRANSPORT_IWARP:
+
+ if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
+ ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+ else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
+ ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
+ else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
ucma_copy_iw_route(&resp, &ctx->cm_id->route);
- break;
- default:
- break;
- }
out:
if (copy_to_user((void __user *)(unsigned long)cmd.response,
@@ -1367,10 +1354,10 @@ static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
/* Acquire mutexes based on pointer comparison to prevent deadlock. */
if (file1 < file2) {
mutex_lock(&file1->mut);
- mutex_lock(&file2->mut);
+ mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&file2->mut);
- mutex_lock(&file1->mut);
+ mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
}
}
@@ -1629,6 +1616,7 @@ static void __exit ucma_cleanup(void)
device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
misc_deregister(&ucma_misc);
idr_destroy(&ctx_idr);
+ idr_destroy(&multicast_idr);
}
module_init(ucma_init);
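
/*
 * Hedged illustration of the locking idiom above: when two mutexes share a
 * lock class, order the acquisitions by address (prevents real deadlock)
 * and annotate the inner one so lockdep does not report it as recursive.
 */
static void lock_two_files(struct mutex *a, struct mutex *b)
{
        if (a < b) {
                mutex_lock(a);
                mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
        } else {
                mutex_lock(b);
                mutex_lock_nested(a, SINGLE_DEPTH_NESTING);
        }
}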
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 928cdd20e..35567fffa 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -99,7 +99,6 @@ struct ib_umad_port {
};
struct ib_umad_device {
- int start_port, end_port;
struct kobject kobj;
struct ib_umad_port port[0];
};
@@ -263,20 +262,23 @@ static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
{
struct ib_mad_recv_buf *recv_buf;
int left, seg_payload, offset, max_seg_payload;
+ size_t seg_size;
- /* We need enough room to copy the first (or only) MAD segment. */
recv_buf = &packet->recv_wc->recv_buf;
- if ((packet->length <= sizeof (*recv_buf->mad) &&
+ seg_size = packet->recv_wc->mad_seg_size;
+
+ /* We need enough room to copy the first (or only) MAD segment. */
+ if ((packet->length <= seg_size &&
count < hdr_size(file) + packet->length) ||
- (packet->length > sizeof (*recv_buf->mad) &&
- count < hdr_size(file) + sizeof (*recv_buf->mad)))
+ (packet->length > seg_size &&
+ count < hdr_size(file) + seg_size))
return -EINVAL;
if (copy_to_user(buf, &packet->mad, hdr_size(file)))
return -EFAULT;
buf += hdr_size(file);
- seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
+ seg_payload = min_t(int, packet->length, seg_size);
if (copy_to_user(buf, recv_buf->mad, seg_payload))
return -EFAULT;
@@ -293,7 +295,7 @@ static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
return -ENOSPC;
}
offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
- max_seg_payload = sizeof (struct ib_mad) - offset;
+ max_seg_payload = seg_size - offset;
for (left = packet->length - seg_payload, buf += seg_payload;
left; left -= seg_payload, buf += seg_payload) {
@@ -426,11 +428,11 @@ static int is_duplicate(struct ib_umad_file *file,
* the same TID, reject the second as a duplicate. This is more
* restrictive than required by the spec.
*/
- if (!ib_response_mad((struct ib_mad *) hdr)) {
- if (!ib_response_mad((struct ib_mad *) sent_hdr))
+ if (!ib_response_mad(hdr)) {
+ if (!ib_response_mad(sent_hdr))
return 1;
continue;
- } else if (!ib_response_mad((struct ib_mad *) sent_hdr))
+ } else if (!ib_response_mad(sent_hdr))
continue;
if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
@@ -451,6 +453,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
struct ib_rmpp_mad *rmpp_mad;
__be64 *tid;
int ret, data_len, hdr_len, copy_offset, rmpp_active;
+ u8 base_version;
if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
return -EINVAL;
@@ -517,11 +520,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
rmpp_active = 0;
}
+ base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version;
data_len = count - hdr_size(file) - hdr_len;
packet->msg = ib_create_send_mad(agent,
be32_to_cpu(packet->mad.hdr.qpn),
packet->mad.hdr.pkey_index, rmpp_active,
- hdr_len, data_len, GFP_KERNEL);
+ hdr_len, data_len, GFP_KERNEL,
+ base_version);
if (IS_ERR(packet->msg)) {
ret = PTR_ERR(packet->msg);
goto err_ah;
@@ -1273,16 +1278,10 @@ static void ib_umad_add_one(struct ib_device *device)
{
struct ib_umad_device *umad_dev;
int s, e, i;
+ int count = 0;
- if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
- return;
-
- if (device->node_type == RDMA_NODE_IB_SWITCH)
- s = e = 0;
- else {
- s = 1;
- e = device->phys_port_cnt;
- }
+ s = rdma_start_port(device);
+ e = rdma_end_port(device);
umad_dev = kzalloc(sizeof *umad_dev +
(e - s + 1) * sizeof (struct ib_umad_port),
@@ -1292,25 +1291,34 @@ static void ib_umad_add_one(struct ib_device *device)
kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);
- umad_dev->start_port = s;
- umad_dev->end_port = e;
-
for (i = s; i <= e; ++i) {
+ if (!rdma_cap_ib_mad(device, i))
+ continue;
+
umad_dev->port[i - s].umad_dev = umad_dev;
if (ib_umad_init_port(device, i, umad_dev,
&umad_dev->port[i - s]))
goto err;
+
+ count++;
}
+ if (!count)
+ goto free;
+
ib_set_client_data(device, &umad_client, umad_dev);
return;
err:
- while (--i >= s)
- ib_umad_kill_port(&umad_dev->port[i - s]);
+ while (--i >= s) {
+ if (!rdma_cap_ib_mad(device, i))
+ continue;
+ ib_umad_kill_port(&umad_dev->port[i - s]);
+ }
+free:
kobject_put(&umad_dev->kobj);
}
@@ -1322,8 +1330,10 @@ static void ib_umad_remove_one(struct ib_device *device)
if (!umad_dev)
return;
- for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
- ib_umad_kill_port(&umad_dev->port[i]);
+ for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) {
+ if (rdma_cap_ib_mad(device, i + rdma_start_port(device)))
+ ib_umad_kill_port(&umad_dev->port[i]);
+ }
kobject_put(&umad_dev->kobj);
}
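
/*
 * Hedged caller sketch (not in the patch): in-kernel users of
 * ib_create_send_mad() now pass the MAD base version explicitly, mirroring
 * what ib_umad_write() above reads out of the user's buffer.  "agent",
 * "remote_qpn" and "pkey_index" are placeholders.
 */
struct ib_mad_send_buf *msg;

msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
                         0 /* no RMPP */, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                         GFP_KERNEL, IB_MGMT_BASE_VERSION);
if (IS_ERR(msg))
        return PTR_ERR(msg);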
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b716b0815..ba365b6d1 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -259,5 +259,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
IB_UVERBS_DECLARE_EX_CMD(create_flow);
IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
IB_UVERBS_DECLARE_EX_CMD(query_device);
+IB_UVERBS_DECLARE_EX_CMD(create_cq);
#endif /* UVERBS_H */
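
/*
 * For reference (hedged): the IB_UVERBS_DECLARE_EX_CMD() macro is defined
 * earlier in uverbs.h and is not shown in this hunk, but judging from the
 * handler added in uverbs_cmd.c below, this new entry declares:
 */
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
                           struct ib_udata *ucore,
                           struct ib_udata *uhw);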
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a9f048990..bbb02ffe8 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1330,40 +1330,37 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
return in_len;
}
-ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
- const char __user *buf, int in_len,
- int out_len)
+static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw,
+ struct ib_uverbs_ex_create_cq *cmd,
+ size_t cmd_sz,
+ int (*cb)(struct ib_uverbs_file *file,
+ struct ib_ucq_object *obj,
+ struct ib_uverbs_ex_create_cq_resp *resp,
+ struct ib_udata *udata,
+ void *context),
+ void *context)
{
- struct ib_uverbs_create_cq cmd;
- struct ib_uverbs_create_cq_resp resp;
- struct ib_udata udata;
struct ib_ucq_object *obj;
struct ib_uverbs_event_file *ev_file = NULL;
struct ib_cq *cq;
int ret;
+ struct ib_uverbs_ex_create_cq_resp resp;
+ struct ib_cq_init_attr attr = {};
- if (out_len < sizeof resp)
- return -ENOSPC;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
-
- if (cmd.comp_vector >= file->device->num_comp_vectors)
- return -EINVAL;
+ if (cmd->comp_vector >= file->device->num_comp_vectors)
+ return ERR_PTR(-EINVAL);
obj = kmalloc(sizeof *obj, GFP_KERNEL);
if (!obj)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
+ init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
down_write(&obj->uobject.mutex);
- if (cmd.comp_channel >= 0) {
- ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
+ if (cmd->comp_channel >= 0) {
+ ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
if (!ev_file) {
ret = -EINVAL;
goto err;
@@ -1376,9 +1373,14 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
INIT_LIST_HEAD(&obj->comp_list);
INIT_LIST_HEAD(&obj->async_list);
- cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
- cmd.comp_vector,
- file->ucontext, &udata);
+ attr.cqe = cmd->cqe;
+ attr.comp_vector = cmd->comp_vector;
+
+ if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
+ attr.flags = cmd->flags;
+
+ cq = file->device->ib_dev->create_cq(file->device->ib_dev, &attr,
+ file->ucontext, uhw);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
goto err_file;
@@ -1397,14 +1399,15 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
goto err_free;
memset(&resp, 0, sizeof resp);
- resp.cq_handle = obj->uobject.id;
- resp.cqe = cq->cqe;
+ resp.base.cq_handle = obj->uobject.id;
+ resp.base.cqe = cq->cqe;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
- ret = -EFAULT;
- goto err_copy;
- }
+ resp.response_length = offsetof(typeof(resp), response_length) +
+ sizeof(resp.response_length);
+
+ ret = cb(file, obj, &resp, ucore, context);
+ if (ret)
+ goto err_cb;
mutex_lock(&file->mutex);
list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
@@ -1414,9 +1417,9 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
up_write(&obj->uobject.mutex);
- return in_len;
+ return obj;
-err_copy:
+err_cb:
idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
err_free:
@@ -1428,7 +1431,106 @@ err_file:
err:
put_uobj_write(&obj->uobject);
- return ret;
+
+ return ERR_PTR(ret);
+}
+
+static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
+ struct ib_ucq_object *obj,
+ struct ib_uverbs_ex_create_cq_resp *resp,
+ struct ib_udata *ucore, void *context)
+{
+ if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
+ return -EFAULT;
+
+ return 0;
+}
+
+ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_create_cq cmd;
+ struct ib_uverbs_ex_create_cq cmd_ex;
+ struct ib_uverbs_create_cq_resp resp;
+ struct ib_udata ucore;
+ struct ib_udata uhw;
+ struct ib_ucq_object *obj;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, buf, sizeof(cmd)))
+ return -EFAULT;
+
+ INIT_UDATA(&ucore, buf, cmd.response, sizeof(cmd), sizeof(resp));
+
+ INIT_UDATA(&uhw, buf + sizeof(cmd),
+ (unsigned long)cmd.response + sizeof(resp),
+ in_len - sizeof(cmd), out_len - sizeof(resp));
+
+ memset(&cmd_ex, 0, sizeof(cmd_ex));
+ cmd_ex.user_handle = cmd.user_handle;
+ cmd_ex.cqe = cmd.cqe;
+ cmd_ex.comp_vector = cmd.comp_vector;
+ cmd_ex.comp_channel = cmd.comp_channel;
+
+ obj = create_cq(file, &ucore, &uhw, &cmd_ex,
+ offsetof(typeof(cmd_ex), comp_channel) +
+ sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
+ NULL);
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ return in_len;
+}
+
+static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
+ struct ib_ucq_object *obj,
+ struct ib_uverbs_ex_create_cq_resp *resp,
+ struct ib_udata *ucore, void *context)
+{
+ if (ib_copy_to_udata(ucore, resp, resp->response_length))
+ return -EFAULT;
+
+ return 0;
+}
+
+int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_create_cq_resp resp;
+ struct ib_uverbs_ex_create_cq cmd;
+ struct ib_ucq_object *obj;
+ int err;
+
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
+ err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+ if (err)
+ return err;
+
+ if (cmd.comp_mask)
+ return -EINVAL;
+
+ if (cmd.reserved)
+ return -EINVAL;
+
+ if (ucore->outlen < (offsetof(typeof(resp), response_length) +
+ sizeof(resp.response_length)))
+ return -ENOSPC;
+
+ obj = create_cq(file, ucore, uhw, &cmd,
+ min(ucore->inlen, sizeof(cmd)),
+ ib_uverbs_ex_create_cq_cb, NULL);
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ return 0;
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
@@ -3324,7 +3426,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
if (ucore->outlen < resp.response_length)
return -ENOSPC;
- err = device->query_device(device, &attr);
+ memset(&attr, 0, sizeof(attr));
+
+ err = device->query_device(device, &attr, uhw);
if (err)
return err;
@@ -3348,6 +3452,18 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
#endif
resp.response_length += sizeof(resp.odp_caps);
+ if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
+ goto end;
+
+ resp.timestamp_mask = attr.timestamp_mask;
+ resp.response_length += sizeof(resp.timestamp_mask);
+
+ if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
+ goto end;
+
+ resp.hca_core_clock = attr.hca_core_clock;
+ resp.response_length += sizeof(resp.hca_core_clock);
+
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
if (err)
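
/*
 * Hedged sketch of the extension pattern used in ib_uverbs_ex_query_device()
 * above: each optional field is appended only when the user's output buffer
 * can hold it, and response_length tells userspace which fields were filled
 * in.  "new_field" is purely illustrative, not a real member.
 */
if (ucore->outlen < resp.response_length + sizeof(resp.new_field))
        goto end;

resp.new_field = attr.new_field;
resp.response_length += sizeof(resp.new_field);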
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 88cce9bb7..f6eef2da7 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -124,6 +124,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
[IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
[IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device,
+ [IB_USER_VERBS_EX_CMD_CREATE_CQ] = ib_uverbs_ex_create_cq,
};
static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index f93eb8da7..bac3fb406 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -48,6 +48,71 @@
#include "core_priv.h"
+static const char * const ib_events[] = {
+ [IB_EVENT_CQ_ERR] = "CQ error",
+ [IB_EVENT_QP_FATAL] = "QP fatal error",
+ [IB_EVENT_QP_REQ_ERR] = "QP request error",
+ [IB_EVENT_QP_ACCESS_ERR] = "QP access error",
+ [IB_EVENT_COMM_EST] = "communication established",
+ [IB_EVENT_SQ_DRAINED] = "send queue drained",
+ [IB_EVENT_PATH_MIG] = "path migration successful",
+ [IB_EVENT_PATH_MIG_ERR] = "path migration error",
+ [IB_EVENT_DEVICE_FATAL] = "device fatal error",
+ [IB_EVENT_PORT_ACTIVE] = "port active",
+ [IB_EVENT_PORT_ERR] = "port error",
+ [IB_EVENT_LID_CHANGE] = "LID change",
+ [IB_EVENT_PKEY_CHANGE] = "P_key change",
+ [IB_EVENT_SM_CHANGE] = "SM change",
+ [IB_EVENT_SRQ_ERR] = "SRQ error",
+ [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
+ [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
+ [IB_EVENT_CLIENT_REREGISTER] = "client reregister",
+ [IB_EVENT_GID_CHANGE] = "GID changed",
+};
+
+const char *ib_event_msg(enum ib_event_type event)
+{
+ size_t index = event;
+
+ return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
+ ib_events[index] : "unrecognized event";
+}
+EXPORT_SYMBOL(ib_event_msg);
+
+static const char * const wc_statuses[] = {
+ [IB_WC_SUCCESS] = "success",
+ [IB_WC_LOC_LEN_ERR] = "local length error",
+ [IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
+ [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
+ [IB_WC_LOC_PROT_ERR] = "local protection error",
+ [IB_WC_WR_FLUSH_ERR] = "WR flushed",
+ [IB_WC_MW_BIND_ERR] = "memory management operation error",
+ [IB_WC_BAD_RESP_ERR] = "bad response error",
+ [IB_WC_LOC_ACCESS_ERR] = "local access error",
+ [IB_WC_REM_INV_REQ_ERR] = "invalid request error",
+ [IB_WC_REM_ACCESS_ERR] = "remote access error",
+ [IB_WC_REM_OP_ERR] = "remote operation error",
+ [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
+ [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
+ [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
+ [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
+ [IB_WC_REM_ABORT_ERR] = "operation aborted",
+ [IB_WC_INV_EECN_ERR] = "invalid EE context number",
+ [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
+ [IB_WC_FATAL_ERR] = "fatal error",
+ [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
+ [IB_WC_GENERAL_ERR] = "general error",
+};
+
+const char *ib_wc_status_msg(enum ib_wc_status status)
+{
+ size_t index = status;
+
+ return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
+ wc_statuses[index] : "unrecognized status";
+}
+EXPORT_SYMBOL(ib_wc_status_msg);
+
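
/*
 * Hedged usage sketch (not in the patch): the new helpers turn raw enum
 * values into human-readable strings for log messages, e.g. in a CQ
 * polling loop.
 */
struct ib_wc wc;

while (ib_poll_cq(cq, 1, &wc) > 0) {
        if (wc.status != IB_WC_SUCCESS)
                pr_err("completion failed: %s (%d)\n",
                       ib_wc_status_msg(wc.status), wc.status);
}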
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
switch (rate) {
@@ -192,17 +257,16 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
}
EXPORT_SYMBOL(ib_create_ah);
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
- struct ib_grh *grh, struct ib_ah_attr *ah_attr)
+int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct ib_ah_attr *ah_attr)
{
u32 flow_class;
u16 gid_index;
int ret;
- int is_eth = (rdma_port_get_link_layer(device, port_num) ==
- IB_LINK_LAYER_ETHERNET);
memset(ah_attr, 0, sizeof *ah_attr);
- if (is_eth) {
+ if (rdma_cap_eth_ah(device, port_num)) {
if (!(wc->wc_flags & IB_WC_GRH))
return -EPROTOTYPE;
@@ -244,8 +308,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
}
EXPORT_SYMBOL(ib_init_ah_from_wc);
-struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
- struct ib_grh *grh, u8 port_num)
+struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
+ const struct ib_grh *grh, u8 port_num)
{
struct ib_ah_attr ah_attr;
int ret;
@@ -871,7 +935,7 @@ int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
union ib_gid sgid;
if ((*qp_attr_mask & IB_QP_AV) &&
- (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) {
+ (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
qp_attr->ah_attr.grh.sgid_index, &sgid);
if (ret)
@@ -1012,11 +1076,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
struct ib_cq *ib_create_cq(struct ib_device *device,
ib_comp_handler comp_handler,
void (*event_handler)(struct ib_event *, void *),
- void *cq_context, int cqe, int comp_vector)
+ void *cq_context,
+ const struct ib_cq_init_attr *cq_attr)
{
struct ib_cq *cq;
- cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);
+ cq = device->create_cq(device, cq_attr, NULL, NULL);
if (!IS_ERR(cq)) {
cq->device = device;
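
/*
 * Hedged caller sketch (not in the patch): kernel consumers now describe
 * the CQ with struct ib_cq_init_attr instead of separate cqe/comp_vector
 * arguments; the cqe and comp_vector members match the attr usage in the
 * uverbs hunk above, and the values here are illustrative.
 */
struct ib_cq_init_attr cq_attr = {
        .cqe            = 256,
        .comp_vector    = 0,
};
struct ib_cq *cq;

cq = ib_create_cq(device, NULL, NULL, NULL, &cq_attr);
if (IS_ERR(cq))
        return PTR_ERR(cq);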