path: root/drivers/nvme/host
author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-10-20 00:10:27 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-10-20 00:10:27 -0300
commit     d0b2f91bede3bd5e3d24dd6803e56eee959c1797 (patch)
tree       7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee /drivers/nvme/host
parent     e914f8eb445e8f74b00303c19c2ffceaedd16a05 (diff)
Linux-libre 4.8.2-gnupck-4.8.2-gnu
Diffstat (limited to 'drivers/nvme/host')
-rw-r--r--  drivers/nvme/host/Kconfig    |   19
-rw-r--r--  drivers/nvme/host/Makefile   |    6
-rw-r--r--  drivers/nvme/host/core.c     |  333
-rw-r--r--  drivers/nvme/host/fabrics.c  |  961
-rw-r--r--  drivers/nvme/host/fabrics.h  |  132
-rw-r--r--  drivers/nvme/host/lightnvm.c |    4
-rw-r--r--  drivers/nvme/host/nvme.h     |   52
-rw-r--r--  drivers/nvme/host/pci.c      |  110
-rw-r--r--  drivers/nvme/host/rdma.c     | 2033
9 files changed, 3532 insertions, 118 deletions
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index d296fc3ae..f7d37a62f 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -24,3 +24,22 @@ config BLK_DEV_NVME_SCSI
to say N here, unless you run a distro that abuses the SCSI
emulation to provide stable device names for mount by id, like
some OpenSuSE and SLES versions.
+
+config NVME_FABRICS
+ tristate
+
+config NVME_RDMA
+ tristate "NVM Express over Fabrics RDMA host driver"
+ depends on INFINIBAND && BLOCK
+ select NVME_CORE
+ select NVME_FABRICS
+ select SG_POOL
+ help
+ This provides support for the NVMe over Fabrics protocol using
+ the RDMA (Infiniband, RoCE, iWarp) transport. This allows you
+ to use remote block devices exported using the NVMe protocol set.
+
+ To configure an NVMe over Fabrics controller use the nvme-cli tool
+ from https://github.com/linux-nvme/nvme-cli.
+
+ If unsure, say N.
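
The nvme-cli "connect" command mentioned above drives the new fabrics core through the /dev/nvme-fabrics misc device added in fabrics.c later in this patch. A minimal user-space sketch of that path, assuming placeholder address, port and NQN values and trimmed error handling:

    /*
     * Illustrative only: roughly what "nvme connect" ends up doing,
     * based on nvmf_dev_write() in fabrics.c below.  The transport
     * address, port and NQN are made-up placeholders.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *opts = "transport=rdma,traddr=192.168.1.10,"
                           "trsvcid=4420,nqn=nqn.2016-06.io.example:subsys1";
        int fd = open("/dev/nvme-fabrics", O_RDWR);

        if (fd < 0) {
            perror("open /dev/nvme-fabrics");
            return 1;
        }
        /* one write == one controller creation request */
        if (write(fd, opts, strlen(opts)) < 0)
            perror("write");
        close(fd);
        return 0;
    }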
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 9a3ca892b..47abcec23 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,8 +1,14 @@
obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
+obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
+obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
nvme-core-y := core.o
nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
nvme-y += pci.o
+
+nvme-fabrics-y += fabrics.o
+
+nvme-rdma-y += rdma.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d5fb55c0a..2feacc70b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -30,6 +30,7 @@
#include <asm/unaligned.h>
#include "nvme.h"
+#include "fabrics.h"
#define NVME_MINORS (1U << MINORBITS)
@@ -47,8 +48,10 @@ unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
-static int nvme_major;
-module_param(nvme_major, int, 0);
+unsigned int nvme_max_retries = 5;
+module_param_named(max_retries, nvme_max_retries, uint, 0644);
+MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
+EXPORT_SYMBOL_GPL(nvme_max_retries);
static int nvme_char_major;
module_param(nvme_char_major, int, 0);
@@ -58,17 +61,38 @@ static DEFINE_SPINLOCK(dev_list_lock);
static struct class *nvme_class;
+void nvme_cancel_request(struct request *req, void *data, bool reserved)
+{
+ int status;
+
+ if (!blk_mq_request_started(req))
+ return;
+
+ dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
+ "Cancelling I/O %d", req->tag);
+
+ status = NVME_SC_ABORT_REQ;
+ if (blk_queue_dying(req->q))
+ status |= NVME_SC_DNR;
+ blk_mq_complete_request(req, status);
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_request);
+
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state)
{
- enum nvme_ctrl_state old_state = ctrl->state;
+ enum nvme_ctrl_state old_state;
bool changed = false;
spin_lock_irq(&ctrl->lock);
+
+ old_state = ctrl->state;
switch (new_state) {
case NVME_CTRL_LIVE:
switch (old_state) {
+ case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
+ case NVME_CTRL_RECONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -79,6 +103,16 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
+ case NVME_CTRL_RECONNECTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_RECONNECTING:
+ switch (old_state) {
+ case NVME_CTRL_LIVE:
changed = true;
/* FALLTHRU */
default:
@@ -89,6 +123,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
+ case NVME_CTRL_RECONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -107,11 +142,12 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
default:
break;
}
- spin_unlock_irq(&ctrl->lock);
if (changed)
ctrl->state = new_state;
+ spin_unlock_irq(&ctrl->lock);
+
return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
@@ -174,21 +210,21 @@ void nvme_requeue_req(struct request *req)
EXPORT_SYMBOL_GPL(nvme_requeue_req);
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, unsigned int flags)
+ struct nvme_command *cmd, unsigned int flags, int qid)
{
- bool write = cmd->common.opcode & 1;
struct request *req;
- req = blk_mq_alloc_request(q, write, flags);
+ if (qid == NVME_QID_ANY) {
+ req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
+ } else {
+ req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
+ qid ? qid - 1 : 0);
+ }
if (IS_ERR(req))
return req;
req->cmd_type = REQ_TYPE_DRV_PRIV;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
- req->__data_len = 0;
- req->__sector = (sector_t) -1;
- req->bio = req->biotail = NULL;
-
req->cmd = (unsigned char *)cmd;
req->cmd_len = sizeof(struct nvme_command);
@@ -290,9 +326,9 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
if (req->cmd_type == REQ_TYPE_DRV_PRIV)
memcpy(cmd, req->cmd, sizeof(*cmd));
- else if (req->cmd_flags & REQ_FLUSH)
+ else if (req_op(req) == REQ_OP_FLUSH)
nvme_setup_flush(ns, cmd);
- else if (req->cmd_flags & REQ_DISCARD)
+ else if (req_op(req) == REQ_OP_DISCARD)
ret = nvme_setup_discard(ns, req, cmd);
else
nvme_setup_rw(ns, req, cmd);
@@ -307,12 +343,12 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
struct nvme_completion *cqe, void *buffer, unsigned bufflen,
- unsigned timeout)
+ unsigned timeout, int qid, int at_head, int flags)
{
struct request *req;
int ret;
- req = nvme_alloc_request(q, cmd, 0);
+ req = nvme_alloc_request(q, cmd, flags, qid);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -325,17 +361,19 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- blk_execute_rq(req->q, NULL, req, 0);
+ blk_execute_rq(req->q, NULL, req, at_head);
ret = req->errors;
out:
blk_mq_free_request(req);
return ret;
}
+EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
+ NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -344,7 +382,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
u32 *result, unsigned timeout)
{
- bool write = cmd->common.opcode & 1;
+ bool write = nvme_is_write(cmd);
struct nvme_completion cqe;
struct nvme_ns *ns = q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL;
@@ -353,7 +391,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void *meta = NULL;
int ret;
- req = nvme_alloc_request(q, cmd, 0);
+ req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -439,6 +477,74 @@ int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
result, timeout);
}
+static void nvme_keep_alive_end_io(struct request *rq, int error)
+{
+ struct nvme_ctrl *ctrl = rq->end_io_data;
+
+ blk_mq_free_request(rq);
+
+ if (error) {
+ dev_err(ctrl->device,
+ "failed nvme_keep_alive_end_io error=%d\n", error);
+ return;
+ }
+
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+}
+
+static int nvme_keep_alive(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command c;
+ struct request *rq;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_keep_alive;
+
+ rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+ NVME_QID_ANY);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ rq->timeout = ctrl->kato * HZ;
+ rq->end_io_data = ctrl;
+
+ blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
+
+ return 0;
+}
+
+static void nvme_keep_alive_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_ctrl, ka_work);
+
+ if (nvme_keep_alive(ctrl)) {
+ /* allocation failure, reset the controller */
+ dev_err(ctrl->device, "keep-alive failed\n");
+ ctrl->ops->reset_ctrl(ctrl);
+ return;
+ }
+}
+
+void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+}
+EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
+
+void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
+{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+ cancel_delayed_work_sync(&ctrl->ka_work);
+}
+EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
+
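The keep-alive helpers above are intended to be driven by the transport once the keep-alive timeout (kato) has been negotiated during the fabrics Connect. A hedged sketch of the expected call pattern, with illustrative function names (the rdma.c driver added by this commit follows the same shape):

    /* Hypothetical transport glue; assumes "nvme.h" context and that
     * ctrl->kato was negotiated via the fabrics Connect command. */
    static int example_setup_ctrl(struct nvme_ctrl *ctrl)
    {
        /* ...allocate queues, nvmf_connect_admin_queue(), identify... */
        nvme_start_keep_alive(ctrl);    /* no-op when ctrl->kato == 0 */
        return 0;
    }

    static void example_teardown_ctrl(struct nvme_ctrl *ctrl)
    {
        nvme_stop_keep_alive(ctrl);     /* cancels ka_work synchronously */
        /* ...tear down I/O and admin queues... */
    }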
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
struct nvme_command c = { };
@@ -500,11 +606,12 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_get_features;
c.features.nsid = cpu_to_le32(nsid);
- c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
- if (ret >= 0)
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+ if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
return ret;
}
@@ -518,12 +625,13 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
- c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
- if (ret >= 0)
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+ if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
return ret;
}
@@ -558,11 +666,22 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
&result);
- if (status)
+ if (status < 0)
return status;
- nr_io_queues = min(result & 0xffff, result >> 16) + 1;
- *count = min(*count, nr_io_queues);
+ /*
+ * Degraded controllers might return an error when setting the queue
+ * count. We still want to be able to bring them online and offer
+ * access to the admin queue, as that might be the only way to fix them up.
+ */
+ if (status > 0) {
+ dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
+ *count = 0;
+ } else {
+ nr_io_queues = min(result & 0xffff, result >> 16) + 1;
+ *count = min(*count, nr_io_queues);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
@@ -726,6 +845,7 @@ static void nvme_init_integrity(struct nvme_ns *ns)
{
struct blk_integrity integrity;
+ memset(&integrity, 0, sizeof(integrity));
switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
integrity.profile = &t10_pi_type3_crc;
@@ -764,7 +884,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
ns->queue->limits.discard_alignment = logical_block_size;
ns->queue->limits.discard_granularity = logical_block_size;
- blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
+ blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
@@ -991,6 +1111,15 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
if (ret)
return ret;
+
+ /* Checking for ctrl->tagset is a trick to avoid sleeping on module
+ * load, since we only need the quirk on reset_controller. Notice
+ * that the HGST device needs this delay only in firmware activation
+ * procedure; unfortunately we have no (easy) way to verify this.
+ */
+ if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
+ msleep(NVME_QUIRK_DELAY_AMOUNT);
+
return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
@@ -1088,6 +1217,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
struct nvme_id_ctrl *id;
u64 cap;
int ret, page_shift;
+ u32 max_hw_sectors;
ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
if (ret) {
@@ -1120,9 +1250,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
memcpy(ctrl->model, id->mn, sizeof(id->mn));
memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
if (id->mdts)
- ctrl->max_hw_sectors = 1 << (id->mdts + page_shift - 9);
+ max_hw_sectors = 1 << (id->mdts + page_shift - 9);
else
- ctrl->max_hw_sectors = UINT_MAX;
+ max_hw_sectors = UINT_MAX;
+ ctrl->max_hw_sectors =
+ min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
unsigned int max_hw_sectors;
@@ -1138,9 +1270,33 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
nvme_set_queue_limits(ctrl, ctrl->admin_q);
+ ctrl->sgls = le32_to_cpu(id->sgls);
+ ctrl->kas = le16_to_cpu(id->kas);
+
+ if (ctrl->ops->is_fabrics) {
+ ctrl->icdoff = le16_to_cpu(id->icdoff);
+ ctrl->ioccsz = le32_to_cpu(id->ioccsz);
+ ctrl->iorcsz = le32_to_cpu(id->iorcsz);
+ ctrl->maxcmd = le16_to_cpu(id->maxcmd);
+
+ /*
+ * In fabrics we need to verify the cntlid matches the
+ * admin connect
+ */
+ if (ctrl->cntlid != le16_to_cpu(id->cntlid))
+ ret = -EINVAL;
+
+ if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+ dev_err(ctrl->dev,
+ "keep-alive support is mandatory for fabrics\n");
+ ret = -EINVAL;
+ }
+ } else {
+ ctrl->cntlid = le16_to_cpu(id->cntlid);
+ }
kfree(id);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
@@ -1322,7 +1478,7 @@ static struct attribute *nvme_ns_attrs[] = {
NULL,
};
-static umode_t nvme_attrs_are_visible(struct kobject *kobj,
+static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1341,7 +1497,7 @@ static umode_t nvme_attrs_are_visible(struct kobject *kobj,
static const struct attribute_group nvme_ns_attr_group = {
.attrs = nvme_ns_attrs,
- .is_visible = nvme_attrs_are_visible,
+ .is_visible = nvme_ns_attrs_are_visible,
};
#define nvme_show_str_function(field) \
@@ -1367,6 +1523,49 @@ nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);
+static ssize_t nvme_sysfs_delete(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (device_remove_file_self(dev, attr))
+ ctrl->ops->delete_ctrl(ctrl);
+ return count;
+}
+static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
+
+static ssize_t nvme_sysfs_show_transport(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
+}
+static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
+
+static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ ctrl->ops->get_subsysnqn(ctrl));
+}
+static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
+
+static ssize_t nvme_sysfs_show_address(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
+}
+static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -1374,11 +1573,38 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_serial.attr,
&dev_attr_firmware_rev.attr,
&dev_attr_cntlid.attr,
+ &dev_attr_delete_controller.attr,
+ &dev_attr_transport.attr,
+ &dev_attr_subsysnqn.attr,
+ &dev_attr_address.attr,
NULL
};
+#define CHECK_ATTR(ctrl, a, name) \
+ if ((a) == &dev_attr_##name.attr && \
+ !(ctrl)->ops->get_##name) \
+ return 0
+
+static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (a == &dev_attr_delete_controller.attr) {
+ if (!ctrl->ops->delete_ctrl)
+ return 0;
+ }
+
+ CHECK_ATTR(ctrl, a, subsysnqn);
+ CHECK_ATTR(ctrl, a, address);
+
+ return a->mode;
+}
+
static struct attribute_group nvme_dev_attrs_group = {
- .attrs = nvme_dev_attrs,
+ .attrs = nvme_dev_attrs,
+ .is_visible = nvme_dev_attrs_are_visible,
};
static const struct attribute_group *nvme_dev_attr_groups[] = {
@@ -1446,12 +1672,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
nvme_set_queue_limits(ctrl, ns->queue);
- disk->major = nvme_major;
- disk->first_minor = 0;
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
- disk->driverfs_dev = ctrl->device;
disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
@@ -1466,7 +1689,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (ns->type == NVME_NS_LIGHTNVM)
return;
- add_disk(ns->disk);
+ device_add_disk(ctrl->device, ns->disk);
if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_attr_group))
pr_warn("%s: failed to create sysfs group for identification\n",
@@ -1517,6 +1740,17 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_alloc_ns(ctrl, nsid);
}
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+ unsigned nsid)
+{
+ struct nvme_ns *ns, *next;
+
+ list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
+ if (ns->ns_id > nsid)
+ nvme_ns_remove(ns);
+ }
+}
+
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
struct nvme_ns *ns;
@@ -1531,7 +1765,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
for (i = 0; i < num_lists; i++) {
ret = nvme_identify_ns_list(ctrl, prev, ns_list);
if (ret)
- goto out;
+ goto free;
for (j = 0; j < min(nn, 1024U); j++) {
nsid = le32_to_cpu(ns_list[j]);
@@ -1551,22 +1785,20 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
nn -= j;
}
out:
+ nvme_remove_invalid_namespaces(ctrl, prev);
+ free:
kfree(ns_list);
return ret;
}
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
- struct nvme_ns *ns, *next;
unsigned i;
for (i = 1; i <= nn; i++)
nvme_validate_ns(ctrl, i);
- list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->ns_id > nn)
- nvme_ns_remove(ns);
- }
+ nvme_remove_invalid_namespaces(ctrl, nn);
}
static void nvme_scan_work(struct work_struct *work)
@@ -1852,16 +2084,10 @@ int __init nvme_core_init(void)
{
int result;
- result = register_blkdev(nvme_major, "nvme");
- if (result < 0)
- return result;
- else if (result > 0)
- nvme_major = result;
-
result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
&nvme_dev_fops);
if (result < 0)
- goto unregister_blkdev;
+ return result;
else if (result > 0)
nvme_char_major = result;
@@ -1875,8 +2101,6 @@ int __init nvme_core_init(void)
unregister_chrdev:
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
- unregister_blkdev:
- unregister_blkdev(nvme_major, "nvme");
return result;
}
@@ -1884,7 +2108,6 @@ void nvme_core_exit(void)
{
class_destroy(nvme_class);
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
- unregister_blkdev(nvme_major, "nvme");
}
MODULE_LICENSE("GPL");
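
The state-machine changes earlier in core.c make NVME_CTRL_RECONNECTING a legal transition from LIVE and back. A rough sketch of how a fabrics transport's error handler is expected to use it; the names are illustrative, not the actual rdma.c code:

    /* Illustrative error handler for a fabrics transport. */
    static void example_error_recovery(struct nvme_ctrl *ctrl)
    {
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RECONNECTING))
            return; /* already resetting or being deleted */

        nvme_stop_keep_alive(ctrl);
        /* tear down transport queues here, then retry the connect;
         * once the controller answers again: */
        if (nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
            nvme_start_keep_alive(ctrl);
            nvme_start_queues(ctrl);
        }
    }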
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
new file mode 100644
index 000000000..4eff49174
--- /dev/null
+++ b/drivers/nvme/host/fabrics.c
@@ -0,0 +1,961 @@
+/*
+ * NVMe over Fabrics common host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include "nvme.h"
+#include "fabrics.h"
+
+static LIST_HEAD(nvmf_transports);
+static DEFINE_MUTEX(nvmf_transports_mutex);
+
+static LIST_HEAD(nvmf_hosts);
+static DEFINE_MUTEX(nvmf_hosts_mutex);
+
+static struct nvmf_host *nvmf_default_host;
+
+static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
+{
+ struct nvmf_host *host;
+
+ list_for_each_entry(host, &nvmf_hosts, list) {
+ if (!strcmp(host->nqn, hostnqn))
+ return host;
+ }
+
+ return NULL;
+}
+
+static struct nvmf_host *nvmf_host_add(const char *hostnqn)
+{
+ struct nvmf_host *host;
+
+ mutex_lock(&nvmf_hosts_mutex);
+ host = __nvmf_host_find(hostnqn);
+ if (host) {
+ kref_get(&host->ref);
+ goto out_unlock;
+ }
+
+ host = kmalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ goto out_unlock;
+
+ kref_init(&host->ref);
+ memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+ uuid_be_gen(&host->id);
+
+ list_add_tail(&host->list, &nvmf_hosts);
+out_unlock:
+ mutex_unlock(&nvmf_hosts_mutex);
+ return host;
+}
+
+static struct nvmf_host *nvmf_host_default(void)
+{
+ struct nvmf_host *host;
+
+ host = kmalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ kref_init(&host->ref);
+ uuid_be_gen(&host->id);
+ snprintf(host->nqn, NVMF_NQN_SIZE,
+ "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
+
+ mutex_lock(&nvmf_hosts_mutex);
+ list_add_tail(&host->list, &nvmf_hosts);
+ mutex_unlock(&nvmf_hosts_mutex);
+
+ return host;
+}
+
+static void nvmf_host_destroy(struct kref *ref)
+{
+ struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);
+
+ mutex_lock(&nvmf_hosts_mutex);
+ list_del(&host->list);
+ mutex_unlock(&nvmf_hosts_mutex);
+
+ kfree(host);
+}
+
+static void nvmf_host_put(struct nvmf_host *host)
+{
+ if (host)
+ kref_put(&host->ref, nvmf_host_destroy);
+}
+
+/**
+ * nvmf_get_address() - Get address/port
+ * @ctrl: Host NVMe controller instance from which we got the address
+ * @buf: OUTPUT parameter that will contain the address/port
+ * @size: buffer size
+ */
+int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ return snprintf(buf, size, "traddr=%s,trsvcid=%s\n",
+ ctrl->opts->traddr, ctrl->opts->trsvcid);
+}
+EXPORT_SYMBOL_GPL(nvmf_get_address);
+
+/**
+ * nvmf_get_subsysnqn() - Get subsystem NQN
+ * @ctrl: Host NVMe controller instance from which we got the NQN
+ */
+const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts->subsysnqn;
+}
+EXPORT_SYMBOL_GPL(nvmf_get_subsysnqn);
+
+/**
+ * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property read command to
+ * the allocated NVMe controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: OUTPUT parameter that will contain the value of
+ * the property after a successful read.
+ *
+ * Used by the host system to retrieve a 32-bit capsule property value
+ * from an NVMe controller on the target system.
+ *
+ * ("Capsule property" is a "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful read
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
+{
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.prop_get.opcode = nvme_fabrics_command;
+ cmd.prop_get.fctype = nvme_fabrics_type_property_get;
+ cmd.prop_get.offset = cpu_to_le32(off);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+
+ if (ret >= 0)
+ *val = le64_to_cpu(cqe.result64);
+ if (unlikely(ret != 0))
+ dev_err(ctrl->device,
+ "Property Get error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_read32);
+
+/**
+ * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property read command to
+ * the allocated controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: OUTPUT parameter that will contain the value of
+ * the property after a successful read.
+ *
+ * Used by the host system to retrieve a 64-bit capsule property value
+ * from an NVMe controller on the target system.
+ *
+ * ("Capsule property" is a "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful read
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.prop_get.opcode = nvme_fabrics_command;
+ cmd.prop_get.fctype = nvme_fabrics_type_property_get;
+ cmd.prop_get.attrib = 1;
+ cmd.prop_get.offset = cpu_to_le32(off);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+
+ if (ret >= 0)
+ *val = le64_to_cpu(cqe.result64);
+ if (unlikely(ret != 0))
+ dev_err(ctrl->device,
+ "Property Get error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_read64);
+
+/**
+ * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property write command to
+ * the allocated NVMe controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: Input parameter that contains the value to be
+ * written to the property.
+ *
+ * Used by the NVMe host system to write a 32-bit capsule property value
+ * to an NVMe controller on the target system.
+ *
+ * ("Capsule property" is a "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful write
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+{
+ struct nvme_command cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.prop_set.opcode = nvme_fabrics_command;
+ cmd.prop_set.fctype = nvme_fabrics_type_property_set;
+ cmd.prop_set.attrib = 0;
+ cmd.prop_set.offset = cpu_to_le32(off);
+ cmd.prop_set.value = cpu_to_le64(val);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+ if (unlikely(ret))
+ dev_err(ctrl->device,
+ "Property Set error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_write32);
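+
Together, nvmf_reg_read32/read64/write32 give a fabrics transport the register accessors that nvme_ctrl_ops expects. A hedged sketch of the wiring; the ops name and the omitted callbacks are illustrative (the RDMA driver in this commit does essentially this):

    /* Illustrative wiring only; callbacks left out here (reset_ctrl,
     * free_ctrl, delete_ctrl, ...) are transport specific. */
    static const struct nvme_ctrl_ops example_fabrics_ctrl_ops = {
        .name           = "example",
        .module         = THIS_MODULE,
        .is_fabrics     = true,
        .reg_read32     = nvmf_reg_read32,
        .reg_read64     = nvmf_reg_read64,
        .reg_write32    = nvmf_reg_write32,
        .get_subsysnqn  = nvmf_get_subsysnqn,
        .get_address    = nvmf_get_address,
    };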
+
+/**
+ * nvmf_log_connect_error() - Error-parsing-diagnostic print
+ * out function for connect() errors.
+ *
+ * @ctrl: the specific /dev/nvmeX device that had the error.
+ *
+ * @errval: Error code to be decoded in a more human-friendly
+ * printout.
+ *
+ * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
+ *
+ * @cmd: This is the SQE portion of a submission capsule.
+ *
+ * @data: This is the "Data" portion of a submission capsule.
+ */
+static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
+ int errval, int offset, struct nvme_command *cmd,
+ struct nvmf_connect_data *data)
+{
+ int err_sctype = errval & (~NVME_SC_DNR);
+
+ switch (err_sctype) {
+
+ case (NVME_SC_CONNECT_INVALID_PARAM):
+ if (offset >> 16) {
+ char *inv_data = "Connect Invalid Data Parameter";
+
+ switch (offset & 0xffff) {
+ case (offsetof(struct nvmf_connect_data, cntlid)):
+ dev_err(ctrl->device,
+ "%s, cntlid: %d\n",
+ inv_data, data->cntlid);
+ break;
+ case (offsetof(struct nvmf_connect_data, hostnqn)):
+ dev_err(ctrl->device,
+ "%s, hostnqn \"%s\"\n",
+ inv_data, data->hostnqn);
+ break;
+ case (offsetof(struct nvmf_connect_data, subsysnqn)):
+ dev_err(ctrl->device,
+ "%s, subsysnqn \"%s\"\n",
+ inv_data, data->subsysnqn);
+ break;
+ default:
+ dev_err(ctrl->device,
+ "%s, starting byte offset: %d\n",
+ inv_data, offset & 0xffff);
+ break;
+ }
+ } else {
+ char *inv_sqe = "Connect Invalid SQE Parameter";
+
+ switch (offset) {
+ case (offsetof(struct nvmf_connect_command, qid)):
+ dev_err(ctrl->device,
+ "%s, qid %d\n",
+ inv_sqe, cmd->connect.qid);
+ break;
+ default:
+ dev_err(ctrl->device,
+ "%s, starting byte offset: %d\n",
+ inv_sqe, offset);
+ }
+ }
+ break;
+ default:
+ dev_err(ctrl->device,
+ "Connect command failed, error wo/DNR bit: %d\n",
+ err_sctype);
+ break;
+ } /* switch (err_sctype) */
+}
+
+/**
+ * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
+ * API function.
+ * @ctrl: Host nvme controller instance used to request
+ * a new NVMe controller allocation on the target
+ * system and establish an NVMe Admin connection to
+ * that controller.
+ *
+ * This function enables an NVMe host device to request a new allocation of
+ * an NVMe controller resource on a target system as well as establish a
+ * fabrics-protocol connection of the NVMe Admin queue between the
+ * host system device and the allocated NVMe controller on the
+ * target system via a NVMe Fabrics "Connect" command.
+ *
+ * Return:
+ * 0: success
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ *
+ */
+int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ struct nvmf_connect_data *data;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.connect.opcode = nvme_fabrics_command;
+ cmd.connect.fctype = nvme_fabrics_type_connect;
+ cmd.connect.qid = 0;
+
+ /*
+ * The fabrics spec sets a minimum depth of 32 for the admin queue,
+ * so always use this depth until there is justification to do
+ * otherwise.
+ */
+ cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+
+ /*
+ * Set keep-alive timeout in seconds granularity (ms * 1000)
+ * and add a grace period for controller kato enforcement
+ */
+ cmd.connect.kato = ctrl->opts->discovery_nqn ? 0 :
+ cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000);
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
+ data->cntlid = cpu_to_le16(0xffff);
+ strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+ strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe,
+ data, sizeof(*data), 0, NVME_QID_ANY, 1,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ if (ret) {
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+ &cmd, data);
+ goto out_free_data;
+ }
+
+ ctrl->cntlid = le16_to_cpu(cqe.result16);
+
+out_free_data:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
+
+/**
+ * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
+ * API function.
+ * @ctrl: Host nvme controller instance used to establish an
+ * NVMe I/O queue connection to the already allocated NVMe
+ * controller on the target system.
+ * @qid: NVMe I/O queue number for the new I/O connection between
+ * host and target (note qid == 0 is illegal as this is
+ * the Admin queue, per NVMe standard).
+ *
+ * This function issues a fabrics-protocol connection
+ * of a NVMe I/O queue (via NVMe Fabrics "Connect" command)
+ * between the host system device and the allocated NVMe controller
+ * on the target system.
+ *
+ * Return:
+ * 0: success
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
+{
+ struct nvme_command cmd;
+ struct nvmf_connect_data *data;
+ struct nvme_completion cqe;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.connect.opcode = nvme_fabrics_command;
+ cmd.connect.fctype = nvme_fabrics_type_connect;
+ cmd.connect.qid = cpu_to_le16(qid);
+ cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
+ data->cntlid = cpu_to_le16(ctrl->cntlid);
+ strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+ strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+ ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &cqe,
+ data, sizeof(*data), 0, qid, 1,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ if (ret) {
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+ &cmd, data);
+ }
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
+
+/**
+ * nvmf_register_transport() - NVMe Fabrics Library registration function.
+ * @ops: Transport ops instance to be registered to the
+ * common fabrics library.
+ *
+ * API function that registers the type of specific transport fabric
+ * being implemented to the common NVMe fabrics library. Part of
+ * the overall init sequence of starting up a fabrics driver.
+ */
+void nvmf_register_transport(struct nvmf_transport_ops *ops)
+{
+ mutex_lock(&nvmf_transports_mutex);
+ list_add_tail(&ops->entry, &nvmf_transports);
+ mutex_unlock(&nvmf_transports_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmf_register_transport);
+
+/**
+ * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
+ * @ops: Transport ops instance to be unregistered from the
+ * common fabrics library.
+ *
+ * Fabrics API function that unregisters the type of specific transport
+ * fabric being implemented from the common NVMe fabrics library.
+ * Part of the overall exit sequence of unloading the implemented driver.
+ */
+void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
+{
+ mutex_lock(&nvmf_transports_mutex);
+ list_del(&ops->entry);
+ mutex_unlock(&nvmf_transports_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
+
+static struct nvmf_transport_ops *nvmf_lookup_transport(
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvmf_transport_ops *ops;
+
+ lockdep_assert_held(&nvmf_transports_mutex);
+
+ list_for_each_entry(ops, &nvmf_transports, entry) {
+ if (strcmp(ops->name, opts->transport) == 0)
+ return ops;
+ }
+
+ return NULL;
+}
+
+static const match_table_t opt_tokens = {
+ { NVMF_OPT_TRANSPORT, "transport=%s" },
+ { NVMF_OPT_TRADDR, "traddr=%s" },
+ { NVMF_OPT_TRSVCID, "trsvcid=%s" },
+ { NVMF_OPT_NQN, "nqn=%s" },
+ { NVMF_OPT_QUEUE_SIZE, "queue_size=%d" },
+ { NVMF_OPT_NR_IO_QUEUES, "nr_io_queues=%d" },
+ { NVMF_OPT_RECONNECT_DELAY, "reconnect_delay=%d" },
+ { NVMF_OPT_KATO, "keep_alive_tmo=%d" },
+ { NVMF_OPT_HOSTNQN, "hostnqn=%s" },
+ { NVMF_OPT_ERR, NULL }
+};
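+
nvmf_parse_options() below splits a single comma-separated string on ',' and '\n' and matches each token against this table. An illustrative option string, with all values as placeholders:

    /* Placeholder values; matched token-by-token against opt_tokens[]. */
    static const char example_opts[] =
        "transport=rdma,traddr=10.0.0.2,trsvcid=4420,"
        "nqn=nqn.2016-06.io.example:subsys1,"
        "nr_io_queues=4,queue_size=128,keep_alive_tmo=5,"
        "reconnect_delay=10";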
+
+static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ size_t nqnlen = 0;
+
+ /* Set defaults */
+ opts->queue_size = NVMF_DEF_QUEUE_SIZE;
+ opts->nr_io_queues = num_online_cpus();
+ opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case NVMF_OPT_TRANSPORT:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->transport = p;
+ break;
+ case NVMF_OPT_NQN:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->subsysnqn = p;
+ nqnlen = strlen(opts->subsysnqn);
+ if (nqnlen >= NVMF_NQN_SIZE) {
+ pr_err("%s needs to be < %d bytes\n",
+ opts->subsysnqn, NVMF_NQN_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->discovery_nqn =
+ !(strcmp(opts->subsysnqn,
+ NVME_DISC_SUBSYS_NAME));
+ if (opts->discovery_nqn)
+ opts->nr_io_queues = 0;
+ break;
+ case NVMF_OPT_TRADDR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->traddr = p;
+ break;
+ case NVMF_OPT_TRSVCID:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->trsvcid = p;
+ break;
+ case NVMF_OPT_QUEUE_SIZE:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token < NVMF_MIN_QUEUE_SIZE ||
+ token > NVMF_MAX_QUEUE_SIZE) {
+ pr_err("Invalid queue_size %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->queue_size = token;
+ break;
+ case NVMF_OPT_NR_IO_QUEUES:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid number of IOQs %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_io_queues = min_t(unsigned int,
+ num_online_cpus(), token);
+ break;
+ case NVMF_OPT_KATO:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (opts->discovery_nqn) {
+ pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (token < 0) {
+ pr_err("Invalid keep_alive_tmo %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ } else if (token == 0) {
+ /* Allowed for debug */
+ pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
+ }
+ opts->kato = token;
+ break;
+ case NVMF_OPT_HOSTNQN:
+ if (opts->host) {
+ pr_err("hostnqn already user-assigned: %s\n",
+ opts->host->nqn);
+ ret = -EADDRINUSE;
+ goto out;
+ }
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ nqnlen = strlen(p);
+ if (nqnlen >= NVMF_NQN_SIZE) {
+ pr_err("%s needs to be < %d bytes\n",
+ p, NVMF_NQN_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->host = nvmf_host_add(p);
+ if (!opts->host) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ break;
+ case NVMF_OPT_RECONNECT_DELAY:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid reconnect_delay %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->reconnect_delay = token;
+ break;
+ default:
+ pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (!opts->host) {
+ kref_get(&nvmf_default_host->ref);
+ opts->host = nvmf_default_host;
+ }
+
+out:
+ if (!opts->discovery_nqn && !opts->kato)
+ opts->kato = NVME_DEFAULT_KATO;
+ kfree(options);
+ return ret;
+}
+
+static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
+ unsigned int required_opts)
+{
+ if ((opts->mask & required_opts) != required_opts) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
+ if ((opt_tokens[i].token & required_opts) &&
+ !(opt_tokens[i].token & opts->mask)) {
+ pr_warn("missing parameter '%s'\n",
+ opt_tokens[i].pattern);
+ }
+ }
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
+ unsigned int allowed_opts)
+{
+ if (opts->mask & ~allowed_opts) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
+ if (opt_tokens[i].token & ~allowed_opts) {
+ pr_warn("invalid parameter '%s'\n",
+ opt_tokens[i].pattern);
+ }
+ }
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void nvmf_free_options(struct nvmf_ctrl_options *opts)
+{
+ nvmf_host_put(opts->host);
+ kfree(opts->transport);
+ kfree(opts->traddr);
+ kfree(opts->trsvcid);
+ kfree(opts->subsysnqn);
+ kfree(opts);
+}
+EXPORT_SYMBOL_GPL(nvmf_free_options);
+
+#define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
+#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
+ NVMF_OPT_KATO | NVMF_OPT_HOSTNQN)
+
+static struct nvme_ctrl *
+nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
+{
+ struct nvmf_ctrl_options *opts;
+ struct nvmf_transport_ops *ops;
+ struct nvme_ctrl *ctrl;
+ int ret;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ ret = nvmf_parse_options(opts, buf);
+ if (ret)
+ goto out_free_opts;
+
+ /*
+ * Check the generic options first as we need a valid transport for
+ * the lookup below. Then clear the generic flags so that transport
+ * drivers don't have to care about them.
+ */
+ ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
+ if (ret)
+ goto out_free_opts;
+ opts->mask &= ~NVMF_REQUIRED_OPTS;
+
+ mutex_lock(&nvmf_transports_mutex);
+ ops = nvmf_lookup_transport(opts);
+ if (!ops) {
+ pr_info("no handler found for transport %s.\n",
+ opts->transport);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = nvmf_check_required_opts(opts, ops->required_opts);
+ if (ret)
+ goto out_unlock;
+ ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
+ ops->allowed_opts | ops->required_opts);
+ if (ret)
+ goto out_unlock;
+
+ ctrl = ops->create_ctrl(dev, opts);
+ if (IS_ERR(ctrl)) {
+ ret = PTR_ERR(ctrl);
+ goto out_unlock;
+ }
+
+ mutex_unlock(&nvmf_transports_mutex);
+ return ctrl;
+
+out_unlock:
+ mutex_unlock(&nvmf_transports_mutex);
+out_free_opts:
+ nvmf_host_put(opts->host);
+ kfree(opts);
+ return ERR_PTR(ret);
+}
+
+static struct class *nvmf_class;
+static struct device *nvmf_device;
+static DEFINE_MUTEX(nvmf_dev_mutex);
+
+static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *seq_file = file->private_data;
+ struct nvme_ctrl *ctrl;
+ const char *buf;
+ int ret = 0;
+
+ if (count > PAGE_SIZE)
+ return -ENOMEM;
+
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ mutex_lock(&nvmf_dev_mutex);
+ if (seq_file->private) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ctrl = nvmf_create_ctrl(nvmf_device, buf, count);
+ if (IS_ERR(ctrl)) {
+ ret = PTR_ERR(ctrl);
+ goto out_unlock;
+ }
+
+ seq_file->private = ctrl;
+
+out_unlock:
+ mutex_unlock(&nvmf_dev_mutex);
+ kfree(buf);
+ return ret ? ret : count;
+}
+
+static int nvmf_dev_show(struct seq_file *seq_file, void *private)
+{
+ struct nvme_ctrl *ctrl;
+ int ret = 0;
+
+ mutex_lock(&nvmf_dev_mutex);
+ ctrl = seq_file->private;
+ if (!ctrl) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ seq_printf(seq_file, "instance=%d,cntlid=%d\n",
+ ctrl->instance, ctrl->cntlid);
+
+out_unlock:
+ mutex_unlock(&nvmf_dev_mutex);
+ return ret;
+}
+
+static int nvmf_dev_open(struct inode *inode, struct file *file)
+{
+ /*
+ * The miscdevice code initializes file->private_data, but doesn't
+ * make use of it later.
+ */
+ file->private_data = NULL;
+ return single_open(file, nvmf_dev_show, NULL);
+}
+
+static int nvmf_dev_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq_file = file->private_data;
+ struct nvme_ctrl *ctrl = seq_file->private;
+
+ if (ctrl)
+ nvme_put_ctrl(ctrl);
+ return single_release(inode, file);
+}
+
+static const struct file_operations nvmf_dev_fops = {
+ .owner = THIS_MODULE,
+ .write = nvmf_dev_write,
+ .read = seq_read,
+ .open = nvmf_dev_open,
+ .release = nvmf_dev_release,
+};
+
+static struct miscdevice nvmf_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "nvme-fabrics",
+ .fops = &nvmf_dev_fops,
+};
+
+static int __init nvmf_init(void)
+{
+ int ret;
+
+ nvmf_default_host = nvmf_host_default();
+ if (!nvmf_default_host)
+ return -ENOMEM;
+
+ nvmf_class = class_create(THIS_MODULE, "nvme-fabrics");
+ if (IS_ERR(nvmf_class)) {
+ pr_err("couldn't register class nvme-fabrics\n");
+ ret = PTR_ERR(nvmf_class);
+ goto out_free_host;
+ }
+
+ nvmf_device =
+ device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
+ if (IS_ERR(nvmf_device)) {
+ pr_err("couldn't create nvme-fabrics device!\n");
+ ret = PTR_ERR(nvmf_device);
+ goto out_destroy_class;
+ }
+
+ ret = misc_register(&nvmf_misc);
+ if (ret) {
+ pr_err("couldn't register misc device: %d\n", ret);
+ goto out_destroy_device;
+ }
+
+ return 0;
+
+out_destroy_device:
+ device_destroy(nvmf_class, MKDEV(0, 0));
+out_destroy_class:
+ class_destroy(nvmf_class);
+out_free_host:
+ nvmf_host_put(nvmf_default_host);
+ return ret;
+}
+
+static void __exit nvmf_exit(void)
+{
+ misc_deregister(&nvmf_misc);
+ device_destroy(nvmf_class, MKDEV(0, 0));
+ class_destroy(nvmf_class);
+ nvmf_host_put(nvmf_default_host);
+
+ BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
+}
+
+MODULE_LICENSE("GPL v2");
+
+module_init(nvmf_init);
+module_exit(nvmf_exit);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
new file mode 100644
index 000000000..46e460aee
--- /dev/null
+++ b/drivers/nvme/host/fabrics.h
@@ -0,0 +1,132 @@
+/*
+ * NVMe over Fabrics common host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _NVME_FABRICS_H
+#define _NVME_FABRICS_H 1
+
+#include <linux/in.h>
+#include <linux/inet.h>
+
+#define NVMF_MIN_QUEUE_SIZE 16
+#define NVMF_MAX_QUEUE_SIZE 1024
+#define NVMF_DEF_QUEUE_SIZE 128
+#define NVMF_DEF_RECONNECT_DELAY 10
+
+/*
+ * Define a host as seen by the target. We allocate one at boot, but also
+ * allow overriding it when creating controllers. This is both to provide
+ * persistence of the Host NQN over multiple boots, and to allow using
+ * multiple ones, for example in a container scenario. Because we must not
+ * use different Host NQNs with the same Host ID we generate a Host ID and
+ * use this structure to keep track of the relation between the two.
+ */
+struct nvmf_host {
+ struct kref ref;
+ struct list_head list;
+ char nqn[NVMF_NQN_SIZE];
+ uuid_be id;
+};
+
+/**
+ * enum nvmf_parsing_opts - used to define the sysfs parsing options used.
+ */
+enum {
+ NVMF_OPT_ERR = 0,
+ NVMF_OPT_TRANSPORT = 1 << 0,
+ NVMF_OPT_NQN = 1 << 1,
+ NVMF_OPT_TRADDR = 1 << 2,
+ NVMF_OPT_TRSVCID = 1 << 3,
+ NVMF_OPT_QUEUE_SIZE = 1 << 4,
+ NVMF_OPT_NR_IO_QUEUES = 1 << 5,
+ NVMF_OPT_TL_RETRY_COUNT = 1 << 6,
+ NVMF_OPT_KATO = 1 << 7,
+ NVMF_OPT_HOSTNQN = 1 << 8,
+ NVMF_OPT_RECONNECT_DELAY = 1 << 9,
+};
+
+/**
+ * struct nvmf_ctrl_options - Used to hold the options specified
+ * with the parsing opts enum.
+ * @mask: Used by the fabrics library to parse through sysfs options
+ * on adding a NVMe controller.
+ * @transport: Holds the fabric transport "technology name" (for lack of
+ * a better description) that will be used by an NVMe controller
+ * being added.
+ * @subsysnqn: Holds the fully qualified NQN subsystem name (format defined
+ * in the NVMe specification, "NVMe Qualified Names").
+ * @traddr: network address that will be used by the host to communicate
+ * to the added NVMe controller.
+ * @trsvcid: network port used for host-controller communication.
+ * @queue_size: Number of IO queue elements.
+ * @nr_io_queues: Number of controller IO queues that will be established.
+ * @reconnect_delay: Time between two consecutive reconnect attempts.
+ * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
+ * @kato: Keep-alive timeout.
+ * @host: Virtual NVMe host, contains the NQN and Host ID.
+ */
+struct nvmf_ctrl_options {
+ unsigned mask;
+ char *transport;
+ char *subsysnqn;
+ char *traddr;
+ char *trsvcid;
+ size_t queue_size;
+ unsigned int nr_io_queues;
+ unsigned int reconnect_delay;
+ bool discovery_nqn;
+ unsigned int kato;
+ struct nvmf_host *host;
+};
+
+/*
+ * struct nvmf_transport_ops - used to register a specific
+ * fabric implementation of NVMe fabrics.
+ * @entry: Used by the fabrics library to add the new
+ * registration entry to its linked-list internal tree.
+ * @name: Name of the NVMe fabric driver implementation.
+ * @required_opts: sysfs command-line options that must be specified
+ * when adding a new NVMe controller.
+ * @allowed_opts: sysfs command-line options that can be specified
+ * when adding a new NVMe controller.
+ * @create_ctrl(): function pointer that points to a non-NVMe
+ * implementation-specific fabric technology
+ * that would go into starting up that fabric
+ * for the purpose of connection to an NVMe controller
+ * using that fabric technology.
+ *
+ * Notes:
+ * 1. At minimum, 'required_opts' and 'allowed_opts' should
+ * be set to the same enum parsing options defined earlier.
+ * 2. create_ctrl() must be defined (even if it does nothing)
+ */
+struct nvmf_transport_ops {
+ struct list_head entry;
+ const char *name;
+ int required_opts;
+ int allowed_opts;
+ struct nvme_ctrl *(*create_ctrl)(struct device *dev,
+ struct nvmf_ctrl_options *opts);
+};
+
+int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
+int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
+void nvmf_register_transport(struct nvmf_transport_ops *ops);
+void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
+void nvmf_free_options(struct nvmf_ctrl_options *opts);
+const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl);
+int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
+
+#endif /* _NVME_FABRICS_H */
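
A transport driver consumes this header by filling in nvmf_transport_ops and registering it at module load. A minimal sketch under those assumptions; the names and option masks are illustrative, not part of this patch:

    static struct nvme_ctrl *example_create_ctrl(struct device *dev,
            struct nvmf_ctrl_options *opts)
    {
        /* allocate the controller, connect the admin queue, etc. */
        return ERR_PTR(-ENOTSUPP);
    }

    static struct nvmf_transport_ops example_transport = {
        .name           = "example",
        .required_opts  = NVMF_OPT_TRADDR,
        .allowed_opts   = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY,
        .create_ctrl    = example_create_ctrl,
    };

    static int __init example_init(void)
    {
        nvmf_register_transport(&example_transport);
        return 0;
    }

    static void __exit example_exit(void)
    {
        nvmf_unregister_transport(&example_transport);
    }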
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index a0af05583..63f483daf 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -156,7 +156,7 @@ struct nvme_nvm_completion {
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
- __u16 num_pairs;
+ __le16 num_pairs;
__u8 pairs[NVME_NVM_LP_MLC_PAIRS];
};
@@ -500,7 +500,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
struct bio *bio = rqd->bio;
struct nvme_nvm_command *cmd;
- rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
+ rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
if (IS_ERR(rq))
return -ENOMEM;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1daa0482d..ab18b7810 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -38,6 +38,11 @@ extern unsigned char admin_timeout;
extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ)
+#define NVME_DEFAULT_KATO 5
+#define NVME_KATO_GRACE 10
+
+extern unsigned int nvme_max_retries;
+
enum {
NVME_NS_LBA = 0,
NVME_NS_LIGHTNVM = 1,
@@ -65,12 +70,26 @@ enum nvme_quirks {
* logical blocks.
*/
NVME_QUIRK_DISCARD_ZEROES = (1 << 2),
+
+ /*
+ * The controller needs a delay before it starts checking the device
+ * readiness, which is done by reading the NVME_CSTS_RDY bit.
+ */
+ NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),
};
+/* The below value is the specific amount of delay needed before checking
+ * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
+ * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
+ * found empirically.
+ */
+#define NVME_QUIRK_DELAY_AMOUNT 2000
+
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
NVME_CTRL_RESETTING,
+ NVME_CTRL_RECONNECTING,
NVME_CTRL_DELETING,
NVME_CTRL_DEAD,
};
@@ -80,6 +99,7 @@ struct nvme_ctrl {
spinlock_t lock;
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
+ struct request_queue *connect_q;
struct device *dev;
struct kref kref;
int instance;
@@ -107,10 +127,22 @@ struct nvme_ctrl {
u8 event_limit;
u8 vwc;
u32 vs;
+ u32 sgls;
+ u16 kas;
+ unsigned int kato;
bool subsystem;
unsigned long quirks;
struct work_struct scan_work;
struct work_struct async_event_work;
+ struct delayed_work ka_work;
+
+ /* Fabrics only */
+ u16 sqsize;
+ u32 ioccsz;
+ u32 iorcsz;
+ u16 icdoff;
+ u16 maxcmd;
+ struct nvmf_ctrl_options *opts;
};
/*
@@ -144,7 +176,9 @@ struct nvme_ns {
};
struct nvme_ctrl_ops {
+ const char *name;
struct module *module;
+ bool is_fabrics;
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -152,6 +186,9 @@ struct nvme_ctrl_ops {
void (*free_ctrl)(struct nvme_ctrl *ctrl);
void (*post_scan)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
+ int (*delete_ctrl)(struct nvme_ctrl *ctrl);
+ const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
+ int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
@@ -177,7 +214,7 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
static inline unsigned nvme_map_len(struct request *rq)
{
- if (rq->cmd_flags & REQ_DISCARD)
+ if (req_op(rq) == REQ_OP_DISCARD)
return sizeof(struct nvme_dsm_range);
else
return blk_rq_bytes(rq);
@@ -185,7 +222,7 @@ static inline unsigned nvme_map_len(struct request *rq)
static inline void nvme_cleanup_cmd(struct request *req)
{
- if (req->cmd_flags & REQ_DISCARD)
+ if (req_op(req) == REQ_OP_DISCARD)
kfree(req->completion_data);
}
@@ -204,9 +241,11 @@ static inline int nvme_error_status(u16 status)
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
- (jiffies - req->start_time) < req->timeout;
+ (jiffies - req->start_time) < req->timeout &&
+ req->retries < nvme_max_retries;
}
+void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
@@ -230,8 +269,9 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
+#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, unsigned int flags);
+ struct nvme_command *cmd, unsigned int flags, int qid);
void nvme_requeue_req(struct request *req);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
@@ -239,7 +279,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
struct nvme_completion *cqe, void *buffer, unsigned bufflen,
- unsigned timeout);
+ unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *ubuffer, unsigned bufflen, u32 *result,
unsigned timeout);
@@ -256,6 +296,8 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
+void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
+void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
struct sg_io_hdr;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index befac5b19..60f7eab11 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -310,6 +310,11 @@ static int nvme_init_iod(struct request *rq, unsigned size,
iod->npages = -1;
iod->nents = 0;
iod->length = size;
+
+ if (!(rq->cmd_flags & REQ_DONTPREP)) {
+ rq->retries = 0;
+ rq->cmd_flags |= REQ_DONTPREP;
+ }
return 0;
}
@@ -520,8 +525,8 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out_unmap;
}
- cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
- cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+ cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
if (blk_integrity_rq(req))
cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
return BLK_MQ_RQ_QUEUE_OK;
@@ -623,6 +628,7 @@ static void nvme_complete_rq(struct request *req)
if (unlikely(req->errors)) {
if (nvme_req_needs_retry(req, req->errors)) {
+ req->retries++;
nvme_requeue_req(req);
return;
}
@@ -901,7 +907,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
req->tag, nvmeq->qid);
abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
- BLK_MQ_REQ_NOWAIT);
+ BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
if (IS_ERR(abort_req)) {
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
@@ -919,22 +925,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
return BLK_EH_RESET_TIMER;
}
-static void nvme_cancel_io(struct request *req, void *data, bool reserved)
-{
- int status;
-
- if (!blk_mq_request_started(req))
- return;
-
- dev_dbg_ratelimited(((struct nvme_dev *) data)->ctrl.device,
- "Cancelling I/O %d", req->tag);
-
- status = NVME_SC_ABORT_REQ;
- if (blk_queue_dying(req->q))
- status |= NVME_SC_DNR;
- blk_mq_complete_request(req, status);
-}
-
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
@@ -1399,16 +1389,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result < 0)
return result;
- /*
- * Degraded controllers might return an error when setting the queue
- * count. We still want to be able to bring them online and offer
- * access to the admin queue, as that might be only way to fix them up.
- */
- if (result > 0) {
- dev_err(dev->ctrl.device,
- "Could not set queue count (%d)\n", result);
+ if (nr_io_queues == 0)
return 0;
- }
if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -1536,7 +1518,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
cmd.delete_queue.opcode = opcode;
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
- req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+ req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1561,15 +1543,10 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
reinit_completion(&dev->ioq_wait);
retry:
timeout = ADMIN_TIMEOUT;
- for (; i > 0; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
-
- if (!pass)
- nvme_suspend_queue(nvmeq);
- if (nvme_delete_queue(nvmeq, opcode))
+ for (; i > 0; i--, sent++)
+ if (nvme_delete_queue(dev->queues[i], opcode))
break;
- ++sent;
- }
+
while (sent--) {
timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
if (timeout == 0)
@@ -1679,14 +1656,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
static void nvme_dev_unmap(struct nvme_dev *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int bars;
-
if (dev->bar)
iounmap(dev->bar);
-
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(to_pci_dev(dev->dev));
}
static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1716,19 +1688,25 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_stop_queues(&dev->ctrl);
csts = readl(dev->bar + NVME_REG_CSTS);
}
+
+ for (i = dev->queue_count - 1; i > 0; i--)
+ nvme_suspend_queue(dev->queues[i]);
+
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
- for (i = dev->queue_count - 1; i >= 0; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
- nvme_suspend_queue(nvmeq);
- }
+ /* A device might become IO incapable very soon during
+ * probe, before the admin queue is configured. Thus,
+ * queue_count can be 0 here.
+ */
+ if (dev->queue_count)
+ nvme_suspend_queue(dev->queues[0]);
} else {
nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
nvme_pci_disable(dev);
- blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
- blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
+ blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
+ blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
mutex_unlock(&dev->shutdown_lock);
}
@@ -1902,6 +1880,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+ .name = "pcie",
.module = THIS_MODULE,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
@@ -1914,13 +1893,9 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
static int nvme_dev_map(struct nvme_dev *dev)
{
- int bars;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (!bars)
- return -ENODEV;
- if (pci_request_selected_regions(pdev, bars, "nvme"))
+ if (pci_request_mem_regions(pdev, "nvme"))
return -ENODEV;
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -1929,7 +1904,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
return 0;
release:
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(pdev);
return -ENODEV;
}
@@ -1940,7 +1915,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
node = dev_to_node(&pdev->dev);
if (node == NUMA_NO_NODE)
- set_dev_node(&pdev->dev, 0);
+ set_dev_node(&pdev->dev, first_memory_node);
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
@@ -2037,6 +2012,24 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_put_ctrl(&dev->ctrl);
}
+static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ int ret = 0;
+
+ if (numvfs == 0) {
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "Cannot disable SR-IOV VFs while assigned\n");
+ return -EPERM;
+ }
+ pci_disable_sriov(pdev);
+ return 0;
+ }
+
+ ret = pci_enable_sriov(pdev, numvfs);
+ return ret ? ret : numvfs;
+}
+
#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
@@ -2122,6 +2115,10 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_DISCARD_ZEROES, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
+ { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ 0, }
@@ -2137,6 +2134,7 @@ static struct pci_driver nvme_driver = {
.driver = {
.pm = &nvme_dev_pm_ops,
},
+ .sriov_configure = nvme_pci_sriov_configure,
.err_handler = &nvme_err_handler,
};
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
new file mode 100644
index 000000000..fbdb2267e
--- /dev/null
+++ b/drivers/nvme/host/rdma.c
@@ -0,0 +1,2033 @@
+/*
+ * NVMe over Fabrics RDMA host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/nvme.h>
+#include <asm/unaligned.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_cm.h>
+#include <linux/nvme-rdma.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+
+
+#define NVME_RDMA_CONNECT_TIMEOUT_MS 1000 /* 1 second */
+
+#define NVME_RDMA_MAX_SEGMENT_SIZE 0xffffff /* 24-bit SGL field */
+
+#define NVME_RDMA_MAX_SEGMENTS 256
+
+#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
+
+/*
+ * We handle AEN commands ourselves and don't even let the
+ * block layer know about them.
+ */
+#define NVME_RDMA_NR_AEN_COMMANDS 1
+#define NVME_RDMA_AQ_BLKMQ_DEPTH \
+ (NVMF_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
+
+struct nvme_rdma_device {
+ struct ib_device *dev;
+ struct ib_pd *pd;
+ struct ib_mr *mr;
+ struct kref ref;
+ struct list_head entry;
+};
+
+struct nvme_rdma_qe {
+ struct ib_cqe cqe;
+ void *data;
+ u64 dma;
+};
+
+struct nvme_rdma_queue;
+struct nvme_rdma_request {
+ struct ib_mr *mr;
+ struct nvme_rdma_qe sqe;
+ struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
+ u32 num_sge;
+ int nents;
+ bool inline_data;
+ struct ib_reg_wr reg_wr;
+ struct ib_cqe reg_cqe;
+ struct nvme_rdma_queue *queue;
+ struct sg_table sg_table;
+ struct scatterlist first_sgl[];
+};
+
+enum nvme_rdma_queue_flags {
+ NVME_RDMA_Q_CONNECTED = (1 << 0),
+ NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
+ NVME_RDMA_Q_DELETING = (1 << 2),
+};
+
+struct nvme_rdma_queue {
+ struct nvme_rdma_qe *rsp_ring;
+ u8 sig_count;
+ int queue_size;
+ size_t cmnd_capsule_len;
+ struct nvme_rdma_ctrl *ctrl;
+ struct nvme_rdma_device *device;
+ struct ib_cq *ib_cq;
+ struct ib_qp *qp;
+
+ unsigned long flags;
+ struct rdma_cm_id *cm_id;
+ int cm_error;
+ struct completion cm_done;
+};
+
+struct nvme_rdma_ctrl {
+ /* read and written in the hot path */
+ spinlock_t lock;
+
+ /* read only in the hot path */
+ struct nvme_rdma_queue *queues;
+ u32 queue_count;
+
+ /* other member variables */
+ struct blk_mq_tag_set tag_set;
+ struct work_struct delete_work;
+ struct work_struct reset_work;
+ struct work_struct err_work;
+
+ struct nvme_rdma_qe async_event_sqe;
+
+ int reconnect_delay;
+ struct delayed_work reconnect_work;
+
+ struct list_head list;
+
+ struct blk_mq_tag_set admin_tag_set;
+ struct nvme_rdma_device *device;
+
+ u64 cap;
+ u32 max_fr_pages;
+
+ union {
+ struct sockaddr addr;
+ struct sockaddr_in addr_in;
+ };
+
+ struct nvme_ctrl ctrl;
+};
+
+static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
+}
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_mutex);
+
+static LIST_HEAD(nvme_rdma_ctrl_list);
+static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
+
+static struct workqueue_struct *nvme_rdma_wq;
+
+/*
+ * Disabling this option makes small I/O go faster, but is fundamentally
+ * unsafe. With it turned off we will have to register a global rkey that
+ * allows read and write access to all physical memory.
+ */
+static bool register_always = true;
+module_param(register_always, bool, 0444);
+MODULE_PARM_DESC(register_always,
+ "Use memory registration even for contiguous memory regions");
+
+static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event);
+static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+
+/* XXX: really should move to a generic header sooner or later.. */
+static inline void put_unaligned_le24(u32 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+ *p++ = val >> 16;
+}
+
+static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
+{
+ return queue - queue->ctrl->queues;
+}
+
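+/*
+ * In-capsule data space is whatever is left of the command capsule after
+ * the 64-byte SQE; for I/O queues the capsule is ioccsz * 16 bytes (see
+ * nvme_rdma_init_queue()).
+ */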
+static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
+{
+ return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+}
+
+static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
+ kfree(qe->data);
+}
+
+static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ qe->data = kzalloc(capsule_size, GFP_KERNEL);
+ if (!qe->data)
+ return -ENOMEM;
+
+ qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
+ if (ib_dma_mapping_error(ibdev, qe->dma)) {
+ kfree(qe->data);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void nvme_rdma_free_ring(struct ib_device *ibdev,
+ struct nvme_rdma_qe *ring, size_t ib_queue_size,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < ib_queue_size; i++)
+ nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
+ kfree(ring);
+}
+
+static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
+ size_t ib_queue_size, size_t capsule_size,
+ enum dma_data_direction dir)
+{
+ struct nvme_rdma_qe *ring;
+ int i;
+
+ ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ for (i = 0; i < ib_queue_size; i++) {
+ if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
+ goto out_free_ring;
+ }
+
+ return ring;
+
+out_free_ring:
+ nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
+ return NULL;
+}
+
+static void nvme_rdma_qp_event(struct ib_event *event, void *context)
+{
+ pr_debug("QP event %d\n", event->event);
+}
+
+static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
+{
+ wait_for_completion_interruptible_timeout(&queue->cm_done,
+ msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
+ return queue->cm_error;
+}
+
+static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
+{
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_qp_init_attr init_attr;
+ int ret;
+
+ memset(&init_attr, 0, sizeof(init_attr));
+ init_attr.event_handler = nvme_rdma_qp_event;
+ /* +1 for drain */
+ init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
+ /* +1 for drain */
+ init_attr.cap.max_recv_wr = queue->queue_size + 1;
+ init_attr.cap.max_recv_sge = 1;
+ init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.send_cq = queue->ib_cq;
+ init_attr.recv_cq = queue->ib_cq;
+
+ ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
+
+ queue->qp = queue->cm_id->qp;
+ return ret;
+}
+
+static int nvme_rdma_reinit_request(void *data, struct request *rq)
+{
+ struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_device *dev = ctrl->device;
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ int ret = 0;
+
+ if (!req->mr->need_inval)
+ goto out;
+
+ ib_dereg_mr(req->mr);
+
+ req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
+ ctrl->max_fr_pages);
+ if (IS_ERR(req->mr)) {
+ ret = PTR_ERR(req->mr);
+ req->mr = NULL;
+ goto out;
+ }
+
+ req->mr->need_inval = false;
+
+out:
+ return ret;
+}
+
+static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
+ struct request *rq, unsigned int queue_idx)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
+ struct nvme_rdma_device *dev = queue->device;
+
+ if (req->mr)
+ ib_dereg_mr(req->mr);
+
+ nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+}
+
+static void nvme_rdma_exit_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx)
+{
+ return __nvme_rdma_exit_request(data, rq, hctx_idx + 1);
+}
+
+static void nvme_rdma_exit_admin_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx)
+{
+ return __nvme_rdma_exit_request(data, rq, 0);
+}
+
+static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
+ struct request *rq, unsigned int queue_idx)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int ret;
+
+ BUG_ON(queue_idx >= ctrl->queue_count);
+
+ ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
+ ctrl->max_fr_pages);
+ if (IS_ERR(req->mr)) {
+ ret = PTR_ERR(req->mr);
+ goto out_free_qe;
+ }
+
+ req->queue = queue;
+
+ return 0;
+
+out_free_qe:
+ nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+static int nvme_rdma_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ return __nvme_rdma_init_request(data, rq, hctx_idx + 1);
+}
+
+static int nvme_rdma_init_admin_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ return __nvme_rdma_init_request(data, rq, 0);
+}
+
+static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_rdma_ctrl *ctrl = data;
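+ /* queues[0] is the admin queue, so I/O hctx index N maps to queues[N + 1] */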
+ struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
+
+ BUG_ON(hctx_idx >= ctrl->queue_count);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_queue *queue = &ctrl->queues[0];
+
+ BUG_ON(hctx_idx != 0);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static void nvme_rdma_free_dev(struct kref *ref)
+{
+ struct nvme_rdma_device *ndev =
+ container_of(ref, struct nvme_rdma_device, ref);
+
+ mutex_lock(&device_list_mutex);
+ list_del(&ndev->entry);
+ mutex_unlock(&device_list_mutex);
+
+ if (!register_always)
+ ib_dereg_mr(ndev->mr);
+ ib_dealloc_pd(ndev->pd);
+
+ kfree(ndev);
+}
+
+static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
+{
+ kref_put(&dev->ref, nvme_rdma_free_dev);
+}
+
+static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
+{
+ return kref_get_unless_zero(&dev->ref);
+}
+
+static struct nvme_rdma_device *
+nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
+{
+ struct nvme_rdma_device *ndev;
+
+ mutex_lock(&device_list_mutex);
+ list_for_each_entry(ndev, &device_list, entry) {
+ if (ndev->dev->node_guid == cm_id->device->node_guid &&
+ nvme_rdma_dev_get(ndev))
+ goto out_unlock;
+ }
+
+ ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ goto out_err;
+
+ ndev->dev = cm_id->device;
+ kref_init(&ndev->ref);
+
+ ndev->pd = ib_alloc_pd(ndev->dev);
+ if (IS_ERR(ndev->pd))
+ goto out_free_dev;
+
+ if (!register_always) {
+ ndev->mr = ib_get_dma_mr(ndev->pd,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE);
+ if (IS_ERR(ndev->mr))
+ goto out_free_pd;
+ }
+
+ if (!(ndev->dev->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ dev_err(&ndev->dev->dev,
+ "Memory registrations not supported.\n");
+ goto out_free_mr;
+ }
+
+ list_add(&ndev->entry, &device_list);
+out_unlock:
+ mutex_unlock(&device_list_mutex);
+ return ndev;
+
+out_free_mr:
+ if (!register_always)
+ ib_dereg_mr(ndev->mr);
+out_free_pd:
+ ib_dealloc_pd(ndev->pd);
+out_free_dev:
+ kfree(ndev);
+out_err:
+ mutex_unlock(&device_list_mutex);
+ return NULL;
+}
+
+static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_device *dev;
+ struct ib_device *ibdev;
+
+ if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
+ return;
+
+ dev = queue->device;
+ ibdev = dev->dev;
+ rdma_destroy_qp(queue->cm_id);
+ ib_free_cq(queue->ib_cq);
+
+ nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+ sizeof(struct nvme_completion), DMA_FROM_DEVICE);
+
+ nvme_rdma_dev_put(dev);
+}
+
+static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_device *dev)
+{
+ struct ib_device *ibdev = dev->dev;
+ const int send_wr_factor = 3; /* MR, SEND, INV */
+ const int cq_factor = send_wr_factor + 1; /* + RECV */
+ int comp_vector, idx = nvme_rdma_queue_idx(queue);
+
+ int ret;
+
+ queue->device = dev;
+
+ /*
+ * The admin queue is barely used once the controller is live, so don't
+ * bother to spread it out.
+ */
+ if (idx == 0)
+ comp_vector = 0;
+ else
+ comp_vector = idx % ibdev->num_comp_vectors;
+
+ /* +1 for ib_stop_cq */
+ queue->ib_cq = ib_alloc_cq(dev->dev, queue,
+ cq_factor * queue->queue_size + 1, comp_vector,
+ IB_POLL_SOFTIRQ);
+ if (IS_ERR(queue->ib_cq)) {
+ ret = PTR_ERR(queue->ib_cq);
+ goto out;
+ }
+
+ ret = nvme_rdma_create_qp(queue, send_wr_factor);
+ if (ret)
+ goto out_destroy_ib_cq;
+
+ queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
+ sizeof(struct nvme_completion), DMA_FROM_DEVICE);
+ if (!queue->rsp_ring) {
+ ret = -ENOMEM;
+ goto out_destroy_qp;
+ }
+ set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
+
+ return 0;
+
+out_destroy_qp:
+ ib_destroy_qp(queue->qp);
+out_destroy_ib_cq:
+ ib_free_cq(queue->ib_cq);
+out:
+ return ret;
+}
+
+static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
+ int idx, size_t queue_size)
+{
+ struct nvme_rdma_queue *queue;
+ int ret;
+
+ queue = &ctrl->queues[idx];
+ queue->ctrl = ctrl;
+ init_completion(&queue->cm_done);
+
+ if (idx > 0)
+ queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
+ else
+ queue->cmnd_capsule_len = sizeof(struct nvme_command);
+
+ queue->queue_size = queue_size;
+
+ queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(queue->cm_id)) {
+ dev_info(ctrl->ctrl.device,
+ "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
+ return PTR_ERR(queue->cm_id);
+ }
+
+ queue->cm_error = -ETIMEDOUT;
+ ret = rdma_resolve_addr(queue->cm_id, NULL, &ctrl->addr,
+ NVME_RDMA_CONNECT_TIMEOUT_MS);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "rdma_resolve_addr failed (%d).\n", ret);
+ goto out_destroy_cm_id;
+ }
+
+ ret = nvme_rdma_wait_for_cm(queue);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "rdma_resolve_addr wait failed (%d).\n", ret);
+ goto out_destroy_cm_id;
+ }
+
+ clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
+ set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
+
+ return 0;
+
+out_destroy_cm_id:
+ nvme_rdma_destroy_queue_ib(queue);
+ rdma_destroy_id(queue->cm_id);
+ return ret;
+}
+
+static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+{
+ rdma_disconnect(queue->cm_id);
+ ib_drain_qp(queue->qp);
+}
+
+static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
+{
+ nvme_rdma_destroy_queue_ib(queue);
+ rdma_destroy_id(queue->cm_id);
+}
+
+static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
+{
+ if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
+ return;
+ nvme_rdma_stop_queue(queue);
+ nvme_rdma_free_queue(queue);
+}
+
+static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
+}
+
+static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ int i, ret = 0;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ int i, ret;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvme_rdma_init_queue(ctrl, i,
+ ctrl->ctrl.opts->queue_size);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "failed to initialize i/o queue: %d\n", ret);
+ goto out_free_queues;
+ }
+ }
+
+ return 0;
+
+out_free_queues:
+ for (i--; i >= 1; i--)
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
+
+ return ret;
+}
+
+static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
+{
+ nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_rdma_dev_put(ctrl->device);
+}
+
+static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ if (list_empty(&ctrl->list))
+ goto free_ctrl;
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_del(&ctrl->list);
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ kfree(ctrl->queues);
+ nvmf_free_options(nctrl->opts);
+free_ctrl:
+ kfree(ctrl);
+}
+
+static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_rdma_ctrl, reconnect_work);
+ bool changed;
+ int ret;
+
+ if (ctrl->queue_count > 1) {
+ nvme_rdma_free_io_queues(ctrl);
+
+ ret = blk_mq_reinit_tagset(&ctrl->tag_set);
+ if (ret)
+ goto requeue;
+ }
+
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
+
+ ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set);
+ if (ret)
+ goto requeue;
+
+ ret = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
+ if (ret)
+ goto requeue;
+
+ blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
+
+ ret = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (ret)
+ goto stop_admin_q;
+
+ ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+ if (ret)
+ goto stop_admin_q;
+
+ nvme_start_keep_alive(&ctrl->ctrl);
+
+ if (ctrl->queue_count > 1) {
+ ret = nvme_rdma_init_io_queues(ctrl);
+ if (ret)
+ goto stop_admin_q;
+
+ ret = nvme_rdma_connect_io_queues(ctrl);
+ if (ret)
+ goto stop_admin_q;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ if (ctrl->queue_count > 1) {
+ nvme_start_queues(&ctrl->ctrl);
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
+
+ dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
+
+ return;
+
+stop_admin_q:
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+requeue:
+ /* Make sure we are not resetting/deleting */
+ if (ctrl->ctrl.state == NVME_CTRL_RECONNECTING) {
+ dev_info(ctrl->ctrl.device,
+ "Failed reconnect attempt, requeueing...\n");
+ queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
+ ctrl->reconnect_delay * HZ);
+ }
+}
+
+static void nvme_rdma_error_recovery_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, err_work);
+ int i;
+
+ nvme_stop_keep_alive(&ctrl->ctrl);
+
+ for (i = 0; i < ctrl->queue_count; i++)
+ clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+
+ if (ctrl->queue_count > 1)
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+
+ /* We must fast-fail or requeue all of our in-flight requests */
+ if (ctrl->queue_count > 1)
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+
+ dev_info(ctrl->ctrl.device, "reconnecting in %d seconds\n",
+ ctrl->reconnect_delay);
+
+ queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
+ ctrl->reconnect_delay * HZ);
+}
+
+static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
+ return;
+
+ queue_work(nvme_rdma_wq, &ctrl->err_work);
+}
+
+static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
+ const char *op)
+{
+ struct nvme_rdma_queue *queue = cq->cq_context;
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+
+ if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ dev_info(ctrl->ctrl.device,
+ "%s for CQE 0x%p failed with status %s (%d)\n",
+ op, wc->wr_cqe,
+ ib_wc_status_msg(wc->status), wc->status);
+ nvme_rdma_error_recovery(ctrl);
+}
+
+static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "MEMREG");
+}
+
+static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
+}
+
+static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req)
+{
+ struct ib_send_wr *bad_wr;
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .next = NULL,
+ .num_sge = 0,
+ .send_flags = 0,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+
+ req->reg_cqe.done = nvme_rdma_inv_rkey_done;
+ wr.wr_cqe = &req->reg_cqe;
+
+ return ib_post_send(queue->qp, &wr, &bad_wr);
+}
+
+static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
+ struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int res;
+
+ if (!blk_rq_bytes(rq))
+ return;
+
+ if (req->mr->need_inval) {
+ res = nvme_rdma_inv_rkey(queue, req);
+ if (res < 0) {
+ dev_err(ctrl->ctrl.device,
+ "Queueing INV WR for rkey %#x failed (%d)\n",
+ req->mr->rkey, res);
+ nvme_rdma_error_recovery(queue->ctrl);
+ }
+ }
+
+ ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
+ req->nents, rq_data_dir(rq) ==
+ WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ nvme_cleanup_cmd(rq);
+ sg_free_table_chained(&req->sg_table, true);
+}
+
+static int nvme_rdma_set_sg_null(struct nvme_command *c)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+
+ sg->addr = 0;
+ put_unaligned_le24(0, sg->length);
+ put_unaligned_le32(0, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+ return 0;
+}
+
+static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c)
+{
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+
+ req->sge[1].addr = sg_dma_address(req->sg_table.sgl);
+ req->sge[1].length = sg_dma_len(req->sg_table.sgl);
+ req->sge[1].lkey = queue->device->pd->local_dma_lkey;
+
+ sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
+ sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
+ sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
+
+ req->inline_data = true;
+ req->num_sge++;
+ return 0;
+}
+
+static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+
+ sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
+ put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
+ put_unaligned_le32(queue->device->mr->rkey, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+ return 0;
+}
+
+static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c,
+ int count)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ int nr;
+
+ nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
+ if (nr < count) {
+ if (nr < 0)
+ return nr;
+ return -EINVAL;
+ }
+
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ req->reg_cqe.done = nvme_rdma_memreg_done;
+ memset(&req->reg_wr, 0, sizeof(req->reg_wr));
+ req->reg_wr.wr.opcode = IB_WR_REG_MR;
+ req->reg_wr.wr.wr_cqe = &req->reg_cqe;
+ req->reg_wr.wr.num_sge = 0;
+ req->reg_wr.mr = req->mr;
+ req->reg_wr.key = req->mr->rkey;
+ req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+
+ req->mr->need_inval = true;
+
+ sg->addr = cpu_to_le64(req->mr->iova);
+ put_unaligned_le24(req->mr->length, sg->length);
+ put_unaligned_le32(req->mr->rkey, sg->key);
+ sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
+ NVME_SGL_FMT_INVALIDATE;
+
+ return 0;
+}
+
+static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+ struct request *rq, unsigned int map_len,
+ struct nvme_command *c)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int nents, count;
+ int ret;
+
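+ /* sge[0] always carries the command capsule; sge[1] is reserved for inline data */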
+ req->num_sge = 1;
+ req->inline_data = false;
+ req->mr->need_inval = false;
+
+ c->common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (!blk_rq_bytes(rq))
+ return nvme_rdma_set_sg_null(c);
+
+ req->sg_table.sgl = req->first_sgl;
+ ret = sg_alloc_table_chained(&req->sg_table, rq->nr_phys_segments,
+ req->sg_table.sgl);
+ if (ret)
+ return -ENOMEM;
+
+ nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
+ BUG_ON(nents > rq->nr_phys_segments);
+ req->nents = nents;
+
+ count = ib_dma_map_sg(ibdev, req->sg_table.sgl, nents,
+ rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (unlikely(count <= 0)) {
+ sg_free_table_chained(&req->sg_table, true);
+ return -EIO;
+ }
+
+ if (count == 1) {
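+ /*
+ * Inline data is only used for writes on I/O queues when the
+ * payload fits in the capsule; the admin queue never sends data
+ * inline.
+ */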
+ if (rq_data_dir(rq) == WRITE &&
+ map_len <= nvme_rdma_inline_data_size(queue) &&
+ nvme_rdma_queue_idx(queue))
+ return nvme_rdma_map_sg_inline(queue, req, c);
+
+ if (!register_always)
+ return nvme_rdma_map_sg_single(queue, req, c);
+ }
+
+ return nvme_rdma_map_sg_fr(queue, req, c, count);
+}
+
+static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "SEND");
+}
+
+static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
+ struct ib_send_wr *first, bool flush)
+{
+ struct ib_send_wr wr, *bad_wr;
+ int ret;
+
+ sge->addr = qe->dma;
+ sge->length = sizeof(struct nvme_command);
+ sge->lkey = queue->device->pd->local_dma_lkey;
+
+ qe->cqe.done = nvme_rdma_send_done;
+
+ wr.next = NULL;
+ wr.wr_cqe = &qe->cqe;
+ wr.sg_list = sge;
+ wr.num_sge = num_sge;
+ wr.opcode = IB_WR_SEND;
+ wr.send_flags = 0;
+
+ /*
+ * Unsignalled send completions are another giant disaster in the
+ * IB Verbs spec: if we don't regularly post signalled sends
+ * the send queue will fill up and only a QP reset will rescue us.
+ * It would have been way too obvious to handle this in hardware or
+ * at least in the RDMA stack..
+ *
+ * This messy and racy code snippet is copied and pasted from the iSER
+ * initiator, and the magic '32' comes from there as well.
+ *
+ * Always signal the flushes. The magic request used for the flush
+ * sequencer is not allocated in our driver's tagset and is freed by
+ * blk_cleanup_queue(). So we always need to mark it as signaled to
+ * ensure that the "wr_cqe", which is embedded in the request's
+ * payload, is not freed when __ib_process_cq() calls wr_cqe->done().
+ */
+ if ((++queue->sig_count % 32) == 0 || flush)
+ wr.send_flags |= IB_SEND_SIGNALED;
+
+ if (first)
+ first->next = &wr;
+ else
+ first = &wr;
+
+ ret = ib_post_send(queue->qp, first, &bad_wr);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "%s failed with error code %d\n", __func__, ret);
+ }
+ return ret;
+}
+
+static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_qe *qe)
+{
+ struct ib_recv_wr wr, *bad_wr;
+ struct ib_sge list;
+ int ret;
+
+ list.addr = qe->dma;
+ list.length = sizeof(struct nvme_completion);
+ list.lkey = queue->device->pd->local_dma_lkey;
+
+ qe->cqe.done = nvme_rdma_recv_done;
+
+ wr.next = NULL;
+ wr.wr_cqe = &qe->cqe;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+
+ ret = ib_post_recv(queue->qp, &wr, &bad_wr);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "%s failed with error code %d\n", __func__, ret);
+ }
+ return ret;
+}
+
+static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
+{
+ u32 queue_idx = nvme_rdma_queue_idx(queue);
+
+ if (queue_idx == 0)
+ return queue->ctrl->admin_tag_set.tags[queue_idx];
+ return queue->ctrl->tag_set.tags[queue_idx - 1];
+}
+
+static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
+ struct nvme_rdma_queue *queue = &ctrl->queues[0];
+ struct ib_device *dev = queue->device->dev;
+ struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
+ struct nvme_command *cmd = sqe->data;
+ struct ib_sge sge;
+ int ret;
+
+ if (WARN_ON_ONCE(aer_idx != 0))
+ return;
+
+ ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->common.opcode = nvme_admin_async_event;
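+ /*
+ * Use a command id just past the blk-mq tag space so that
+ * __nvme_rdma_recv_done() can tell AEN completions apart from
+ * regular requests.
+ */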
+ cmd->common.command_id = NVME_RDMA_AQ_BLKMQ_DEPTH;
+ cmd->common.flags |= NVME_CMD_SGL_METABUF;
+ nvme_rdma_set_sg_null(cmd);
+
+ ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
+ DMA_TO_DEVICE);
+
+ ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
+ WARN_ON_ONCE(ret);
+}
+
+static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
+ struct nvme_completion *cqe, struct ib_wc *wc, int tag)
+{
+ u16 status = le16_to_cpu(cqe->status);
+ struct request *rq;
+ struct nvme_rdma_request *req;
+ int ret = 0;
+
+ status >>= 1;
+
+ rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "tag 0x%x on QP %#x not found\n",
+ cqe->command_id, queue->qp->qp_num);
+ nvme_rdma_error_recovery(queue->ctrl);
+ return ret;
+ }
+ req = blk_mq_rq_to_pdu(rq);
+
+ if (rq->cmd_type == REQ_TYPE_DRV_PRIV && rq->special)
+ memcpy(rq->special, cqe, sizeof(*cqe));
+
+ if (rq->tag == tag)
+ ret = 1;
+
+ if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
+ wc->ex.invalidate_rkey == req->mr->rkey)
+ req->mr->need_inval = false;
+
+ blk_mq_complete_request(rq, status);
+
+ return ret;
+}
+
+static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
+{
+ struct nvme_rdma_qe *qe =
+ container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+ struct nvme_rdma_queue *queue = cq->cq_context;
+ struct ib_device *ibdev = queue->device->dev;
+ struct nvme_completion *cqe = qe->data;
+ const size_t len = sizeof(struct nvme_completion);
+ int ret = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvme_rdma_wr_error(cq, wc, "RECV");
+ return 0;
+ }
+
+ ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
+ /*
+ * AEN requests are special as they don't time out and can
+ * survive any kind of queue freeze and often don't respond to
+ * aborts. We don't even bother to allocate a struct request
+ * for them but rather special case them here.
+ */
+ if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
+ cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
+ nvme_complete_async_event(&queue->ctrl->ctrl, cqe);
+ else
+ ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
+ ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
+
+ nvme_rdma_post_recv(queue, qe);
+ return ret;
+}
+
+static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ __nvme_rdma_recv_done(cq, wc, -1);
+}
+
+static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
+{
+ int ret, i;
+
+ for (i = 0; i < queue->queue_size; i++) {
+ ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
+ if (ret)
+ goto out_destroy_queue_ib;
+ }
+
+ return 0;
+
+out_destroy_queue_ib:
+ nvme_rdma_destroy_queue_ib(queue);
+ return ret;
+}
+
+static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
+ struct rdma_cm_event *ev)
+{
+ if (ev->param.conn.private_data_len) {
+ struct nvme_rdma_cm_rej *rej =
+ (struct nvme_rdma_cm_rej *)ev->param.conn.private_data;
+
+ dev_err(queue->ctrl->ctrl.device,
+ "Connect rejected, status %d.\n", le16_to_cpu(rej->sts));
+ /* XXX: Think of something clever to do here... */
+ } else {
+ dev_err(queue->ctrl->ctrl.device,
+ "Connect rejected, no private data.\n");
+ }
+
+ return -ECONNRESET;
+}
+
+static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_device *dev;
+ int ret;
+
+ dev = nvme_rdma_find_get_device(queue->cm_id);
+ if (!dev) {
+ dev_err(queue->cm_id->device->dma_device,
+ "no client data found!\n");
+ return -ECONNREFUSED;
+ }
+
+ ret = nvme_rdma_create_queue_ib(queue, dev);
+ if (ret) {
+ nvme_rdma_dev_put(dev);
+ goto out;
+ }
+
+ ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "rdma_resolve_route failed (%d).\n",
+ queue->cm_error);
+ goto out_destroy_queue;
+ }
+
+ return 0;
+
+out_destroy_queue:
+ nvme_rdma_destroy_queue_ib(queue);
+out:
+ return ret;
+}
+
+static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ struct rdma_conn_param param = { };
+ struct nvme_rdma_cm_req priv = { };
+ int ret;
+
+ param.qp_num = queue->qp->qp_num;
+ param.flow_control = 1;
+
+ param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
+ /* maximum retry count */
+ param.retry_count = 7;
+ param.rnr_retry_count = 7;
+ param.private_data = &priv;
+ param.private_data_len = sizeof(priv);
+
+ priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+ priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
+ /*
+ * set the admin queue depth to the minimum size
+ * specified by the Fabrics standard.
+ */
+ if (priv.qid == 0) {
+ priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
+ priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+ } else {
+ /*
+ * The current interpretation of the fabrics spec is that,
+ * at minimum, hrqsize is sqsize + 1, i.e. a 1's based
+ * representation of sqsize.
+ */
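+ /* e.g. queue_size 128 gives a 0's based hsqsize of 127 and an hrqsize of 128 */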
+ priv.hrqsize = cpu_to_le16(queue->queue_size);
+ priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+ }
+
+ ret = rdma_connect(queue->cm_id, &param);
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "rdma_connect failed (%d).\n", ret);
+ goto out_destroy_queue_ib;
+ }
+
+ return 0;
+
+out_destroy_queue_ib:
+ nvme_rdma_destroy_queue_ib(queue);
+ return ret;
+}
+
+static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct nvme_rdma_queue *queue = cm_id->context;
+ int cm_error = 0;
+
+ dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
+ rdma_event_msg(ev->event), ev->event,
+ ev->status, cm_id);
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ cm_error = nvme_rdma_addr_resolved(queue);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ cm_error = nvme_rdma_route_resolved(queue);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ queue->cm_error = nvme_rdma_conn_established(queue);
+ /* complete cm_done regardless of success/failure */
+ complete(&queue->cm_done);
+ return 0;
+ case RDMA_CM_EVENT_REJECTED:
+ cm_error = nvme_rdma_conn_rejected(queue, ev);
+ break;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ dev_dbg(queue->ctrl->ctrl.device,
+ "CM error event %d\n", ev->event);
+ cm_error = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ dev_dbg(queue->ctrl->ctrl.device,
+ "disconnect received - connection closed\n");
+ nvme_rdma_error_recovery(queue->ctrl);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ /* device removal is handled via the ib_client API */
+ break;
+ default:
+ dev_err(queue->ctrl->ctrl.device,
+ "Unexpected RDMA CM event (%d)\n", ev->event);
+ nvme_rdma_error_recovery(queue->ctrl);
+ break;
+ }
+
+ if (cm_error) {
+ queue->cm_error = cm_error;
+ complete(&queue->cm_done);
+ }
+
+ return 0;
+}
+
+static enum blk_eh_timer_return
+nvme_rdma_timeout(struct request *rq, bool reserved)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+
+ /* queue error recovery */
+ nvme_rdma_error_recovery(req->queue->ctrl);
+
+ /* fail with DNR on cmd timeout */
+ rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+
+ return BLK_EH_HANDLED;
+}
+
+static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_rdma_queue *queue = hctx->driver_data;
+ struct request *rq = bd->rq;
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_qe *sqe = &req->sqe;
+ struct nvme_command *c = sqe->data;
+ bool flush = false;
+ struct ib_device *dev;
+ unsigned int map_len;
+ int ret;
+
+ WARN_ON_ONCE(rq->tag < 0);
+
+ dev = queue->device->dev;
+ ib_dma_sync_single_for_cpu(dev, sqe->dma,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+
+ ret = nvme_setup_cmd(ns, rq, c);
+ if (ret)
+ return ret;
+
+ c->common.command_id = rq->tag;
+ blk_mq_start_request(rq);
+
+ map_len = nvme_map_len(rq);
+ ret = nvme_rdma_map_data(queue, rq, map_len, c);
+ if (ret < 0) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Failed to map data (%d)\n", ret);
+ nvme_cleanup_cmd(rq);
+ goto err;
+ }
+
+ ib_dma_sync_single_for_device(dev, sqe->dma,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+
+ if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
+ flush = true;
+ ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
+ req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
+ if (ret) {
+ nvme_rdma_unmap_data(queue, rq);
+ goto err;
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+err:
+ return (ret == -ENOMEM || ret == -EAGAIN) ?
+ BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+}
+
+static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+{
+ struct nvme_rdma_queue *queue = hctx->driver_data;
+ struct ib_cq *cq = queue->ib_cq;
+ struct ib_wc wc;
+ int found = 0;
+
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ while (ib_poll_cq(cq, 1, &wc) > 0) {
+ struct ib_cqe *cqe = wc.wr_cqe;
+
+ if (cqe) {
+ if (cqe->done == nvme_rdma_recv_done)
+ found |= __nvme_rdma_recv_done(cq, &wc, tag);
+ else
+ cqe->done(cq, &wc);
+ }
+ }
+
+ return found;
+}
+
+static void nvme_rdma_complete_rq(struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+ int error = 0;
+
+ nvme_rdma_unmap_data(queue, rq);
+
+ if (unlikely(rq->errors)) {
+ if (nvme_req_needs_retry(rq, rq->errors)) {
+ nvme_requeue_req(rq);
+ return;
+ }
+
+ if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ error = rq->errors;
+ else
+ error = nvme_error_status(rq->errors);
+ }
+
+ blk_mq_end_request(rq, error);
+}
+
+static struct blk_mq_ops nvme_rdma_mq_ops = {
+ .queue_rq = nvme_rdma_queue_rq,
+ .complete = nvme_rdma_complete_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = nvme_rdma_init_request,
+ .exit_request = nvme_rdma_exit_request,
+ .reinit_request = nvme_rdma_reinit_request,
+ .init_hctx = nvme_rdma_init_hctx,
+ .poll = nvme_rdma_poll,
+ .timeout = nvme_rdma_timeout,
+};
+
+static struct blk_mq_ops nvme_rdma_admin_mq_ops = {
+ .queue_rq = nvme_rdma_queue_rq,
+ .complete = nvme_rdma_complete_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = nvme_rdma_init_admin_request,
+ .exit_request = nvme_rdma_exit_admin_request,
+ .reinit_request = nvme_rdma_reinit_request,
+ .init_hctx = nvme_rdma_init_admin_hctx,
+ .timeout = nvme_rdma_timeout,
+};
+
+static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
+{
+ int error;
+
+ error = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
+ if (error)
+ return error;
+
+ ctrl->device = ctrl->queues[0].device;
+
+ /*
+ * We need a reference on the device as long as the tag_set is alive,
+ * as the MRs in the request structures need a valid ib_device.
+ */
+ error = -EINVAL;
+ if (!nvme_rdma_dev_get(ctrl->device))
+ goto out_free_queue;
+
+ ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+ ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+
+ memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
+ ctrl->admin_tag_set.ops = &nvme_rdma_admin_mq_ops;
+ ctrl->admin_tag_set.queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
+ ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
+ ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
+ SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ ctrl->admin_tag_set.driver_data = ctrl;
+ ctrl->admin_tag_set.nr_hw_queues = 1;
+ ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+
+ error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ if (error)
+ goto out_put_dev;
+
+ ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+ if (IS_ERR(ctrl->ctrl.admin_q)) {
+ error = PTR_ERR(ctrl->ctrl.admin_q);
+ goto out_free_tagset;
+ }
+
+ error = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_queue;
+
+ error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (error) {
+ dev_err(ctrl->ctrl.device,
+ "prop_get NVME_REG_CAP failed\n");
+ goto out_cleanup_queue;
+ }
+
+ ctrl->ctrl.sqsize =
+ min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+
+ error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+ if (error)
+ goto out_cleanup_queue;
+
+ ctrl->ctrl.max_hw_sectors =
+ (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
+
+ error = nvme_init_identify(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_queue;
+
+ error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
+ &ctrl->async_event_sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ if (error)
+ goto out_cleanup_queue;
+
+ nvme_start_keep_alive(&ctrl->ctrl);
+
+ return 0;
+
+out_cleanup_queue:
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_free_tagset:
+ /* disconnect and drain the queue before freeing the tagset */
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_put_dev:
+ nvme_rdma_dev_put(ctrl->device);
+out_free_queue:
+ nvme_rdma_free_queue(&ctrl->queues[0]);
+ return error;
+}
+
+static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
+{
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ cancel_work_sync(&ctrl->err_work);
+ cancel_delayed_work_sync(&ctrl->reconnect_work);
+
+ if (ctrl->queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+ nvme_rdma_free_io_queues(ctrl);
+ }
+
+ if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
+ nvme_shutdown_ctrl(&ctrl->ctrl);
+
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl);
+}
+
+static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
+{
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ if (shutdown)
+ nvme_rdma_shutdown_ctrl(ctrl);
+
+ if (ctrl->ctrl.tagset) {
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ nvme_rdma_dev_put(ctrl->device);
+ }
+
+ nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static void nvme_rdma_del_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, delete_work);
+
+ __nvme_rdma_remove_ctrl(ctrl, true);
+}
+
+static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+ return -EBUSY;
+
+ if (!queue_work(nvme_rdma_wq, &ctrl->delete_work))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+ int ret = 0;
+
+ /*
+ * Keep a reference until all work is flushed since
+ * __nvme_rdma_del_ctrl can free the ctrl mem
+ */
+ if (!kref_get_unless_zero(&ctrl->ctrl.kref))
+ return -EBUSY;
+ ret = __nvme_rdma_del_ctrl(ctrl);
+ if (!ret)
+ flush_work(&ctrl->delete_work);
+ nvme_put_ctrl(&ctrl->ctrl);
+ return ret;
+}
+
+static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, delete_work);
+
+ __nvme_rdma_remove_ctrl(ctrl, false);
+}
+
+static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, reset_work);
+ int ret;
+ bool changed;
+
+ nvme_rdma_shutdown_ctrl(ctrl);
+
+ ret = nvme_rdma_configure_admin_queue(ctrl);
+ if (ret) {
+ /* ctrl is already shutdown, just remove the ctrl */
+ INIT_WORK(&ctrl->delete_work, nvme_rdma_remove_ctrl_work);
+ goto del_dead_ctrl;
+ }
+
+ if (ctrl->queue_count > 1) {
+ ret = blk_mq_reinit_tagset(&ctrl->tag_set);
+ if (ret)
+ goto del_dead_ctrl;
+
+ ret = nvme_rdma_init_io_queues(ctrl);
+ if (ret)
+ goto del_dead_ctrl;
+
+ ret = nvme_rdma_connect_io_queues(ctrl);
+ if (ret)
+ goto del_dead_ctrl;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ if (ctrl->queue_count > 1) {
+ nvme_start_queues(&ctrl->ctrl);
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
+
+ return;
+
+del_dead_ctrl:
+ /* Deleting this dead controller... */
+ dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
+ WARN_ON(!queue_work(nvme_rdma_wq, &ctrl->delete_work));
+}
+
+static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
+ return -EBUSY;
+
+ if (!queue_work(nvme_rdma_wq, &ctrl->reset_work))
+ return -EBUSY;
+
+ flush_work(&ctrl->reset_work);
+
+ return 0;
+}
+
+static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
+ .name = "rdma",
+ .module = THIS_MODULE,
+ .is_fabrics = true,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .reset_ctrl = nvme_rdma_reset_ctrl,
+ .free_ctrl = nvme_rdma_free_ctrl,
+ .submit_async_event = nvme_rdma_submit_async_event,
+ .delete_ctrl = nvme_rdma_del_ctrl,
+ .get_subsysnqn = nvmf_get_subsysnqn,
+ .get_address = nvmf_get_address,
+};
+
+static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ int ret;
+
+ ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+ if (ret)
+ return ret;
+
+ ctrl->queue_count = opts->nr_io_queues + 1;
+ if (ctrl->queue_count < 2)
+ return 0;
+
+ dev_info(ctrl->ctrl.device,
+ "creating %d I/O queues.\n", opts->nr_io_queues);
+
+ ret = nvme_rdma_init_io_queues(ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * We need a reference on the device as long as the tag_set is alive,
+ * as the MRs in the request structures need a valid ib_device.
+ */
+ ret = -EINVAL;
+ if (!nvme_rdma_dev_get(ctrl->device))
+ goto out_free_io_queues;
+
+ memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
+ ctrl->tag_set.ops = &nvme_rdma_mq_ops;
+ ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
+ ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+ ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
+ SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ ctrl->tag_set.driver_data = ctrl;
+ ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+ ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
+
+ ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ if (ret)
+ goto out_put_dev;
+ ctrl->ctrl.tagset = &ctrl->tag_set;
+
+ ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+ if (IS_ERR(ctrl->ctrl.connect_q)) {
+ ret = PTR_ERR(ctrl->ctrl.connect_q);
+ goto out_free_tag_set;
+ }
+
+ ret = nvme_rdma_connect_io_queues(ctrl);
+ if (ret)
+ goto out_cleanup_connect_q;
+
+ return 0;
+
+out_cleanup_connect_q:
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+out_free_tag_set:
+ blk_mq_free_tag_set(&ctrl->tag_set);
+out_put_dev:
+ nvme_rdma_dev_put(ctrl->device);
+out_free_io_queues:
+ nvme_rdma_free_io_queues(ctrl);
+ return ret;
+}
+
+static int nvme_rdma_parse_ipaddr(struct sockaddr_in *in_addr, char *p)
+{
+ u8 *addr = (u8 *)&in_addr->sin_addr.s_addr;
+ size_t buflen = strlen(p);
+
+ /* XXX: handle IPv6 addresses */
+
+ if (buflen > INET_ADDRSTRLEN)
+ return -EINVAL;
+ if (in4_pton(p, buflen, addr, '\0', NULL) == 0)
+ return -EINVAL;
+ in_addr->sin_family = AF_INET;
+ return 0;
+}
+
+static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_rdma_ctrl *ctrl;
+ int ret;
+ bool changed;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+ ctrl->ctrl.opts = opts;
+ INIT_LIST_HEAD(&ctrl->list);
+
+ ret = nvme_rdma_parse_ipaddr(&ctrl->addr_in, opts->traddr);
+ if (ret) {
+ pr_err("malformed IP address passed: %s\n", opts->traddr);
+ goto out_free_ctrl;
+ }
+
+ if (opts->mask & NVMF_OPT_TRSVCID) {
+ u16 port;
+
+ ret = kstrtou16(opts->trsvcid, 0, &port);
+ if (ret)
+ goto out_free_ctrl;
+
+ ctrl->addr_in.sin_port = cpu_to_be16(port);
+ } else {
+ ctrl->addr_in.sin_port = cpu_to_be16(NVME_RDMA_IP_PORT);
+ }
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+ 0 /* no quirks, we're perfect! */);
+ if (ret)
+ goto out_free_ctrl;
+
+ ctrl->reconnect_delay = opts->reconnect_delay;
+ INIT_DELAYED_WORK(&ctrl->reconnect_work,
+ nvme_rdma_reconnect_ctrl_work);
+ INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
+ INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
+ INIT_WORK(&ctrl->reset_work, nvme_rdma_reset_ctrl_work);
+ spin_lock_init(&ctrl->lock);
+
+ ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.kato = opts->kato;
+
+ ret = -ENOMEM;
+ ctrl->queues = kcalloc(ctrl->queue_count, sizeof(*ctrl->queues),
+ GFP_KERNEL);
+ if (!ctrl->queues)
+ goto out_uninit_ctrl;
+
+ ret = nvme_rdma_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_kfree_queues;
+
+ /* sanity check icdoff */
+ if (ctrl->ctrl.icdoff) {
+ dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
+ goto out_remove_admin_queue;
+ }
+
+ /* sanity check keyed sgls */
+ if (!(ctrl->ctrl.sgls & (1 << 20))) {
+ dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not supported\n");
+ goto out_remove_admin_queue;
+ }
+
+ if (opts->queue_size > ctrl->ctrl.maxcmd) {
+ /* warn if maxcmd is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl maxcmd %u, clamping down\n",
+ opts->queue_size, ctrl->ctrl.maxcmd);
+ opts->queue_size = ctrl->ctrl.maxcmd;
+ }
+
+ if (opts->nr_io_queues) {
+ ret = nvme_rdma_create_io_queues(ctrl);
+ if (ret)
+ goto out_remove_admin_queue;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
+ ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+
+ kref_get(&ctrl->ctrl.kref);
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ if (opts->nr_io_queues) {
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
+
+ return &ctrl->ctrl;
+
+out_remove_admin_queue:
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl);
+out_kfree_queues:
+ kfree(ctrl->queues);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+ if (ret > 0)
+ ret = -EIO;
+ return ERR_PTR(ret);
+out_free_ctrl:
+ kfree(ctrl);
+ return ERR_PTR(ret);
+}
+
+static struct nvmf_transport_ops nvme_rdma_transport = {
+ .name = "rdma",
+ .required_opts = NVMF_OPT_TRADDR,
+ .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY,
+ .create_ctrl = nvme_rdma_create_ctrl,
+};
+
+static void nvme_rdma_add_one(struct ib_device *ib_device)
+{
+}
+
+static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
+{
+ struct nvme_rdma_ctrl *ctrl;
+
+ /* Delete all controllers using this device */
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
+ if (ctrl->device->dev != ib_device)
+ continue;
+ dev_info(ctrl->ctrl.device,
+ "Removing ctrl: NQN \"%s\", addr %pISp\n",
+ ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+ __nvme_rdma_del_ctrl(ctrl);
+ }
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ flush_workqueue(nvme_rdma_wq);
+}
+
+static struct ib_client nvme_rdma_ib_client = {
+ .name = "nvme_rdma",
+ .add = nvme_rdma_add_one,
+ .remove = nvme_rdma_remove_one
+};
+
+static int __init nvme_rdma_init_module(void)
+{
+ int ret;
+
+ nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
+ if (!nvme_rdma_wq)
+ return -ENOMEM;
+
+ ret = ib_register_client(&nvme_rdma_ib_client);
+ if (ret) {
+ destroy_workqueue(nvme_rdma_wq);
+ return ret;
+ }
+
+ nvmf_register_transport(&nvme_rdma_transport);
+ return 0;
+}
+
+static void __exit nvme_rdma_cleanup_module(void)
+{
+ nvmf_unregister_transport(&nvme_rdma_transport);
+ ib_unregister_client(&nvme_rdma_ib_client);
+ destroy_workqueue(nvme_rdma_wq);
+}
+
+module_init(nvme_rdma_init_module);
+module_exit(nvme_rdma_cleanup_module);
+
+MODULE_LICENSE("GPL v2");