Diffstat (limited to 'drivers/block/nvme-core.c')
-rw-r--r--  drivers/block/nvme-core.c | 339
1 file changed, 231 insertions(+), 108 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 7920c2741..ccc0c1f93 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -72,6 +72,10 @@ module_param(nvme_char_major, int, 0);
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
+static bool use_cmb_sqes = true;
+module_param(use_cmb_sqes, bool, 0644);
+MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
+
static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
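
The new parameter gates placement of I/O submission queues in the controller's memory buffer (CMB). Because it is registered with mode 0644 it is also visible after load under /sys/module/nvme/parameters/use_cmb_sqes, and loading with "modprobe nvme use_cmb_sqes=0" keeps the SQs in host memory even on controllers that expose a CMB (assuming the driver builds as nvme.ko, as it does in this tree).
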
@@ -103,6 +107,7 @@ struct nvme_queue {
char irqname[24]; /* nvme4294967295-65535\0 */
spinlock_t q_lock;
struct nvme_command *sq_cmds;
+ struct nvme_command __iomem *sq_cmds_io;
volatile struct nvme_completion *cqes;
struct blk_mq_tags **tags;
dma_addr_t sq_dma_addr;
@@ -379,27 +384,28 @@ static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
*
* Safe to use from interrupt context
*/
-static int __nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd)
{
u16 tail = nvmeq->sq_tail;
- memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+ if (nvmeq->sq_cmds_io)
+ memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
+ else
+ memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+
if (++tail == nvmeq->q_depth)
tail = 0;
writel(tail, nvmeq->q_db);
nvmeq->sq_tail = tail;
-
- return 0;
}
-static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
unsigned long flags;
- int ret;
spin_lock_irqsave(&nvmeq->q_lock, flags);
- ret = __nvme_submit_cmd(nvmeq, cmd);
+ __nvme_submit_cmd(nvmeq, cmd);
spin_unlock_irqrestore(&nvmeq->q_lock, flags);
- return ret;
}
static __le64 **iod_list(struct nvme_iod *iod)
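
Submission now has two copy paths because a queue's ring may live either in coherent host memory or in the CMB behind an __iomem mapping, which must not be dereferenced like ordinary memory. A minimal sketch of the idea, assuming the same two fields as above (illustrative, not the driver's literal code):

	/* Copy one command into slot `tail`, using the accessor that
	 * matches where the SQ ring actually lives. */
	static void sq_write_entry(struct nvme_queue *nvmeq, u16 tail,
				   const struct nvme_command *cmd)
	{
		if (nvmeq->sq_cmds_io)	/* ring in the CMB, ioremap_wc()'d */
			memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
		else			/* ring in coherent host DMA memory */
			memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	}

Dropping the int return is safe because neither copy path can fail, which in turn lets nvme_submit_cmd() and its callers shed their error handling (see the abort path below).
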
@@ -597,31 +603,34 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_iod *iod = ctx;
struct request *req = iod_get_private(iod);
struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
-
u16 status = le16_to_cpup(&cqe->status) >> 1;
+ bool requeue = false;
+ int error = 0;
if (unlikely(status)) {
if (!(status & NVME_SC_DNR || blk_noretry_request(req))
&& (jiffies - req->start_time) < req->timeout) {
unsigned long flags;
+ requeue = true;
blk_mq_requeue_request(req);
spin_lock_irqsave(req->q->queue_lock, flags);
if (!blk_queue_stopped(req->q))
blk_mq_kick_requeue_list(req->q);
spin_unlock_irqrestore(req->q->queue_lock, flags);
- return;
+ goto release_iod;
}
+
if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
if (cmd_rq->ctx == CMD_CTX_CANCELLED)
- req->errors = -EINTR;
+ error = -EINTR;
else
- req->errors = status;
+ error = status;
} else {
- req->errors = nvme_error_status(status);
+ error = nvme_error_status(status);
}
- } else
- req->errors = 0;
+ }
+
if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
u32 result = le32_to_cpup(&cqe->result);
req->special = (void *)(uintptr_t)result;
@@ -630,8 +639,9 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
if (cmd_rq->aborted)
dev_warn(nvmeq->dev->dev,
"completing aborted command with status:%04x\n",
- status);
+ error);
+release_iod:
if (iod->nents) {
dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -644,7 +654,8 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
}
nvme_free_iod(nvmeq->dev, iod);
- blk_mq_complete_request(req);
+ if (likely(!requeue))
+ blk_mq_complete_request(req, error);
}
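
Two fixes are folded into req_completion(): a request that has been requeued for retry must not also be completed, so the requeue flag routes the retry path through release_iod (the iod is still unmapped and freed) while skipping the final completion; and the status is now handed to blk_mq_complete_request() directly rather than stashed in req->errors first, matching the block-layer API change also visible in nvme_queue_rq() below.
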
/* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -730,18 +741,16 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
struct nvme_iod *iod)
{
- struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+ struct nvme_command cmnd;
- memcpy(cmnd, req->cmd, sizeof(struct nvme_command));
- cmnd->rw.command_id = req->tag;
+ memcpy(&cmnd, req->cmd, sizeof(cmnd));
+ cmnd.rw.command_id = req->tag;
if (req->nr_phys_segments) {
- cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
- cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+ cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
}
- if (++nvmeq->sq_tail == nvmeq->q_depth)
- nvmeq->sq_tail = 0;
- writel(nvmeq->sq_tail, nvmeq->q_db);
+ __nvme_submit_cmd(nvmeq, &cmnd);
}
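
This and the discard, flush, and read/write paths below all move to the same shape: compose the command in a stack-local struct nvme_command, then let __nvme_submit_cmd() do the single copy into the ring and the doorbell write. Building the entry in place was only correct while sq_cmds was plain host memory; with CMB-backed rings the in-place style would scatter many small field writes through an __iomem pointer, so copying one fully-built command is both correct and friendlier to the write-combined mapping.
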
/*
@@ -754,45 +763,41 @@ static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
{
struct nvme_dsm_range *range =
(struct nvme_dsm_range *)iod_list(iod)[0];
- struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+ struct nvme_command cmnd;
range->cattr = cpu_to_le32(0);
range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
- memset(cmnd, 0, sizeof(*cmnd));
- cmnd->dsm.opcode = nvme_cmd_dsm;
- cmnd->dsm.command_id = req->tag;
- cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
- cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
- cmnd->dsm.nr = 0;
- cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+ memset(&cmnd, 0, sizeof(cmnd));
+ cmnd.dsm.opcode = nvme_cmd_dsm;
+ cmnd.dsm.command_id = req->tag;
+ cmnd.dsm.nsid = cpu_to_le32(ns->ns_id);
+ cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma);
+ cmnd.dsm.nr = 0;
+ cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
- if (++nvmeq->sq_tail == nvmeq->q_depth)
- nvmeq->sq_tail = 0;
- writel(nvmeq->sq_tail, nvmeq->q_db);
+ __nvme_submit_cmd(nvmeq, &cmnd);
}
static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
int cmdid)
{
- struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+ struct nvme_command cmnd;
- memset(cmnd, 0, sizeof(*cmnd));
- cmnd->common.opcode = nvme_cmd_flush;
- cmnd->common.command_id = cmdid;
- cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+ memset(&cmnd, 0, sizeof(cmnd));
+ cmnd.common.opcode = nvme_cmd_flush;
+ cmnd.common.command_id = cmdid;
+ cmnd.common.nsid = cpu_to_le32(ns->ns_id);
- if (++nvmeq->sq_tail == nvmeq->q_depth)
- nvmeq->sq_tail = 0;
- writel(nvmeq->sq_tail, nvmeq->q_db);
+ __nvme_submit_cmd(nvmeq, &cmnd);
}
static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
struct nvme_ns *ns)
{
struct request *req = iod_get_private(iod);
- struct nvme_command *cmnd;
+ struct nvme_command cmnd;
u16 control = 0;
u32 dsmgmt = 0;
@@ -804,19 +809,16 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
- cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
- memset(cmnd, 0, sizeof(*cmnd));
-
- cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
- cmnd->rw.command_id = req->tag;
- cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
- cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
- cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
- cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+ memset(&cmnd, 0, sizeof(cmnd));
+ cmnd.rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+ cmnd.rw.command_id = req->tag;
+ cmnd.rw.nsid = cpu_to_le32(ns->ns_id);
+ cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
+ cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
- if (blk_integrity_rq(req)) {
- cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+ if (ns->ms) {
switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
control |= NVME_RW_PRINFO_PRCHK_GUARD;
@@ -825,19 +827,21 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
case NVME_NS_DPS_PI_TYPE2:
control |= NVME_RW_PRINFO_PRCHK_GUARD |
NVME_RW_PRINFO_PRCHK_REF;
- cmnd->rw.reftag = cpu_to_le32(
+ cmnd.rw.reftag = cpu_to_le32(
nvme_block_nr(ns, blk_rq_pos(req)));
break;
}
- } else if (ns->ms)
- control |= NVME_RW_PRINFO_PRACT;
+ if (blk_integrity_rq(req))
+ cmnd.rw.metadata =
+ cpu_to_le64(sg_dma_address(iod->meta_sg));
+ else
+ control |= NVME_RW_PRINFO_PRACT;
+ }
- cmnd->rw.control = cpu_to_le16(control);
- cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+ cmnd.rw.control = cpu_to_le16(control);
+ cmnd.rw.dsmgmt = cpu_to_le32(dsmgmt);
- if (++nvmeq->sq_tail == nvmeq->q_depth)
- nvmeq->sq_tail = 0;
- writel(nvmeq->sq_tail, nvmeq->q_db);
+ __nvme_submit_cmd(nvmeq, &cmnd);
return 0;
}
@@ -864,8 +868,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ns && ns->ms && !blk_integrity_rq(req)) {
if (!(ns->pi_type && ns->ms == 8) &&
req->cmd_type != REQ_TYPE_DRV_PRIV) {
- req->errors = -EFAULT;
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, -EFAULT);
return BLK_MQ_RQ_QUEUE_OK;
}
}
@@ -1080,7 +1083,8 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
c.common.command_id = req->tag;
blk_mq_free_request(req);
- return __nvme_submit_cmd(nvmeq, &c);
+ __nvme_submit_cmd(nvmeq, &c);
+ return 0;
}
static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
@@ -1103,7 +1107,8 @@ static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
cmd->common.command_id = req->tag;
- return nvme_submit_cmd(nvmeq, cmd);
+ nvme_submit_cmd(nvmeq, cmd);
+ return 0;
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1315,12 +1320,7 @@ static void nvme_abort_req(struct request *req)
dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
nvmeq->qid);
- if (nvme_submit_cmd(dev->queues[0], &cmd) < 0) {
- dev_warn(nvmeq->q_dmadev,
- "Could not abort I/O %d QID %d",
- req->tag, nvmeq->qid);
- blk_mq_free_request(abort_req);
- }
+ nvme_submit_cmd(dev->queues[0], &cmd);
}
static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
@@ -1374,7 +1374,8 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ if (nvmeq->sq_cmds)
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
kfree(nvmeq);
}
@@ -1447,6 +1448,47 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
spin_unlock_irq(&nvmeq->q_lock);
}
+static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
+ int entry_size)
+{
+ int q_depth = dev->q_depth;
+ unsigned q_size_aligned = roundup(q_depth * entry_size, dev->page_size);
+
+ if (q_size_aligned * nr_io_queues > dev->cmb_size) {
+ u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
+ mem_per_q = round_down(mem_per_q, dev->page_size);
+ q_depth = div_u64(mem_per_q, entry_size);
+
+ /*
+ * Ensure the reduced q_depth is above some threshold where it
+ * would be better to map queues in system memory with the
+ * original depth
+ */
+ if (q_depth < 64)
+ return -ENOMEM;
+ }
+
+ return q_depth;
+}
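
A worked pass through nvme_cmb_qdepth() with hypothetical numbers (none of these values come from the patch itself):

	/*
	 * Suppose dev->q_depth = 1024, entry_size = 64
	 * (sizeof(struct nvme_command)), dev->page_size = 4096,
	 * nr_io_queues = 16 and dev->cmb_size = 256 KiB.
	 *
	 *   q_size_aligned = roundup(1024 * 64, 4096) = 64 KiB per queue
	 *   16 queues * 64 KiB = 1 MiB > 256 KiB, so shrink each queue:
	 *   mem_per_q = 256 KiB / 16 = 16 KiB (already a page multiple)
	 *   q_depth   = 16 KiB / 64  = 256 entries
	 *
	 * 256 >= 64, so the CMB is used at the reduced depth; had the
	 * result dropped below 64, the -ENOMEM return would push the
	 * queues back to host memory at the original depth.
	 */
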
+
+static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+ int qid, int depth)
+{
+ if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+ unsigned offset = (qid - 1) *
+ roundup(SQ_SIZE(depth), dev->page_size);
+ nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+ nvmeq->sq_cmds_io = dev->cmb + offset;
+ } else {
+ nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->sq_cmds)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
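
Each I/O queue's ring is carved out of the CMB at a page-aligned stride, and the admin queue (qid 0) always stays in host memory. A sketch of the layout implied by the offset math above:

	/*
	 * stride = roundup(SQ_SIZE(depth), dev->page_size)
	 *   qid 1 ring at cmb + 0 * stride
	 *   qid 2 ring at cmb + 1 * stride
	 *   ...
	 * sq_dma_addr keeps the matching bus address so the create-SQ
	 * admin command can point the controller at its own buffer.
	 */

Note the matching teardown in nvme_free_queue() above: dma_free_coherent() now runs only when sq_cmds is set, since CMB-backed rings leave sq_cmds NULL and own no coherent allocation.
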
+
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth)
{
@@ -1459,9 +1501,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
if (!nvmeq->cqes)
goto free_nvmeq;
- nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
- &nvmeq->sq_dma_addr, GFP_KERNEL);
- if (!nvmeq->sq_cmds)
+ if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
goto free_cqdma;
nvmeq->q_dmadev = dev->dev;
@@ -1696,6 +1736,12 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
page_shift = dev_page_max;
}
+ dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
+ NVME_CAP_NSSRC(cap) : 0;
+
+ if (dev->subsystem && (readl(&dev->bar->csts) & NVME_CSTS_NSSRO))
+ writel(NVME_CSTS_NSSRO, &dev->bar->csts);
+
result = nvme_disable_ctrl(dev, cap);
if (result < 0)
return result;
@@ -1764,7 +1810,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
length = (io.nblocks + 1) << ns->lba_shift;
meta_len = (io.nblocks + 1) * ns->ms;
- metadata = (void __user *)(unsigned long)io.metadata;
+ metadata = (void __user *)(uintptr_t)io.metadata;
write = io.opcode & 1;
if (ns->ext) {
@@ -1804,7 +1850,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.metadata = cpu_to_le64(meta_dma);
status = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
- (void __user *)io.addr, length, NULL, 0);
+ (void __user *)(uintptr_t)io.addr, length, NULL, 0);
unmap:
if (meta) {
if (status == NVME_SC_SUCCESS && !write) {
@@ -1846,7 +1892,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c,
- NULL, (void __user *)cmd.addr, cmd.data_len,
+ NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
&cmd.result, timeout);
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
@@ -1856,6 +1902,15 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
return status;
}
+static int nvme_subsys_reset(struct nvme_dev *dev)
+{
+ if (!dev->subsystem)
+ return -ENOTTY;
+
+ writel(0x4E564D65, &dev->bar->nssr); /* "NVMe" */
+ return 0;
+}
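
The magic value written to NSSR is simply "NVMe" in ASCII, the one signature the NVMe specification defines for requesting an NVM subsystem reset:

	/* 0x4E564D65 == 'N' 'V' 'M' 'e' (0x4E 0x56 0x4D 0x65); any other
	 * value written to NSSR is ignored by the controller. */

dev->subsystem is latched during admin-queue setup (earlier hunk) from the capability register on controllers reporting version 1.1 or later, so this cleanly returns -ENOTTY where subsystem reset is unsupported.
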
+
static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
unsigned long arg)
{
@@ -1935,7 +1990,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
ns->queue->limits.discard_zeroes_data = 0;
ns->queue->limits.discard_alignment = logical_block_size;
ns->queue->limits.discard_granularity = logical_block_size;
- ns->queue->limits.max_discard_sectors = 0xffffffff;
+ blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
@@ -1989,7 +2044,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
!ns->ext)
nvme_init_integrity(ns);
- if (ns->ms && !blk_get_integrity(disk))
+ if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
set_capacity(disk, 0);
else
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -2020,7 +2075,10 @@ static int nvme_kthread(void *data)
spin_lock(&dev_list_lock);
list_for_each_entry_safe(dev, next, &dev_list, node) {
int i;
- if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
+ u32 csts = readl(&dev->bar->csts);
+
+ if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
+ csts & NVME_CSTS_CFS) {
if (work_busy(&dev->reset_work))
continue;
list_del_init(&dev->node);
@@ -2067,7 +2125,6 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
goto out_free_ns;
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
- queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, ns->queue);
ns->dev = dev;
ns->queue->queuedata = ns;
@@ -2081,12 +2138,16 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
list_add_tail(&ns->list, &dev->namespaces);
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
- if (dev->max_hw_sectors)
+ if (dev->max_hw_sectors) {
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
+ blk_queue_max_segments(ns->queue,
+ ((dev->max_hw_sectors << 9) / dev->page_size) + 1);
+ }
if (dev->stripe_size)
blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
if (dev->vwc & NVME_CTRL_VWC_PRESENT)
blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
+ blk_queue_virt_boundary(ns->queue, dev->page_size - 1);
disk->major = nvme_major;
disk->first_minor = 0;
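
Two queue-limit changes pair with the QUEUE_FLAG_SG_GAPS removal in the earlier hunk: blk_queue_virt_boundary() tells the block layer the device cannot take an SG element that straddles a gap within a page_size boundary (NVMe's PRP rule: every PRP entry after the first must point to the start of a page), and the segment cap is derived from the transfer cap. Worked numbers (hypothetical):

	/*
	 * Suppose dev->max_hw_sectors = 1024 (a 512 KiB transfer) and
	 * dev->page_size = 4096:
	 *
	 *   max_segments = (1024 << 9) / 4096 + 1 = 129
	 *
	 * The +1 covers a transfer that starts mid-page and therefore
	 * touches one extra page-sized segment.
	 */
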
@@ -2159,6 +2220,58 @@ static int set_queue_count(struct nvme_dev *dev, int count)
return min(result & 0xffff, result >> 16) + 1;
}
+static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+{
+ u64 szu, size, offset;
+ u32 cmbloc;
+ resource_size_t bar_size;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ void __iomem *cmb;
+ dma_addr_t dma_addr;
+
+ if (!use_cmb_sqes)
+ return NULL;
+
+ dev->cmbsz = readl(&dev->bar->cmbsz);
+ if (!(NVME_CMB_SZ(dev->cmbsz)))
+ return NULL;
+
+ cmbloc = readl(&dev->bar->cmbloc);
+
+ szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
+ size = szu * NVME_CMB_SZ(dev->cmbsz);
+ offset = szu * NVME_CMB_OFST(cmbloc);
+ bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+
+ if (offset > bar_size)
+ return NULL;
+
+ /*
+ * Controllers may support a CMB size larger than their BAR,
+ * for example, due to being behind a bridge. Reduce the CMB to
+ * the reported size of the BAR
+ */
+ if (size > bar_size - offset)
+ size = bar_size - offset;
+
+ dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+ cmb = ioremap_wc(dma_addr, size);
+ if (!cmb)
+ return NULL;
+
+ dev->cmb_dma_addr = dma_addr;
+ dev->cmb_size = size;
+ return cmb;
+}
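
CMBSZ gives the buffer size as SZ units of SZU granularity (4 KiB shifted left by 4*SZU), while CMBLOC names the BAR (BIR) and the offset into it. A hypothetical decode with made-up register fields:

	/*
	 * Suppose CMBSZ.SZU = 0 (4 KiB units), CMBSZ.SZ = 2048,
	 * CMBLOC.OFST = 0, CMBLOC.BIR = 2:
	 *
	 *   szu    = 1ULL << (12 + 4 * 0) = 4096
	 *   size   = 4096 * 2048         = 8 MiB
	 *   offset = 4096 * 0            = 0
	 *
	 * nvme_map_cmb() then ioremap_wc()'s 8 MiB starting at BAR2's
	 * bus address (clamped to the BAR's real length), so SQ entry
	 * writes can be write-combined over PCIe.
	 */
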
+
+static inline void nvme_release_cmb(struct nvme_dev *dev)
+{
+ if (dev->cmb) {
+ iounmap(dev->cmb);
+ dev->cmb = NULL;
+ }
+}
+
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
@@ -2177,6 +2290,15 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result < nr_io_queues)
nr_io_queues = result;
+ if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
+ result = nvme_cmb_qdepth(dev, nr_io_queues,
+ sizeof(struct nvme_command));
+ if (result > 0)
+ dev->q_depth = result;
+ else
+ nvme_release_cmb(dev);
+ }
+
size = db_bar_size(dev, nr_io_queues);
if (size > 8192) {
iounmap(dev->bar);
@@ -2321,6 +2443,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
list_sort(NULL, &dev->namespaces, ns_cmp);
}
+static void nvme_set_irq_hints(struct nvme_dev *dev)
+{
+ struct nvme_queue *nvmeq;
+ int i;
+
+ for (i = 0; i < dev->online_queues; i++) {
+ nvmeq = dev->queues[i];
+
+ if (!nvmeq->tags || !(*nvmeq->tags))
+ continue;
+
+ irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+ blk_mq_tags_cpumask(*nvmeq->tags));
+ }
+}
+
static void nvme_dev_scan(struct work_struct *work)
{
struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
@@ -2332,6 +2470,7 @@ static void nvme_dev_scan(struct work_struct *work)
return;
nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
kfree(ctrl);
+ nvme_set_irq_hints(dev);
}
/*
@@ -2344,7 +2483,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
int res;
- unsigned nn;
struct nvme_id_ctrl *ctrl;
int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
@@ -2354,7 +2492,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
return -EIO;
}
- nn = le32_to_cpup(&ctrl->nn);
dev->oncs = le16_to_cpup(&ctrl->oncs);
dev->abort_limit = ctrl->acl + 1;
dev->vwc = ctrl->vwc;
@@ -2440,6 +2577,8 @@ static int nvme_dev_map(struct nvme_dev *dev)
dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
+ dev->cmb = nvme_map_cmb(dev);
return 0;
@@ -2820,6 +2959,8 @@ static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
case NVME_IOCTL_RESET:
dev_warn(dev->dev, "resetting controller\n");
return nvme_reset(dev);
+ case NVME_IOCTL_SUBSYS_RESET:
+ return nvme_subsys_reset(dev);
default:
return -ENOTTY;
}
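
A minimal userspace sketch of driving the new ioctl; it assumes NVME_IOCTL_SUBSYS_RESET is exported through the uapi nvme header and that /dev/nvme0 is the controller character device (illustrative, not part of the patch):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/nvme.h>		/* NVME_IOCTL_SUBSYS_RESET */

	/* Ask one controller for an NVM subsystem reset. Returns 0 on
	 * success; the ioctl fails with ENOTTY when the controller does
	 * not advertise subsystem-reset support. */
	int nvme_subsys_reset_fd(const char *dev)	/* e.g. "/dev/nvme0" */
	{
		int fd = open(dev, O_RDWR);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, NVME_IOCTL_SUBSYS_RESET);
		close(fd);
		return ret;
	}
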
@@ -2833,22 +2974,6 @@ static const struct file_operations nvme_dev_fops = {
.compat_ioctl = nvme_dev_ioctl,
};
-static void nvme_set_irq_hints(struct nvme_dev *dev)
-{
- struct nvme_queue *nvmeq;
- int i;
-
- for (i = 0; i < dev->online_queues; i++) {
- nvmeq = dev->queues[i];
-
- if (!nvmeq->tags || !(*nvmeq->tags))
- continue;
-
- irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
- blk_mq_tags_cpumask(*nvmeq->tags));
- }
-}
-
static int nvme_dev_start(struct nvme_dev *dev)
{
int result;
@@ -2890,8 +3015,6 @@ static int nvme_dev_start(struct nvme_dev *dev)
if (result)
goto free_tags;
- nvme_set_irq_hints(dev);
-
dev->event_limit = 1;
return result;
@@ -2942,7 +3065,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
} else {
nvme_unfreeze_queues(dev);
nvme_dev_add(dev);
- nvme_set_irq_hints(dev);
}
return 0;
}
@@ -3145,6 +3267,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_dev_remove_admin(dev);
device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
nvme_free_queues(dev, 0);
+ nvme_release_cmb(dev);
nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}