author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-03-25 03:53:42 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-03-25 03:53:42 -0300
commit     03dd4cb26d967f9588437b0fc9cc0e8353322bb7 (patch)
tree       fa581f6dc1c0596391690d1f67eceef3af8246dc /drivers/lightnvm
parent     d4e493caf788ef44982e131ff9c786546904d934 (diff)
Linux-libre 4.5-gnu
Diffstat (limited to 'drivers/lightnvm')
-rw-r--r--  drivers/lightnvm/Makefile     2
-rw-r--r--  drivers/lightnvm/core.c     347
-rw-r--r--  drivers/lightnvm/gennvm.c   198
-rw-r--r--  drivers/lightnvm/rrpc.c     106
-rw-r--r--  drivers/lightnvm/rrpc.h      18
-rw-r--r--  drivers/lightnvm/sysblk.c   741
6 files changed, 1261 insertions, 151 deletions
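
Note (illustrative, not part of the patch): the core.c changes below wire two new commands, NVM_DEV_INIT and NVM_DEV_FACTORY, into the lightnvm control ioctl. A minimal userspace sketch of driving the new init path could look like the following, assuming the uapi definitions from <linux/lightnvm.h> and the usual /dev/lightnvm/control node; the device name and media manager type are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/lightnvm.h>

int main(void)
{
	/* Hypothetical example: register "nvme0n1" with the gennvm media manager. */
	struct nvm_ioctl_dev_init init;
	int fd, ret;

	memset(&init, 0, sizeof(init));
	strncpy(init.dev, "nvme0n1", sizeof(init.dev) - 1);      /* placeholder device */
	strncpy(init.mmtype, "gennvm", sizeof(init.mmtype) - 1); /* media manager name */
	init.flags = 0;                                          /* no flags are supported */

	fd = open("/dev/lightnvm/control", O_RDWR);              /* assumed control node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ret = ioctl(fd, NVM_DEV_INIT, &init);
	if (ret < 0)
		perror("NVM_DEV_INIT");

	close(fd);
	return ret < 0;
}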
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 7e0f42acb..a7a0a22cf 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,6 @@
# Makefile for Open-Channel SSDs.
#
-obj-$(CONFIG_NVM) := core.o
+obj-$(CONFIG_NVM) := core.o sysblk.o
obj-$(CONFIG_NVM_GENNVM) += gennvm.o
obj-$(CONFIG_NVM_RRPC) += rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 8f41b245c..9f6acd5d1 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
+#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
static LIST_HEAD(nvm_targets);
@@ -105,6 +106,9 @@ struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
lockdep_assert_held(&nvm_lock);
list_for_each_entry(mt, &nvm_mgrs, list) {
+ if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
+ continue;
+
ret = mt->register_mgr(dev);
if (ret < 0) {
pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
@@ -166,6 +170,20 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
return NULL;
}
+struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
+ unsigned long flags)
+{
+ return dev->mt->get_blk_unlocked(dev, lun, flags);
+}
+EXPORT_SYMBOL(nvm_get_blk_unlocked);
+
+/* Assumes that all valid pages have already been moved on release to bm */
+void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
+{
+ return dev->mt->put_blk_unlocked(dev, blk);
+}
+EXPORT_SYMBOL(nvm_put_blk_unlocked);
+
struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
unsigned long flags)
{
@@ -192,6 +210,206 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
}
EXPORT_SYMBOL(nvm_erase_blk);
+void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+ int i;
+
+ if (rqd->nr_pages > 1) {
+ for (i = 0; i < rqd->nr_pages; i++)
+ rqd->ppa_list[i] = dev_to_generic_addr(dev,
+ rqd->ppa_list[i]);
+ } else {
+ rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
+ }
+}
+EXPORT_SYMBOL(nvm_addr_to_generic_mode);
+
+void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+ int i;
+
+ if (rqd->nr_pages > 1) {
+ for (i = 0; i < rqd->nr_pages; i++)
+ rqd->ppa_list[i] = generic_to_dev_addr(dev,
+ rqd->ppa_list[i]);
+ } else {
+ rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+ }
+}
+EXPORT_SYMBOL(nvm_generic_to_addr_mode);
+
+int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
+ struct ppa_addr *ppas, int nr_ppas)
+{
+ int i, plane_cnt, pl_idx;
+
+ if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
+ rqd->nr_pages = 1;
+ rqd->ppa_addr = ppas[0];
+
+ return 0;
+ }
+
+ plane_cnt = (1 << dev->plane_mode);
+ rqd->nr_pages = plane_cnt * nr_ppas;
+
+ if (dev->ops->max_phys_sect < rqd->nr_pages)
+ return -EINVAL;
+
+ rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
+ if (!rqd->ppa_list) {
+ pr_err("nvm: failed to allocate dma memory\n");
+ return -ENOMEM;
+ }
+
+ for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+ for (i = 0; i < nr_ppas; i++) {
+ ppas[i].g.pl = pl_idx;
+ rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(nvm_set_rqd_ppalist);
+
+void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+ if (!rqd->ppa_list)
+ return;
+
+ nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
+}
+EXPORT_SYMBOL(nvm_free_rqd_ppalist);
+
+int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
+{
+ struct nvm_rq rqd;
+ int ret;
+
+ if (!dev->ops->erase_block)
+ return 0;
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
+ if (ret)
+ return ret;
+
+ nvm_generic_to_addr_mode(dev, &rqd);
+
+ ret = dev->ops->erase_block(dev, &rqd);
+
+ nvm_free_rqd_ppalist(dev, &rqd);
+
+ return ret;
+}
+EXPORT_SYMBOL(nvm_erase_ppa);
+
+void nvm_end_io(struct nvm_rq *rqd, int error)
+{
+ rqd->error = error;
+ rqd->end_io(rqd);
+}
+EXPORT_SYMBOL(nvm_end_io);
+
+static void nvm_end_io_sync(struct nvm_rq *rqd)
+{
+ struct completion *waiting = rqd->wait;
+
+ rqd->wait = NULL;
+
+ complete(waiting);
+}
+
+int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
+ int opcode, int flags, void *buf, int len)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct nvm_rq rqd;
+ struct bio *bio;
+ int ret;
+ unsigned long hang_check;
+
+ bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(bio))
+ return -ENOMEM;
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+ ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
+ if (ret) {
+ bio_put(bio);
+ return ret;
+ }
+
+ rqd.opcode = opcode;
+ rqd.bio = bio;
+ rqd.wait = &wait;
+ rqd.dev = dev;
+ rqd.end_io = nvm_end_io_sync;
+ rqd.flags = flags;
+ nvm_generic_to_addr_mode(dev, &rqd);
+
+ ret = dev->ops->submit_io(dev, &rqd);
+
+ /* Prevent hang_check timer from firing at us during very long I/O */
+ hang_check = sysctl_hung_task_timeout_secs;
+ if (hang_check)
+ while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
+ else
+ wait_for_completion_io(&wait);
+
+ nvm_free_rqd_ppalist(dev, &rqd);
+
+ return rqd.error;
+}
+EXPORT_SYMBOL(nvm_submit_ppa);
+
+static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
+{
+ int i;
+
+ dev->lps_per_blk = dev->pgs_per_blk;
+ dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
+ if (!dev->lptbl)
+ return -ENOMEM;
+
+ /* Just a linear array */
+ for (i = 0; i < dev->lps_per_blk; i++)
+ dev->lptbl[i] = i;
+
+ return 0;
+}
+
+static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
+{
+ int i, p;
+ struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
+
+ if (!mlc->num_pairs)
+ return 0;
+
+ dev->lps_per_blk = mlc->num_pairs;
+ dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
+ if (!dev->lptbl)
+ return -ENOMEM;
+
+ /* The lower page table encoding consists of a list of bytes, where each
+ * has a lower and an upper half. The first half byte maintains the
+ * increment value and every value after is an offset added to the
+ * previous incrementation value */
+ dev->lptbl[0] = mlc->pairs[0] & 0xF;
+ for (i = 1; i < dev->lps_per_blk; i++) {
+ p = mlc->pairs[i >> 1];
+ if (i & 0x1) /* upper */
+ dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
+ else /* lower */
+ dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
+ }
+
+ return 0;
+}
+
static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
@@ -206,6 +424,7 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->sec_size = grp->csecs;
dev->oob_size = grp->sos;
dev->sec_per_pg = grp->fpg_sz / grp->csecs;
+ dev->mccap = grp->mccap;
memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
dev->plane_mode = NVM_PLANE_SINGLE;
@@ -216,11 +435,23 @@ static int nvm_core_init(struct nvm_dev *dev)
return -EINVAL;
}
- if (grp->fmtype != 0 && grp->fmtype != 1) {
+ switch (grp->fmtype) {
+ case NVM_ID_FMTYPE_SLC:
+ if (nvm_init_slc_tbl(dev, grp))
+ return -ENOMEM;
+ break;
+ case NVM_ID_FMTYPE_MLC:
+ if (nvm_init_mlc_tbl(dev, grp))
+ return -ENOMEM;
+ break;
+ default:
pr_err("nvm: flash type not supported\n");
return -EINVAL;
}
+ if (!dev->lps_per_blk)
+ pr_info("nvm: lower page programming table missing\n");
+
if (grp->mpos & 0x020202)
dev->plane_mode = NVM_PLANE_DOUBLE;
if (grp->mpos & 0x040404)
@@ -238,6 +469,7 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->nr_chnls;
dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
INIT_LIST_HEAD(&dev->online_targets);
+ mutex_init(&dev->mlock);
return 0;
}
@@ -249,6 +481,8 @@ static void nvm_free(struct nvm_dev *dev)
if (dev->mt)
dev->mt->unregister_mgr(dev);
+
+ kfree(dev->lptbl);
}
static int nvm_init(struct nvm_dev *dev)
@@ -338,9 +572,18 @@ int nvm_register(struct request_queue *q, char *disk_name,
}
}
+ if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
+ ret = nvm_get_sysblock(dev, &dev->sb);
+ if (!ret)
+ pr_err("nvm: device not initialized.\n");
+ else if (ret < 0)
+ pr_err("nvm: err (%d) on device initialization\n", ret);
+ }
+
/* register device with a supported media manager */
down_write(&nvm_lock);
- dev->mt = nvm_init_mgr(dev);
+ if (ret > 0)
+ dev->mt = nvm_init_mgr(dev);
list_add(&dev->devices, &nvm_devices);
up_write(&nvm_lock);
@@ -788,6 +1031,102 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
return __nvm_configure_remove(&remove);
}
+static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
+{
+ info->seqnr = 1;
+ info->erase_cnt = 0;
+ info->version = 1;
+}
+
+static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
+{
+ struct nvm_dev *dev;
+ struct nvm_sb_info info;
+ int ret;
+
+ down_write(&nvm_lock);
+ dev = nvm_find_nvm_dev(init->dev);
+ up_write(&nvm_lock);
+ if (!dev) {
+ pr_err("nvm: device not found\n");
+ return -EINVAL;
+ }
+
+ nvm_setup_nvm_sb_info(&info);
+
+ strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
+ info.fs_ppa.ppa = -1;
+
+ if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
+ ret = nvm_init_sysblock(dev, &info);
+ if (ret)
+ return ret;
+ }
+
+ memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
+
+ down_write(&nvm_lock);
+ dev->mt = nvm_init_mgr(dev);
+ up_write(&nvm_lock);
+
+ return 0;
+}
+
+static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
+{
+ struct nvm_ioctl_dev_init init;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
+ return -EFAULT;
+
+ if (init.flags != 0) {
+ pr_err("nvm: no flags supported\n");
+ return -EINVAL;
+ }
+
+ init.dev[DISK_NAME_LEN - 1] = '\0';
+
+ return __nvm_ioctl_dev_init(&init);
+}
+
+static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
+{
+ struct nvm_ioctl_dev_factory fact;
+ struct nvm_dev *dev;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
+ return -EFAULT;
+
+ fact.dev[DISK_NAME_LEN - 1] = '\0';
+
+ if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
+ return -EINVAL;
+
+ down_write(&nvm_lock);
+ dev = nvm_find_nvm_dev(fact.dev);
+ up_write(&nvm_lock);
+ if (!dev) {
+ pr_err("nvm: device not found\n");
+ return -EINVAL;
+ }
+
+ if (dev->mt) {
+ dev->mt->unregister_mgr(dev);
+ dev->mt = NULL;
+ }
+
+ if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
+ return nvm_dev_factory(dev, fact.flags);
+
+ return 0;
+}
+
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
@@ -801,6 +1140,10 @@ static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
return nvm_ioctl_dev_create(file, argp);
case NVM_DEV_REMOVE:
return nvm_ioctl_dev_remove(file, argp);
+ case NVM_DEV_INIT:
+ return nvm_ioctl_dev_init(file, argp);
+ case NVM_DEV_FACTORY:
+ return nvm_ioctl_dev_factory(file, argp);
}
return 0;
}
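
Note (illustrative, not part of the patch): the nvm_init_mlc_tbl() helper added above decodes the nibble-packed lower-page table described in its comment. A self-contained sketch of the same decoding, run on made-up pair bytes (real values come from the device identification), expands the deltas into absolute flash page numbers:

#include <stdio.h>

int main(void)
{
	/* Example data only: each byte carries two 4-bit deltas, low nibble first. */
	unsigned char pairs[] = { 0x10, 0x22, 0x22 };
	int lps_per_blk = 6;
	int lptbl[6];
	int i, p;

	lptbl[0] = pairs[0] & 0xF;                 /* first entry is absolute */
	for (i = 1; i < lps_per_blk; i++) {
		p = pairs[i >> 1];
		if (i & 0x1)                       /* odd index: upper nibble */
			lptbl[i] = lptbl[i - 1] + ((p & 0xF0) >> 4);
		else                               /* even index: lower nibble */
			lptbl[i] = lptbl[i - 1] + (p & 0xF);
	}

	for (i = 0; i < lps_per_blk; i++)
		printf("lower page %d -> flash page %d\n", i, lptbl[i]);
	return 0;
}

With the example bytes above this prints pages 0, 1, 3, 5, 7 and 9, i.e. the pages of an MLC block that can be programmed in SLC mode.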
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index a54b33995..7fb725b16 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -60,7 +60,8 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
lun->vlun.lun_id = i % dev->luns_per_chnl;
lun->vlun.chnl_id = i / dev->luns_per_chnl;
lun->vlun.nr_free_blocks = dev->blks_per_lun;
- lun->vlun.nr_inuse_blocks = 0;
+ lun->vlun.nr_open_blocks = 0;
+ lun->vlun.nr_closed_blocks = 0;
lun->vlun.nr_bad_blocks = 0;
}
return 0;
@@ -89,6 +90,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
list_move_tail(&blk->list, &lun->bb_list);
lun->vlun.nr_bad_blocks++;
+ lun->vlun.nr_free_blocks--;
}
return 0;
@@ -133,15 +135,15 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
pba = pba - (dev->sec_per_lun * lun_id);
blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
- if (!blk->type) {
+ if (!blk->state) {
/* at this point, we don't know anything about the
* block. It's up to the FTL on top to re-etablish the
- * block state
+ * block state. The block is assumed to be open.
*/
list_move_tail(&blk->list, &lun->used_list);
- blk->type = 1;
+ blk->state = NVM_BLK_ST_OPEN;
lun->vlun.nr_free_blocks--;
- lun->vlun.nr_inuse_blocks++;
+ lun->vlun.nr_open_blocks++;
}
}
@@ -255,14 +257,14 @@ static void gennvm_unregister(struct nvm_dev *dev)
module_put(THIS_MODULE);
}
-static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
+static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
struct nvm_lun *vlun, unsigned long flags)
{
struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
struct nvm_block *blk = NULL;
int is_gc = flags & NVM_IOTYPE_GC;
- spin_lock(&vlun->lock);
+ assert_spin_locked(&vlun->lock);
if (list_empty(&lun->free_list)) {
pr_err_ratelimited("gennvm: lun %u have no free pages available",
@@ -275,83 +277,64 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
blk = list_first_entry(&lun->free_list, struct nvm_block, list);
list_move_tail(&blk->list, &lun->used_list);
- blk->type = 1;
+ blk->state = NVM_BLK_ST_OPEN;
lun->vlun.nr_free_blocks--;
- lun->vlun.nr_inuse_blocks++;
+ lun->vlun.nr_open_blocks++;
out:
+ return blk;
+}
+
+static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
+ struct nvm_lun *vlun, unsigned long flags)
+{
+ struct nvm_block *blk;
+
+ spin_lock(&vlun->lock);
+ blk = gennvm_get_blk_unlocked(dev, vlun, flags);
spin_unlock(&vlun->lock);
return blk;
}
-static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
+static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
struct nvm_lun *vlun = blk->lun;
struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
- spin_lock(&vlun->lock);
+ assert_spin_locked(&vlun->lock);
- switch (blk->type) {
- case 1:
+ if (blk->state & NVM_BLK_ST_OPEN) {
list_move_tail(&blk->list, &lun->free_list);
+ lun->vlun.nr_open_blocks--;
lun->vlun.nr_free_blocks++;
- lun->vlun.nr_inuse_blocks--;
- blk->type = 0;
- break;
- case 2:
+ blk->state = NVM_BLK_ST_FREE;
+ } else if (blk->state & NVM_BLK_ST_CLOSED) {
+ list_move_tail(&blk->list, &lun->free_list);
+ lun->vlun.nr_closed_blocks--;
+ lun->vlun.nr_free_blocks++;
+ blk->state = NVM_BLK_ST_FREE;
+ } else if (blk->state & NVM_BLK_ST_BAD) {
list_move_tail(&blk->list, &lun->bb_list);
lun->vlun.nr_bad_blocks++;
- lun->vlun.nr_inuse_blocks--;
- break;
- default:
+ blk->state = NVM_BLK_ST_BAD;
+ } else {
WARN_ON_ONCE(1);
pr_err("gennvm: erroneous block type (%lu -> %u)\n",
- blk->id, blk->type);
+ blk->id, blk->state);
list_move_tail(&blk->list, &lun->bb_list);
lun->vlun.nr_bad_blocks++;
- lun->vlun.nr_inuse_blocks--;
- }
-
- spin_unlock(&vlun->lock);
-}
-
-static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
- int i;
-
- if (rqd->nr_pages > 1) {
- for (i = 0; i < rqd->nr_pages; i++)
- rqd->ppa_list[i] = dev_to_generic_addr(dev,
- rqd->ppa_list[i]);
- } else {
- rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
+ blk->state = NVM_BLK_ST_BAD;
}
}
-static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
- int i;
-
- if (rqd->nr_pages > 1) {
- for (i = 0; i < rqd->nr_pages; i++)
- rqd->ppa_list[i] = generic_to_dev_addr(dev,
- rqd->ppa_list[i]);
- } else {
- rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
- }
-}
-
-static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
- if (!dev->ops->submit_io)
- return 0;
-
- /* Convert address space */
- gennvm_generic_to_addr_mode(dev, rqd);
+ struct nvm_lun *vlun = blk->lun;
- rqd->dev = dev;
- return dev->ops->submit_io(dev, rqd);
+ spin_lock(&vlun->lock);
+ gennvm_put_blk_unlocked(dev, blk);
+ spin_unlock(&vlun->lock);
}
static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
@@ -376,7 +359,7 @@ static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
blk = &lun->vlun.blocks[ppa->g.blk];
/* will be moved to bb list on put_blk from target */
- blk->type = type;
+ blk->state = type;
}
/* mark block bad. It is expected the target recover from the error. */
@@ -390,77 +373,51 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
if (dev->ops->set_bb_tbl(dev, rqd, 1))
return;
- gennvm_addr_to_generic_mode(dev, rqd);
+ nvm_addr_to_generic_mode(dev, rqd);
/* look up blocks and mark them as bad */
if (rqd->nr_pages > 1)
for (i = 0; i < rqd->nr_pages; i++)
- gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
+ gennvm_blk_set_type(dev, &rqd->ppa_list[i],
+ NVM_BLK_ST_BAD);
else
- gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
+ gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
}
-static int gennvm_end_io(struct nvm_rq *rqd, int error)
+static void gennvm_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_instance *ins = rqd->ins;
- int ret = 0;
- switch (error) {
+ switch (rqd->error) {
case NVM_RSP_SUCCESS:
- break;
case NVM_RSP_ERR_EMPTYPAGE:
break;
case NVM_RSP_ERR_FAILWRITE:
gennvm_mark_blk_bad(rqd->dev, rqd);
- default:
- ret++;
}
- ret += ins->tt->end_io(rqd, error);
-
- return ret;
+ ins->tt->end_io(rqd);
}
-static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
- unsigned long flags)
+static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
- int plane_cnt = 0, pl_idx, ret;
- struct ppa_addr addr;
- struct nvm_rq rqd;
-
- if (!dev->ops->erase_block)
- return 0;
-
- addr = block_to_ppa(dev, blk);
-
- if (dev->plane_mode == NVM_PLANE_SINGLE) {
- rqd.nr_pages = 1;
- rqd.ppa_addr = addr;
- } else {
- plane_cnt = (1 << dev->plane_mode);
- rqd.nr_pages = plane_cnt;
-
- rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
- &rqd.dma_ppa_list);
- if (!rqd.ppa_list) {
- pr_err("gennvm: failed to allocate dma memory\n");
- return -ENOMEM;
- }
-
- for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
- addr.g.pl = pl_idx;
- rqd.ppa_list[pl_idx] = addr;
- }
- }
+ if (!dev->ops->submit_io)
+ return -ENODEV;
- gennvm_generic_to_addr_mode(dev, &rqd);
+ /* Convert address space */
+ nvm_generic_to_addr_mode(dev, rqd);
- ret = dev->ops->erase_block(dev, &rqd);
+ rqd->dev = dev;
+ rqd->end_io = gennvm_end_io;
+ return dev->ops->submit_io(dev, rqd);
+}
- if (plane_cnt)
- nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
+static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
+ unsigned long flags)
+{
+ struct ppa_addr addr = block_to_ppa(dev, blk);
- return ret;
+ return nvm_erase_ppa(dev, &addr, 1);
}
static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
@@ -480,10 +437,11 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
gennvm_for_each_lun(gn, lun, i) {
spin_lock(&lun->vlun.lock);
- pr_info("%s: lun%8u\t%u\t%u\t%u\n",
+ pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
dev->name, i,
lun->vlun.nr_free_blocks,
- lun->vlun.nr_inuse_blocks,
+ lun->vlun.nr_open_blocks,
+ lun->vlun.nr_closed_blocks,
lun->vlun.nr_bad_blocks);
spin_unlock(&lun->vlun.lock);
@@ -491,21 +449,23 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
}
static struct nvmm_type gennvm = {
- .name = "gennvm",
- .version = {0, 1, 0},
+ .name = "gennvm",
+ .version = {0, 1, 0},
+
+ .register_mgr = gennvm_register,
+ .unregister_mgr = gennvm_unregister,
- .register_mgr = gennvm_register,
- .unregister_mgr = gennvm_unregister,
+ .get_blk_unlocked = gennvm_get_blk_unlocked,
+ .put_blk_unlocked = gennvm_put_blk_unlocked,
- .get_blk = gennvm_get_blk,
- .put_blk = gennvm_put_blk,
+ .get_blk = gennvm_get_blk,
+ .put_blk = gennvm_put_blk,
- .submit_io = gennvm_submit_io,
- .end_io = gennvm_end_io,
- .erase_blk = gennvm_erase_blk,
+ .submit_io = gennvm_submit_io,
+ .erase_blk = gennvm_erase_blk,
- .get_lun = gennvm_get_lun,
- .lun_info_print = gennvm_lun_info_print,
+ .get_lun = gennvm_get_lun,
+ .lun_info_print = gennvm_lun_info_print,
};
static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 134e4faba..307db1ea2 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -179,16 +179,23 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
unsigned long flags)
{
+ struct nvm_lun *lun = rlun->parent;
struct nvm_block *blk;
struct rrpc_block *rblk;
- blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
- if (!blk)
+ spin_lock(&lun->lock);
+ blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
+ if (!blk) {
+ pr_err("nvm: rrpc: cannot get new block from media manager\n");
+ spin_unlock(&lun->lock);
return NULL;
+ }
rblk = &rlun->blocks[blk->id];
- blk->priv = rblk;
+ list_add_tail(&rblk->list, &rlun->open_list);
+ spin_unlock(&lun->lock);
+ blk->priv = rblk;
bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
rblk->next_page = 0;
rblk->nr_invalid_pages = 0;
@@ -199,7 +206,13 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- nvm_put_blk(rrpc->dev, rblk->parent);
+ struct rrpc_lun *rlun = rblk->rlun;
+ struct nvm_lun *lun = rlun->parent;
+
+ spin_lock(&lun->lock);
+ nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
+ list_del(&rblk->list);
+ spin_unlock(&lun->lock);
}
static void rrpc_put_blks(struct rrpc *rrpc)
@@ -287,6 +300,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
}
page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
+ if (!page) {
+ bio_put(bio);
+ return -ENOMEM;
+ }
while ((slot = find_first_zero_bit(rblk->invalid_pages,
nr_pgs_per_blk)) < nr_pgs_per_blk) {
@@ -328,6 +345,10 @@ try:
goto finished;
}
wait_for_completion_io(&wait);
+ if (bio->bi_error) {
+ rrpc_inflight_laddr_release(rrpc, rqd);
+ goto finished;
+ }
bio_reset(bio);
reinit_completion(&wait);
@@ -350,6 +371,8 @@ try:
wait_for_completion_io(&wait);
rrpc_inflight_laddr_release(rrpc, rqd);
+ if (bio->bi_error)
+ goto finished;
bio_reset(bio);
}
@@ -373,16 +396,26 @@ static void rrpc_block_gc(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct nvm_dev *dev = rrpc->dev;
+ struct nvm_lun *lun = rblk->parent->lun;
+ struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+ mempool_free(gcb, rrpc->gcb_pool);
pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
if (rrpc_move_valid_pages(rrpc, rblk))
- goto done;
+ goto put_back;
+
+ if (nvm_erase_blk(dev, rblk->parent))
+ goto put_back;
- nvm_erase_blk(dev, rblk->parent);
rrpc_put_blk(rrpc, rblk);
-done:
- mempool_free(gcb, rrpc->gcb_pool);
+
+ return;
+
+put_back:
+ spin_lock(&rlun->lock);
+ list_add_tail(&rblk->prio, &rlun->prio_list);
+ spin_unlock(&rlun->lock);
}
/* the block with highest number of invalid pages, will be in the beginning
@@ -427,7 +460,7 @@ static void rrpc_lun_gc(struct work_struct *work)
if (nr_blocks_need < rrpc->nr_luns)
nr_blocks_need = rrpc->nr_luns;
- spin_lock(&lun->lock);
+ spin_lock(&rlun->lock);
while (nr_blocks_need > lun->nr_free_blocks &&
!list_empty(&rlun->prio_list)) {
struct rrpc_block *rblock = block_prio_find_max(rlun);
@@ -436,16 +469,16 @@ static void rrpc_lun_gc(struct work_struct *work)
if (!rblock->nr_invalid_pages)
break;
+ gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
+ if (!gcb)
+ break;
+
list_del_init(&rblock->prio);
BUG_ON(!block_is_full(rrpc, rblock));
pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb)
- break;
-
gcb->rrpc = rrpc;
gcb->rblk = rblock;
INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
@@ -454,7 +487,7 @@ static void rrpc_lun_gc(struct work_struct *work)
nr_blocks_need--;
}
- spin_unlock(&lun->lock);
+ spin_unlock(&rlun->lock);
/* TODO: Hint that request queue can be started again */
}
@@ -635,12 +668,24 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
lun = rblk->parent->lun;
cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
+ if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
+ struct nvm_block *blk = rblk->parent;
+ struct rrpc_lun *rlun = rblk->rlun;
+
+ spin_lock(&lun->lock);
+ lun->nr_open_blocks--;
+ lun->nr_closed_blocks++;
+ blk->state &= ~NVM_BLK_ST_OPEN;
+ blk->state |= NVM_BLK_ST_CLOSED;
+ list_move_tail(&rblk->list, &rlun->closed_list);
+ spin_unlock(&lun->lock);
+
rrpc_run_gc(rrpc, rblk);
+ }
}
}
-static int rrpc_end_io(struct nvm_rq *rqd, int error)
+static void rrpc_end_io(struct nvm_rq *rqd)
{
struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
@@ -650,11 +695,12 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
if (bio_data_dir(rqd->bio) == WRITE)
rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+ bio_put(rqd->bio);
+
if (rrqd->flags & NVM_IOTYPE_GC)
- return 0;
+ return;
rrpc_unlock_rq(rrpc, rqd);
- bio_put(rqd->bio);
if (npages > 1)
nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
@@ -662,8 +708,6 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
mempool_free(rqd, rrpc->rq_pool);
-
- return 0;
}
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
@@ -841,6 +885,13 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
err = nvm_submit_io(rrpc->dev, rqd);
if (err) {
pr_err("rrpc: I/O submission failed: %d\n", err);
+ bio_put(bio);
+ if (!(flags & NVM_IOTYPE_GC)) {
+ rrpc_unlock_rq(rrpc, rqd);
+ if (rqd->nr_pages > 1)
+ nvm_dev_dma_free(rrpc->dev,
+ rqd->ppa_list, rqd->dma_ppa_list);
+ }
return NVM_IO_ERR;
}
@@ -1090,6 +1141,11 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
struct rrpc_lun *rlun;
int i, j;
+ if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+ pr_err("rrpc: number of pages per block too high.");
+ return -EINVAL;
+ }
+
spin_lock_init(&rrpc->rev_lock);
rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
@@ -1101,16 +1157,13 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
for (i = 0; i < rrpc->nr_luns; i++) {
struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
- if (dev->pgs_per_blk >
- MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
- pr_err("rrpc: number of pages per block too high.");
- goto err;
- }
-
rlun = &rrpc->luns[i];
rlun->rrpc = rrpc;
rlun->parent = lun;
INIT_LIST_HEAD(&rlun->prio_list);
+ INIT_LIST_HEAD(&rlun->open_list);
+ INIT_LIST_HEAD(&rlun->closed_list);
+
INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
spin_lock_init(&rlun->lock);
@@ -1127,6 +1180,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
struct nvm_block *blk = &lun->blocks[j];
rblk->parent = blk;
+ rblk->rlun = rlun;
INIT_LIST_HEAD(&rblk->prio);
spin_lock_init(&rblk->lock);
}
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index a9696a06c..f7b373363 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -54,7 +54,9 @@ struct rrpc_rq {
struct rrpc_block {
struct nvm_block *parent;
+ struct rrpc_lun *rlun;
struct list_head prio;
+ struct list_head list;
#define MAX_INVALID_PAGES_STORAGE 8
/* Bitmap for invalid page intries */
@@ -73,7 +75,16 @@ struct rrpc_lun {
struct nvm_lun *parent;
struct rrpc_block *cur, *gc_cur;
struct rrpc_block *blocks; /* Reference to block allocation */
- struct list_head prio_list; /* Blocks that may be GC'ed */
+
+ struct list_head prio_list; /* Blocks that may be GC'ed */
+ struct list_head open_list; /* In-use open blocks. These are blocks
+ * that can be both written to and read
+ * from
+ */
+ struct list_head closed_list; /* In-use closed blocks. These are
+ * blocks that can _only_ be read from
+ */
+
struct work_struct ws_gc;
spinlock_t lock;
@@ -163,8 +174,7 @@ static inline sector_t rrpc_get_sector(sector_t laddr)
static inline int request_intersects(struct rrpc_inflight_rq *r,
sector_t laddr_start, sector_t laddr_end)
{
- return (laddr_end >= r->l_start && laddr_end <= r->l_end) &&
- (laddr_start >= r->l_start && laddr_start <= r->l_end);
+ return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}
static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
@@ -173,6 +183,8 @@ static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
sector_t laddr_end = laddr + pages - 1;
struct rrpc_inflight_rq *rtmp;
+ WARN_ON(irqs_disabled());
+
spin_lock_irq(&rrpc->inflights.lock);
list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
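
Note (illustrative, not part of the patch): the request_intersects() change above replaces the old check, which only detected requests fully contained in an in-flight range, with the standard inclusive interval-overlap test. A minimal sketch of that test, with example values:

#include <assert.h>

/* Two inclusive ranges overlap iff each starts no later than the other ends. */
static int ranges_overlap(long a_start, long a_end, long b_start, long b_end)
{
	return (a_end >= b_start) && (a_start <= b_end);
}

int main(void)
{
	assert(ranges_overlap(0, 7, 4, 10));   /* partial overlap */
	assert(ranges_overlap(4, 5, 0, 100));  /* fully contained */
	assert(!ranges_overlap(0, 3, 4, 10));  /* disjoint */
	return 0;
}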
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
new file mode 100644
index 000000000..321de1f15
--- /dev/null
+++ b/drivers/lightnvm/sysblk.c
@@ -0,0 +1,741 @@
+/*
+ * Copyright (C) 2015 Matias Bjorling. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/lightnvm.h>
+
+#define MAX_SYSBLKS 3 /* remember to update mapping scheme on change */
+#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
+ * enables ~1.5M updates per sysblk unit
+ */
+
+struct sysblk_scan {
+ /* A row is a collection of flash blocks for a system block. */
+ int nr_rows;
+ int row;
+ int act_blk[MAX_SYSBLKS];
+
+ int nr_ppas;
+ struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
+};
+
+static inline int scan_ppa_idx(int row, int blkid)
+{
+ return (row * MAX_BLKS_PR_SYSBLK) + blkid;
+}
+
+void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
+{
+ info->seqnr = be32_to_cpu(sb->seqnr);
+ info->erase_cnt = be32_to_cpu(sb->erase_cnt);
+ info->version = be16_to_cpu(sb->version);
+ strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
+ info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
+}
+
+void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
+{
+ sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
+ sb->seqnr = cpu_to_be32(info->seqnr);
+ sb->erase_cnt = cpu_to_be32(info->erase_cnt);
+ sb->version = cpu_to_be16(info->version);
+ strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
+ sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
+}
+
+static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
+{
+ int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
+ int i;
+
+ for (i = 0; i < nr_rows; i++)
+ sysblk_ppas[i].ppa = 0;
+
+ /* if possible, place sysblk at first channel, middle channel and last
+ * channel of the device. If not, create only one or two sys blocks
+ */
+ switch (dev->nr_chnls) {
+ case 2:
+ sysblk_ppas[1].g.ch = 1;
+ /* fall-through */
+ case 1:
+ sysblk_ppas[0].g.ch = 0;
+ break;
+ default:
+ sysblk_ppas[0].g.ch = 0;
+ sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
+ sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
+ break;
+ }
+
+ return nr_rows;
+}
+
+void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
+ struct ppa_addr *sysblk_ppas)
+{
+ memset(s, 0, sizeof(struct sysblk_scan));
+ s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
+}
+
+static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
+ void *private)
+{
+ struct sysblk_scan *s = private;
+ int i, nr_sysblk = 0;
+
+ for (i = 0; i < nr_blks; i++) {
+ if (blks[i] != NVM_BLK_T_HOST)
+ continue;
+
+ if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
+ pr_err("nvm: too many host blks\n");
+ return -EINVAL;
+ }
+
+ ppa.g.blk = i;
+
+ s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
+ s->nr_ppas++;
+ nr_sysblk++;
+ }
+
+ return 0;
+}
+
+static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
+ struct ppa_addr *ppas, nvm_bb_update_fn *fn)
+{
+ struct ppa_addr dppa;
+ int i, ret;
+
+ s->nr_ppas = 0;
+
+ for (i = 0; i < s->nr_rows; i++) {
+ dppa = generic_to_dev_addr(dev, ppas[i]);
+ s->row = i;
+
+ ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
+ if (ret) {
+ pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
+ ppas[i].g.ch,
+ ppas[i].g.blk);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * scans a block for latest sysblk.
+ * Returns:
+ * 0 - newer sysblk not found. PPA is updated to latest page.
+ * 1 - newer sysblk found and stored in *cur. PPA is updated to
+ * next valid page.
+ * <0- error.
+ */
+static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
+ struct nvm_system_block *sblk)
+{
+ struct nvm_system_block *cur;
+ int pg, cursz, ret, found = 0;
+
+ /* the full buffer for a flash page is allocated. Only the first of it
+ * contains the system block information
+ */
+ cursz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
+ cur = kmalloc(cursz, GFP_KERNEL);
+ if (!cur)
+ return -ENOMEM;
+
+ /* perform linear scan through the block */
+ for (pg = 0; pg < dev->lps_per_blk; pg++) {
+ ppa->g.pg = ppa_to_slc(dev, pg);
+
+ ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
+ cur, cursz);
+ if (ret) {
+ if (ret == NVM_RSP_ERR_EMPTYPAGE) {
+ pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
+ ppa->g.ch,
+ ppa->g.lun,
+ ppa->g.blk,
+ ppa->g.pg);
+ break;
+ }
+ pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
+ ret,
+ ppa->g.ch,
+ ppa->g.lun,
+ ppa->g.blk,
+ ppa->g.pg);
+ break; /* if we can't read a page, continue to the
+ * next blk
+ */
+ }
+
+ if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
+ pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
+ ppa->g.ch,
+ ppa->g.lun,
+ ppa->g.blk,
+ ppa->g.pg);
+ break; /* last valid page already found */
+ }
+
+ if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
+ continue;
+
+ memcpy(sblk, cur, sizeof(struct nvm_system_block));
+ found = 1;
+ }
+
+ kfree(cur);
+
+ return found;
+}
+
+static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
+{
+ struct nvm_rq rqd;
+ int ret;
+
+ if (s->nr_ppas > dev->ops->max_phys_sect) {
+ pr_err("nvm: unable to update all sysblocks atomically\n");
+ return -EINVAL;
+ }
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
+ nvm_generic_to_addr_mode(dev, &rqd);
+
+ ret = dev->ops->set_bb_tbl(dev, &rqd, type);
+ nvm_free_rqd_ppalist(dev, &rqd);
+ if (ret) {
+ pr_err("nvm: sysblk failed bb mark\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
+ void *private)
+{
+ struct sysblk_scan *s = private;
+ struct ppa_addr *sppa;
+ int i, blkid = 0;
+
+ for (i = 0; i < nr_blks; i++) {
+ if (blks[i] == NVM_BLK_T_HOST)
+ return -EEXIST;
+
+ if (blks[i] != NVM_BLK_T_FREE)
+ continue;
+
+ sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
+ sppa->g.ch = ppa.g.ch;
+ sppa->g.lun = ppa.g.lun;
+ sppa->g.blk = i;
+ s->nr_ppas++;
+ blkid++;
+
+ pr_debug("nvm: use (%u %u %u) as sysblk\n",
+ sppa->g.ch, sppa->g.lun, sppa->g.blk);
+ if (blkid > MAX_BLKS_PR_SYSBLK - 1)
+ return 0;
+ }
+
+ pr_err("nvm: sysblk failed get sysblk\n");
+ return -EINVAL;
+}
+
+static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
+ struct sysblk_scan *s)
+{
+ struct nvm_system_block nvmsb;
+ void *buf;
+ int i, sect, ret, bufsz;
+ struct ppa_addr *ppas;
+
+ nvm_cpu_to_sysblk(&nvmsb, info);
+
+ /* buffer for flash page */
+ bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
+
+ ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
+ if (!ppas) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Write and verify */
+ for (i = 0; i < s->nr_rows; i++) {
+ ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];
+
+ pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
+ ppas[0].g.ch,
+ ppas[0].g.lun,
+ ppas[0].g.blk,
+ ppas[0].g.pg);
+
+ /* Expand to all sectors within a flash page */
+ if (dev->sec_per_pg > 1) {
+ for (sect = 1; sect < dev->sec_per_pg; sect++) {
+ ppas[sect].ppa = ppas[0].ppa;
+ ppas[sect].g.sec = sect;
+ }
+ }
+
+ ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
+ NVM_IO_SLC_MODE, buf, bufsz);
+ if (ret) {
+ pr_err("nvm: sysblk failed program (%u %u %u)\n",
+ ppas[0].g.ch,
+ ppas[0].g.lun,
+ ppas[0].g.blk);
+ break;
+ }
+
+ ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
+ NVM_IO_SLC_MODE, buf, bufsz);
+ if (ret) {
+ pr_err("nvm: sysblk failed read (%u %u %u)\n",
+ ppas[0].g.ch,
+ ppas[0].g.lun,
+ ppas[0].g.blk);
+ break;
+ }
+
+ if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
+ pr_err("nvm: sysblk failed verify (%u %u %u)\n",
+ ppas[0].g.ch,
+ ppas[0].g.lun,
+ ppas[0].g.blk);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ kfree(ppas);
+err:
+ kfree(buf);
+
+ return ret;
+}
+
+static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
+{
+ int i, ret;
+ unsigned long nxt_blk;
+ struct ppa_addr *ppa;
+
+ for (i = 0; i < s->nr_rows; i++) {
+ nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
+ ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
+ ppa->g.pg = ppa_to_slc(dev, 0);
+
+ ret = nvm_erase_ppa(dev, ppa, 1);
+ if (ret)
+ return ret;
+
+ s->act_blk[i] = nxt_blk;
+ }
+
+ return 0;
+}
+
+int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
+{
+ struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
+ struct sysblk_scan s;
+ struct nvm_system_block *cur;
+ int i, j, found = 0;
+ int ret = -ENOMEM;
+
+ /*
+ * 1. setup sysblk locations
+ * 2. get bad block list
+ * 3. filter on host-specific (type 3)
+ * 4. iterate through all and find the highest seq nr.
+ * 5. return superblock information
+ */
+
+ if (!dev->ops->get_bb_tbl)
+ return -EINVAL;
+
+ nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
+
+ mutex_lock(&dev->mlock);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+ if (ret)
+ goto err_sysblk;
+
+ /* no sysblocks initialized */
+ if (!s.nr_ppas)
+ goto err_sysblk;
+
+ cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
+ if (!cur)
+ goto err_sysblk;
+
+ /* find the latest block across all sysblocks */
+ for (i = 0; i < s.nr_rows; i++) {
+ for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
+ struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];
+
+ ret = nvm_scan_block(dev, &ppa, cur);
+ if (ret > 0)
+ found = 1;
+ else if (ret < 0)
+ break;
+ }
+ }
+
+ nvm_sysblk_to_cpu(info, cur);
+
+ kfree(cur);
+err_sysblk:
+ mutex_unlock(&dev->mlock);
+
+ if (found)
+ return 1;
+ return ret;
+}
+
+int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
+{
+ /* 1. for each latest superblock
+ * 2. if room
+ * a. write new flash page entry with the updated information
+ * 3. if no room
+ * a. find next available block on lun (linear search)
+ * if none, continue to next lun
+ * if none at all, report error. also report that it wasn't
+ * possible to write to all superblocks.
+ * c. write data to block.
+ */
+ struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
+ struct sysblk_scan s;
+ struct nvm_system_block *cur;
+ int i, j, ppaidx, found = 0;
+ int ret = -ENOMEM;
+
+ if (!dev->ops->get_bb_tbl)
+ return -EINVAL;
+
+ nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
+
+ mutex_lock(&dev->mlock);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+ if (ret)
+ goto err_sysblk;
+
+ cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
+ if (!cur)
+ goto err_sysblk;
+
+ /* Get the latest sysblk for each sysblk row */
+ for (i = 0; i < s.nr_rows; i++) {
+ found = 0;
+ for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
+ ppaidx = scan_ppa_idx(i, j);
+ ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
+ if (ret > 0) {
+ s.act_blk[i] = j;
+ found = 1;
+ } else if (ret < 0)
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_err("nvm: no valid sysblks found to update\n");
+ ret = -EINVAL;
+ goto err_cur;
+ }
+
+ /*
+ * All sysblocks found. Check that they have same page id in their flash
+ * blocks
+ */
+ for (i = 1; i < s.nr_rows; i++) {
+ struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
+ struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];
+
+ if (l.g.pg != r.g.pg) {
+ pr_err("nvm: sysblks not on same page. Previous update failed.\n");
+ ret = -EINVAL;
+ goto err_cur;
+ }
+ }
+
+ /*
+ * Check that there haven't been another update to the seqnr since we
+ * began
+ */
+ if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
+ pr_err("nvm: seq is not sequential\n");
+ ret = -EINVAL;
+ goto err_cur;
+ }
+
+ /*
+ * When all pages in a block has been written, a new block is selected
+ * and writing is performed on the new block.
+ */
+ if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
+ dev->lps_per_blk - 1) {
+ ret = nvm_prepare_new_sysblks(dev, &s);
+ if (ret)
+ goto err_cur;
+ }
+
+ ret = nvm_write_and_verify(dev, new, &s);
+err_cur:
+ kfree(cur);
+err_sysblk:
+ mutex_unlock(&dev->mlock);
+
+ return ret;
+}
+
+int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
+{
+ struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
+ struct sysblk_scan s;
+ int ret;
+
+ /*
+ * 1. select master blocks and select first available blks
+ * 2. get bad block list
+ * 3. mark MAX_SYSBLKS block as host-based device allocated.
+ * 4. write and verify data to block
+ */
+
+ if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
+ return -EINVAL;
+
+ if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
+ pr_err("nvm: memory does not support SLC access\n");
+ return -EINVAL;
+ }
+
+ /* Index all sysblocks and mark them as host-driven */
+ nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
+
+ mutex_lock(&dev->mlock);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
+ if (ret)
+ goto err_mark;
+
+ ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
+ if (ret)
+ goto err_mark;
+
+ /* Write to the first block of each row */
+ ret = nvm_write_and_verify(dev, info, &s);
+err_mark:
+ mutex_unlock(&dev->mlock);
+ return ret;
+}
+
+struct factory_blks {
+ struct nvm_dev *dev;
+ int flags;
+ unsigned long *blks;
+};
+
+static int factory_nblks(int nblks)
+{
+ /* Round up to nearest BITS_PER_LONG */
+ return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
+}
+
+static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
+{
+ int nblks = factory_nblks(dev->blks_per_lun);
+
+ return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
+ BITS_PER_LONG;
+}
+
+static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
+ void *private)
+{
+ struct factory_blks *f = private;
+ struct nvm_dev *dev = f->dev;
+ int i, lunoff;
+
+ lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);
+
+ /* non-set bits correspond to the block must be erased */
+ for (i = 0; i < nr_blks; i++) {
+ switch (blks[i]) {
+ case NVM_BLK_T_FREE:
+ if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
+ set_bit(i, &f->blks[lunoff]);
+ break;
+ case NVM_BLK_T_HOST:
+ if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
+ set_bit(i, &f->blks[lunoff]);
+ break;
+ case NVM_BLK_T_GRWN_BAD:
+ if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
+ set_bit(i, &f->blks[lunoff]);
+ break;
+ default:
+ set_bit(i, &f->blks[lunoff]);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
+ int max_ppas, struct factory_blks *f)
+{
+ struct ppa_addr ppa;
+ int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
+ unsigned long *offset;
+
+ while (!done) {
+ done = 1;
+ for (ch = 0; ch < dev->nr_chnls; ch++) {
+ for (lun = 0; lun < dev->luns_per_chnl; lun++) {
+ idx = factory_blk_offset(dev, ch, lun);
+ offset = &f->blks[idx];
+
+ blkid = find_first_zero_bit(offset,
+ dev->blks_per_lun);
+ if (blkid >= dev->blks_per_lun)
+ continue;
+ set_bit(blkid, offset);
+
+ ppa.ppa = 0;
+ ppa.g.ch = ch;
+ ppa.g.lun = lun;
+ ppa.g.blk = blkid;
+ pr_debug("nvm: erase ppa (%u %u %u)\n",
+ ppa.g.ch,
+ ppa.g.lun,
+ ppa.g.blk);
+
+ erase_list[ppa_cnt] = ppa;
+ ppa_cnt++;
+ done = 0;
+
+ if (ppa_cnt == max_ppas)
+ return ppa_cnt;
+ }
+ }
+ }
+
+ return ppa_cnt;
+}
+
+static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
+ nvm_bb_update_fn *fn, void *priv)
+{
+ struct ppa_addr dev_ppa;
+ int ret;
+
+ dev_ppa = generic_to_dev_addr(dev, ppa);
+
+ ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
+ if (ret)
+ pr_err("nvm: failed bb tbl for ch%u lun%u\n",
+ ppa.g.ch, ppa.g.blk);
+ return ret;
+}
+
+static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
+{
+ int ch, lun, ret;
+ struct ppa_addr ppa;
+
+ ppa.ppa = 0;
+ for (ch = 0; ch < dev->nr_chnls; ch++) {
+ for (lun = 0; lun < dev->luns_per_chnl; lun++) {
+ ppa.g.ch = ch;
+ ppa.g.lun = lun;
+
+ ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
+ f);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int nvm_dev_factory(struct nvm_dev *dev, int flags)
+{
+ struct factory_blks f;
+ struct ppa_addr *ppas;
+ int ppa_cnt, ret = -ENOMEM;
+ int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
+ struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
+ struct sysblk_scan s;
+
+ f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
+ GFP_KERNEL);
+ if (!f.blks)
+ return ret;
+
+ ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
+ if (!ppas)
+ goto err_blks;
+
+ f.dev = dev;
+ f.flags = flags;
+
+ /* create list of blks to be erased */
+ ret = nvm_fact_select_blks(dev, &f);
+ if (ret)
+ goto err_ppas;
+
+ /* continue to erase until list of blks until empty */
+ while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
+ nvm_erase_ppa(dev, ppas, ppa_cnt);
+
+ /* mark host reserved blocks free */
+ if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
+ nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
+ mutex_lock(&dev->mlock);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
+ sysblk_get_host_blks);
+ if (!ret)
+ ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
+ mutex_unlock(&dev->mlock);
+ }
+err_ppas:
+ kfree(ppas);
+err_blks:
+ kfree(f.blks);
+ return ret;
+}
+EXPORT_SYMBOL(nvm_dev_factory);
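
Note (illustrative, not part of the patch): the new sysblk.c keeps all candidate system blocks in one flat array indexed by scan_ppa_idx(): up to MAX_SYSBLKS rows (one per selected channel), each holding MAX_BLKS_PR_SYSBLK blocks. A small sketch of that layout, mirroring the constants in the patch:

#include <stdio.h>

#define MAX_SYSBLKS 3
#define MAX_BLKS_PR_SYSBLK 2

static int scan_ppa_idx(int row, int blkid)
{
	return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}

int main(void)
{
	int row, blk;

	/* Print which slot of the flat ppas[] array each (row, blk) pair maps to. */
	for (row = 0; row < MAX_SYSBLKS; row++)
		for (blk = 0; blk < MAX_BLKS_PR_SYSBLK; blk++)
			printf("row %d, blk %d -> ppas[%d]\n",
			       row, blk, scan_ppa_idx(row, blk));
	return 0;
}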