Diffstat (limited to 'drivers/lightnvm')
-rw-r--r-- | drivers/lightnvm/core.c   | 370
-rw-r--r-- | drivers/lightnvm/gennvm.c | 100
-rw-r--r-- | drivers/lightnvm/rrpc.c   |  42
-rw-r--r-- | drivers/lightnvm/rrpc.h   |   2
-rw-r--r-- | drivers/lightnvm/sysblk.c | 284
5 files changed, 467 insertions, 331 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 0dc9a80ad..160c1a683 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -30,23 +30,35 @@ #include <linux/sched/sysctl.h> #include <uapi/linux/lightnvm.h> -static LIST_HEAD(nvm_targets); +static LIST_HEAD(nvm_tgt_types); static LIST_HEAD(nvm_mgrs); static LIST_HEAD(nvm_devices); +static LIST_HEAD(nvm_targets); static DECLARE_RWSEM(nvm_lock); +static struct nvm_target *nvm_find_target(const char *name) +{ + struct nvm_target *tgt; + + list_for_each_entry(tgt, &nvm_targets, list) + if (!strcmp(name, tgt->disk->disk_name)) + return tgt; + + return NULL; +} + static struct nvm_tgt_type *nvm_find_target_type(const char *name) { struct nvm_tgt_type *tt; - list_for_each_entry(tt, &nvm_targets, list) + list_for_each_entry(tt, &nvm_tgt_types, list) if (!strcmp(name, tt->name)) return tt; return NULL; } -int nvm_register_target(struct nvm_tgt_type *tt) +int nvm_register_tgt_type(struct nvm_tgt_type *tt) { int ret = 0; @@ -54,14 +66,14 @@ int nvm_register_target(struct nvm_tgt_type *tt) if (nvm_find_target_type(tt->name)) ret = -EEXIST; else - list_add(&tt->list, &nvm_targets); + list_add(&tt->list, &nvm_tgt_types); up_write(&nvm_lock); return ret; } -EXPORT_SYMBOL(nvm_register_target); +EXPORT_SYMBOL(nvm_register_tgt_type); -void nvm_unregister_target(struct nvm_tgt_type *tt) +void nvm_unregister_tgt_type(struct nvm_tgt_type *tt) { if (!tt) return; @@ -70,20 +82,20 @@ void nvm_unregister_target(struct nvm_tgt_type *tt) list_del(&tt->list); up_write(&nvm_lock); } -EXPORT_SYMBOL(nvm_unregister_target); +EXPORT_SYMBOL(nvm_unregister_tgt_type); void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags, dma_addr_t *dma_handler) { - return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags, + return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags, dma_handler); } EXPORT_SYMBOL(nvm_dev_dma_alloc); -void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list, +void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler) { - dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler); + dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler); } EXPORT_SYMBOL(nvm_dev_dma_free); @@ -214,8 +226,8 @@ void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd) { int i; - if (rqd->nr_pages > 1) { - for (i = 0; i < rqd->nr_pages; i++) + if (rqd->nr_ppas > 1) { + for (i = 0; i < rqd->nr_ppas; i++) rqd->ppa_list[i] = dev_to_generic_addr(dev, rqd->ppa_list[i]); } else { @@ -228,8 +240,8 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) { int i; - if (rqd->nr_pages > 1) { - for (i = 0; i < rqd->nr_pages; i++) + if (rqd->nr_ppas > 1) { + for (i = 0; i < rqd->nr_ppas; i++) rqd->ppa_list[i] = generic_to_dev_addr(dev, rqd->ppa_list[i]); } else { @@ -239,33 +251,36 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) EXPORT_SYMBOL(nvm_generic_to_addr_mode); int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd, - struct ppa_addr *ppas, int nr_ppas) + struct ppa_addr *ppas, int nr_ppas, int vblk) { int i, plane_cnt, pl_idx; - if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) { - rqd->nr_pages = 1; + if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) { + rqd->nr_ppas = nr_ppas; rqd->ppa_addr = ppas[0]; return 0; } - plane_cnt = dev->plane_mode; - rqd->nr_pages = plane_cnt * nr_ppas; - - if (dev->ops->max_phys_sect < rqd->nr_pages) - return -EINVAL; - + rqd->nr_ppas = nr_ppas; rqd->ppa_list = nvm_dev_dma_alloc(dev, 
GFP_KERNEL, &rqd->dma_ppa_list); if (!rqd->ppa_list) { pr_err("nvm: failed to allocate dma memory\n"); return -ENOMEM; } - for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) { + if (!vblk) { + for (i = 0; i < nr_ppas; i++) + rqd->ppa_list[i] = ppas[i]; + } else { + plane_cnt = dev->plane_mode; + rqd->nr_ppas *= plane_cnt; + for (i = 0; i < nr_ppas; i++) { - ppas[i].g.pl = pl_idx; - rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i]; + for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) { + ppas[i].g.pl = pl_idx; + rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i]; + } } } @@ -292,7 +307,7 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas) memset(&rqd, 0, sizeof(struct nvm_rq)); - ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas); + ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1); if (ret) return ret; @@ -322,11 +337,10 @@ static void nvm_end_io_sync(struct nvm_rq *rqd) complete(waiting); } -int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas, - int opcode, int flags, void *buf, int len) +int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode, + int flags, void *buf, int len) { DECLARE_COMPLETION_ONSTACK(wait); - struct nvm_rq rqd; struct bio *bio; int ret; unsigned long hang_check; @@ -335,23 +349,21 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas, if (IS_ERR_OR_NULL(bio)) return -ENOMEM; - memset(&rqd, 0, sizeof(struct nvm_rq)); - ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas); + nvm_generic_to_addr_mode(dev, rqd); + + rqd->dev = dev; + rqd->opcode = opcode; + rqd->flags = flags; + rqd->bio = bio; + rqd->wait = &wait; + rqd->end_io = nvm_end_io_sync; + + ret = dev->ops->submit_io(dev, rqd); if (ret) { bio_put(bio); return ret; } - rqd.opcode = opcode; - rqd.bio = bio; - rqd.wait = &wait; - rqd.dev = dev; - rqd.end_io = nvm_end_io_sync; - rqd.flags = flags; - nvm_generic_to_addr_mode(dev, &rqd); - - ret = dev->ops->submit_io(dev, &rqd); - /* Prevent hang_check timer from firing at us during very long I/O */ hang_check = sysctl_hung_task_timeout_secs; if (hang_check) @@ -359,12 +371,113 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas, else wait_for_completion_io(&wait); + return rqd->error; +} + +/** + * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must + * take to free ppa list if necessary. + * @dev: device + * @ppa_list: user created ppa_list + * @nr_ppas: length of ppa_list + * @opcode: device opcode + * @flags: device flags + * @buf: data buffer + * @len: data buffer length + */ +int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list, + int nr_ppas, int opcode, int flags, void *buf, int len) +{ + struct nvm_rq rqd; + + if (dev->ops->max_phys_sect < nr_ppas) + return -EINVAL; + + memset(&rqd, 0, sizeof(struct nvm_rq)); + + rqd.nr_ppas = nr_ppas; + if (nr_ppas > 1) + rqd.ppa_list = ppa_list; + else + rqd.ppa_addr = ppa_list[0]; + + return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len); +} +EXPORT_SYMBOL(nvm_submit_ppa_list); + +/** + * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded + * as single, dual, quad plane PPAs depending on device type. 
+ * @dev: device + * @ppa: user created ppa_list + * @nr_ppas: length of ppa_list + * @opcode: device opcode + * @flags: device flags + * @buf: data buffer + * @len: data buffer length + */ +int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas, + int opcode, int flags, void *buf, int len) +{ + struct nvm_rq rqd; + int ret; + + memset(&rqd, 0, sizeof(struct nvm_rq)); + ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1); + if (ret) + return ret; + + ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len); + nvm_free_rqd_ppalist(dev, &rqd); - return rqd.error; + return ret; } EXPORT_SYMBOL(nvm_submit_ppa); +/* + * folds a bad block list from its plane representation to its virtual + * block representation. The fold is done in place and reduced size is + * returned. + * + * If any of the planes status are bad or grown bad block, the virtual block + * is marked bad. If not bad, the first plane state acts as the block state. + */ +int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks) +{ + int blk, offset, pl, blktype; + + if (nr_blks != dev->blks_per_lun * dev->plane_mode) + return -EINVAL; + + for (blk = 0; blk < dev->blks_per_lun; blk++) { + offset = blk * dev->plane_mode; + blktype = blks[offset]; + + /* Bad blocks on any planes take precedence over other types */ + for (pl = 0; pl < dev->plane_mode; pl++) { + if (blks[offset + pl] & + (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) { + blktype = blks[offset + pl]; + break; + } + } + + blks[blk] = blktype; + } + + return dev->blks_per_lun; +} +EXPORT_SYMBOL(nvm_bb_tbl_fold); + +int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks) +{ + ppa = generic_to_dev_addr(dev, ppa); + + return dev->ops->get_bb_tbl(dev, ppa, blks); +} +EXPORT_SYMBOL(nvm_get_bb_tbl); + static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp) { int i; @@ -414,6 +527,7 @@ static int nvm_core_init(struct nvm_dev *dev) { struct nvm_id *id = &dev->identity; struct nvm_id_group *grp = &id->groups[0]; + int ret; /* device values */ dev->nr_chnls = grp->num_ch; @@ -421,6 +535,8 @@ static int nvm_core_init(struct nvm_dev *dev) dev->pgs_per_blk = grp->num_pg; dev->blks_per_lun = grp->num_blk; dev->nr_planes = grp->num_pln; + dev->fpg_size = grp->fpg_sz; + dev->pfpg_size = grp->fpg_sz * grp->num_pln; dev->sec_size = grp->csecs; dev->oob_size = grp->sos; dev->sec_per_pg = grp->fpg_sz / grp->csecs; @@ -430,33 +546,16 @@ static int nvm_core_init(struct nvm_dev *dev) dev->plane_mode = NVM_PLANE_SINGLE; dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; - if (grp->mtype != 0) { - pr_err("nvm: memory type not supported\n"); - return -EINVAL; - } - - switch (grp->fmtype) { - case NVM_ID_FMTYPE_SLC: - if (nvm_init_slc_tbl(dev, grp)) - return -ENOMEM; - break; - case NVM_ID_FMTYPE_MLC: - if (nvm_init_mlc_tbl(dev, grp)) - return -ENOMEM; - break; - default: - pr_err("nvm: flash type not supported\n"); - return -EINVAL; - } - - if (!dev->lps_per_blk) - pr_info("nvm: lower page programming table missing\n"); - if (grp->mpos & 0x020202) dev->plane_mode = NVM_PLANE_DOUBLE; if (grp->mpos & 0x040404) dev->plane_mode = NVM_PLANE_QUAD; + if (grp->mtype != 0) { + pr_err("nvm: memory type not supported\n"); + return -EINVAL; + } + /* calculated values */ dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes; dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk; @@ -468,11 +567,73 @@ static int nvm_core_init(struct nvm_dev *dev) sizeof(unsigned long), GFP_KERNEL); if (!dev->lun_map) return -ENOMEM; - INIT_LIST_HEAD(&dev->online_targets); 
+ + switch (grp->fmtype) { + case NVM_ID_FMTYPE_SLC: + if (nvm_init_slc_tbl(dev, grp)) { + ret = -ENOMEM; + goto err_fmtype; + } + break; + case NVM_ID_FMTYPE_MLC: + if (nvm_init_mlc_tbl(dev, grp)) { + ret = -ENOMEM; + goto err_fmtype; + } + break; + default: + pr_err("nvm: flash type not supported\n"); + ret = -EINVAL; + goto err_fmtype; + } + mutex_init(&dev->mlock); spin_lock_init(&dev->lock); return 0; +err_fmtype: + kfree(dev->lun_map); + return ret; +} + +static void nvm_remove_target(struct nvm_target *t) +{ + struct nvm_tgt_type *tt = t->type; + struct gendisk *tdisk = t->disk; + struct request_queue *q = tdisk->queue; + + lockdep_assert_held(&nvm_lock); + + del_gendisk(tdisk); + blk_cleanup_queue(q); + + if (tt->exit) + tt->exit(tdisk->private_data); + + put_disk(tdisk); + + list_del(&t->list); + kfree(t); +} + +static void nvm_free_mgr(struct nvm_dev *dev) +{ + struct nvm_target *tgt, *tmp; + + if (!dev->mt) + return; + + down_write(&nvm_lock); + list_for_each_entry_safe(tgt, tmp, &nvm_targets, list) { + if (tgt->dev != dev) + continue; + + nvm_remove_target(tgt); + } + up_write(&nvm_lock); + + dev->mt->unregister_mgr(dev); + dev->mt = NULL; } static void nvm_free(struct nvm_dev *dev) @@ -480,10 +641,10 @@ static void nvm_free(struct nvm_dev *dev) if (!dev) return; - if (dev->mt) - dev->mt->unregister_mgr(dev); + nvm_free_mgr(dev); kfree(dev->lptbl); + kfree(dev->lun_map); } static int nvm_init(struct nvm_dev *dev) @@ -530,8 +691,8 @@ err: static void nvm_exit(struct nvm_dev *dev) { - if (dev->ppalist_pool) - dev->ops->destroy_dma_pool(dev->ppalist_pool); + if (dev->dma_pool) + dev->ops->destroy_dma_pool(dev->dma_pool); nvm_free(dev); pr_info("nvm: successfully unloaded\n"); @@ -565,9 +726,9 @@ int nvm_register(struct request_queue *q, char *disk_name, } if (dev->ops->max_phys_sect > 1) { - dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist"); - if (!dev->ppalist_pool) { - pr_err("nvm: could not create ppa pool\n"); + dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist"); + if (!dev->dma_pool) { + pr_err("nvm: could not create dma pool\n"); ret = -ENOMEM; goto err_init; } @@ -613,7 +774,6 @@ void nvm_unregister(char *disk_name) up_write(&nvm_lock); nvm_exit(dev); - kfree(dev->lun_map); kfree(dev); } EXPORT_SYMBOL(nvm_unregister); @@ -645,12 +805,11 @@ static int nvm_create_target(struct nvm_dev *dev, return -EINVAL; } - list_for_each_entry(t, &dev->online_targets, list) { - if (!strcmp(create->tgtname, t->disk->disk_name)) { - pr_err("nvm: target name already exists.\n"); - up_write(&nvm_lock); - return -EINVAL; - } + t = nvm_find_target(create->tgtname); + if (t) { + pr_err("nvm: target name already exists.\n"); + up_write(&nvm_lock); + return -EINVAL; } up_write(&nvm_lock); @@ -688,9 +847,10 @@ static int nvm_create_target(struct nvm_dev *dev, t->type = tt; t->disk = tdisk; + t->dev = dev; down_write(&nvm_lock); - list_add_tail(&t->list, &dev->online_targets); + list_add_tail(&t->list, &nvm_targets); up_write(&nvm_lock); return 0; @@ -703,26 +863,6 @@ err_t: return -ENOMEM; } -static void nvm_remove_target(struct nvm_target *t) -{ - struct nvm_tgt_type *tt = t->type; - struct gendisk *tdisk = t->disk; - struct request_queue *q = tdisk->queue; - - lockdep_assert_held(&nvm_lock); - - del_gendisk(tdisk); - blk_cleanup_queue(q); - - if (tt->exit) - tt->exit(tdisk->private_data); - - put_disk(tdisk); - - list_del(&t->list); - kfree(t); -} - static int __nvm_configure_create(struct nvm_ioctl_create *create) { struct nvm_dev *dev; @@ -753,26 +893,19 @@ static int 
__nvm_configure_create(struct nvm_ioctl_create *create) static int __nvm_configure_remove(struct nvm_ioctl_remove *remove) { - struct nvm_target *t = NULL; - struct nvm_dev *dev; - int ret = -1; + struct nvm_target *t; down_write(&nvm_lock); - list_for_each_entry(dev, &nvm_devices, devices) - list_for_each_entry(t, &dev->online_targets, list) { - if (!strcmp(remove->tgtname, t->disk->disk_name)) { - nvm_remove_target(t); - ret = 0; - break; - } - } - up_write(&nvm_lock); - - if (ret) { + t = nvm_find_target(remove->tgtname); + if (!t) { pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname); + up_write(&nvm_lock); return -EINVAL; } + nvm_remove_target(t); + up_write(&nvm_lock); + return 0; } @@ -921,7 +1054,7 @@ static long nvm_ioctl_info(struct file *file, void __user *arg) info->version[2] = NVM_VERSION_PATCH; down_write(&nvm_lock); - list_for_each_entry(tt, &nvm_targets, list) { + list_for_each_entry(tt, &nvm_tgt_types, list) { struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter]; tgt->version[0] = tt->version[0]; @@ -1118,10 +1251,7 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg) return -EINVAL; } - if (dev->mt) { - dev->mt->unregister_mgr(dev); - dev->mt = NULL; - } + nvm_free_mgr(dev); if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) return nvm_dev_factory(dev, fact.flags); diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index 72e124a39..ec9fb6876 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -129,27 +129,25 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) return 0; } -static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks, - void *private) +static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa, + u8 *blks, int nr_blks) { - struct gen_nvm *gn = private; struct nvm_dev *dev = gn->dev; struct gen_lun *lun; struct nvm_block *blk; int i; + nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks); + if (nr_blks < 0) + return nr_blks; + lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun]; - for (i = 0; i < nr_blocks; i++) { + for (i = 0; i < nr_blks; i++) { if (blks[i] == 0) continue; blk = &lun->vlun.blocks[i]; - if (!blk) { - pr_err("gennvm: BB data is out of bounds.\n"); - return -EINVAL; - } - list_move_tail(&blk->list, &lun->bb_list); lun->vlun.nr_bad_blocks++; lun->vlun.nr_free_blocks--; @@ -216,13 +214,21 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) struct gen_lun *lun; struct nvm_block *block; sector_t lun_iter, blk_iter, cur_block_id = 0; - int ret; + int ret, nr_blks; + u8 *blks; + + nr_blks = dev->blks_per_lun * dev->plane_mode; + blks = kmalloc(nr_blks, GFP_KERNEL); + if (!blks) + return -ENOMEM; gennvm_for_each_lun(gn, lun, lun_iter) { lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) * dev->blks_per_lun); - if (!lun->vlun.blocks) + if (!lun->vlun.blocks) { + kfree(blks); return -ENOMEM; + } for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) { block = &lun->vlun.blocks[blk_iter]; @@ -246,14 +252,15 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) ppa.ppa = 0; ppa.g.ch = lun->vlun.chnl_id; - ppa.g.lun = lun->vlun.id; - ppa = generic_to_dev_addr(dev, ppa); + ppa.g.lun = lun->vlun.lun_id; + + ret = nvm_get_bb_tbl(dev, ppa, blks); + if (ret) + pr_err("gennvm: could not get BB table\n"); - ret = dev->ops->get_bb_tbl(dev, ppa, - dev->blks_per_lun, - gennvm_block_bb, gn); + ret = gennvm_block_bb(gn, ppa, blks, nr_blks); if (ret) - pr_err("gennvm: could not read BB table\n"); + 
pr_err("gennvm: BB table map failed\n"); } } @@ -266,6 +273,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) } } + kfree(blks); return 0; } @@ -399,64 +407,60 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) spin_unlock(&vlun->lock); } -static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa, - int type) +static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type) { struct gen_nvm *gn = dev->mp; struct gen_lun *lun; struct nvm_block *blk; - if (unlikely(ppa->g.ch > dev->nr_chnls || - ppa->g.lun > dev->luns_per_chnl || - ppa->g.blk > dev->blks_per_lun)) { + pr_debug("gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n", + ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type); + + if (unlikely(ppa.g.ch > dev->nr_chnls || + ppa.g.lun > dev->luns_per_chnl || + ppa.g.blk > dev->blks_per_lun)) { WARN_ON_ONCE(1); pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u", - ppa->g.ch, dev->nr_chnls, - ppa->g.lun, dev->luns_per_chnl, - ppa->g.blk, dev->blks_per_lun); + ppa.g.ch, dev->nr_chnls, + ppa.g.lun, dev->luns_per_chnl, + ppa.g.blk, dev->blks_per_lun); return; } - lun = &gn->luns[ppa->g.lun * ppa->g.ch]; - blk = &lun->vlun.blocks[ppa->g.blk]; + lun = &gn->luns[ppa.g.lun * ppa.g.ch]; + blk = &lun->vlun.blocks[ppa.g.blk]; /* will be moved to bb list on put_blk from target */ blk->state = type; } -/* mark block bad. It is expected the target recover from the error. */ +/* + * mark block bad in gennvm. It is expected that the target recovers separately + */ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) { - int i; - - if (!dev->ops->set_bb_tbl) - return; - - if (dev->ops->set_bb_tbl(dev, rqd, 1)) - return; + int bit = -1; + int max_secs = dev->ops->max_phys_sect; + void *comp_bits = &rqd->ppa_status; nvm_addr_to_generic_mode(dev, rqd); /* look up blocks and mark them as bad */ - if (rqd->nr_pages > 1) - for (i = 0; i < rqd->nr_pages; i++) - gennvm_blk_set_type(dev, &rqd->ppa_list[i], - NVM_BLK_ST_BAD); - else - gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD); + if (rqd->nr_ppas == 1) { + gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD); + return; + } + + while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs) + gennvm_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD); } static void gennvm_end_io(struct nvm_rq *rqd) { struct nvm_tgt_instance *ins = rqd->ins; - switch (rqd->error) { - case NVM_RSP_SUCCESS: - case NVM_RSP_ERR_EMPTYPAGE: - break; - case NVM_RSP_ERR_FAILWRITE: + if (rqd->error == NVM_RSP_ERR_FAILWRITE) gennvm_mark_blk_bad(rqd->dev, rqd); - } ins->tt->end_io(rqd); } @@ -539,6 +543,8 @@ static struct nvmm_type gennvm = { .submit_io = gennvm_submit_io, .erase_blk = gennvm_erase_blk, + .mark_blk = gennvm_mark_blk, + .get_lun = gennvm_get_lun, .reserve_lun = gennvm_reserve_lun, .release_lun = gennvm_release_lun, diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 3ab6495c3..2103e97a9 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -405,9 +405,8 @@ static void rrpc_block_gc(struct work_struct *work) ws_gc); struct rrpc *rrpc = gcb->rrpc; struct rrpc_block *rblk = gcb->rblk; + struct rrpc_lun *rlun = rblk->rlun; struct nvm_dev *dev = rrpc->dev; - struct nvm_lun *lun = rblk->parent->lun; - struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset]; mempool_free(gcb, rrpc->gcb_pool); pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id); @@ -508,9 +507,9 @@ static void rrpc_gc_queue(struct 
work_struct *work) ws_gc); struct rrpc *rrpc = gcb->rrpc; struct rrpc_block *rblk = gcb->rblk; + struct rrpc_lun *rlun = rblk->rlun; struct nvm_lun *lun = rblk->parent->lun; struct nvm_block *blk = rblk->parent; - struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset]; spin_lock(&rlun->lock); list_add_tail(&rblk->prio, &rlun->prio_list); @@ -696,7 +695,7 @@ static void rrpc_end_io(struct nvm_rq *rqd) { struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance); struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); - uint8_t npages = rqd->nr_pages; + uint8_t npages = rqd->nr_ppas; sector_t laddr = rrpc_get_laddr(rqd->bio) - npages; if (bio_data_dir(rqd->bio) == WRITE) @@ -711,8 +710,6 @@ static void rrpc_end_io(struct nvm_rq *rqd) if (npages > 1) nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); - if (rqd->metadata) - nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata); mempool_free(rqd, rrpc->rq_pool); } @@ -886,7 +883,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio, bio_get(bio); rqd->bio = bio; rqd->ins = &rrpc->instance; - rqd->nr_pages = nr_pages; + rqd->nr_ppas = nr_pages; rrq->flags = flags; err = nvm_submit_io(rrpc->dev, rqd); @@ -895,7 +892,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio, bio_put(bio); if (!(flags & NVM_IOTYPE_GC)) { rrpc_unlock_rq(rrpc, rqd); - if (rqd->nr_pages > 1) + if (rqd->nr_ppas > 1) nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); } @@ -1039,11 +1036,8 @@ static int rrpc_map_init(struct rrpc *rrpc) { struct nvm_dev *dev = rrpc->dev; sector_t i; - u64 slba; int ret; - slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9); - rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects); if (!rrpc->trans_map) return -ENOMEM; @@ -1065,8 +1059,8 @@ static int rrpc_map_init(struct rrpc *rrpc) return 0; /* Bring up the mapping table from device */ - ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects, rrpc_l2p_update, - rrpc); + ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects, + rrpc_l2p_update, rrpc); if (ret) { pr_err("nvm: rrpc: could not read L2P table.\n"); return -EINVAL; @@ -1207,10 +1201,6 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end) INIT_WORK(&rlun->ws_gc, rrpc_lun_gc); spin_lock_init(&rlun->lock); - - rrpc->total_blocks += dev->blks_per_lun; - rrpc->nr_sects += dev->sec_per_lun; - } return 0; @@ -1224,18 +1214,24 @@ static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin) struct nvm_dev *dev = rrpc->dev; struct nvmm_type *mt = dev->mt; sector_t size = rrpc->nr_sects * dev->sec_size; + int ret; size >>= 9; - return mt->get_area(dev, begin, size); + ret = mt->get_area(dev, begin, size); + if (!ret) + *begin >>= (ilog2(dev->sec_size) - 9); + + return ret; } static void rrpc_area_free(struct rrpc *rrpc) { struct nvm_dev *dev = rrpc->dev; struct nvmm_type *mt = dev->mt; + sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9); - mt->put_area(dev, rrpc->soffset); + mt->put_area(dev, begin); } static void rrpc_free(struct rrpc *rrpc) @@ -1268,7 +1264,7 @@ static sector_t rrpc_capacity(void *private) sector_t reserved, provisioned; /* cur, gc, and two emergency blocks for each lun */ - reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4; + reserved = rrpc->nr_luns * dev->sec_per_blk * 4; provisioned = rrpc->nr_sects - reserved; if (reserved > rrpc->nr_sects) { @@ -1388,6 +1384,8 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk, INIT_WORK(&rrpc->ws_requeue, rrpc_requeue); rrpc->nr_luns = lun_end - 
lun_begin + 1; + rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns; + rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns; /* simple round-robin strategy */ atomic_set(&rrpc->next_lun, -1); @@ -1468,12 +1466,12 @@ static struct nvm_tgt_type tt_rrpc = { static int __init rrpc_module_init(void) { - return nvm_register_target(&tt_rrpc); + return nvm_register_tgt_type(&tt_rrpc); } static void rrpc_module_exit(void) { - nvm_unregister_target(&tt_rrpc); + nvm_unregister_tgt_type(&tt_rrpc); } module_init(rrpc_module_init); diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h index 2653484a3..87e84b5fc 100644 --- a/drivers/lightnvm/rrpc.h +++ b/drivers/lightnvm/rrpc.h @@ -251,7 +251,7 @@ static inline void rrpc_unlock_laddr(struct rrpc *rrpc, static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd) { struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd); - uint8_t pages = rqd->nr_pages; + uint8_t pages = rqd->nr_ppas; BUG_ON((r->l_start + pages) > rrpc->nr_sects); diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c index 321de1f15..994697ac7 100644 --- a/drivers/lightnvm/sysblk.c +++ b/drivers/lightnvm/sysblk.c @@ -93,12 +93,51 @@ void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s, s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas); } -static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks, - void *private) +static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa, + u8 *blks, int nr_blks, + struct sysblk_scan *s) +{ + struct ppa_addr *sppa; + int i, blkid = 0; + + nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks); + if (nr_blks < 0) + return nr_blks; + + for (i = 0; i < nr_blks; i++) { + if (blks[i] == NVM_BLK_T_HOST) + return -EEXIST; + + if (blks[i] != NVM_BLK_T_FREE) + continue; + + sppa = &s->ppas[scan_ppa_idx(s->row, blkid)]; + sppa->g.ch = ppa.g.ch; + sppa->g.lun = ppa.g.lun; + sppa->g.blk = i; + s->nr_ppas++; + blkid++; + + pr_debug("nvm: use (%u %u %u) as sysblk\n", + sppa->g.ch, sppa->g.lun, sppa->g.blk); + if (blkid > MAX_BLKS_PR_SYSBLK - 1) + return 0; + } + + pr_err("nvm: sysblk failed get sysblk\n"); + return -EINVAL; +} + +static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa, + u8 *blks, int nr_blks, + struct sysblk_scan *s) { - struct sysblk_scan *s = private; int i, nr_sysblk = 0; + nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks); + if (nr_blks < 0) + return nr_blks; + for (i = 0; i < nr_blks; i++) { if (blks[i] != NVM_BLK_T_HOST) continue; @@ -119,26 +158,42 @@ static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks, } static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s, - struct ppa_addr *ppas, nvm_bb_update_fn *fn) + struct ppa_addr *ppas, int get_free) { - struct ppa_addr dppa; - int i, ret; + int i, nr_blks, ret = 0; + u8 *blks; s->nr_ppas = 0; + nr_blks = dev->blks_per_lun * dev->plane_mode; + + blks = kmalloc(nr_blks, GFP_KERNEL); + if (!blks) + return -ENOMEM; for (i = 0; i < s->nr_rows; i++) { - dppa = generic_to_dev_addr(dev, ppas[i]); s->row = i; - ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s); + ret = nvm_get_bb_tbl(dev, ppas[i], blks); if (ret) { pr_err("nvm: failed bb tbl for ppa (%u %u)\n", ppas[i].g.ch, ppas[i].g.blk); - return ret; + goto err_get; } + + if (get_free) + ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks, + s); + else + ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks, + s); + + if (ret) + goto err_get; } +err_get: + kfree(blks); return 
ret; } @@ -154,13 +209,12 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa, struct nvm_system_block *sblk) { struct nvm_system_block *cur; - int pg, cursz, ret, found = 0; + int pg, ret, found = 0; /* the full buffer for a flash page is allocated. Only the first of it * contains the system block information */ - cursz = dev->sec_size * dev->sec_per_pg * dev->nr_planes; - cur = kmalloc(cursz, GFP_KERNEL); + cur = kmalloc(dev->pfpg_size, GFP_KERNEL); if (!cur) return -ENOMEM; @@ -169,7 +223,7 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa, ppa->g.pg = ppa_to_slc(dev, pg); ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE, - cur, cursz); + cur, dev->pfpg_size); if (ret) { if (ret == NVM_RSP_ERR_EMPTYPAGE) { pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n", @@ -223,10 +277,10 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type) memset(&rqd, 0, sizeof(struct nvm_rq)); - nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas); + nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1); nvm_generic_to_addr_mode(dev, &rqd); - ret = dev->ops->set_bb_tbl(dev, &rqd, type); + ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type); nvm_free_rqd_ppalist(dev, &rqd); if (ret) { pr_err("nvm: sysblk failed bb mark\n"); @@ -236,50 +290,17 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type) return 0; } -static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks, - void *private) -{ - struct sysblk_scan *s = private; - struct ppa_addr *sppa; - int i, blkid = 0; - - for (i = 0; i < nr_blks; i++) { - if (blks[i] == NVM_BLK_T_HOST) - return -EEXIST; - - if (blks[i] != NVM_BLK_T_FREE) - continue; - - sppa = &s->ppas[scan_ppa_idx(s->row, blkid)]; - sppa->g.ch = ppa.g.ch; - sppa->g.lun = ppa.g.lun; - sppa->g.blk = i; - s->nr_ppas++; - blkid++; - - pr_debug("nvm: use (%u %u %u) as sysblk\n", - sppa->g.ch, sppa->g.lun, sppa->g.blk); - if (blkid > MAX_BLKS_PR_SYSBLK - 1) - return 0; - } - - pr_err("nvm: sysblk failed get sysblk\n"); - return -EINVAL; -} - static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info, struct sysblk_scan *s) { struct nvm_system_block nvmsb; void *buf; - int i, sect, ret, bufsz; + int i, sect, ret = 0; struct ppa_addr *ppas; nvm_cpu_to_sysblk(&nvmsb, info); - /* buffer for flash page */ - bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes; - buf = kzalloc(bufsz, GFP_KERNEL); + buf = kzalloc(dev->pfpg_size, GFP_KERNEL); if (!buf) return -ENOMEM; memcpy(buf, &nvmsb, sizeof(struct nvm_system_block)); @@ -309,7 +330,7 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info, } ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE, - NVM_IO_SLC_MODE, buf, bufsz); + NVM_IO_SLC_MODE, buf, dev->pfpg_size); if (ret) { pr_err("nvm: sysblk failed program (%u %u %u)\n", ppas[0].g.ch, @@ -319,7 +340,7 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info, } ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD, - NVM_IO_SLC_MODE, buf, bufsz); + NVM_IO_SLC_MODE, buf, dev->pfpg_size); if (ret) { pr_err("nvm: sysblk failed read (%u %u %u)\n", ppas[0].g.ch, @@ -388,7 +409,7 @@ int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info) nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); mutex_lock(&dev->mlock); - ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks); + ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0); if (ret) goto err_sysblk; @@ -448,7 
+469,7 @@ int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new) nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); mutex_lock(&dev->mlock); - ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks); + ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0); if (ret) goto err_sysblk; @@ -546,7 +567,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info) nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); mutex_lock(&dev->mlock); - ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks); + ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1); if (ret) goto err_mark; @@ -561,52 +582,49 @@ err_mark: return ret; } -struct factory_blks { - struct nvm_dev *dev; - int flags; - unsigned long *blks; -}; - static int factory_nblks(int nblks) { /* Round up to nearest BITS_PER_LONG */ return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1); } -static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun) +static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa) { int nblks = factory_nblks(dev->blks_per_lun); - return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) / + return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) / BITS_PER_LONG; } -static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks, - void *private) +static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa, + u8 *blks, int nr_blks, + unsigned long *blk_bitmap, int flags) { - struct factory_blks *f = private; - struct nvm_dev *dev = f->dev; int i, lunoff; - lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun); + nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks); + if (nr_blks < 0) + return nr_blks; + + lunoff = factory_blk_offset(dev, ppa); /* non-set bits correspond to the block must be erased */ for (i = 0; i < nr_blks; i++) { switch (blks[i]) { case NVM_BLK_T_FREE: - if (f->flags & NVM_FACTORY_ERASE_ONLY_USER) - set_bit(i, &f->blks[lunoff]); + if (flags & NVM_FACTORY_ERASE_ONLY_USER) + set_bit(i, &blk_bitmap[lunoff]); break; case NVM_BLK_T_HOST: - if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS)) - set_bit(i, &f->blks[lunoff]); + if (!(flags & NVM_FACTORY_RESET_HOST_BLKS)) + set_bit(i, &blk_bitmap[lunoff]); break; case NVM_BLK_T_GRWN_BAD: - if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS)) - set_bit(i, &f->blks[lunoff]); + if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS)) + set_bit(i, &blk_bitmap[lunoff]); break; default: - set_bit(i, &f->blks[lunoff]); + set_bit(i, &blk_bitmap[lunoff]); break; } } @@ -615,7 +633,7 @@ static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks, } static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list, - int max_ppas, struct factory_blks *f) + int max_ppas, unsigned long *blk_bitmap) { struct ppa_addr ppa; int ch, lun, blkid, idx, done = 0, ppa_cnt = 0; @@ -623,111 +641,95 @@ static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list, while (!done) { done = 1; - for (ch = 0; ch < dev->nr_chnls; ch++) { - for (lun = 0; lun < dev->luns_per_chnl; lun++) { - idx = factory_blk_offset(dev, ch, lun); - offset = &f->blks[idx]; - - blkid = find_first_zero_bit(offset, - dev->blks_per_lun); - if (blkid >= dev->blks_per_lun) - continue; - set_bit(blkid, offset); - - ppa.ppa = 0; - ppa.g.ch = ch; - ppa.g.lun = lun; - ppa.g.blk = blkid; - pr_debug("nvm: erase ppa (%u %u %u)\n", - ppa.g.ch, - ppa.g.lun, - ppa.g.blk); - - erase_list[ppa_cnt] = ppa; - ppa_cnt++; - done = 0; - - if (ppa_cnt == max_ppas) - return ppa_cnt; - } + nvm_for_each_lun_ppa(dev, 
ppa, ch, lun) { + idx = factory_blk_offset(dev, ppa); + offset = &blk_bitmap[idx]; + + blkid = find_first_zero_bit(offset, + dev->blks_per_lun); + if (blkid >= dev->blks_per_lun) + continue; + set_bit(blkid, offset); + + ppa.g.blk = blkid; + pr_debug("nvm: erase ppa (%u %u %u)\n", + ppa.g.ch, + ppa.g.lun, + ppa.g.blk); + + erase_list[ppa_cnt] = ppa; + ppa_cnt++; + done = 0; + + if (ppa_cnt == max_ppas) + return ppa_cnt; } } return ppa_cnt; } -static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, - nvm_bb_update_fn *fn, void *priv) +static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap, + int flags) { - struct ppa_addr dev_ppa; - int ret; + struct ppa_addr ppa; + int ch, lun, nr_blks, ret = 0; + u8 *blks; - dev_ppa = generic_to_dev_addr(dev, ppa); + nr_blks = dev->blks_per_lun * dev->plane_mode; + blks = kmalloc(nr_blks, GFP_KERNEL); + if (!blks) + return -ENOMEM; - ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv); - if (ret) - pr_err("nvm: failed bb tbl for ch%u lun%u\n", + nvm_for_each_lun_ppa(dev, ppa, ch, lun) { + ret = nvm_get_bb_tbl(dev, ppa, blks); + if (ret) + pr_err("nvm: failed bb tbl for ch%u lun%u\n", ppa.g.ch, ppa.g.blk); - return ret; -} -static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f) -{ - int ch, lun, ret; - struct ppa_addr ppa; - - ppa.ppa = 0; - for (ch = 0; ch < dev->nr_chnls; ch++) { - for (lun = 0; lun < dev->luns_per_chnl; lun++) { - ppa.g.ch = ch; - ppa.g.lun = lun; - - ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks, - f); - if (ret) - return ret; - } + ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap, + flags); + if (ret) + break; } - return 0; + kfree(blks); + return ret; } int nvm_dev_factory(struct nvm_dev *dev, int flags) { - struct factory_blks f; struct ppa_addr *ppas; int ppa_cnt, ret = -ENOMEM; int max_ppas = dev->ops->max_phys_sect / dev->nr_planes; struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; struct sysblk_scan s; + unsigned long *blk_bitmap; - f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns, + blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns, GFP_KERNEL); - if (!f.blks) + if (!blk_bitmap) return ret; ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL); if (!ppas) goto err_blks; - f.dev = dev; - f.flags = flags; - /* create list of blks to be erased */ - ret = nvm_fact_select_blks(dev, &f); + ret = nvm_fact_select_blks(dev, blk_bitmap, flags); if (ret) goto err_ppas; /* continue to erase until list of blks until empty */ - while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0) + while ((ppa_cnt = + nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0) nvm_erase_ppa(dev, ppas, ppa_cnt); /* mark host reserved blocks free */ if (flags & NVM_FACTORY_RESET_HOST_BLKS) { nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); mutex_lock(&dev->mlock); - ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, - sysblk_get_host_blks); + ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0); if (!ret) ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE); mutex_unlock(&dev->mlock); @@ -735,7 +737,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags) err_ppas: kfree(ppas); err_blks: - kfree(f.blks); + kfree(blk_bitmap); return ret; } EXPORT_SYMBOL(nvm_dev_factory); |
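
The patch replaces the old per-plane get_bb_tbl() callback interface with nvm_get_bb_tbl() plus nvm_bb_tbl_fold(), which collapses the plane-interleaved bad-block table into one state per virtual block: if any plane entry is bad or grown bad, the block is bad; otherwise the first plane's state wins. The standalone C sketch below (not part of the patch; the NVM_BLK_T_* values are stand-ins for the definitions in <linux/lightnvm.h>) illustrates that folding rule on a small dual-plane table.

/*
 * Illustration only -- not part of the patch. Mirrors the folding rule of
 * nvm_bb_tbl_fold(): the raw table holds plane_mode entries per block,
 * grouped block by block, and the fold is done in place.
 */
#include <stdio.h>

/* example values standing in for the NVM_BLK_T_* definitions */
#define NVM_BLK_T_FREE		0x0
#define NVM_BLK_T_BAD		0x1
#define NVM_BLK_T_GRWN_BAD	0x2
#define NVM_BLK_T_HOST		0x8

static int bb_tbl_fold(unsigned char *blks, int nr_blks, int plane_mode)
{
	int blk, pl, offset, blktype;

	if (nr_blks % plane_mode)
		return -1;

	for (blk = 0; blk < nr_blks / plane_mode; blk++) {
		offset = blk * plane_mode;
		blktype = blks[offset];

		/* a bad or grown-bad plane marks the whole virtual block bad */
		for (pl = 0; pl < plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD | NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return nr_blks / plane_mode;	/* reduced size, as in the kernel helper */
}

int main(void)
{
	/* dual-plane LUN, 4 blocks: block 1 has one bad plane, block 3 is host-owned */
	unsigned char blks[] = {
		NVM_BLK_T_FREE, NVM_BLK_T_FREE,
		NVM_BLK_T_FREE, NVM_BLK_T_BAD,
		NVM_BLK_T_FREE, NVM_BLK_T_FREE,
		NVM_BLK_T_HOST, NVM_BLK_T_HOST,
	};
	int i, folded = bb_tbl_fold(blks, 8, 2);

	for (i = 0; i < folded; i++)
		printf("blk %d: state 0x%x\n", i, blks[i]);

	return 0;
}

In the patch itself this fold runs right after nvm_get_bb_tbl() in gennvm_block_bb(), sysblk_get_free_blks()/sysblk_get_host_blks() and nvm_factory_blks(), so every consumer sees one entry per block regardless of plane mode.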