author     Matias Bjørling <m@bjorling.me>  2016-05-06 18:02:58 (GMT)
committer  Jens Axboe <axboe@fb.com>        2016-05-06 18:51:10 (GMT)
commit     22e8c9766a669d49cf3749d397082a5cd93374a9 (patch)
tree       c24facba8321c5359bf6f111f2ca892cf894b62a /drivers
parent     4891d120b9cd419f4350b11e1231083745dcdc8b (diff)
download   linux-22e8c9766a669d49cf3749d397082a5cd93374a9.tar.xz
lightnvm: move block fold outside of get_bb_tbl()
The get block table command returns a list of blocks and planes with their associated state. Users, such as gennvm and sysblk, manage all planes as a single virtual block. It was therefore natural to fold the bad block list before it is returned. However, to also allow users that manage blocks on a per-plane level to use the interface, get_bb_tbl() is changed to not fold by default and to instead let the caller fold if necessary.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
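For users that still want one state per virtual block, the pattern after this change is to call nvm_bb_tbl_fold() at the top of the nvm_bb_update_fn callback, as gennvm and sysblk do in the diff below. A minimal sketch of such a callback follows; the callback name example_block_bb() is hypothetical, while the callback signature and the fold call match the patch:

static int example_block_bb(struct nvm_dev *dev, struct ppa_addr ppa,
			    u8 *blks, int nr_blks, void *private)
{
	int i, nr_marked = 0;

	/* Fold the raw per-plane table into one entry per virtual block;
	 * on success the folded size (dev->blks_per_lun) is returned. */
	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] == 0)	/* free block, nothing to record */
			continue;

		nr_marked++;	/* caller-specific handling would go here */
	}

	pr_debug("nvm: %d non-free blocks in ch %u lun %u\n",
		 nr_marked, ppa.g.ch, ppa.g.lun);

	return 0;
}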
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/lightnvm/core.c         35
-rw-r--r--  drivers/lightnvm/gennvm.c       14
-rw-r--r--  drivers/lightnvm/sysblk.c       29
-rw-r--r--  drivers/nvme/host/lightnvm.c    47
4 files changed, 69 insertions, 56 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 652b8c7..4cadbe0 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -420,6 +420,41 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
}
EXPORT_SYMBOL(nvm_submit_ppa);
+/*
+ * Folds a bad block list from its plane representation to its virtual
+ * block representation. The fold is done in place and the reduced size
+ * is returned.
+ *
+ * If the state of any plane is bad or grown bad, the virtual block is
+ * marked bad. Otherwise, the first plane's state acts as the block state.
+ */
+int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+{
+ int blk, offset, pl, blktype;
+
+ if (nr_blks != dev->blks_per_lun * dev->plane_mode)
+ return -EINVAL;
+
+ for (blk = 0; blk < dev->blks_per_lun; blk++) {
+ offset = blk * dev->plane_mode;
+ blktype = blks[offset];
+
+ /* Bad blocks on any planes take precedence over other types */
+ for (pl = 0; pl < dev->plane_mode; pl++) {
+ if (blks[offset + pl] &
+ (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
+ blktype = blks[offset + pl];
+ break;
+ }
+ }
+
+ blks[blk] = blktype;
+ }
+
+ return dev->blks_per_lun;
+}
+EXPORT_SYMBOL(nvm_bb_tbl_fold);
+
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
int i;
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 72e124a..6096077 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -129,18 +129,21 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
return 0;
}
-static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
- void *private)
+static int gennvm_block_bb(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks, void *private)
{
struct gen_nvm *gn = private;
- struct nvm_dev *dev = gn->dev;
struct gen_lun *lun;
struct nvm_block *blk;
int i;
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
- for (i = 0; i < nr_blocks; i++) {
+ for (i = 0; i < nr_blks; i++) {
if (blks[i] == 0)
continue;
@@ -250,8 +253,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
ppa = generic_to_dev_addr(dev, ppa);
ret = dev->ops->get_bb_tbl(dev, ppa,
- dev->blks_per_lun,
- gennvm_block_bb, gn);
+ gennvm_block_bb, gn);
if (ret)
pr_err("gennvm: could not read BB table\n");
}
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index b6971f8..7fce588 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -93,12 +93,16 @@ void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}
-static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
+static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks, void *private)
{
struct sysblk_scan *s = private;
int i, nr_sysblk = 0;
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
for (i = 0; i < nr_blks; i++) {
if (blks[i] != NVM_BLK_T_HOST)
continue;
@@ -130,7 +134,7 @@ static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
dppa = generic_to_dev_addr(dev, ppas[i]);
s->row = i;
- ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
+ ret = dev->ops->get_bb_tbl(dev, dppa, fn, s);
if (ret) {
pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
ppas[i].g.ch,
@@ -235,13 +239,17 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
return 0;
}
-static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
+static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks, void *private)
{
struct sysblk_scan *s = private;
struct ppa_addr *sppa;
int i, blkid = 0;
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
for (i = 0; i < nr_blks; i++) {
if (blks[i] == NVM_BLK_T_HOST)
return -EEXIST;
@@ -578,13 +586,16 @@ static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
BITS_PER_LONG;
}
-static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
+static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks, void *private)
{
struct factory_blks *f = private;
- struct nvm_dev *dev = f->dev;
int i, lunoff;
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);
/* non-set bits correspond to the block must be erased */
@@ -661,7 +672,7 @@ static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
dev_ppa = generic_to_dev_addr(dev, ppa);
- ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
+ ret = dev->ops->get_bb_tbl(dev, dev_ppa, fn, priv);
if (ret)
pr_err("nvm: failed bb tbl for ch%u lun%u\n",
ppa.g.ch, ppa.g.blk);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 9461dd6..d289980 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -387,41 +387,16 @@ out:
return ret;
}
-static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
- int nr_dst_blks, u8 *dst_blks,
- int nr_src_blks, u8 *src_blks)
-{
- int blk, offset, pl, blktype;
-
- for (blk = 0; blk < nr_dst_blks; blk++) {
- offset = blk * nvmdev->plane_mode;
- blktype = src_blks[offset];
-
- /* Bad blocks on any planes take precedence over other types */
- for (pl = 0; pl < nvmdev->plane_mode; pl++) {
- if (src_blks[offset + pl] &
- (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
- blktype = src_blks[offset + pl];
- break;
- }
- }
-
- dst_blks[blk] = blktype;
- }
-}
-
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
- int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
- void *priv)
+ nvm_bb_update_fn *update_bbtbl, void *priv)
{
struct request_queue *q = nvmdev->q;
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- u8 *dst_blks = NULL;
- int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
- int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
+ int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode;
+ int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;
c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
@@ -432,12 +407,6 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
if (!bb_tbl)
return -ENOMEM;
- dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
- if (!dst_blks) {
- ret = -ENOMEM;
- goto out;
- }
-
ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
bb_tbl, tblsz);
if (ret) {
@@ -459,21 +428,17 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}
- if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
+ if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
ret = -EINVAL;
dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
- le32_to_cpu(bb_tbl->tblks), nr_src_blks);
+ le32_to_cpu(bb_tbl->tblks), nr_blks);
goto out;
}
- nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
- nr_src_blks, bb_tbl->blk);
-
ppa = dev_to_generic_addr(nvmdev, ppa);
- ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);
+ ret = update_bbtbl(nvmdev, ppa, bb_tbl->blk, nr_blks, priv);
out:
- kfree(dst_blks);
kfree(bb_tbl);
return ret;
}
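As a standalone illustration of the fold semantics (not part of the patch), the sketch below repeats the nvm_bb_tbl_fold() loop in user space on a made-up two-plane table. The BLK_T_* constants are defined locally and only assumed to mirror the kernel's NVM_BLK_T_* flag values; the loop body follows the version added to core.c above.

#include <stdio.h>

/* Locally defined for illustration; assumed to mirror the kernel's
 * NVM_BLK_T_* flags (bad and grown-bad are distinct flag bits). */
#define BLK_T_FREE     0x0
#define BLK_T_BAD      0x1
#define BLK_T_GRWN_BAD 0x2

/* Same in-place fold as nvm_bb_tbl_fold(): one entry per virtual block;
 * a bad or grown-bad plane wins, otherwise the first plane's state is used. */
static int fold(unsigned char *blks, int blks_per_lun, int plane_mode)
{
	int blk, offset, pl, blktype;

	for (blk = 0; blk < blks_per_lun; blk++) {
		offset = blk * plane_mode;
		blktype = blks[offset];

		for (pl = 0; pl < plane_mode; pl++) {
			if (blks[offset + pl] & (BLK_T_BAD | BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return blks_per_lun;
}

int main(void)
{
	/* Two planes, three blocks: block 1 is bad on plane 0,
	 * block 2 is grown bad on plane 1 (values are made up). */
	unsigned char blks[] = { BLK_T_FREE, BLK_T_FREE,
				 BLK_T_BAD,  BLK_T_FREE,
				 BLK_T_FREE, BLK_T_GRWN_BAD };
	int i, n = fold(blks, 3, 2);

	for (i = 0; i < n; i++)
		printf("virtual block %d: state 0x%x\n", i, blks[i]);

	return 0;
}

Running this prints FREE (0x0) for block 0, BAD (0x1) for block 1 and GRWN_BAD (0x2) for block 2, matching the rule described in the nvm_bb_tbl_fold() comment.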