aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/lightnvm/core.c
diff options
context:
space:
mode:
authorJavier González <javier@cnexlabs.com>2016-11-28 16:39:14 -0500
committerJens Axboe <axboe@fb.com>2016-11-29 14:12:51 -0500
commit333ba053d145d6f9152f6b0311a345b876f0fed1 (patch)
tree5fe4cb83b1422aaa40a7ce5ff6a824fd3d323046 /drivers/lightnvm/core.c
parentda2d7cb828ce2714c603827ac5a6e1c98a02e861 (diff)
lightnvm: transform target get/set bad block
Since targets are given a virtual target device, it is necessary to translate all communication between targets and the backend device. Implement the translation layer for get/set bad block table. Signed-off-by: Javier González <javier@cnexlabs.com> Signed-off-by: Matias Bjørling <m@bjorling.me> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/lightnvm/core.c')
-rw-r--r--drivers/lightnvm/core.c58
1 file changed, 58 insertions, 0 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 07bf989d2f77..7622e3dc5d82 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -175,6 +175,26 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
175 return NULL; 175 return NULL;
176} 176}
177 177
178static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
179 struct nvm_rq *rqd)
180{
181 struct nvm_dev *dev = tgt_dev->parent;
182 int i;
183
184 if (rqd->nr_ppas > 1) {
185 for (i = 0; i < rqd->nr_ppas; i++) {
186 rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
187 rqd->ppa_list[i], TRANS_TGT_TO_DEV);
188 rqd->ppa_list[i] = generic_to_dev_addr(dev,
189 rqd->ppa_list[i]);
190 }
191 } else {
192 rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
193 TRANS_TGT_TO_DEV);
194 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
195 }
196}
197
178int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas, 198int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
179 int type) 199 int type)
180{ 200{
@@ -202,6 +222,34 @@ int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
202} 222}
203EXPORT_SYMBOL(nvm_set_bb_tbl); 223EXPORT_SYMBOL(nvm_set_bb_tbl);
204 224
225int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
226 int nr_ppas, int type)
227{
228 struct nvm_dev *dev = tgt_dev->parent;
229 struct nvm_rq rqd;
230 int ret;
231
232 if (nr_ppas > dev->ops->max_phys_sect) {
233 pr_err("nvm: unable to update all blocks atomically\n");
234 return -EINVAL;
235 }
236
237 memset(&rqd, 0, sizeof(struct nvm_rq));
238
239 nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
240 nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);
241
242 ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
243 nvm_free_rqd_ppalist(dev, &rqd);
244 if (ret) {
245 pr_err("nvm: sysblk failed bb mark\n");
246 return -EINVAL;
247 }
248
249 return 0;
250}
251EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
252
205int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev) 253int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
206{ 254{
207 struct nvm_dev *dev = tgt_dev->parent; 255 struct nvm_dev *dev = tgt_dev->parent;
@@ -519,6 +567,16 @@ int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
519} 567}
520EXPORT_SYMBOL(nvm_get_bb_tbl); 568EXPORT_SYMBOL(nvm_get_bb_tbl);
521 569
570int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
571 u8 *blks)
572{
573 struct nvm_dev *dev = tgt_dev->parent;
574
575 ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
576 return nvm_get_bb_tbl(dev, ppa, blks);
577}
578EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
579
522static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp) 580static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
523{ 581{
524 struct nvm_geo *geo = &dev->geo; 582 struct nvm_geo *geo = &dev->geo;