aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/lightnvm/core.c
diff options
context:
space:
mode:
authorMatias Bjørling <m@bjorling.me>2016-01-12 01:49:20 -0500
committerJens Axboe <axboe@fb.com>2016-01-12 10:21:16 -0500
commitabd805ec9f51f37db9da63dda44c3f4b4ae8ad57 (patch)
tree5298213b1795db79ebd1769e0d2c66b865d71e5a /drivers/lightnvm/core.c
parent069368e91879a3a640cfae4bdc1f9f8cc99c93a0 (diff)
lightnvm: refactor rqd ppa list into set/free
A device may be driven in single, double or quad plane mode. In that case, the rqd must have either one, two, or four PPAs set for a single PPA sent to the device. Refactor this logic into their own functions to be shared by program/erase/read in the core. Signed-off-by: Matias Bjørling <m@bjorling.me> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/lightnvm/core.c')
-rw-r--r--drivers/lightnvm/core.c71
1 file changed, 50 insertions, 21 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 6134339aa6cf..081b0f59b773 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -220,40 +220,69 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
220} 220}
221EXPORT_SYMBOL(nvm_generic_to_addr_mode); 221EXPORT_SYMBOL(nvm_generic_to_addr_mode);
222 222
223int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa) 223int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
224 struct ppa_addr *ppas, int nr_ppas)
224{ 225{
225 int plane_cnt = 0, pl_idx, ret; 226 int i, plane_cnt, pl_idx;
226 struct nvm_rq rqd; 227
228 if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
229 rqd->nr_pages = 1;
230 rqd->ppa_addr = ppas[0];
227 231
228 if (!dev->ops->erase_block)
229 return 0; 232 return 0;
233 }
230 234
231 if (dev->plane_mode == NVM_PLANE_SINGLE) { 235 plane_cnt = (1 << dev->plane_mode);
232 rqd.nr_pages = 1; 236 rqd->nr_pages = plane_cnt * nr_ppas;
233 rqd.ppa_addr = ppa; 237
234 } else { 238 if (dev->ops->max_phys_sect < rqd->nr_pages)
235 plane_cnt = (1 << dev->plane_mode); 239 return -EINVAL;
236 rqd.nr_pages = plane_cnt; 240
237 241 rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
238 rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, 242 if (!rqd->ppa_list) {
239 &rqd.dma_ppa_list); 243 pr_err("nvm: failed to allocate dma memory\n");
240 if (!rqd.ppa_list) { 244 return -ENOMEM;
241 pr_err("nvm: failed to allocate dma memory\n"); 245 }
242 return -ENOMEM;
243 }
244 246
247 for (i = 0; i < nr_ppas; i++) {
245 for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) { 248 for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
246 ppa.g.pl = pl_idx; 249 ppas[i].g.pl = pl_idx;
247 rqd.ppa_list[pl_idx] = ppa; 250 rqd->ppa_list[(i * plane_cnt) + pl_idx] = ppas[i];
248 } 251 }
249 } 252 }
250 253
254 return 0;
255}
256EXPORT_SYMBOL(nvm_set_rqd_ppalist);
257
258void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
259{
260 if (!rqd->ppa_list)
261 return;
262
263 nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
264}
265EXPORT_SYMBOL(nvm_free_rqd_ppalist);
266
267int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa)
268{
269 struct nvm_rq rqd;
270 int ret;
271
272 if (!dev->ops->erase_block)
273 return 0;
274
275 memset(&rqd, 0, sizeof(struct nvm_rq));
276
277 ret = nvm_set_rqd_ppalist(dev, &rqd, &ppa, 1);
278 if (ret)
279 return ret;
280
251 nvm_generic_to_addr_mode(dev, &rqd); 281 nvm_generic_to_addr_mode(dev, &rqd);
252 282
253 ret = dev->ops->erase_block(dev, &rqd); 283 ret = dev->ops->erase_block(dev, &rqd);
254 284
255 if (plane_cnt) 285 nvm_free_rqd_ppalist(dev, &rqd);
256 nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
257 286
258 return ret; 287 return ret;
259} 288}