author | Matias Bjørling <m@bjorling.me> | 2016-01-12 01:49:19 -0500
committer | Jens Axboe <axboe@fb.com> | 2016-01-12 10:21:16 -0500
commit | 069368e91879a3a640cfae4bdc1f9f8cc99c93a0 (patch)
tree | 57a2d4df9b3c9d705afc058b952b3ab450364b6d /drivers/lightnvm/core.c
parent | c27278bddd75a3ee755c8e83c6bcc3fdd7271ef6 (diff)
lightnvm: move ppa erase logic to core
A device may operate in single, dual or quad plane mode. The gennvm
media manager handles this with explicit helpers that expand a single
ppa into a ppa list of 1, 2 or 4 entries. To aid the implementation of
recovery and system blocks, move this functionality directly into the
core.
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
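To make the plane-mode expansion concrete, here is a minimal, standalone C sketch of the per-plane duplication the patch below performs in `nvm_erase_ppa()`. The `struct ppa` and plane-mode values are simplified stand-ins for the kernel's `struct ppa_addr` and `NVM_PLANE_*` flags, inferred from the `plane_cnt = 1 << plane_mode` shift in the hunk:

```c
/*
 * Standalone sketch of the per-plane expansion described above.
 * struct ppa and the plane-mode values are simplified stand-ins for
 * the kernel's struct ppa_addr and NVM_PLANE_* flags; the shift
 * mirrors the "plane_cnt = 1 << plane_mode" logic in the patch below.
 */
#include <stdio.h>

struct ppa {
	unsigned int ch, lun, blk, pl;	/* channel, LUN, block, plane */
};

enum { PLANE_SINGLE = 0, PLANE_DOUBLE = 1, PLANE_QUAD = 2 };

int main(void)
{
	struct ppa ppa = { .ch = 0, .lun = 3, .blk = 42, .pl = 0 };
	struct ppa list[4];
	int plane_mode = PLANE_QUAD;
	int plane_cnt = 1 << plane_mode;	/* 1, 2 or 4 ppas */
	int i;

	/* One copy of the block address per plane, as the core now builds. */
	for (i = 0; i < plane_cnt; i++) {
		list[i] = ppa;
		list[i].pl = i;
		printf("erase ch %u lun %u blk %u pl %u\n",
		       list[i].ch, list[i].lun, list[i].blk, list[i].pl);
	}
	return 0;
}
```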
Diffstat (limited to 'drivers/lightnvm/core.c')
-rw-r--r-- | drivers/lightnvm/core.c | 67
1 file changed, 67 insertions(+), 0 deletions(-)
```diff
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 8f41b245cd55..6134339aa6cf 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -192,6 +192,73 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
+void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	int i;
+
+	if (rqd->nr_pages > 1) {
+		for (i = 0; i < rqd->nr_pages; i++)
+			rqd->ppa_list[i] = dev_to_generic_addr(dev,
+							rqd->ppa_list[i]);
+	} else {
+		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
+	}
+}
+EXPORT_SYMBOL(nvm_addr_to_generic_mode);
+
+void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	int i;
+
+	if (rqd->nr_pages > 1) {
+		for (i = 0; i < rqd->nr_pages; i++)
+			rqd->ppa_list[i] = generic_to_dev_addr(dev,
+							rqd->ppa_list[i]);
+	} else {
+		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+	}
+}
+EXPORT_SYMBOL(nvm_generic_to_addr_mode);
+
+int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa)
+{
+	int plane_cnt = 0, pl_idx, ret;
+	struct nvm_rq rqd;
+
+	if (!dev->ops->erase_block)
+		return 0;
+
+	if (dev->plane_mode == NVM_PLANE_SINGLE) {
+		rqd.nr_pages = 1;
+		rqd.ppa_addr = ppa;
+	} else {
+		plane_cnt = (1 << dev->plane_mode);
+		rqd.nr_pages = plane_cnt;
+
+		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
+							&rqd.dma_ppa_list);
+		if (!rqd.ppa_list) {
+			pr_err("nvm: failed to allocate dma memory\n");
+			return -ENOMEM;
+		}
+
+		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+			ppa.g.pl = pl_idx;
+			rqd.ppa_list[pl_idx] = ppa;
+		}
+	}
+
+	nvm_generic_to_addr_mode(dev, &rqd);
+
+	ret = dev->ops->erase_block(dev, &rqd);
+
+	if (plane_cnt)
+		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
+
+	return ret;
+}
+EXPORT_SYMBOL(nvm_erase_ppa);
+
 static int nvm_core_init(struct nvm_dev *dev)
 {
 	struct nvm_id *id = &dev->identity;
```
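The two `*_addr_mode` helpers above walk every ppa in a request and translate between the generic address layout and the device's native one. A rough, standalone model of that translation is sketched below; the field widths are invented for illustration (the kernel derives the real ones from the device's identify geometry), and `generic_to_dev`/`dev_to_generic` are hypothetical names, not the kernel functions:

```c
/*
 * Toy model of the generic <-> device ppa translation performed per
 * entry by nvm_generic_to_addr_mode()/nvm_addr_to_generic_mode().
 * Field widths are invented; the kernel reads the real ones from the
 * device's identify data.
 */
#include <stdint.h>
#include <stdio.h>

struct ppa {
	uint32_t ch, lun, pl, blk, pg;
};

enum { PG_BITS = 8, BLK_BITS = 10, PL_BITS = 2, LUN_BITS = 3, CH_BITS = 4 };

/* Pack the generic fields into one device-format address word. */
static uint64_t generic_to_dev(struct ppa p)
{
	return  (uint64_t)p.pg |
		((uint64_t)p.blk << PG_BITS) |
		((uint64_t)p.pl  << (PG_BITS + BLK_BITS)) |
		((uint64_t)p.lun << (PG_BITS + BLK_BITS + PL_BITS)) |
		((uint64_t)p.ch  << (PG_BITS + BLK_BITS + PL_BITS + LUN_BITS));
}

/* Unpack a device-format address word back into generic fields. */
static struct ppa dev_to_generic(uint64_t d)
{
	struct ppa p = {
		.pg  = d & ((1u << PG_BITS) - 1),
		.blk = (d >> PG_BITS) & ((1u << BLK_BITS) - 1),
		.pl  = (d >> (PG_BITS + BLK_BITS)) & ((1u << PL_BITS) - 1),
		.lun = (d >> (PG_BITS + BLK_BITS + PL_BITS)) &
		       ((1u << LUN_BITS) - 1),
		.ch  = (d >> (PG_BITS + BLK_BITS + PL_BITS + LUN_BITS)) &
		       ((1u << CH_BITS) - 1),
	};
	return p;
}

int main(void)
{
	struct ppa p = { .ch = 2, .lun = 1, .pl = 3, .blk = 100, .pg = 7 };
	uint64_t d = generic_to_dev(p);
	struct ppa b = dev_to_generic(d);

	printf("dev 0x%llx -> ch %u lun %u pl %u blk %u pg %u\n",
	       (unsigned long long)d, b.ch, b.lun, b.pl, b.blk, b.pg);
	return 0;
}
```

The round trip mirrors why the patch adds both directions: requests are converted to device format before submission, and device-reported addresses can be converted back to the generic form by the core rather than by each media manager.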