summaryrefslogtreecommitdiffstats
path: root/drivers/lightnvm/pblk-init.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-11-14 18:32:19 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-11-14 18:32:19 -0500
commite2c5923c349c1738fe8fda980874d93f6fb2e5b6 (patch)
treeb97a90170c45211bcc437761653aa8016c34afcd /drivers/lightnvm/pblk-init.c
parentabc36be236358162202e86ad88616ff95a755101 (diff)
parenta04b5de5050ab8b891128eb2c47a0916fe8622e1 (diff)
Merge branch 'for-4.15/block' of git://git.kernel.dk/linux-block
Pull core block layer updates from Jens Axboe: "This is the main pull request for block storage for 4.15-rc1. Nothing out of the ordinary in here, and no API changes or anything like that. Just various new features for drivers, core changes, etc. In particular, this pull request contains: - A patch series from Bart, closing the hole on blk/scsi-mq queue quiescing. - A series from Christoph, building towards hidden gendisks (for multipath) and ability to move bio chains around. - NVMe - Support for native multipath for NVMe (Christoph). - Userspace notifications for AENs (Keith). - Command side-effects support (Keith). - SGL support (Chaitanya Kulkarni) - FC fixes and improvements (James Smart) - Lots of fixes and tweaks (Various) - bcache - New maintainer (Michael Lyle) - Writeback control improvements (Michael) - Various fixes (Coly, Elena, Eric, Liang, et al) - lightnvm updates, mostly centered around the pblk interface (Javier, Hans, and Rakesh). - Removal of unused bio/bvec kmap atomic interfaces (me, Christoph) - Writeback series that fix the much discussed hundreds of millions of sync-all units. This goes all the way, as discussed previously (me). - Fix for missing wakeup on writeback timer adjustments (Yafang Shao). - Fix laptop mode on blk-mq (me). - {mq,name} tuple lookup for IO schedulers, allowing us to have alias names. This means you can use 'deadline' on both !mq and on mq (where it's called mq-deadline). (me). - blktrace race fix, oopsing on sg load (me). - blk-mq optimizations (me). - Obscure waitqueue race fix for kyber (Omar). - NBD fixes (Josef). - Disable writeback throttling by default on bfq, like we do on cfq (Luca Miccio). - Series from Ming that enable us to treat flush requests on blk-mq like any other request. This is a really nice cleanup. - Series from Ming that improves merging on blk-mq with schedulers, getting us closer to flipping the switch on scsi-mq again. - BFQ updates (Paolo). 
- blk-mq atomic flags memory ordering fixes (Peter Z). - Loop cgroup support (Shaohua). - Lots of minor fixes from lots of different folks, both for core and driver code" * 'for-4.15/block' of git://git.kernel.dk/linux-block: (294 commits) nvme: fix visibility of "uuid" ns attribute blk-mq: fixup some comment typos and lengths ide: ide-atapi: fix compile error with defining macro DEBUG blk-mq: improve tag waiting setup for non-shared tags brd: remove unused brd_mutex blk-mq: only run the hardware queue if IO is pending block: avoid null pointer dereference on null disk fs: guard_bio_eod() needs to consider partitions xtensa/simdisk: fix compile error nvme: expose subsys attribute to sysfs nvme: create 'slaves' and 'holders' entries for hidden controllers block: create 'slaves' and 'holders' entries for hidden gendisks nvme: also expose the namespace identification sysfs files for mpath nodes nvme: implement multipath access to nvme subsystems nvme: track shared namespaces nvme: introduce a nvme_ns_ids structure nvme: track subsystems block, nvme: Introduce blk_mq_req_flags_t block, scsi: Make SCSI quiesce and resume work reliably block: Add the QUEUE_FLAG_PREEMPT_ONLY request queue flag ...
Diffstat (limited to 'drivers/lightnvm/pblk-init.c')
-rw-r--r--drivers/lightnvm/pblk-init.c197
1 files changed, 121 insertions, 76 deletions
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 1b0f61233c21..f62112ba5482 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -20,8 +20,8 @@
20 20
21#include "pblk.h" 21#include "pblk.h"
22 22
23static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache, 23static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
24 *pblk_w_rq_cache, *pblk_line_meta_cache; 24 *pblk_w_rq_cache;
25static DECLARE_RWSEM(pblk_lock); 25static DECLARE_RWSEM(pblk_lock);
26struct bio_set *pblk_bio_set; 26struct bio_set *pblk_bio_set;
27 27
@@ -46,7 +46,7 @@ static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
46 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB 46 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
47 * available for user I/O. 47 * available for user I/O.
48 */ 48 */
49 if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl))) 49 if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
50 blk_queue_split(q, &bio); 50 blk_queue_split(q, &bio);
51 51
52 return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER); 52 return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
@@ -76,6 +76,28 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
76 return BLK_QC_T_NONE; 76 return BLK_QC_T_NONE;
77} 77}
78 78
79static size_t pblk_trans_map_size(struct pblk *pblk)
80{
81 int entry_size = 8;
82
83 if (pblk->ppaf_bitsize < 32)
84 entry_size = 4;
85
86 return entry_size * pblk->rl.nr_secs;
87}
88
89#ifdef CONFIG_NVM_DEBUG
90static u32 pblk_l2p_crc(struct pblk *pblk)
91{
92 size_t map_size;
93 u32 crc = ~(u32)0;
94
95 map_size = pblk_trans_map_size(pblk);
96 crc = crc32_le(crc, pblk->trans_map, map_size);
97 return crc;
98}
99#endif
100
79static void pblk_l2p_free(struct pblk *pblk) 101static void pblk_l2p_free(struct pblk *pblk)
80{ 102{
81 vfree(pblk->trans_map); 103 vfree(pblk->trans_map);
@@ -85,12 +107,10 @@ static int pblk_l2p_init(struct pblk *pblk)
85{ 107{
86 sector_t i; 108 sector_t i;
87 struct ppa_addr ppa; 109 struct ppa_addr ppa;
88 int entry_size = 8; 110 size_t map_size;
89 111
90 if (pblk->ppaf_bitsize < 32) 112 map_size = pblk_trans_map_size(pblk);
91 entry_size = 4; 113 pblk->trans_map = vmalloc(map_size);
92
93 pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
94 if (!pblk->trans_map) 114 if (!pblk->trans_map)
95 return -ENOMEM; 115 return -ENOMEM;
96 116
@@ -132,7 +152,6 @@ static int pblk_rwb_init(struct pblk *pblk)
132} 152}
133 153
134/* Minimum pages needed within a lun */ 154/* Minimum pages needed within a lun */
135#define PAGE_POOL_SIZE 16
136#define ADDR_POOL_SIZE 64 155#define ADDR_POOL_SIZE 64
137 156
138static int pblk_set_ppaf(struct pblk *pblk) 157static int pblk_set_ppaf(struct pblk *pblk)
@@ -182,12 +201,10 @@ static int pblk_set_ppaf(struct pblk *pblk)
182 201
183static int pblk_init_global_caches(struct pblk *pblk) 202static int pblk_init_global_caches(struct pblk *pblk)
184{ 203{
185 char cache_name[PBLK_CACHE_NAME_LEN];
186
187 down_write(&pblk_lock); 204 down_write(&pblk_lock);
188 pblk_blk_ws_cache = kmem_cache_create("pblk_blk_ws", 205 pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
189 sizeof(struct pblk_line_ws), 0, 0, NULL); 206 sizeof(struct pblk_line_ws), 0, 0, NULL);
190 if (!pblk_blk_ws_cache) { 207 if (!pblk_ws_cache) {
191 up_write(&pblk_lock); 208 up_write(&pblk_lock);
192 return -ENOMEM; 209 return -ENOMEM;
193 } 210 }
@@ -195,7 +212,7 @@ static int pblk_init_global_caches(struct pblk *pblk)
195 pblk_rec_cache = kmem_cache_create("pblk_rec", 212 pblk_rec_cache = kmem_cache_create("pblk_rec",
196 sizeof(struct pblk_rec_ctx), 0, 0, NULL); 213 sizeof(struct pblk_rec_ctx), 0, 0, NULL);
197 if (!pblk_rec_cache) { 214 if (!pblk_rec_cache) {
198 kmem_cache_destroy(pblk_blk_ws_cache); 215 kmem_cache_destroy(pblk_ws_cache);
199 up_write(&pblk_lock); 216 up_write(&pblk_lock);
200 return -ENOMEM; 217 return -ENOMEM;
201 } 218 }
@@ -203,7 +220,7 @@ static int pblk_init_global_caches(struct pblk *pblk)
203 pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size, 220 pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
204 0, 0, NULL); 221 0, 0, NULL);
205 if (!pblk_g_rq_cache) { 222 if (!pblk_g_rq_cache) {
206 kmem_cache_destroy(pblk_blk_ws_cache); 223 kmem_cache_destroy(pblk_ws_cache);
207 kmem_cache_destroy(pblk_rec_cache); 224 kmem_cache_destroy(pblk_rec_cache);
208 up_write(&pblk_lock); 225 up_write(&pblk_lock);
209 return -ENOMEM; 226 return -ENOMEM;
@@ -212,30 +229,25 @@ static int pblk_init_global_caches(struct pblk *pblk)
212 pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size, 229 pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
213 0, 0, NULL); 230 0, 0, NULL);
214 if (!pblk_w_rq_cache) { 231 if (!pblk_w_rq_cache) {
215 kmem_cache_destroy(pblk_blk_ws_cache); 232 kmem_cache_destroy(pblk_ws_cache);
216 kmem_cache_destroy(pblk_rec_cache); 233 kmem_cache_destroy(pblk_rec_cache);
217 kmem_cache_destroy(pblk_g_rq_cache); 234 kmem_cache_destroy(pblk_g_rq_cache);
218 up_write(&pblk_lock); 235 up_write(&pblk_lock);
219 return -ENOMEM; 236 return -ENOMEM;
220 } 237 }
221
222 snprintf(cache_name, sizeof(cache_name), "pblk_line_m_%s",
223 pblk->disk->disk_name);
224 pblk_line_meta_cache = kmem_cache_create(cache_name,
225 pblk->lm.sec_bitmap_len, 0, 0, NULL);
226 if (!pblk_line_meta_cache) {
227 kmem_cache_destroy(pblk_blk_ws_cache);
228 kmem_cache_destroy(pblk_rec_cache);
229 kmem_cache_destroy(pblk_g_rq_cache);
230 kmem_cache_destroy(pblk_w_rq_cache);
231 up_write(&pblk_lock);
232 return -ENOMEM;
233 }
234 up_write(&pblk_lock); 238 up_write(&pblk_lock);
235 239
236 return 0; 240 return 0;
237} 241}
238 242
243static void pblk_free_global_caches(struct pblk *pblk)
244{
245 kmem_cache_destroy(pblk_ws_cache);
246 kmem_cache_destroy(pblk_rec_cache);
247 kmem_cache_destroy(pblk_g_rq_cache);
248 kmem_cache_destroy(pblk_w_rq_cache);
249}
250
239static int pblk_core_init(struct pblk *pblk) 251static int pblk_core_init(struct pblk *pblk)
240{ 252{
241 struct nvm_tgt_dev *dev = pblk->dev; 253 struct nvm_tgt_dev *dev = pblk->dev;
@@ -247,70 +259,80 @@ static int pblk_core_init(struct pblk *pblk)
247 if (pblk_init_global_caches(pblk)) 259 if (pblk_init_global_caches(pblk))
248 return -ENOMEM; 260 return -ENOMEM;
249 261
250 pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0); 262 /* Internal bios can be at most the sectors signaled by the device. */
251 if (!pblk->page_pool) 263 pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
252 return -ENOMEM; 264 0);
265 if (!pblk->page_bio_pool)
266 goto free_global_caches;
253 267
254 pblk->line_ws_pool = mempool_create_slab_pool(PBLK_WS_POOL_SIZE, 268 pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
255 pblk_blk_ws_cache); 269 pblk_ws_cache);
256 if (!pblk->line_ws_pool) 270 if (!pblk->gen_ws_pool)
257 goto free_page_pool; 271 goto free_page_bio_pool;
258 272
259 pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache); 273 pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
260 if (!pblk->rec_pool) 274 if (!pblk->rec_pool)
261 goto free_blk_ws_pool; 275 goto free_gen_ws_pool;
262 276
263 pblk->g_rq_pool = mempool_create_slab_pool(PBLK_READ_REQ_POOL_SIZE, 277 pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
264 pblk_g_rq_cache); 278 pblk_g_rq_cache);
265 if (!pblk->g_rq_pool) 279 if (!pblk->r_rq_pool)
266 goto free_rec_pool; 280 goto free_rec_pool;
267 281
268 pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns * 2, 282 pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
283 pblk_g_rq_cache);
284 if (!pblk->e_rq_pool)
285 goto free_r_rq_pool;
286
287 pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
269 pblk_w_rq_cache); 288 pblk_w_rq_cache);
270 if (!pblk->w_rq_pool) 289 if (!pblk->w_rq_pool)
271 goto free_g_rq_pool; 290 goto free_e_rq_pool;
272
273 pblk->line_meta_pool =
274 mempool_create_slab_pool(PBLK_META_POOL_SIZE,
275 pblk_line_meta_cache);
276 if (!pblk->line_meta_pool)
277 goto free_w_rq_pool;
278 291
279 pblk->close_wq = alloc_workqueue("pblk-close-wq", 292 pblk->close_wq = alloc_workqueue("pblk-close-wq",
280 WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS); 293 WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
281 if (!pblk->close_wq) 294 if (!pblk->close_wq)
282 goto free_line_meta_pool; 295 goto free_w_rq_pool;
283 296
284 pblk->bb_wq = alloc_workqueue("pblk-bb-wq", 297 pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
285 WQ_MEM_RECLAIM | WQ_UNBOUND, 0); 298 WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
286 if (!pblk->bb_wq) 299 if (!pblk->bb_wq)
287 goto free_close_wq; 300 goto free_close_wq;
288 301
289 if (pblk_set_ppaf(pblk)) 302 pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
303 WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
304 if (!pblk->r_end_wq)
290 goto free_bb_wq; 305 goto free_bb_wq;
291 306
307 if (pblk_set_ppaf(pblk))
308 goto free_r_end_wq;
309
292 if (pblk_rwb_init(pblk)) 310 if (pblk_rwb_init(pblk))
293 goto free_bb_wq; 311 goto free_r_end_wq;
294 312
295 INIT_LIST_HEAD(&pblk->compl_list); 313 INIT_LIST_HEAD(&pblk->compl_list);
296 return 0; 314 return 0;
297 315
316free_r_end_wq:
317 destroy_workqueue(pblk->r_end_wq);
298free_bb_wq: 318free_bb_wq:
299 destroy_workqueue(pblk->bb_wq); 319 destroy_workqueue(pblk->bb_wq);
300free_close_wq: 320free_close_wq:
301 destroy_workqueue(pblk->close_wq); 321 destroy_workqueue(pblk->close_wq);
302free_line_meta_pool:
303 mempool_destroy(pblk->line_meta_pool);
304free_w_rq_pool: 322free_w_rq_pool:
305 mempool_destroy(pblk->w_rq_pool); 323 mempool_destroy(pblk->w_rq_pool);
306free_g_rq_pool: 324free_e_rq_pool:
307 mempool_destroy(pblk->g_rq_pool); 325 mempool_destroy(pblk->e_rq_pool);
326free_r_rq_pool:
327 mempool_destroy(pblk->r_rq_pool);
308free_rec_pool: 328free_rec_pool:
309 mempool_destroy(pblk->rec_pool); 329 mempool_destroy(pblk->rec_pool);
310free_blk_ws_pool: 330free_gen_ws_pool:
311 mempool_destroy(pblk->line_ws_pool); 331 mempool_destroy(pblk->gen_ws_pool);
312free_page_pool: 332free_page_bio_pool:
313 mempool_destroy(pblk->page_pool); 333 mempool_destroy(pblk->page_bio_pool);
334free_global_caches:
335 pblk_free_global_caches(pblk);
314 return -ENOMEM; 336 return -ENOMEM;
315} 337}
316 338
@@ -319,21 +341,20 @@ static void pblk_core_free(struct pblk *pblk)
319 if (pblk->close_wq) 341 if (pblk->close_wq)
320 destroy_workqueue(pblk->close_wq); 342 destroy_workqueue(pblk->close_wq);
321 343
344 if (pblk->r_end_wq)
345 destroy_workqueue(pblk->r_end_wq);
346
322 if (pblk->bb_wq) 347 if (pblk->bb_wq)
323 destroy_workqueue(pblk->bb_wq); 348 destroy_workqueue(pblk->bb_wq);
324 349
325 mempool_destroy(pblk->page_pool); 350 mempool_destroy(pblk->page_bio_pool);
326 mempool_destroy(pblk->line_ws_pool); 351 mempool_destroy(pblk->gen_ws_pool);
327 mempool_destroy(pblk->rec_pool); 352 mempool_destroy(pblk->rec_pool);
328 mempool_destroy(pblk->g_rq_pool); 353 mempool_destroy(pblk->r_rq_pool);
354 mempool_destroy(pblk->e_rq_pool);
329 mempool_destroy(pblk->w_rq_pool); 355 mempool_destroy(pblk->w_rq_pool);
330 mempool_destroy(pblk->line_meta_pool);
331 356
332 kmem_cache_destroy(pblk_blk_ws_cache); 357 pblk_free_global_caches(pblk);
333 kmem_cache_destroy(pblk_rec_cache);
334 kmem_cache_destroy(pblk_g_rq_cache);
335 kmem_cache_destroy(pblk_w_rq_cache);
336 kmem_cache_destroy(pblk_line_meta_cache);
337} 358}
338 359
339static void pblk_luns_free(struct pblk *pblk) 360static void pblk_luns_free(struct pblk *pblk)
@@ -372,13 +393,11 @@ static void pblk_line_meta_free(struct pblk *pblk)
372 kfree(l_mg->bb_aux); 393 kfree(l_mg->bb_aux);
373 kfree(l_mg->vsc_list); 394 kfree(l_mg->vsc_list);
374 395
375 spin_lock(&l_mg->free_lock);
376 for (i = 0; i < PBLK_DATA_LINES; i++) { 396 for (i = 0; i < PBLK_DATA_LINES; i++) {
377 kfree(l_mg->sline_meta[i]); 397 kfree(l_mg->sline_meta[i]);
378 pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type); 398 pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
379 kfree(l_mg->eline_meta[i]); 399 kfree(l_mg->eline_meta[i]);
380 } 400 }
381 spin_unlock(&l_mg->free_lock);
382 401
383 kfree(pblk->lines); 402 kfree(pblk->lines);
384} 403}
@@ -507,6 +526,13 @@ static int pblk_lines_configure(struct pblk *pblk, int flags)
507 } 526 }
508 } 527 }
509 528
529#ifdef CONFIG_NVM_DEBUG
530 pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
531#endif
532
533 /* Free full lines directly as GC has not been started yet */
534 pblk_gc_free_full_lines(pblk);
535
510 if (!line) { 536 if (!line) {
511 /* Configure next line for user data */ 537 /* Configure next line for user data */
512 line = pblk_line_get_first_data(pblk); 538 line = pblk_line_get_first_data(pblk);
@@ -630,7 +656,10 @@ static int pblk_lines_alloc_metadata(struct pblk *pblk)
630 656
631fail_free_emeta: 657fail_free_emeta:
632 while (--i >= 0) { 658 while (--i >= 0) {
633 vfree(l_mg->eline_meta[i]->buf); 659 if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
660 vfree(l_mg->eline_meta[i]->buf);
661 else
662 kfree(l_mg->eline_meta[i]->buf);
634 kfree(l_mg->eline_meta[i]); 663 kfree(l_mg->eline_meta[i]);
635 } 664 }
636 665
@@ -681,8 +710,8 @@ static int pblk_lines_init(struct pblk *pblk)
681 lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long); 710 lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
682 lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long); 711 lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
683 lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long); 712 lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
684 lm->high_thrs = lm->sec_per_line / 2; 713 lm->mid_thrs = lm->sec_per_line / 2;
685 lm->mid_thrs = lm->sec_per_line / 4; 714 lm->high_thrs = lm->sec_per_line / 4;
686 lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs; 715 lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;
687 716
688 /* Calculate necessary pages for smeta. See comment over struct 717 /* Calculate necessary pages for smeta. See comment over struct
@@ -713,9 +742,13 @@ add_emeta_page:
713 goto add_emeta_page; 742 goto add_emeta_page;
714 } 743 }
715 744
716 lm->emeta_bb = geo->nr_luns - i; 745 lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;
717 lm->min_blk_line = 1 + DIV_ROUND_UP(lm->smeta_sec + lm->emeta_sec[0], 746
718 geo->sec_per_blk); 747 lm->min_blk_line = 1;
748 if (geo->nr_luns > 1)
749 lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
750 lm->emeta_sec[0], geo->sec_per_blk);
751
719 if (lm->min_blk_line > lm->blk_per_line) { 752 if (lm->min_blk_line > lm->blk_per_line) {
720 pr_err("pblk: config. not supported. Min. LUN in line:%d\n", 753 pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
721 lm->blk_per_line); 754 lm->blk_per_line);
@@ -890,6 +923,11 @@ static void pblk_exit(void *private)
890 down_write(&pblk_lock); 923 down_write(&pblk_lock);
891 pblk_gc_exit(pblk); 924 pblk_gc_exit(pblk);
892 pblk_tear_down(pblk); 925 pblk_tear_down(pblk);
926
927#ifdef CONFIG_NVM_DEBUG
928 pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
929#endif
930
893 pblk_free(pblk); 931 pblk_free(pblk);
894 up_write(&pblk_lock); 932 up_write(&pblk_lock);
895} 933}
@@ -911,7 +949,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
911 int ret; 949 int ret;
912 950
913 if (dev->identity.dom & NVM_RSP_L2P) { 951 if (dev->identity.dom & NVM_RSP_L2P) {
914 pr_err("pblk: device-side L2P table not supported. (%x)\n", 952 pr_err("pblk: host-side L2P table not supported. (%x)\n",
915 dev->identity.dom); 953 dev->identity.dom);
916 return ERR_PTR(-EINVAL); 954 return ERR_PTR(-EINVAL);
917 } 955 }
@@ -923,6 +961,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
923 pblk->dev = dev; 961 pblk->dev = dev;
924 pblk->disk = tdisk; 962 pblk->disk = tdisk;
925 pblk->state = PBLK_STATE_RUNNING; 963 pblk->state = PBLK_STATE_RUNNING;
964 pblk->gc.gc_enabled = 0;
926 965
927 spin_lock_init(&pblk->trans_lock); 966 spin_lock_init(&pblk->trans_lock);
928 spin_lock_init(&pblk->lock); 967 spin_lock_init(&pblk->lock);
@@ -944,6 +983,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
944 atomic_long_set(&pblk->recov_writes, 0); 983 atomic_long_set(&pblk->recov_writes, 0);
945 atomic_long_set(&pblk->recov_writes, 0); 984 atomic_long_set(&pblk->recov_writes, 0);
946 atomic_long_set(&pblk->recov_gc_writes, 0); 985 atomic_long_set(&pblk->recov_gc_writes, 0);
986 atomic_long_set(&pblk->recov_gc_reads, 0);
947#endif 987#endif
948 988
949 atomic_long_set(&pblk->read_failed, 0); 989 atomic_long_set(&pblk->read_failed, 0);
@@ -1012,6 +1052,10 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
1012 pblk->rwb.nr_entries); 1052 pblk->rwb.nr_entries);
1013 1053
1014 wake_up_process(pblk->writer_ts); 1054 wake_up_process(pblk->writer_ts);
1055
1056 /* Check if we need to start GC */
1057 pblk_gc_should_kick(pblk);
1058
1015 return pblk; 1059 return pblk;
1016 1060
1017fail_stop_writer: 1061fail_stop_writer:
@@ -1044,6 +1088,7 @@ static struct nvm_tgt_type tt_pblk = {
1044 1088
1045 .sysfs_init = pblk_sysfs_init, 1089 .sysfs_init = pblk_sysfs_init,
1046 .sysfs_exit = pblk_sysfs_exit, 1090 .sysfs_exit = pblk_sysfs_exit,
1091 .owner = THIS_MODULE,
1047}; 1092};
1048 1093
1049static int __init pblk_module_init(void) 1094static int __init pblk_module_init(void)