summaryrefslogtreecommitdiffstats
path: root/drivers/lightnvm/pblk-rb.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-11-14 18:32:19 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-11-14 18:32:19 -0500
commite2c5923c349c1738fe8fda980874d93f6fb2e5b6 (patch)
treeb97a90170c45211bcc437761653aa8016c34afcd /drivers/lightnvm/pblk-rb.c
parentabc36be236358162202e86ad88616ff95a755101 (diff)
parenta04b5de5050ab8b891128eb2c47a0916fe8622e1 (diff)
Merge branch 'for-4.15/block' of git://git.kernel.dk/linux-block
Pull core block layer updates from Jens Axboe: "This is the main pull request for block storage for 4.15-rc1. Nothing out of the ordinary in here, and no API changes or anything like that. Just various new features for drivers, core changes, etc. In particular, this pull request contains: - A patch series from Bart, closing the hole on blk/scsi-mq queue quiescing. - A series from Christoph, building towards hidden gendisks (for multipath) and ability to move bio chains around. - NVMe - Support for native multipath for NVMe (Christoph). - Userspace notifications for AENs (Keith). - Command side-effects support (Keith). - SGL support (Chaitanya Kulkarni) - FC fixes and improvements (James Smart) - Lots of fixes and tweaks (Various) - bcache - New maintainer (Michael Lyle) - Writeback control improvements (Michael) - Various fixes (Coly, Elena, Eric, Liang, et al) - lightnvm updates, mostly centered around the pblk interface (Javier, Hans, and Rakesh). - Removal of unused bio/bvec kmap atomic interfaces (me, Christoph) - Writeback series that fix the much discussed hundreds of millions of sync-all units. This goes all the way, as discussed previously (me). - Fix for missing wakeup on writeback timer adjustments (Yafang Shao). - Fix laptop mode on blk-mq (me). - {mq,name} tuple lookup for IO schedulers, allowing us to have alias names. This means you can use 'deadline' on both !mq and on mq (where it's called mq-deadline). (me). - blktrace race fix, oopsing on sg load (me). - blk-mq optimizations (me). - Obscure waitqueue race fix for kyber (Omar). - NBD fixes (Josef). - Disable writeback throttling by default on bfq, like we do on cfq (Luca Miccio). - Series from Ming that enable us to treat flush requests on blk-mq like any other request. This is a really nice cleanup. - Series from Ming that improves merging on blk-mq with schedulers, getting us closer to flipping the switch on scsi-mq again. - BFQ updates (Paolo). 
- blk-mq atomic flags memory ordering fixes (Peter Z). - Loop cgroup support (Shaohua). - Lots of minor fixes from lots of different folks, both for core and driver code" * 'for-4.15/block' of git://git.kernel.dk/linux-block: (294 commits) nvme: fix visibility of "uuid" ns attribute blk-mq: fixup some comment typos and lengths ide: ide-atapi: fix compile error with defining macro DEBUG blk-mq: improve tag waiting setup for non-shared tags brd: remove unused brd_mutex blk-mq: only run the hardware queue if IO is pending block: avoid null pointer dereference on null disk fs: guard_bio_eod() needs to consider partitions xtensa/simdisk: fix compile error nvme: expose subsys attribute to sysfs nvme: create 'slaves' and 'holders' entries for hidden controllers block: create 'slaves' and 'holders' entries for hidden gendisks nvme: also expose the namespace identification sysfs files for mpath nodes nvme: implement multipath access to nvme subsystems nvme: track shared namespaces nvme: introduce a nvme_ns_ids structure nvme: track subsystems block, nvme: Introduce blk_mq_req_flags_t block, scsi: Make SCSI quiesce and resume work reliably block: Add the QUEUE_FLAG_PREEMPT_ONLY request queue flag ...
Diffstat (limited to 'drivers/lightnvm/pblk-rb.c')
-rw-r--r--drivers/lightnvm/pblk-rb.c30
1 files changed, 12 insertions, 18 deletions
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 9bc32578a766..b8f78e401482 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -201,8 +201,7 @@ unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
201 return subm; 201 return subm;
202} 202}
203 203
204static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd, 204static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
205 unsigned int to_update)
206{ 205{
207 struct pblk *pblk = container_of(rb, struct pblk, rwb); 206 struct pblk *pblk = container_of(rb, struct pblk, rwb);
208 struct pblk_line *line; 207 struct pblk_line *line;
@@ -213,7 +212,7 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
213 int flags; 212 int flags;
214 213
215 for (i = 0; i < to_update; i++) { 214 for (i = 0; i < to_update; i++) {
216 entry = &rb->entries[*l2p_upd]; 215 entry = &rb->entries[rb->l2p_update];
217 w_ctx = &entry->w_ctx; 216 w_ctx = &entry->w_ctx;
218 217
219 flags = READ_ONCE(entry->w_ctx.flags); 218 flags = READ_ONCE(entry->w_ctx.flags);
@@ -230,7 +229,7 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
230 line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)]; 229 line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
231 kref_put(&line->ref, pblk_line_put); 230 kref_put(&line->ref, pblk_line_put);
232 clean_wctx(w_ctx); 231 clean_wctx(w_ctx);
233 *l2p_upd = (*l2p_upd + 1) & (rb->nr_entries - 1); 232 rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
234 } 233 }
235 234
236 pblk_rl_out(&pblk->rl, user_io, gc_io); 235 pblk_rl_out(&pblk->rl, user_io, gc_io);
@@ -258,7 +257,7 @@ static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
258 257
259 count = nr_entries - space; 258 count = nr_entries - space;
260 /* l2p_update used exclusively under rb->w_lock */ 259 /* l2p_update used exclusively under rb->w_lock */
261 ret = __pblk_rb_update_l2p(rb, &rb->l2p_update, count); 260 ret = __pblk_rb_update_l2p(rb, count);
262 261
263out: 262out:
264 return ret; 263 return ret;
@@ -280,7 +279,7 @@ void pblk_rb_sync_l2p(struct pblk_rb *rb)
280 sync = smp_load_acquire(&rb->sync); 279 sync = smp_load_acquire(&rb->sync);
281 280
282 to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries); 281 to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
283 __pblk_rb_update_l2p(rb, &rb->l2p_update, to_update); 282 __pblk_rb_update_l2p(rb, to_update);
284 283
285 spin_unlock(&rb->w_lock); 284 spin_unlock(&rb->w_lock);
286} 285}
@@ -325,8 +324,8 @@ void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
325} 324}
326 325
327void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data, 326void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
328 struct pblk_w_ctx w_ctx, struct pblk_line *gc_line, 327 struct pblk_w_ctx w_ctx, struct pblk_line *line,
329 unsigned int ring_pos) 328 u64 paddr, unsigned int ring_pos)
330{ 329{
331 struct pblk *pblk = container_of(rb, struct pblk, rwb); 330 struct pblk *pblk = container_of(rb, struct pblk, rwb);
332 struct pblk_rb_entry *entry; 331 struct pblk_rb_entry *entry;
@@ -341,7 +340,7 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
341 340
342 __pblk_rb_write_entry(rb, data, w_ctx, entry); 341 __pblk_rb_write_entry(rb, data, w_ctx, entry);
343 342
344 if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, gc_line)) 343 if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
345 entry->w_ctx.lba = ADDR_EMPTY; 344 entry->w_ctx.lba = ADDR_EMPTY;
346 345
347 flags = w_ctx.flags | PBLK_WRITTEN_DATA; 346 flags = w_ctx.flags | PBLK_WRITTEN_DATA;
@@ -355,7 +354,6 @@ static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
355{ 354{
356 struct pblk_rb_entry *entry; 355 struct pblk_rb_entry *entry;
357 unsigned int subm, sync_point; 356 unsigned int subm, sync_point;
358 int flags;
359 357
360 subm = READ_ONCE(rb->subm); 358 subm = READ_ONCE(rb->subm);
361 359
@@ -369,12 +367,6 @@ static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
369 sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1); 367 sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
370 entry = &rb->entries[sync_point]; 368 entry = &rb->entries[sync_point];
371 369
372 flags = READ_ONCE(entry->w_ctx.flags);
373 flags |= PBLK_FLUSH_ENTRY;
374
375 /* Release flags on context. Protect from writes */
376 smp_store_release(&entry->w_ctx.flags, flags);
377
378 /* Protect syncs */ 370 /* Protect syncs */
379 smp_store_release(&rb->sync_point, sync_point); 371 smp_store_release(&rb->sync_point, sync_point);
380 372
@@ -454,6 +446,7 @@ static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
454 446
455 /* Protect from read count */ 447 /* Protect from read count */
456 smp_store_release(&rb->mem, mem); 448 smp_store_release(&rb->mem, mem);
449
457 return 1; 450 return 1;
458} 451}
459 452
@@ -558,12 +551,13 @@ out:
558 * persist data on the write buffer to the media. 551 * persist data on the write buffer to the media.
559 */ 552 */
560unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd, 553unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
561 struct bio *bio, unsigned int pos, 554 unsigned int pos, unsigned int nr_entries,
562 unsigned int nr_entries, unsigned int count) 555 unsigned int count)
563{ 556{
564 struct pblk *pblk = container_of(rb, struct pblk, rwb); 557 struct pblk *pblk = container_of(rb, struct pblk, rwb);
565 struct request_queue *q = pblk->dev->q; 558 struct request_queue *q = pblk->dev->q;
566 struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd); 559 struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
560 struct bio *bio = rqd->bio;
567 struct pblk_rb_entry *entry; 561 struct pblk_rb_entry *entry;
568 struct page *page; 562 struct page *page;
569 unsigned int pad = 0, to_read = nr_entries; 563 unsigned int pad = 0, to_read = nr_entries;