about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-08-25 16:30:23 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-08-25 16:30:23 -0400
commitb8dcdab36f5394a09b66516057cccf61a81a3877 (patch)
treee08659de431a585acd3804464535334161e5a8cc
parentdb84abf5f8075a739bfd73ddc432e6b8fe3ec4b6 (diff)
parentc125311d96b1bfcce0f5930a4f0fdfe39ea14f7c (diff)
Merge tag 'for-linus-20180825' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A few small fixes for this merge window:

  - Locking imbalance fix for bcache (Shan Hai)

  - A few small fixes for wbt. One is a cleanup/prep, one is a fix for
    an existing issue, and the last two are fixes for changes that went
    into this merge window (me)"

* tag 'for-linus-20180825' of git://git.kernel.dk/linux-block:
  blk-wbt: don't maintain inflight counts if disabled
  blk-wbt: fix has-sleeper queueing check
  blk-wbt: use wq_has_sleeper() for wq active check
  blk-wbt: move disable check into get_limit()
  bcache: release dc->writeback_lock properly in bch_writeback_thread()
-rw-r--r--block/blk-sysfs.c19
-rw-r--r--block/blk-wbt.c37
-rw-r--r--drivers/md/bcache/writeback.c4
3 files changed, 38 insertions(+), 22 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index bb109bb0a055..3772671cf2bc 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -453,9 +453,26 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
453 else if (val >= 0) 453 else if (val >= 0)
454 val *= 1000ULL; 454 val *= 1000ULL;
455 455
456 wbt_set_min_lat(q, val); 456 /*
457 * Ensure that the queue is idled, in case the latency update
458 * ends up either enabling or disabling wbt completely. We can't
459 * have IO inflight if that happens.
460 */
461 if (q->mq_ops) {
462 blk_mq_freeze_queue(q);
463 blk_mq_quiesce_queue(q);
464 } else
465 blk_queue_bypass_start(q);
457 466
467 wbt_set_min_lat(q, val);
458 wbt_update_limits(q); 468 wbt_update_limits(q);
469
470 if (q->mq_ops) {
471 blk_mq_unquiesce_queue(q);
472 blk_mq_unfreeze_queue(q);
473 } else
474 blk_queue_bypass_end(q);
475
459 return count; 476 return count;
460} 477}
461 478
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index bb93c7c2b182..84507d3e9a98 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -118,7 +118,7 @@ static void rwb_wake_all(struct rq_wb *rwb)
118 for (i = 0; i < WBT_NUM_RWQ; i++) { 118 for (i = 0; i < WBT_NUM_RWQ; i++) {
119 struct rq_wait *rqw = &rwb->rq_wait[i]; 119 struct rq_wait *rqw = &rwb->rq_wait[i];
120 120
121 if (waitqueue_active(&rqw->wait)) 121 if (wq_has_sleeper(&rqw->wait))
122 wake_up_all(&rqw->wait); 122 wake_up_all(&rqw->wait);
123 } 123 }
124} 124}
@@ -162,7 +162,7 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
162 if (inflight && inflight >= limit) 162 if (inflight && inflight >= limit)
163 return; 163 return;
164 164
165 if (waitqueue_active(&rqw->wait)) { 165 if (wq_has_sleeper(&rqw->wait)) {
166 int diff = limit - inflight; 166 int diff = limit - inflight;
167 167
168 if (!inflight || diff >= rwb->wb_background / 2) 168 if (!inflight || diff >= rwb->wb_background / 2)
@@ -449,6 +449,13 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
449{ 449{
450 unsigned int limit; 450 unsigned int limit;
451 451
452 /*
453 * If we got disabled, just return UINT_MAX. This ensures that
454 * we'll properly inc a new IO, and dec+wakeup at the end.
455 */
456 if (!rwb_enabled(rwb))
457 return UINT_MAX;
458
452 if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD) 459 if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
453 return rwb->wb_background; 460 return rwb->wb_background;
454 461
@@ -485,31 +492,17 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
485{ 492{
486 struct rq_wait *rqw = get_rq_wait(rwb, wb_acct); 493 struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
487 DECLARE_WAITQUEUE(wait, current); 494 DECLARE_WAITQUEUE(wait, current);
495 bool has_sleeper;
488 496
489 /* 497 has_sleeper = wq_has_sleeper(&rqw->wait);
490 * inc it here even if disabled, since we'll dec it at completion. 498 if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
491 * this only happens if the task was sleeping in __wbt_wait(),
492 * and someone turned it off at the same time.
493 */
494 if (!rwb_enabled(rwb)) {
495 atomic_inc(&rqw->inflight);
496 return;
497 }
498
499 if (!waitqueue_active(&rqw->wait)
500 && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
501 return; 499 return;
502 500
503 add_wait_queue_exclusive(&rqw->wait, &wait); 501 add_wait_queue_exclusive(&rqw->wait, &wait);
504 do { 502 do {
505 set_current_state(TASK_UNINTERRUPTIBLE); 503 set_current_state(TASK_UNINTERRUPTIBLE);
506 504
507 if (!rwb_enabled(rwb)) { 505 if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
508 atomic_inc(&rqw->inflight);
509 break;
510 }
511
512 if (rq_wait_inc_below(rqw, get_limit(rwb, rw)))
513 break; 506 break;
514 507
515 if (lock) { 508 if (lock) {
@@ -518,6 +511,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
518 spin_lock_irq(lock); 511 spin_lock_irq(lock);
519 } else 512 } else
520 io_schedule(); 513 io_schedule();
514 has_sleeper = false;
521 } while (1); 515 } while (1);
522 516
523 __set_current_state(TASK_RUNNING); 517 __set_current_state(TASK_RUNNING);
@@ -546,6 +540,9 @@ static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
546{ 540{
547 enum wbt_flags flags = 0; 541 enum wbt_flags flags = 0;
548 542
543 if (!rwb_enabled(rwb))
544 return 0;
545
549 if (bio_op(bio) == REQ_OP_READ) { 546 if (bio_op(bio) == REQ_OP_READ) {
550 flags = WBT_READ; 547 flags = WBT_READ;
551 } else if (wbt_should_throttle(rwb, bio)) { 548 } else if (wbt_should_throttle(rwb, bio)) {
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6be05bd7ca67..08c3a9f9676c 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -685,8 +685,10 @@ static int bch_writeback_thread(void *arg)
685 * data on cache. BCACHE_DEV_DETACHING flag is set in 685 * data on cache. BCACHE_DEV_DETACHING flag is set in
686 * bch_cached_dev_detach(). 686 * bch_cached_dev_detach().
687 */ 687 */
688 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) 688 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
689 up_write(&dc->writeback_lock);
689 break; 690 break;
691 }
690 } 692 }
691 693
692 up_write(&dc->writeback_lock); 694 up_write(&dc->writeback_lock);