author	Jens Axboe <axboe@kernel.dk>	2018-05-07 11:57:08 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-05-08 17:10:56 -0400
commit	8bea60901974ad44b06b08d52e1dd421ea8c6e9c (patch)
tree	afd05bb8580a9e41afef562c639920307ceae486 /block/blk-wbt.c
parent	825843b0adb7c95e8cbab35e6fee64980e29ade8 (diff)
blk-wbt: pass in enum wbt_flags to get_rq_wait()
This is in preparation for having more write queues, in which case we
would have needed to pass in more information than just a simple
'is_kswapd' boolean.

Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
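For context on the mapping the patch introduces: get_rq_wait() now picks the rq_wait slot from the request's accounting flags instead of a raw 'is_kswapd' boolean. The declarations below are a paraphrased sketch of the wbt_flags bits and write-queue indices the hunks refer to; they live in block/blk-wbt.h, are not part of the blk-wbt.c diff shown here, and exact values/comments may differ from the tree at this commit.

/* Paraphrased sketch of definitions referenced by this patch; see
 * block/blk-wbt.h in the tree for the authoritative versions.
 */
enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked by wbt */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write issued from kswapd */
};

/* Indices into rwb->rq_wait[]; one wait queue per class of writer. */
enum {
	WBT_RWQ_BG = 0,		/* background / regular writes */
	WBT_RWQ_KSWAPD,		/* writes issued by kswapd */
	WBT_NUM_RWQ,
};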
Diffstat (limited to 'block/blk-wbt.c')
-rw-r--r--  block/blk-wbt.c | 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 3e34b41bcefc..25d202345965 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -101,9 +101,13 @@ static bool wb_recent_wait(struct rq_wb *rwb)
 	return time_before(jiffies, wb->dirty_sleep + HZ);
 }
 
-static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb, bool is_kswapd)
+static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
+					  enum wbt_flags wb_acct)
 {
-	return &rwb->rq_wait[is_kswapd];
+	if (wb_acct & WBT_KSWAPD)
+		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
+
+	return &rwb->rq_wait[WBT_RWQ_BG];
 }
 
 static void rwb_wake_all(struct rq_wb *rwb)
@@ -126,7 +130,7 @@ void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
 	if (!(wb_acct & WBT_TRACKED))
 		return;
 
-	rqw = get_rq_wait(rwb, wb_acct & WBT_KSWAPD);
+	rqw = get_rq_wait(rwb, wb_acct);
 	inflight = atomic_dec_return(&rqw->inflight);
 
 	/*
@@ -529,11 +533,12 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
  * Block if we will exceed our limit, or if we are currently waiting for
  * the timer to kick off queuing again.
  */
-static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
+		       unsigned long rw, spinlock_t *lock)
 	__releases(lock)
 	__acquires(lock)
 {
-	struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
+	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
 	DEFINE_WAIT(wait);
 
 	if (may_queue(rwb, rqw, &wait, rw))
@@ -584,7 +589,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
  */
 enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
 {
-	unsigned int ret = 0;
+	enum wbt_flags ret = 0;
 
 	if (!rwb_enabled(rwb))
 		return 0;
@@ -598,14 +603,14 @@ enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
 		return ret;
 	}
 
-	__wbt_wait(rwb, bio->bi_opf, lock);
+	if (current_is_kswapd())
+		ret |= WBT_KSWAPD;
+
+	__wbt_wait(rwb, ret, bio->bi_opf, lock);
 
 	if (!blk_stat_is_active(rwb->cb))
 		rwb_arm_timer(rwb);
 
-	if (current_is_kswapd())
-		ret |= WBT_KSWAPD;
-
 	return ret | WBT_TRACKED;
 }
 