author    Tejun Heo <tj@kernel.org>    2012-03-05 16:14:59 -0500
committer Jens Axboe <axboe@kernel.dk> 2012-03-06 15:27:22 -0500
commit    6ecf23afab13c39d3bb0e2d826d0984b0dd53733
tree      48436e2eb507d623ff2c2332aa34e9b7380f33e1
parent    d732580b4eb31553c63744a47d590f770cafb8f0
block: extend queue bypassing to cover blkcg policies
Extend queue bypassing such that dying queue is always bypassing and
blk-throttle is drained on bypass.  With blkcg policies updated to test
blk_queue_bypass() instead of blk_queue_dead(), this ensures that no
bio or request is held by or going through blkcg policies on a
bypassing queue.

This will be used to implement blkg cleanup on elevator switches and
policy changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
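As background for the diff below: bypass_depth makes bypass a counted
state, so blk_cleanup_queue() can hold a dying queue in bypass
permanently while elevator switches and policy changes take and drop
their own references, and blkcg policies only need to test the single
blk_queue_bypass() predicate.  The following is a minimal userspace
model of that counting, not the kernel code itself; blk_queue_bypass(),
bypass_depth and QUEUE_FLAG_BYPASS are the real kernel names, while the
struct layout, helper names, and the omitted queue_lock and drain steps
are simplifications for illustration.

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-in for struct request_queue; the real one lives in
 * include/linux/blkdev.h and is updated under queue_lock */
struct request_queue {
	int  bypass_depth;	/* number of active bypass holders */
	bool bypass;		/* models QUEUE_FLAG_BYPASS */
};

/* models blk_queue_bypass(), the predicate blkcg policies now test */
static bool queue_bypass(struct request_queue *q)
{
	return q->bypass;
}

/* models blk_queue_bypass_start(): enter bypass; the real function
 * also drains throttled and issued requests, elided here */
static void bypass_start(struct request_queue *q)
{
	q->bypass_depth++;
	q->bypass = true;
}

/* models blk_queue_bypass_end(): leave bypass when the last holder drops */
static void bypass_end(struct request_queue *q)
{
	if (--q->bypass_depth == 0)
		q->bypass = false;
}

int main(void)
{
	struct request_queue q = { 0 };

	bypass_start(&q);	/* e.g. an elevator switch begins */
	bypass_start(&q);	/* blk_cleanup_queue() on a dying queue */
	bypass_end(&q);		/* the elevator switch finishes */

	/* the dying queue's reference is never dropped, so the queue
	 * stays bypassing until it is released */
	printf("bypassing: %s\n", queue_bypass(&q) ? "yes" : "no");
	return 0;
}

Compiled standalone, this prints "bypassing: yes": the temporary holder
has come and gone, but the reference taken for the dying queue keeps
the flag set until the queue is released.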
-rw-r--r--  block/blk-core.c      12
-rw-r--r--  block/blk-throttle.c   4
2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 98ddef430093..7713c73d9590 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -372,8 +372,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (q->elevator)
 			elv_drain_elevator(q);
 
-		if (drain_all)
-			blk_throtl_drain(q);
+		blk_throtl_drain(q);
 
 		/*
 		 * This function might be called on a queue which failed
@@ -415,8 +414,8 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
  *
  * In bypass mode, only the dispatch FIFO queue of @q is used.  This
  * function makes @q enter bypass mode and drains all requests which were
- * issued before.  On return, it's guaranteed that no request has ELVPRIV
- * set.
+ * throttled or issued before.  On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set.
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
@@ -461,6 +460,11 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 
 	spin_lock_irq(lock);
+
+	/* dead queue is permanently in bypass mode till released */
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a76721d..702c0e64e09f 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	struct request_queue *q = td->queue;
 
 	/* no throttling for dead queue */
-	if (unlikely(blk_queue_dead(q)))
+	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
 
 	rcu_read_lock();
@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	spin_lock_irq(q->queue_lock);
 
 	/* Make sure @q is still alive */
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_bypass(q))) {
 		kfree(tg);
 		return NULL;
 	}