author     Tejun Heo <tj@kernel.org>       2011-10-19 08:32:38 -0400
committer  Jens Axboe <axboe@kernel.dk>    2011-10-19 08:32:38 -0400
commit     e3c78ca524d230bc145e902625e88c392a58ddf3
tree       833eb544dd4180fd626f60da17788aae7830f4dc
parent     315fceee81155ef2aeed9316ca72aeea9347db5c
block: reorganize queue draining
Reorganize queue-draining related code in preparation for queue exit
changes.
* Factor out actual draining from elv_quiesce_start() to
blk_drain_queue().
* Make elv_quiesce_start/end() responsible for their own locking.
* Replace open-coded ELVSWITCH clearing in elevator_switch() with
elv_quiesce_end().
This patch doesn't cause any visible functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
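
The sketch below is a minimal userspace analogue of the drain loop that this patch centralizes in blk_drain_queue(): take the queue lock, push whatever the "elevator" is still holding toward dispatch, sample the count of in-flight elevator-private requests, drop the lock, and sleep briefly before retrying until the count reaches zero. A pthread mutex stands in for q->queue_lock, and the names (sim_queue, drain_queue, completion_worker) are invented for illustration; this is not kernel code and not part of the patch.

/*
 * Userspace sketch of the blk_drain_queue() pattern (assumed analogue).
 * Build with: cc -pthread drain_sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct sim_queue {
	pthread_mutex_t lock;	/* stands in for q->queue_lock */
	int pending;		/* requests still held by the "elevator" */
	int in_flight;		/* requests issued but not yet completed */
};

/* Push everything the elevator holds to the dispatch stage (cf. elv_drain_elevator()). */
static void drain_elevator(struct sim_queue *q)
{
	q->in_flight += q->pending;
	q->pending = 0;
}

/* The drain loop itself, mirroring the structure of blk_drain_queue(). */
static void drain_queue(struct sim_queue *q)
{
	while (true) {
		int nr_rqs;

		pthread_mutex_lock(&q->lock);
		drain_elevator(q);
		/* __blk_run_queue() would kick the driver at this point. */
		nr_rqs = q->in_flight;
		pthread_mutex_unlock(&q->lock);

		if (!nr_rqs)
			break;
		usleep(10 * 1000);	/* msleep(10) */
	}
}

/* Completion side: a worker finishes one in-flight request every few ms. */
static void *completion_worker(void *arg)
{
	struct sim_queue *q = arg;

	for (;;) {
		usleep(3 * 1000);
		pthread_mutex_lock(&q->lock);
		if (q->in_flight > 0)
			q->in_flight--;
		bool done = !q->in_flight && !q->pending;
		pthread_mutex_unlock(&q->lock);
		if (done)
			return NULL;
	}
}

int main(void)
{
	struct sim_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.pending = 4,
		.in_flight = 2,
	};
	pthread_t worker;

	pthread_create(&worker, NULL, completion_worker, &q);
	drain_queue(&q);	/* returns only once nothing is in flight */
	pthread_join(worker, NULL);
	printf("queue drained\n");
	return 0;
}

The structural point is the same as in the kernel version: the lock is never held across the sleep, so completions (here, the worker thread) can make progress while the drainer waits.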
-rw-r--r--  block/blk-core.c | 28
-rw-r--r--  block/blk.h      |  1
-rw-r--r--  block/elevator.c | 37
3 files changed, 40 insertions(+), 26 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index a3d2fdc8ed1c..149149dd7f7b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -28,6 +28,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/fault-inject.h>
 #include <linux/list_sort.h>
+#include <linux/delay.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -345,6 +346,33 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
+/**
+ * blk_drain_queue - drain requests from request_queue
+ * @q: queue to drain
+ *
+ * Drain ELV_PRIV requests from @q. The caller is responsible for ensuring
+ * that no new requests which need to be drained are queued.
+ */
+void blk_drain_queue(struct request_queue *q)
+{
+	while (true) {
+		int nr_rqs;
+
+		spin_lock_irq(q->queue_lock);
+
+		elv_drain_elevator(q);
+
+		__blk_run_queue(q);
+		nr_rqs = q->rq.elvpriv;
+
+		spin_unlock_irq(q->queue_lock);
+
+		if (!nr_rqs)
+			break;
+		msleep(10);
+	}
+}
+
 /*
  * Note: If a driver supplied the queue lock, it is disconnected
  * by this function. The actual state of the lock doesn't matter
diff --git a/block/blk.h b/block/blk.h
index da247ba2aeaf..2b66dc21a493 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -15,6 +15,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		  struct bio *bio);
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio);
+void blk_drain_queue(struct request_queue *q);
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
diff --git a/block/elevator.c b/block/elevator.c
index cb332cb7ac6b..74a277ffed39 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -31,7 +31,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
-#include <linux/delay.h>
 #include <linux/blktrace_api.h>
 #include <linux/hash.h>
 #include <linux/uaccess.h>
@@ -606,43 +605,35 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 void elv_drain_elevator(struct request_queue *q)
 {
 	static int printed;
+
+	lockdep_assert_held(q->queue_lock);
+
 	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
 		;
-	if (q->nr_sorted == 0)
-		return;
-	if (printed++ < 10) {
+	if (q->nr_sorted && printed++ < 10) {
 		printk(KERN_ERR "%s: forced dispatching is broken "
 		       "(nr_sorted=%u), please report this\n",
 		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
 	}
 }
 
-/*
- * Call with queue lock held, interrupts disabled
- */
 void elv_quiesce_start(struct request_queue *q)
 {
 	if (!q->elevator)
 		return;
 
+	spin_lock_irq(q->queue_lock);
 	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
 
-	/*
-	 * make sure we don't have any requests in flight
-	 */
-	elv_drain_elevator(q);
-	while (q->rq.elvpriv) {
-		__blk_run_queue(q);
-		spin_unlock_irq(q->queue_lock);
-		msleep(10);
-		spin_lock_irq(q->queue_lock);
-		elv_drain_elevator(q);
-	}
+	blk_drain_queue(q);
 }
 
 void elv_quiesce_end(struct request_queue *q)
 {
+	spin_lock_irq(q->queue_lock);
 	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
 }
 
 void __elv_add_request(struct request_queue *q, struct request *rq, int where)
@@ -972,7 +963,6 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	/*
 	 * Turn on BYPASS and drain all requests w/ elevator private data
 	 */
-	spin_lock_irq(q->queue_lock);
 	elv_quiesce_start(q);
 
 	/*
@@ -983,8 +973,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	/*
 	 * attach and start new elevator
 	 */
+	spin_lock_irq(q->queue_lock);
 	elevator_attach(q, e, data);
-
 	spin_unlock_irq(q->queue_lock);
 
 	if (old_elevator->registered) {
@@ -999,9 +989,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 * finally exit old elevator and turn off BYPASS.
 	 */
 	elevator_exit(old_elevator);
-	spin_lock_irq(q->queue_lock);
 	elv_quiesce_end(q);
-	spin_unlock_irq(q->queue_lock);
 
 	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
 
@@ -1015,10 +1003,7 @@ fail_register:
 	elevator_exit(e);
 	q->elevator = old_elevator;
 	elv_register_queue(q);
-
-	spin_lock_irq(q->queue_lock);
-	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
-	spin_unlock_irq(q->queue_lock);
+	elv_quiesce_end(q);
 
 	return err;
 }