author     Tejun Heo <tj@kernel.org>          2011-03-02 08:48:05 -0500
committer  Jens Axboe <jaxboe@fusionio.com>   2011-03-02 08:48:05 -0500
commit     1654e7411a1ad4999fe7890ef51d2a2bbb1fcf76 (patch)
tree       c8071cf8cc1aef9e776697b72aaca5a22a47c3d7
parent     291d24f6d9e7bbef81454fade8a44720665c7302 (diff)
block: add @force_kblockd to __blk_run_queue()
__blk_run_queue() automatically either calls q->request_fn() directly
or schedules kblockd, depending on whether the function is being
re-entered recursively. The blk-flush implementation needs to be able
to explicitly choose kblockd, so add a @force_kblockd parameter.

All current users are converted to pass %false for the parameter, so
this patch doesn't introduce any behavior change.

stable: This is a prerequisite for fixing the ide oops caused by the
new blk-flush implementation.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jan Beulich <JBeulich@novell.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
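
To make the new contract concrete, here is a minimal userspace model of the
decision __blk_run_queue() makes after this patch (editor's sketch, not kernel
code: run_queue() stands in for __blk_run_queue(), and the queue flag,
request_fn, and kblockd machinery are stubbed with plain C stand-ins):

#include <stdbool.h>
#include <stdio.h>

static bool reenter;                    /* models QUEUE_FLAG_REENTER */

static bool test_and_set_reenter(void)  /* models queue_flag_test_and_set() */
{
	bool old = reenter;
	reenter = true;
	return old;
}

static void request_fn(void)            /* models q->request_fn(q) */
{
	printf("request_fn: dispatched directly\n");
}

static void kick_kblockd(void)          /* models scheduling the kblockd work item */
{
	printf("kblockd: dispatch deferred to the workqueue\n");
}

/*
 * Run request_fn() inline only if the caller did not force kblockd
 * AND we are not already inside a dispatch (the recursion guard);
 * otherwise punt to kblockd.
 */
static void run_queue(bool force_kblockd)
{
	if (!force_kblockd && !test_and_set_reenter()) {
		request_fn();
		reenter = false;        /* models queue_flag_clear() */
	} else {
		kick_kblockd();
	}
}

int main(void)
{
	run_queue(false);   /* normal caller: runs inline */
	run_queue(true);    /* forced: always goes through kblockd */
	return 0;
}

Passing true never consults the recursion guard, which is exactly what lets a
completion path avoid re-entering the driver's request_fn on its own stack.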
 block/blk-core.c                 | 11
 block/blk-flush.c                |  2
 block/cfq-iosched.c              |  6
 block/elevator.c                 |  4
 drivers/scsi/scsi_lib.c          |  2
 drivers/scsi/scsi_transport_fc.c |  2
 include/linux/blkdev.h           |  2
 7 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 792ece276160..518dd423a5fe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q:	The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
 	blk_remove_plug(q);
 
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 54b123d6563e..56adaa8d55cd 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -69,7 +69,7 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
 	 * queue.  Kick the queue in those cases.
 	 */
 	if (was_empty && next_rq)
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
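
This call site is the motivation for the new parameter:
blk_flush_complete_seq_end_io() runs from request completion context, where
recursing into q->request_fn() can oops some drivers (the ide problem noted in
the commit message). This patch deliberately keeps the behavior unchanged by
passing %false; the follow-up fix would presumably flip this site to force
kblockd, roughly:

	/* hypothetical follow-up, not part of this patch */
	if (was_empty && next_rq)
		__blk_run_queue(q, true);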
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7be4c7959625..ea83a4f0c27d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    cfqd->busy_queues > 1) {
 			cfq_del_timer(cfqd, cfqq);
 			cfq_clear_cfqq_wait_request(cfqq);
-			__blk_run_queue(cfqd->queue);
+			__blk_run_queue(cfqd->queue, false);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
 					&cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue);
+		__blk_run_queue(cfqd->queue, false);
 	}
 }
 
@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue);
+	__blk_run_queue(cfqd->queue, false);
 	spin_unlock_irq(q->queue_lock);
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index 2569512830d3..236e93c1f46c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		 * with anything.  There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		break;
 
 	case ELEVATOR_INSERT_SORT:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9045c52abd25..fb2bb35c62cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
 					  &sdev->request_queue->queue_flags);
 		if (flagset)
 			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue);
+		__blk_run_queue(sdev->request_queue, false);
 		if (flagset)
 			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
 		spin_unlock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 998c01be3234..5c3ccfc6b622 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
 		  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
 	if (flagset)
 		queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-	__blk_run_queue(rport->rqst_q);
+	__blk_run_queue(rport->rqst_q, false);
 	if (flagset)
 		queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
 	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
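
Note that both SCSI call sites above manually set QUEUE_FLAG_REENTER around
the call precisely to steer __blk_run_queue() onto the kblockd path. With
@force_kblockd available, a later cleanup could presumably replace the flag
dance with the parameter (hypothetical, not part of this patch):

	/* hypothetical cleanup: force kblockd directly */
	__blk_run_queue(sdev->request_queue, true);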
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index dd8cd0f47e3a..d5063e1b5555 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *);
+extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,