| author    | Tejun Heo <tj@kernel.org>                                  | 2011-03-04 13:09:02 -0500 |
|-----------|------------------------------------------------------------|---------------------------|
| committer | Tejun Heo <tj@kernel.org>                                  | 2011-03-04 13:09:02 -0500 |
| commit    | e83a46bbb1d4c03defd733a64b727632a40059ad                   |                           |
| tree      | c4bc4822b2d3af1bf38095f531adc0a2aac054a5 /block/blk-core.c |                           |
| parent    | da527770007fce8e4541947d47918248286da875                   |                           |
| parent    | fd51469fb68b987032e46297e0a4fe9020063c20                   |                           |
Merge branch 'for-linus' of ../linux-2.6-block into block-for-2.6.39/core
This merge creates two sets of conflicts. One is a set of simple context
conflicts caused by the removal of throtl_scheduled_delayed_work() in
for-linus and the removal of throtl_shutdown_timer_wq() in
for-2.6.39/core.

The other is caused by commit 255bb490c8 (block: blk-flush shouldn't
call directly into q->request_fn() __blk_run_queue()) in for-linus
clashing with the FLUSH reimplementation in for-2.6.39/core. That
conflict isn't trivial, but the resolution is straightforward.
* __blk_run_queue() calls in flush_end_io() and flush_data_end_io()
  should be made with @force_kblockd set to %true.

* elv_insert() in blk_kick_flush() should use
  %ELEVATOR_INSERT_REQUEUE.

Both changes avoid invoking ->request_fn() directly from the request
completion path and closely match the changes in commit 255bb490c8,
as sketched below.
Signed-off-by: Tejun Heo <tj@kernel.org>
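
For orientation, here is a minimal sketch of what the two resolution points above amount to. It is not the verbatim block/blk-flush.c code (those hunks are not part of this page): the function bodies are placeholders and the error/flush_rq parameters are simplified. Only the two calls named in the resolution are meant literally, using the __blk_run_queue(q, bool force_kblockd) signature introduced in the diff below.

```c
#include <linux/blkdev.h>
#include <linux/elevator.h>

/*
 * Illustrative sketch only -- simplified stand-ins for the resolved
 * block/blk-flush.c call sites, not the actual resolution.
 */

/* Completion handlers must not invoke ->request_fn() directly, so the
 * queue is kicked through kblockd by passing force_kblockd == true. */
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;

	/* ... complete the flush sequence ... */
	__blk_run_queue(q, true);
}

static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/* ... advance the flush state machine ... */
	__blk_run_queue(q, true);
}

/* The flush request is (re)inserted with ELEVATOR_INSERT_REQUEUE so the
 * insertion does not end up running the queue from the completion path.
 * Passing flush_rq as a parameter just keeps this sketch self-contained. */
static void blk_kick_flush(struct request_queue *q, struct request *flush_rq)
{
	/* ... decide that the flush request must be issued ... */
	elv_insert(q, flush_rq, ELEVATOR_INSERT_REQUEUE);
}
```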
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 18
1 file changed, 6 insertions, 12 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index accff29ad674..74d496ccf4d7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -342,7 +342,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -396,13 +396,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
 	blk_remove_plug(q);
 
@@ -416,7 +417,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -439,7 +440,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1085,7 +1086,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -2642,13 +2643,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-			struct delayed_work *dwork, unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
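
As the kernel-doc in the hunk above says, __blk_run_queue() must be called with the queue lock held and interrupts disabled, and after this change every caller also chooses whether the queue may be run directly or only via kblockd. A hypothetical caller, mirroring the exported blk_run_queue() wrapper shown in the diff (the helper name here is made up for illustration):

```c
#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical helper, not part of this patch: restart a queue from
 * process context, where calling ->request_fn() directly is fine. */
static void example_restart_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* Not a request completion path, so kblockd is not forced. */
	__blk_run_queue(q, false);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
```

A caller on a request completion path would instead pass true, deferring the ->request_fn() invocation to kblockd, which is exactly what the merge resolution above does in blk-flush.c.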