author     Tejun Heo <tj@kernel.org>            2009-04-22 22:05:17 -0400
committer  Jens Axboe <jens.axboe@oracle.com>   2009-04-28 01:37:33 -0400
commit     a538cd03be6f363d039daa94199c28cfbd508455 (patch)
tree       47d327e3339f5f07c4f4386537fab526310344a7 /block
parent     db2dbb12dc47a50c7a4c5678f526014063e486f6 (diff)
block: merge blk_invoke_request_fn() into __blk_run_queue()
__blk_run_queue() wraps blk_invoke_request_fn() such that it
additionally removes the plug and bails out early if the queue is
empty. Both extra operations have their own pending mechanisms and
cause no harm correctness-wise when performed superfluously.
Since the only user of blk_invoke_request_fn() is blk_start_queue(),
there is little reason to keep both functions around. Merge
blk_invoke_request_fn() into __blk_run_queue() and make
blk_start_queue() use __blk_run_queue() instead.
[ Impact: merge two subtly different internal functions ]
Signed-off-by: Tejun Heo <tj@kernel.org>
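As context (not part of the patch): drivers call blk_start_queue() with
the queue lock held and local interrupts disabled, which is what the
WARN_ON(!irqs_disabled()) in the hunk below checks, and after this merge
the call falls straight through to __blk_run_queue(). A minimal sketch
of such a caller, where struct my_dev and my_dev_resume_io() are
hypothetical names used only for illustration:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical driver state, not part of this patch. */
struct my_dev {
	struct request_queue *queue;
};

/*
 * Restart I/O after a driver-imposed stop: take the queue lock with
 * IRQs off, then let blk_start_queue() clear QUEUE_FLAG_STOPPED and
 * run the queue via the merged __blk_run_queue().
 */
static void my_dev_resume_io(struct my_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(dev->queue->queue_lock, flags);
	blk_start_queue(dev->queue);
	spin_unlock_irqrestore(dev->queue->queue_lock, flags);
}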
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c | 35
1 file changed, 14 insertions(+), 21 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 41bc0ff75e28..02f53bc00e4c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -333,24 +333,6 @@ void blk_unplug(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_unplug);
 
-static void blk_invoke_request_fn(struct request_queue *q)
-{
-	if (unlikely(blk_queue_stopped(q)))
-		return;
-
-	/*
-	 * one level of recursion is ok and is much faster than kicking
-	 * the unplug handling
-	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-		q->request_fn(q);
-		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else {
-		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-		kblockd_schedule_work(q, &q->unplug_work);
-	}
-}
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
@@ -365,7 +347,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	blk_invoke_request_fn(q);
+	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -425,12 +407,23 @@ void __blk_run_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
 
+	if (unlikely(blk_queue_stopped(q)))
+		return;
+
+	if (elv_queue_empty(q))
+		return;
+
 	/*
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!elv_queue_empty(q))
-		blk_invoke_request_fn(q);
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+		q->request_fn(q);
+		queue_flag_clear(QUEUE_FLAG_REENTER, q);
+	} else {
+		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+		kblockd_schedule_work(q, &q->unplug_work);
+	}
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
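Worth noting is the reentry guard the merged function inherits: the
first caller runs q->request_fn() inline, while a reentrant call (the
request function ending up back in __blk_run_queue()) defers to kblockd
instead of recursing and risking stack overrun. A standalone sketch of
the same test-and-set pattern in plain C11 atomics; run_guarded(),
handler() and defer_to_worker() are illustrative stand-ins, not kernel
API:

#include <stdatomic.h>
#include <stdio.h>

/*
 * One-level recursion guard: the first caller runs the handler inline;
 * a reentrant caller (the handler calling back into run_guarded())
 * defers to a worker instead of recursing further, mirroring
 * QUEUE_FLAG_REENTER above.
 */
static atomic_flag reenter = ATOMIC_FLAG_INIT;

static void handler(int depth);

static void defer_to_worker(void)
{
	/* Stand-in for kblockd_schedule_work(): punt to async context. */
	puts("deferred to worker");
}

static void run_guarded(int depth)
{
	if (!atomic_flag_test_and_set(&reenter)) {
		handler(depth);
		atomic_flag_clear(&reenter);
	} else {
		defer_to_worker();
	}
}

static void handler(int depth)
{
	printf("handler at depth %d\n", depth);
	if (depth == 0)
		run_guarded(depth + 1);	/* this reentry gets deferred */
}

int main(void)
{
	run_guarded(0);
	return 0;
}

Running it prints the depth-0 handler invocation followed by the
deferred notice, mirroring how a nested __blk_run_queue() punts to the
unplug work item rather than calling q->request_fn() again.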