Diffstat (limited to 'block')
 block/blk-core.c | 19 +++++++++++++++++--
 block/elevator.c |  7 +++----
 2 files changed, 20 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2d053b584410..91532f2d2fa7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -325,6 +325,9 @@ EXPORT_SYMBOL(blk_unplug);
 
 static void blk_invoke_request_fn(struct request_queue *q)
 {
+	if (unlikely(blk_queue_stopped(q)))
+		return;
+
 	/*
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
@@ -399,8 +402,13 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
- * blk_run_queue - run a single device queue
+ * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    See @blk_run_queue. This variant must be called with the queue lock
+ *    held and interrupts disabled.
+ *
  */
 void __blk_run_queue(struct request_queue *q)
 {
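
The kernel-doc added above pins down the __blk_run_queue() contract: the caller holds q->queue_lock with interrupts disabled. A hedged sketch of a conforming caller (not part of this patch; the helper name my_kick_queue is hypothetical):

    #include <linux/blkdev.h>
    #include <linux/spinlock.h>

    /* Illustrative only: take q->queue_lock with interrupts disabled
     * around __blk_run_queue(), as its kernel-doc now requires. */
    static void my_kick_queue(struct request_queue *q)
    {
            unsigned long flags;

            spin_lock_irqsave(q->queue_lock, flags);
            __blk_run_queue(q);
            spin_unlock_irqrestore(q->queue_lock, flags);
    }
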
@@ -418,6 +426,12 @@ EXPORT_SYMBOL(__blk_run_queue);
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on this queue, if it has pending work to do.
+ *    May be used to restart queueing when a request has completed. Also
+ *    See @blk_start_queueing.
+ *
  */
 void blk_run_queue(struct request_queue *q)
 {
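
The description added above positions blk_run_queue() as the way to restart dispatch when pending work remains, for example after a request completes. A hedged, illustrative sketch (my_complete_and_restart is a hypothetical name, not from this patch); note that, unlike __blk_run_queue(), it is called without q->queue_lock held, since blk_run_queue() acquires the lock itself:

    #include <linux/blkdev.h>

    /* Illustrative only: once a request has been completed, kick the
     * queue again so any remaining work is dispatched. */
    static void my_complete_and_restart(struct request_queue *q)
    {
            /* ... per-request completion handling elided ... */
            blk_run_queue(q);
    }
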
@@ -884,7 +898,8 @@ EXPORT_SYMBOL(blk_get_request);
  *
  * This is basically a helper to remove the need to know whether a queue
  * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue.
+ * for this queue. Should be used to start queueing on a device outside
+ * of ->request_fn() context. Also see @blk_run_queue.
  *
  * The queue lock must be held with interrupts disabled.
  */
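
The updated comment says blk_start_queueing() is the entry point for starting dispatch from outside ->request_fn() context, with the queue lock held and interrupts disabled, precisely so callers need not know whether the queue is plugged. A hedged sketch of that call pattern, which the elevator.c changes below adopt (my_add_work is a hypothetical name):

    #include <linux/blkdev.h>

    /* Illustrative only: runs with q->queue_lock held and interrupts
     * disabled, outside ->request_fn() context, and kicks dispatch via
     * blk_start_queueing() instead of calling q->request_fn() directly,
     * so the plugged/unplugged state is handled in one place. */
    static void my_add_work(struct request_queue *q)
    {
            /* ... queue new work for the device here ... */
            blk_start_queueing(q);
    }
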
diff --git a/block/elevator.c b/block/elevator.c
index 9482ffa1aae6..59173a69ebdf 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -612,7 +612,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		 * processing.
 		 */
 		blk_remove_plug(q);
-		q->request_fn(q);
+		blk_start_queueing(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT:
@@ -950,7 +950,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
 		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-			q->request_fn(q);
+			blk_start_queueing(q);
 		}
 	}
 }
@@ -1109,8 +1109,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	elv_drain_elevator(q);
 
 	while (q->rq.elvpriv) {
-		blk_remove_plug(q);
-		q->request_fn(q);
+		blk_start_queueing(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);