Diffstat (limited to 'block')

 block/elevator.c  |  8 +++++++-
 block/ll_rw_blk.c | 17 +++++++++++++---
 2 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 29825792cbd5..8768a367fdde 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -333,6 +333,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
 	struct list_head *pos;
 	unsigned ordseq;
+	int unplug_it = 1;
 
 	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
 
@@ -399,6 +400,11 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		}
 
 		list_add_tail(&rq->queuelist, pos);
+		/*
+		 * most requeues happen because of a busy condition, don't
+		 * force unplug of the queue for that case.
+		 */
+		unplug_it = 0;
 		break;
 
 	default:
@@ -407,7 +413,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		BUG();
 	}
 
-	if (blk_queue_plugged(q)) {
+	if (unplug_it && blk_queue_plugged(q)) {
 		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
 			- q->in_flight;
 
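The elevator.c half of the patch makes requeue insertions skip the forced unplug: a requeue usually means the driver is busy, so unplugging would only recurse straight back into a request_fn that just refused work. A minimal userspace sketch of that decision, where fake_queue, insert_request() and the INSERT_* constants are hypothetical stand-ins for the kernel types, not kernel API:

#include <stdio.h>
#include <stdbool.h>

/* hypothetical stand-ins for the kernel's insertion types */
enum insert_where { INSERT_FRONT, INSERT_BACK, INSERT_REQUEUE };

struct fake_queue {
	int nrq;		/* queued requests, minus those in flight */
	int unplug_thresh;	/* unplug once this many requests are queued */
	bool plugged;
};

/* mirrors the patched tail of elv_insert(): requeues no longer force an unplug */
static void insert_request(struct fake_queue *q, enum insert_where where)
{
	int unplug_it = 1;

	if (where == INSERT_REQUEUE) {
		/*
		 * most requeues happen because of a busy condition, don't
		 * force unplug of the queue for that case (as in the patch)
		 */
		unplug_it = 0;
	}
	q->nrq++;

	if (unplug_it && q->plugged && q->nrq >= q->unplug_thresh) {
		q->plugged = false;
		printf("unplugging: driver will be re-entered\n");
	} else {
		printf("staying plugged (where=%d)\n", where);
	}
}

int main(void)
{
	struct fake_queue q = { .nrq = 3, .unplug_thresh = 4, .plugged = true };

	insert_request(&q, INSERT_REQUEUE);	/* busy driver: no forced unplug */
	insert_request(&q, INSERT_BACK);	/* a normal insert may still unplug */
	return 0;
}

Defaulting unplug_it to 1 keeps every other insertion type behaving exactly as before; only the requeue path opts out.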
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e5041a02e21f..eac48bec1479 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1732,8 +1732,21 @@ void blk_run_queue(struct request_queue *q)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
-	if (!elv_queue_empty(q))
-		q->request_fn(q);
+
+	/*
+	 * Only recurse once to avoid overrunning the stack, let the unplug
+	 * handling reinvoke the handler shortly if we already got there.
+	 */
+	if (!elv_queue_empty(q)) {
+		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			q->request_fn(q);
+			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		} else {
+			blk_plug_device(q);
+			kblockd_schedule_work(&q->unplug_work);
+		}
+	}
+
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
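The ll_rw_blk.c half gives blk_run_queue() a one-level recursion limit: the first entry claims QUEUE_FLAG_REENTER and calls the driver directly, while a nested entry re-plugs the queue and lets kblockd's unplug work reinvoke the handler from a fresh stack. A self-contained sketch of that guard, using C11 atomics and a plain counter in place of the kernel's bitops and kblockd_schedule_work(); demo_queue, run_queue and recursing_request_fn are illustrative names, not kernel API:

#include <stdio.h>
#include <stdatomic.h>

/* hypothetical model of a request queue with a reentrancy guard bit */
struct demo_queue {
	atomic_flag reenter;		/* stands in for QUEUE_FLAG_REENTER */
	void (*request_fn)(struct demo_queue *q);
	int deferred;			/* stands in for kblockd_schedule_work() */
};

/* mirrors the patched blk_run_queue(): recurse at most one level */
static void run_queue(struct demo_queue *q)
{
	if (!atomic_flag_test_and_set(&q->reenter)) {
		q->request_fn(q);	/* first entry: call the driver directly */
		atomic_flag_clear(&q->reenter);
	} else {
		/* nested entry: defer instead of growing the stack */
		q->deferred++;
	}
}

/* a driver whose request_fn runs the queue again, as requeue paths can */
static void recursing_request_fn(struct demo_queue *q)
{
	printf("request_fn invoked\n");
	run_queue(q);		/* would recurse indefinitely without the guard */
}

int main(void)
{
	struct demo_queue q = {
		.reenter = ATOMIC_FLAG_INIT,
		.request_fn = recursing_request_fn,
	};

	run_queue(&q);
	printf("deferred %d time(s) instead of recursing\n", q.deferred);
	return 0;
}

Without the test_and_set guard, a driver whose request_fn winds up back in blk_run_queue() could recurse until the kernel stack overruns; deferring the nested call to a workqueue bounds the depth at one.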