author	Jens Axboe <axboe@suse.de>	2006-05-11 02:20:16 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-05-11 15:38:59 -0400
commit	dac07ec121de66b6be988b14ae2cd9ce45357b21 (patch)
tree	95359f17d0857fd52f399cef072097523227d032
parent	f358166a9405e4f1d8e50d8f415c26d95505b6de (diff)
[BLOCK] limit request_fn recursion
Don't recurse back into the driver even if the unplug threshold is met,
when the driver asks for a requeue. This is both silly from a logical
point of view (requeues typically happen due to driver/hardware
shortage), and also dangerous since we could hit an endless
request_fn -> requeue -> unplug -> request_fn loop and crash on stack
overrun.

Also limit blk_run_queue() to one level of recursion, similar to how
blk_start_queue() works.

This patch fixed a real problem with SLES10 and lpfc, and it could hit
any SCSI lld that returns non-zero from its ->queuecommand() handler.

Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
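The blk_run_queue() change below follows a common reentry-guard pattern: the outermost caller claims a flag, runs the work, clears the flag, and a nested caller defers instead of recursing. A minimal userspace sketch of that pattern, not the kernel code; run_queue(), request_fn() and defer_to_worker() are hypothetical stand-ins for blk_run_queue(), the driver's request function, and kblockd_schedule_work():

/* Sketch of a single-level recursion guard using an atomic flag. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag reenter = ATOMIC_FLAG_INIT;

static void request_fn(void);

/* Hypothetical stand-in for kblockd_schedule_work(): just record the deferral. */
static void defer_to_worker(void)
{
	printf("already inside request_fn, deferring to worker\n");
}

static void run_queue(void)
{
	/* Only the outermost caller owns the flag and may invoke the driver. */
	if (!atomic_flag_test_and_set(&reenter)) {
		request_fn();
		atomic_flag_clear(&reenter);
	} else {
		defer_to_worker();
	}
}

static void request_fn(void)
{
	printf("request_fn running\n");
	/* A driver that re-runs the queue (e.g. after a requeue) no longer recurses. */
	run_queue();
}

int main(void)
{
	run_queue();
	return 0;
}

Running this prints one "request_fn running" followed by the deferral message, instead of recursing until the stack overflows.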
-rw-r--r--	block/elevator.c	8
-rw-r--r--	block/ll_rw_blk.c	17
2 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 29825792cbd5..8768a367fdde 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -333,6 +333,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
 	struct list_head *pos;
 	unsigned ordseq;
+	int unplug_it = 1;
 
 	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
 
@@ -399,6 +400,11 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		}
 
 		list_add_tail(&rq->queuelist, pos);
+		/*
+		 * most requeues happen because of a busy condition, don't
+		 * force unplug of the queue for that case.
+		 */
+		unplug_it = 0;
 		break;
 
 	default:
@@ -407,7 +413,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		BUG();
 	}
 
-	if (blk_queue_plugged(q)) {
+	if (unplug_it && blk_queue_plugged(q)) {
 		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
 			  - q->in_flight;
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e5041a02e21f..eac48bec1479 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1732,8 +1732,21 @@ void blk_run_queue(struct request_queue *q)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
-	if (!elv_queue_empty(q))
-		q->request_fn(q);
+
+	/*
+	 * Only recurse once to avoid overrunning the stack, let the unplug
+	 * handling reinvoke the handler shortly if we already got there.
+	 */
+	if (!elv_queue_empty(q)) {
+		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			q->request_fn(q);
+			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		} else {
+			blk_plug_device(q);
+			kblockd_schedule_work(&q->unplug_work);
+		}
+	}
+
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
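The elevator.c hunk can be illustrated the same way: an insert that comes from a requeue clears unplug_it, so the queue is not kicked while the device is still busy. A small standalone sketch, assuming the hypothetical enum insert_where and maybe_unplug() stand in for the elevator insert path and the plug check:

#include <stdbool.h>
#include <stdio.h>

enum insert_where { INSERT_BACK, INSERT_REQUEUE };

static void maybe_unplug(bool unplug_it, bool plugged)
{
	if (unplug_it && plugged)
		printf("unplug threshold checked, driver may be invoked\n");
	else
		printf("queue left plugged, no recursion into the driver\n");
}

static void insert_request(enum insert_where where, bool plugged)
{
	bool unplug_it = true;

	if (where == INSERT_REQUEUE) {
		/*
		 * Mirrors the patch: most requeues happen because the device
		 * is busy, so don't force an unplug for that case.
		 */
		unplug_it = false;
	}

	maybe_unplug(unplug_it, plugged);
}

int main(void)
{
	insert_request(INSERT_BACK, true);	/* normal insert: unplug check runs */
	insert_request(INSERT_REQUEUE, true);	/* requeue: queue stays plugged */
	return 0;
}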