author		Jens Axboe <jaxboe@fusionio.com>	2011-04-19 07:32:46 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2011-04-19 07:32:46 -0400
commit		c21e6beba8835d09bb80e34961430b13e60381c5 (patch)
tree		cdf6f6d40130b95e641ab5db1de0f6ecc179054a /drivers/scsi/scsi_lib.c
parent		5f45c69589b7d2953584e6cd0b31e35dbe960ad0 (diff)
block: get rid of QUEUE_FLAG_REENTER
We are currently using this flag to check whether it's safe to call into
->request_fn(). If it is set, we punt to kblockd. But we get a lot of false
positives and excessive punts to kblockd, which hurts performance.

The only real abuser of this infrastructure is SCSI. So export the async queue
run and convert SCSI over to use that. There's room for improvement in that
SCSI need not always use the async call, but this fixes our performance issue
and they can fix that up in due time.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
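For context, the "async queue run" referred to above defers the actual queue run to the kblockd workqueue instead of invoking ->request_fn() inline, which is why the QUEUE_FLAG_REENTER check and the lock juggling in the hunk below can simply go away. A minimal sketch of what such a helper looks like, assuming the 2.6.39-era block-core names kblockd_workqueue and q->delay_work (an illustration, not the verbatim blk-core.c source):

/*
 * Sketch only: approximates the behaviour of the exported async run
 * helper. Rather than calling ->request_fn() directly (which could
 * recurse back into the caller), the queue run is punted to kblockd
 * and executes later from workqueue context.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}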
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--	drivers/scsi/scsi_lib.c	17
1 file changed, 1 insertion(+), 16 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ab55c2fa7ce2..e9901b8f8443 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
 	list_splice_init(&shost->starved_list, &starved_list);
 
 	while (!list_empty(&starved_list)) {
-		int flagset;
-
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
 			continue;
 		}
 
-		spin_unlock(shost->host_lock);
-
-		spin_lock(sdev->request_queue->queue_lock);
-		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-				!test_bit(QUEUE_FLAG_REENTER,
-					&sdev->request_queue->queue_flags);
-		if (flagset)
-			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue);
-		if (flagset)
-			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
-		spin_unlock(sdev->request_queue->queue_lock);
-
-		spin_lock(shost->host_lock);
+		blk_run_queue_async(sdev->request_queue);
 	}
 	/* put any unprocessed entries back */
 	list_splice(&starved_list, &shost->starved_list);