author		Nick Piggin <npiggin@suse.de>		2008-04-29 08:48:33 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-04-29 08:48:33 -0400
commit		75ad23bc0fcb4f992a5d06982bf0857ab1738e9e (patch)
tree		8668ef63b1f420252ae41aed9e13737d49fd8054 /drivers/scsi/scsi_lib.c
parent		68154e90c9d1492d570671ae181d9a8f8530da55 (diff)
block: make queue flags non-atomic
We can save some atomic ops in the IO path, if we clearly define
the rules of how to modify the queue flags.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
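The blkdev.h half of this series is not shown on this page, but the "rules" the message refers to boil down to a small helper family that replaces atomic set_bit()/clear_bit() on q->queue_flags. The sketch below shows the assumed shape of those helpers, not the exact hunk from include/linux/blkdev.h: the locked variants expect the caller to already hold the queue_lock, while the *_unlocked variants are for queues that nobody else can see yet.

	/* Sketch only: assumed shape of the non-atomic queue-flag helpers. */
	static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
	{
		WARN_ON_ONCE(!spin_is_locked(q->queue_lock));	/* caller holds queue_lock */
		__set_bit(flag, &q->queue_flags);		/* non-atomic; the lock provides exclusion */
	}

	static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
	{
		WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
		__clear_bit(flag, &q->queue_flags);
	}

	static inline void queue_flag_set_unlocked(unsigned int flag, struct request_queue *q)
	{
		__set_bit(flag, &q->queue_flags);		/* queue not yet visible, no lock needed */
	}

	static inline void queue_flag_clear_unlocked(unsigned int flag, struct request_queue *q)
	{
		__clear_bit(flag, &q->queue_flags);
	}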
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--	drivers/scsi/scsi_lib.c	31
1 files changed, 18 insertions, 13 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 67f412bb4974..d545ad1cf47a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -536,6 +536,9 @@ static void scsi_run_queue(struct request_queue *q)
 	       !shost->host_blocked && !shost->host_self_blocked &&
 		!((shost->can_queue > 0) &&
 		  (shost->host_busy >= shost->can_queue))) {
+
+		int flagset;
+
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -549,19 +552,20 @@ static void scsi_run_queue(struct request_queue *q)
 		sdev = list_entry(shost->starved_list.next,
 					  struct scsi_device, starved_entry);
 		list_del_init(&sdev->starved_entry);
-		spin_unlock_irqrestore(shost->host_lock, flags);
+		spin_unlock(shost->host_lock);
 
+		spin_lock(sdev->request_queue->queue_lock);
+		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
+			  !test_bit(QUEUE_FLAG_REENTER,
+				    &sdev->request_queue->queue_flags);
+		if (flagset)
+			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
+		__blk_run_queue(sdev->request_queue);
+		if (flagset)
+			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
+		spin_unlock(sdev->request_queue->queue_lock);
 
-		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-		    !test_and_set_bit(QUEUE_FLAG_REENTER,
-				      &sdev->request_queue->queue_flags)) {
-			blk_run_queue(sdev->request_queue);
-			clear_bit(QUEUE_FLAG_REENTER,
-				  &sdev->request_queue->queue_flags);
-		} else
-			blk_run_queue(sdev->request_queue);
-
-		spin_lock_irqsave(shost->host_lock, flags);
+		spin_lock(shost->host_lock);
 		if (unlikely(!list_empty(&sdev->starved_entry)))
 			/*
 			 * sdev lost a race, and was put back on the
@@ -1585,8 +1589,9 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 
 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
+	/* New queue, no concurrency on queue_flags */
 	if (!shost->use_clustering)
-		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
 
 	/*
 	 * set a reasonable default alignment on word boundaries: the
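Taken together, the two hunks illustrate the calling convention the new helpers impose. A condensed sketch of the pattern, using the same identifiers as the diff with surrounding context omitted:

	/* Run-time path (scsi_run_queue): the queue is live, so take its lock
	 * around the non-atomic flag updates and call __blk_run_queue(), which
	 * expects queue_lock to be held. */
	spin_lock(sdev->request_queue->queue_lock);
	if (flagset)
		queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
	__blk_run_queue(sdev->request_queue);
	if (flagset)
		queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
	spin_unlock(sdev->request_queue->queue_lock);

	/* Init path (__scsi_alloc_queue): the queue was just allocated and is
	 * not visible to anyone else, so the unlocked variant is enough. */
	if (!shost->use_clustering)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);

The drop from spin_unlock_irqrestore()/spin_lock_irqsave() to plain spin_unlock()/spin_lock() around this section works because the surrounding spin_lock_irqsave() on host_lock (outside this hunk) keeps interrupts disabled for the whole loop.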