author | Mike Christie <michaelc@cs.wisc.edu> | 2008-11-11 14:42:35 -0500 |
---|---|---|
committer | James Bottomley <James.Bottomley@HansenPartnership.com> | 2008-11-16 09:13:58 -0500 |
commit | 2a3a59e5c977654d3aad5bc11cc0aca2303a7f44 (patch) | |
tree | 4449aa49696e54a3a62fc595449be8caa89188f1 /drivers/scsi/scsi_lib.c | |
parent | 9bf1a2445f3c569098b8de7097ca324e65abecc2 (diff) |
[SCSI] Fix hang in starved list processing
Close possible infinite loop with interrupts off when devices are
added back to the starved list.
Fixes: http://bugzilla.kernel.org/show_bug.cgi?id=11898
Reported-by: <alex.shi@intel.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r-- | drivers/scsi/scsi_lib.c | 23 |
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f5d3b96890dc..fa45a1a66867 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -567,15 +567,18 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost)
  */
 static void scsi_run_queue(struct request_queue *q)
 {
-	struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
+	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost = sdev->host;
+	LIST_HEAD(starved_list);
 	unsigned long flags;
 
 	if (scsi_target(sdev)->single_lun)
 		scsi_single_lun_run(sdev);
 
 	spin_lock_irqsave(shost->host_lock, flags);
-	while (!list_empty(&shost->starved_list) && !scsi_host_is_busy(shost)) {
+	list_splice_init(&shost->starved_list, &starved_list);
+
+	while (!list_empty(&starved_list)) {
 		int flagset;
 
 		/*
@@ -588,24 +591,18 @@ static void scsi_run_queue(struct request_queue *q)
 		 * scsi_request_fn must get the host_lock before checking
 		 * or modifying starved_list or starved_entry.
 		 */
-		sdev = list_entry(shost->starved_list.next,
-					struct scsi_device, starved_entry);
-		/*
-		 * The *queue_ready functions can add a device back onto the
-		 * starved list's tail, so we must check for a infinite loop.
-		 */
-		if (sdev == starved_head)
+		if (scsi_host_is_busy(shost))
 			break;
-		if (!starved_head)
-			starved_head = sdev;
 
+		sdev = list_entry(starved_list.next,
+				  struct scsi_device, starved_entry);
+		list_del_init(&sdev->starved_entry);
 		if (scsi_target_is_busy(scsi_target(sdev))) {
 			list_move_tail(&sdev->starved_entry,
 				       &shost->starved_list);
 			continue;
 		}
 
-		list_del_init(&sdev->starved_entry);
 		spin_unlock(shost->host_lock);
 
 		spin_lock(sdev->request_queue->queue_lock);
@@ -621,6 +618,8 @@ static void scsi_run_queue(struct request_queue *q)
 
 		spin_lock(shost->host_lock);
 	}
+	/* put any unprocessed entries back */
+	list_splice(&starved_list, &shost->starved_list);
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	blk_run_queue(q);
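For readers who want the shape of the fix outside the kernel, below is a minimal, self-contained user-space sketch of the same "splice to a private list, drain it, splice the leftovers back" pattern. It is not kernel code: struct dev, run_starved(), target_busy and the host_busy_after cut-off are made-up stand-ins for scsi_device, scsi_run_queue(), scsi_target_is_busy() and scsi_host_is_busy(). The point it illustrates is the one the patch relies on: an entry that is re-added while the walk is in progress lands on the shared list, not on the private list being walked, so the loop cannot spin forever with interrupts disabled.

```c
/* splice_demo.c — illustrative user-space sketch only; all names are
 * invented stand-ins for the SCSI midlayer structures and helpers. */
#include <stdio.h>
#include <stdlib.h>

struct dev {
	int id;
	int target_busy;        /* stand-in for scsi_target_is_busy() */
	struct dev *next;
};

/* Shared "starved" list; in the kernel this is shost->starved_list,
 * protected by shost->host_lock. */
static struct dev *starved;

static void push(struct dev **head, struct dev *d)
{
	d->next = *head;
	*head = d;
}

static void run_starved(int host_busy_after)
{
	/* 1. Splice the shared list onto a private one and empty it,
	 *    mirroring list_splice_init(&shost->starved_list, &starved_list). */
	struct dev *local = starved;
	starved = NULL;

	int processed = 0;
	while (local) {
		/* 2. Stop early once the "host" is busy, mirroring the
		 *    scsi_host_is_busy() check in the loop. */
		if (processed == host_busy_after)
			break;

		struct dev *d = local;
		local = d->next;

		/* 3. A busy target goes back onto the *shared* list, never
		 *    onto the private list being walked, so this walk
		 *    always terminates. */
		if (d->target_busy) {
			push(&starved, d);
			continue;
		}

		printf("running queue for dev %d\n", d->id);
		processed++;
		free(d);
	}

	/* 4. Splice any unprocessed entries back, mirroring
	 *    list_splice(&starved_list, &shost->starved_list). */
	while (local) {
		struct dev *d = local;
		local = d->next;
		push(&starved, d);
	}
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		struct dev *d = calloc(1, sizeof(*d));
		d->id = i;
		d->target_busy = (i == 2);   /* one busy target for illustration */
		push(&starved, d);
	}

	run_starved(3);   /* pretend the host fills up after 3 devices */

	while (starved) {
		struct dev *d = starved;
		starved = d->next;
		printf("still starved: dev %d (target_busy=%d)\n",
		       d->id, d->target_busy);
		free(d);
	}
	return 0;
}
```

Build and run with something like `cc splice_demo.c && ./a.out`; the "still starved" lines at the end correspond to the entries that list_splice() puts back onto shost->starved_list in the real code, to be retried on the next scsi_run_queue() call.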