diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-11-05 18:32:53 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-11-05 18:32:53 -0400 |
commit | cd3f07d1e6e59fade92a8edaff9315cc534199cf (patch) | |
tree | a2f30bccdfad1826a81c098c6766c2a4345fc171 /drivers/scsi/hpsa.c | |
parent | dc822e57ee05856291a8c9324d2309076ee5f5e2 (diff) | |
parent | f7c9c6bb14f3104608a3a83cadea10a6943d2804 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (45 commits)
[SCSI] Fix block queue and elevator memory leak in scsi_alloc_sdev
[SCSI] scsi_dh_alua: Fix the time interval for alua rtpg commands
[SCSI] scsi_transport_iscsi: Fix documentation of parameter
[SCSI] mv_sas: OCZ RevoDrive3 & zDrive R4 support
[SCSI] libfc: improve flogi retries to avoid lport stuck
[SCSI] libfc: avoid exchanges collision during lport reset
[SCSI] libfc: fix checking FC_TYPE_BLS
[SCSI] edd: Treat "XPRS" host bus type the same as "PCI"
[SCSI] isci: overriding max_concurr_spinup oem parameter by max(oem, user)
[SCSI] isci: revert bcn filtering
[SCSI] isci: Fix hard reset timeout conditions.
[SCSI] isci: No need to manage the pending reset bit on pending requests.
[SCSI] isci: Remove redundant isci_request.ttype field.
[SCSI] isci: Fix task management for SMP, SATA and on dev remove.
[SCSI] isci: No task_done callbacks in error handler paths.
[SCSI] isci: Handle task request timeouts correctly.
[SCSI] isci: Fix tag leak in tasks and terminated requests.
[SCSI] isci: Immediately fail I/O to removed devices.
[SCSI] isci: Lookup device references through requests in completions.
[SCSI] ipr: add definitions for additional adapter
...
Diffstat (limited to 'drivers/scsi/hpsa.c')
-rw-r--r-- | drivers/scsi/hpsa.c | 221 |
1 files changed, 200 insertions, 21 deletions
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index bbdc9f960a66..e76107b2ade3 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/bitmap.h> | 48 | #include <linux/bitmap.h> |
49 | #include <linux/atomic.h> | 49 | #include <linux/atomic.h> |
50 | #include <linux/kthread.h> | 50 | #include <linux/kthread.h> |
51 | #include <linux/jiffies.h> | ||
51 | #include "hpsa_cmd.h" | 52 | #include "hpsa_cmd.h" |
52 | #include "hpsa.h" | 53 | #include "hpsa.h" |
53 | 54 | ||
@@ -127,6 +128,10 @@ static struct board_type products[] = { | |||
127 | 128 | ||
128 | static int number_of_controllers; | 129 | static int number_of_controllers; |
129 | 130 | ||
131 | static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list); | ||
132 | static spinlock_t lockup_detector_lock; | ||
133 | static struct task_struct *hpsa_lockup_detector; | ||
134 | |||
130 | static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); | 135 | static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); |
131 | static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); | 136 | static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); |
132 | static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); | 137 | static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); |
@@ -484,6 +489,7 @@ static struct scsi_host_template hpsa_driver_template = { | |||
484 | #endif | 489 | #endif |
485 | .sdev_attrs = hpsa_sdev_attrs, | 490 | .sdev_attrs = hpsa_sdev_attrs, |
486 | .shost_attrs = hpsa_shost_attrs, | 491 | .shost_attrs = hpsa_shost_attrs, |
492 | .max_sectors = 8192, | ||
487 | }; | 493 | }; |
488 | 494 | ||
489 | 495 | ||
@@ -566,16 +572,16 @@ static int hpsa_find_target_lun(struct ctlr_info *h, | |||
566 | * assumes h->devlock is held | 572 | * assumes h->devlock is held |
567 | */ | 573 | */ |
568 | int i, found = 0; | 574 | int i, found = 0; |
569 | DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA); | 575 | DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES); |
570 | 576 | ||
571 | memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3); | 577 | memset(&lun_taken[0], 0, HPSA_MAX_DEVICES >> 3); |
572 | 578 | ||
573 | for (i = 0; i < h->ndevices; i++) { | 579 | for (i = 0; i < h->ndevices; i++) { |
574 | if (h->dev[i]->bus == bus && h->dev[i]->target != -1) | 580 | if (h->dev[i]->bus == bus && h->dev[i]->target != -1) |
575 | set_bit(h->dev[i]->target, lun_taken); | 581 | set_bit(h->dev[i]->target, lun_taken); |
576 | } | 582 | } |
577 | 583 | ||
578 | for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) { | 584 | for (i = 0; i < HPSA_MAX_DEVICES; i++) { |
579 | if (!test_bit(i, lun_taken)) { | 585 | if (!test_bit(i, lun_taken)) { |
580 | /* *bus = 1; */ | 586 | /* *bus = 1; */ |
581 | *target = i; | 587 | *target = i; |
@@ -598,7 +604,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno, | |||
598 | unsigned char addr1[8], addr2[8]; | 604 | unsigned char addr1[8], addr2[8]; |
599 | struct hpsa_scsi_dev_t *sd; | 605 | struct hpsa_scsi_dev_t *sd; |
600 | 606 | ||
601 | if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) { | 607 | if (n >= HPSA_MAX_DEVICES) { |
602 | dev_err(&h->pdev->dev, "too many devices, some will be " | 608 | dev_err(&h->pdev->dev, "too many devices, some will be " |
603 | "inaccessible.\n"); | 609 | "inaccessible.\n"); |
604 | return -1; | 610 | return -1; |
@@ -673,7 +679,7 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, | |||
673 | struct hpsa_scsi_dev_t *removed[], int *nremoved) | 679 | struct hpsa_scsi_dev_t *removed[], int *nremoved) |
674 | { | 680 | { |
675 | /* assumes h->devlock is held */ | 681 | /* assumes h->devlock is held */ |
676 | BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); | 682 | BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); |
677 | removed[*nremoved] = h->dev[entry]; | 683 | removed[*nremoved] = h->dev[entry]; |
678 | (*nremoved)++; | 684 | (*nremoved)++; |
679 | 685 | ||
@@ -702,7 +708,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, | |||
702 | int i; | 708 | int i; |
703 | struct hpsa_scsi_dev_t *sd; | 709 | struct hpsa_scsi_dev_t *sd; |
704 | 710 | ||
705 | BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); | 711 | BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); |
706 | 712 | ||
707 | sd = h->dev[entry]; | 713 | sd = h->dev[entry]; |
708 | removed[*nremoved] = h->dev[entry]; | 714 | removed[*nremoved] = h->dev[entry]; |
@@ -814,10 +820,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, | |||
814 | int nadded, nremoved; | 820 | int nadded, nremoved; |
815 | struct Scsi_Host *sh = NULL; | 821 | struct Scsi_Host *sh = NULL; |
816 | 822 | ||
817 | added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA, | 823 | added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL); |
818 | GFP_KERNEL); | 824 | removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL); |
819 | removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA, | ||
820 | GFP_KERNEL); | ||
821 | 825 | ||
822 | if (!added || !removed) { | 826 | if (!added || !removed) { |
823 | dev_warn(&h->pdev->dev, "out of memory in " | 827 | dev_warn(&h->pdev->dev, "out of memory in " |
@@ -1338,6 +1342,22 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, | |||
1338 | wait_for_completion(&wait); | 1342 | wait_for_completion(&wait); |
1339 | } | 1343 | } |
1340 | 1344 | ||
1345 | static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, | ||
1346 | struct CommandList *c) | ||
1347 | { | ||
1348 | unsigned long flags; | ||
1349 | |||
1350 | /* If controller lockup detected, fake a hardware error. */ | ||
1351 | spin_lock_irqsave(&h->lock, flags); | ||
1352 | if (unlikely(h->lockup_detected)) { | ||
1353 | spin_unlock_irqrestore(&h->lock, flags); | ||
1354 | c->err_info->CommandStatus = CMD_HARDWARE_ERR; | ||
1355 | } else { | ||
1356 | spin_unlock_irqrestore(&h->lock, flags); | ||
1357 | hpsa_scsi_do_simple_cmd_core(h, c); | ||
1358 | } | ||
1359 | } | ||
1360 | |||
1341 | static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, | 1361 | static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, |
1342 | struct CommandList *c, int data_direction) | 1362 | struct CommandList *c, int data_direction) |
1343 | { | 1363 | { |
@@ -1735,7 +1755,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, | |||
1735 | if (is_scsi_rev_5(h)) | 1755 | if (is_scsi_rev_5(h)) |
1736 | return 0; /* p1210m doesn't need to do this. */ | 1756 | return 0; /* p1210m doesn't need to do this. */ |
1737 | 1757 | ||
1738 | #define MAX_MSA2XXX_ENCLOSURES 32 | ||
1739 | if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { | 1758 | if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { |
1740 | dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " | 1759 | dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " |
1741 | "enclosures exceeded. Check your hardware " | 1760 | "enclosures exceeded. Check your hardware " |
@@ -1846,8 +1865,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1846 | int raid_ctlr_position; | 1865 | int raid_ctlr_position; |
1847 | DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); | 1866 | DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); |
1848 | 1867 | ||
1849 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, | 1868 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); |
1850 | GFP_KERNEL); | ||
1851 | physdev_list = kzalloc(reportlunsize, GFP_KERNEL); | 1869 | physdev_list = kzalloc(reportlunsize, GFP_KERNEL); |
1852 | logdev_list = kzalloc(reportlunsize, GFP_KERNEL); | 1870 | logdev_list = kzalloc(reportlunsize, GFP_KERNEL); |
1853 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); | 1871 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); |
@@ -1870,6 +1888,13 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1870 | 1888 | ||
1871 | /* Allocate the per device structures */ | 1889 | /* Allocate the per device structures */ |
1872 | for (i = 0; i < ndevs_to_allocate; i++) { | 1890 | for (i = 0; i < ndevs_to_allocate; i++) { |
1891 | if (i >= HPSA_MAX_DEVICES) { | ||
1892 | dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." | ||
1893 | " %d devices ignored.\n", HPSA_MAX_DEVICES, | ||
1894 | ndevs_to_allocate - HPSA_MAX_DEVICES); | ||
1895 | break; | ||
1896 | } | ||
1897 | |||
1873 | currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); | 1898 | currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); |
1874 | if (!currentsd[i]) { | 1899 | if (!currentsd[i]) { |
1875 | dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", | 1900 | dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", |
@@ -1956,7 +1981,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1956 | default: | 1981 | default: |
1957 | break; | 1982 | break; |
1958 | } | 1983 | } |
1959 | if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA) | 1984 | if (ncurrent >= HPSA_MAX_DEVICES) |
1960 | break; | 1985 | break; |
1961 | } | 1986 | } |
1962 | adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); | 1987 | adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); |
@@ -2048,8 +2073,14 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, | |||
2048 | } | 2073 | } |
2049 | memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); | 2074 | memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); |
2050 | 2075 | ||
2051 | /* Need a lock as this is being allocated from the pool */ | ||
2052 | spin_lock_irqsave(&h->lock, flags); | 2076 | spin_lock_irqsave(&h->lock, flags); |
2077 | if (unlikely(h->lockup_detected)) { | ||
2078 | spin_unlock_irqrestore(&h->lock, flags); | ||
2079 | cmd->result = DID_ERROR << 16; | ||
2080 | done(cmd); | ||
2081 | return 0; | ||
2082 | } | ||
2083 | /* Need a lock as this is being allocated from the pool */ | ||
2053 | c = cmd_alloc(h); | 2084 | c = cmd_alloc(h); |
2054 | spin_unlock_irqrestore(&h->lock, flags); | 2085 | spin_unlock_irqrestore(&h->lock, flags); |
2055 | if (c == NULL) { /* trouble... */ | 2086 | if (c == NULL) { /* trouble... */ |
@@ -2601,7 +2632,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
2601 | c->SG[0].Len = iocommand.buf_size; | 2632 | c->SG[0].Len = iocommand.buf_size; |
2602 | c->SG[0].Ext = 0; /* we are not chaining*/ | 2633 | c->SG[0].Ext = 0; /* we are not chaining*/ |
2603 | } | 2634 | } |
2604 | hpsa_scsi_do_simple_cmd_core(h, c); | 2635 | hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); |
2605 | if (iocommand.buf_size > 0) | 2636 | if (iocommand.buf_size > 0) |
2606 | hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); | 2637 | hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); |
2607 | check_ioctl_unit_attention(h, c); | 2638 | check_ioctl_unit_attention(h, c); |
@@ -2724,7 +2755,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
2724 | c->SG[i].Ext = 0; | 2755 | c->SG[i].Ext = 0; |
2725 | } | 2756 | } |
2726 | } | 2757 | } |
2727 | hpsa_scsi_do_simple_cmd_core(h, c); | 2758 | hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); |
2728 | if (sg_used) | 2759 | if (sg_used) |
2729 | hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); | 2760 | hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); |
2730 | check_ioctl_unit_attention(h, c); | 2761 | check_ioctl_unit_attention(h, c); |
@@ -2872,6 +2903,8 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
2872 | c->Request.Timeout = 0; | 2903 | c->Request.Timeout = 0; |
2873 | c->Request.CDB[0] = BMIC_WRITE; | 2904 | c->Request.CDB[0] = BMIC_WRITE; |
2874 | c->Request.CDB[6] = BMIC_CACHE_FLUSH; | 2905 | c->Request.CDB[6] = BMIC_CACHE_FLUSH; |
2906 | c->Request.CDB[7] = (size >> 8) & 0xFF; | ||
2907 | c->Request.CDB[8] = size & 0xFF; | ||
2875 | break; | 2908 | break; |
2876 | case TEST_UNIT_READY: | 2909 | case TEST_UNIT_READY: |
2877 | c->Request.CDBLen = 6; | 2910 | c->Request.CDBLen = 6; |
@@ -3091,6 +3124,7 @@ static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id) | |||
3091 | if (interrupt_not_for_us(h)) | 3124 | if (interrupt_not_for_us(h)) |
3092 | return IRQ_NONE; | 3125 | return IRQ_NONE; |
3093 | spin_lock_irqsave(&h->lock, flags); | 3126 | spin_lock_irqsave(&h->lock, flags); |
3127 | h->last_intr_timestamp = get_jiffies_64(); | ||
3094 | while (interrupt_pending(h)) { | 3128 | while (interrupt_pending(h)) { |
3095 | raw_tag = get_next_completion(h); | 3129 | raw_tag = get_next_completion(h); |
3096 | while (raw_tag != FIFO_EMPTY) | 3130 | while (raw_tag != FIFO_EMPTY) |
@@ -3110,6 +3144,7 @@ static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id) | |||
3110 | return IRQ_NONE; | 3144 | return IRQ_NONE; |
3111 | 3145 | ||
3112 | spin_lock_irqsave(&h->lock, flags); | 3146 | spin_lock_irqsave(&h->lock, flags); |
3147 | h->last_intr_timestamp = get_jiffies_64(); | ||
3113 | raw_tag = get_next_completion(h); | 3148 | raw_tag = get_next_completion(h); |
3114 | while (raw_tag != FIFO_EMPTY) | 3149 | while (raw_tag != FIFO_EMPTY) |
3115 | raw_tag = next_command(h); | 3150 | raw_tag = next_command(h); |
@@ -3126,6 +3161,7 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id) | |||
3126 | if (interrupt_not_for_us(h)) | 3161 | if (interrupt_not_for_us(h)) |
3127 | return IRQ_NONE; | 3162 | return IRQ_NONE; |
3128 | spin_lock_irqsave(&h->lock, flags); | 3163 | spin_lock_irqsave(&h->lock, flags); |
3164 | h->last_intr_timestamp = get_jiffies_64(); | ||
3129 | while (interrupt_pending(h)) { | 3165 | while (interrupt_pending(h)) { |
3130 | raw_tag = get_next_completion(h); | 3166 | raw_tag = get_next_completion(h); |
3131 | while (raw_tag != FIFO_EMPTY) { | 3167 | while (raw_tag != FIFO_EMPTY) { |
@@ -3146,6 +3182,7 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id) | |||
3146 | u32 raw_tag; | 3182 | u32 raw_tag; |
3147 | 3183 | ||
3148 | spin_lock_irqsave(&h->lock, flags); | 3184 | spin_lock_irqsave(&h->lock, flags); |
3185 | h->last_intr_timestamp = get_jiffies_64(); | ||
3149 | raw_tag = get_next_completion(h); | 3186 | raw_tag = get_next_completion(h); |
3150 | while (raw_tag != FIFO_EMPTY) { | 3187 | while (raw_tag != FIFO_EMPTY) { |
3151 | if (hpsa_tag_contains_index(raw_tag)) | 3188 | if (hpsa_tag_contains_index(raw_tag)) |
@@ -4090,6 +4127,149 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) | |||
4090 | kfree(h); | 4127 | kfree(h); |
4091 | } | 4128 | } |
4092 | 4129 | ||
4130 | static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h) | ||
4131 | { | ||
4132 | assert_spin_locked(&lockup_detector_lock); | ||
4133 | if (!hpsa_lockup_detector) | ||
4134 | return; | ||
4135 | if (h->lockup_detected) | ||
4136 | return; /* already stopped the lockup detector */ | ||
4137 | list_del(&h->lockup_list); | ||
4138 | } | ||
4139 | |||
4140 | /* Called when controller lockup detected. */ | ||
4141 | static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list) | ||
4142 | { | ||
4143 | struct CommandList *c = NULL; | ||
4144 | |||
4145 | assert_spin_locked(&h->lock); | ||
4146 | /* Mark all outstanding commands as failed and complete them. */ | ||
4147 | while (!list_empty(list)) { | ||
4148 | c = list_entry(list->next, struct CommandList, list); | ||
4149 | c->err_info->CommandStatus = CMD_HARDWARE_ERR; | ||
4150 | finish_cmd(c, c->Header.Tag.lower); | ||
4151 | } | ||
4152 | } | ||
4153 | |||
4154 | static void controller_lockup_detected(struct ctlr_info *h) | ||
4155 | { | ||
4156 | unsigned long flags; | ||
4157 | |||
4158 | assert_spin_locked(&lockup_detector_lock); | ||
4159 | remove_ctlr_from_lockup_detector_list(h); | ||
4160 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | ||
4161 | spin_lock_irqsave(&h->lock, flags); | ||
4162 | h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); | ||
4163 | spin_unlock_irqrestore(&h->lock, flags); | ||
4164 | dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", | ||
4165 | h->lockup_detected); | ||
4166 | pci_disable_device(h->pdev); | ||
4167 | spin_lock_irqsave(&h->lock, flags); | ||
4168 | fail_all_cmds_on_list(h, &h->cmpQ); | ||
4169 | fail_all_cmds_on_list(h, &h->reqQ); | ||
4170 | spin_unlock_irqrestore(&h->lock, flags); | ||
4171 | } | ||
4172 | |||
4173 | #define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ) | ||
4174 | #define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2) | ||
4175 | |||
4176 | static void detect_controller_lockup(struct ctlr_info *h) | ||
4177 | { | ||
4178 | u64 now; | ||
4179 | u32 heartbeat; | ||
4180 | unsigned long flags; | ||
4181 | |||
4182 | assert_spin_locked(&lockup_detector_lock); | ||
4183 | now = get_jiffies_64(); | ||
4184 | /* If we've received an interrupt recently, we're ok. */ | ||
4185 | if (time_after64(h->last_intr_timestamp + | ||
4186 | (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now)) | ||
4187 | return; | ||
4188 | |||
4189 | /* | ||
4190 | * If we've already checked the heartbeat recently, we're ok. | ||
4191 | * This could happen if someone sends us a signal. We | ||
4192 | * otherwise don't care about signals in this thread. | ||
4193 | */ | ||
4194 | if (time_after64(h->last_heartbeat_timestamp + | ||
4195 | (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now)) | ||
4196 | return; | ||
4197 | |||
4198 | /* If heartbeat has not changed since we last looked, we're not ok. */ | ||
4199 | spin_lock_irqsave(&h->lock, flags); | ||
4200 | heartbeat = readl(&h->cfgtable->HeartBeat); | ||
4201 | spin_unlock_irqrestore(&h->lock, flags); | ||
4202 | if (h->last_heartbeat == heartbeat) { | ||
4203 | controller_lockup_detected(h); | ||
4204 | return; | ||
4205 | } | ||
4206 | |||
4207 | /* We're ok. */ | ||
4208 | h->last_heartbeat = heartbeat; | ||
4209 | h->last_heartbeat_timestamp = now; | ||
4210 | } | ||
4211 | |||
4212 | static int detect_controller_lockup_thread(void *notused) | ||
4213 | { | ||
4214 | struct ctlr_info *h; | ||
4215 | unsigned long flags; | ||
4216 | |||
4217 | while (1) { | ||
4218 | struct list_head *this, *tmp; | ||
4219 | |||
4220 | schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL); | ||
4221 | if (kthread_should_stop()) | ||
4222 | break; | ||
4223 | spin_lock_irqsave(&lockup_detector_lock, flags); | ||
4224 | list_for_each_safe(this, tmp, &hpsa_ctlr_list) { | ||
4225 | h = list_entry(this, struct ctlr_info, lockup_list); | ||
4226 | detect_controller_lockup(h); | ||
4227 | } | ||
4228 | spin_unlock_irqrestore(&lockup_detector_lock, flags); | ||
4229 | } | ||
4230 | return 0; | ||
4231 | } | ||
4232 | |||
4233 | static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h) | ||
4234 | { | ||
4235 | unsigned long flags; | ||
4236 | |||
4237 | spin_lock_irqsave(&lockup_detector_lock, flags); | ||
4238 | list_add_tail(&h->lockup_list, &hpsa_ctlr_list); | ||
4239 | spin_unlock_irqrestore(&lockup_detector_lock, flags); | ||
4240 | } | ||
4241 | |||
4242 | static void start_controller_lockup_detector(struct ctlr_info *h) | ||
4243 | { | ||
4244 | /* Start the lockup detector thread if not already started */ | ||
4245 | if (!hpsa_lockup_detector) { | ||
4246 | spin_lock_init(&lockup_detector_lock); | ||
4247 | hpsa_lockup_detector = | ||
4248 | kthread_run(detect_controller_lockup_thread, | ||
4249 | NULL, "hpsa"); | ||
4250 | } | ||
4251 | if (!hpsa_lockup_detector) { | ||
4252 | dev_warn(&h->pdev->dev, | ||
4253 | "Could not start lockup detector thread\n"); | ||
4254 | return; | ||
4255 | } | ||
4256 | add_ctlr_to_lockup_detector_list(h); | ||
4257 | } | ||
4258 | |||
4259 | static void stop_controller_lockup_detector(struct ctlr_info *h) | ||
4260 | { | ||
4261 | unsigned long flags; | ||
4262 | |||
4263 | spin_lock_irqsave(&lockup_detector_lock, flags); | ||
4264 | remove_ctlr_from_lockup_detector_list(h); | ||
4265 | /* If the list of ctlr's to monitor is empty, stop the thread */ | ||
4266 | if (list_empty(&hpsa_ctlr_list)) { | ||
4267 | kthread_stop(hpsa_lockup_detector); | ||
4268 | hpsa_lockup_detector = NULL; | ||
4269 | } | ||
4270 | spin_unlock_irqrestore(&lockup_detector_lock, flags); | ||
4271 | } | ||
4272 | |||
4093 | static int __devinit hpsa_init_one(struct pci_dev *pdev, | 4273 | static int __devinit hpsa_init_one(struct pci_dev *pdev, |
4094 | const struct pci_device_id *ent) | 4274 | const struct pci_device_id *ent) |
4095 | { | 4275 | { |
@@ -4127,7 +4307,6 @@ reinit_after_soft_reset: | |||
4127 | return -ENOMEM; | 4307 | return -ENOMEM; |
4128 | 4308 | ||
4129 | h->pdev = pdev; | 4309 | h->pdev = pdev; |
4130 | h->busy_initializing = 1; | ||
4131 | h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; | 4310 | h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; |
4132 | INIT_LIST_HEAD(&h->cmpQ); | 4311 | INIT_LIST_HEAD(&h->cmpQ); |
4133 | INIT_LIST_HEAD(&h->reqQ); | 4312 | INIT_LIST_HEAD(&h->reqQ); |
@@ -4236,7 +4415,7 @@ reinit_after_soft_reset: | |||
4236 | 4415 | ||
4237 | hpsa_hba_inquiry(h); | 4416 | hpsa_hba_inquiry(h); |
4238 | hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ | 4417 | hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ |
4239 | h->busy_initializing = 0; | 4418 | start_controller_lockup_detector(h); |
4240 | return 1; | 4419 | return 1; |
4241 | 4420 | ||
4242 | clean4: | 4421 | clean4: |
@@ -4245,7 +4424,6 @@ clean4: | |||
4245 | free_irq(h->intr[h->intr_mode], h); | 4424 | free_irq(h->intr[h->intr_mode], h); |
4246 | clean2: | 4425 | clean2: |
4247 | clean1: | 4426 | clean1: |
4248 | h->busy_initializing = 0; | ||
4249 | kfree(h); | 4427 | kfree(h); |
4250 | return rc; | 4428 | return rc; |
4251 | } | 4429 | } |
@@ -4300,10 +4478,11 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev) | |||
4300 | struct ctlr_info *h; | 4478 | struct ctlr_info *h; |
4301 | 4479 | ||
4302 | if (pci_get_drvdata(pdev) == NULL) { | 4480 | if (pci_get_drvdata(pdev) == NULL) { |
4303 | dev_err(&pdev->dev, "unable to remove device \n"); | 4481 | dev_err(&pdev->dev, "unable to remove device\n"); |
4304 | return; | 4482 | return; |
4305 | } | 4483 | } |
4306 | h = pci_get_drvdata(pdev); | 4484 | h = pci_get_drvdata(pdev); |
4485 | stop_controller_lockup_detector(h); | ||
4307 | hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ | 4486 | hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ |
4308 | hpsa_shutdown(pdev); | 4487 | hpsa_shutdown(pdev); |
4309 | iounmap(h->vaddr); | 4488 | iounmap(h->vaddr); |