Diffstat (limited to 'drivers/scsi/hpsa.c')
 drivers/scsi/hpsa.c | 297 ++++++++++++++++++++++++++++++---------------
 1 file changed, 184 insertions(+), 113 deletions(-)

diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 9a6e4a2cd072..31184b35370f 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -48,6 +48,7 @@
 #include <linux/bitmap.h>
 #include <linux/atomic.h>
 #include <linux/jiffies.h>
+#include <linux/percpu.h>
 #include <asm/div64.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"
@@ -115,9 +116,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
@@ -165,9 +172,15 @@ static struct board_type products[] = {
 	{0x21C3103C, "Smart Array", &SA5_access},
 	{0x21C4103C, "Smart Array", &SA5_access},
 	{0x21C5103C, "Smart Array", &SA5_access},
+	{0x21C6103C, "Smart Array", &SA5_access},
 	{0x21C7103C, "Smart Array", &SA5_access},
 	{0x21C8103C, "Smart Array", &SA5_access},
 	{0x21C9103C, "Smart Array", &SA5_access},
+	{0x21CA103C, "Smart Array", &SA5_access},
+	{0x21CB103C, "Smart Array", &SA5_access},
+	{0x21CC103C, "Smart Array", &SA5_access},
+	{0x21CD103C, "Smart Array", &SA5_access},
+	{0x21CE103C, "Smart Array", &SA5_access},
 	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
 	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
 	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
@@ -181,7 +194,8 @@ static int number_of_controllers;
 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
-static void start_io(struct ctlr_info *h);
+static void lock_and_start_io(struct ctlr_info *h);
+static void start_io(struct ctlr_info *h, unsigned long *flags);
 
 #ifdef CONFIG_COMPAT
 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
@@ -683,7 +697,7 @@ static inline void addQ(struct list_head *list, struct CommandList *c)
 static inline u32 next_command(struct ctlr_info *h, u8 q)
 {
 	u32 a;
-	struct reply_pool *rq = &h->reply_queue[q];
+	struct reply_queue_buffer *rq = &h->reply_queue[q];
 	unsigned long flags;
 
 	if (h->transMethod & CFGTBL_Trans_io_accel1)
@@ -832,8 +846,8 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
 	spin_lock_irqsave(&h->lock, flags);
 	addQ(&h->reqQ, c);
 	h->Qdepth++;
+	start_io(h, &flags);
 	spin_unlock_irqrestore(&h->lock, flags);
-	start_io(h);
 }
 
 static inline void removeQ(struct CommandList *c)
@@ -1542,9 +1556,13 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
 			dev_warn(&h->pdev->dev,
 				"%s: task complete with check condition.\n",
 				"HP SSD Smart Path");
+			cmd->result |= SAM_STAT_CHECK_CONDITION;
 			if (c2->error_data.data_present !=
-					IOACCEL2_SENSE_DATA_PRESENT)
+					IOACCEL2_SENSE_DATA_PRESENT) {
+				memset(cmd->sense_buffer, 0,
+					SCSI_SENSE_BUFFERSIZE);
 				break;
+			}
 			/* copy the sense data */
 			data_len = c2->error_data.sense_data_len;
 			if (data_len > SCSI_SENSE_BUFFERSIZE)
@@ -1554,7 +1572,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
 					sizeof(c2->error_data.sense_data_buff);
 			memcpy(cmd->sense_buffer,
 				c2->error_data.sense_data_buff, data_len);
-			cmd->result |= SAM_STAT_CHECK_CONDITION;
 			retry = 1;
 			break;
 		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
@@ -1639,16 +1656,6 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
 	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
 		c2->error_data.serv_response ==
 			IOACCEL2_SERV_RESPONSE_FAILURE) {
-		if (c2->error_data.status ==
-			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
-			dev_warn(&h->pdev->dev,
-				"%s: Path is unavailable, retrying on standard path.\n",
-				"HP SSD Smart Path");
-		else
-			dev_warn(&h->pdev->dev,
-				"%s: Error 0x%02x, retrying on standard path.\n",
-				"HP SSD Smart Path", c2->error_data.status);
-
 		dev->offload_enabled = 0;
 		h->drv_req_rescan = 1;	/* schedule controller for a rescan */
 		cmd->result = DID_SOFT_ERROR << 16;
@@ -1979,20 +1986,26 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
 	wait_for_completion(&wait);
 }
 
+static u32 lockup_detected(struct ctlr_info *h)
+{
+	int cpu;
+	u32 rc, *lockup_detected;
+
+	cpu = get_cpu();
+	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+	rc = *lockup_detected;
+	put_cpu();
+	return rc;
+}
+
 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
 	struct CommandList *c)
 {
-	unsigned long flags;
-
 	/* If controller lockup detected, fake a hardware error. */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h)))
 		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
-	} else {
-		spin_unlock_irqrestore(&h->lock, flags);
+	else
 		hpsa_scsi_do_simple_cmd_core(h, c);
-	}
 }
 
 #define MAX_DRIVER_CMD_RETRIES 25
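
Note: the hunk above converts the driver's lockup flag from a spinlock-protected field into a per-cpu variable, so hot paths can poll for a lockup without taking h->lock or bouncing a shared cache line between CPUs. A minimal standalone sketch of the same pattern follows; the my_* names are illustrative, not part of hpsa:

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/errno.h>

	struct my_dev {
		u32 __percpu *flag;	/* one copy per cpu; lock-free to read */
	};

	static int my_dev_init(struct my_dev *d)
	{
		d->flag = alloc_percpu(u32);	/* returns zeroed storage */
		if (!d->flag)
			return -ENOMEM;
		return 0;
	}

	static u32 my_flag_read(struct my_dev *d)
	{
		u32 rc, *p;

		p = per_cpu_ptr(d->flag, get_cpu());	/* pins caller to its cpu */
		rc = *p;
		put_cpu();
		return rc;
	}

get_cpu()/put_cpu() disable preemption for the duration of the read, which is all the synchronization a plain u32 load needs here.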
@@ -2417,7 +2430,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
 	buflen = 16;
 	buf = kzalloc(64, GFP_KERNEL);
 	if (!buf)
-		return -1;
+		return -ENOMEM;
 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
 	if (rc == 0)
 		memcpy(device_id, &buf[8], buflen);
@@ -2503,27 +2516,21 @@ static int hpsa_get_volume_status(struct ctlr_info *h,
 		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
 
 	/* Does controller have VPD for logical volume status? */
-	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
-		dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
+	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
 		goto exit_failed;
-	}
 
 	/* Get the size of the VPD return buffer */
 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
 					buf, HPSA_VPD_HEADER_SZ);
-	if (rc != 0) {
-		dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+	if (rc != 0)
 		goto exit_failed;
-	}
 	size = buf[3];
 
 	/* Now get the whole VPD buffer */
 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
 					buf, size + HPSA_VPD_HEADER_SZ);
-	if (rc != 0) {
-		dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+	if (rc != 0)
 		goto exit_failed;
-	}
 	status = buf[4]; /* status byte */
 
 	kfree(buf);
@@ -2536,11 +2543,11 @@ exit_failed:
 /* Determine offline status of a volume.
  * Return either:
  *  0 (not offline)
- * -1 (offline for unknown reasons)
+ *  0xff (offline for unknown reasons)
  *  # (integer code indicating one of several NOT READY states
  *     describing why a volume is to be kept offline)
  */
-static unsigned char hpsa_volume_offline(struct ctlr_info *h,
+static int hpsa_volume_offline(struct ctlr_info *h,
 					unsigned char scsi3addr[])
 {
 	struct CommandList *c;
@@ -2639,11 +2646,15 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 
 	if (this_device->devtype == TYPE_DISK &&
 		is_logical_dev_addr_mode(scsi3addr)) {
+		int volume_offline;
+
 		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
-		this_device->volume_offline =
-			hpsa_volume_offline(h, scsi3addr);
+		volume_offline = hpsa_volume_offline(h, scsi3addr);
+		if (volume_offline < 0 || volume_offline > 0xff)
+			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
+		this_device->volume_offline = volume_offline & 0xff;
 	} else {
 		this_device->raid_level = RAID_UNKNOWN;
 		this_device->offload_config = 0;
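
Note: hpsa_volume_offline() now returns an int so it can propagate negative error codes, but this_device->volume_offline is a single byte; the clamp above keeps a stray error code from being silently truncated into a bogus status. Reduced to its essentials (illustrative function, not driver code):

	/* Illustration of the narrowing hazard the clamp above avoids. */
	static unsigned char store_volume_status(int volume_offline)
	{
		/* e.g. -ENOMEM would otherwise truncate to a garbage byte */
		if (volume_offline < 0 || volume_offline > 0xff)
			volume_offline = 0xff;	/* defined "unknown" code */
		return volume_offline & 0xff;
	}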
@@ -2836,6 +2847,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
 
 	/* Get the list of physical devices */
 	physicals = kzalloc(reportsize, GFP_KERNEL);
+	if (physicals == NULL)
+		return 0;
 	if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
 			reportsize, extended)) {
 		dev_err(&h->pdev->dev,
@@ -2847,26 +2860,20 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
 	nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
 							responsesize;
 
-
 	/* find ioaccel2 handle in list of physicals: */
 	for (i = 0; i < nphysicals; i++) {
+		struct ext_report_lun_entry *entry = &physicals->LUN[i];
+
 		/* handle is in bytes 28-31 of each lun */
-		if (memcmp(&((struct ReportExtendedLUNdata *)
-				physicals)->LUN[i][20], &find, 4) != 0) {
+		if (entry->ioaccel_handle != find)
 			continue; /* didn't match */
-		}
 		found = 1;
-		memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
-					physicals)->LUN[i][0], 8);
+		memcpy(scsi3addr, entry->lunid, 8);
 		if (h->raid_offload_debug > 0)
 			dev_info(&h->pdev->dev,
-				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
 				__func__, find,
-				((struct ReportExtendedLUNdata *)
-					physicals)->LUN[i][20],
-				scsi3addr[0], scsi3addr[1], scsi3addr[2],
-				scsi3addr[3], scsi3addr[4], scsi3addr[5],
-				scsi3addr[6], scsi3addr[7]);
+				entry->ioaccel_handle, scsi3addr);
 		break; /* found it */
 	}
 
@@ -2951,7 +2958,8 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
 		return RAID_CTLR_LUNID;
 
 	if (i < logicals_start)
-		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
+		return &physdev_list->LUN[i -
+				(raid_ctlr_position == 0)].lunid[0];
 
 	if (i < last_device)
 		return &logdev_list->LUN[i - nphysicals -
@@ -2963,19 +2971,24 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
 {
 	int rc;
+	int hba_mode_enabled;
 	struct bmic_controller_parameters *ctlr_params;
 	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
 		GFP_KERNEL);
 
 	if (!ctlr_params)
-		return 0;
+		return -ENOMEM;
 	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
 		sizeof(struct bmic_controller_parameters));
-	if (rc != 0) {
+	if (rc) {
 		kfree(ctlr_params);
-		return 0;
+		return rc;
 	}
-	return ctlr_params->nvram_flags & (1 << 3) ? 1 : 0;
+
+	hba_mode_enabled =
+		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
+	kfree(ctlr_params);
+	return hba_mode_enabled;
 }
 
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
@@ -3001,7 +3014,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
 	int i, n_ext_target_devs, ndevs_to_allocate;
 	int raid_ctlr_position;
-	u8 rescan_hba_mode;
+	int rescan_hba_mode;
 	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
 
 	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -3016,6 +3029,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 	memset(lunzerobits, 0, sizeof(lunzerobits));
 
 	rescan_hba_mode = hpsa_hba_mode_enabled(h);
+	if (rescan_hba_mode < 0)
+		goto out;
 
 	if (!h->hba_mode_enabled && rescan_hba_mode)
 		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
@@ -3053,7 +3068,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 		ndev_allocated++;
 	}
 
-	if (unlikely(is_scsi_rev_5(h)))
+	if (is_scsi_rev_5(h))
 		raid_ctlr_position = 0;
 	else
 		raid_ctlr_position = nphysicals + nlogicals;
@@ -3950,7 +3965,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	struct hpsa_scsi_dev_t *dev;
 	unsigned char scsi3addr[8];
 	struct CommandList *c;
-	unsigned long flags;
 	int rc = 0;
 
 	/* Get the ptr to our adapter structure out of cmd->host. */
@@ -3963,14 +3977,11 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	}
 	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
 
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h))) {
 		cmd->result = DID_ERROR << 16;
 		done(cmd);
 		return 0;
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	c = cmd_alloc(h);
 	if (c == NULL) {		/* trouble... */
 		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
@@ -4082,16 +4093,13 @@ static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
 	 * we can prevent new rescan threads from piling up on a
 	 * locked up controller.
 	 */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h))) {
 		spin_lock_irqsave(&h->scan_lock, flags);
 		h->scan_finished = 1;
 		wake_up_all(&h->scan_wait_queue);
 		spin_unlock_irqrestore(&h->scan_lock, flags);
 		return 1;
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return 0;
 }
 
@@ -4942,7 +4950,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 	buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
 	if (buff == NULL)
 		return -EFAULT;
-	if (iocommand.Request.Type.Direction == XFER_WRITE) {
+	if (iocommand.Request.Type.Direction & XFER_WRITE) {
 		/* Copy the data into the buffer we created */
 		if (copy_from_user(buff, iocommand.buf,
 			iocommand.buf_size)) {
@@ -5005,7 +5013,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		rc = -EFAULT;
 		goto out;
 	}
-	if (iocommand.Request.Type.Direction == XFER_READ &&
+	if ((iocommand.Request.Type.Direction & XFER_READ) &&
 		iocommand.buf_size > 0) {
 		/* Copy the data out of the buffer we created */
 		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
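
Note: the Direction tests in this and the following two hunks change from == to & because Type.Direction is a bit field: a bidirectional passthrough carries both the read and the write bit, and an exact-equality test would skip the user-space copy on such requests. The distinction in miniature (flag values here are illustrative, not from hpsa_cmd.h):

	#define DEMO_XFER_WRITE 0x01
	#define DEMO_XFER_READ  0x02

	static int demo_needs_copy_in(unsigned char direction)
	{
		/*
		 * Matches WRITE and READ|WRITE. An '== DEMO_XFER_WRITE'
		 * test would wrongly reject the bidirectional case.
		 */
		return (direction & DEMO_XFER_WRITE) != 0;
	}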
@@ -5082,7 +5090,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 			status = -ENOMEM;
 			goto cleanup1;
 		}
-		if (ioc->Request.Type.Direction == XFER_WRITE) {
+		if (ioc->Request.Type.Direction & XFER_WRITE) {
 			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
 				status = -ENOMEM;
 				goto cleanup1;
@@ -5134,7 +5142,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		status = -EFAULT;
 		goto cleanup0;
 	}
-	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
+	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
 		/* Copy the data out of the buffer we created */
 		BYTE __user *ptr = ioc->buf;
 		for (i = 0; i < sg_used; i++) {
@@ -5438,13 +5446,12 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
 
 /* Takes cmds off the submission queue and sends them to the hardware,
  * then puts them on the queue of cmds waiting for completion.
+ * Assumes h->lock is held
  */
-static void start_io(struct ctlr_info *h)
+static void start_io(struct ctlr_info *h, unsigned long *flags)
 {
 	struct CommandList *c;
-	unsigned long flags;
 
-	spin_lock_irqsave(&h->lock, flags);
 	while (!list_empty(&h->reqQ)) {
 		c = list_entry(h->reqQ.next, struct CommandList, list);
 		/* can't do anything if fifo is full */
@@ -5467,14 +5474,20 @@
 		 * condition.
 		 */
 		h->commands_outstanding++;
-		if (h->commands_outstanding > h->max_outstanding)
-			h->max_outstanding = h->commands_outstanding;
 
 		/* Tell the controller execute command */
-		spin_unlock_irqrestore(&h->lock, flags);
+		spin_unlock_irqrestore(&h->lock, *flags);
 		h->access.submit_command(h, c);
-		spin_lock_irqsave(&h->lock, flags);
+		spin_lock_irqsave(&h->lock, *flags);
 	}
+}
+
+static void lock_and_start_io(struct ctlr_info *h)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&h->lock, flags);
+	start_io(h, &flags);
 	spin_unlock_irqrestore(&h->lock, flags);
 }
 
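
Note: start_io() now documents and relies on the caller holding h->lock, taking the caller's saved IRQ flags by pointer because it drops and retakes the lock around the hardware doorbell in submit_command(); lock_and_start_io() is the wrapper for callers that arrive without the lock. A reduced sketch of the handoff, with demo_* names standing in for the driver's types:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct demo_cmd {
		struct list_head list;
	};

	struct demo_ctlr {
		spinlock_t lock;
		struct list_head reqQ;
	};

	static void demo_submit_to_hw(struct demo_ctlr *h, struct demo_cmd *c)
	{
		/* stand-in for h->access.submit_command(h, c) */
	}

	/* Assumes h->lock is held; *flags holds the caller's saved state. */
	static void demo_start_io(struct demo_ctlr *h, unsigned long *flags)
	{
		while (!list_empty(&h->reqQ)) {
			struct demo_cmd *c = list_first_entry(&h->reqQ,
						struct demo_cmd, list);

			list_del_init(&c->list);
			/* drop the lock only around the hardware doorbell */
			spin_unlock_irqrestore(&h->lock, *flags);
			demo_submit_to_hw(h, c);
			spin_lock_irqsave(&h->lock, *flags);
		}
	}

	static void demo_lock_and_start_io(struct demo_ctlr *h)
	{
		unsigned long flags;

		spin_lock_irqsave(&h->lock, flags);
		demo_start_io(h, &flags);
		spin_unlock_irqrestore(&h->lock, flags);
	}

Passing flags by pointer matters: the first unlock inside the loop must restore the interrupt state saved by the caller's spin_lock_irqsave(), so the flags word is shared rather than shadowed by a local.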
@@ -5542,7 +5555,7 @@ static inline void finish_cmd(struct CommandList *c)
 	else if (c->cmd_type == CMD_IOCTL_PEND)
 		complete(c->waiting);
 	if (unlikely(io_may_be_stalled))
-		start_io(h);
+		lock_and_start_io(h);
 }
 
 static inline u32 hpsa_tag_contains_index(u32 tag)
@@ -5819,12 +5832,12 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
 		dev_info(&pdev->dev, "using doorbell to reset controller\n");
 		writel(use_doorbell, vaddr + SA5_DOORBELL);
 
-		/* PMC hardware guys tell us we need a 5 second delay after
+		/* PMC hardware guys tell us we need a 10 second delay after
 		 * doorbell reset and before any attempt to talk to the board
 		 * at all to ensure that this actually works and doesn't fall
 		 * over in some weird corner cases.
 		 */
-		msleep(5000);
+		msleep(10000);
 	} else { /* Try to do it the PCI power state way */
 
 		/* Quoting from the Open CISS Specification: "The Power
@@ -6145,6 +6158,8 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
 		dev_info(&h->pdev->dev, "MSIX\n");
 		h->msix_vector = MAX_REPLY_QUEUES;
+		if (h->msix_vector > num_online_cpus())
+			h->msix_vector = num_online_cpus();
 		err = pci_enable_msix(h->pdev, hpsa_msix_entries,
 				      h->msix_vector);
 		if (err > 0) {
@@ -6594,6 +6609,17 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
 			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
 }
 
+static void hpsa_irq_affinity_hints(struct ctlr_info *h)
+{
+	int i, cpu, rc;
+
+	cpu = cpumask_first(cpu_online_mask);
+	for (i = 0; i < h->msix_vector; i++) {
+		rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
+		cpu = cpumask_next(cpu, cpu_online_mask);
+	}
+}
+
 static int hpsa_request_irq(struct ctlr_info *h,
 	irqreturn_t (*msixhandler)(int, void *),
 	irqreturn_t (*intxhandler)(int, void *))
@@ -6613,6 +6639,7 @@ static int hpsa_request_irq(struct ctlr_info *h,
 			rc = request_irq(h->intr[i], msixhandler,
 					0, h->devname,
 					&h->q[i]);
+		hpsa_irq_affinity_hints(h);
 	} else {
 		/* Use single reply pool */
 		if (h->msix_vector > 0 || h->msi_vector) {
@@ -6664,12 +6691,15 @@ static void free_irqs(struct ctlr_info *h)
 	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
 		/* Single reply queue, only one irq to free */
 		i = h->intr_mode;
+		irq_set_affinity_hint(h->intr[i], NULL);
 		free_irq(h->intr[i], &h->q[i]);
 		return;
 	}
 
-	for (i = 0; i < h->msix_vector; i++)
+	for (i = 0; i < h->msix_vector; i++) {
+		irq_set_affinity_hint(h->intr[i], NULL);
 		free_irq(h->intr[i], &h->q[i]);
+	}
 }
 
 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
@@ -6686,6 +6716,20 @@ static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
 #endif /* CONFIG_PCI_MSI */
 }
 
+static void hpsa_free_reply_queues(struct ctlr_info *h)
+{
+	int i;
+
+	for (i = 0; i < h->nreply_queues; i++) {
+		if (!h->reply_queue[i].head)
+			continue;
+		pci_free_consistent(h->pdev, h->reply_queue_size,
+			h->reply_queue[i].head, h->reply_queue[i].busaddr);
+		h->reply_queue[i].head = NULL;
+		h->reply_queue[i].busaddr = 0;
+	}
+}
+
 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 {
 	hpsa_free_irqs_and_disable_msix(h);
@@ -6693,8 +6737,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 	hpsa_free_cmd_pool(h);
 	kfree(h->ioaccel1_blockFetchTable);
 	kfree(h->blockFetchTable);
-	pci_free_consistent(h->pdev, h->reply_pool_size,
-		h->reply_pool, h->reply_pool_dhandle);
+	hpsa_free_reply_queues(h);
 	if (h->vaddr)
 		iounmap(h->vaddr);
 	if (h->transtable)
@@ -6719,16 +6762,38 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
 	}
 }
 
+static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
+{
+	int i, cpu;
+
+	cpu = cpumask_first(cpu_online_mask);
+	for (i = 0; i < num_online_cpus(); i++) {
+		u32 *lockup_detected;
+		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+		*lockup_detected = value;
+		cpu = cpumask_next(cpu, cpu_online_mask);
+	}
+	wmb(); /* be sure the per-cpu variables are out to memory */
+}
+
 static void controller_lockup_detected(struct ctlr_info *h)
 {
 	unsigned long flags;
+	u32 lockup_detected;
 
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
 	spin_lock_irqsave(&h->lock, flags);
-	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+	if (!lockup_detected) {
+		/* no heartbeat, but controller gave us a zero. */
+		dev_warn(&h->pdev->dev,
+			"lockup detected but scratchpad register is zero\n");
+		lockup_detected = 0xffffffff;
+	}
+	set_lockup_detected_for_all_cpus(h, lockup_detected);
 	spin_unlock_irqrestore(&h->lock, flags);
 	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
-		h->lockup_detected);
+		lockup_detected);
 	pci_disable_device(h->pdev);
 	spin_lock_irqsave(&h->lock, flags);
 	fail_all_cmds_on_list(h, &h->cmpQ);
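
Note: on the write side, set_lockup_detected_for_all_cpus() stores the scratchpad value into every online CPU's copy of the flag (with a 0xffffffff fallback so a zero scratchpad still reads as "locked up"), and the wmb() publishes the stores before readers poll their local copies. The same walk written with the for_each_online_cpu() helper, as a cross-check of what the open-coded cpumask_first()/cpumask_next() loop does:

	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <asm/barrier.h>

	/* 'flag' stands in for h->lockup_detected. */
	static void demo_set_flag_on_all_cpus(u32 __percpu *flag, u32 value)
	{
		int cpu;

		for_each_online_cpu(cpu)
			*per_cpu_ptr(flag, cpu) = value;
		wmb();	/* make the stores visible before readers poll */
	}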
@@ -6863,7 +6928,7 @@
 	struct ctlr_info *h = container_of(to_delayed_work(work),
 					struct ctlr_info, monitor_ctlr_work);
 	detect_controller_lockup(h);
-	if (h->lockup_detected)
+	if (lockup_detected(h))
 		return;
 
 	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
@@ -6913,7 +6978,6 @@ reinit_after_soft_reset:
 	 * the 5 lower bits of the address are used by the hardware. and by
 	 * the driver.  See comments in hpsa.h for more info.
 	 */
-#define COMMANDLIST_ALIGNMENT 128
 	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
 	h = kzalloc(sizeof(*h), GFP_KERNEL);
 	if (!h)
@@ -6928,6 +6992,13 @@
 	spin_lock_init(&h->offline_device_lock);
 	spin_lock_init(&h->scan_lock);
 	spin_lock_init(&h->passthru_count_lock);
+
+	/* Allocate and clear per-cpu variable lockup_detected */
+	h->lockup_detected = alloc_percpu(u32);
+	if (!h->lockup_detected)
+		goto clean1;
+	set_lockup_detected_for_all_cpus(h, 0);
+
 	rc = hpsa_pci_init(h);
 	if (rc != 0)
 		goto clean1;
@@ -7051,6 +7122,8 @@ clean4:
 	free_irqs(h);
 clean2:
 clean1:
+	if (h->lockup_detected)
+		free_percpu(h->lockup_detected);
 	kfree(h);
 	return rc;
 }
@@ -7059,16 +7132,10 @@ static void hpsa_flush_cache(struct ctlr_info *h)
 {
 	char *flush_buf;
 	struct CommandList *c;
-	unsigned long flags;
 
 	/* Don't bother trying to flush the cache if locked up */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h)))
 		return;
-	}
-	spin_unlock_irqrestore(&h->lock, flags);
-
 	flush_buf = kzalloc(4, GFP_KERNEL);
 	if (!flush_buf)
 		return;
@@ -7144,8 +7211,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
 	pci_free_consistent(h->pdev,
 		h->nr_cmds * sizeof(struct ErrorInfo),
 		h->errinfo_pool, h->errinfo_pool_dhandle);
-	pci_free_consistent(h->pdev, h->reply_pool_size,
-		h->reply_pool, h->reply_pool_dhandle);
+	hpsa_free_reply_queues(h);
 	kfree(h->cmd_pool_bits);
 	kfree(h->blockFetchTable);
 	kfree(h->ioaccel1_blockFetchTable);
@@ -7153,6 +7219,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
 	kfree(h->hba_inquiry_data);
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
+	free_percpu(h->lockup_detected);
 	kfree(h);
 }
 
@@ -7257,8 +7324,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 	 * 10 = 6 s/g entry or 24k
 	 */
 
+	/* If the controller supports either ioaccel method then
+	 * we can also use the RAID stack submit path that does not
+	 * perform the superfluous readl() after each command submission.
+	 */
+	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
+		access = SA5_performant_access_no_read;
+
 	/* Controller spec: zero out this buffer. */
-	memset(h->reply_pool, 0, h->reply_pool_size);
+	for (i = 0; i < h->nreply_queues; i++)
+		memset(h->reply_queue[i].head, 0, h->reply_queue_size);
 
 	bft[7] = SG_ENTRIES_IN_CMD + 4;
 	calc_bucket_map(bft, ARRAY_SIZE(bft),
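
Note: SA5_performant_access_no_read selects an access-method table whose submit routine omits the register read-back after the doorbell write; a read-back like that typically exists only to flush the posted write, so on controllers where it is safe to skip it saves a PCIe round trip per command. A generic, hedged sketch of that ops-table distinction (the demo_* names are illustrative, not the driver's actual definitions):

	#include <linux/io.h>
	#include <linux/types.h>

	struct demo_access_method {
		void (*submit_command)(void __iomem *doorbell, u32 busaddr);
	};

	static void demo_submit_with_flush(void __iomem *doorbell, u32 busaddr)
	{
		writel(busaddr, doorbell);
		(void) readl(doorbell);	/* read-back flushes the posted write */
	}

	static void demo_submit_no_read(void __iomem *doorbell, u32 busaddr)
	{
		writel(busaddr, doorbell);	/* controller tolerates posting */
	}

	static const struct demo_access_method demo_access = {
		.submit_command = demo_submit_with_flush,
	};

	static const struct demo_access_method demo_access_no_read = {
		.submit_command = demo_submit_no_read,
	};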
@@ -7274,8 +7349,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 
 	for (i = 0; i < h->nreply_queues; i++) {
 		writel(0, &h->transtable->RepQAddr[i].upper);
-		writel(h->reply_pool_dhandle +
-			(h->max_commands * sizeof(u64) * i),
+		writel(h->reply_queue[i].busaddr,
 			&h->transtable->RepQAddr[i].lower);
 	}
 
@@ -7323,8 +7397,10 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 			h->ioaccel1_blockFetchTable);
 
 		/* initialize all reply queue entries to unused */
-		memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
-				h->reply_pool_size);
+		for (i = 0; i < h->nreply_queues; i++)
+			memset(h->reply_queue[i].head,
+				(u8) IOACCEL_MODE1_REPLY_UNUSED,
+				h->reply_queue_size);
 
 		/* set all the constant fields in the accelerator command
 		 * frames once at init time to save CPU cycles later.
@@ -7386,7 +7462,6 @@ static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
 	 * because the 7 lower bits of the address are used by the
 	 * hardware.
 	 */
-#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
 	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
 			IOACCEL1_COMMANDLIST_ALIGNMENT);
 	h->ioaccel_cmd_pool =
@@ -7424,7 +7499,6 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
 	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
 		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
 
-#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
 	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
 			IOACCEL2_COMMANDLIST_ALIGNMENT);
 	h->ioaccel2_cmd_pool =
@@ -7482,16 +7556,17 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 		}
 	}
 
-	/* TODO, check that this next line h->nreply_queues is correct */
 	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
 	hpsa_get_max_perf_mode_cmds(h);
 	/* Performant mode ring buffer and supporting data structures */
-	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
-	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
-		&(h->reply_pool_dhandle));
+	h->reply_queue_size = h->max_commands * sizeof(u64);
 
 	for (i = 0; i < h->nreply_queues; i++) {
-		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
+		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+						h->reply_queue_size,
+						&(h->reply_queue[i].busaddr));
+		if (!h->reply_queue[i].head)
+			goto clean_up;
 		h->reply_queue[i].size = h->max_commands;
 		h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
 		h->reply_queue[i].current_entry = 0;
@@ -7500,18 +7575,14 @@
 	/* Need a block fetch table for performant mode */
 	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
 				sizeof(u32)), GFP_KERNEL);
-
-	if ((h->reply_pool == NULL)
-		|| (h->blockFetchTable == NULL))
+	if (!h->blockFetchTable)
 		goto clean_up;
 
 	hpsa_enter_performant_mode(h, trans_support);
 	return;
 
 clean_up:
-	if (h->reply_pool)
-		pci_free_consistent(h->pdev, h->reply_pool_size,
-			h->reply_pool, h->reply_pool_dhandle);
+	hpsa_free_reply_queues(h);
 	kfree(h->blockFetchTable);
 }
 
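Note: the preceding hunks complete the reply-queue conversion: each queue gets its own pci_alloc_consistent() buffer, a failed allocation jumps to clean_up, and hpsa_free_reply_queues() (added earlier) frees only the queues whose .head survived allocation and NULLs the pointers so teardown is safe to repeat. The idiom in isolation, with demo_* names standing in for the driver's structures:

	#include <linux/pci.h>
	#include <linux/errno.h>

	#define DEMO_NQUEUES 4

	struct demo_queue {
		u64 *head;
		dma_addr_t busaddr;
	};

	struct demo_ctlr {
		struct pci_dev *pdev;
		size_t queue_size;
		struct demo_queue q[DEMO_NQUEUES];
	};

	static void demo_free_queues(struct demo_ctlr *h)
	{
		int i;

		for (i = 0; i < DEMO_NQUEUES; i++) {
			if (!h->q[i].head)
				continue;	/* never allocated or already freed */
			pci_free_consistent(h->pdev, h->queue_size,
					h->q[i].head, h->q[i].busaddr);
			h->q[i].head = NULL;	/* second call stays harmless */
			h->q[i].busaddr = 0;
		}
	}

	static int demo_alloc_queues(struct demo_ctlr *h)
	{
		int i;

		for (i = 0; i < DEMO_NQUEUES; i++) {
			h->q[i].head = pci_alloc_consistent(h->pdev,
						h->queue_size, &h->q[i].busaddr);
			if (!h->q[i].head)
				goto clean_up;	/* release partial progress */
		}
		return 0;

	clean_up:
		demo_free_queues(h);
		return -ENOMEM;
	}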