Diffstat (limited to 'drivers/block/cciss.c')
-rw-r--r-- | drivers/block/cciss.c | 544
1 file changed, 261 insertions(+), 283 deletions(-)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 92b126394fa1..873e594860d3 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -179,19 +179,17 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl); | |||
179 | static int deregister_disk(ctlr_info_t *h, int drv_index, | 179 | static int deregister_disk(ctlr_info_t *h, int drv_index, |
180 | int clear_all, int via_ioctl); | 180 | int clear_all, int via_ioctl); |
181 | 181 | ||
182 | static void cciss_read_capacity(int ctlr, int logvol, int withirq, | 182 | static void cciss_read_capacity(int ctlr, int logvol, |
183 | sector_t *total_size, unsigned int *block_size); | 183 | sector_t *total_size, unsigned int *block_size); |
184 | static void cciss_read_capacity_16(int ctlr, int logvol, int withirq, | 184 | static void cciss_read_capacity_16(int ctlr, int logvol, |
185 | sector_t *total_size, unsigned int *block_size); | 185 | sector_t *total_size, unsigned int *block_size); |
186 | static void cciss_geometry_inquiry(int ctlr, int logvol, | 186 | static void cciss_geometry_inquiry(int ctlr, int logvol, |
187 | int withirq, sector_t total_size, | 187 | sector_t total_size, |
188 | unsigned int block_size, InquiryData_struct *inq_buff, | 188 | unsigned int block_size, InquiryData_struct *inq_buff, |
189 | drive_info_struct *drv); | 189 | drive_info_struct *drv); |
190 | static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, | 190 | static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, |
191 | __u32); | 191 | __u32); |
192 | static void start_io(ctlr_info_t *h); | 192 | static void start_io(ctlr_info_t *h); |
193 | static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, | ||
194 | __u8 page_code, unsigned char *scsi3addr, int cmd_type); | ||
195 | static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, | 193 | static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, |
196 | __u8 page_code, unsigned char scsi3addr[], | 194 | __u8 page_code, unsigned char scsi3addr[], |
197 | int cmd_type); | 195 | int cmd_type); |
@@ -424,12 +422,9 @@ cciss_proc_write(struct file *file, const char __user *buf, | |||
424 | if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { | 422 | if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { |
425 | struct seq_file *seq = file->private_data; | 423 | struct seq_file *seq = file->private_data; |
426 | ctlr_info_t *h = seq->private; | 424 | ctlr_info_t *h = seq->private; |
427 | int rc; | ||
428 | 425 | ||
429 | rc = cciss_engage_scsi(h->ctlr); | 426 | err = cciss_engage_scsi(h->ctlr); |
430 | if (rc != 0) | 427 | if (err == 0) |
431 | err = -rc; | ||
432 | else | ||
433 | err = length; | 428 | err = length; |
434 | } else | 429 | } else |
435 | #endif /* CONFIG_CISS_SCSI_TAPE */ | 430 | #endif /* CONFIG_CISS_SCSI_TAPE */ |
@@ -1657,9 +1652,11 @@ static void cciss_softirq_done(struct request *rq) | |||
1657 | { | 1652 | { |
1658 | CommandList_struct *cmd = rq->completion_data; | 1653 | CommandList_struct *cmd = rq->completion_data; |
1659 | ctlr_info_t *h = hba[cmd->ctlr]; | 1654 | ctlr_info_t *h = hba[cmd->ctlr]; |
1655 | SGDescriptor_struct *curr_sg = cmd->SG; | ||
1660 | unsigned long flags; | 1656 | unsigned long flags; |
1661 | u64bit temp64; | 1657 | u64bit temp64; |
1662 | int i, ddir; | 1658 | int i, ddir; |
1659 | int sg_index = 0; | ||
1663 | 1660 | ||
1664 | if (cmd->Request.Type.Direction == XFER_READ) | 1661 | if (cmd->Request.Type.Direction == XFER_READ) |
1665 | ddir = PCI_DMA_FROMDEVICE; | 1662 | ddir = PCI_DMA_FROMDEVICE; |
@@ -1669,9 +1666,22 @@ static void cciss_softirq_done(struct request *rq) | |||
1669 | /* command did not need to be retried */ | 1666 | /* command did not need to be retried */ |
1670 | /* unmap the DMA mapping for all the scatter gather elements */ | 1667 | /* unmap the DMA mapping for all the scatter gather elements */ |
1671 | for (i = 0; i < cmd->Header.SGList; i++) { | 1668 | for (i = 0; i < cmd->Header.SGList; i++) { |
1672 | temp64.val32.lower = cmd->SG[i].Addr.lower; | 1669 | if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) { |
1673 | temp64.val32.upper = cmd->SG[i].Addr.upper; | 1670 | temp64.val32.lower = cmd->SG[i].Addr.lower; |
1674 | pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir); | 1671 | temp64.val32.upper = cmd->SG[i].Addr.upper; |
1672 | pci_dma_sync_single_for_cpu(h->pdev, temp64.val, | ||
1673 | cmd->SG[i].Len, ddir); | ||
1674 | pci_unmap_single(h->pdev, temp64.val, | ||
1675 | cmd->SG[i].Len, ddir); | ||
1676 | /* Point to the next block */ | ||
1677 | curr_sg = h->cmd_sg_list[cmd->cmdindex]->sgchain; | ||
1678 | sg_index = 0; | ||
1679 | } | ||
1680 | temp64.val32.lower = curr_sg[sg_index].Addr.lower; | ||
1681 | temp64.val32.upper = curr_sg[sg_index].Addr.upper; | ||
1682 | pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len, | ||
1683 | ddir); | ||
1684 | ++sg_index; | ||
1675 | } | 1685 | } |
1676 | 1686 | ||
1677 | #ifdef CCISS_DEBUG | 1687 | #ifdef CCISS_DEBUG |
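The unmap loop above now distinguishes two kinds of descriptors: ordinary entries mapping data pages (undone with pci_unmap_page) and an entry flagged CCISS_SG_CHAIN, whose address and length describe the separately mapped chain block itself, which is synced and unmapped with pci_unmap_single before the walk continues in h->cmd_sg_list[cmd->cmdindex]->sgchain. A standalone sketch of that two-level index walk with simplified types; walk_sg(), the flag value, and the sizes are illustrative only:

    #include <stdio.h>

    #define SG_CHAIN 0x80000000u            /* stands in for CCISS_SG_CHAIN */

    struct desc { unsigned int len; unsigned int ext; };

    /* Visit 'n' data descriptors, switching from the embedded array to the
     * chain block when the chain flag is seen, as the driver loop does. */
    static void walk_sg(struct desc *embedded, struct desc *chain, int n)
    {
            struct desc *curr = embedded;
            int sg_index = 0;

            for (int i = 0; i < n; i++) {
                    if (curr[sg_index].ext == SG_CHAIN) {
                            printf("pci_unmap_single: chain block, %u bytes\n",
                                   curr[sg_index].len);
                            curr = chain;   /* point to the next block */
                            sg_index = 0;
                    }
                    printf("pci_unmap_page: data segment %d, %u bytes\n",
                           i, curr[sg_index].len);
                    ++sg_index;
            }
    }

    int main(void)
    {
            struct desc embedded[4] = {
                    { 4096, 0 }, { 4096, 0 }, { 4096, 0 },
                    { 2 * sizeof(struct desc), SG_CHAIN },  /* last slot chains */
            };
            struct desc chain[2] = { { 4096, 0 }, { 512, 0 } };

            walk_sg(embedded, chain, 5);    /* 3 embedded + 2 chained data segments */
            return 0;
    }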
@@ -1701,7 +1711,7 @@ static inline void log_unit_to_scsi3addr(ctlr_info_t *h, | |||
1701 | * via the inquiry page 0. Model, vendor, and rev are set to empty strings if | 1711 | * via the inquiry page 0. Model, vendor, and rev are set to empty strings if |
1702 | * they cannot be read. | 1712 | * they cannot be read. |
1703 | */ | 1713 | */ |
1704 | static void cciss_get_device_descr(int ctlr, int logvol, int withirq, | 1714 | static void cciss_get_device_descr(int ctlr, int logvol, |
1705 | char *vendor, char *model, char *rev) | 1715 | char *vendor, char *model, char *rev) |
1706 | { | 1716 | { |
1707 | int rc; | 1717 | int rc; |
@@ -1717,14 +1727,8 @@ static void cciss_get_device_descr(int ctlr, int logvol, int withirq, | |||
1717 | return; | 1727 | return; |
1718 | 1728 | ||
1719 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); | 1729 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); |
1720 | if (withirq) | 1730 | rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf, sizeof(*inq_buf), 0, |
1721 | rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf, | 1731 | scsi3addr, TYPE_CMD); |
1722 | sizeof(InquiryData_struct), 0, | ||
1723 | scsi3addr, TYPE_CMD); | ||
1724 | else | ||
1725 | rc = sendcmd(CISS_INQUIRY, ctlr, inq_buf, | ||
1726 | sizeof(InquiryData_struct), 0, | ||
1727 | scsi3addr, TYPE_CMD); | ||
1728 | if (rc == IO_OK) { | 1732 | if (rc == IO_OK) { |
1729 | memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN); | 1733 | memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN); |
1730 | vendor[VENDOR_LEN] = '\0'; | 1734 | vendor[VENDOR_LEN] = '\0'; |
@@ -1743,7 +1747,7 @@ static void cciss_get_device_descr(int ctlr, int logvol, int withirq, | |||
1743 | * number cannot be had, for whatever reason, 16 bytes of 0xff | 1747 | * number cannot be had, for whatever reason, 16 bytes of 0xff |
1744 | * are returned instead. | 1748 | * are returned instead. |
1745 | */ | 1749 | */ |
1746 | static void cciss_get_serial_no(int ctlr, int logvol, int withirq, | 1750 | static void cciss_get_serial_no(int ctlr, int logvol, |
1747 | unsigned char *serial_no, int buflen) | 1751 | unsigned char *serial_no, int buflen) |
1748 | { | 1752 | { |
1749 | #define PAGE_83_INQ_BYTES 64 | 1753 | #define PAGE_83_INQ_BYTES 64 |
@@ -1759,12 +1763,8 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq, | |||
1759 | return; | 1763 | return; |
1760 | memset(serial_no, 0, buflen); | 1764 | memset(serial_no, 0, buflen); |
1761 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); | 1765 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); |
1762 | if (withirq) | 1766 | rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf, |
1763 | rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf, | 1767 | PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); |
1764 | PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); | ||
1765 | else | ||
1766 | rc = sendcmd(CISS_INQUIRY, ctlr, buf, | ||
1767 | PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); | ||
1768 | if (rc == IO_OK) | 1768 | if (rc == IO_OK) |
1769 | memcpy(serial_no, &buf[8], buflen); | 1769 | memcpy(serial_no, &buf[8], buflen); |
1770 | kfree(buf); | 1770 | kfree(buf); |
@@ -1793,10 +1793,10 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, | |||
1793 | blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); | 1793 | blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); |
1794 | 1794 | ||
1795 | /* This is a hardware imposed limit. */ | 1795 | /* This is a hardware imposed limit. */ |
1796 | blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES); | 1796 | blk_queue_max_hw_segments(disk->queue, h->maxsgentries); |
1797 | 1797 | ||
1798 | /* This is a limit in the driver and could be eliminated. */ | 1798 | /* This is a limit in the driver and could be eliminated. */ |
1799 | blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES); | 1799 | blk_queue_max_phys_segments(disk->queue, h->maxsgentries); |
1800 | 1800 | ||
1801 | blk_queue_max_sectors(disk->queue, h->cciss_max_sectors); | 1801 | blk_queue_max_sectors(disk->queue, h->cciss_max_sectors); |
1802 | 1802 | ||
@@ -1852,18 +1852,16 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, | |||
1852 | 1852 | ||
1853 | /* testing to see if 16-byte CDBs are already being used */ | 1853 | /* testing to see if 16-byte CDBs are already being used */ |
1854 | if (h->cciss_read == CCISS_READ_16) { | 1854 | if (h->cciss_read == CCISS_READ_16) { |
1855 | cciss_read_capacity_16(h->ctlr, drv_index, 1, | 1855 | cciss_read_capacity_16(h->ctlr, drv_index, |
1856 | &total_size, &block_size); | 1856 | &total_size, &block_size); |
1857 | 1857 | ||
1858 | } else { | 1858 | } else { |
1859 | cciss_read_capacity(ctlr, drv_index, 1, | 1859 | cciss_read_capacity(ctlr, drv_index, &total_size, &block_size); |
1860 | &total_size, &block_size); | ||
1861 | |||
1862 | /* if read_capacity returns all F's this volume is >2TB */ | 1860 | /* if read_capacity returns all F's this volume is >2TB */ |
1863 | /* in size so we switch to 16-byte CDB's for all */ | 1861 | /* in size so we switch to 16-byte CDB's for all */ |
1864 | /* read/write ops */ | 1862 | /* read/write ops */ |
1865 | if (total_size == 0xFFFFFFFFULL) { | 1863 | if (total_size == 0xFFFFFFFFULL) { |
1866 | cciss_read_capacity_16(ctlr, drv_index, 1, | 1864 | cciss_read_capacity_16(ctlr, drv_index, |
1867 | &total_size, &block_size); | 1865 | &total_size, &block_size); |
1868 | h->cciss_read = CCISS_READ_16; | 1866 | h->cciss_read = CCISS_READ_16; |
1869 | h->cciss_write = CCISS_WRITE_16; | 1867 | h->cciss_write = CCISS_WRITE_16; |
@@ -1873,14 +1871,14 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, | |||
1873 | } | 1871 | } |
1874 | } | 1872 | } |
1875 | 1873 | ||
1876 | cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size, | 1874 | cciss_geometry_inquiry(ctlr, drv_index, total_size, block_size, |
1877 | inq_buff, drvinfo); | 1875 | inq_buff, drvinfo); |
1878 | drvinfo->block_size = block_size; | 1876 | drvinfo->block_size = block_size; |
1879 | drvinfo->nr_blocks = total_size + 1; | 1877 | drvinfo->nr_blocks = total_size + 1; |
1880 | 1878 | ||
1881 | cciss_get_device_descr(ctlr, drv_index, 1, drvinfo->vendor, | 1879 | cciss_get_device_descr(ctlr, drv_index, drvinfo->vendor, |
1882 | drvinfo->model, drvinfo->rev); | 1880 | drvinfo->model, drvinfo->rev); |
1883 | cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, | 1881 | cciss_get_serial_no(ctlr, drv_index, drvinfo->serial_no, |
1884 | sizeof(drvinfo->serial_no)); | 1882 | sizeof(drvinfo->serial_no)); |
1885 | /* Save the lunid in case we deregister the disk, below. */ | 1883 | /* Save the lunid in case we deregister the disk, below. */ |
1886 | memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, | 1884 | memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, |
@@ -2531,6 +2529,8 @@ static int check_target_status(ctlr_info_t *h, CommandList_struct *c) | |||
2531 | case 0: return IO_OK; /* no sense */ | 2529 | case 0: return IO_OK; /* no sense */ |
2532 | case 1: return IO_OK; /* recovered error */ | 2530 | case 1: return IO_OK; /* recovered error */ |
2533 | default: | 2531 | default: |
2532 | if (check_for_unit_attention(h, c)) | ||
2533 | return IO_NEEDS_RETRY; | ||
2534 | printk(KERN_WARNING "cciss%d: cmd 0x%02x " | 2534 | printk(KERN_WARNING "cciss%d: cmd 0x%02x " |
2535 | "check condition, sense key = 0x%02x\n", | 2535 | "check condition, sense key = 0x%02x\n", |
2536 | h->ctlr, c->Request.CDB[0], | 2536 | h->ctlr, c->Request.CDB[0], |
@@ -2672,7 +2672,7 @@ static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, | |||
2672 | } | 2672 | } |
2673 | 2673 | ||
2674 | static void cciss_geometry_inquiry(int ctlr, int logvol, | 2674 | static void cciss_geometry_inquiry(int ctlr, int logvol, |
2675 | int withirq, sector_t total_size, | 2675 | sector_t total_size, |
2676 | unsigned int block_size, | 2676 | unsigned int block_size, |
2677 | InquiryData_struct *inq_buff, | 2677 | InquiryData_struct *inq_buff, |
2678 | drive_info_struct *drv) | 2678 | drive_info_struct *drv) |
@@ -2683,14 +2683,8 @@ static void cciss_geometry_inquiry(int ctlr, int logvol, | |||
2683 | 2683 | ||
2684 | memset(inq_buff, 0, sizeof(InquiryData_struct)); | 2684 | memset(inq_buff, 0, sizeof(InquiryData_struct)); |
2685 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); | 2685 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); |
2686 | if (withirq) | 2686 | return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff, |
2687 | return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, | 2687 | sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD); |
2688 | inq_buff, sizeof(*inq_buff), | ||
2689 | 0xC1, scsi3addr, TYPE_CMD); | ||
2690 | else | ||
2691 | return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff, | ||
2692 | sizeof(*inq_buff), 0xC1, scsi3addr, | ||
2693 | TYPE_CMD); | ||
2694 | if (return_code == IO_OK) { | 2688 | if (return_code == IO_OK) { |
2695 | if (inq_buff->data_byte[8] == 0xFF) { | 2689 | if (inq_buff->data_byte[8] == 0xFF) { |
2696 | printk(KERN_WARNING | 2690 | printk(KERN_WARNING |
@@ -2723,7 +2717,7 @@ static void cciss_geometry_inquiry(int ctlr, int logvol, | |||
2723 | } | 2717 | } |
2724 | 2718 | ||
2725 | static void | 2719 | static void |
2726 | cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, | 2720 | cciss_read_capacity(int ctlr, int logvol, sector_t *total_size, |
2727 | unsigned int *block_size) | 2721 | unsigned int *block_size) |
2728 | { | 2722 | { |
2729 | ReadCapdata_struct *buf; | 2723 | ReadCapdata_struct *buf; |
@@ -2737,14 +2731,8 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, | |||
2737 | } | 2731 | } |
2738 | 2732 | ||
2739 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); | 2733 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); |
2740 | if (withirq) | 2734 | return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, buf, |
2741 | return_code = sendcmd_withirq(CCISS_READ_CAPACITY, | 2735 | sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD); |
2742 | ctlr, buf, sizeof(ReadCapdata_struct), | ||
2743 | 0, scsi3addr, TYPE_CMD); | ||
2744 | else | ||
2745 | return_code = sendcmd(CCISS_READ_CAPACITY, | ||
2746 | ctlr, buf, sizeof(ReadCapdata_struct), | ||
2747 | 0, scsi3addr, TYPE_CMD); | ||
2748 | if (return_code == IO_OK) { | 2736 | if (return_code == IO_OK) { |
2749 | *total_size = be32_to_cpu(*(__be32 *) buf->total_size); | 2737 | *total_size = be32_to_cpu(*(__be32 *) buf->total_size); |
2750 | *block_size = be32_to_cpu(*(__be32 *) buf->block_size); | 2738 | *block_size = be32_to_cpu(*(__be32 *) buf->block_size); |
@@ -2756,8 +2744,8 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, | |||
2756 | kfree(buf); | 2744 | kfree(buf); |
2757 | } | 2745 | } |
2758 | 2746 | ||
2759 | static void | 2747 | static void cciss_read_capacity_16(int ctlr, int logvol, |
2760 | cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size) | 2748 | sector_t *total_size, unsigned int *block_size) |
2761 | { | 2749 | { |
2762 | ReadCapdata_struct_16 *buf; | 2750 | ReadCapdata_struct_16 *buf; |
2763 | int return_code; | 2751 | int return_code; |
@@ -2770,16 +2758,9 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, | |||
2770 | } | 2758 | } |
2771 | 2759 | ||
2772 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); | 2760 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); |
2773 | if (withirq) { | 2761 | return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16, |
2774 | return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16, | 2762 | ctlr, buf, sizeof(ReadCapdata_struct_16), |
2775 | ctlr, buf, sizeof(ReadCapdata_struct_16), | 2763 | 0, scsi3addr, TYPE_CMD); |
2776 | 0, scsi3addr, TYPE_CMD); | ||
2777 | } | ||
2778 | else { | ||
2779 | return_code = sendcmd(CCISS_READ_CAPACITY_16, | ||
2780 | ctlr, buf, sizeof(ReadCapdata_struct_16), | ||
2781 | 0, scsi3addr, TYPE_CMD); | ||
2782 | } | ||
2783 | if (return_code == IO_OK) { | 2764 | if (return_code == IO_OK) { |
2784 | *total_size = be64_to_cpu(*(__be64 *) buf->total_size); | 2765 | *total_size = be64_to_cpu(*(__be64 *) buf->total_size); |
2785 | *block_size = be32_to_cpu(*(__be32 *) buf->block_size); | 2766 | *block_size = be32_to_cpu(*(__be32 *) buf->block_size); |
@@ -2820,13 +2801,13 @@ static int cciss_revalidate(struct gendisk *disk) | |||
2820 | return 1; | 2801 | return 1; |
2821 | } | 2802 | } |
2822 | if (h->cciss_read == CCISS_READ_10) { | 2803 | if (h->cciss_read == CCISS_READ_10) { |
2823 | cciss_read_capacity(h->ctlr, logvol, 1, | 2804 | cciss_read_capacity(h->ctlr, logvol, |
2824 | &total_size, &block_size); | 2805 | &total_size, &block_size); |
2825 | } else { | 2806 | } else { |
2826 | cciss_read_capacity_16(h->ctlr, logvol, 1, | 2807 | cciss_read_capacity_16(h->ctlr, logvol, |
2827 | &total_size, &block_size); | 2808 | &total_size, &block_size); |
2828 | } | 2809 | } |
2829 | cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, | 2810 | cciss_geometry_inquiry(h->ctlr, logvol, total_size, block_size, |
2830 | inq_buff, drv); | 2811 | inq_buff, drv); |
2831 | 2812 | ||
2832 | blk_queue_logical_block_size(drv->queue, drv->block_size); | 2813 | blk_queue_logical_block_size(drv->queue, drv->block_size); |
@@ -2837,167 +2818,6 @@ static int cciss_revalidate(struct gendisk *disk) | |||
2837 | } | 2818 | } |
2838 | 2819 | ||
2839 | /* | 2820 | /* |
2840 | * Wait polling for a command to complete. | ||
2841 | * The memory mapped FIFO is polled for the completion. | ||
2842 | * Used only at init time, interrupts from the HBA are disabled. | ||
2843 | */ | ||
2844 | static unsigned long pollcomplete(int ctlr) | ||
2845 | { | ||
2846 | unsigned long done; | ||
2847 | int i; | ||
2848 | |||
2849 | /* Wait (up to 20 seconds) for a command to complete */ | ||
2850 | |||
2851 | for (i = 20 * HZ; i > 0; i--) { | ||
2852 | done = hba[ctlr]->access.command_completed(hba[ctlr]); | ||
2853 | if (done == FIFO_EMPTY) | ||
2854 | schedule_timeout_uninterruptible(1); | ||
2855 | else | ||
2856 | return done; | ||
2857 | } | ||
2858 | /* Invalid address to tell caller we ran out of time */ | ||
2859 | return 1; | ||
2860 | } | ||
2861 | |||
2862 | /* Send command c to controller h and poll for it to complete. | ||
2863 | * Turns interrupts off on the board. Used at driver init time | ||
2864 | * and during SCSI error recovery. | ||
2865 | */ | ||
2866 | static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c) | ||
2867 | { | ||
2868 | int i; | ||
2869 | unsigned long complete; | ||
2870 | int status = IO_ERROR; | ||
2871 | u64bit buff_dma_handle; | ||
2872 | |||
2873 | resend_cmd1: | ||
2874 | |||
2875 | /* Disable interrupt on the board. */ | ||
2876 | h->access.set_intr_mask(h, CCISS_INTR_OFF); | ||
2877 | |||
2878 | /* Make sure there is room in the command FIFO */ | ||
2879 | /* Actually it should be completely empty at this time */ | ||
2880 | /* unless we are in here doing error handling for the scsi */ | ||
2881 | /* tape side of the driver. */ | ||
2882 | for (i = 200000; i > 0; i--) { | ||
2883 | /* if fifo isn't full go */ | ||
2884 | if (!(h->access.fifo_full(h))) | ||
2885 | break; | ||
2886 | udelay(10); | ||
2887 | printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full," | ||
2888 | " waiting!\n", h->ctlr); | ||
2889 | } | ||
2890 | h->access.submit_command(h, c); /* Send the cmd */ | ||
2891 | do { | ||
2892 | complete = pollcomplete(h->ctlr); | ||
2893 | |||
2894 | #ifdef CCISS_DEBUG | ||
2895 | printk(KERN_DEBUG "cciss: command completed\n"); | ||
2896 | #endif /* CCISS_DEBUG */ | ||
2897 | |||
2898 | if (complete == 1) { | ||
2899 | printk(KERN_WARNING | ||
2900 | "cciss cciss%d: SendCmd Timeout out, " | ||
2901 | "No command list address returned!\n", h->ctlr); | ||
2902 | status = IO_ERROR; | ||
2903 | break; | ||
2904 | } | ||
2905 | |||
2906 | /* Make sure it's the command we're expecting. */ | ||
2907 | if ((complete & ~CISS_ERROR_BIT) != c->busaddr) { | ||
2908 | printk(KERN_WARNING "cciss%d: Unexpected command " | ||
2909 | "completion.\n", h->ctlr); | ||
2910 | continue; | ||
2911 | } | ||
2912 | |||
2913 | /* It is our command. If no error, we're done. */ | ||
2914 | if (!(complete & CISS_ERROR_BIT)) { | ||
2915 | status = IO_OK; | ||
2916 | break; | ||
2917 | } | ||
2918 | |||
2919 | /* There is an error... */ | ||
2920 | |||
2921 | /* if data overrun or underun on Report command ignore it */ | ||
2922 | if (((c->Request.CDB[0] == CISS_REPORT_LOG) || | ||
2923 | (c->Request.CDB[0] == CISS_REPORT_PHYS) || | ||
2924 | (c->Request.CDB[0] == CISS_INQUIRY)) && | ||
2925 | ((c->err_info->CommandStatus == CMD_DATA_OVERRUN) || | ||
2926 | (c->err_info->CommandStatus == CMD_DATA_UNDERRUN))) { | ||
2927 | complete = c->busaddr; | ||
2928 | status = IO_OK; | ||
2929 | break; | ||
2930 | } | ||
2931 | |||
2932 | if (c->err_info->CommandStatus == CMD_UNSOLICITED_ABORT) { | ||
2933 | printk(KERN_WARNING "cciss%d: unsolicited abort %p\n", | ||
2934 | h->ctlr, c); | ||
2935 | if (c->retry_count < MAX_CMD_RETRIES) { | ||
2936 | printk(KERN_WARNING "cciss%d: retrying %p\n", | ||
2937 | h->ctlr, c); | ||
2938 | c->retry_count++; | ||
2939 | /* erase the old error information */ | ||
2940 | memset(c->err_info, 0, sizeof(c->err_info)); | ||
2941 | goto resend_cmd1; | ||
2942 | } | ||
2943 | printk(KERN_WARNING "cciss%d: retried %p too many " | ||
2944 | "times\n", h->ctlr, c); | ||
2945 | status = IO_ERROR; | ||
2946 | break; | ||
2947 | } | ||
2948 | |||
2949 | if (c->err_info->CommandStatus == CMD_UNABORTABLE) { | ||
2950 | printk(KERN_WARNING "cciss%d: command could not be " | ||
2951 | "aborted.\n", h->ctlr); | ||
2952 | status = IO_ERROR; | ||
2953 | break; | ||
2954 | } | ||
2955 | |||
2956 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS) { | ||
2957 | status = check_target_status(h, c); | ||
2958 | break; | ||
2959 | } | ||
2960 | |||
2961 | printk(KERN_WARNING "cciss%d: sendcmd error\n", h->ctlr); | ||
2962 | printk(KERN_WARNING "cmd = 0x%02x, CommandStatus = 0x%02x\n", | ||
2963 | c->Request.CDB[0], c->err_info->CommandStatus); | ||
2964 | status = IO_ERROR; | ||
2965 | break; | ||
2966 | |||
2967 | } while (1); | ||
2968 | |||
2969 | /* unlock the data buffer from DMA */ | ||
2970 | buff_dma_handle.val32.lower = c->SG[0].Addr.lower; | ||
2971 | buff_dma_handle.val32.upper = c->SG[0].Addr.upper; | ||
2972 | pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val, | ||
2973 | c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); | ||
2974 | return status; | ||
2975 | } | ||
2976 | |||
2977 | /* | ||
2978 | * Send a command to the controller, and wait for it to complete. | ||
2979 | * Used at init time, and during SCSI error recovery. | ||
2980 | */ | ||
2981 | static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, | ||
2982 | __u8 page_code, unsigned char *scsi3addr, int cmd_type) | ||
2983 | { | ||
2984 | CommandList_struct *c; | ||
2985 | int status; | ||
2986 | |||
2987 | c = cmd_alloc(hba[ctlr], 1); | ||
2988 | if (!c) { | ||
2989 | printk(KERN_WARNING "cciss: unable to get memory"); | ||
2990 | return IO_ERROR; | ||
2991 | } | ||
2992 | status = fill_cmd(c, cmd, ctlr, buff, size, page_code, | ||
2993 | scsi3addr, cmd_type); | ||
2994 | if (status == IO_OK) | ||
2995 | status = sendcmd_core(hba[ctlr], c); | ||
2996 | cmd_free(hba[ctlr], c, 1); | ||
2997 | return status; | ||
2998 | } | ||
2999 | |||
3000 | /* | ||
3001 | * Map (physical) PCI mem into (virtual) kernel space | 2821 | * Map (physical) PCI mem into (virtual) kernel space |
3002 | */ | 2822 | */ |
3003 | static void __iomem *remap_pci_mem(ulong base, ulong size) | 2823 | static void __iomem *remap_pci_mem(ulong base, ulong size) |
@@ -3255,9 +3075,13 @@ static void do_cciss_request(struct request_queue *q) | |||
3255 | int seg; | 3075 | int seg; |
3256 | struct request *creq; | 3076 | struct request *creq; |
3257 | u64bit temp64; | 3077 | u64bit temp64; |
3258 | struct scatterlist tmp_sg[MAXSGENTRIES]; | 3078 | struct scatterlist *tmp_sg; |
3079 | SGDescriptor_struct *curr_sg; | ||
3259 | drive_info_struct *drv; | 3080 | drive_info_struct *drv; |
3260 | int i, dir; | 3081 | int i, dir; |
3082 | int nseg = 0; | ||
3083 | int sg_index = 0; | ||
3084 | int chained = 0; | ||
3261 | 3085 | ||
3262 | /* We call start_io here in case there is a command waiting on the | 3086 | /* We call start_io here in case there is a command waiting on the |
3263 | * queue that has not been sent. | 3087 | * queue that has not been sent. |
@@ -3270,13 +3094,14 @@ static void do_cciss_request(struct request_queue *q) | |||
3270 | if (!creq) | 3094 | if (!creq) |
3271 | goto startio; | 3095 | goto startio; |
3272 | 3096 | ||
3273 | BUG_ON(creq->nr_phys_segments > MAXSGENTRIES); | 3097 | BUG_ON(creq->nr_phys_segments > h->maxsgentries); |
3274 | 3098 | ||
3275 | if ((c = cmd_alloc(h, 1)) == NULL) | 3099 | if ((c = cmd_alloc(h, 1)) == NULL) |
3276 | goto full; | 3100 | goto full; |
3277 | 3101 | ||
3278 | blk_start_request(creq); | 3102 | blk_start_request(creq); |
3279 | 3103 | ||
3104 | tmp_sg = h->scatter_list[c->cmdindex]; | ||
3280 | spin_unlock_irq(q->queue_lock); | 3105 | spin_unlock_irq(q->queue_lock); |
3281 | 3106 | ||
3282 | c->cmd_type = CMD_RWREQ; | 3107 | c->cmd_type = CMD_RWREQ; |
@@ -3305,7 +3130,7 @@ static void do_cciss_request(struct request_queue *q) | |||
3305 | (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq)); | 3130 | (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq)); |
3306 | #endif /* CCISS_DEBUG */ | 3131 | #endif /* CCISS_DEBUG */ |
3307 | 3132 | ||
3308 | sg_init_table(tmp_sg, MAXSGENTRIES); | 3133 | sg_init_table(tmp_sg, h->maxsgentries); |
3309 | seg = blk_rq_map_sg(q, creq, tmp_sg); | 3134 | seg = blk_rq_map_sg(q, creq, tmp_sg); |
3310 | 3135 | ||
3311 | /* get the DMA records for the setup */ | 3136 | /* get the DMA records for the setup */ |
@@ -3314,25 +3139,70 @@ static void do_cciss_request(struct request_queue *q) | |||
3314 | else | 3139 | else |
3315 | dir = PCI_DMA_TODEVICE; | 3140 | dir = PCI_DMA_TODEVICE; |
3316 | 3141 | ||
3142 | curr_sg = c->SG; | ||
3143 | sg_index = 0; | ||
3144 | chained = 0; | ||
3145 | |||
3317 | for (i = 0; i < seg; i++) { | 3146 | for (i = 0; i < seg; i++) { |
3318 | c->SG[i].Len = tmp_sg[i].length; | 3147 | if (((sg_index+1) == (h->max_cmd_sgentries)) && |
3148 | !chained && ((seg - i) > 1)) { | ||
3149 | nseg = seg - i; | ||
3150 | curr_sg[sg_index].Len = (nseg) * | ||
3151 | sizeof(SGDescriptor_struct); | ||
3152 | curr_sg[sg_index].Ext = CCISS_SG_CHAIN; | ||
3153 | |||
3154 | /* Point to next chain block. */ | ||
3155 | curr_sg = h->cmd_sg_list[c->cmdindex]->sgchain; | ||
3156 | sg_index = 0; | ||
3157 | chained = 1; | ||
3158 | } | ||
3159 | curr_sg[sg_index].Len = tmp_sg[i].length; | ||
3319 | temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]), | 3160 | temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]), |
3320 | tmp_sg[i].offset, | 3161 | tmp_sg[i].offset, |
3321 | tmp_sg[i].length, dir); | 3162 | tmp_sg[i].length, dir); |
3322 | c->SG[i].Addr.lower = temp64.val32.lower; | 3163 | curr_sg[sg_index].Addr.lower = temp64.val32.lower; |
3323 | c->SG[i].Addr.upper = temp64.val32.upper; | 3164 | curr_sg[sg_index].Addr.upper = temp64.val32.upper; |
3324 | c->SG[i].Ext = 0; // we are not chaining | 3165 | curr_sg[sg_index].Ext = 0; /* we are not chaining */ |
3166 | |||
3167 | ++sg_index; | ||
3168 | } | ||
3169 | |||
3170 | if (chained) { | ||
3171 | int len; | ||
3172 | curr_sg = c->SG; | ||
3173 | sg_index = h->max_cmd_sgentries - 1; | ||
3174 | len = curr_sg[sg_index].Len; | ||
3175 | /* Setup pointer to next chain block. | ||
3176 | * Fill out last element in current chain | ||
3177 | * block with address of next chain block. | ||
3178 | */ | ||
3179 | temp64.val = pci_map_single(h->pdev, | ||
3180 | h->cmd_sg_list[c->cmdindex]->sgchain, | ||
3181 | len, dir); | ||
3182 | |||
3183 | h->cmd_sg_list[c->cmdindex]->sg_chain_dma = temp64.val; | ||
3184 | curr_sg[sg_index].Addr.lower = temp64.val32.lower; | ||
3185 | curr_sg[sg_index].Addr.upper = temp64.val32.upper; | ||
3186 | |||
3187 | pci_dma_sync_single_for_device(h->pdev, | ||
3188 | h->cmd_sg_list[c->cmdindex]->sg_chain_dma, | ||
3189 | len, dir); | ||
3325 | } | 3190 | } |
3191 | |||
3326 | /* track how many SG entries we are using */ | 3192 | /* track how many SG entries we are using */ |
3327 | if (seg > h->maxSG) | 3193 | if (seg > h->maxSG) |
3328 | h->maxSG = seg; | 3194 | h->maxSG = seg; |
3329 | 3195 | ||
3330 | #ifdef CCISS_DEBUG | 3196 | #ifdef CCISS_DEBUG |
3331 | printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n", | 3197 | printk(KERN_DEBUG "cciss: Submitting %ld sectors in %d segments " |
3332 | blk_rq_sectors(creq), seg); | 3198 | "chained[%d]\n", |
3199 | blk_rq_sectors(creq), seg, chained); | ||
3333 | #endif /* CCISS_DEBUG */ | 3200 | #endif /* CCISS_DEBUG */ |
3334 | 3201 | ||
3335 | c->Header.SGList = c->Header.SGTotal = seg; | 3202 | c->Header.SGList = c->Header.SGTotal = seg + chained; |
3203 | if (seg > h->max_cmd_sgentries) | ||
3204 | c->Header.SGList = h->max_cmd_sgentries; | ||
3205 | |||
3336 | if (likely(blk_fs_request(creq))) { | 3206 | if (likely(blk_fs_request(creq))) { |
3337 | if(h->cciss_read == CCISS_READ_10) { | 3207 | if(h->cciss_read == CCISS_READ_10) { |
3338 | c->Request.CDB[1] = 0; | 3208 | c->Request.CDB[1] = 0; |
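On the submit side, the chaining above works as follows: when a request maps to more segments than fit in the command, the first max_cmd_sgentries - 1 data descriptors go into the embedded c->SG[] array, the last embedded slot becomes a CCISS_SG_CHAIN descriptor whose address is filled in after the loop by pci_map_single() on the chain block, SGTotal counts every descriptor including the chain entry, and SGList is capped at max_cmd_sgentries. A standalone sketch of that layout arithmetic, assuming max_cmd_sgentries == 32; chain_layout() and the example segment counts are illustrative:

    #include <stdio.h>

    /* Work out where the descriptors for 'seg' data segments land, following
     * the rules in do_cciss_request() above. */
    static void chain_layout(int seg, int max_cmd_sgentries)
    {
            int chained  = seg > max_cmd_sgentries;               /* need a chain block? */
            int embedded = chained ? max_cmd_sgentries - 1 : seg; /* data entries in c->SG[] */
            int in_chain = chained ? seg - embedded : 0;          /* data entries in sgchain */
            int sgtotal  = seg + chained;                         /* Header.SGTotal */
            int sglist   = chained ? max_cmd_sgentries : seg;     /* Header.SGList (capped) */

            printf("seg=%2d -> embedded=%2d chained=%2d SGList=%2d SGTotal=%2d\n",
                   seg, embedded, in_chain, sglist, sgtotal);
    }

    int main(void)
    {
            chain_layout(12, 32);   /* fits entirely in the command */
            chain_layout(32, 32);   /* exactly fills the embedded array, no chain needed */
            chain_layout(40, 32);   /* 31 embedded + 1 chain descriptor + 9 chained */
            return 0;
    }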
@@ -3513,28 +3383,33 @@ static int add_to_scan_list(struct ctlr_info *h) | |||
3513 | * @h: Pointer to the controller. | 3383 | * @h: Pointer to the controller. |
3514 | * | 3384 | * |
3515 | * Removes the controller from the rescan queue if present. Blocks if | 3385 | * Removes the controller from the rescan queue if present. Blocks if |
3516 | * the controller is currently conducting a rescan. | 3386 | * the controller is currently conducting a rescan. The controller |
3387 | * can be in one of three states: | ||
3388 | * 1. Doesn't need a scan | ||
3389 | * 2. On the scan list, but not scanning yet (we remove it) | ||
3390 | * 3. Busy scanning (and not on the list). In this case we want to wait for | ||
3391 | * the scan to complete to make sure the scanning thread for this | ||
3392 | * controller is completely idle. | ||
3517 | **/ | 3393 | **/ |
3518 | static void remove_from_scan_list(struct ctlr_info *h) | 3394 | static void remove_from_scan_list(struct ctlr_info *h) |
3519 | { | 3395 | { |
3520 | struct ctlr_info *test_h, *tmp_h; | 3396 | struct ctlr_info *test_h, *tmp_h; |
3521 | int scanning = 0; | ||
3522 | 3397 | ||
3523 | mutex_lock(&scan_mutex); | 3398 | mutex_lock(&scan_mutex); |
3524 | list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) { | 3399 | list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) { |
3525 | if (test_h == h) { | 3400 | if (test_h == h) { /* state 2. */ |
3526 | list_del(&h->scan_list); | 3401 | list_del(&h->scan_list); |
3527 | complete_all(&h->scan_wait); | 3402 | complete_all(&h->scan_wait); |
3528 | mutex_unlock(&scan_mutex); | 3403 | mutex_unlock(&scan_mutex); |
3529 | return; | 3404 | return; |
3530 | } | 3405 | } |
3531 | } | 3406 | } |
3532 | if (&h->busy_scanning) | 3407 | if (h->busy_scanning) { /* state 3. */ |
3533 | scanning = 0; | 3408 | mutex_unlock(&scan_mutex); |
3534 | mutex_unlock(&scan_mutex); | ||
3535 | |||
3536 | if (scanning) | ||
3537 | wait_for_completion(&h->scan_wait); | 3409 | wait_for_completion(&h->scan_wait); |
3410 | } else { /* state 1, nothing to do. */ | ||
3411 | mutex_unlock(&scan_mutex); | ||
3412 | } | ||
3538 | } | 3413 | } |
3539 | 3414 | ||
3540 | /** | 3415 | /** |
@@ -3573,13 +3448,11 @@ static int scan_thread(void *data) | |||
3573 | h->busy_scanning = 1; | 3448 | h->busy_scanning = 1; |
3574 | mutex_unlock(&scan_mutex); | 3449 | mutex_unlock(&scan_mutex); |
3575 | 3450 | ||
3576 | if (h) { | 3451 | rebuild_lun_table(h, 0, 0); |
3577 | rebuild_lun_table(h, 0, 0); | 3452 | complete_all(&h->scan_wait); |
3578 | complete_all(&h->scan_wait); | 3453 | mutex_lock(&scan_mutex); |
3579 | mutex_lock(&scan_mutex); | 3454 | h->busy_scanning = 0; |
3580 | h->busy_scanning = 0; | 3455 | mutex_unlock(&scan_mutex); |
3581 | mutex_unlock(&scan_mutex); | ||
3582 | } | ||
3583 | } | 3456 | } |
3584 | } | 3457 | } |
3585 | 3458 | ||
@@ -3605,8 +3478,22 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c) | |||
3605 | case REPORT_LUNS_CHANGED: | 3478 | case REPORT_LUNS_CHANGED: |
3606 | printk(KERN_WARNING "cciss%d: report LUN data " | 3479 | printk(KERN_WARNING "cciss%d: report LUN data " |
3607 | "changed\n", h->ctlr); | 3480 | "changed\n", h->ctlr); |
3608 | add_to_scan_list(h); | 3481 | /* |
3609 | wake_up_process(cciss_scan_thread); | 3482 | * Here, we could call add_to_scan_list and wake up the scan thread, |
3483 | * except that it's quite likely that we will get more than one | ||
3484 | * REPORT_LUNS_CHANGED condition in quick succession, which means | ||
3485 | * that those which occur after the first one will likely happen | ||
3486 | * *during* the scan_thread's rescan. And the rescan code is not | ||
3487 | * robust enough to restart in the middle, undoing what it has already | ||
3488 | * done, and it's not clear that it's even possible to do this, since | ||
3489 | * part of what it does is notify the block layer, which starts | ||
3490 | * doing its own i/o to read partition tables and so on, and the | ||
3491 | * driver doesn't have visibility to know what might need undoing. | ||
3492 | * In any event, if possible, it is horribly complicated to get right | ||
3493 | * so we just don't do it for now. | ||
3494 | * | ||
3495 | * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. | ||
3496 | */ | ||
3610 | return 1; | 3497 | return 1; |
3611 | break; | 3498 | break; |
3612 | case POWER_OR_RESET: | 3499 | case POWER_OR_RESET: |
@@ -3888,6 +3775,23 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | |||
3888 | * leave a little room for ioctl calls. | 3775 | * leave a little room for ioctl calls. |
3889 | */ | 3776 | */ |
3890 | c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); | 3777 | c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); |
3778 | c->maxsgentries = readl(&(c->cfgtable->MaxSGElements)); | ||
3779 | |||
3780 | /* | ||
3781 | * Limit native command to 32 s/g elements to save dma'able memory. | ||
3782 | * However, spec says if 0, use 31 | ||
3783 | */ | ||
3784 | |||
3785 | c->max_cmd_sgentries = 31; | ||
3786 | if (c->maxsgentries > 512) { | ||
3787 | c->max_cmd_sgentries = 32; | ||
3788 | c->chainsize = c->maxsgentries - c->max_cmd_sgentries + 1; | ||
3789 | c->maxsgentries -= 1; /* account for chain pointer */ | ||
3790 | } else { | ||
3791 | c->maxsgentries = 31; /* Default to traditional value */ | ||
3792 | c->chainsize = 0; /* traditional */ | ||
3793 | } | ||
3794 | |||
3891 | c->product_name = products[prod_index].product_name; | 3795 | c->product_name = products[prod_index].product_name; |
3892 | c->access = *(products[prod_index].access); | 3796 | c->access = *(products[prod_index].access); |
3893 | c->nr_cmds = c->max_commands - 4; | 3797 | c->nr_cmds = c->max_commands - 4; |
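The sizing rule added above reads MaxSGElements from the config table and splits the budget: a controller reporting more than 512 elements gets 32 in-command slots (31 for data plus one chain pointer) and a chain block sized for the remainder, while anything else keeps the traditional 31-entry limit with no chaining. A standalone sketch of that arithmetic; 544 is purely an example value, the real number comes from readl(&cfgtable->MaxSGElements):

    #include <stdio.h>

    int main(void)
    {
            unsigned int maxsgentries = 544;  /* example; hardware dependent */
            unsigned int max_cmd_sgentries = 31;
            unsigned int chainsize = 0;

            if (maxsgentries > 512) {
                    max_cmd_sgentries = 32;                            /* 31 data + chain pointer */
                    chainsize = maxsgentries - max_cmd_sgentries + 1;  /* 513 chained entries */
                    maxsgentries -= 1;                                 /* 543 usable data segments */
            } else {
                    maxsgentries = 31;                                 /* traditional value */
                    chainsize = 0;                                     /* no chaining */
            }

            printf("per-command slots=%u, chain block=%u, total data segments=%u\n",
                   max_cmd_sgentries, chainsize, maxsgentries);
            return 0;
    }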
@@ -4214,6 +4118,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4214 | { | 4118 | { |
4215 | int i; | 4119 | int i; |
4216 | int j = 0; | 4120 | int j = 0; |
4121 | int k = 0; | ||
4217 | int rc; | 4122 | int rc; |
4218 | int dac, return_code; | 4123 | int dac, return_code; |
4219 | InquiryData_struct *inq_buff; | 4124 | InquiryData_struct *inq_buff; |
@@ -4317,6 +4222,53 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4317 | printk(KERN_ERR "cciss: out of memory"); | 4222 | printk(KERN_ERR "cciss: out of memory"); |
4318 | goto clean4; | 4223 | goto clean4; |
4319 | } | 4224 | } |
4225 | |||
4226 | /* Need space for temp scatter list */ | ||
4227 | hba[i]->scatter_list = kmalloc(hba[i]->max_commands * | ||
4228 | sizeof(struct scatterlist *), | ||
4229 | GFP_KERNEL); | ||
4230 | for (k = 0; k < hba[i]->nr_cmds; k++) { | ||
4231 | hba[i]->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * | ||
4232 | hba[i]->maxsgentries, | ||
4233 | GFP_KERNEL); | ||
4234 | if (hba[i]->scatter_list[k] == NULL) { | ||
4235 | printk(KERN_ERR "cciss%d: could not allocate " | ||
4236 | "s/g lists\n", i); | ||
4237 | goto clean4; | ||
4238 | } | ||
4239 | } | ||
4240 | hba[i]->cmd_sg_list = kmalloc(sizeof(struct Cmd_sg_list *) * | ||
4241 | hba[i]->nr_cmds, | ||
4242 | GFP_KERNEL); | ||
4243 | if (!hba[i]->cmd_sg_list) { | ||
4244 | printk(KERN_ERR "cciss%d: Cannot get memory for " | ||
4245 | "s/g chaining.\n", i); | ||
4246 | goto clean4; | ||
4247 | } | ||
4248 | /* Build up chain blocks for each command */ | ||
4249 | if (hba[i]->chainsize > 0) { | ||
4250 | for (j = 0; j < hba[i]->nr_cmds; j++) { | ||
4251 | hba[i]->cmd_sg_list[j] = | ||
4252 | kmalloc(sizeof(struct Cmd_sg_list), | ||
4253 | GFP_KERNEL); | ||
4254 | if (!hba[i]->cmd_sg_list[j]) { | ||
4255 | printk(KERN_ERR "cciss%d: Cannot get memory " | ||
4256 | "for chain block.\n", i); | ||
4257 | goto clean4; | ||
4258 | } | ||
4259 | /* Need a block of chainsized s/g elements. */ | ||
4260 | hba[i]->cmd_sg_list[j]->sgchain = | ||
4261 | kmalloc((hba[i]->chainsize * | ||
4262 | sizeof(SGDescriptor_struct)), | ||
4263 | GFP_KERNEL); | ||
4264 | if (!hba[i]->cmd_sg_list[j]->sgchain) { | ||
4265 | printk(KERN_ERR "cciss%d: Cannot get memory " | ||
4266 | "for s/g chains\n", i); | ||
4267 | goto clean4; | ||
4268 | } | ||
4269 | } | ||
4270 | } | ||
4271 | |||
4320 | spin_lock_init(&hba[i]->lock); | 4272 | spin_lock_init(&hba[i]->lock); |
4321 | 4273 | ||
4322 | /* Initialize the pdev driver private data. | 4274 | /* Initialize the pdev driver private data. |
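The allocations above set up two per-command side structures: scatter_list[k] is a plain struct scatterlist array of maxsgentries entries used only as a staging buffer for blk_rq_map_sg() in do_cciss_request(), while cmd_sg_list[j]->sgchain is the chain block of chainsize SGDescriptor_struct entries the controller actually DMAs, with its bus handle kept in sg_chain_dma. A standalone sketch of the allocation counts, reusing the 544-element case from the sizing sketch; max_commands here is only an example, the real value comes from CmdsOutMax, and nr_cmds is max_commands - 4 as set above:

    #include <stdio.h>

    int main(void)
    {
            int max_commands = 1024;        /* example; read from CmdsOutMax */
            int nr_cmds = max_commands - 4;
            int maxsgentries = 543;         /* from the sizing example above */
            int chainsize = 513;

            /* scatter_list: a pointer array sized by max_commands, filled with one
             * scatterlist array of maxsgentries entries per command slot */
            printf("scatter_list: %d pointers, %d arrays of %d scatterlist entries\n",
                   max_commands, nr_cmds, maxsgentries);

            /* cmd_sg_list: one container per command, each owning a chain block of
             * chainsize SG descriptors (allocated only when chainsize > 0) */
            printf("cmd_sg_list:  %d containers, each with %d SG descriptors\n",
                   nr_cmds, chainsize);
            return 0;
    }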
@@ -4362,7 +4314,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4362 | 4314 | ||
4363 | cciss_procinit(i); | 4315 | cciss_procinit(i); |
4364 | 4316 | ||
4365 | hba[i]->cciss_max_sectors = 2048; | 4317 | hba[i]->cciss_max_sectors = 8192; |
4366 | 4318 | ||
4367 | rebuild_lun_table(hba[i], 1, 0); | 4319 | rebuild_lun_table(hba[i], 1, 0); |
4368 | hba[i]->busy_initializing = 0; | 4320 | hba[i]->busy_initializing = 0; |
@@ -4370,6 +4322,20 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4370 | 4322 | ||
4371 | clean4: | 4323 | clean4: |
4372 | kfree(hba[i]->cmd_pool_bits); | 4324 | kfree(hba[i]->cmd_pool_bits); |
4325 | /* Free up sg elements */ | ||
4326 | for (k = 0; k < hba[i]->nr_cmds; k++) | ||
4327 | kfree(hba[i]->scatter_list[k]); | ||
4328 | kfree(hba[i]->scatter_list); | ||
4329 | /* Only free up extra s/g lists if controller supports them */ | ||
4330 | if (hba[i]->chainsize > 0) { | ||
4331 | for (j = 0; j < hba[i]->nr_cmds; j++) { | ||
4332 | if (hba[i]->cmd_sg_list[j]) { | ||
4333 | kfree(hba[i]->cmd_sg_list[j]->sgchain); | ||
4334 | kfree(hba[i]->cmd_sg_list[j]); | ||
4335 | } | ||
4336 | } | ||
4337 | kfree(hba[i]->cmd_sg_list); | ||
4338 | } | ||
4373 | if (hba[i]->cmd_pool) | 4339 | if (hba[i]->cmd_pool) |
4374 | pci_free_consistent(hba[i]->pdev, | 4340 | pci_free_consistent(hba[i]->pdev, |
4375 | hba[i]->nr_cmds * sizeof(CommandList_struct), | 4341 | hba[i]->nr_cmds * sizeof(CommandList_struct), |
@@ -4400,30 +4366,28 @@ clean_no_release_regions: | |||
4400 | 4366 | ||
4401 | static void cciss_shutdown(struct pci_dev *pdev) | 4367 | static void cciss_shutdown(struct pci_dev *pdev) |
4402 | { | 4368 | { |
4403 | ctlr_info_t *tmp_ptr; | 4369 | ctlr_info_t *h; |
4404 | int i; | 4370 | char *flush_buf; |
4405 | char flush_buf[4]; | ||
4406 | int return_code; | 4371 | int return_code; |
4407 | 4372 | ||
4408 | tmp_ptr = pci_get_drvdata(pdev); | 4373 | h = pci_get_drvdata(pdev); |
4409 | if (tmp_ptr == NULL) | 4374 | flush_buf = kzalloc(4, GFP_KERNEL); |
4410 | return; | 4375 | if (!flush_buf) { |
4411 | i = tmp_ptr->ctlr; | 4376 | printk(KERN_WARNING |
4412 | if (hba[i] == NULL) | 4377 | "cciss:%d cache not flushed, out of memory.\n", |
4378 | h->ctlr); | ||
4413 | return; | 4379 | return; |
4414 | |||
4415 | /* Turn board interrupts off and send the flush cache command */ | ||
4416 | /* sendcmd will turn off interrupt, and send the flush... | ||
4417 | * To write all data in the battery backed cache to disks */ | ||
4418 | memset(flush_buf, 0, 4); | ||
4419 | return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, | ||
4420 | CTLR_LUNID, TYPE_CMD); | ||
4421 | if (return_code == IO_OK) { | ||
4422 | printk(KERN_INFO "Completed flushing cache on controller %d\n", i); | ||
4423 | } else { | ||
4424 | printk(KERN_WARNING "Error flushing cache on controller %d\n", i); | ||
4425 | } | 4380 | } |
4426 | free_irq(hba[i]->intr[2], hba[i]); | 4381 | /* write all data in the battery backed cache to disk */ |
4382 | memset(flush_buf, 0, 4); | ||
4383 | return_code = sendcmd_withirq(CCISS_CACHE_FLUSH, h->ctlr, flush_buf, | ||
4384 | 4, 0, CTLR_LUNID, TYPE_CMD); | ||
4385 | kfree(flush_buf); | ||
4386 | if (return_code != IO_OK) | ||
4387 | printk(KERN_WARNING "cciss%d: Error flushing cache\n", | ||
4388 | h->ctlr); | ||
4389 | h->access.set_intr_mask(h, CCISS_INTR_OFF); | ||
4390 | free_irq(h->intr[2], h); | ||
4427 | } | 4391 | } |
4428 | 4392 | ||
4429 | static void __devexit cciss_remove_one(struct pci_dev *pdev) | 4393 | static void __devexit cciss_remove_one(struct pci_dev *pdev) |
@@ -4485,6 +4449,20 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) | |||
4485 | pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), | 4449 | pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), |
4486 | hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); | 4450 | hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); |
4487 | kfree(hba[i]->cmd_pool_bits); | 4451 | kfree(hba[i]->cmd_pool_bits); |
4452 | /* Free up sg elements */ | ||
4453 | for (j = 0; j < hba[i]->nr_cmds; j++) | ||
4454 | kfree(hba[i]->scatter_list[j]); | ||
4455 | kfree(hba[i]->scatter_list); | ||
4456 | /* Only free up extra s/g lists if controller supports them */ | ||
4457 | if (hba[i]->chainsize > 0) { | ||
4458 | for (j = 0; j < hba[i]->nr_cmds; j++) { | ||
4459 | if (hba[i]->cmd_sg_list[j]) { | ||
4460 | kfree(hba[i]->cmd_sg_list[j]->sgchain); | ||
4461 | kfree(hba[i]->cmd_sg_list[j]); | ||
4462 | } | ||
4463 | } | ||
4464 | kfree(hba[i]->cmd_sg_list); | ||
4465 | } | ||
4488 | /* | 4466 | /* |
4489 | * Deliberately omit pci_disable_device(): it does something nasty to | 4467 | * Deliberately omit pci_disable_device(): it does something nasty to |
4490 | * Smart Array controllers that pci_enable_device does not undo | 4468 | * Smart Array controllers that pci_enable_device does not undo |