Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--  drivers/scsi/ipr.c | 1771
1 file changed, 1357 insertions, 414 deletions
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 76d294fc7846..520461b9bc09 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -59,6 +59,7 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
@@ -72,6 +73,8 @@
 #include <linux/moduleparam.h>
 #include <linux/libata.h>
 #include <linux/hdreg.h>
+#include <linux/reboot.h>
+#include <linux/stringify.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/processor.h>
@@ -91,8 +94,8 @@ static unsigned int ipr_max_speed = 1;
 static int ipr_testmode = 0;
 static unsigned int ipr_fastfail = 0;
 static unsigned int ipr_transop_timeout = 0;
-static unsigned int ipr_enable_cache = 1;
 static unsigned int ipr_debug = 0;
+static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
 static unsigned int ipr_dual_ioa_raid = 1;
 static DEFINE_SPINLOCK(ipr_driver_lock);
 
@@ -104,13 +107,20 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 		{
 			.set_interrupt_mask_reg = 0x0022C,
 			.clr_interrupt_mask_reg = 0x00230,
+			.clr_interrupt_mask_reg32 = 0x00230,
 			.sense_interrupt_mask_reg = 0x0022C,
+			.sense_interrupt_mask_reg32 = 0x0022C,
 			.clr_interrupt_reg = 0x00228,
+			.clr_interrupt_reg32 = 0x00228,
 			.sense_interrupt_reg = 0x00224,
+			.sense_interrupt_reg32 = 0x00224,
 			.ioarrin_reg = 0x00404,
 			.sense_uproc_interrupt_reg = 0x00214,
+			.sense_uproc_interrupt_reg32 = 0x00214,
 			.set_uproc_interrupt_reg = 0x00214,
-			.clr_uproc_interrupt_reg = 0x00218
+			.set_uproc_interrupt_reg32 = 0x00214,
+			.clr_uproc_interrupt_reg = 0x00218,
+			.clr_uproc_interrupt_reg32 = 0x00218
 		}
 	},
 	{ /* Snipe and Scamp */
@@ -119,25 +129,59 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 		{
 			.set_interrupt_mask_reg = 0x00288,
 			.clr_interrupt_mask_reg = 0x0028C,
+			.clr_interrupt_mask_reg32 = 0x0028C,
 			.sense_interrupt_mask_reg = 0x00288,
+			.sense_interrupt_mask_reg32 = 0x00288,
 			.clr_interrupt_reg = 0x00284,
+			.clr_interrupt_reg32 = 0x00284,
 			.sense_interrupt_reg = 0x00280,
+			.sense_interrupt_reg32 = 0x00280,
 			.ioarrin_reg = 0x00504,
 			.sense_uproc_interrupt_reg = 0x00290,
+			.sense_uproc_interrupt_reg32 = 0x00290,
 			.set_uproc_interrupt_reg = 0x00290,
-			.clr_uproc_interrupt_reg = 0x00294
+			.set_uproc_interrupt_reg32 = 0x00290,
+			.clr_uproc_interrupt_reg = 0x00294,
+			.clr_uproc_interrupt_reg32 = 0x00294
+		}
+	},
+	{ /* CRoC */
+		.mailbox = 0x00040,
+		.cache_line_size = 0x20,
+		{
+			.set_interrupt_mask_reg = 0x00010,
+			.clr_interrupt_mask_reg = 0x00018,
+			.clr_interrupt_mask_reg32 = 0x0001C,
+			.sense_interrupt_mask_reg = 0x00010,
+			.sense_interrupt_mask_reg32 = 0x00014,
+			.clr_interrupt_reg = 0x00008,
+			.clr_interrupt_reg32 = 0x0000C,
+			.sense_interrupt_reg = 0x00000,
+			.sense_interrupt_reg32 = 0x00004,
+			.ioarrin_reg = 0x00070,
+			.sense_uproc_interrupt_reg = 0x00020,
+			.sense_uproc_interrupt_reg32 = 0x00024,
+			.set_uproc_interrupt_reg = 0x00020,
+			.set_uproc_interrupt_reg32 = 0x00024,
+			.clr_uproc_interrupt_reg = 0x00028,
+			.clr_uproc_interrupt_reg32 = 0x0002C,
+			.init_feedback_reg = 0x0005C,
+			.dump_addr_reg = 0x00064,
+			.dump_data_reg = 0x00068
 		}
 	},
 };
 
 static const struct ipr_chip_t ipr_chip[] = {
-	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] },
-	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] }
+	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
 };
 
 static int ipr_max_bus_speeds [] = {
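
The *_reg32 entries added in the two hunks above exist because CRoC (SIS64) adapters implement these interrupt registers as 64-bit registers: each table slot now records the offset of the full-width register alongside the offset of its low 32-bit half, and on the older SIS32 chips both offsets simply alias the same 32-bit register (note how reg and reg32 are identical for the Gemstone and Snipe/Scamp entries). The ipr_chip[] table likewise gains an explicit SIS32/SIS64 generation field plus two CRoC PCI IDs that select the new ipr_chip_cfg[2] entry. A rough sketch of the per-chip offset table this implies; the authoritative definition lives in ipr.h, outside this diff, so treat the field list below as an assumption read off the initializers above:

    /* Sketch only: assumed from the initializers above, not from ipr.h */
    struct ipr_interrupt_offsets {
    	unsigned long set_interrupt_mask_reg;
    	unsigned long clr_interrupt_mask_reg;
    	unsigned long clr_interrupt_mask_reg32;	/* low half on SIS64 */
    	unsigned long sense_interrupt_mask_reg;
    	unsigned long sense_interrupt_mask_reg32;
    	unsigned long clr_interrupt_reg;
    	unsigned long clr_interrupt_reg32;
    	unsigned long sense_interrupt_reg;
    	unsigned long sense_interrupt_reg32;
    	unsigned long ioarrin_reg;
    	unsigned long sense_uproc_interrupt_reg;
    	unsigned long sense_uproc_interrupt_reg32;
    	unsigned long set_uproc_interrupt_reg;
    	unsigned long set_uproc_interrupt_reg32;
    	unsigned long clr_uproc_interrupt_reg;
    	unsigned long clr_uproc_interrupt_reg32;
    	unsigned long init_feedback_reg;	/* CRoC only */
    	unsigned long dump_addr_reg;		/* CRoC only */
    	unsigned long dump_data_reg;		/* CRoC only */
    };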
@@ -156,12 +200,13 @@ module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
-module_param_named(enable_cache, ipr_enable_cache, int, 0);
-MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
+module_param_named(max_devs, ipr_max_devs, int, 0);
+MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
+		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IPR_DRIVER_VERSION);
 
@@ -180,6 +225,20 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"FFFE: Soft device bus error recovered by the IOA"},
 	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
 	"4101: Soft device bus fabric error"},
+	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
+	"FFFC: Logical block guard error recovered by the device"},
+	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
+	"FFFC: Logical block reference tag error recovered by the device"},
+	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4171: Recovered scatter list tag / sequence number error"},
+	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
+	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
+	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
+	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
+	"FFFD: Recovered logical block reference tag error detected by the IOA"},
+	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
+	"FFFD: Logical block guard error recovered by the IOA"},
 	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
 	"FFF9: Device sector reassign successful"},
 	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -236,12 +295,28 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"3120: SCSI bus is not operational"},
 	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
 	"4100: Hard device bus fabric error"},
+	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
+	"310C: Logical block guard error detected by the device"},
+	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
+	"310C: Logical block reference tag error detected by the device"},
+	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
+	"4170: Scatter list tag / sequence number error"},
+	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
+	"8150: Logical block CRC error on IOA to Host transfer"},
+	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
+	"4170: Logical block sequence number error on IOA to Host transfer"},
+	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
+	"310D: Logical block reference tag error detected by the IOA"},
+	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
+	"310D: Logical block guard error detected by the IOA"},
 	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9000: IOA reserved area data check"},
 	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9001: IOA reserved area invalid data pattern"},
 	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9002: IOA reserved area LRC error"},
+	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
+	"Hardware Error, IOA metadata access error"},
 	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
 	"102E: Out of alternate sectors for disk storage"},
 	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
@@ -306,6 +381,8 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"Illegal request, commands not allowed to this device"},
 	{0x05258100, 0, 0,
 	"Illegal request, command not allowed to a secondary adapter"},
+	{0x05258200, 0, 0,
+	"Illegal request, command not allowed to a non-optimized resource"},
 	{0x05260000, 0, 0,
 	"Illegal request, invalid field in parameter list"},
 	{0x05260100, 0, 0,
@@ -468,7 +545,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 	trace_entry->time = jiffies;
 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
 	trace_entry->type = type;
-	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
+	if (ipr_cmd->ioa_cfg->sis64)
+		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
+	else
+		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
 	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
 	trace_entry->u.add_data = add_data;
@@ -488,16 +568,23 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
-	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
+	dma_addr_t dma_addr = ipr_cmd->dma_addr;
 
 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
-	ioarcb->write_data_transfer_length = 0;
+	ioarcb->data_transfer_length = 0;
 	ioarcb->read_data_transfer_length = 0;
-	ioarcb->write_ioadl_len = 0;
+	ioarcb->ioadl_len = 0;
 	ioarcb->read_ioadl_len = 0;
-	ioarcb->write_ioadl_addr =
-		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
-	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+
+	if (ipr_cmd->ioa_cfg->sis64)
+		ioarcb->u.sis64_addr_data.data_ioadl_addr =
+			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
+	else {
+		ioarcb->write_ioadl_addr =
+			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
+		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+	}
+
 	ioasa->ioasc = 0;
 	ioasa->residual_data_len = 0;
 	ioasa->u.gata.status = 0;
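
Two related conversions run through the hunk above: the command's bus address is now cached in host byte order in ipr_cmd->dma_addr instead of being recovered with be32_to_cpu() from the IOARCB field it had been stored in (a round-trip that cannot survive 64-bit addresses), and the in-command descriptor list is located with offsetof() against whichever member of the new union matches the SIS generation. A minimal sketch of that address derivation, assuming the i.ioadl/i.ioadl64 union this patch introduces in ipr.h:

    /* sketch: bus address of the descriptor list embedded in the command */
    dma_addr_t list_addr;
    if (ipr_cmd->ioa_cfg->sis64)
    	list_addr = ipr_cmd->dma_addr + offsetof(struct ipr_cmnd, i.ioadl64);
    else
    	list_addr = ipr_cmd->dma_addr + offsetof(struct ipr_cmnd, i.ioadl);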
@@ -562,10 +649,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
 	ioa_cfg->allow_interrupts = 0;
 
 	/* Set interrupt mask to stop all new interrupts */
-	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
+	if (ioa_cfg->sis64)
+		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
+	else
+		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
 
 	/* Clear any pending interrupts */
-	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
+	if (ioa_cfg->sis64)
+		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
+	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 }
 
@@ -693,6 +785,35 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
 }
 
 /**
+ * ipr_send_command - Send driver initiated requests.
+ * @ipr_cmd:	ipr command struct
+ *
+ * This function sends a command to the adapter using the correct write call.
+ * In the case of sis64, calculate the ioarcb size required. Then or in the
+ * appropriate bits.
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
+
+	if (ioa_cfg->sis64) {
+		/* The default size is 256 bytes */
+		send_dma_addr |= 0x1;
+
+		/* If the number of ioadls * size of ioadl > 128 bytes,
+		   then use a 512 byte ioarcb */
+		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
+			send_dma_addr |= 0x4;
+		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
+	} else
+		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
+}
+
+/**
  * ipr_do_req - Send driver initiated requests.
  * @ipr_cmd:	ipr command struct
  * @done:	done function
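
On SIS64 the value written to IOARRIN carries more than the IOARCB bus address: ipr_send_command() above ORs format bits into the low-order address bits (free because the IOARCB is cache-line aligned) so the adapter knows how large an IOARCB to fetch. A worked example of the threshold, assuming the 16-byte ipr_ioadl64_desc layout used elsewhere in this patch (two __be32 fields plus a __be64 address): eight inline descriptors fit in 128 bytes, a ninth forces the 512-byte format.

    /* hypothetical helper restating the encoding above; not driver code */
    static dma_addr_t encode_ioarcb(dma_addr_t addr, unsigned int nr_desc)
    {
    	addr |= 0x1;			/* 256-byte IOARCB (default) */
    	if (nr_desc * 16 > 128)		/* nine or more descriptors... */
    		addr |= 0x4;		/* ...need the 512-byte IOARCB */
    	return addr;
    }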
@@ -724,8 +845,8 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
 
 	mb();
-	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
-	       ioa_cfg->regs.ioarrin_reg);
+
+	ipr_send_command(ipr_cmd);
 }
 
 /**
@@ -747,6 +868,51 @@ static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
+ * ipr_init_ioadl - initialize the ioadl for the correct SIS type
+ * @ipr_cmd:	ipr command struct
+ * @dma_addr:	dma address
+ * @len:	transfer length
+ * @flags:	ioadl flag value
+ *
+ * This function initializes an ioadl in the case where there is only a single
+ * descriptor.
+ *
+ * Return value:
+ * 	nothing
+ **/
+static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
+			   u32 len, int flags)
+{
+	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
+	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+
+	ipr_cmd->dma_use_sg = 1;
+
+	if (ipr_cmd->ioa_cfg->sis64) {
+		ioadl64->flags = cpu_to_be32(flags);
+		ioadl64->data_len = cpu_to_be32(len);
+		ioadl64->address = cpu_to_be64(dma_addr);
+
+		ipr_cmd->ioarcb.ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
+		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
+	} else {
+		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
+		ioadl->address = cpu_to_be32(dma_addr);
+
+		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
+			ipr_cmd->ioarcb.read_ioadl_len =
+				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
+		} else {
+			ipr_cmd->ioarcb.ioadl_len =
+				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
+		}
+	}
+}
+
+/**
  * ipr_send_blocking_cmd - Send command and sleep on its completion.
  * @ipr_cmd:	ipr command struct
  * @timeout_func:	function to invoke if command times out
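
ipr_init_ioadl() above hides the main layout difference between the two generations: SIS32 packs flags and length into a single big-endian word next to a 32-bit address and still accounts for read and write lists separately, while SIS64 uses one unified list whose descriptors split flags from length and carry a full 64-bit address. A sketch of the two descriptor shapes, assumed from the field names used above (the definitions proper are in ipr.h, outside this diff):

    struct ipr_ioadl_desc {			/* SIS32 (sketch) */
    	__be32 flags_and_data_len;	/* IPR_IOADL_FLAGS_* | length */
    	__be32 address;			/* 32-bit bus address */
    };

    struct ipr_ioadl64_desc {		/* SIS64 (sketch) */
    	__be32 flags;
    	__be32 data_len;
    	__be64 address;			/* full 64-bit bus address */
    };

The next hunk shows the first caller: ipr_send_hcam() replaces five lines of hand-rolled descriptor setup with a single ipr_init_ioadl() call.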
@@ -803,11 +969,8 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
 		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
 		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
 
-		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
-		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		ipr_cmd->ioadl[0].flags_and_data_len =
-			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
-		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
+		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
+			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
 
 		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
 			ipr_cmd->done = ipr_process_ccn;
@@ -817,22 +980,54 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
 		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
 
 		mb();
-		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
-		       ioa_cfg->regs.ioarrin_reg);
+
+		ipr_send_command(ipr_cmd);
 	} else {
 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
 	}
 }
 
 /**
+ * ipr_update_ata_class - Update the ata class in the resource entry
+ * @res:	resource entry struct
+ * @proto:	cfgte device bus protocol value
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
+{
+	switch(proto) {
+	case IPR_PROTO_SATA:
+	case IPR_PROTO_SAS_STP:
+		res->ata_class = ATA_DEV_ATA;
+		break;
+	case IPR_PROTO_SATA_ATAPI:
+	case IPR_PROTO_SAS_STP_ATAPI:
+		res->ata_class = ATA_DEV_ATAPI;
+		break;
+	default:
+		res->ata_class = ATA_DEV_UNKNOWN;
+		break;
+	};
+}
+
+/**
  * ipr_init_res_entry - Initialize a resource entry struct.
  * @res:	resource entry struct
+ * @cfgtew:	config table entry wrapper struct
  *
  * Return value:
  * 	none
  **/
-static void ipr_init_res_entry(struct ipr_resource_entry *res)
+static void ipr_init_res_entry(struct ipr_resource_entry *res,
+			       struct ipr_config_table_entry_wrapper *cfgtew)
 {
+	int found = 0;
+	unsigned int proto;
+	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
+	struct ipr_resource_entry *gscsi_res = NULL;
+
 	res->needs_sync_complete = 0;
 	res->in_erp = 0;
 	res->add_to_ml = 0;
@@ -840,6 +1035,205 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
 	res->resetting_device = 0;
 	res->sdev = NULL;
 	res->sata_port = NULL;
+
+	if (ioa_cfg->sis64) {
+		proto = cfgtew->u.cfgte64->proto;
+		res->res_flags = cfgtew->u.cfgte64->res_flags;
+		res->qmodel = IPR_QUEUEING_MODEL64(res);
+		res->type = cfgtew->u.cfgte64->res_type & 0x0f;
+
+		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
+		       sizeof(res->res_path));
+
+		res->bus = 0;
+		res->lun = scsilun_to_int(&res->dev_lun);
+
+		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
+			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
+				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
+					found = 1;
+					res->target = gscsi_res->target;
+					break;
+				}
+			}
+			if (!found) {
+				res->target = find_first_zero_bit(ioa_cfg->target_ids,
+								  ioa_cfg->max_devs_supported);
+				set_bit(res->target, ioa_cfg->target_ids);
+			}
+
+			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
+			       sizeof(res->dev_lun.scsi_lun));
+		} else if (res->type == IPR_RES_TYPE_IOAFP) {
+			res->bus = IPR_IOAFP_VIRTUAL_BUS;
+			res->target = 0;
+		} else if (res->type == IPR_RES_TYPE_ARRAY) {
+			res->bus = IPR_ARRAY_VIRTUAL_BUS;
+			res->target = find_first_zero_bit(ioa_cfg->array_ids,
+							  ioa_cfg->max_devs_supported);
+			set_bit(res->target, ioa_cfg->array_ids);
+		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
+			res->bus = IPR_VSET_VIRTUAL_BUS;
+			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
+							  ioa_cfg->max_devs_supported);
+			set_bit(res->target, ioa_cfg->vset_ids);
+		} else {
+			res->target = find_first_zero_bit(ioa_cfg->target_ids,
+							  ioa_cfg->max_devs_supported);
+			set_bit(res->target, ioa_cfg->target_ids);
+		}
+	} else {
+		proto = cfgtew->u.cfgte->proto;
+		res->qmodel = IPR_QUEUEING_MODEL(res);
+		res->flags = cfgtew->u.cfgte->flags;
+		if (res->flags & IPR_IS_IOA_RESOURCE)
+			res->type = IPR_RES_TYPE_IOAFP;
+		else
+			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
+
+		res->bus = cfgtew->u.cfgte->res_addr.bus;
+		res->target = cfgtew->u.cfgte->res_addr.target;
+		res->lun = cfgtew->u.cfgte->res_addr.lun;
+	}
+
+	ipr_update_ata_class(res, proto);
+}
+
+/**
+ * ipr_is_same_device - Determine if two devices are the same.
+ * @res:	resource entry struct
+ * @cfgtew:	config table entry wrapper struct
+ *
+ * Return value:
+ * 	1 if the devices are the same / 0 otherwise
+ **/
+static int ipr_is_same_device(struct ipr_resource_entry *res,
+			      struct ipr_config_table_entry_wrapper *cfgtew)
+{
+	if (res->ioa_cfg->sis64) {
+		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
+			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
+		    !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
+			    sizeof(cfgtew->u.cfgte64->lun))) {
+			return 1;
+		}
+	} else {
+		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
+		    res->target == cfgtew->u.cfgte->res_addr.target &&
+		    res->lun == cfgtew->u.cfgte->res_addr.lun)
+			return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * ipr_format_resource_path - Format the resource path for printing.
+ * @res_path:	resource path
+ * @buf:	buffer
+ *
+ * Return value:
+ * 	pointer to buffer
+ **/
+static char *ipr_format_resource_path(u8 *res_path, char *buffer)
+{
+	int i;
+
+	sprintf(buffer, "%02X", res_path[0]);
+	for (i=1; res_path[i] != 0xff; i++)
+		sprintf(buffer, "%s-%02X", buffer, res_path[i]);
+
+	return buffer;
+}
+
+/**
+ * ipr_update_res_entry - Update the resource entry.
+ * @res:	resource entry struct
+ * @cfgtew:	config table entry wrapper struct
+ *
+ * Return value:
+ *      none
+ **/
+static void ipr_update_res_entry(struct ipr_resource_entry *res,
+				 struct ipr_config_table_entry_wrapper *cfgtew)
+{
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+	unsigned int proto;
+	int new_path = 0;
+
+	if (res->ioa_cfg->sis64) {
+		res->flags = cfgtew->u.cfgte64->flags;
+		res->res_flags = cfgtew->u.cfgte64->res_flags;
+		res->type = cfgtew->u.cfgte64->res_type & 0x0f;
+
+		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
+		       sizeof(struct ipr_std_inq_data));
+
+		res->qmodel = IPR_QUEUEING_MODEL64(res);
+		proto = cfgtew->u.cfgte64->proto;
+		res->res_handle = cfgtew->u.cfgte64->res_handle;
+		res->dev_id = cfgtew->u.cfgte64->dev_id;
+
+		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
+		       sizeof(res->dev_lun.scsi_lun));
+
+		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
+			   sizeof(res->res_path))) {
+			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
+			       sizeof(res->res_path));
+			new_path = 1;
+		}
+
+		if (res->sdev && new_path)
+			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
+				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
+	} else {
+		res->flags = cfgtew->u.cfgte->flags;
+		if (res->flags & IPR_IS_IOA_RESOURCE)
+			res->type = IPR_RES_TYPE_IOAFP;
+		else
+			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
+
+		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
+		       sizeof(struct ipr_std_inq_data));
+
+		res->qmodel = IPR_QUEUEING_MODEL(res);
+		proto = cfgtew->u.cfgte->proto;
+		res->res_handle = cfgtew->u.cfgte->res_handle;
+	}
+
+	ipr_update_ata_class(res, proto);
+}
+
+/**
+ * ipr_clear_res_target - Clear the bit in the bit map representing the target
+ *			  for the resource.
+ * @res:	resource entry struct
+ * @cfgtew:	config table entry wrapper struct
+ *
+ * Return value:
+ *      none
+ **/
+static void ipr_clear_res_target(struct ipr_resource_entry *res)
+{
+	struct ipr_resource_entry *gscsi_res = NULL;
+	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
+
+	if (!ioa_cfg->sis64)
+		return;
+
+	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
+		clear_bit(res->target, ioa_cfg->array_ids);
+	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
+		clear_bit(res->target, ioa_cfg->vset_ids);
+	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
+		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
+			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
+				return;
+		clear_bit(res->target, ioa_cfg->target_ids);
+
+	} else if (res->bus == 0)
+		clear_bit(res->target, ioa_cfg->target_ids);
 }
 
 /**
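
A SIS64 resource path, handled throughout the hunk above, is a fixed-size byte array terminated by 0xff, and ipr_format_resource_path() renders it as dash-separated hex. The stand-alone rendition below illustrates the output format with made-up values; it deliberately writes through a moving cursor, since the driver's sprintf(buffer, "%s-%02X", buffer, ...) idiom passes the destination as a source argument, which the C standard leaves undefined for overlapping objects:

    #include <stdio.h>

    /* illustration only, not the driver function */
    static char *format_res_path(const unsigned char *res_path, char *buffer)
    {
    	char *p = buffer;
    	int i;

    	p += sprintf(p, "%02X", res_path[0]);
    	for (i = 1; res_path[i] != 0xff; i++)
    		p += sprintf(p, "-%02X", res_path[i]);
    	return buffer;
    }

    int main(void)
    {
    	unsigned char path[] = { 0x00, 0x02, 0x04, 0xff };	/* made-up */
    	char buf[32];

    	printf("%s\n", format_res_path(path, buf));	/* prints 00-02-04 */
    	return 0;
    }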
@@ -851,17 +1245,24 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
  * 	none
  **/
 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
 				     struct ipr_hostrcb *hostrcb)
 {
 	struct ipr_resource_entry *res = NULL;
-	struct ipr_config_table_entry *cfgte;
+	struct ipr_config_table_entry_wrapper cfgtew;
+	__be32 cc_res_handle;
+
 	u32 is_ndn = 1;
 
-	cfgte = &hostrcb->hcam.u.ccn.cfgte;
+	if (ioa_cfg->sis64) {
+		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
+		cc_res_handle = cfgtew.u.cfgte64->res_handle;
+	} else {
+		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
+		cc_res_handle = cfgtew.u.cfgte->res_handle;
+	}
 
 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
-			    sizeof(cfgte->res_addr))) {
+		if (res->res_handle == cc_res_handle) {
 			is_ndn = 0;
 			break;
 		}
@@ -879,20 +1280,22 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
 				 struct ipr_resource_entry, queue);
 
 		list_del(&res->queue);
-		ipr_init_res_entry(res);
+		ipr_init_res_entry(res, &cfgtew);
 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
 	}
 
-	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
+	ipr_update_res_entry(res, &cfgtew);
 
 	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
 		if (res->sdev) {
 			res->del_from_ml = 1;
-			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
+			res->res_handle = IPR_INVALID_RES_HANDLE;
 			if (ioa_cfg->allow_ml_add_del)
 				schedule_work(&ioa_cfg->work_q);
-		} else
+		} else {
+			ipr_clear_res_target(res);
 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+		}
 	} else if (!res->sdev) {
 		res->add_to_ml = 1;
 		if (ioa_cfg->allow_ml_add_del)
@@ -1044,8 +1447,12 @@ static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
 					 struct ipr_hostrcb *hostrcb)
 {
-	struct ipr_hostrcb_type_12_error *error =
-		&hostrcb->hcam.u.error.u.type_12_error;
+	struct ipr_hostrcb_type_12_error *error;
+
+	if (ioa_cfg->sis64)
+		error = &hostrcb->hcam.u.error64.u.type_12_error;
+	else
+		error = &hostrcb->hcam.u.error.u.type_12_error;
 
 	ipr_err("-----Current Configuration-----\n");
 	ipr_err("Cache Directory Card Information:\n");
@@ -1138,6 +1545,48 @@ static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
 }
 
 /**
+ * ipr_log_sis64_config_error - Log a device error.
+ * @ioa_cfg:	ioa config struct
+ * @hostrcb:	hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
+				       struct ipr_hostrcb *hostrcb)
+{
+	int errors_logged, i;
+	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
+	struct ipr_hostrcb_type_23_error *error;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+	error = &hostrcb->hcam.u.error64.u.type_23_error;
+	errors_logged = be32_to_cpu(error->errors_logged);
+
+	ipr_err("Device Errors Detected/Logged: %d/%d\n",
+		be32_to_cpu(error->errors_detected), errors_logged);
+
+	dev_entry = error->dev;
+
+	for (i = 0; i < errors_logged; i++, dev_entry++) {
+		ipr_err_separator;
+
+		ipr_err("Device %d : %s", i + 1,
+			ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
+		ipr_log_ext_vpd(&dev_entry->vpd);
+
+		ipr_err("-----New Device Information-----\n");
+		ipr_log_ext_vpd(&dev_entry->new_vpd);
+
+		ipr_err("Cache Directory Card Information:\n");
+		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
+
+		ipr_err("Adapter Card Information:\n");
+		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
+	}
+}
+
+/**
  * ipr_log_config_error - Log a configuration error.
  * @ioa_cfg:	ioa config struct
  * @hostrcb:	hostrcb struct
@@ -1331,9 +1780,13 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
 {
 	struct ipr_hostrcb_type_17_error *error;
 
-	error = &hostrcb->hcam.u.error.u.type_17_error;
+	if (ioa_cfg->sis64)
+		error = &hostrcb->hcam.u.error64.u.type_17_error;
+	else
+		error = &hostrcb->hcam.u.error.u.type_17_error;
+
 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
-	strstrip(error->failure_reason);
+	strim(error->failure_reason);
 
 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
@@ -1359,7 +1812,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
 
 	error = &hostrcb->hcam.u.error.u.type_07_error;
 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
-	strstrip(error->failure_reason);
+	strim(error->failure_reason);
 
 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
@@ -1438,6 +1891,42 @@ static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
 		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
 }
 
+/**
+ * ipr_log64_fabric_path - Log a fabric path error
+ * @hostrcb:	hostrcb struct
+ * @fabric:	fabric descriptor
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
+				  struct ipr_hostrcb64_fabric_desc *fabric)
+{
+	int i, j;
+	u8 path_state = fabric->path_state;
+	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
+	u8 state = path_state & IPR_PATH_STATE_MASK;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
+		if (path_active_desc[i].active != active)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
+			if (path_state_desc[j].state != state)
+				continue;
+
+			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
+				     path_active_desc[i].desc, path_state_desc[j].desc,
+				     ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
+			return;
+		}
+	}
+
+	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
+		ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
+}
+
 static const struct {
 	u8 type;
 	char *desc;
@@ -1547,6 +2036,49 @@ static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
 }
 
 /**
+ * ipr_log64_path_elem - Log a fabric path element.
+ * @hostrcb:	hostrcb struct
+ * @cfg:	fabric path element struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
+				struct ipr_hostrcb64_config_element *cfg)
+{
+	int i, j;
+	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
+	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
+	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
+		if (path_type_desc[i].type != type)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
+			if (path_status_desc[j].status != status)
+				continue;
+
+			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
+				     path_status_desc[j].desc, path_type_desc[i].desc,
+				     ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
+				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+			return;
+		}
+	}
+	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
+		     "WWN=%08X%08X\n", cfg->type_status,
+		     ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
+		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+}
+
+/**
  * ipr_log_fabric_error - Log a fabric error.
  * @ioa_cfg:	ioa config struct
  * @hostrcb:	hostrcb struct
@@ -1584,6 +2116,96 @@ static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
 }
 
 /**
+ * ipr_log_sis64_array_error - Log a sis64 array error.
+ * @ioa_cfg:	ioa config struct
+ * @hostrcb:	hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
+				      struct ipr_hostrcb *hostrcb)
+{
+	int i, num_entries;
+	struct ipr_hostrcb_type_24_error *error;
+	struct ipr_hostrcb64_array_data_entry *array_entry;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
+
+	error = &hostrcb->hcam.u.error64.u.type_24_error;
+
+	ipr_err_separator;
+
+	ipr_err("RAID %s Array Configuration: %s\n",
+		error->protection_level,
+		ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
+
+	ipr_err_separator;
+
+	array_entry = error->array_member;
+	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
+			    sizeof(error->array_member));
+
+	for (i = 0; i < num_entries; i++, array_entry++) {
+
+		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
+			continue;
+
+		if (error->exposed_mode_adn == i)
+			ipr_err("Exposed Array Member %d:\n", i);
+		else
+			ipr_err("Array Member %d:\n", i);
+
+		ipr_err("Array Member %d:\n", i);
+		ipr_log_ext_vpd(&array_entry->vpd);
+		ipr_err("Current Location: %s",
+			ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
+		ipr_err("Expected Location: %s",
+			ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
+
+		ipr_err_separator;
+	}
+}
+
+/**
+ * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
+ * @ioa_cfg:	ioa config struct
+ * @hostrcb:	hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
+				       struct ipr_hostrcb *hostrcb)
+{
+	struct ipr_hostrcb_type_30_error *error;
+	struct ipr_hostrcb64_fabric_desc *fabric;
+	struct ipr_hostrcb64_config_element *cfg;
+	int i, add_len;
+
+	error = &hostrcb->hcam.u.error64.u.type_30_error;
+
+	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
+
+	add_len = be32_to_cpu(hostrcb->hcam.length) -
+		(offsetof(struct ipr_hostrcb64_error, u) +
+		 offsetof(struct ipr_hostrcb_type_30_error, desc));
+
+	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
+		ipr_log64_fabric_path(hostrcb, fabric);
+		for_each_fabric_cfg(fabric, cfg)
+			ipr_log64_path_elem(hostrcb, cfg);
+
+		add_len -= be16_to_cpu(fabric->length);
+		fabric = (struct ipr_hostrcb64_fabric_desc *)
+			((unsigned long)fabric + be16_to_cpu(fabric->length));
+	}
+
+	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+}
+
+/**
  * ipr_log_generic_error - Log an adapter error.
  * @ioa_cfg:	ioa config struct
  * @hostrcb:	hostrcb struct
@@ -1642,13 +2264,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
 	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
 		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
 
-	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+	if (ioa_cfg->sis64)
+		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
+	else
+		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 
-	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
-	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
+	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
+	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
 		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
 		scsi_report_bus_reset(ioa_cfg->host,
-				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
+				      hostrcb->hcam.u.error.fd_res_addr.bus);
 	}
 
 	error_index = ipr_get_error(ioasc);
@@ -1696,6 +2321,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
 	case IPR_HOST_RCB_OVERLAY_ID_20:
 		ipr_log_fabric_error(ioa_cfg, hostrcb);
 		break;
+	case IPR_HOST_RCB_OVERLAY_ID_23:
+		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
+		break;
+	case IPR_HOST_RCB_OVERLAY_ID_24:
+	case IPR_HOST_RCB_OVERLAY_ID_26:
+		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
+		break;
+	case IPR_HOST_RCB_OVERLAY_ID_30:
+		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
+		break;
 	case IPR_HOST_RCB_OVERLAY_ID_1:
 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
 	default:
@@ -1720,7 +2355,12 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
-	u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+	u32 fd_ioasc;
+
+	if (ioa_cfg->sis64)
+		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
+	else
+		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 
 	list_del(&hostrcb->queue);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
@@ -1845,12 +2485,14 @@ static const struct ipr_ses_table_entry *
 ipr_find_ses_entry(struct ipr_resource_entry *res)
 {
 	int i, j, matches;
+	struct ipr_std_inq_vpids *vpids;
 	const struct ipr_ses_table_entry *ste = ipr_ses_table;
 
 	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
 		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
 			if (ste->compare_product_id_byte[j] == 'X') {
-				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
+				vpids = &res->std_inq_data.vpids;
+				if (vpids->product_id[j] == ste->product_id[j])
 					matches++;
 				else
 					break;
@@ -1885,10 +2527,10 @@ static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_wi
 
 	/* Loop through each config table entry in the config table buffer */
 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
+		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
 			continue;
 
-		if (bus != res->cfgte.res_addr.bus)
+		if (bus != res->bus)
 			continue;
 
 		if (!(ste = ipr_find_ses_entry(res)))
@@ -1934,6 +2576,31 @@ static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
 }
 
 /**
+ * ipr_get_sis64_dump_data_section - Dump IOA memory
+ * @ioa_cfg:			ioa config struct
+ * @start_addr:			adapter address to dump
+ * @dest:			destination kernel buffer
+ * @length_in_words:		length to dump in 4 byte words
+ *
+ * Return value:
+ * 	0 on success
+ **/
+static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
+					   u32 start_addr,
+					   __be32 *dest, u32 length_in_words)
+{
+	int i;
+
+	for (i = 0; i < length_in_words; i++) {
+		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
+		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
+		dest++;
+	}
+
+	return 0;
+}
+
+/**
  * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
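
Where the SIS32 LDUMP flow below has to raise RESET_ALERT/IO_DEBUG_ALERT and handshake with the adapter microprocessor word by word, the SIS64 reader above is a plain indirect window: write the adapter-side address into dump_addr_reg, read the word back through dump_data_reg (offsets 0x00064 and 0x00068 in the CRoC table near the top of this diff), and store it big-endian for the dump file. In sketch form, one word through the window:

    /* sketch of one fetch through the CRoC dump window */
    writel(start_addr + i * 4, ioa_cfg->regs.dump_addr_reg);
    dest[i] = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));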
@@ -1950,9 +2617,13 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
 	volatile u32 temp_pcii_reg;
 	int i, delay = 0;
 
+	if (ioa_cfg->sis64)
+		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
+						       dest, length_in_words);
+
 	/* Write IOA interrupt reg starting LDUMP state */
 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
-	       ioa_cfg->regs.set_uproc_interrupt_reg);
+	       ioa_cfg->regs.set_uproc_interrupt_reg32);
 
 	/* Wait for IO debug acknowledge */
 	if (ipr_wait_iodbg_ack(ioa_cfg,
@@ -1971,7 +2642,7 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
 
 	/* Signal address valid - clear IOA Reset alert */
 	writel(IPR_UPROCI_RESET_ALERT,
-	       ioa_cfg->regs.clr_uproc_interrupt_reg);
+	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
 
 	for (i = 0; i < length_in_words; i++) {
 		/* Wait for IO debug acknowledge */
@@ -1996,10 +2667,10 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
 
 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
 	writel(IPR_UPROCI_RESET_ALERT,
-	       ioa_cfg->regs.set_uproc_interrupt_reg);
+	       ioa_cfg->regs.set_uproc_interrupt_reg32);
 
 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
-	       ioa_cfg->regs.clr_uproc_interrupt_reg);
+	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
 
 	/* Signal dump data received - Clear IO debug Ack */
 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
@@ -2008,7 +2679,7 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
 		temp_pcii_reg =
-		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
+		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
 
 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
 			return 0;
@@ -2207,6 +2878,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 	u32 num_entries, start_off, end_off;
 	u32 bytes_to_copy, bytes_copied, rc;
 	struct ipr_sdt *sdt;
+	int valid = 1;
 	int i;
 
 	ENTER;
@@ -2220,7 +2892,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 
 	start_addr = readl(ioa_cfg->ioa_mailbox);
 
-	if (!ipr_sdt_is_fmt2(start_addr)) {
+	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
 		dev_err(&ioa_cfg->pdev->dev,
 			"Invalid dump table format: %lx\n", start_addr);
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -2249,7 +2921,6 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 
 	/* IOA Dump entry */
 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
-	ioa_dump->format = IPR_SDT_FMT2;
 	ioa_dump->hdr.len = 0;
 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
@@ -2264,7 +2935,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 			sizeof(struct ipr_sdt) / sizeof(__be32));
 
 	/* Smart Dump table is ready to use and the first entry is valid */
-	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
+	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
+	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
 		dev_err(&ioa_cfg->pdev->dev,
 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
 			rc, be32_to_cpu(sdt->hdr.state));
@@ -2288,12 +2960,19 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 		}
 
 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
-			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
-			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
-			end_off = be32_to_cpu(sdt->entry[i].end_offset);
-
-			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
-				bytes_to_copy = end_off - start_off;
+			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
+			if (ioa_cfg->sis64)
+				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
+			else {
+				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
+				end_off = be32_to_cpu(sdt->entry[i].end_token);
+
+				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
+					bytes_to_copy = end_off - start_off;
+				else
+					valid = 0;
+			}
+			if (valid) {
 				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
 					continue;
@@ -2422,9 +3101,9 @@ restart:
 
 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 		if (res->add_to_ml) {
-			bus = res->cfgte.res_addr.bus;
-			target = res->cfgte.res_addr.target;
-			lun = res->cfgte.res_addr.lun;
+			bus = res->bus;
+			target = res->target;
+			lun = res->lun;
 			res->add_to_ml = 0;
 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 			scsi_add_device(ioa_cfg->host, bus, target, lun);
@@ -2478,105 +3157,6 @@ static struct bin_attribute ipr_trace_attr = {
 };
 #endif
 
-static const struct {
-	enum ipr_cache_state state;
-	char *name;
-} cache_state [] = {
-	{ CACHE_NONE, "none" },
-	{ CACHE_DISABLED, "disabled" },
-	{ CACHE_ENABLED, "enabled" }
-};
-
-/**
- * ipr_show_write_caching - Show the write caching attribute
- * @dev:	device struct
- * @buf:	buffer
- *
- * Return value:
- *	number of bytes printed to buffer
- **/
-static ssize_t ipr_show_write_caching(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
-	unsigned long lock_flags = 0;
-	int i, len = 0;
-
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
-		if (cache_state[i].state == ioa_cfg->cache_state) {
-			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-	return len;
-}
-
-
-/**
- * ipr_store_write_caching - Enable/disable adapter write cache
- * @dev:	device struct
- * @buf:	buffer
- * @count:	buffer size
- *
- * This function will enable/disable adapter write cache.
- *
- * Return value:
- * 	count on success / other on failure
- **/
-static ssize_t ipr_store_write_caching(struct device *dev,
-				       struct device_attribute *attr,
-				       const char *buf, size_t count)
-{
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
-	unsigned long lock_flags = 0;
-	enum ipr_cache_state new_state = CACHE_INVALID;
-	int i;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EACCES;
-	if (ioa_cfg->cache_state == CACHE_NONE)
-		return -EINVAL;
-
-	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
-		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
-			new_state = cache_state[i].state;
-			break;
-		}
-	}
-
-	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
-		return -EINVAL;
-
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	if (ioa_cfg->cache_state == new_state) {
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-		return count;
-	}
-
-	ioa_cfg->cache_state = new_state;
-	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
-		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
-	if (!ioa_cfg->in_reset_reload)
-		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-
-	return count;
-}
-
-static struct device_attribute ipr_ioa_cache_attr = {
-	.attr = {
-		.name =		"write_cache",
-		.mode =		S_IRUGO | S_IWUSR,
-	},
-	.show = ipr_show_write_caching,
-	.store = ipr_store_write_caching
-};
-
 /**
  * ipr_show_fw_version - Show the firmware version
  * @dev:	class device struct
@@ -2976,6 +3556,37 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
 }
 
 /**
+ * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
+ * @ipr_cmd:	ipr command struct
+ * @sglist:	scatter/gather list
+ *
+ * Builds a microcode download IOA data list (IOADL).
+ *
+ **/
+static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
+				    struct ipr_sglist *sglist)
+{
+	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+	struct scatterlist *scatterlist = sglist->scatterlist;
+	int i;
+
+	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
+	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
+
+	ioarcb->ioadl_len =
+		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
+	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
+		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
+		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
+	}
+
+	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+}
+
+/**
  * ipr_build_ucode_ioadl - Build a microcode download IOADL
  * @ipr_cmd:	ipr command struct
  * @sglist:	scatter/gather list
@@ -2987,14 +3598,15 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2987 struct ipr_sglist *sglist) 3598 struct ipr_sglist *sglist)
2988{ 3599{
2989 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3600 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2990 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 3601 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
2991 struct scatterlist *scatterlist = sglist->scatterlist; 3602 struct scatterlist *scatterlist = sglist->scatterlist;
2992 int i; 3603 int i;
2993 3604
2994 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 3605 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2995 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 3606 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2996 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len); 3607 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
2997 ioarcb->write_ioadl_len = 3608
3609 ioarcb->ioadl_len =
2998 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 3610 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2999 3611
3000 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 3612 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
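Both ucode IOADL builders follow the same descriptor discipline: every element carries big-endian flags, length, and address, and only the final element gets the LAST flag. A standalone model of the 64-bit variant; the struct mirrors ipr_ioadl64_desc, but the flag values are placeholders, not the driver's constants:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct ioadl64 {
        uint32_t flags;      /* big-endian: direction, plus LAST on the tail */
        uint32_t data_len;   /* big-endian byte count */
        uint64_t address;    /* big-endian DMA address */
};

#define FLAGS_WRITE 0x00800000u  /* placeholder value */
#define FLAGS_LAST  0x80000000u  /* placeholder value */

static void build_ioadl64(struct ioadl64 *d, const uint64_t *addr,
                          const uint32_t *len, int nseg)
{
        int i;

        for (i = 0; i < nseg; i++) {
                d[i].flags = htobe32(FLAGS_WRITE);
                d[i].data_len = htobe32(len[i]);
                d[i].address = htobe64(addr[i]);
        }
        /* Only the final descriptor carries the LAST flag. */
        d[nseg - 1].flags |= htobe32(FLAGS_LAST);
}

int main(void)
{
        uint64_t addr[2] = { 0x100000000ULL, 0x100010000ULL };
        uint32_t len[2] = { 4096, 512 };
        struct ioadl64 d[2];

        build_ioadl64(d, addr, len, 2);
        printf("tail flags: 0x%08x\n", (unsigned)be32toh(d[1].flags));
        return 0;
}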
@@ -3146,7 +3758,6 @@ static struct device_attribute *ipr_ioa_attrs[] = {
3146 &ipr_ioa_state_attr, 3758 &ipr_ioa_state_attr,
3147 &ipr_ioa_reset_attr, 3759 &ipr_ioa_reset_attr,
3148 &ipr_update_fw_attr, 3760 &ipr_update_fw_attr,
3149 &ipr_ioa_cache_attr,
3150 NULL, 3761 NULL,
3151}; 3762};
3152 3763
@@ -3367,16 +3978,21 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3367 * ipr_change_queue_depth - Change the device's queue depth 3978 * ipr_change_queue_depth - Change the device's queue depth
3368 * @sdev: scsi device struct 3979 * @sdev: scsi device struct
3369 * @qdepth: depth to set 3980 * @qdepth: depth to set
3981 * @reason: calling context
3370 * 3982 *
3371 * Return value: 3983 * Return value:
3372 * actual depth set 3984 * actual depth set
3373 **/ 3985 **/
3374static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) 3986static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3987 int reason)
3375{ 3988{
3376 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 3989 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3377 struct ipr_resource_entry *res; 3990 struct ipr_resource_entry *res;
3378 unsigned long lock_flags = 0; 3991 unsigned long lock_flags = 0;
3379 3992
3993 if (reason != SCSI_QDEPTH_DEFAULT)
3994 return -EOPNOTSUPP;
3995
3380 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3996 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3381 res = (struct ipr_resource_entry *)sdev->hostdata; 3997 res = (struct ipr_resource_entry *)sdev->hostdata;
3382 3998
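The new reason argument lets the midlayer say why the depth change was requested; this driver only honors the default case. A kernel-style sketch of that contract, assuming the 2.6.3x-era scsi_adjust_queue_depth()/scsi_get_tag_type() helpers (not a drop-in for the ipr version, which also consults its resource entry):

static int example_change_queue_depth(struct scsi_device *sdev, int qdepth,
                                      int reason)
{
        /* Anything other than a plain depth change is unsupported here. */
        if (reason != SCSI_QDEPTH_DEFAULT)
                return -EOPNOTSUPP;

        scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
        return sdev->queue_depth;
}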
@@ -3445,7 +4061,7 @@ static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribu
3445 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4061 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3446 res = (struct ipr_resource_entry *)sdev->hostdata; 4062 res = (struct ipr_resource_entry *)sdev->hostdata;
3447 if (res) 4063 if (res)
3448 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle); 4064 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
3449 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4065 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3450 return len; 4066 return len;
3451} 4067}
@@ -3458,8 +4074,43 @@ static struct device_attribute ipr_adapter_handle_attr = {
3458 .show = ipr_show_adapter_handle 4074 .show = ipr_show_adapter_handle
3459}; 4075};
3460 4076
4077/**
4078 * ipr_show_resource_path - Show the resource path for this device.
4079 * @dev: device struct
4080 * @buf: buffer
4081 *
4082 * Return value:
4083 * number of bytes printed to buffer
4084 **/
4085static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4086{
4087 struct scsi_device *sdev = to_scsi_device(dev);
4088 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4089 struct ipr_resource_entry *res;
4090 unsigned long lock_flags = 0;
4091 ssize_t len = -ENXIO;
4092 char buffer[IPR_MAX_RES_PATH_LENGTH];
4093
4094 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4095 res = (struct ipr_resource_entry *)sdev->hostdata;
4096 if (res)
4097 len = snprintf(buf, PAGE_SIZE, "%s\n",
4098 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4099 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4100 return len;
4101}
4102
4103static struct device_attribute ipr_resource_path_attr = {
4104 .attr = {
4105 .name = "resource_path",
4106 .mode = S_IRUSR,
4107 },
4108 .show = ipr_show_resource_path
4109};
4110
3461static struct device_attribute *ipr_dev_attrs[] = { 4111static struct device_attribute *ipr_dev_attrs[] = {
3462 &ipr_adapter_handle_attr, 4112 &ipr_adapter_handle_attr,
4113 &ipr_resource_path_attr,
3463 NULL, 4114 NULL,
3464}; 4115};
3465 4116
@@ -3512,9 +4163,9 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3512 struct ipr_resource_entry *res; 4163 struct ipr_resource_entry *res;
3513 4164
3514 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4165 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3515 if ((res->cfgte.res_addr.bus == starget->channel) && 4166 if ((res->bus == starget->channel) &&
3516 (res->cfgte.res_addr.target == starget->id) && 4167 (res->target == starget->id) &&
3517 (res->cfgte.res_addr.lun == 0)) { 4168 (res->lun == 0)) {
3518 return res; 4169 return res;
3519 } 4170 }
3520 } 4171 }
@@ -3584,6 +4235,17 @@ static int ipr_target_alloc(struct scsi_target *starget)
3584static void ipr_target_destroy(struct scsi_target *starget) 4235static void ipr_target_destroy(struct scsi_target *starget)
3585{ 4236{
3586 struct ipr_sata_port *sata_port = starget->hostdata; 4237 struct ipr_sata_port *sata_port = starget->hostdata;
4238 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4239 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4240
4241 if (ioa_cfg->sis64) {
4242 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4243 clear_bit(starget->id, ioa_cfg->array_ids);
4244 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4245 clear_bit(starget->id, ioa_cfg->vset_ids);
4246 else if (starget->channel == 0)
4247 clear_bit(starget->id, ioa_cfg->target_ids);
4248 }
3587 4249
3588 if (sata_port) { 4250 if (sata_port) {
3589 starget->hostdata = NULL; 4251 starget->hostdata = NULL;
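On SIS-64 adapters the driver hands out virtual target ids from per-bus bitmaps, so ipr_target_destroy has to return the id to the right map. A self-contained model of that allocate-lowest-free/clear-on-destroy bookkeeping (the kernel uses set_bit()/clear_bit() on ioa_cfg->*_ids; the 64-id limit below is arbitrary):

#include <stdio.h>

#define MAX_IDS 64
static unsigned long long ids;  /* one bit per target id */

static int alloc_id(void)
{
        int i;

        for (i = 0; i < MAX_IDS; i++) {
                if (!(ids & (1ULL << i))) {
                        ids |= 1ULL << i;   /* like set_bit() */
                        return i;
                }
        }
        return -1;
}

static void free_id(int id)
{
        ids &= ~(1ULL << id);               /* like clear_bit() */
}

int main(void)
{
        int a = alloc_id(), b = alloc_id();

        free_id(a);
        printf("%d %d %d\n", a, b, alloc_id()); /* 0 1 0: freed id is reused */
        return 0;
}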
@@ -3605,9 +4267,9 @@ static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3605 struct ipr_resource_entry *res; 4267 struct ipr_resource_entry *res;
3606 4268
3607 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4269 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3608 if ((res->cfgte.res_addr.bus == sdev->channel) && 4270 if ((res->bus == sdev->channel) &&
3609 (res->cfgte.res_addr.target == sdev->id) && 4271 (res->target == sdev->id) &&
3610 (res->cfgte.res_addr.lun == sdev->lun)) 4272 (res->lun == sdev->lun))
3611 return res; 4273 return res;
3612 } 4274 }
3613 4275
@@ -3656,6 +4318,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3656 struct ipr_resource_entry *res; 4318 struct ipr_resource_entry *res;
3657 struct ata_port *ap = NULL; 4319 struct ata_port *ap = NULL;
3658 unsigned long lock_flags = 0; 4320 unsigned long lock_flags = 0;
4321 char buffer[IPR_MAX_RES_PATH_LENGTH];
3659 4322
3660 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4323 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3661 res = sdev->hostdata; 4324 res = sdev->hostdata;
@@ -3669,7 +4332,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3669 if (ipr_is_vset_device(res)) { 4332 if (ipr_is_vset_device(res)) {
3670 blk_queue_rq_timeout(sdev->request_queue, 4333 blk_queue_rq_timeout(sdev->request_queue,
3671 IPR_VSET_RW_TIMEOUT); 4334 IPR_VSET_RW_TIMEOUT);
3672 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 4335 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3673 } 4336 }
3674 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) 4337 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3675 sdev->allow_restart = 1; 4338 sdev->allow_restart = 1;
@@ -3682,6 +4345,9 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3682 ata_sas_slave_configure(sdev, ap); 4345 ata_sas_slave_configure(sdev, ap);
3683 } else 4346 } else
3684 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 4347 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4348 if (ioa_cfg->sis64)
4349 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4350 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
3685 return 0; 4351 return 0;
3686 } 4352 }
3687 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3823,14 +4489,19 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3823 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 4489 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3824 ioarcb = &ipr_cmd->ioarcb; 4490 ioarcb = &ipr_cmd->ioarcb;
3825 cmd_pkt = &ioarcb->cmd_pkt; 4491 cmd_pkt = &ioarcb->cmd_pkt;
3826 regs = &ioarcb->add_data.u.regs;
3827 4492
3828 ioarcb->res_handle = res->cfgte.res_handle; 4493 if (ipr_cmd->ioa_cfg->sis64) {
4494 regs = &ipr_cmd->i.ata_ioadl.regs;
4495 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4496 } else
4497 regs = &ioarcb->u.add_data.u.regs;
4498
4499 ioarcb->res_handle = res->res_handle;
3829 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 4500 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3830 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 4501 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3831 if (ipr_is_gata(res)) { 4502 if (ipr_is_gata(res)) {
3832 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; 4503 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3833 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags)); 4504 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
3834 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 4505 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3835 } 4506 }
3836 4507
@@ -3875,19 +4546,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
3875 res = sata_port->res; 4546 res = sata_port->res;
3876 if (res) { 4547 if (res) {
3877 rc = ipr_device_reset(ioa_cfg, res); 4548 rc = ipr_device_reset(ioa_cfg, res);
3878 switch(res->cfgte.proto) { 4549 *classes = res->ata_class;
3879 case IPR_PROTO_SATA:
3880 case IPR_PROTO_SAS_STP:
3881 *classes = ATA_DEV_ATA;
3882 break;
3883 case IPR_PROTO_SATA_ATAPI:
3884 case IPR_PROTO_SAS_STP_ATAPI:
3885 *classes = ATA_DEV_ATAPI;
3886 break;
3887 default:
3888 *classes = ATA_DEV_UNKNOWN;
3889 break;
3890 };
3891 } 4550 }
3892 4551
3893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4552 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3932,7 +4591,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3932 return FAILED; 4591 return FAILED;
3933 4592
3934 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4593 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3935 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 4594 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
3936 if (ipr_cmd->scsi_cmd) 4595 if (ipr_cmd->scsi_cmd)
3937 ipr_cmd->done = ipr_scsi_eh_done; 4596 ipr_cmd->done = ipr_scsi_eh_done;
3938 if (ipr_cmd->qc) 4597 if (ipr_cmd->qc)
@@ -3954,7 +4613,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3954 spin_lock_irq(scsi_cmd->device->host->host_lock); 4613 spin_lock_irq(scsi_cmd->device->host->host_lock);
3955 4614
3956 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4615 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3957 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 4616 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
3958 rc = -EIO; 4617 rc = -EIO;
3959 break; 4618 break;
3960 } 4619 }
@@ -3993,13 +4652,13 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3993 struct ipr_resource_entry *res; 4652 struct ipr_resource_entry *res;
3994 4653
3995 ENTER; 4654 ENTER;
3996 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4655 if (!ioa_cfg->sis64)
3997 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle, 4656 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3998 sizeof(res->cfgte.res_handle))) { 4657 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
3999 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus); 4658 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4000 break; 4659 break;
4660 }
4001 } 4661 }
4002 }
4003 4662
4004 /* 4663 /*
4005 * If abort has not completed, indicate the reset has, else call the 4664 * If abort has not completed, indicate the reset has, else call the
@@ -4097,7 +4756,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4097 return SUCCESS; 4756 return SUCCESS;
4098 4757
4099 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 4758 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4100 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; 4759 ipr_cmd->ioarcb.res_handle = res->res_handle;
4101 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 4760 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4102 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 4761 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4103 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; 4762 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
@@ -4234,11 +4893,29 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4234 return IRQ_NONE; 4893 return IRQ_NONE;
4235 } 4894 }
4236 4895
4237 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 4896 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4238 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4897 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4239 4898
4240 /* If an interrupt on the adapter did not occur, ignore it */ 4899 /* If an interrupt on the adapter did not occur, ignore it.
4900 * Or in the case of SIS 64, check for a stage change interrupt.
4901 */
4241 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { 4902 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4903 if (ioa_cfg->sis64) {
4904 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4905 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4906 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4907
4908 /* clear stage change */
4909 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4910 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4911 list_del(&ioa_cfg->reset_cmd->queue);
4912 del_timer(&ioa_cfg->reset_cmd->timer);
4913 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4915 return IRQ_HANDLED;
4916 }
4917 }
4918
4242 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4243 return IRQ_NONE; 4920 return IRQ_NONE;
4244 } 4921 }
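The interrupt handler's test is a classic masked-status check: a condition is pending only if its status bit is set and its mask bit is clear, and on SIS-64 a miss on the 32-bit operational bits triggers a second look at the full-width register for a stage change before returning IRQ_NONE. A standalone model of the test (bit positions are illustrative, not the IPR_PCII_* values):

#include <stdint.h>
#include <stdio.h>

#define OPER_INTERRUPTS  0x0000ffffu  /* placeholder */
#define IPL_STAGE_CHANGE 0x20000000u  /* placeholder */

int main(void)
{
        uint32_t sense = IPL_STAGE_CHANGE; /* what the adapter raised */
        uint32_t mask = 0;                 /* nothing masked off */
        uint32_t int_reg = sense & ~mask;  /* only unmasked bits count */

        if (!(int_reg & OPER_INTERRUPTS)) {
                if (int_reg & IPL_STAGE_CHANGE)
                        puts("stage change: resume the reset job");
                else
                        puts("not ours: IRQ_NONE");
        }
        return 0;
}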
@@ -4281,8 +4958,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4281 if (ipr_cmd != NULL) { 4958 if (ipr_cmd != NULL) {
4282 /* Clear the PCI interrupt */ 4959 /* Clear the PCI interrupt */
4283 do { 4960 do {
4284 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg); 4961 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
4285 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4962 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4286 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 4963 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4287 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 4964 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4288 4965
@@ -4304,6 +4981,53 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4304} 4981}
4305 4982
4306/** 4983/**
4984 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
4985 * @ioa_cfg: ioa config struct
4986 * @ipr_cmd: ipr command struct
4987 *
4988 * Return value:
4989 * 0 on success / -1 on failure
4990 **/
4991static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
4992 struct ipr_cmnd *ipr_cmd)
4993{
4994 int i, nseg;
4995 struct scatterlist *sg;
4996 u32 length;
4997 u32 ioadl_flags = 0;
4998 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4999 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5000 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5001
5002 length = scsi_bufflen(scsi_cmd);
5003 if (!length)
5004 return 0;
5005
5006 nseg = scsi_dma_map(scsi_cmd);
5007 if (nseg < 0) {
5008 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5009 return -1;
5010 }
5011
5012 ipr_cmd->dma_use_sg = nseg;
5013
5014 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5015 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5016 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5017 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5018 ioadl_flags = IPR_IOADL_FLAGS_READ;
5019
5020 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5021 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5022 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5023 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5024 }
5025
5026 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5027 return 0;
5028}
5029
5030/**
4307 * ipr_build_ioadl - Build a scatter/gather list and map the buffer 5031 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4308 * @ioa_cfg: ioa config struct 5032 * @ioa_cfg: ioa config struct
4309 * @ipr_cmd: ipr command struct 5033 * @ipr_cmd: ipr command struct
@@ -4320,7 +5044,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4320 u32 ioadl_flags = 0; 5044 u32 ioadl_flags = 0;
4321 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5045 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4322 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5046 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4323 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 5047 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
4324 5048
4325 length = scsi_bufflen(scsi_cmd); 5049 length = scsi_bufflen(scsi_cmd);
4326 if (!length) 5050 if (!length)
@@ -4337,8 +5061,8 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4337 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5061 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4338 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5062 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4339 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5063 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4340 ioarcb->write_data_transfer_length = cpu_to_be32(length); 5064 ioarcb->data_transfer_length = cpu_to_be32(length);
4341 ioarcb->write_ioadl_len = 5065 ioarcb->ioadl_len =
4342 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5066 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4343 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { 5067 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4344 ioadl_flags = IPR_IOADL_FLAGS_READ; 5068 ioadl_flags = IPR_IOADL_FLAGS_READ;
@@ -4347,11 +5071,10 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4347 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5071 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4348 } 5072 }
4349 5073
4350 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) { 5074 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
4351 ioadl = ioarcb->add_data.u.ioadl; 5075 ioadl = ioarcb->u.add_data.u.ioadl;
4352 ioarcb->write_ioadl_addr = 5076 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
4353 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) + 5077 offsetof(struct ipr_ioarcb, u.add_data));
4354 offsetof(struct ipr_ioarcb, add_data));
4355 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5078 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4356 } 5079 }
4357 5080
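When the scatter/gather list is short enough to fit in the IOARCB's add_data area, the driver avoids a second DMA region entirely: it points write_ioadl_addr at the command block's own DMA address plus the offsetof() of the embedded array. A standalone illustration of that address arithmetic (the struct layout is invented; only the technique matches):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_ioarcb {
        uint32_t write_ioadl_addr;
        uint8_t  add_data[64];   /* small inline IOADL lives here */
};

int main(void)
{
        uint64_t block_dma = 0x80000000ULL; /* pretend DMA address of the block */
        uint64_t inline_ioadl = block_dma +
                                offsetof(struct fake_ioarcb, add_data);

        printf("inline IOADL at 0x%llx\n", (unsigned long long)inline_ioadl);
        return 0;
}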
@@ -4441,18 +5164,24 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4441{ 5164{
4442 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5165 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4443 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5166 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4444 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); 5167 dma_addr_t dma_addr = ipr_cmd->dma_addr;
4445 5168
4446 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 5169 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4447 ioarcb->write_data_transfer_length = 0; 5170 ioarcb->data_transfer_length = 0;
4448 ioarcb->read_data_transfer_length = 0; 5171 ioarcb->read_data_transfer_length = 0;
4449 ioarcb->write_ioadl_len = 0; 5172 ioarcb->ioadl_len = 0;
4450 ioarcb->read_ioadl_len = 0; 5173 ioarcb->read_ioadl_len = 0;
4451 ioasa->ioasc = 0; 5174 ioasa->ioasc = 0;
4452 ioasa->residual_data_len = 0; 5175 ioasa->residual_data_len = 0;
4453 ioarcb->write_ioadl_addr = 5176
4454 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); 5177 if (ipr_cmd->ioa_cfg->sis64)
4455 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5178 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5179 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5180 else {
5181 ioarcb->write_ioadl_addr =
5182 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5183 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5184 }
4456} 5185}
4457 5186
4458/** 5187/**
@@ -4484,15 +5213,8 @@ static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4484 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5213 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4485 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); 5214 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4486 5215
4487 ipr_cmd->ioadl[0].flags_and_data_len = 5216 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
4488 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE); 5217 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
4489 ipr_cmd->ioadl[0].address =
4490 cpu_to_be32(ipr_cmd->sense_buffer_dma);
4491
4492 ipr_cmd->ioarcb.read_ioadl_len =
4493 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4494 ipr_cmd->ioarcb.read_data_transfer_length =
4495 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4496 5218
4497 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, 5219 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4498 IPR_REQUEST_SENSE_TIMEOUT * 2); 5220 IPR_REQUEST_SENSE_TIMEOUT * 2);
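ipr_init_ioadl() is called throughout these hunks, but its body sits outside this section. Judging from the call sites, it builds a one-descriptor IOADL in whichever format the adapter speaks; the sketch below is a plausible reconstruction under that assumption, not the commit's actual code:

static void ipr_init_ioadl_sketch(struct ipr_cmnd *ipr_cmd,
                                  dma_addr_t dma_addr, u32 len, int flags)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

        if (ipr_cmd->ioa_cfg->sis64) {
                struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);
                ioarcb->ioadl_len = cpu_to_be32(sizeof(*ioadl64));
                ioarcb->data_transfer_length = cpu_to_be32(len);
        } else {
                struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ioarcb->read_ioadl_len = cpu_to_be32(sizeof(*ioadl));
                        ioarcb->read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ioarcb->ioadl_len = cpu_to_be32(sizeof(*ioadl));
                        ioarcb->data_transfer_length = cpu_to_be32(len);
                }
        }
}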
@@ -4888,9 +5610,9 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4888 5610
4889 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); 5611 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4890 ipr_cmd->scsi_cmd = scsi_cmd; 5612 ipr_cmd->scsi_cmd = scsi_cmd;
4891 ioarcb->res_handle = res->cfgte.res_handle; 5613 ioarcb->res_handle = res->res_handle;
4892 ipr_cmd->done = ipr_scsi_done; 5614 ipr_cmd->done = ipr_scsi_done;
4893 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); 5615 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
4894 5616
4895 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { 5617 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4896 if (scsi_cmd->underflow == 0) 5618 if (scsi_cmd->underflow == 0)
@@ -4911,13 +5633,16 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4911 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 5633 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4912 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 5634 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4913 5635
4914 if (likely(rc == 0)) 5636 if (likely(rc == 0)) {
4915 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 5637 if (ioa_cfg->sis64)
5638 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5639 else
5640 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5641 }
4916 5642
4917 if (likely(rc == 0)) { 5643 if (likely(rc == 0)) {
4918 mb(); 5644 mb();
4919 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), 5645 ipr_send_command(ipr_cmd);
4920 ioa_cfg->regs.ioarrin_reg);
4921 } else { 5646 } else {
4922 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5647 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4923 return SCSI_MLQUEUE_HOST_BUSY; 5648 return SCSI_MLQUEUE_HOST_BUSY;
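ipr_send_command() replaces the direct MMIO write of the IOARCB address, and its body is also outside this section; presumably it exists so one call site can ring the doorbell at the right width for either adapter generation. A hedged sketch of what such a wrapper could look like:

static void ipr_send_command_sketch(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        if (ioa_cfg->sis64)
                /* Assumption: SIS-64 takes the full 64-bit address. */
                writeq(ipr_cmd->dma_addr, ioa_cfg->regs.ioarrin_reg);
        else
                writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
                       ioa_cfg->regs.ioarrin_reg);
}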
@@ -5030,20 +5755,9 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
5030 goto out_unlock; 5755 goto out_unlock;
5031 } 5756 }
5032 5757
5033 switch(res->cfgte.proto) { 5758 ap->link.device[0].class = res->ata_class;
5034 case IPR_PROTO_SATA: 5759 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5035 case IPR_PROTO_SAS_STP:
5036 ap->link.device[0].class = ATA_DEV_ATA;
5037 break;
5038 case IPR_PROTO_SATA_ATAPI:
5039 case IPR_PROTO_SAS_STP_ATAPI:
5040 ap->link.device[0].class = ATA_DEV_ATAPI;
5041 break;
5042 default:
5043 ap->link.device[0].class = ATA_DEV_UNKNOWN;
5044 ata_port_disable(ap); 5760 ata_port_disable(ap);
5045 break;
5046 };
5047 5761
5048out_unlock: 5762out_unlock:
5049 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 5763 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
@@ -5129,8 +5843,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5129 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 5843 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5130 5844
5131 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) 5845 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5132 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus, 5846 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5133 res->cfgte.res_addr.target);
5134 5847
5135 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 5848 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5136 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); 5849 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
@@ -5141,6 +5854,52 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5141} 5854}
5142 5855
5143/** 5856/**
5857 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5858 * @ipr_cmd: ipr command struct
5859 * @qc: ATA queued command
5860 *
5861 **/
5862static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5863 struct ata_queued_cmd *qc)
5864{
5865 u32 ioadl_flags = 0;
5866 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5867 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5868 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5869 int len = qc->nbytes;
5870 struct scatterlist *sg;
5871 unsigned int si;
5872 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5873
5874 if (len == 0)
5875 return;
5876
5877 if (qc->dma_dir == DMA_TO_DEVICE) {
5878 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5879 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5880 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5881 ioadl_flags = IPR_IOADL_FLAGS_READ;
5882
5883 ioarcb->data_transfer_length = cpu_to_be32(len);
5884 ioarcb->ioadl_len =
5885 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5886 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5887 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5888
5889 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5890 ioadl64->flags = cpu_to_be32(ioadl_flags);
5891 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5892 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5893
5894 last_ioadl64 = ioadl64;
5895 ioadl64++;
5896 }
5897
5898 if (likely(last_ioadl64))
5899 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5900}
5901
5902/**
5144 * ipr_build_ata_ioadl - Build an ATA scatter/gather list 5903 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5145 * @ipr_cmd: ipr command struct 5904 * @ipr_cmd: ipr command struct
5146 * @qc: ATA queued command 5905 * @qc: ATA queued command
@@ -5151,7 +5910,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5151{ 5910{
5152 u32 ioadl_flags = 0; 5911 u32 ioadl_flags = 0;
5153 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5912 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5154 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 5913 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5155 struct ipr_ioadl_desc *last_ioadl = NULL; 5914 struct ipr_ioadl_desc *last_ioadl = NULL;
5156 int len = qc->nbytes; 5915 int len = qc->nbytes;
5157 struct scatterlist *sg; 5916 struct scatterlist *sg;
@@ -5163,8 +5922,8 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5163 if (qc->dma_dir == DMA_TO_DEVICE) { 5922 if (qc->dma_dir == DMA_TO_DEVICE) {
5164 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5923 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5165 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5924 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5166 ioarcb->write_data_transfer_length = cpu_to_be32(len); 5925 ioarcb->data_transfer_length = cpu_to_be32(len);
5167 ioarcb->write_ioadl_len = 5926 ioarcb->ioadl_len =
5168 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5927 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5169 } else if (qc->dma_dir == DMA_FROM_DEVICE) { 5928 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5170 ioadl_flags = IPR_IOADL_FLAGS_READ; 5929 ioadl_flags = IPR_IOADL_FLAGS_READ;
@@ -5207,25 +5966,34 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5207 5966
5208 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5967 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5209 ioarcb = &ipr_cmd->ioarcb; 5968 ioarcb = &ipr_cmd->ioarcb;
5210 regs = &ioarcb->add_data.u.regs;
5211 5969
5212 memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data)); 5970 if (ioa_cfg->sis64) {
5213 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs)); 5971 regs = &ipr_cmd->i.ata_ioadl.regs;
5972 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5973 } else
5974 regs = &ioarcb->u.add_data.u.regs;
5975
5976 memset(regs, 0, sizeof(*regs));
5977 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
5214 5978
5215 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 5979 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5216 ipr_cmd->qc = qc; 5980 ipr_cmd->qc = qc;
5217 ipr_cmd->done = ipr_sata_done; 5981 ipr_cmd->done = ipr_sata_done;
5218 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; 5982 ipr_cmd->ioarcb.res_handle = res->res_handle;
5219 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; 5983 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5220 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 5984 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5221 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5985 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5222 ipr_cmd->dma_use_sg = qc->n_elem; 5986 ipr_cmd->dma_use_sg = qc->n_elem;
5223 5987
5224 ipr_build_ata_ioadl(ipr_cmd, qc); 5988 if (ioa_cfg->sis64)
5989 ipr_build_ata_ioadl64(ipr_cmd, qc);
5990 else
5991 ipr_build_ata_ioadl(ipr_cmd, qc);
5992
5225 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 5993 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5226 ipr_copy_sata_tf(regs, &qc->tf); 5994 ipr_copy_sata_tf(regs, &qc->tf);
5227 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); 5995 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5228 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); 5996 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5229 5997
5230 switch (qc->tf.protocol) { 5998 switch (qc->tf.protocol) {
5231 case ATA_PROT_NODATA: 5999 case ATA_PROT_NODATA:
@@ -5252,8 +6020,9 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5252 } 6020 }
5253 6021
5254 mb(); 6022 mb();
5255 writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr), 6023
5256 ioa_cfg->regs.ioarrin_reg); 6024 ipr_send_command(ipr_cmd);
6025
5257 return 0; 6026 return 0;
5258} 6027}
5259 6028
@@ -5454,7 +6223,7 @@ static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5454 * ipr_set_supported_devs - Send Set Supported Devices for a device 6223 * ipr_set_supported_devs - Send Set Supported Devices for a device
5455 * @ipr_cmd: ipr command struct 6224 * @ipr_cmd: ipr command struct
5456 * 6225 *
5457 * This function send a Set Supported Devices to the adapter 6226 * This function sends a Set Supported Devices to the adapter
5458 * 6227 *
5459 * Return value: 6228 * Return value:
5460 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 6229 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
@@ -5463,7 +6232,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5463{ 6232{
5464 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6233 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5465 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; 6234 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5466 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5467 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6235 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5468 struct ipr_resource_entry *res = ipr_cmd->u.res; 6236 struct ipr_resource_entry *res = ipr_cmd->u.res;
5469 6237
@@ -5474,28 +6242,28 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5474 continue; 6242 continue;
5475 6243
5476 ipr_cmd->u.res = res; 6244 ipr_cmd->u.res = res;
5477 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids); 6245 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
5478 6246
5479 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6247 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5480 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6248 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5481 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6249 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5482 6250
5483 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; 6251 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6252 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
5484 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; 6253 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5485 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; 6254 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5486 6255
5487 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | 6256 ipr_init_ioadl(ipr_cmd,
5488 sizeof(struct ipr_supported_device)); 6257 ioa_cfg->vpd_cbs_dma +
5489 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma + 6258 offsetof(struct ipr_misc_cbs, supp_dev),
5490 offsetof(struct ipr_misc_cbs, supp_dev)); 6259 sizeof(struct ipr_supported_device),
5491 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6260 IPR_IOADL_FLAGS_WRITE_LAST);
5492 ioarcb->write_data_transfer_length =
5493 cpu_to_be32(sizeof(struct ipr_supported_device));
5494 6261
5495 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 6262 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5496 IPR_SET_SUP_DEVICE_TIMEOUT); 6263 IPR_SET_SUP_DEVICE_TIMEOUT);
5497 6264
5498 ipr_cmd->job_step = ipr_set_supported_devs; 6265 if (!ioa_cfg->sis64)
6266 ipr_cmd->job_step = ipr_set_supported_devs;
5499 return IPR_RC_JOB_RETURN; 6267 return IPR_RC_JOB_RETURN;
5500 } 6268 }
5501 6269
@@ -5503,36 +6271,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5503} 6271}
5504 6272
5505/** 6273/**
5506 * ipr_setup_write_cache - Disable write cache if needed
5507 * @ipr_cmd: ipr command struct
5508 *
5509 * This function sets up adapters write cache to desired setting
5510 *
5511 * Return value:
5512 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5513 **/
5514static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5515{
5516 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5517
5518 ipr_cmd->job_step = ipr_set_supported_devs;
5519 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5520 struct ipr_resource_entry, queue);
5521
5522 if (ioa_cfg->cache_state != CACHE_DISABLED)
5523 return IPR_RC_JOB_CONTINUE;
5524
5525 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5526 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5527 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5528 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5529
5530 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5531
5532 return IPR_RC_JOB_RETURN;
5533}
5534
5535/**
5536 * ipr_get_mode_page - Locate specified mode page 6274 * ipr_get_mode_page - Locate specified mode page
5537 * @mode_pages: mode page buffer 6275 * @mode_pages: mode page buffer
5538 * @page_code: page code to find 6276 * @page_code: page code to find
@@ -5690,10 +6428,9 @@ static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5690 * none 6428 * none
5691 **/ 6429 **/
5692static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, 6430static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5693 __be32 res_handle, u8 parm, u32 dma_addr, 6431 __be32 res_handle, u8 parm,
5694 u8 xfer_len) 6432 dma_addr_t dma_addr, u8 xfer_len)
5695{ 6433{
5696 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5697 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6434 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5698 6435
5699 ioarcb->res_handle = res_handle; 6436 ioarcb->res_handle = res_handle;
@@ -5703,11 +6440,7 @@ static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5703 ioarcb->cmd_pkt.cdb[1] = parm; 6440 ioarcb->cmd_pkt.cdb[1] = parm;
5704 ioarcb->cmd_pkt.cdb[4] = xfer_len; 6441 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5705 6442
5706 ioadl->flags_and_data_len = 6443 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
5707 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5708 ioadl->address = cpu_to_be32(dma_addr);
5709 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5710 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5711} 6444}
5712 6445
5713/** 6446/**
@@ -5737,7 +6470,9 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5737 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 6470 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5738 length); 6471 length);
5739 6472
5740 ipr_cmd->job_step = ipr_setup_write_cache; 6473 ipr_cmd->job_step = ipr_set_supported_devs;
6474 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6475 struct ipr_resource_entry, queue);
5741 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 6476 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5742 6477
5743 LEAVE; 6478 LEAVE;
@@ -5757,9 +6492,8 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5757 **/ 6492 **/
5758static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, 6493static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5759 __be32 res_handle, 6494 __be32 res_handle,
5760 u8 parm, u32 dma_addr, u8 xfer_len) 6495 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
5761{ 6496{
5762 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5763 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6497 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5764 6498
5765 ioarcb->res_handle = res_handle; 6499 ioarcb->res_handle = res_handle;
@@ -5768,11 +6502,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5768 ioarcb->cmd_pkt.cdb[4] = xfer_len; 6502 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5769 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 6503 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5770 6504
5771 ioadl->flags_and_data_len = 6505 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
5772 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5773 ioadl->address = cpu_to_be32(dma_addr);
5774 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5775 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5776} 6506}
5777 6507
5778/** 6508/**
@@ -5810,10 +6540,13 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5810 **/ 6540 **/
5811static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) 6541static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5812{ 6542{
6543 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5813 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 6544 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5814 6545
5815 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 6546 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5816 ipr_cmd->job_step = ipr_setup_write_cache; 6547 ipr_cmd->job_step = ipr_set_supported_devs;
6548 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6549 struct ipr_resource_entry, queue);
5817 return IPR_RC_JOB_CONTINUE; 6550 return IPR_RC_JOB_CONTINUE;
5818 } 6551 }
5819 6552
@@ -5953,24 +6686,36 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5953{ 6686{
5954 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6687 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5955 struct ipr_resource_entry *res, *temp; 6688 struct ipr_resource_entry *res, *temp;
5956 struct ipr_config_table_entry *cfgte; 6689 struct ipr_config_table_entry_wrapper cfgtew;
5957 int found, i; 6690 int entries, found, flag, i;
5958 LIST_HEAD(old_res); 6691 LIST_HEAD(old_res);
5959 6692
5960 ENTER; 6693 ENTER;
5961 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ) 6694 if (ioa_cfg->sis64)
6695 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6696 else
6697 flag = ioa_cfg->u.cfg_table->hdr.flags;
6698
6699 if (flag & IPR_UCODE_DOWNLOAD_REQ)
5962 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); 6700 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5963 6701
5964 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) 6702 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5965 list_move_tail(&res->queue, &old_res); 6703 list_move_tail(&res->queue, &old_res);
5966 6704
5967 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) { 6705 if (ioa_cfg->sis64)
5968 cfgte = &ioa_cfg->cfg_table->dev[i]; 6706 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
6707 else
6708 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6709
6710 for (i = 0; i < entries; i++) {
6711 if (ioa_cfg->sis64)
6712 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6713 else
6714 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
5969 found = 0; 6715 found = 0;
5970 6716
5971 list_for_each_entry_safe(res, temp, &old_res, queue) { 6717 list_for_each_entry_safe(res, temp, &old_res, queue) {
5972 if (!memcmp(&res->cfgte.res_addr, 6718 if (ipr_is_same_device(res, &cfgtew)) {
5973 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5974 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6719 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5975 found = 1; 6720 found = 1;
5976 break; 6721 break;
@@ -5987,24 +6732,27 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5987 res = list_entry(ioa_cfg->free_res_q.next, 6732 res = list_entry(ioa_cfg->free_res_q.next,
5988 struct ipr_resource_entry, queue); 6733 struct ipr_resource_entry, queue);
5989 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6734 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5990 ipr_init_res_entry(res); 6735 ipr_init_res_entry(res, &cfgtew);
5991 res->add_to_ml = 1; 6736 res->add_to_ml = 1;
5992 } 6737 }
5993 6738
5994 if (found) 6739 if (found)
5995 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); 6740 ipr_update_res_entry(res, &cfgtew);
5996 } 6741 }
5997 6742
5998 list_for_each_entry_safe(res, temp, &old_res, queue) { 6743 list_for_each_entry_safe(res, temp, &old_res, queue) {
5999 if (res->sdev) { 6744 if (res->sdev) {
6000 res->del_from_ml = 1; 6745 res->del_from_ml = 1;
6001 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; 6746 res->res_handle = IPR_INVALID_RES_HANDLE;
6002 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6747 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6003 } else {
6004 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6005 } 6748 }
6006 } 6749 }
6007 6750
6751 list_for_each_entry_safe(res, temp, &old_res, queue) {
6752 ipr_clear_res_target(res);
6753 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6754 }
6755
6008 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 6756 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6009 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; 6757 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6010 else 6758 else
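ipr_init_res_table now walks either table layout through a small wrapper: a union of pointers to the 32-bit and 64-bit entry types, selected by ioa_cfg->sis64, so the matching and update logic stays common. A standalone model of the wrapper idea (the field sets are invented for the demo):

#include <stdint.h>
#include <stdio.h>

struct cfgte32 { uint8_t bus, target, lun; };
struct cfgte64 { uint8_t res_path[8]; };

struct cfgte_wrapper {
        union {
                struct cfgte32 *cfgte;
                struct cfgte64 *cfgte64;
        } u;
};

static void describe(int sis64, struct cfgte_wrapper *w)
{
        if (sis64)
                printf("64-bit entry, first path byte %u\n",
                       w->u.cfgte64->res_path[0]);
        else
                printf("32-bit entry %u:%u:%u\n",
                       w->u.cfgte->bus, w->u.cfgte->target, w->u.cfgte->lun);
}

int main(void)
{
        struct cfgte32 e32 = { 0, 1, 0 };
        struct cfgte_wrapper w = { .u.cfgte = &e32 };

        describe(0, &w);
        return 0;
}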
@@ -6028,7 +6776,6 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6028{ 6776{
6029 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6777 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6030 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6778 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6031 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6032 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 6779 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6033 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 6780 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6034 6781
@@ -6042,16 +6789,11 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6042 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6789 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6043 6790
6044 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; 6791 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6045 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff; 6792 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6046 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff; 6793 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6047 6794
6048 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6795 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6049 ioarcb->read_data_transfer_length = 6796 IPR_IOADL_FLAGS_READ_LAST);
6050 cpu_to_be32(sizeof(struct ipr_config_table));
6051
6052 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
6053 ioadl->flags_and_data_len =
6054 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
6055 6797
6056 ipr_cmd->job_step = ipr_init_res_table; 6798 ipr_cmd->job_step = ipr_init_res_table;
6057 6799
@@ -6071,10 +6813,9 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6071 * none 6813 * none
6072 **/ 6814 **/
6073static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, 6815static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6074 u32 dma_addr, u8 xfer_len) 6816 dma_addr_t dma_addr, u8 xfer_len)
6075{ 6817{
6076 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6818 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6077 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6078 6819
6079 ENTER; 6820 ENTER;
6080 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 6821 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
@@ -6085,12 +6826,7 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6085 ioarcb->cmd_pkt.cdb[2] = page; 6826 ioarcb->cmd_pkt.cdb[2] = page;
6086 ioarcb->cmd_pkt.cdb[4] = xfer_len; 6827 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6087 6828
6088 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6829 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6089 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
6090
6091 ioadl->address = cpu_to_be32(dma_addr);
6092 ioadl->flags_and_data_len =
6093 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
6094 6830
6095 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 6831 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6096 LEAVE; 6832 LEAVE;
@@ -6161,13 +6897,9 @@ static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6161static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) 6897static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6162{ 6898{
6163 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6899 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6164 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6165 6900
6166 ENTER; 6901 ENTER;
6167 6902
6168 if (!ipr_inquiry_page_supported(page0, 1))
6169 ioa_cfg->cache_state = CACHE_NONE;
6170
6171 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; 6903 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6172 6904
6173 ipr_ioafp_inquiry(ipr_cmd, 1, 3, 6905 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
@@ -6235,7 +6967,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6235} 6967}
6236 6968
6237/** 6969/**
6238 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ. 6970 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6239 * @ipr_cmd: ipr command struct 6971 * @ipr_cmd: ipr command struct
6240 * 6972 *
6241 * This function sends an Identify Host Request Response Queue 6973 * This function sends an Identify Host Request Response Queue
@@ -6244,7 +6976,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6244 * Return value: 6976 * Return value:
6245 * IPR_RC_JOB_RETURN 6977 * IPR_RC_JOB_RETURN
6246 **/ 6978 **/
6247static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd) 6979static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
6248{ 6980{
6249 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6981 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6250 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6982 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
@@ -6256,19 +6988,32 @@ static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6256 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6988 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6257 6989
6258 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6990 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6991 if (ioa_cfg->sis64)
6992 ioarcb->cmd_pkt.cdb[1] = 0x1;
6259 ioarcb->cmd_pkt.cdb[2] = 6993 ioarcb->cmd_pkt.cdb[2] =
6260 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff; 6994 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6261 ioarcb->cmd_pkt.cdb[3] = 6995 ioarcb->cmd_pkt.cdb[3] =
6262 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff; 6996 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6263 ioarcb->cmd_pkt.cdb[4] = 6997 ioarcb->cmd_pkt.cdb[4] =
6264 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff; 6998 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6265 ioarcb->cmd_pkt.cdb[5] = 6999 ioarcb->cmd_pkt.cdb[5] =
6266 ((u32) ioa_cfg->host_rrq_dma) & 0xff; 7000 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
6267 ioarcb->cmd_pkt.cdb[7] = 7001 ioarcb->cmd_pkt.cdb[7] =
6268 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff; 7002 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6269 ioarcb->cmd_pkt.cdb[8] = 7003 ioarcb->cmd_pkt.cdb[8] =
6270 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff; 7004 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6271 7005
7006 if (ioa_cfg->sis64) {
7007 ioarcb->cmd_pkt.cdb[10] =
7008 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7009 ioarcb->cmd_pkt.cdb[11] =
7010 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7011 ioarcb->cmd_pkt.cdb[12] =
7012 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7013 ioarcb->cmd_pkt.cdb[13] =
7014 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7015 }
7016
6272 ipr_cmd->job_step = ipr_ioafp_std_inquiry; 7017 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6273 7018
6274 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7019 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
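The Identify Host RRQ CDB carries the queue's bus address one byte at a time, most-significant first: bytes 2-5 hold bits 31..0 as before, and the SIS-64 path adds bits 63..32 in bytes 10-13. A standalone demo of exactly that packing:

#include <stdint.h>
#include <stdio.h>

static void pack_hrrq_cdb(uint8_t *cdb, uint64_t dma, int sis64)
{
        cdb[2] = (dma >> 24) & 0xff;
        cdb[3] = (dma >> 16) & 0xff;
        cdb[4] = (dma >> 8) & 0xff;
        cdb[5] = dma & 0xff;
        if (sis64) {
                cdb[10] = (dma >> 56) & 0xff;
                cdb[11] = (dma >> 48) & 0xff;
                cdb[12] = (dma >> 40) & 0xff;
                cdb[13] = (dma >> 32) & 0xff;
        }
}

int main(void)
{
        uint8_t cdb[16] = { 0 };

        pack_hrrq_cdb(cdb, 0x0000000123456789ULL, 1);
        printf("cdb[2..5]=%02x %02x %02x %02x cdb[10..13]=%02x %02x %02x %02x\n",
               cdb[2], cdb[3], cdb[4], cdb[5],
               cdb[10], cdb[11], cdb[12], cdb[13]);
        return 0;
}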
@@ -6349,7 +7094,58 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6349 ioa_cfg->toggle_bit = 1; 7094 ioa_cfg->toggle_bit = 1;
6350 7095
6351 /* Zero out config table */ 7096 /* Zero out config table */
6352 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table)); 7097 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7098}
7099
7100/**
7101 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7102 * @ipr_cmd: ipr command struct
7103 *
7104 * Return value:
7105 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7106 **/
7107static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7108{
7109 unsigned long stage, stage_time;
7110 u32 feedback;
7111 volatile u32 int_reg;
7112 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7113 u64 maskval = 0;
7114
7115 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7116 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7117 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7118
7119 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7120
7121 /* sanity check the stage_time value */
7122 if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7123 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7124 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7125 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7126
7127 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7128 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7129 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7130 stage_time = ioa_cfg->transop_timeout;
7131 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7132 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7133 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7134 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7135 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7136 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7137 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7138 return IPR_RC_JOB_CONTINUE;
7139 }
7140
7141 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7142 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7143 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7144 ipr_cmd->done = ipr_reset_ioa_job;
7145 add_timer(&ipr_cmd->timer);
7146 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7147
7148 return IPR_RC_JOB_RETURN;
6353} 7149}
6354 7150
6355/** 7151/**
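ipr_reset_next_stage splits one feedback-register read into a stage field and a stage-time field, then clamps the time into a sane window before arming the timer. A standalone model of the decode-and-clamp step (mask and limit values are illustrative, not the IPR_IPL_INIT_* constants):

#include <stdint.h>
#include <stdio.h>

#define STAGE_MASK      0xff000000u  /* placeholder */
#define STAGE_TIME_MASK 0x00ffffffu  /* placeholder */
#define MIN_STAGE_TIME  5u           /* placeholder */
#define MAX_STAGE_TIME  200u         /* placeholder */

int main(void)
{
        uint32_t feedback = 0x02000002u;    /* pretend register read */
        uint32_t stage = (feedback & STAGE_MASK) >> 24;
        uint32_t stage_time = feedback & STAGE_TIME_MASK;

        /* sanity-clamp the reported time, as the driver does */
        if (stage_time < MIN_STAGE_TIME)
                stage_time = MIN_STAGE_TIME;
        else if (stage_time > MAX_STAGE_TIME)
                stage_time = MAX_STAGE_TIME;

        printf("stage=0x%x, timeout=%u s\n", stage, stage_time);
        return 0;
}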
@@ -6368,7 +7164,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6368 volatile u32 int_reg; 7164 volatile u32 int_reg;
6369 7165
6370 ENTER; 7166 ENTER;
6371 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq; 7167 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
6372 ipr_init_ioa_mem(ioa_cfg); 7168 ipr_init_ioa_mem(ioa_cfg);
6373 7169
6374 ioa_cfg->allow_interrupts = 1; 7170 ioa_cfg->allow_interrupts = 1;
@@ -6376,19 +7172,27 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6376 7172
6377 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 7173 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6378 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), 7174 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6379 ioa_cfg->regs.clr_interrupt_mask_reg); 7175 ioa_cfg->regs.clr_interrupt_mask_reg32);
6380 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7176 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6381 return IPR_RC_JOB_CONTINUE; 7177 return IPR_RC_JOB_CONTINUE;
6382 } 7178 }
6383 7179
6384 /* Enable destructive diagnostics on IOA */ 7180 /* Enable destructive diagnostics on IOA */
6385 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg); 7181 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7182
7183 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7184 if (ioa_cfg->sis64)
7185 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
6386 7186
6387 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6388 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7187 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6389 7188
6390 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); 7189 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6391 7190
7191 if (ioa_cfg->sis64) {
7192 ipr_cmd->job_step = ipr_reset_next_stage;
7193 return IPR_RC_JOB_CONTINUE;
7194 }
7195
6392 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7196 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6393 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); 7197 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6394 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 7198 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
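[Note] A recurring pair in this hunk: a writel() to a mask register immediately followed by a readl() of the matching sense register. The read flushes the posted MMIO write, so the mask change has demonstrably reached the adapter before execution continues. A kernel-style fragment of the idiom; the function and register names are placeholders, not ipr symbols:

#include <linux/io.h>

/* Sketch only: flush a posted MMIO write by reading the device back. */
static void toy_unmask_irqs(void __iomem *clr_mask_reg,
			    void __iomem *sense_mask_reg, u32 bits)
{
	writel(bits, clr_mask_reg);	/* may linger in a posted-write buffer */
	(void)readl(sense_mask_reg);	/* read-back forces completion */
}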
@@ -6458,7 +7262,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6458 7262
6459 mailbox = readl(ioa_cfg->ioa_mailbox); 7263 mailbox = readl(ioa_cfg->ioa_mailbox);
6460 7264
6461 if (!ipr_sdt_is_fmt2(mailbox)) { 7265 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
6462 ipr_unit_check_no_data(ioa_cfg); 7266 ipr_unit_check_no_data(ioa_cfg);
6463 return; 7267 return;
6464 } 7268 }
@@ -6467,15 +7271,20 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6467 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, 7271 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6468 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); 7272 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6469 7273
6470 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) || 7274 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
6471 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) { 7275 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7276 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
6472 ipr_unit_check_no_data(ioa_cfg); 7277 ipr_unit_check_no_data(ioa_cfg);
6473 return; 7278 return;
6474 } 7279 }
6475 7280
6476 /* Find length of the first sdt entry (UC buffer) */ 7281 /* Find length of the first sdt entry (UC buffer) */
6477 length = (be32_to_cpu(sdt.entry[0].end_offset) - 7282 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
6478 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK; 7283 length = be32_to_cpu(sdt.entry[0].end_token);
7284 else
7285 length = (be32_to_cpu(sdt.entry[0].end_token) -
7286 be32_to_cpu(sdt.entry[0].start_token)) &
7287 IPR_FMT2_MBX_ADDR_MASK;
6479 7288
6480 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, 7289 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6481 struct ipr_hostrcb, queue); 7290 struct ipr_hostrcb, queue);
@@ -6483,13 +7292,13 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6483 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); 7292 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6484 7293
6485 rc = ipr_get_ldump_data_section(ioa_cfg, 7294 rc = ipr_get_ldump_data_section(ioa_cfg,
6486 be32_to_cpu(sdt.entry[0].bar_str_offset), 7295 be32_to_cpu(sdt.entry[0].start_token),
6487 (__be32 *)&hostrcb->hcam, 7296 (__be32 *)&hostrcb->hcam,
6488 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); 7297 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6489 7298
6490 if (!rc) { 7299 if (!rc) {
6491 ipr_handle_log_data(ioa_cfg, hostrcb); 7300 ipr_handle_log_data(ioa_cfg, hostrcb);
6492 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); 7301 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
6493 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && 7302 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6494 ioa_cfg->sdt_state == GET_DUMP) 7303 ioa_cfg->sdt_state == GET_DUMP)
6495 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 7304 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
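[Example] The unit-check path now accepts both SDT layouts: a format 3 table stores the UC buffer length directly in end_token, while format 2 encodes start/end tokens whose masked difference is the length. A small sketch of that branch; the token values and the address mask are placeholders:

#include <stdint.h>
#include <stdio.h>

#define ADDR_MASK 0x0fffffffu	/* stand-in for IPR_FMT2_MBX_ADDR_MASK */

static uint32_t sdt_entry_len(int is_fmt3, uint32_t start_tok, uint32_t end_tok)
{
	if (is_fmt3)
		return end_tok;				/* length stored directly */
	return (end_tok - start_tok) & ADDR_MASK;	/* fmt2: masked span */
}

int main(void)
{
	printf("fmt3 length: %u\n", (unsigned)sdt_entry_len(1, 0, 0x400));
	printf("fmt2 length: %u\n",
	       (unsigned)sdt_entry_len(0, 0x80001000u, 0x80001400u));
	return 0;
}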
@@ -6516,6 +7325,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6516 int rc; 7325 int rc;
6517 7326
6518 ENTER; 7327 ENTER;
7328 ioa_cfg->pdev->state_saved = true;
6519 rc = pci_restore_state(ioa_cfg->pdev); 7329 rc = pci_restore_state(ioa_cfg->pdev);
6520 7330
6521 if (rc != PCIBIOS_SUCCESSFUL) { 7331 if (rc != PCIBIOS_SUCCESSFUL) {
@@ -6716,7 +7526,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6716 7526
6717 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { 7527 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6718 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 7528 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6719 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg); 7529 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
6720 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; 7530 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6721 } else { 7531 } else {
6722 ipr_cmd->job_step = ioa_cfg->reset; 7532 ipr_cmd->job_step = ioa_cfg->reset;
@@ -6779,7 +7589,10 @@ static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6779 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; 7589 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6780 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; 7590 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6781 7591
6782 ipr_build_ucode_ioadl(ipr_cmd, sglist); 7592 if (ioa_cfg->sis64)
7593 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7594 else
7595 ipr_build_ucode_ioadl(ipr_cmd, sglist);
6783 ipr_cmd->job_step = ipr_reset_ucode_download_done; 7596 ipr_cmd->job_step = ipr_reset_ucode_download_done;
6784 7597
6785 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7598 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
@@ -7148,8 +7961,8 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7148 ipr_free_cmd_blks(ioa_cfg); 7961 ipr_free_cmd_blks(ioa_cfg);
7149 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, 7962 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7150 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); 7963 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7151 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table), 7964 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7152 ioa_cfg->cfg_table, 7965 ioa_cfg->u.cfg_table,
7153 ioa_cfg->cfg_table_dma); 7966 ioa_cfg->cfg_table_dma);
7154 7967
7155 for (i = 0; i < IPR_NUM_HCAMS; i++) { 7968 for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7203,7 +8016,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7203 int i; 8016 int i;
7204 8017
7205 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, 8018 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
7206 sizeof(struct ipr_cmnd), 8, 0); 8019 sizeof(struct ipr_cmnd), 16, 0);
7207 8020
7208 if (!ioa_cfg->ipr_cmd_pool) 8021 if (!ioa_cfg->ipr_cmd_pool)
7209 return -ENOMEM; 8022 return -ENOMEM;
@@ -7221,13 +8034,25 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7221 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; 8034 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
7222 8035
7223 ioarcb = &ipr_cmd->ioarcb; 8036 ioarcb = &ipr_cmd->ioarcb;
7224 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr); 8037 ipr_cmd->dma_addr = dma_addr;
8038 if (ioa_cfg->sis64)
8039 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8040 else
8041 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8042
7225 ioarcb->host_response_handle = cpu_to_be32(i << 2); 8043 ioarcb->host_response_handle = cpu_to_be32(i << 2);
7226 ioarcb->write_ioadl_addr = 8044 if (ioa_cfg->sis64) {
7227 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); 8045 ioarcb->u.sis64_addr_data.data_ioadl_addr =
7228 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 8046 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
7229 ioarcb->ioasa_host_pci_addr = 8047 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
7230 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); 8048 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8049 } else {
8050 ioarcb->write_ioadl_addr =
8051 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8052 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8053 ioarcb->ioasa_host_pci_addr =
8054 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8055 }
7231 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); 8056 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7232 ipr_cmd->cmd_index = i; 8057 ipr_cmd->cmd_index = i;
7233 ipr_cmd->ioa_cfg = ioa_cfg; 8058 ipr_cmd->ioa_cfg = ioa_cfg;
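[Example] Each command block above is carved from a single coherent DMA allocation, so the device-visible addresses of its embedded members are simply the block's bus address plus offsetof() into the struct, exactly as the hunk computes for the ioadl/ioadl64 chains and the IOASA. The idiom in miniature, with toy types standing in for struct ipr_cmnd:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_cmd {		/* stand-in for struct ipr_cmnd */
	uint32_t ioarcb[32];
	uint64_t ioadl[16];
	uint8_t  ioasa[64];
};

int main(void)
{
	uint64_t dma_addr = 0x100000;	/* pretend pci_pool_alloc() bus address */
	uint64_t ioadl_dma = dma_addr + offsetof(struct toy_cmd, ioadl);
	uint64_t ioasa_dma = dma_addr + offsetof(struct toy_cmd, ioasa);

	printf("ioadl @ 0x%llx, ioasa @ 0x%llx\n",
	       (unsigned long long)ioadl_dma, (unsigned long long)ioasa_dma);
	return 0;
}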
@@ -7254,13 +8079,24 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7254 8079
7255 ENTER; 8080 ENTER;
7256 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) * 8081 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7257 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL); 8082 ioa_cfg->max_devs_supported, GFP_KERNEL);
7258 8083
7259 if (!ioa_cfg->res_entries) 8084 if (!ioa_cfg->res_entries)
7260 goto out; 8085 goto out;
7261 8086
7262 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++) 8087 if (ioa_cfg->sis64) {
8088 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8089 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8090 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8091 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8092 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8093 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8094 }
8095
8096 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
7263 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); 8097 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8098 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8099 }
7264 8100
7265 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev, 8101 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7266 sizeof(struct ipr_misc_cbs), 8102 sizeof(struct ipr_misc_cbs),
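[Example] For sis64 adapters the hunk above sizes three ID bitmaps with BITS_TO_LONGS(), which rounds a bit count up to a whole number of unsigned longs. The arithmetic in a user-space sketch; calloc() stands in for kzalloc():

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	 (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int max_devs = 1024;	/* e.g. a sis64 device limit */
	size_t nlongs = BITS_TO_LONGS(max_devs);

	unsigned long *target_ids = calloc(nlongs, sizeof(unsigned long));
	if (!target_ids)
		return 1;

	printf("%u bits -> %zu longs (%zu bytes)\n", max_devs, nlongs,
	       nlongs * sizeof(unsigned long));
	free(target_ids);
	return 0;
}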
@@ -7279,11 +8115,11 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7279 if (!ioa_cfg->host_rrq) 8115 if (!ioa_cfg->host_rrq)
7280 goto out_ipr_free_cmd_blocks; 8116 goto out_ipr_free_cmd_blocks;
7281 8117
7282 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev, 8118 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7283 sizeof(struct ipr_config_table), 8119 ioa_cfg->cfg_table_size,
7284 &ioa_cfg->cfg_table_dma); 8120 &ioa_cfg->cfg_table_dma);
7285 8121
7286 if (!ioa_cfg->cfg_table) 8122 if (!ioa_cfg->u.cfg_table)
7287 goto out_free_host_rrq; 8123 goto out_free_host_rrq;
7288 8124
7289 for (i = 0; i < IPR_NUM_HCAMS; i++) { 8125 for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7317,8 +8153,9 @@ out_free_hostrcb_dma:
7317 ioa_cfg->hostrcb[i], 8153 ioa_cfg->hostrcb[i],
7318 ioa_cfg->hostrcb_dma[i]); 8154 ioa_cfg->hostrcb_dma[i]);
7319 } 8155 }
7320 pci_free_consistent(pdev, sizeof(struct ipr_config_table), 8156 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
7321 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma); 8157 ioa_cfg->u.cfg_table,
8158 ioa_cfg->cfg_table_dma);
7322out_free_host_rrq: 8159out_free_host_rrq:
7323 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, 8160 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7324 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); 8161 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
@@ -7393,15 +8230,21 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7393 init_waitqueue_head(&ioa_cfg->reset_wait_q); 8230 init_waitqueue_head(&ioa_cfg->reset_wait_q);
7394 init_waitqueue_head(&ioa_cfg->msi_wait_q); 8231 init_waitqueue_head(&ioa_cfg->msi_wait_q);
7395 ioa_cfg->sdt_state = INACTIVE; 8232 ioa_cfg->sdt_state = INACTIVE;
7396 if (ipr_enable_cache)
7397 ioa_cfg->cache_state = CACHE_ENABLED;
7398 else
7399 ioa_cfg->cache_state = CACHE_DISABLED;
7400 8233
7401 ipr_initialize_bus_attr(ioa_cfg); 8234 ipr_initialize_bus_attr(ioa_cfg);
8235 ioa_cfg->max_devs_supported = ipr_max_devs;
7402 8236
7403 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; 8237 if (ioa_cfg->sis64) {
7404 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; 8238 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8239 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8240 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8241 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8242 } else {
8243 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8244 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8245 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8246 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8247 }
7405 host->max_channel = IPR_MAX_BUS_TO_SCAN; 8248 host->max_channel = IPR_MAX_BUS_TO_SCAN;
7406 host->unique_id = host->host_no; 8249 host->unique_id = host->host_no;
7407 host->max_cmd_len = IPR_MAX_CDB_LEN; 8250 host->max_cmd_len = IPR_MAX_CDB_LEN;
@@ -7413,13 +8256,26 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7413 8256
7414 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; 8257 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7415 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; 8258 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8259 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
7416 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; 8260 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8261 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
7417 t->clr_interrupt_reg = base + p->clr_interrupt_reg; 8262 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8263 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
7418 t->sense_interrupt_reg = base + p->sense_interrupt_reg; 8264 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8265 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
7419 t->ioarrin_reg = base + p->ioarrin_reg; 8266 t->ioarrin_reg = base + p->ioarrin_reg;
7420 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; 8267 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8268 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
7421 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; 8269 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8270 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
7422 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; 8271 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8272 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8273
8274 if (ioa_cfg->sis64) {
8275 t->init_feedback_reg = base + p->init_feedback_reg;
8276 t->dump_addr_reg = base + p->dump_addr_reg;
8277 t->dump_data_reg = base + p->dump_data_reg;
8278 }
7423} 8279}
7424 8280
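[Example] ipr_init_ioa_cfg() resolves every device register as base + offset drawn from a per-chip offset table, so one driver body can serve several register layouts, with the 32-bit alias registers and the sis64-only feedback/dump registers filled in the same pass. A compact table-driven sketch; the offsets and field names are invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct chip_offsets {		/* like the per-chip config block */
	uint32_t set_mask;
	uint32_t clr_mask;
	uint32_t sense_mask;
};

struct mapped_regs {		/* resolved MMIO addresses */
	uintptr_t set_mask;
	uintptr_t clr_mask;
	uintptr_t sense_mask;
};

static void map_regs(struct mapped_regs *t, uintptr_t base,
		     const struct chip_offsets *p)
{
	t->set_mask   = base + p->set_mask;
	t->clr_mask   = base + p->clr_mask;
	t->sense_mask = base + p->sense_mask;
}

int main(void)
{
	const struct chip_offsets toy_chip = { 0x200, 0x204, 0x208 };
	struct mapped_regs regs;

	map_regs(&regs, 0xf0000000u, &toy_chip);
	printf("set mask reg @ 0x%lx\n", (unsigned long)regs.set_mask);
	return 0;
}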
7425/** 8281/**
@@ -7491,7 +8347,7 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
7491 init_waitqueue_head(&ioa_cfg->msi_wait_q); 8347 init_waitqueue_head(&ioa_cfg->msi_wait_q);
7492 ioa_cfg->msi_received = 0; 8348 ioa_cfg->msi_received = 0;
7493 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 8349 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7494 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg); 8350 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
7495 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8351 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7496 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 8352 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7497 8353
@@ -7502,7 +8358,7 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
7502 } else if (ipr_debug) 8358 } else if (ipr_debug)
7503 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq); 8359 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
7504 8360
7505 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg); 8361 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
7506 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 8362 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7507 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); 8363 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
7508 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 8364 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
@@ -7572,6 +8428,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7572 goto out_scsi_host_put; 8428 goto out_scsi_host_put;
7573 } 8429 }
7574 8430
8431 /* set SIS 32 or SIS 64 */
8432 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
7575 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; 8433 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7576 8434
7577 if (ipr_transop_timeout) 8435 if (ipr_transop_timeout)
@@ -7609,7 +8467,16 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7609 8467
7610 pci_set_master(pdev); 8468 pci_set_master(pdev);
7611 8469
7612 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 8470 if (ioa_cfg->sis64) {
8471 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8472 if (rc < 0) {
8473 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8474 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8475 }
8476
8477 } else
8478 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8479
7613 if (rc < 0) { 8480 if (rc < 0) {
7614 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); 8481 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7615 goto cleanup_nomem; 8482 goto cleanup_nomem;
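[Example] The probe path now asks for a 64-bit DMA mask on sis64 hardware and quietly falls back to 32-bit if the platform refuses, so the same binary works behind IOMMUs or firmware that cannot map the full range. A hedged kernel-style sketch of the pattern; modern code would reach for dma_set_mask_and_coherent(), and the helper name here is invented:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int toy_set_dma_mask(struct pci_dev *pdev, bool wants_64bit)
{
	int rc;

	if (wants_64bit) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!rc)
			return 0;	/* 64-bit addressing accepted */
		dev_dbg(&pdev->dev, "falling back to 32 bit DMA\n");
	}
	/* 32-bit-only hardware, or the 64-bit attempt was refused */
	return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}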
@@ -7651,6 +8518,15 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7651 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 8518 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7652 goto cleanup_nomem; 8519 goto cleanup_nomem;
7653 8520
8521 if (ioa_cfg->sis64)
8522 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8523 + ((sizeof(struct ipr_config_table_entry64)
8524 * ioa_cfg->max_devs_supported)));
8525 else
8526 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8527 + ((sizeof(struct ipr_config_table_entry)
8528 * ioa_cfg->max_devs_supported)));
8529
7654 rc = ipr_alloc_mem(ioa_cfg); 8530 rc = ipr_alloc_mem(ioa_cfg);
7655 if (rc < 0) { 8531 if (rc < 0) {
7656 dev_err(&pdev->dev, 8532 dev_err(&pdev->dev,
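[Example] cfg_table_size above is one header plus max_devs_supported fixed-size entries, with distinct header/entry types for the 32- and 64-bit SIS layouts. The header-plus-array sizing in miniature, with toy struct sizes:

#include <stdio.h>
#include <stdint.h>

struct toy_hdr   { uint8_t bytes[16]; };  /* stand-in for a config table header */
struct toy_entry { uint8_t bytes[32]; };  /* stand-in for a config table entry */

int main(void)
{
	unsigned int max_devs = 1024;
	size_t size = sizeof(struct toy_hdr) +
		      sizeof(struct toy_entry) * max_devs;

	printf("config table: %zu bytes\n", size);
	return 0;
}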
@@ -7662,9 +8538,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7662 * If HRRQ updated interrupt is not masked, or reset alert is set, 8538 * If HRRQ updated interrupt is not masked, or reset alert is set,
7663 * the card is in an unknown state and needs a hard reset 8539 * the card is in an unknown state and needs a hard reset
7664 */ 8540 */
7665 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8541 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
7666 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); 8542 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
7667 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg); 8543 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
7668 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) 8544 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7669 ioa_cfg->needs_hard_reset = 1; 8545 ioa_cfg->needs_hard_reset = 1;
7670 if (interrupts & IPR_PCII_ERROR_INTERRUPTS) 8546 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
@@ -7952,9 +8828,6 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7952 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 8828 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
7953 IPR_USE_LONG_TRANSOP_TIMEOUT }, 8829 IPR_USE_LONG_TRANSOP_TIMEOUT },
7954 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 8830 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7955 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7956 IPR_USE_LONG_TRANSOP_TIMEOUT },
7957 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7958 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, 8831 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
7959 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 8832 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7960 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 8833 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
@@ -7969,9 +8842,22 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7969 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 8842 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7970 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 8843 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
7971 IPR_USE_LONG_TRANSOP_TIMEOUT }, 8844 IPR_USE_LONG_TRANSOP_TIMEOUT },
7972 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E, 8845 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
7973 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 8846 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
7974 IPR_USE_LONG_TRANSOP_TIMEOUT }, 8847 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8848 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
8849 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8850 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
8851 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8852 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
8853 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8854 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
8855 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8856 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
8857 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8858 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
8859 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8860 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
7975 { } 8861 { }
7976}; 8862};
7977MODULE_DEVICE_TABLE(pci, ipr_pci_table); 8863MODULE_DEVICE_TABLE(pci, ipr_pci_table);
@@ -7991,6 +8877,61 @@ static struct pci_driver ipr_driver = {
7991}; 8877};
7992 8878
7993/** 8879/**
8880 * ipr_halt_done - Shutdown prepare completion
8881 * @ipr_cmd:	ipr command struct
8882 * Return value:
8883 * none
8884 **/
8885static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
8886{
8887 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8888
8889 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8890}
8891
8892/**
8893 * ipr_halt - Issue shutdown prepare to all adapters
8894 *
8895 * Return value:
8896 * NOTIFY_OK if shutdown prepare was issued / NOTIFY_DONE if the event is ignored
8897 **/
8898static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
8899{
8900 struct ipr_cmnd *ipr_cmd;
8901 struct ipr_ioa_cfg *ioa_cfg;
8902 unsigned long flags = 0;
8903
8904 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
8905 return NOTIFY_DONE;
8906
8907 spin_lock(&ipr_driver_lock);
8908
8909 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
8910 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8911 if (!ioa_cfg->allow_cmds) {
8912 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8913 continue;
8914 }
8915
8916 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8917 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8918 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8919 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8920 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
8921
8922 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
8923 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8924 }
8925 spin_unlock(&ipr_driver_lock);
8926
8927 return NOTIFY_OK;
8928}
8929
8930static struct notifier_block ipr_notifier = {
8931	.notifier_call = ipr_halt,
8932};
8933
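[Example] The shutdown-prepare hook rides the kernel's reboot notifier chain: a notifier_block is registered at module init, its callback filters on the event code, and NOTIFY_DONE tells the chain the event was not of interest. A minimal, hedged sketch of the mechanism (the names and the log line are illustrative):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int toy_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
	/* same event filter ipr_halt() applies */
	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	pr_info("preparing adapters for shutdown\n");	/* illustrative work */
	return NOTIFY_OK;
}

static struct notifier_block toy_notifier = {
	.notifier_call = toy_halt,
};

/* paired at module init/exit, as ipr_init()/ipr_exit() do:
 *	register_reboot_notifier(&toy_notifier);
 *	...
 *	unregister_reboot_notifier(&toy_notifier);
 */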
8934/**
7994 * ipr_init - Module entry point 8935 * ipr_init - Module entry point
7995 * 8936 *
7996 * Return value: 8937 * Return value:
@@ -8001,6 +8942,7 @@ static int __init ipr_init(void)
8001 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", 8942 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8002 IPR_DRIVER_VERSION, IPR_DRIVER_DATE); 8943 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8003 8944
8945 register_reboot_notifier(&ipr_notifier);
8004 return pci_register_driver(&ipr_driver); 8946 return pci_register_driver(&ipr_driver);
8005} 8947}
8006 8948
@@ -8014,6 +8956,7 @@ static int __init ipr_init(void)
8014 **/ 8956 **/
8015static void __exit ipr_exit(void) 8957static void __exit ipr_exit(void)
8016{ 8958{
8959 unregister_reboot_notifier(&ipr_notifier);
8017 pci_unregister_driver(&ipr_driver); 8960 pci_unregister_driver(&ipr_driver);
8018} 8961}
8019 8962