-rw-r--r--   drivers/scsi/aacraid/commctrl.c      4
-rw-r--r--   drivers/scsi/arcmsr/arcmsr.h        29
-rw-r--r--   drivers/scsi/arcmsr/arcmsr_attr.c    3
-rw-r--r--   drivers/scsi/arcmsr/arcmsr_hba.c   684
-rw-r--r--   drivers/scsi/be2iscsi/be_mgmt.c      5
-rw-r--r--   drivers/scsi/bfa/bfa_core.c         22
-rw-r--r--   drivers/scsi/ipr.c                 221
-rw-r--r--   drivers/scsi/ipr.h                  31
-rw-r--r--   drivers/scsi/iscsi_tcp.c             6
-rw-r--r--   drivers/scsi/scsi_scan.c             9
10 files changed, 708 insertions, 306 deletions
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9c0c91178538..1a5bf5724750 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
| @@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
| 655 | /* Does this really need to be GFP_DMA? */ | 655 | /* Does this really need to be GFP_DMA? */ |
| 656 | p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); | 656 | p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); |
| 657 | if(!p) { | 657 | if(!p) { |
| 658 | kfree (usg); | 658 | dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", |
| 659 | dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", | ||
| 660 | usg->sg[i].count,i,usg->count)); | 659 | usg->sg[i].count,i,usg->count)); |
| 660 | kfree(usg); | ||
| 661 | rcode = -ENOMEM; | 661 | rcode = -ENOMEM; |
| 662 | goto cleanup; | 662 | goto cleanup; |
| 663 | } | 663 | } |
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index ab646e580d64..ce5371b3cdd5 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
| @@ -48,7 +48,7 @@ struct device_attribute; | |||
| 48 | /*The limit of outstanding scsi command that firmware can handle*/ | 48 | /*The limit of outstanding scsi command that firmware can handle*/ |
| 49 | #define ARCMSR_MAX_OUTSTANDING_CMD 256 | 49 | #define ARCMSR_MAX_OUTSTANDING_CMD 256 |
| 50 | #define ARCMSR_MAX_FREECCB_NUM 320 | 50 | #define ARCMSR_MAX_FREECCB_NUM 320 |
| 51 | #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27" | 51 | #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/11/03" |
| 52 | #define ARCMSR_SCSI_INITIATOR_ID 255 | 52 | #define ARCMSR_SCSI_INITIATOR_ID 255 |
| 53 | #define ARCMSR_MAX_XFER_SECTORS 512 | 53 | #define ARCMSR_MAX_XFER_SECTORS 512 |
| 54 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 | 54 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 |
| @@ -110,6 +110,8 @@ struct CMD_MESSAGE_FIELD | |||
| 110 | #define FUNCTION_SAY_HELLO 0x0807 | 110 | #define FUNCTION_SAY_HELLO 0x0807 |
| 111 | #define FUNCTION_SAY_GOODBYE 0x0808 | 111 | #define FUNCTION_SAY_GOODBYE 0x0808 |
| 112 | #define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809 | 112 | #define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809 |
| 113 | #define FUNCTION_GET_FIRMWARE_STATUS 0x080A | ||
| 114 | #define FUNCTION_HARDWARE_RESET 0x080B | ||
| 113 | /* ARECA IO CONTROL CODE*/ | 115 | /* ARECA IO CONTROL CODE*/ |
| 114 | #define ARCMSR_MESSAGE_READ_RQBUFFER \ | 116 | #define ARCMSR_MESSAGE_READ_RQBUFFER \ |
| 115 | ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER | 117 | ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER |
| @@ -133,6 +135,7 @@ struct CMD_MESSAGE_FIELD | |||
| 133 | #define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001 | 135 | #define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001 |
| 134 | #define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006 | 136 | #define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006 |
| 135 | #define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F | 137 | #define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F |
| 138 | #define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088 | ||
| 136 | /* | 139 | /* |
| 137 | ************************************************************* | 140 | ************************************************************* |
| 138 | ** structure for holding DMA address data | 141 | ** structure for holding DMA address data |
| @@ -341,13 +344,13 @@ struct MessageUnit_B | |||
| 341 | uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; | 344 | uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; |
| 342 | uint32_t postq_index; | 345 | uint32_t postq_index; |
| 343 | uint32_t doneq_index; | 346 | uint32_t doneq_index; |
| 344 | void __iomem *drv2iop_doorbell_reg; | 347 | uint32_t __iomem *drv2iop_doorbell_reg; |
| 345 | void __iomem *drv2iop_doorbell_mask_reg; | 348 | uint32_t __iomem *drv2iop_doorbell_mask_reg; |
| 346 | void __iomem *iop2drv_doorbell_reg; | 349 | uint32_t __iomem *iop2drv_doorbell_reg; |
| 347 | void __iomem *iop2drv_doorbell_mask_reg; | 350 | uint32_t __iomem *iop2drv_doorbell_mask_reg; |
| 348 | void __iomem *msgcode_rwbuffer_reg; | 351 | uint32_t __iomem *msgcode_rwbuffer_reg; |
| 349 | void __iomem *ioctl_wbuffer_reg; | 352 | uint32_t __iomem *ioctl_wbuffer_reg; |
| 350 | void __iomem *ioctl_rbuffer_reg; | 353 | uint32_t __iomem *ioctl_rbuffer_reg; |
| 351 | }; | 354 | }; |
| 352 | 355 | ||
| 353 | /* | 356 | /* |
| @@ -375,6 +378,7 @@ struct AdapterControlBlock | |||
| 375 | /* message unit ATU inbound base address0 */ | 378 | /* message unit ATU inbound base address0 */ |
| 376 | 379 | ||
| 377 | uint32_t acb_flags; | 380 | uint32_t acb_flags; |
| 381 | uint8_t adapter_index; | ||
| 378 | #define ACB_F_SCSISTOPADAPTER 0x0001 | 382 | #define ACB_F_SCSISTOPADAPTER 0x0001 |
| 379 | #define ACB_F_MSG_STOP_BGRB 0x0002 | 383 | #define ACB_F_MSG_STOP_BGRB 0x0002 |
| 380 | /* stop RAID background rebuild */ | 384 | /* stop RAID background rebuild */ |
| @@ -390,7 +394,7 @@ struct AdapterControlBlock | |||
| 390 | #define ACB_F_BUS_RESET 0x0080 | 394 | #define ACB_F_BUS_RESET 0x0080 |
| 391 | #define ACB_F_IOP_INITED 0x0100 | 395 | #define ACB_F_IOP_INITED 0x0100 |
| 392 | /* iop init */ | 396 | /* iop init */ |
| 393 | 397 | #define ACB_F_FIRMWARE_TRAP 0x0400 | |
| 394 | struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; | 398 | struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; |
| 395 | /* used for memory free */ | 399 | /* used for memory free */ |
| 396 | struct list_head ccb_free_list; | 400 | struct list_head ccb_free_list; |
| @@ -423,12 +427,19 @@ struct AdapterControlBlock | |||
| 423 | #define ARECA_RAID_GOOD 0xaa | 427 | #define ARECA_RAID_GOOD 0xaa |
| 424 | uint32_t num_resets; | 428 | uint32_t num_resets; |
| 425 | uint32_t num_aborts; | 429 | uint32_t num_aborts; |
| 430 | uint32_t signature; | ||
| 426 | uint32_t firm_request_len; | 431 | uint32_t firm_request_len; |
| 427 | uint32_t firm_numbers_queue; | 432 | uint32_t firm_numbers_queue; |
| 428 | uint32_t firm_sdram_size; | 433 | uint32_t firm_sdram_size; |
| 429 | uint32_t firm_hd_channels; | 434 | uint32_t firm_hd_channels; |
| 430 | char firm_model[12]; | 435 | char firm_model[12]; |
| 431 | char firm_version[20]; | 436 | char firm_version[20]; |
| 437 | char device_map[20]; /*21,84-99*/ | ||
| 438 | struct work_struct arcmsr_do_message_isr_bh; | ||
| 439 | struct timer_list eternal_timer; | ||
| 440 | unsigned short fw_state; | ||
| 441 | atomic_t rq_map_token; | ||
| 442 | int ante_token_value; | ||
| 432 | };/* HW_DEVICE_EXTENSION */ | 443 | };/* HW_DEVICE_EXTENSION */ |
| 433 | /* | 444 | /* |
| 434 | ******************************************************************************* | 445 | ******************************************************************************* |
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index a4e04c50c436..07fdfe57e38e 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
| @@ -192,6 +192,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = { | |||
| 192 | .attr = { | 192 | .attr = { |
| 193 | .name = "mu_read", | 193 | .name = "mu_read", |
| 194 | .mode = S_IRUSR , | 194 | .mode = S_IRUSR , |
| 195 | .owner = THIS_MODULE, | ||
| 195 | }, | 196 | }, |
| 196 | .size = 1032, | 197 | .size = 1032, |
| 197 | .read = arcmsr_sysfs_iop_message_read, | 198 | .read = arcmsr_sysfs_iop_message_read, |
| @@ -201,6 +202,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = { | |||
| 201 | .attr = { | 202 | .attr = { |
| 202 | .name = "mu_write", | 203 | .name = "mu_write", |
| 203 | .mode = S_IWUSR, | 204 | .mode = S_IWUSR, |
| 205 | .owner = THIS_MODULE, | ||
| 204 | }, | 206 | }, |
| 205 | .size = 1032, | 207 | .size = 1032, |
| 206 | .write = arcmsr_sysfs_iop_message_write, | 208 | .write = arcmsr_sysfs_iop_message_write, |
| @@ -210,6 +212,7 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = { | |||
| 210 | .attr = { | 212 | .attr = { |
| 211 | .name = "mu_clear", | 213 | .name = "mu_clear", |
| 212 | .mode = S_IWUSR, | 214 | .mode = S_IWUSR, |
| 215 | .owner = THIS_MODULE, | ||
| 213 | }, | 216 | }, |
| 214 | .size = 1, | 217 | .size = 1, |
| 215 | .write = arcmsr_sysfs_iop_message_clear, | 218 | .write = arcmsr_sysfs_iop_message_clear, |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index ffbe2192da3c..ffa54792bb33 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
| @@ -72,8 +72,16 @@ | |||
| 72 | #include <scsi/scsicam.h> | 72 | #include <scsi/scsicam.h> |
| 73 | #include "arcmsr.h" | 73 | #include "arcmsr.h" |
| 74 | 74 | ||
| 75 | #ifdef CONFIG_SCSI_ARCMSR_RESET | ||
| 76 | static int sleeptime = 20; | ||
| 77 | static int retrycount = 12; | ||
| 78 | module_param(sleeptime, int, S_IRUGO|S_IWUSR); | ||
| 79 | MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset"); | ||
| 80 | module_param(retrycount, int, S_IRUGO|S_IWUSR); | ||
| 81 | MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset"); | ||
| 82 | #endif | ||
| 75 | MODULE_AUTHOR("Erich Chen <support@areca.com.tw>"); | 83 | MODULE_AUTHOR("Erich Chen <support@areca.com.tw>"); |
| 76 | MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter"); | 84 | MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter"); |
| 77 | MODULE_LICENSE("Dual BSD/GPL"); | 85 | MODULE_LICENSE("Dual BSD/GPL"); |
| 78 | MODULE_VERSION(ARCMSR_DRIVER_VERSION); | 86 | MODULE_VERSION(ARCMSR_DRIVER_VERSION); |
| 79 | 87 | ||
| @@ -96,6 +104,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb); | |||
| 96 | static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); | 104 | static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); |
| 97 | static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb); | 105 | static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb); |
| 98 | static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); | 106 | static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); |
| 107 | static void arcmsr_request_device_map(unsigned long pacb); | ||
| 108 | static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb); | ||
| 109 | static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb); | ||
| 110 | static void arcmsr_message_isr_bh_fn(struct work_struct *work); | ||
| 111 | static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode); | ||
| 112 | static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); | ||
| 113 | |||
| 99 | static const char *arcmsr_info(struct Scsi_Host *); | 114 | static const char *arcmsr_info(struct Scsi_Host *); |
| 100 | static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); | 115 | static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); |
| 101 | static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, | 116 | static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, |
| @@ -112,7 +127,7 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, | |||
| 112 | 127 | ||
| 113 | static struct scsi_host_template arcmsr_scsi_host_template = { | 128 | static struct scsi_host_template arcmsr_scsi_host_template = { |
| 114 | .module = THIS_MODULE, | 129 | .module = THIS_MODULE, |
| 115 | .name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter" | 130 | .name = "ARCMSR ARECA SATA/SAS RAID Host Bus Adapter" |
| 116 | ARCMSR_DRIVER_VERSION, | 131 | ARCMSR_DRIVER_VERSION, |
| 117 | .info = arcmsr_info, | 132 | .info = arcmsr_info, |
| 118 | .queuecommand = arcmsr_queue_command, | 133 | .queuecommand = arcmsr_queue_command, |
| @@ -128,16 +143,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = { | |||
| 128 | .use_clustering = ENABLE_CLUSTERING, | 143 | .use_clustering = ENABLE_CLUSTERING, |
| 129 | .shost_attrs = arcmsr_host_attrs, | 144 | .shost_attrs = arcmsr_host_attrs, |
| 130 | }; | 145 | }; |
| 131 | #ifdef CONFIG_SCSI_ARCMSR_AER | ||
| 132 | static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev); | ||
| 133 | static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev, | ||
| 134 | pci_channel_state_t state); | ||
| 135 | |||
| 136 | static struct pci_error_handlers arcmsr_pci_error_handlers = { | ||
| 137 | .error_detected = arcmsr_pci_error_detected, | ||
| 138 | .slot_reset = arcmsr_pci_slot_reset, | ||
| 139 | }; | ||
| 140 | #endif | ||
| 141 | static struct pci_device_id arcmsr_device_id_table[] = { | 146 | static struct pci_device_id arcmsr_device_id_table[] = { |
| 142 | {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, | 147 | {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, |
| 143 | {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)}, | 148 | {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)}, |
| @@ -166,9 +171,6 @@ static struct pci_driver arcmsr_pci_driver = { | |||
| 166 | .probe = arcmsr_probe, | 171 | .probe = arcmsr_probe, |
| 167 | .remove = arcmsr_remove, | 172 | .remove = arcmsr_remove, |
| 168 | .shutdown = arcmsr_shutdown, | 173 | .shutdown = arcmsr_shutdown, |
| 169 | #ifdef CONFIG_SCSI_ARCMSR_AER | ||
| 170 | .err_handler = &arcmsr_pci_error_handlers, | ||
| 171 | #endif | ||
| 172 | }; | 174 | }; |
| 173 | 175 | ||
| 174 | static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) | 176 | static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) |
| @@ -236,10 +238,9 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
| 236 | void *dma_coherent; | 238 | void *dma_coherent; |
| 237 | dma_addr_t dma_coherent_handle, dma_addr; | 239 | dma_addr_t dma_coherent_handle, dma_addr; |
| 238 | struct CommandControlBlock *ccb_tmp; | 240 | struct CommandControlBlock *ccb_tmp; |
| 239 | uint32_t intmask_org; | ||
| 240 | int i, j; | 241 | int i, j; |
| 241 | 242 | ||
| 242 | acb->pmuA = pci_ioremap_bar(pdev, 0); | 243 | acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); |
| 243 | if (!acb->pmuA) { | 244 | if (!acb->pmuA) { |
| 244 | printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", | 245 | printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", |
| 245 | acb->host->host_no); | 246 | acb->host->host_no); |
| @@ -281,12 +282,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
| 281 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) | 282 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) |
| 282 | for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) | 283 | for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) |
| 283 | acb->devstate[i][j] = ARECA_RAID_GONE; | 284 | acb->devstate[i][j] = ARECA_RAID_GONE; |
| 284 | |||
| 285 | /* | ||
| 286 | ** here we need to tell iop 331 our ccb_tmp.HighPart | ||
| 287 | ** if ccb_tmp.HighPart is not zero | ||
| 288 | */ | ||
| 289 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
| 290 | } | 285 | } |
| 291 | break; | 286 | break; |
| 292 | 287 | ||
| @@ -297,7 +292,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
| 297 | void __iomem *mem_base0, *mem_base1; | 292 | void __iomem *mem_base0, *mem_base1; |
| 298 | void *dma_coherent; | 293 | void *dma_coherent; |
| 299 | dma_addr_t dma_coherent_handle, dma_addr; | 294 | dma_addr_t dma_coherent_handle, dma_addr; |
| 300 | uint32_t intmask_org; | ||
| 301 | struct CommandControlBlock *ccb_tmp; | 295 | struct CommandControlBlock *ccb_tmp; |
| 302 | int i, j; | 296 | int i, j; |
| 303 | 297 | ||
| @@ -333,11 +327,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
| 333 | reg = (struct MessageUnit_B *)(dma_coherent + | 327 | reg = (struct MessageUnit_B *)(dma_coherent + |
| 334 | ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); | 328 | ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); |
| 335 | acb->pmuB = reg; | 329 | acb->pmuB = reg; |
| 336 | mem_base0 = pci_ioremap_bar(pdev, 0); | 330 | mem_base0 = ioremap(pci_resource_start(pdev, 0), |
| 331 | pci_resource_len(pdev, 0)); | ||
| 337 | if (!mem_base0) | 332 | if (!mem_base0) |
| 338 | goto out; | 333 | goto out; |
| 339 | 334 | ||
| 340 | mem_base1 = pci_ioremap_bar(pdev, 2); | 335 | mem_base1 = ioremap(pci_resource_start(pdev, 2), |
| 336 | pci_resource_len(pdev, 2)); | ||
| 341 | if (!mem_base1) { | 337 | if (!mem_base1) { |
| 342 | iounmap(mem_base0); | 338 | iounmap(mem_base0); |
| 343 | goto out; | 339 | goto out; |
| @@ -357,12 +353,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
| 357 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) | 353 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) |
| 358 | for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) | 354 | for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) |
| 359 | acb->devstate[i][j] = ARECA_RAID_GOOD; | 355 | acb->devstate[i][j] = ARECA_RAID_GOOD; |
| 360 | |||
| 361 | /* | ||
| 362 | ** here we need to tell iop 331 our ccb_tmp.HighPart | ||
| 363 | ** if ccb_tmp.HighPart is not zero | ||
| 364 | */ | ||
| 365 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
| 366 | } | 356 | } |
| 367 | break; | 357 | break; |
| 368 | } | 358 | } |
| @@ -374,6 +364,88 @@ out: | |||
| 374 | sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); | 364 | sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); |
| 375 | return -ENOMEM; | 365 | return -ENOMEM; |
| 376 | } | 366 | } |
| 367 | static void arcmsr_message_isr_bh_fn(struct work_struct *work) | ||
| 368 | { | ||
| 369 | struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh); | ||
| 370 | |||
| 371 | switch (acb->adapter_type) { | ||
| 372 | case ACB_ADAPTER_TYPE_A: { | ||
| 373 | |||
| 374 | struct MessageUnit_A __iomem *reg = acb->pmuA; | ||
| 375 | char *acb_dev_map = (char *)acb->device_map; | ||
| 376 | uint32_t __iomem *signature = (uint32_t __iomem *) (&reg->message_rwbuffer[0]); | ||
| 377 | char __iomem *devicemap = (char __iomem *) (&reg->message_rwbuffer[21]); | ||
| 378 | int target, lun; | ||
| 379 | struct scsi_device *psdev; | ||
| 380 | char diff; | ||
| 381 | |||
| 382 | atomic_inc(&acb->rq_map_token); | ||
| 383 | if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { | ||
| 384 | for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { | ||
| 385 | diff = (*acb_dev_map)^readb(devicemap); | ||
| 386 | if (diff != 0) { | ||
| 387 | char temp; | ||
| 388 | *acb_dev_map = readb(devicemap); | ||
| 389 | temp = *acb_dev_map; | ||
| 390 | for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { | ||
| 391 | if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { | ||
| 392 | scsi_add_device(acb->host, 0, target, lun); | ||
| 393 | } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) { | ||
| 394 | psdev = scsi_device_lookup(acb->host, 0, target, lun); | ||
| 395 | if (psdev != NULL) { | ||
| 396 | scsi_remove_device(psdev); | ||
| 397 | scsi_device_put(psdev); | ||
| 398 | } | ||
| 399 | } | ||
| 400 | temp >>= 1; | ||
| 401 | diff >>= 1; | ||
| 402 | } | ||
| 403 | } | ||
| 404 | devicemap++; | ||
| 405 | acb_dev_map++; | ||
| 406 | } | ||
| 407 | } | ||
| 408 | break; | ||
| 409 | } | ||
| 410 | |||
| 411 | case ACB_ADAPTER_TYPE_B: { | ||
| 412 | struct MessageUnit_B *reg = acb->pmuB; | ||
| 413 | char *acb_dev_map = (char *)acb->device_map; | ||
| 414 | uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer_reg[0]); | ||
| 415 | char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer_reg[21]); | ||
| 416 | int target, lun; | ||
| 417 | struct scsi_device *psdev; | ||
| 418 | char diff; | ||
| 419 | |||
| 420 | atomic_inc(&acb->rq_map_token); | ||
| 421 | if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { | ||
| 422 | for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { | ||
| 423 | diff = (*acb_dev_map)^readb(devicemap); | ||
| 424 | if (diff != 0) { | ||
| 425 | char temp; | ||
| 426 | *acb_dev_map = readb(devicemap); | ||
| 427 | temp = *acb_dev_map; | ||
| 428 | for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { | ||
| 429 | if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { | ||
| 430 | scsi_add_device(acb->host, 0, target, lun); | ||
| 431 | } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) { | ||
| 432 | psdev = scsi_device_lookup(acb->host, 0, target, lun); | ||
| 433 | if (psdev != NULL) { | ||
| 434 | scsi_remove_device(psdev); | ||
| 435 | scsi_device_put(psdev); | ||
| 436 | } | ||
| 437 | } | ||
| 438 | temp >>= 1; | ||
| 439 | diff >>= 1; | ||
| 440 | } | ||
| 441 | } | ||
| 442 | devicemap++; | ||
| 443 | acb_dev_map++; | ||
| 444 | } | ||
| 445 | } | ||
| 446 | } | ||
| 447 | } | ||
| 448 | } | ||
| 377 | 449 | ||
| 378 | static int arcmsr_probe(struct pci_dev *pdev, | 450 | static int arcmsr_probe(struct pci_dev *pdev, |
| 379 | const struct pci_device_id *id) | 451 | const struct pci_device_id *id) |
| @@ -432,17 +504,17 @@ static int arcmsr_probe(struct pci_dev *pdev, | |||
| 432 | ACB_F_MESSAGE_WQBUFFER_READED); | 504 | ACB_F_MESSAGE_WQBUFFER_READED); |
| 433 | acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; | 505 | acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; |
| 434 | INIT_LIST_HEAD(&acb->ccb_free_list); | 506 | INIT_LIST_HEAD(&acb->ccb_free_list); |
| 435 | 507 | INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); | |
| 436 | error = arcmsr_alloc_ccb_pool(acb); | 508 | error = arcmsr_alloc_ccb_pool(acb); |
| 437 | if (error) | 509 | if (error) |
| 438 | goto out_release_regions; | 510 | goto out_release_regions; |
| 439 | 511 | ||
| 512 | arcmsr_iop_init(acb); | ||
| 440 | error = request_irq(pdev->irq, arcmsr_do_interrupt, | 513 | error = request_irq(pdev->irq, arcmsr_do_interrupt, |
| 441 | IRQF_SHARED, "arcmsr", acb); | 514 | IRQF_SHARED, "arcmsr", acb); |
| 442 | if (error) | 515 | if (error) |
| 443 | goto out_free_ccb_pool; | 516 | goto out_free_ccb_pool; |
| 444 | 517 | ||
| 445 | arcmsr_iop_init(acb); | ||
| 446 | pci_set_drvdata(pdev, host); | 518 | pci_set_drvdata(pdev, host); |
| 447 | if (strncmp(acb->firm_version, "V1.42", 5) >= 0) | 519 | if (strncmp(acb->firm_version, "V1.42", 5) >= 0) |
| 448 | host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B; | 520 | host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B; |
| @@ -459,6 +531,14 @@ static int arcmsr_probe(struct pci_dev *pdev, | |||
| 459 | #ifdef CONFIG_SCSI_ARCMSR_AER | 531 | #ifdef CONFIG_SCSI_ARCMSR_AER |
| 460 | pci_enable_pcie_error_reporting(pdev); | 532 | pci_enable_pcie_error_reporting(pdev); |
| 461 | #endif | 533 | #endif |
| 534 | atomic_set(&acb->rq_map_token, 16); | ||
| 535 | acb->fw_state = true; | ||
| 536 | init_timer(&acb->eternal_timer); | ||
| 537 | acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ); | ||
| 538 | acb->eternal_timer.data = (unsigned long) acb; | ||
| 539 | acb->eternal_timer.function = &arcmsr_request_device_map; | ||
| 540 | add_timer(&acb->eternal_timer); | ||
| 541 | |||
| 462 | return 0; | 542 | return 0; |
| 463 | out_free_sysfs: | 543 | out_free_sysfs: |
| 464 | out_free_irq: | 544 | out_free_irq: |
| @@ -518,40 +598,48 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) | |||
| 518 | return 0xff; | 598 | return 0xff; |
| 519 | } | 599 | } |
| 520 | 600 | ||
| 521 | static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) | 601 | static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) |
| 522 | { | 602 | { |
| 523 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 603 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
| 524 | 604 | ||
| 525 | writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); | 605 | writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); |
| 526 | if (arcmsr_hba_wait_msgint_ready(acb)) | 606 | if (arcmsr_hba_wait_msgint_ready(acb)) { |
| 527 | printk(KERN_NOTICE | 607 | printk(KERN_NOTICE |
| 528 | "arcmsr%d: wait 'abort all outstanding command' timeout \n" | 608 | "arcmsr%d: wait 'abort all outstanding command' timeout \n" |
| 529 | , acb->host->host_no); | 609 | , acb->host->host_no); |
| 610 | return 0xff; | ||
| 611 | } | ||
| 612 | return 0x00; | ||
| 530 | } | 613 | } |
| 531 | 614 | ||
| 532 | static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) | 615 | static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) |
| 533 | { | 616 | { |
| 534 | struct MessageUnit_B *reg = acb->pmuB; | 617 | struct MessageUnit_B *reg = acb->pmuB; |
| 535 | 618 | ||
| 536 | writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); | 619 | writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); |
| 537 | if (arcmsr_hbb_wait_msgint_ready(acb)) | 620 | if (arcmsr_hbb_wait_msgint_ready(acb)) { |
| 538 | printk(KERN_NOTICE | 621 | printk(KERN_NOTICE |
| 539 | "arcmsr%d: wait 'abort all outstanding command' timeout \n" | 622 | "arcmsr%d: wait 'abort all outstanding command' timeout \n" |
| 540 | , acb->host->host_no); | 623 | , acb->host->host_no); |
| 624 | return 0xff; | ||
| 625 | } | ||
| 626 | return 0x00; | ||
| 541 | } | 627 | } |
| 542 | 628 | ||
| 543 | static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) | 629 | static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) |
| 544 | { | 630 | { |
| 631 | uint8_t rtnval = 0; | ||
| 545 | switch (acb->adapter_type) { | 632 | switch (acb->adapter_type) { |
| 546 | case ACB_ADAPTER_TYPE_A: { | 633 | case ACB_ADAPTER_TYPE_A: { |
| 547 | arcmsr_abort_hba_allcmd(acb); | 634 | rtnval = arcmsr_abort_hba_allcmd(acb); |
| 548 | } | 635 | } |
| 549 | break; | 636 | break; |
| 550 | 637 | ||
| 551 | case ACB_ADAPTER_TYPE_B: { | 638 | case ACB_ADAPTER_TYPE_B: { |
| 552 | arcmsr_abort_hbb_allcmd(acb); | 639 | rtnval = arcmsr_abort_hbb_allcmd(acb); |
| 553 | } | 640 | } |
| 554 | } | 641 | } |
| 642 | return rtnval; | ||
| 555 | } | 643 | } |
| 556 | 644 | ||
| 557 | static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) | 645 | static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) |
| @@ -649,8 +737,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) | |||
| 649 | 737 | ||
| 650 | case ACB_ADAPTER_TYPE_A : { | 738 | case ACB_ADAPTER_TYPE_A : { |
| 651 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 739 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
| 652 | orig_mask = readl(&reg->outbound_intmask)|\ | 740 | orig_mask = readl(&reg->outbound_intmask); |
| 653 | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; | ||
| 654 | writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ | 741 | writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ |
| 655 | &reg->outbound_intmask); | 742 | &reg->outbound_intmask); |
| 656 | } | 743 | } |
| @@ -658,8 +745,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) | |||
| 658 | 745 | ||
| 659 | case ACB_ADAPTER_TYPE_B : { | 746 | case ACB_ADAPTER_TYPE_B : { |
| 660 | struct MessageUnit_B *reg = acb->pmuB; | 747 | struct MessageUnit_B *reg = acb->pmuB; |
| 661 | orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \ | 748 | orig_mask = readl(reg->iop2drv_doorbell_mask_reg); |
| 662 | (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); | ||
| 663 | writel(0, reg->iop2drv_doorbell_mask_reg); | 749 | writel(0, reg->iop2drv_doorbell_mask_reg); |
| 664 | } | 750 | } |
| 665 | break; | 751 | break; |
| @@ -795,12 +881,13 @@ static void arcmsr_remove(struct pci_dev *pdev) | |||
| 795 | struct AdapterControlBlock *acb = | 881 | struct AdapterControlBlock *acb = |
| 796 | (struct AdapterControlBlock *) host->hostdata; | 882 | (struct AdapterControlBlock *) host->hostdata; |
| 797 | int poll_count = 0; | 883 | int poll_count = 0; |
| 798 | |||
| 799 | arcmsr_free_sysfs_attr(acb); | 884 | arcmsr_free_sysfs_attr(acb); |
| 800 | scsi_remove_host(host); | 885 | scsi_remove_host(host); |
| 886 | flush_scheduled_work(); | ||
| 887 | del_timer_sync(&acb->eternal_timer); | ||
| 888 | arcmsr_disable_outbound_ints(acb); | ||
| 801 | arcmsr_stop_adapter_bgrb(acb); | 889 | arcmsr_stop_adapter_bgrb(acb); |
| 802 | arcmsr_flush_adapter_cache(acb); | 890 | arcmsr_flush_adapter_cache(acb); |
| 803 | arcmsr_disable_outbound_ints(acb); | ||
| 804 | acb->acb_flags |= ACB_F_SCSISTOPADAPTER; | 891 | acb->acb_flags |= ACB_F_SCSISTOPADAPTER; |
| 805 | acb->acb_flags &= ~ACB_F_IOP_INITED; | 892 | acb->acb_flags &= ~ACB_F_IOP_INITED; |
| 806 | 893 | ||
| @@ -841,7 +928,9 @@ static void arcmsr_shutdown(struct pci_dev *pdev) | |||
| 841 | struct Scsi_Host *host = pci_get_drvdata(pdev); | 928 | struct Scsi_Host *host = pci_get_drvdata(pdev); |
| 842 | struct AdapterControlBlock *acb = | 929 | struct AdapterControlBlock *acb = |
| 843 | (struct AdapterControlBlock *)host->hostdata; | 930 | (struct AdapterControlBlock *)host->hostdata; |
| 844 | 931 | del_timer_sync(&acb->eternal_timer); | |
| 932 | arcmsr_disable_outbound_ints(acb); | ||
| 933 | flush_scheduled_work(); | ||
| 845 | arcmsr_stop_adapter_bgrb(acb); | 934 | arcmsr_stop_adapter_bgrb(acb); |
| 846 | arcmsr_flush_adapter_cache(acb); | 935 | arcmsr_flush_adapter_cache(acb); |
| 847 | } | 936 | } |
| @@ -861,7 +950,7 @@ static void arcmsr_module_exit(void) | |||
| 861 | module_init(arcmsr_module_init); | 950 | module_init(arcmsr_module_init); |
| 862 | module_exit(arcmsr_module_exit); | 951 | module_exit(arcmsr_module_exit); |
| 863 | 952 | ||
| 864 | static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ | 953 | static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, |
| 865 | u32 intmask_org) | 954 | u32 intmask_org) |
| 866 | { | 955 | { |
| 867 | u32 mask; | 956 | u32 mask; |
| @@ -871,7 +960,8 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ | |||
| 871 | case ACB_ADAPTER_TYPE_A : { | 960 | case ACB_ADAPTER_TYPE_A : { |
| 872 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 961 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
| 873 | mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | | 962 | mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | |
| 874 | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); | 963 | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE| |
| 964 | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); | ||
| 875 | writel(mask, &reg->outbound_intmask); | 965 | writel(mask, &reg->outbound_intmask); |
| 876 | acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; | 966 | acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; |
| 877 | } | 967 | } |
| @@ -879,8 +969,10 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ | |||
| 879 | 969 | ||
| 880 | case ACB_ADAPTER_TYPE_B : { | 970 | case ACB_ADAPTER_TYPE_B : { |
| 881 | struct MessageUnit_B *reg = acb->pmuB; | 971 | struct MessageUnit_B *reg = acb->pmuB; |
| 882 | mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \ | 972 | mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | |
| 883 | ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE); | 973 | ARCMSR_IOP2DRV_DATA_READ_OK | |
| 974 | ARCMSR_IOP2DRV_CDB_DONE | | ||
| 975 | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); | ||
| 884 | writel(mask, reg->iop2drv_doorbell_mask_reg); | 976 | writel(mask, reg->iop2drv_doorbell_mask_reg); |
| 885 | acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; | 977 | acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; |
| 886 | } | 978 | } |
| @@ -1048,8 +1140,8 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) | |||
| 1048 | } | 1140 | } |
| 1049 | case ACB_ADAPTER_TYPE_B: { | 1141 | case ACB_ADAPTER_TYPE_B: { |
| 1050 | struct MessageUnit_B *reg = acb->pmuB; | 1142 | struct MessageUnit_B *reg = acb->pmuB; |
| 1051 | iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); | 1143 | iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); |
| 1052 | iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); | 1144 | iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); |
| 1053 | dma_free_coherent(&acb->pdev->dev, | 1145 | dma_free_coherent(&acb->pdev->dev, |
| 1054 | (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 + | 1146 | (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 + |
| 1055 | sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); | 1147 | sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); |
| @@ -1249,13 +1341,36 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) | |||
| 1249 | reg->doneq_index = index; | 1341 | reg->doneq_index = index; |
| 1250 | } | 1342 | } |
| 1251 | } | 1343 | } |
| 1344 | /* | ||
| 1345 | ********************************************************************************** | ||
| 1346 | ** Handle a message interrupt | ||
| 1347 | ** | ||
| 1348 | ** The only message interrupt we expect is in response to a query for the current adapter config. | ||
| 1349 | ** We want this in order to compare the drivemap so that we can detect newly-attached drives. | ||
| 1350 | ********************************************************************************** | ||
| 1351 | */ | ||
| 1352 | static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) | ||
| 1353 | { | ||
| 1354 | struct MessageUnit_A *reg = acb->pmuA; | ||
| 1355 | |||
| 1356 | /*clear interrupt and message state*/ | ||
| 1357 | writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus); | ||
| 1358 | schedule_work(&acb->arcmsr_do_message_isr_bh); | ||
| 1359 | } | ||
| 1360 | static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) | ||
| 1361 | { | ||
| 1362 | struct MessageUnit_B *reg = acb->pmuB; | ||
| 1252 | 1363 | ||
| 1364 | /*clear interrupt and message state*/ | ||
| 1365 | writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); | ||
| 1366 | schedule_work(&acb->arcmsr_do_message_isr_bh); | ||
| 1367 | } | ||
| 1253 | static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) | 1368 | static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) |
| 1254 | { | 1369 | { |
| 1255 | uint32_t outbound_intstatus; | 1370 | uint32_t outbound_intstatus; |
| 1256 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 1371 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
| 1257 | 1372 | ||
| 1258 | outbound_intstatus = readl(&reg->outbound_intstatus) & \ | 1373 | outbound_intstatus = readl(&reg->outbound_intstatus) & |
| 1259 | acb->outbound_int_enable; | 1374 | acb->outbound_int_enable; |
| 1260 | if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) { | 1375 | if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) { |
| 1261 | return 1; | 1376 | return 1; |
| @@ -1267,6 +1382,10 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) | |||
| 1267 | if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { | 1382 | if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { |
| 1268 | arcmsr_hba_postqueue_isr(acb); | 1383 | arcmsr_hba_postqueue_isr(acb); |
| 1269 | } | 1384 | } |
| 1385 | if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { | ||
| 1386 | /* messenger of "driver to iop commands" */ | ||
| 1387 | arcmsr_hba_message_isr(acb); | ||
| 1388 | } | ||
| 1270 | return 0; | 1389 | return 0; |
| 1271 | } | 1390 | } |
| 1272 | 1391 | ||
| @@ -1275,13 +1394,14 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) | |||
| 1275 | uint32_t outbound_doorbell; | 1394 | uint32_t outbound_doorbell; |
| 1276 | struct MessageUnit_B *reg = acb->pmuB; | 1395 | struct MessageUnit_B *reg = acb->pmuB; |
| 1277 | 1396 | ||
| 1278 | outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \ | 1397 | outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & |
| 1279 | acb->outbound_int_enable; | 1398 | acb->outbound_int_enable; |
| 1280 | if (!outbound_doorbell) | 1399 | if (!outbound_doorbell) |
| 1281 | return 1; | 1400 | return 1; |
| 1282 | 1401 | ||
| 1283 | writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); | 1402 | writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); |
| 1284 | /*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/ | 1403 | /*in case the last action of doorbell interrupt clearance is cached, |
| 1404 | this action can push HW to write down the clear bit*/ | ||
| 1285 | readl(reg->iop2drv_doorbell_reg); | 1405 | readl(reg->iop2drv_doorbell_reg); |
| 1286 | writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); | 1406 | writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); |
| 1287 | if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { | 1407 | if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { |
| @@ -1293,6 +1413,10 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) | |||
| 1293 | if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { | 1413 | if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { |
| 1294 | arcmsr_hbb_postqueue_isr(acb); | 1414 | arcmsr_hbb_postqueue_isr(acb); |
| 1295 | } | 1415 | } |
| 1416 | if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { | ||
| 1417 | /* messenger of "driver to iop commands" */ | ||
| 1418 | arcmsr_hbb_message_isr(acb); | ||
| 1419 | } | ||
| 1296 | 1420 | ||
| 1297 | return 0; | 1421 | return 0; |
| 1298 | } | 1422 | } |
| @@ -1360,7 +1484,7 @@ void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb) | |||
| 1360 | } | 1484 | } |
| 1361 | } | 1485 | } |
| 1362 | 1486 | ||
| 1363 | static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | 1487 | static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, |
| 1364 | struct scsi_cmnd *cmd) | 1488 | struct scsi_cmnd *cmd) |
| 1365 | { | 1489 | { |
| 1366 | struct CMD_MESSAGE_FIELD *pcmdmessagefld; | 1490 | struct CMD_MESSAGE_FIELD *pcmdmessagefld; |
| @@ -1398,6 +1522,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
| 1398 | retvalue = ARCMSR_MESSAGE_FAIL; | 1522 | retvalue = ARCMSR_MESSAGE_FAIL; |
| 1399 | goto message_out; | 1523 | goto message_out; |
| 1400 | } | 1524 | } |
| 1525 | |||
| 1526 | if (!acb->fw_state) { | ||
| 1527 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
| 1528 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
| 1529 | goto message_out; | ||
| 1530 | } | ||
| 1531 | |||
| 1401 | ptmpQbuffer = ver_addr; | 1532 | ptmpQbuffer = ver_addr; |
| 1402 | while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) | 1533 | while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) |
| 1403 | && (allxfer_len < 1031)) { | 1534 | && (allxfer_len < 1031)) { |
| @@ -1444,6 +1575,12 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
| 1444 | retvalue = ARCMSR_MESSAGE_FAIL; | 1575 | retvalue = ARCMSR_MESSAGE_FAIL; |
| 1445 | goto message_out; | 1576 | goto message_out; |
| 1446 | } | 1577 | } |
| 1578 | if (!acb->fw_state) { | ||
| 1579 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
| 1580 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
| 1581 | goto message_out; | ||
| 1582 | } | ||
| 1583 | |||
| 1447 | ptmpuserbuffer = ver_addr; | 1584 | ptmpuserbuffer = ver_addr; |
| 1448 | user_len = pcmdmessagefld->cmdmessage.Length; | 1585 | user_len = pcmdmessagefld->cmdmessage.Length; |
| 1449 | memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); | 1586 | memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); |
| @@ -1496,6 +1633,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
| 1496 | 1633 | ||
| 1497 | case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { | 1634 | case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { |
| 1498 | uint8_t *pQbuffer = acb->rqbuffer; | 1635 | uint8_t *pQbuffer = acb->rqbuffer; |
| 1636 | if (!acb->fw_state) { | ||
| 1637 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
| 1638 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
| 1639 | goto message_out; | ||
| 1640 | } | ||
| 1499 | 1641 | ||
| 1500 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { | 1642 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { |
| 1501 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; | 1643 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; |
| @@ -1511,6 +1653,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
| 1511 | 1653 | ||
| 1512 | case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { | 1654 | case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { |
| 1513 | uint8_t *pQbuffer = acb->wqbuffer; | 1655 | uint8_t *pQbuffer = acb->wqbuffer; |
| 1656 | if (!acb->fw_state) { | ||
| 1657 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
| 1658 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
| 1659 | goto message_out; | ||
| 1660 | } | ||
| 1514 | 1661 | ||
| 1515 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { | 1662 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { |
| 1516 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; | 1663 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; |
| @@ -1529,6 +1676,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
| 1529 | 1676 | ||
| 1530 | case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { | 1677 | case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { |
| 1531 | uint8_t *pQbuffer; | 1678 | uint8_t *pQbuffer; |
| 1679 | if (!acb->fw_state) { | ||
| 1680 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
| 1681 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
| 1682 | goto message_out; | ||
| 1683 | } | ||
| 1532 | 1684 | ||
| 1533 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { | 1685 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { |
| 1534 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; | 1686 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; |
| @@ -1551,13 +1703,22 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
| 1551 | break; | 1703 | break; |
| 1552 | 1704 | ||
| 1553 | case ARCMSR_MESSAGE_RETURN_CODE_3F: { | 1705 | case ARCMSR_MESSAGE_RETURN_CODE_3F: { |
| 1706 | if (!acb->fw_state) { | ||
| 1707 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
| 1708 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
| 1709 | goto message_out; | ||
| 1710 | } | ||
| 1554 | pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; | 1711 | pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; |
| 1555 | } | 1712 | } |
| 1556 | break; | 1713 | break; |
| 1557 | 1714 | ||
| 1558 | case ARCMSR_MESSAGE_SAY_HELLO: { | 1715 | case ARCMSR_MESSAGE_SAY_HELLO: { |
| 1559 | int8_t *hello_string = "Hello! I am ARCMSR"; | 1716 | int8_t *hello_string = "Hello! I am ARCMSR"; |
| 1560 | 1717 | if (!acb->fw_state) { | |
| 1718 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
| 1719 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
| 1720 | goto message_out; | ||
| 1721 | } | ||
| 1561 | memcpy(pcmdmessagefld->messagedatabuffer, hello_string | 1722 | memcpy(pcmdmessagefld->messagedatabuffer, hello_string |
| 1562 | , (int16_t)strlen(hello_string)); | 1723 | , (int16_t)strlen(hello_string)); |
| 1563 | pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; | 1724 | pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; |
| @@ -1565,10 +1726,20 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
| 1565 | break; | 1726 | break; |
| 1566 | 1727 | ||
| 1567 | case ARCMSR_MESSAGE_SAY_GOODBYE: | 1728 | case ARCMSR_MESSAGE_SAY_GOODBYE: |
| 1729 | if (!acb->fw_state) { | ||
| 1730 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
| 1731 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
| 1732 | goto message_out; | ||
| 1733 | } | ||
| 1568 | arcmsr_iop_parking(acb); | 1734 | arcmsr_iop_parking(acb); |
| 1569 | break; | 1735 | break; |
| 1570 | 1736 | ||
| 1571 | case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: | 1737 | case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: |
| 1738 | if (!acb->fw_state) { | ||
| 1739 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
| 1740 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
| 1741 | goto message_out; | ||
| 1742 | } | ||
| 1572 | arcmsr_flush_adapter_cache(acb); | 1743 | arcmsr_flush_adapter_cache(acb); |
| 1573 | break; | 1744 | break; |
| 1574 | 1745 | ||
| @@ -1651,16 +1822,57 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, | |||
| 1651 | struct CommandControlBlock *ccb; | 1822 | struct CommandControlBlock *ccb; |
| 1652 | int target = cmd->device->id; | 1823 | int target = cmd->device->id; |
| 1653 | int lun = cmd->device->lun; | 1824 | int lun = cmd->device->lun; |
| 1654 | 1825 | uint8_t scsicmd = cmd->cmnd[0]; | |
| 1655 | cmd->scsi_done = done; | 1826 | cmd->scsi_done = done; |
| 1656 | cmd->host_scribble = NULL; | 1827 | cmd->host_scribble = NULL; |
| 1657 | cmd->result = 0; | 1828 | cmd->result = 0; |
| 1829 | |||
| 1830 | if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) { | ||
| 1831 | if (acb->devstate[target][lun] == ARECA_RAID_GONE) { | ||
| 1832 | cmd->result = (DID_NO_CONNECT << 16); | ||
| 1833 | } | ||
| 1834 | cmd->scsi_done(cmd); | ||
| 1835 | return 0; | ||
| 1836 | } | ||
| 1837 | |||
| 1658 | if (acb->acb_flags & ACB_F_BUS_RESET) { | 1838 | if (acb->acb_flags & ACB_F_BUS_RESET) { |
| 1659 | printk(KERN_NOTICE "arcmsr%d: bus reset" | 1839 | switch (acb->adapter_type) { |
| 1660 | " and return busy \n" | 1840 | case ACB_ADAPTER_TYPE_A: { |
| 1661 | , acb->host->host_no); | 1841 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
| 1842 | uint32_t intmask_org, outbound_doorbell; | ||
| 1843 | |||
| 1844 | if ((readl(&reg->outbound_msgaddr1) & | ||
| 1845 | ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { | ||
| 1846 | printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n", | ||
| 1847 | acb->host->host_no); | ||
| 1662 | return SCSI_MLQUEUE_HOST_BUSY; | 1848 | return SCSI_MLQUEUE_HOST_BUSY; |
| 1663 | } | 1849 | } |
| 1850 | |||
| 1851 | acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP; | ||
| 1852 | printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok\n", | ||
| 1853 | acb->host->host_no); | ||
| 1854 | /* disable all outbound interrupt */ | ||
| 1855 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
| 1856 | arcmsr_get_firmware_spec(acb, 1); | ||
| 1857 | /*start background rebuild*/ | ||
| 1858 | arcmsr_start_adapter_bgrb(acb); | ||
| 1859 | /* clear Qbuffer if door bell ringed */ | ||
| 1860 | outbound_doorbell = readl(&reg->outbound_doorbell); | ||
| 1861 | /*clear interrupt */ | ||
| 1862 | writel(outbound_doorbell, &reg->outbound_doorbell); | ||
| 1863 | writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, | ||
| 1864 | &reg->inbound_doorbell); | ||
| 1865 | /* enable outbound Post Queue,outbound doorbell Interrupt */ | ||
| 1866 | arcmsr_enable_outbound_ints(acb, intmask_org); | ||
| 1867 | acb->acb_flags |= ACB_F_IOP_INITED; | ||
| 1868 | acb->acb_flags &= ~ACB_F_BUS_RESET; | ||
| 1869 | } | ||
| 1870 | break; | ||
| 1871 | case ACB_ADAPTER_TYPE_B: { | ||
| 1872 | } | ||
| 1873 | } | ||
| 1874 | } | ||
| 1875 | |||
| 1664 | if (target == 16) { | 1876 | if (target == 16) { |
| 1665 | /* virtual device for iop message transfer */ | 1877 | /* virtual device for iop message transfer */ |
| 1666 | arcmsr_handle_virtual_command(acb, cmd); | 1878 | arcmsr_handle_virtual_command(acb, cmd); |
| @@ -1699,21 +1911,25 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, | |||
| 1699 | return 0; | 1911 | return 0; |
| 1700 | } | 1912 | } |
| 1701 | 1913 | ||
| 1702 | static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) | 1914 | static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode) |
| 1703 | { | 1915 | { |
| 1704 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 1916 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
| 1705 | char *acb_firm_model = acb->firm_model; | 1917 | char *acb_firm_model = acb->firm_model; |
| 1706 | char *acb_firm_version = acb->firm_version; | 1918 | char *acb_firm_version = acb->firm_version; |
| 1919 | char *acb_device_map = acb->device_map; | ||
| 1707 | char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); | 1920 | char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); |
| 1708 | char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); | 1921 | char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); |
| 1922 | char __iomem *iop_device_map = (char __iomem *) (&reg->message_rwbuffer[21]); | ||
| 1709 | int count; | 1923 | int count; |
| 1710 | 1924 | ||
| 1711 | writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); | 1925 | writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); |
| 1712 | if (arcmsr_hba_wait_msgint_ready(acb)) { | 1926 | if (arcmsr_hba_wait_msgint_ready(acb)) { |
| 1713 | printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ | 1927 | printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ |
| 1714 | miscellaneous data' timeout \n", acb->host->host_no); | 1928 | miscellaneous data' timeout \n", acb->host->host_no); |
| 1929 | return NULL; | ||
| 1715 | } | 1930 | } |
| 1716 | 1931 | ||
| 1932 | if (mode == 1) { | ||
| 1717 | count = 8; | 1933 | count = 8; |
| 1718 | while (count) { | 1934 | while (count) { |
| 1719 | *acb_firm_model = readb(iop_firm_model); | 1935 | *acb_firm_model = readb(iop_firm_model); |
| @@ -1730,34 +1946,48 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) | |||
| 1730 | count--; | 1946 | count--; |
| 1731 | } | 1947 | } |
| 1732 | 1948 | ||
| 1949 | count = 16; | ||
| 1950 | while (count) { | ||
| 1951 | *acb_device_map = readb(iop_device_map); | ||
| 1952 | acb_device_map++; | ||
| 1953 | iop_device_map++; | ||
| 1954 | count--; | ||
| 1955 | } | ||
| 1956 | |||
| 1733 | printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n" | 1957 | printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n" |
| 1734 | , acb->host->host_no | 1958 | , acb->host->host_no |
| 1735 | , acb->firm_version); | 1959 | , acb->firm_version); |
| 1736 | 1960 | acb->signature = readl(&reg->message_rwbuffer[0]); |
| 1737 | acb->firm_request_len = readl(&reg->message_rwbuffer[1]); | 1961 | acb->firm_request_len = readl(&reg->message_rwbuffer[1]); |
| 1738 | acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]); | 1962 | acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]); |
| 1739 | acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]); | 1963 | acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]); |
| 1740 | acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]); | 1964 | acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]); |
| 1741 | } | 1965 | } |
| 1742 | 1966 | return reg->message_rwbuffer; | |
| 1743 | static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) | 1967 | } |
| 1968 | static void __iomem *arcmsr_get_hbb_config(struct AdapterControlBlock *acb, int mode) | ||
| 1744 | { | 1969 | { |
| 1745 | struct MessageUnit_B *reg = acb->pmuB; | 1970 | struct MessageUnit_B *reg = acb->pmuB; |
| 1746 | uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg; | 1971 | uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg; |
| 1747 | char *acb_firm_model = acb->firm_model; | 1972 | char *acb_firm_model = acb->firm_model; |
| 1748 | char *acb_firm_version = acb->firm_version; | 1973 | char *acb_firm_version = acb->firm_version; |
| 1974 | char *acb_device_map = acb->device_map; | ||
| 1749 | char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]); | 1975 | char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]); |
| 1750 | /*firm_model,15,60-67*/ | 1976 | /*firm_model,15,60-67*/ |
| 1751 | char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]); | 1977 | char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]); |
| 1752 | /*firm_version,17,68-83*/ | 1978 | /*firm_version,17,68-83*/ |
| 1979 | char __iomem *iop_device_map = (char __iomem *) (&lrwbuffer[21]); | ||
| 1980 | /*firm_version,21,84-99*/ | ||
| 1753 | int count; | 1981 | int count; |
| 1754 | 1982 | ||
| 1755 | writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); | 1983 | writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); |
| 1756 | if (arcmsr_hbb_wait_msgint_ready(acb)) { | 1984 | if (arcmsr_hbb_wait_msgint_ready(acb)) { |
| 1757 | printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ | 1985 | printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ |
| 1758 | miscellaneous data' timeout \n", acb->host->host_no); | 1986 | miscellaneous data' timeout \n", acb->host->host_no); |
| 1987 | return NULL; | ||
| 1759 | } | 1988 | } |
| 1760 | 1989 | ||
| 1990 | if (mode == 1) { | ||
| 1761 | count = 8; | 1991 | count = 8; |
| 1762 | while (count) | 1992 | while (count) |
| 1763 | { | 1993 | { |
| @@ -1776,11 +2006,20 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) | |||
| 1776 | count--; | 2006 | count--; |
| 1777 | } | 2007 | } |
| 1778 | 2008 | ||
| 2009 | count = 16; | ||
| 2010 | while (count) { | ||
| 2011 | *acb_device_map = readb(iop_device_map); | ||
| 2012 | acb_device_map++; | ||
| 2013 | iop_device_map++; | ||
| 2014 | count--; | ||
| 2015 | } | ||
| 2016 | |||
| 1779 | printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", | 2017 | printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", |
| 1780 | acb->host->host_no, | 2018 | acb->host->host_no, |
| 1781 | acb->firm_version); | 2019 | acb->firm_version); |
| 1782 | 2020 | ||
| 1783 | lrwbuffer++; | 2021 | acb->signature = readl(lrwbuffer++); |
| 2022 | /*firm_signature,1,00-03*/ | ||
| 1784 | acb->firm_request_len = readl(lrwbuffer++); | 2023 | acb->firm_request_len = readl(lrwbuffer++); |
| 1785 | /*firm_request_len,1,04-07*/ | 2024 | /*firm_request_len,1,04-07*/ |
| 1786 | acb->firm_numbers_queue = readl(lrwbuffer++); | 2025 | acb->firm_numbers_queue = readl(lrwbuffer++); |
| @@ -1790,20 +2029,23 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) | |||
| 1790 | acb->firm_hd_channels = readl(lrwbuffer); | 2029 | acb->firm_hd_channels = readl(lrwbuffer); |
| 1791 | /*firm_ide_channels,4,16-19*/ | 2030 | /*firm_ide_channels,4,16-19*/ |
| 1792 | } | 2031 | } |
| 1793 | 2032 | return reg->msgcode_rwbuffer_reg; | |
| 1794 | static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) | 2033 | } |
| 2034 | static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode) | ||
| 1795 | { | 2035 | { |
| 2036 | void *rtnval = 0; | ||
| 1796 | switch (acb->adapter_type) { | 2037 | switch (acb->adapter_type) { |
| 1797 | case ACB_ADAPTER_TYPE_A: { | 2038 | case ACB_ADAPTER_TYPE_A: { |
| 1798 | arcmsr_get_hba_config(acb); | 2039 | rtnval = arcmsr_get_hba_config(acb, mode); |
| 1799 | } | 2040 | } |
| 1800 | break; | 2041 | break; |
| 1801 | 2042 | ||
| 1802 | case ACB_ADAPTER_TYPE_B: { | 2043 | case ACB_ADAPTER_TYPE_B: { |
| 1803 | arcmsr_get_hbb_config(acb); | 2044 | rtnval = arcmsr_get_hbb_config(acb, mode); |
| 1804 | } | 2045 | } |
| 1805 | break; | 2046 | break; |
| 1806 | } | 2047 | } |
| 2048 | return rtnval; | ||
| 1807 | } | 2049 | } |
| 1808 | 2050 | ||
| 1809 | static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, | 2051 | static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, |
| @@ -2043,6 +2285,66 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) | |||
| 2043 | } | 2285 | } |
| 2044 | } | 2286 | } |
| 2045 | 2287 | ||
| 2288 | static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) | ||
| 2289 | { | ||
| 2290 | struct MessageUnit_A __iomem *reg = acb->pmuA; | ||
| 2291 | |||
| 2292 | if (unlikely(atomic_read(&acb->rq_map_token) == 0)) { | ||
| 2293 | acb->fw_state = false; | ||
| 2294 | } else { | ||
| 2295 | /*to prevent rq_map_token from changing by other interrupt, then | ||
| 2296 | avoid the dead-lock*/ | ||
| 2297 | acb->fw_state = true; | ||
| 2298 | atomic_dec(&acb->rq_map_token); | ||
| 2299 | if (!(acb->fw_state) || | ||
| 2300 | (acb->ante_token_value == atomic_read(&acb->rq_map_token))) { | ||
| 2301 | atomic_set(&acb->rq_map_token, 16); | ||
| 2302 | } | ||
| 2303 | acb->ante_token_value = atomic_read(&acb->rq_map_token); | ||
| 2304 | writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); | ||
| 2305 | } | ||
| 2306 | mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000)); | ||
| 2307 | return; | ||
| 2308 | } | ||
| 2309 | |||
| 2310 | static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb) | ||
| 2311 | { | ||
| 2312 | struct MessageUnit_B __iomem *reg = acb->pmuB; | ||
| 2313 | |||
| 2314 | if (unlikely(atomic_read(&acb->rq_map_token) == 0)) { | ||
| 2315 | acb->fw_state = false; | ||
| 2316 | } else { | ||
| 2317 | /* prevent rq_map_token from being changed by another interrupt, | ||
| 2318 | which would otherwise risk a deadlock */ | ||
| 2319 | acb->fw_state = true; | ||
| 2320 | atomic_dec(&acb->rq_map_token); | ||
| 2321 | if (!(acb->fw_state) || | ||
| 2322 | (acb->ante_token_value == atomic_read(&acb->rq_map_token))) { | ||
| 2323 | atomic_set(&acb->rq_map_token, 16); | ||
| 2324 | } | ||
| 2325 | acb->ante_token_value = atomic_read(&acb->rq_map_token); | ||
| 2326 | writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); | ||
| 2327 | } | ||
| 2328 | mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000)); | ||
| 2329 | return; | ||
| 2330 | } | ||
| 2331 | |||
| 2332 | static void arcmsr_request_device_map(unsigned long pacb) | ||
| 2333 | { | ||
| 2334 | struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb; | ||
| 2335 | |||
| 2336 | switch (acb->adapter_type) { | ||
| 2337 | case ACB_ADAPTER_TYPE_A: { | ||
| 2338 | arcmsr_request_hba_device_map(acb); | ||
| 2339 | } | ||
| 2340 | break; | ||
| 2341 | case ACB_ADAPTER_TYPE_B: { | ||
| 2342 | arcmsr_request_hbb_device_map(acb); | ||
| 2343 | } | ||
| 2344 | break; | ||
| 2345 | } | ||
| 2346 | } | ||
| 2347 | |||
| 2046 | static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) | 2348 | static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) |
| 2047 | { | 2349 | { |
| 2048 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 2350 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
| @@ -2121,6 +2423,60 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) | |||
| 2121 | return; | 2423 | return; |
| 2122 | } | 2424 | } |
| 2123 | 2425 | ||
| 2426 | static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) | ||
| 2427 | { | ||
| 2428 | uint8_t value[64]; | ||
| 2429 | int i; | ||
| 2430 | |||
| 2431 | /* backup pci config data */ | ||
| 2432 | for (i = 0; i < 64; i++) { | ||
| 2433 | pci_read_config_byte(acb->pdev, i, &value[i]); | ||
| 2434 | } | ||
| 2435 | /* hardware reset signal */ | ||
| 2436 | pci_write_config_byte(acb->pdev, 0x84, 0x20); | ||
| 2437 | msleep(1000); | ||
| 2438 | /* write back pci config data */ | ||
| 2439 | for (i = 0; i < 64; i++) { | ||
| 2440 | pci_write_config_byte(acb->pdev, i, value[i]); | ||
| 2441 | } | ||
| 2442 | msleep(1000); | ||
| 2443 | return; | ||
| 2444 | } | ||
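arcmsr_hardware_reset() saves the first 64 bytes of PCI configuration space byte-by-byte, pokes the adapter-specific reset bit at config offset 0x84, waits a second, then writes the saved bytes back so the device keeps its BARs and command register across the reset. A rough sketch of that save/kick/restore shape (pci_read_config_byte()/pci_write_config_byte() are stubbed; offset 0x84 and value 0x20 come from the patch, not from a public spec):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for pci_read_config_byte()/pci_write_config_byte(). */
static uint8_t fake_cfg_space[256];

static void cfg_read(int off, uint8_t *val)  { *val = fake_cfg_space[off]; }
static void cfg_write(int off, uint8_t val)  { fake_cfg_space[off] = val; }
static void sleep_ms(int ms)                 { (void)ms; /* msleep() in the driver */ }

static void hardware_reset_sketch(void)
{
	uint8_t saved[64];
	int i;

	for (i = 0; i < 64; i++)          /* back up the config header */
		cfg_read(i, &saved[i]);

	cfg_write(0x84, 0x20);            /* vendor-specific reset strobe */
	sleep_ms(1000);

	for (i = 0; i < 64; i++)          /* restore what the reset clobbered */
		cfg_write(i, saved[i]);
	sleep_ms(1000);
}

int main(void)
{
	memset(fake_cfg_space, 0xab, sizeof(fake_cfg_space));
	hardware_reset_sketch();
	printf("config byte 0 after restore: 0x%02x\n", fake_cfg_space[0]);
	return 0;
}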
| 2445 | /* | ||
| 2446 | **************************************************************************** | ||
| 2447 | **************************************************************************** | ||
| 2448 | */ | ||
| 2449 | #ifdef CONFIG_SCSI_ARCMSR_RESET | ||
| 2450 | int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd) | ||
| 2451 | { | ||
| 2452 | struct Scsi_Host *shost = NULL; | ||
| 2453 | spinlock_t *host_lock = NULL; | ||
| 2454 | int i, isleep; | ||
| 2455 | |||
| 2456 | shost = cmd->device->host; | ||
| 2457 | host_lock = shost->host_lock; | ||
| 2458 | |||
| 2459 | printk(KERN_NOTICE "Host %d bus reset over, sleep %d seconds (busy %d, can queue %d) ...........\n", | ||
| 2460 | shost->host_no, sleeptime, shost->host_busy, shost->can_queue); | ||
| 2461 | isleep = sleeptime / 10; | ||
| 2462 | spin_unlock_irq(host_lock); | ||
| 2463 | if (isleep > 0) { | ||
| 2464 | for (i = 0; i < isleep; i++) { | ||
| 2465 | msleep(10000); | ||
| 2466 | printk(KERN_NOTICE "^%d^\n", i); | ||
| 2467 | } | ||
| 2468 | } | ||
| 2469 | |||
| 2470 | isleep = sleeptime % 10; | ||
| 2471 | if (isleep > 0) { | ||
| 2472 | msleep(isleep * 1000); | ||
| 2473 | printk(KERN_NOTICE "^v^\n"); | ||
| 2474 | } | ||
| 2475 | spin_lock_irq(host_lock); | ||
| 2476 | printk(KERN_NOTICE "***** wake up *****\n"); | ||
| 2477 | return 0; | ||
| 2478 | } | ||
| 2479 | #endif | ||
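arcmsr_sleep_for_bus_reset() drops the host lock and sleeps for the module-parameter sleeptime in 10-second chunks, plus one leftover chunk for the remainder, so the SCSI midlayer is not stalled under the lock while the adapter reboots. The chunking itself is just integer division and modulo; a small sketch, with sleeptime assumed to be the driver's module parameter:

#include <stdio.h>

/* sleeptime mirrors the driver's module parameter (seconds). */
static int sleeptime = 25;

static void sleep_in_chunks(void)
{
	int chunks = sleeptime / 10;     /* full 10-second chunks */
	int rest   = sleeptime % 10;     /* remainder, in seconds */
	int i;

	for (i = 0; i < chunks; i++)
		printf("msleep(10000);  /* chunk %d */\n", i);
	if (rest > 0)
		printf("msleep(%d);\n", rest * 1000);
}

int main(void)
{
	sleep_in_chunks();               /* prints the planned sleep sequence */
	return 0;
}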
| 2124 | static void arcmsr_iop_init(struct AdapterControlBlock *acb) | 2480 | static void arcmsr_iop_init(struct AdapterControlBlock *acb) |
| 2125 | { | 2481 | { |
| 2126 | uint32_t intmask_org; | 2482 | uint32_t intmask_org; |
| @@ -2129,7 +2485,7 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb) | |||
| 2129 | intmask_org = arcmsr_disable_outbound_ints(acb); | 2485 | intmask_org = arcmsr_disable_outbound_ints(acb); |
| 2130 | arcmsr_wait_firmware_ready(acb); | 2486 | arcmsr_wait_firmware_ready(acb); |
| 2131 | arcmsr_iop_confirm(acb); | 2487 | arcmsr_iop_confirm(acb); |
| 2132 | arcmsr_get_firmware_spec(acb); | 2488 | arcmsr_get_firmware_spec(acb, 1); |
| 2133 | /*start background rebuild*/ | 2489 | /*start background rebuild*/ |
| 2134 | arcmsr_start_adapter_bgrb(acb); | 2490 | arcmsr_start_adapter_bgrb(acb); |
| 2135 | /* empty doorbell Qbuffer if door bell ringed */ | 2491 | /* empty doorbell Qbuffer if door bell ringed */ |
| @@ -2140,51 +2496,110 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb) | |||
| 2140 | acb->acb_flags |= ACB_F_IOP_INITED; | 2496 | acb->acb_flags |= ACB_F_IOP_INITED; |
| 2141 | } | 2497 | } |
| 2142 | 2498 | ||
| 2143 | static void arcmsr_iop_reset(struct AdapterControlBlock *acb) | 2499 | static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) |
| 2144 | { | 2500 | { |
| 2145 | struct CommandControlBlock *ccb; | 2501 | struct CommandControlBlock *ccb; |
| 2146 | uint32_t intmask_org; | 2502 | uint32_t intmask_org; |
| 2503 | uint8_t rtnval = 0x00; | ||
| 2147 | int i = 0; | 2504 | int i = 0; |
| 2148 | 2505 | ||
| 2149 | if (atomic_read(&acb->ccboutstandingcount) != 0) { | 2506 | if (atomic_read(&acb->ccboutstandingcount) != 0) { |
| 2507 | /* disable all outbound interrupt */ | ||
| 2508 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
| 2150 | /* talk to iop 331 outstanding command aborted */ | 2509 | /* talk to iop 331 outstanding command aborted */ |
| 2151 | arcmsr_abort_allcmd(acb); | 2510 | rtnval = arcmsr_abort_allcmd(acb); |
| 2152 | |||
| 2153 | /* wait for 3 sec for all command aborted*/ | 2511 | /* wait for 3 sec for all command aborted*/ |
| 2154 | ssleep(3); | 2512 | ssleep(3); |
| 2155 | |||
| 2156 | /* disable all outbound interrupt */ | ||
| 2157 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
| 2158 | /* clear all outbound posted Q */ | 2513 | /* clear all outbound posted Q */ |
| 2159 | arcmsr_done4abort_postqueue(acb); | 2514 | arcmsr_done4abort_postqueue(acb); |
| 2160 | for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { | 2515 | for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { |
| 2161 | ccb = acb->pccb_pool[i]; | 2516 | ccb = acb->pccb_pool[i]; |
| 2162 | if (ccb->startdone == ARCMSR_CCB_START) { | 2517 | if (ccb->startdone == ARCMSR_CCB_START) { |
| 2163 | ccb->startdone = ARCMSR_CCB_ABORTED; | ||
| 2164 | arcmsr_ccb_complete(ccb, 1); | 2518 | arcmsr_ccb_complete(ccb, 1); |
| 2165 | } | 2519 | } |
| 2166 | } | 2520 | } |
| 2521 | atomic_set(&acb->ccboutstandingcount, 0); | ||
| 2167 | /* enable all outbound interrupt */ | 2522 | /* enable all outbound interrupt */ |
| 2168 | arcmsr_enable_outbound_ints(acb, intmask_org); | 2523 | arcmsr_enable_outbound_ints(acb, intmask_org); |
| 2524 | return rtnval; | ||
| 2169 | } | 2525 | } |
| 2526 | return rtnval; | ||
| 2170 | } | 2527 | } |
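arcmsr_iop_reset() now returns the result of arcmsr_abort_allcmd() and masks outbound interrupts before the wait, so no completion races the clean-up; every CCB still marked ARCMSR_CCB_START is completed back to the midlayer and the outstanding count is forced to zero. A hedged sketch of that drain loop over a CCB pool (types and flags are simplified stand-ins):

#include <stdio.h>

#define POOL_SIZE 8

enum ccb_state { CCB_FREE, CCB_START, CCB_DONE };

struct ccb {
	enum ccb_state startdone;
};

static int drain_outstanding(struct ccb *pool, int n, int *outstanding)
{
	int completed = 0;

	for (int i = 0; i < n; i++) {
		if (pool[i].startdone == CCB_START) {
			pool[i].startdone = CCB_DONE;   /* arcmsr_ccb_complete(ccb, 1) */
			completed++;
		}
	}
	*outstanding = 0;   /* atomic_set(&acb->ccboutstandingcount, 0) */
	return completed;
}

int main(void)
{
	struct ccb pool[POOL_SIZE] = { [1].startdone = CCB_START,
	                               [5].startdone = CCB_START };
	int outstanding = 2;
	int done = drain_outstanding(pool, POOL_SIZE, &outstanding);

	printf("completed %d ccbs, outstanding now %d\n", done, outstanding);
	return 0;
}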
| 2171 | 2528 | ||
| 2172 | static int arcmsr_bus_reset(struct scsi_cmnd *cmd) | 2529 | static int arcmsr_bus_reset(struct scsi_cmnd *cmd) |
| 2173 | { | 2530 | { |
| 2174 | struct AdapterControlBlock *acb = | 2531 | struct AdapterControlBlock *acb = |
| 2175 | (struct AdapterControlBlock *)cmd->device->host->hostdata; | 2532 | (struct AdapterControlBlock *)cmd->device->host->hostdata; |
| 2176 | int i; | 2533 | int retry = 0; |
| 2177 | 2534 | ||
| 2178 | acb->num_resets++; | 2535 | if (acb->acb_flags & ACB_F_BUS_RESET) |
| 2536 | return SUCCESS; | ||
| 2537 | |||
| 2538 | printk(KERN_NOTICE "arcmsr%d: bus reset ..... \n", acb->adapter_index); | ||
| 2179 | acb->acb_flags |= ACB_F_BUS_RESET; | 2539 | acb->acb_flags |= ACB_F_BUS_RESET; |
| 2180 | for (i = 0; i < 400; i++) { | 2540 | acb->num_resets++; |
| 2181 | if (!atomic_read(&acb->ccboutstandingcount)) | 2541 | while (atomic_read(&acb->ccboutstandingcount) != 0 && retry < 4) { |
| 2542 | arcmsr_interrupt(acb); | ||
| 2543 | retry++; | ||
| 2544 | } | ||
| 2545 | |||
| 2546 | if (arcmsr_iop_reset(acb)) { | ||
| 2547 | switch (acb->adapter_type) { | ||
| 2548 | case ACB_ADAPTER_TYPE_A: { | ||
| 2549 | printk(KERN_NOTICE "arcmsr%d: do hardware bus reset, num_resets = %d num_aborts = %d \n", | ||
| 2550 | acb->adapter_index, acb->num_resets, acb->num_aborts); | ||
| 2551 | arcmsr_hardware_reset(acb); | ||
| 2552 | acb->acb_flags |= ACB_F_FIRMWARE_TRAP; | ||
| 2553 | acb->acb_flags &= ~ACB_F_IOP_INITED; | ||
| 2554 | #ifdef CONFIG_SCSI_ARCMSR_RESET | ||
| 2555 | struct MessageUnit_A __iomem *reg = acb->pmuA; | ||
| 2556 | uint32_t intmask_org, outbound_doorbell; | ||
| 2557 | int retry_count = 0; | ||
| 2558 | sleep_again: | ||
| 2559 | arcmsr_sleep_for_bus_reset(cmd); | ||
| 2560 | if ((readl(®->outbound_msgaddr1) & | ||
| 2561 | ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { | ||
| 2562 | printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry=%d \n", | ||
| 2563 | acb->host->host_no, retry_count); | ||
| 2564 | if (retry_count > retrycount) { | ||
| 2565 | printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry aborted \n", | ||
| 2566 | acb->host->host_no); | ||
| 2567 | return SUCCESS; | ||
| 2568 | } | ||
| 2569 | retry_count++; | ||
| 2570 | goto sleep_again; | ||
| 2571 | } | ||
| 2572 | acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP; | ||
| 2573 | acb->acb_flags |= ACB_F_IOP_INITED; | ||
| 2574 | acb->acb_flags &= ~ACB_F_BUS_RESET; | ||
| 2575 | printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok \n", | ||
| 2576 | acb->host->host_no); | ||
| 2577 | /* disable all outbound interrupt */ | ||
| 2578 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
| 2579 | arcmsr_get_firmware_spec(acb, 1); | ||
| 2580 | /*start background rebuild*/ | ||
| 2581 | arcmsr_start_adapter_bgrb(acb); | ||
| 2582 | /* clear Qbuffer if door bell ringed */ | ||
| 2583 | outbound_doorbell = readl(®->outbound_doorbell); | ||
| 2584 | writel(outbound_doorbell, ®->outbound_doorbell); /*clear interrupt */ | ||
| 2585 | writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); | ||
| 2586 | /* enable outbound Post Queue,outbound doorbell Interrupt */ | ||
| 2587 | arcmsr_enable_outbound_ints(acb, intmask_org); | ||
| 2588 | atomic_set(&acb->rq_map_token, 16); | ||
| 2589 | init_timer(&acb->eternal_timer); | ||
| 2590 | acb->eternal_timer.expires = jiffies + msecs_to_jiffies(20*HZ); | ||
| 2591 | acb->eternal_timer.data = (unsigned long) acb; | ||
| 2592 | acb->eternal_timer.function = &arcmsr_request_device_map; | ||
| 2593 | add_timer(&acb->eternal_timer); | ||
| 2594 | #endif | ||
| 2595 | } | ||
| 2182 | break; | 2596 | break; |
| 2183 | arcmsr_interrupt(acb);/* FIXME: need spinlock */ | 2597 | case ACB_ADAPTER_TYPE_B: { |
| 2184 | msleep(25); | ||
| 2185 | } | 2598 | } |
| 2186 | arcmsr_iop_reset(acb); | 2599 | } |
| 2600 | } else { | ||
| 2187 | acb->acb_flags &= ~ACB_F_BUS_RESET; | 2601 | acb->acb_flags &= ~ACB_F_BUS_RESET; |
| 2602 | } | ||
| 2188 | return SUCCESS; | 2603 | return SUCCESS; |
| 2189 | } | 2604 | } |
| 2190 | 2605 | ||
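The rewritten arcmsr_bus_reset() first guards against re-entry with ACB_F_BUS_RESET, then, when arcmsr_iop_reset() reports stuck commands, performs the hardware reset and polls the outbound message register for ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK, sleeping between attempts and giving up once the retrycount module parameter is exceeded. A sketch of that bounded poll-and-sleep loop (register access and sleeping are stubbed; the FIRMWARE_OK bit value below is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define FIRMWARE_OK 0x80000000u   /* illustrative bit, not the real mask value */

static int retrycount = 12;       /* mirrors the driver's module parameter */

/* Pretend the firmware comes back on the 3rd poll. */
static unsigned int read_outbound_msg(void)
{
	static int polls;
	return (++polls >= 3) ? FIRMWARE_OK : 0;
}

static bool wait_firmware_ok(void)
{
	int retry = 0;

	while (!(read_outbound_msg() & FIRMWARE_OK)) {
		if (retry++ > retrycount)
			return false;             /* give up, report and bail out */
		printf("firmware still down, retry=%d, sleeping...\n", retry);
	}
	return true;                          /* safe to re-init the adapter */
}

int main(void)
{
	printf(wait_firmware_ok() ? "firmware ok, re-init adapter\n"
	                          : "retries exhausted\n");
	return 0;
}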
| @@ -2277,98 +2692,3 @@ static const char *arcmsr_info(struct Scsi_Host *host) | |||
| 2277 | ARCMSR_DRIVER_VERSION); | 2692 | ARCMSR_DRIVER_VERSION); |
| 2278 | return buf; | 2693 | return buf; |
| 2279 | } | 2694 | } |
| 2280 | #ifdef CONFIG_SCSI_ARCMSR_AER | ||
| 2281 | static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev) | ||
| 2282 | { | ||
| 2283 | struct Scsi_Host *host = pci_get_drvdata(pdev); | ||
| 2284 | struct AdapterControlBlock *acb = | ||
| 2285 | (struct AdapterControlBlock *) host->hostdata; | ||
| 2286 | uint32_t intmask_org; | ||
| 2287 | int i, j; | ||
| 2288 | |||
| 2289 | if (pci_enable_device(pdev)) { | ||
| 2290 | return PCI_ERS_RESULT_DISCONNECT; | ||
| 2291 | } | ||
| 2292 | pci_set_master(pdev); | ||
| 2293 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
| 2294 | acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | | ||
| 2295 | ACB_F_MESSAGE_RQBUFFER_CLEARED | | ||
| 2296 | ACB_F_MESSAGE_WQBUFFER_READED); | ||
| 2297 | acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; | ||
| 2298 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) | ||
| 2299 | for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) | ||
| 2300 | acb->devstate[i][j] = ARECA_RAID_GONE; | ||
| 2301 | |||
| 2302 | arcmsr_wait_firmware_ready(acb); | ||
| 2303 | arcmsr_iop_confirm(acb); | ||
| 2304 | /* disable all outbound interrupt */ | ||
| 2305 | arcmsr_get_firmware_spec(acb); | ||
| 2306 | /*start background rebuild*/ | ||
| 2307 | arcmsr_start_adapter_bgrb(acb); | ||
| 2308 | /* empty doorbell Qbuffer if door bell ringed */ | ||
| 2309 | arcmsr_clear_doorbell_queue_buffer(acb); | ||
| 2310 | arcmsr_enable_eoi_mode(acb); | ||
| 2311 | /* enable outbound Post Queue,outbound doorbell Interrupt */ | ||
| 2312 | arcmsr_enable_outbound_ints(acb, intmask_org); | ||
| 2313 | acb->acb_flags |= ACB_F_IOP_INITED; | ||
| 2314 | |||
| 2315 | pci_enable_pcie_error_reporting(pdev); | ||
| 2316 | return PCI_ERS_RESULT_RECOVERED; | ||
| 2317 | } | ||
| 2318 | |||
| 2319 | static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev) | ||
| 2320 | { | ||
| 2321 | struct Scsi_Host *host = pci_get_drvdata(pdev); | ||
| 2322 | struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; | ||
| 2323 | struct CommandControlBlock *ccb; | ||
| 2324 | uint32_t intmask_org; | ||
| 2325 | int i = 0; | ||
| 2326 | |||
| 2327 | if (atomic_read(&acb->ccboutstandingcount) != 0) { | ||
| 2328 | /* talk to iop 331 outstanding command aborted */ | ||
| 2329 | arcmsr_abort_allcmd(acb); | ||
| 2330 | /* wait for 3 sec for all command aborted*/ | ||
| 2331 | ssleep(3); | ||
| 2332 | /* disable all outbound interrupt */ | ||
| 2333 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
| 2334 | /* clear all outbound posted Q */ | ||
| 2335 | arcmsr_done4abort_postqueue(acb); | ||
| 2336 | for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { | ||
| 2337 | ccb = acb->pccb_pool[i]; | ||
| 2338 | if (ccb->startdone == ARCMSR_CCB_START) { | ||
| 2339 | ccb->startdone = ARCMSR_CCB_ABORTED; | ||
| 2340 | arcmsr_ccb_complete(ccb, 1); | ||
| 2341 | } | ||
| 2342 | } | ||
| 2343 | /* enable all outbound interrupt */ | ||
| 2344 | arcmsr_enable_outbound_ints(acb, intmask_org); | ||
| 2345 | } | ||
| 2346 | pci_disable_device(pdev); | ||
| 2347 | } | ||
| 2348 | |||
| 2349 | static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev) | ||
| 2350 | { | ||
| 2351 | struct Scsi_Host *host = pci_get_drvdata(pdev); | ||
| 2352 | struct AdapterControlBlock *acb = \ | ||
| 2353 | (struct AdapterControlBlock *)host->hostdata; | ||
| 2354 | |||
| 2355 | arcmsr_stop_adapter_bgrb(acb); | ||
| 2356 | arcmsr_flush_adapter_cache(acb); | ||
| 2357 | } | ||
| 2358 | |||
| 2359 | static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev, | ||
| 2360 | pci_channel_state_t state) | ||
| 2361 | { | ||
| 2362 | switch (state) { | ||
| 2363 | case pci_channel_io_frozen: | ||
| 2364 | arcmsr_pci_ers_need_reset_forepart(pdev); | ||
| 2365 | return PCI_ERS_RESULT_NEED_RESET; | ||
| 2366 | case pci_channel_io_perm_failure: | ||
| 2367 | arcmsr_pci_ers_disconnect_forepart(pdev); | ||
| 2368 | return PCI_ERS_RESULT_DISCONNECT; | ||
| 2369 | break; | ||
| 2370 | default: | ||
| 2371 | return PCI_ERS_RESULT_NEED_RESET; | ||
| 2372 | } | ||
| 2373 | } | ||
| 2374 | #endif | ||
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index e641922f20bc..350cbeaae160 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
| @@ -167,10 +167,9 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, | |||
| 167 | &nonemb_cmd.dma); | 167 | &nonemb_cmd.dma); |
| 168 | if (nonemb_cmd.va == NULL) { | 168 | if (nonemb_cmd.va == NULL) { |
| 169 | SE_DEBUG(DBG_LVL_1, | 169 | SE_DEBUG(DBG_LVL_1, |
| 170 | "Failed to allocate memory for" | 170 | "Failed to allocate memory for mgmt_invalidate_icds\n"); |
| 171 | "mgmt_invalidate_icds \n"); | ||
| 172 | spin_unlock(&ctrl->mbox_lock); | 171 | spin_unlock(&ctrl->mbox_lock); |
| 173 | return -1; | 172 | return 0; |
| 174 | } | 173 | } |
| 175 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); | 174 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); |
| 176 | req = nonemb_cmd.va; | 175 | req = nonemb_cmd.va; |
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 0c08e185a766..3a7b3f88932f 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c | |||
| @@ -84,11 +84,32 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) | |||
| 84 | for (i = 0; hal_mods[i]; i++) | 84 | for (i = 0; hal_mods[i]; i++) |
| 85 | hal_mods[i]->meminfo(cfg, &km_len, &dm_len); | 85 | hal_mods[i]->meminfo(cfg, &km_len, &dm_len); |
| 86 | 86 | ||
| 87 | dm_len += bfa_port_meminfo(); | ||
| 87 | 88 | ||
| 88 | meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; | 89 | meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; |
| 89 | meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; | 90 | meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; |
| 90 | } | 91 | } |
| 91 | 92 | ||
| 93 | static void | ||
| 94 | bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) | ||
| 95 | { | ||
| 96 | struct bfa_port_s *port = &bfa->modules.port; | ||
| 97 | uint32_t dm_len; | ||
| 98 | uint8_t *dm_kva; | ||
| 99 | uint64_t dm_pa; | ||
| 100 | |||
| 101 | dm_len = bfa_port_meminfo(); | ||
| 102 | dm_kva = bfa_meminfo_dma_virt(mi); | ||
| 103 | dm_pa = bfa_meminfo_dma_phys(mi); | ||
| 104 | |||
| 105 | memset(port, 0, sizeof(struct bfa_port_s)); | ||
| 106 | bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm); | ||
| 107 | bfa_port_mem_claim(port, dm_kva, dm_pa); | ||
| 108 | |||
| 109 | bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; | ||
| 110 | bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; | ||
| 111 | } | ||
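bfa_com_port_attach() carves the port module's DMA area out of the shared meminfo pool: it asks bfa_port_meminfo() for the length, claims the current virtual/physical cursor, and advances both cursors by that length so the next module attaches behind it (the matching dm_len is accounted for in bfa_cfg_get_meminfo() above). A small sketch of that bump-allocator pattern with stand-in types:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the DMA half of struct bfa_meminfo_s. */
struct meminfo {
	uint8_t  *dma_kva;   /* next free kernel virtual address */
	uint64_t  dma_pa;    /* matching physical address */
};

/* Claim 'len' bytes and advance both cursors, like bfa_com_port_attach(). */
static void *claim_dma(struct meminfo *mi, uint32_t len, uint64_t *pa)
{
	void *kva = mi->dma_kva;

	*pa = mi->dma_pa;
	mi->dma_kva += len;
	mi->dma_pa  += len;
	return kva;
}

int main(void)
{
	uint8_t pool[4096];
	struct meminfo mi = { .dma_kva = pool, .dma_pa = 0x10000000ull };
	uint64_t port_pa;
	void *port_kva = claim_dma(&mi, 512, &port_pa);

	printf("port mem at %p / 0x%llx, next free 0x%llx\n",
	       port_kva, (unsigned long long)port_pa,
	       (unsigned long long)mi.dma_pa);
	return 0;
}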
| 112 | |||
| 92 | /** | 113 | /** |
| 93 | * Use this function to do attach the driver instance with the BFA | 114 | * Use this function to do attach the driver instance with the BFA |
| 94 | * library. This function will not trigger any HW initialization | 115 | * library. This function will not trigger any HW initialization |
| @@ -140,6 +161,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
| 140 | for (i = 0; hal_mods[i]; i++) | 161 | for (i = 0; hal_mods[i]; i++) |
| 141 | hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); | 162 | hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); |
| 142 | 163 | ||
| 164 | bfa_com_port_attach(bfa, meminfo); | ||
| 143 | } | 165 | } |
| 144 | 166 | ||
| 145 | /** | 167 | /** |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 6a6661c35b2f..82ea4a8226b0 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
| @@ -567,7 +567,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, | |||
| 567 | static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) | 567 | static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) |
| 568 | { | 568 | { |
| 569 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 569 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
| 570 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 570 | struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; |
| 571 | struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; | ||
| 571 | dma_addr_t dma_addr = ipr_cmd->dma_addr; | 572 | dma_addr_t dma_addr = ipr_cmd->dma_addr; |
| 572 | 573 | ||
| 573 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); | 574 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); |
| @@ -576,19 +577,19 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) | |||
| 576 | ioarcb->ioadl_len = 0; | 577 | ioarcb->ioadl_len = 0; |
| 577 | ioarcb->read_ioadl_len = 0; | 578 | ioarcb->read_ioadl_len = 0; |
| 578 | 579 | ||
| 579 | if (ipr_cmd->ioa_cfg->sis64) | 580 | if (ipr_cmd->ioa_cfg->sis64) { |
| 580 | ioarcb->u.sis64_addr_data.data_ioadl_addr = | 581 | ioarcb->u.sis64_addr_data.data_ioadl_addr = |
| 581 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); | 582 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); |
| 582 | else { | 583 | ioasa64->u.gata.status = 0; |
| 584 | } else { | ||
| 583 | ioarcb->write_ioadl_addr = | 585 | ioarcb->write_ioadl_addr = |
| 584 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); | 586 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); |
| 585 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; | 587 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; |
| 588 | ioasa->u.gata.status = 0; | ||
| 586 | } | 589 | } |
| 587 | 590 | ||
| 588 | ioasa->ioasc = 0; | 591 | ioasa->hdr.ioasc = 0; |
| 589 | ioasa->residual_data_len = 0; | 592 | ioasa->hdr.residual_data_len = 0; |
| 590 | ioasa->u.gata.status = 0; | ||
| 591 | |||
| 592 | ipr_cmd->scsi_cmd = NULL; | 593 | ipr_cmd->scsi_cmd = NULL; |
| 593 | ipr_cmd->qc = NULL; | 594 | ipr_cmd->qc = NULL; |
| 594 | ipr_cmd->sense_buffer[0] = 0; | 595 | ipr_cmd->sense_buffer[0] = 0; |
| @@ -768,8 +769,8 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) | |||
| 768 | list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) { | 769 | list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) { |
| 769 | list_del(&ipr_cmd->queue); | 770 | list_del(&ipr_cmd->queue); |
| 770 | 771 | ||
| 771 | ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); | 772 | ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); |
| 772 | ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID); | 773 | ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID); |
| 773 | 774 | ||
| 774 | if (ipr_cmd->scsi_cmd) | 775 | if (ipr_cmd->scsi_cmd) |
| 775 | ipr_cmd->done = ipr_scsi_eh_done; | 776 | ipr_cmd->done = ipr_scsi_eh_done; |
| @@ -1040,7 +1041,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, | |||
| 1040 | proto = cfgtew->u.cfgte64->proto; | 1041 | proto = cfgtew->u.cfgte64->proto; |
| 1041 | res->res_flags = cfgtew->u.cfgte64->res_flags; | 1042 | res->res_flags = cfgtew->u.cfgte64->res_flags; |
| 1042 | res->qmodel = IPR_QUEUEING_MODEL64(res); | 1043 | res->qmodel = IPR_QUEUEING_MODEL64(res); |
| 1043 | res->type = cfgtew->u.cfgte64->res_type & 0x0f; | 1044 | res->type = cfgtew->u.cfgte64->res_type; |
| 1044 | 1045 | ||
| 1045 | memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, | 1046 | memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, |
| 1046 | sizeof(res->res_path)); | 1047 | sizeof(res->res_path)); |
| @@ -1319,7 +1320,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) | |||
| 1319 | { | 1320 | { |
| 1320 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 1321 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
| 1321 | struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; | 1322 | struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; |
| 1322 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 1323 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 1323 | 1324 | ||
| 1324 | list_del(&hostrcb->queue); | 1325 | list_del(&hostrcb->queue); |
| 1325 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); | 1326 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); |
| @@ -2354,7 +2355,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) | |||
| 2354 | { | 2355 | { |
| 2355 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 2356 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
| 2356 | struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; | 2357 | struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; |
| 2357 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 2358 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 2358 | u32 fd_ioasc; | 2359 | u32 fd_ioasc; |
| 2359 | 2360 | ||
| 2360 | if (ioa_cfg->sis64) | 2361 | if (ioa_cfg->sis64) |
| @@ -4509,11 +4510,16 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, | |||
| 4509 | } | 4510 | } |
| 4510 | 4511 | ||
| 4511 | ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); | 4512 | ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); |
| 4512 | ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 4513 | ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 4513 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); | 4514 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); |
| 4514 | if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) | 4515 | if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { |
| 4515 | memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata, | 4516 | if (ipr_cmd->ioa_cfg->sis64) |
| 4516 | sizeof(struct ipr_ioasa_gata)); | 4517 | memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, |
| 4518 | sizeof(struct ipr_ioasa_gata)); | ||
| 4519 | else | ||
| 4520 | memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, | ||
| 4521 | sizeof(struct ipr_ioasa_gata)); | ||
| 4522 | } | ||
| 4517 | 4523 | ||
| 4518 | LEAVE; | 4524 | LEAVE; |
| 4519 | return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0); | 4525 | return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0); |
| @@ -4768,7 +4774,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) | |||
| 4768 | scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", | 4774 | scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", |
| 4769 | scsi_cmd->cmnd[0]); | 4775 | scsi_cmd->cmnd[0]); |
| 4770 | ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); | 4776 | ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); |
| 4771 | ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 4777 | ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 4772 | 4778 | ||
| 4773 | /* | 4779 | /* |
| 4774 | * If the abort task timed out and we sent a bus reset, we will get | 4780 | * If the abort task timed out and we sent a bus reset, we will get |
| @@ -4812,15 +4818,39 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd) | |||
| 4812 | /** | 4818 | /** |
| 4813 | * ipr_handle_other_interrupt - Handle "other" interrupts | 4819 | * ipr_handle_other_interrupt - Handle "other" interrupts |
| 4814 | * @ioa_cfg: ioa config struct | 4820 | * @ioa_cfg: ioa config struct |
| 4815 | * @int_reg: interrupt register | ||
| 4816 | * | 4821 | * |
| 4817 | * Return value: | 4822 | * Return value: |
| 4818 | * IRQ_NONE / IRQ_HANDLED | 4823 | * IRQ_NONE / IRQ_HANDLED |
| 4819 | **/ | 4824 | **/ |
| 4820 | static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, | 4825 | static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg) |
| 4821 | volatile u32 int_reg) | ||
| 4822 | { | 4826 | { |
| 4823 | irqreturn_t rc = IRQ_HANDLED; | 4827 | irqreturn_t rc = IRQ_HANDLED; |
| 4828 | volatile u32 int_reg, int_mask_reg; | ||
| 4829 | |||
| 4830 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); | ||
| 4831 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; | ||
| 4832 | |||
| 4833 | /* If an interrupt on the adapter did not occur, ignore it. | ||
| 4834 | * Or in the case of SIS 64, check for a stage change interrupt. | ||
| 4835 | */ | ||
| 4836 | if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { | ||
| 4837 | if (ioa_cfg->sis64) { | ||
| 4838 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | ||
| 4839 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
| 4840 | if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { | ||
| 4841 | |||
| 4842 | /* clear stage change */ | ||
| 4843 | writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); | ||
| 4844 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
| 4845 | list_del(&ioa_cfg->reset_cmd->queue); | ||
| 4846 | del_timer(&ioa_cfg->reset_cmd->timer); | ||
| 4847 | ipr_reset_ioa_job(ioa_cfg->reset_cmd); | ||
| 4848 | return IRQ_HANDLED; | ||
| 4849 | } | ||
| 4850 | } | ||
| 4851 | |||
| 4852 | return IRQ_NONE; | ||
| 4853 | } | ||
| 4824 | 4854 | ||
| 4825 | if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { | 4855 | if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { |
| 4826 | /* Mask the interrupt */ | 4856 | /* Mask the interrupt */ |
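Reading and masking the interrupt register moves out of ipr_isr() and into ipr_handle_other_interrupt(): the handler re-reads sense_interrupt_reg against the mask register, and on SIS-64 adapters treats an IPL stage-change bit as a reason to resume the reset job instead of returning IRQ_NONE. A hedged sketch of the mask-and-classify step (register reads are simulated and the bit values are illustrative, not the real IPR_PCII_* constants):

#include <stdint.h>
#include <stdio.h>

#define OPER_INTERRUPTS  0x0000ff00u   /* illustrative bit groups */
#define IPL_STAGE_CHANGE 0x00000001u

enum irq_result { IRQ_NONE, IRQ_HANDLED };

static enum irq_result classify_other_interrupt(uint32_t int_reg,
						uint32_t int_mask_reg,
						int sis64)
{
	uint32_t pending = int_reg & ~int_mask_reg;   /* drop masked sources */

	if (!(pending & OPER_INTERRUPTS)) {
		/* SIS-64 only: a stage change during IPL is still ours */
		if (sis64 && (pending & IPL_STAGE_CHANGE)) {
			printf("ack stage change, resume reset job\n");
			return IRQ_HANDLED;
		}
		return IRQ_NONE;                      /* not our interrupt */
	}
	printf("handle operational interrupt 0x%08x\n", pending);
	return IRQ_HANDLED;
}

int main(void)
{
	printf("%d\n", classify_other_interrupt(0x1, 0x0, 1));   /* stage change */
	printf("%d\n", classify_other_interrupt(0x0, 0x0, 0));   /* spurious */
	return 0;
}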
| @@ -4881,7 +4911,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
| 4881 | { | 4911 | { |
| 4882 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; | 4912 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; |
| 4883 | unsigned long lock_flags = 0; | 4913 | unsigned long lock_flags = 0; |
| 4884 | volatile u32 int_reg, int_mask_reg; | 4914 | volatile u32 int_reg; |
| 4885 | u32 ioasc; | 4915 | u32 ioasc; |
| 4886 | u16 cmd_index; | 4916 | u16 cmd_index; |
| 4887 | int num_hrrq = 0; | 4917 | int num_hrrq = 0; |
| @@ -4896,33 +4926,6 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
| 4896 | return IRQ_NONE; | 4926 | return IRQ_NONE; |
| 4897 | } | 4927 | } |
| 4898 | 4928 | ||
| 4899 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); | ||
| 4900 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; | ||
| 4901 | |||
| 4902 | /* If an interrupt on the adapter did not occur, ignore it. | ||
| 4903 | * Or in the case of SIS 64, check for a stage change interrupt. | ||
| 4904 | */ | ||
| 4905 | if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { | ||
| 4906 | if (ioa_cfg->sis64) { | ||
| 4907 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | ||
| 4908 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
| 4909 | if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { | ||
| 4910 | |||
| 4911 | /* clear stage change */ | ||
| 4912 | writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); | ||
| 4913 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
| 4914 | list_del(&ioa_cfg->reset_cmd->queue); | ||
| 4915 | del_timer(&ioa_cfg->reset_cmd->timer); | ||
| 4916 | ipr_reset_ioa_job(ioa_cfg->reset_cmd); | ||
| 4917 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 4918 | return IRQ_HANDLED; | ||
| 4919 | } | ||
| 4920 | } | ||
| 4921 | |||
| 4922 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 4923 | return IRQ_NONE; | ||
| 4924 | } | ||
| 4925 | |||
| 4926 | while (1) { | 4929 | while (1) { |
| 4927 | ipr_cmd = NULL; | 4930 | ipr_cmd = NULL; |
| 4928 | 4931 | ||
| @@ -4940,7 +4943,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
| 4940 | 4943 | ||
| 4941 | ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; | 4944 | ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; |
| 4942 | 4945 | ||
| 4943 | ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 4946 | ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 4944 | 4947 | ||
| 4945 | ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); | 4948 | ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); |
| 4946 | 4949 | ||
| @@ -4962,7 +4965,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
| 4962 | /* Clear the PCI interrupt */ | 4965 | /* Clear the PCI interrupt */ |
| 4963 | do { | 4966 | do { |
| 4964 | writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); | 4967 | writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); |
| 4965 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; | 4968 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); |
| 4966 | } while (int_reg & IPR_PCII_HRRQ_UPDATED && | 4969 | } while (int_reg & IPR_PCII_HRRQ_UPDATED && |
| 4967 | num_hrrq++ < IPR_MAX_HRRQ_RETRIES); | 4970 | num_hrrq++ < IPR_MAX_HRRQ_RETRIES); |
| 4968 | 4971 | ||
| @@ -4977,7 +4980,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
| 4977 | } | 4980 | } |
| 4978 | 4981 | ||
| 4979 | if (unlikely(rc == IRQ_NONE)) | 4982 | if (unlikely(rc == IRQ_NONE)) |
| 4980 | rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); | 4983 | rc = ipr_handle_other_interrupt(ioa_cfg); |
| 4981 | 4984 | ||
| 4982 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 4985 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
| 4983 | return rc; | 4986 | return rc; |
| @@ -5014,6 +5017,10 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, | |||
| 5014 | 5017 | ||
| 5015 | ipr_cmd->dma_use_sg = nseg; | 5018 | ipr_cmd->dma_use_sg = nseg; |
| 5016 | 5019 | ||
| 5020 | ioarcb->data_transfer_length = cpu_to_be32(length); | ||
| 5021 | ioarcb->ioadl_len = | ||
| 5022 | cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); | ||
| 5023 | |||
| 5017 | if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { | 5024 | if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { |
| 5018 | ioadl_flags = IPR_IOADL_FLAGS_WRITE; | 5025 | ioadl_flags = IPR_IOADL_FLAGS_WRITE; |
| 5019 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; | 5026 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; |
| @@ -5135,7 +5142,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) | |||
| 5135 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; | 5142 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; |
| 5136 | struct ipr_resource_entry *res = scsi_cmd->device->hostdata; | 5143 | struct ipr_resource_entry *res = scsi_cmd->device->hostdata; |
| 5137 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 5144 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
| 5138 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 5145 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 5139 | 5146 | ||
| 5140 | if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { | 5147 | if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { |
| 5141 | scsi_cmd->result |= (DID_ERROR << 16); | 5148 | scsi_cmd->result |= (DID_ERROR << 16); |
| @@ -5166,7 +5173,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) | |||
| 5166 | static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) | 5173 | static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) |
| 5167 | { | 5174 | { |
| 5168 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 5175 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
| 5169 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 5176 | struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; |
| 5170 | dma_addr_t dma_addr = ipr_cmd->dma_addr; | 5177 | dma_addr_t dma_addr = ipr_cmd->dma_addr; |
| 5171 | 5178 | ||
| 5172 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); | 5179 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); |
| @@ -5174,8 +5181,8 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) | |||
| 5174 | ioarcb->read_data_transfer_length = 0; | 5181 | ioarcb->read_data_transfer_length = 0; |
| 5175 | ioarcb->ioadl_len = 0; | 5182 | ioarcb->ioadl_len = 0; |
| 5176 | ioarcb->read_ioadl_len = 0; | 5183 | ioarcb->read_ioadl_len = 0; |
| 5177 | ioasa->ioasc = 0; | 5184 | ioasa->hdr.ioasc = 0; |
| 5178 | ioasa->residual_data_len = 0; | 5185 | ioasa->hdr.residual_data_len = 0; |
| 5179 | 5186 | ||
| 5180 | if (ipr_cmd->ioa_cfg->sis64) | 5187 | if (ipr_cmd->ioa_cfg->sis64) |
| 5181 | ioarcb->u.sis64_addr_data.data_ioadl_addr = | 5188 | ioarcb->u.sis64_addr_data.data_ioadl_addr = |
| @@ -5200,7 +5207,7 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) | |||
| 5200 | static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) | 5207 | static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) |
| 5201 | { | 5208 | { |
| 5202 | struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; | 5209 | struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; |
| 5203 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 5210 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 5204 | 5211 | ||
| 5205 | if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { | 5212 | if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { |
| 5206 | ipr_erp_done(ipr_cmd); | 5213 | ipr_erp_done(ipr_cmd); |
| @@ -5277,12 +5284,12 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, | |||
| 5277 | int i; | 5284 | int i; |
| 5278 | u16 data_len; | 5285 | u16 data_len; |
| 5279 | u32 ioasc, fd_ioasc; | 5286 | u32 ioasc, fd_ioasc; |
| 5280 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 5287 | struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; |
| 5281 | __be32 *ioasa_data = (__be32 *)ioasa; | 5288 | __be32 *ioasa_data = (__be32 *)ioasa; |
| 5282 | int error_index; | 5289 | int error_index; |
| 5283 | 5290 | ||
| 5284 | ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; | 5291 | ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; |
| 5285 | fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK; | 5292 | fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; |
| 5286 | 5293 | ||
| 5287 | if (0 == ioasc) | 5294 | if (0 == ioasc) |
| 5288 | return; | 5295 | return; |
| @@ -5297,7 +5304,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, | |||
| 5297 | 5304 | ||
| 5298 | if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { | 5305 | if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { |
| 5299 | /* Don't log an error if the IOA already logged one */ | 5306 | /* Don't log an error if the IOA already logged one */ |
| 5300 | if (ioasa->ilid != 0) | 5307 | if (ioasa->hdr.ilid != 0) |
| 5301 | return; | 5308 | return; |
| 5302 | 5309 | ||
| 5303 | if (!ipr_is_gscsi(res)) | 5310 | if (!ipr_is_gscsi(res)) |
| @@ -5309,10 +5316,11 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, | |||
| 5309 | 5316 | ||
| 5310 | ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); | 5317 | ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); |
| 5311 | 5318 | ||
| 5312 | if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len)) | 5319 | data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); |
| 5320 | if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) | ||
| 5321 | data_len = sizeof(struct ipr_ioasa64); | ||
| 5322 | else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) | ||
| 5313 | data_len = sizeof(struct ipr_ioasa); | 5323 | data_len = sizeof(struct ipr_ioasa); |
| 5314 | else | ||
| 5315 | data_len = be16_to_cpu(ioasa->ret_stat_len); | ||
| 5316 | 5324 | ||
| 5317 | ipr_err("IOASA Dump:\n"); | 5325 | ipr_err("IOASA Dump:\n"); |
| 5318 | 5326 | ||
| @@ -5338,8 +5346,8 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) | |||
| 5338 | u32 failing_lba; | 5346 | u32 failing_lba; |
| 5339 | u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; | 5347 | u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; |
| 5340 | struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; | 5348 | struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; |
| 5341 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 5349 | struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; |
| 5342 | u32 ioasc = be32_to_cpu(ioasa->ioasc); | 5350 | u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); |
| 5343 | 5351 | ||
| 5344 | memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); | 5352 | memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); |
| 5345 | 5353 | ||
| @@ -5382,7 +5390,7 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) | |||
| 5382 | 5390 | ||
| 5383 | /* Illegal request */ | 5391 | /* Illegal request */ |
| 5384 | if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && | 5392 | if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && |
| 5385 | (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) { | 5393 | (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { |
| 5386 | sense_buf[7] = 10; /* additional length */ | 5394 | sense_buf[7] = 10; /* additional length */ |
| 5387 | 5395 | ||
| 5388 | /* IOARCB was in error */ | 5396 | /* IOARCB was in error */ |
| @@ -5393,10 +5401,10 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) | |||
| 5393 | 5401 | ||
| 5394 | sense_buf[16] = | 5402 | sense_buf[16] = |
| 5395 | ((IPR_FIELD_POINTER_MASK & | 5403 | ((IPR_FIELD_POINTER_MASK & |
| 5396 | be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff; | 5404 | be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; |
| 5397 | sense_buf[17] = | 5405 | sense_buf[17] = |
| 5398 | (IPR_FIELD_POINTER_MASK & | 5406 | (IPR_FIELD_POINTER_MASK & |
| 5399 | be32_to_cpu(ioasa->ioasc_specific)) & 0xff; | 5407 | be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; |
| 5400 | } else { | 5408 | } else { |
| 5401 | if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { | 5409 | if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { |
| 5402 | if (ipr_is_vset_device(res)) | 5410 | if (ipr_is_vset_device(res)) |
| @@ -5428,14 +5436,20 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) | |||
| 5428 | **/ | 5436 | **/ |
| 5429 | static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) | 5437 | static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) |
| 5430 | { | 5438 | { |
| 5431 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 5439 | struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; |
| 5440 | struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; | ||
| 5432 | 5441 | ||
| 5433 | if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) | 5442 | if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) |
| 5434 | return 0; | 5443 | return 0; |
| 5435 | 5444 | ||
| 5436 | memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, | 5445 | if (ipr_cmd->ioa_cfg->sis64) |
| 5437 | min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), | 5446 | memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, |
| 5438 | SCSI_SENSE_BUFFERSIZE)); | 5447 | min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), |
| 5448 | SCSI_SENSE_BUFFERSIZE)); | ||
| 5449 | else | ||
| 5450 | memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, | ||
| 5451 | min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), | ||
| 5452 | SCSI_SENSE_BUFFERSIZE)); | ||
| 5439 | return 1; | 5453 | return 1; |
| 5440 | } | 5454 | } |
| 5441 | 5455 | ||
| @@ -5455,7 +5469,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, | |||
| 5455 | { | 5469 | { |
| 5456 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; | 5470 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; |
| 5457 | struct ipr_resource_entry *res = scsi_cmd->device->hostdata; | 5471 | struct ipr_resource_entry *res = scsi_cmd->device->hostdata; |
| 5458 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 5472 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 5459 | u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; | 5473 | u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; |
| 5460 | 5474 | ||
| 5461 | if (!res) { | 5475 | if (!res) { |
| @@ -5547,9 +5561,9 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) | |||
| 5547 | { | 5561 | { |
| 5548 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 5562 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
| 5549 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; | 5563 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; |
| 5550 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 5564 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 5551 | 5565 | ||
| 5552 | scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len)); | 5566 | scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); |
| 5553 | 5567 | ||
| 5554 | if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { | 5568 | if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { |
| 5555 | scsi_dma_unmap(ipr_cmd->scsi_cmd); | 5569 | scsi_dma_unmap(ipr_cmd->scsi_cmd); |
| @@ -5839,19 +5853,23 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) | |||
| 5839 | struct ata_queued_cmd *qc = ipr_cmd->qc; | 5853 | struct ata_queued_cmd *qc = ipr_cmd->qc; |
| 5840 | struct ipr_sata_port *sata_port = qc->ap->private_data; | 5854 | struct ipr_sata_port *sata_port = qc->ap->private_data; |
| 5841 | struct ipr_resource_entry *res = sata_port->res; | 5855 | struct ipr_resource_entry *res = sata_port->res; |
| 5842 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 5856 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 5843 | 5857 | ||
| 5844 | memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata, | 5858 | if (ipr_cmd->ioa_cfg->sis64) |
| 5845 | sizeof(struct ipr_ioasa_gata)); | 5859 | memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, |
| 5860 | sizeof(struct ipr_ioasa_gata)); | ||
| 5861 | else | ||
| 5862 | memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, | ||
| 5863 | sizeof(struct ipr_ioasa_gata)); | ||
| 5846 | ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); | 5864 | ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); |
| 5847 | 5865 | ||
| 5848 | if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) | 5866 | if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) |
| 5849 | scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); | 5867 | scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); |
| 5850 | 5868 | ||
| 5851 | if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) | 5869 | if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) |
| 5852 | qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); | 5870 | qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); |
| 5853 | else | 5871 | else |
| 5854 | qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status); | 5872 | qc->err_mask |= ac_err_mask(sata_port->ioasa.status); |
| 5855 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); | 5873 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); |
| 5856 | ata_qc_complete(qc); | 5874 | ata_qc_complete(qc); |
| 5857 | } | 5875 | } |
| @@ -6520,7 +6538,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, | |||
| 6520 | static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) | 6538 | static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) |
| 6521 | { | 6539 | { |
| 6522 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 6540 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
| 6523 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 6541 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 6524 | 6542 | ||
| 6525 | dev_err(&ioa_cfg->pdev->dev, | 6543 | dev_err(&ioa_cfg->pdev->dev, |
| 6526 | "0x%02X failed with IOASC: 0x%08X\n", | 6544 | "0x%02X failed with IOASC: 0x%08X\n", |
| @@ -6544,7 +6562,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) | |||
| 6544 | static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) | 6562 | static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) |
| 6545 | { | 6563 | { |
| 6546 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 6564 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
| 6547 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 6565 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 6548 | 6566 | ||
| 6549 | if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { | 6567 | if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { |
| 6550 | ipr_cmd->job_step = ipr_set_supported_devs; | 6568 | ipr_cmd->job_step = ipr_set_supported_devs; |
| @@ -6634,7 +6652,7 @@ static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd) | |||
| 6634 | **/ | 6652 | **/ |
| 6635 | static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) | 6653 | static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) |
| 6636 | { | 6654 | { |
| 6637 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 6655 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 6638 | 6656 | ||
| 6639 | if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { | 6657 | if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { |
| 6640 | ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; | 6658 | ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; |
| @@ -6706,7 +6724,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) | |||
| 6706 | list_move_tail(&res->queue, &old_res); | 6724 | list_move_tail(&res->queue, &old_res); |
| 6707 | 6725 | ||
| 6708 | if (ioa_cfg->sis64) | 6726 | if (ioa_cfg->sis64) |
| 6709 | entries = ioa_cfg->u.cfg_table64->hdr64.num_entries; | 6727 | entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); |
| 6710 | else | 6728 | else |
| 6711 | entries = ioa_cfg->u.cfg_table->hdr.num_entries; | 6729 | entries = ioa_cfg->u.cfg_table->hdr.num_entries; |
| 6712 | 6730 | ||
| @@ -6792,6 +6810,7 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) | |||
| 6792 | ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); | 6810 | ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); |
| 6793 | 6811 | ||
| 6794 | ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; | 6812 | ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; |
| 6813 | ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; | ||
| 6795 | ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; | 6814 | ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; |
| 6796 | ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; | 6815 | ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; |
| 6797 | 6816 | ||
| @@ -7122,7 +7141,9 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) | |||
| 7122 | ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); | 7141 | ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); |
| 7123 | 7142 | ||
| 7124 | /* sanity check the stage_time value */ | 7143 | /* sanity check the stage_time value */ |
| 7125 | if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) | 7144 | if (stage_time == 0) |
| 7145 | stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME; | ||
| 7146 | else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) | ||
| 7126 | stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; | 7147 | stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; |
| 7127 | else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) | 7148 | else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) |
| 7128 | stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; | 7149 | stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; |
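The stage-time sanity check gains a zero case: a reported stage time of 0 now falls back to the new IPR_IPL_INIT_DEFAULT_STAGE_TIME instead of being bumped to the 5-second minimum, while the existing low and high clamps stay in place. The resulting clamp is easy to state on its own (the upper bound below is a stand-in, since IPR_LONG_OPERATIONAL_TIMEOUT is defined elsewhere):

#include <stdio.h>

#define MIN_STAGE_TIME      5     /* IPR_IPL_INIT_MIN_STAGE_TIME */
#define DEFAULT_STAGE_TIME 15     /* IPR_IPL_INIT_DEFAULT_STAGE_TIME */
#define MAX_STAGE_TIME   1200     /* stand-in for IPR_LONG_OPERATIONAL_TIMEOUT */

static long sanitize_stage_time(long stage_time)
{
	if (stage_time == 0)
		return DEFAULT_STAGE_TIME;      /* firmware reported nothing */
	if (stage_time < MIN_STAGE_TIME)
		return MIN_STAGE_TIME;
	if (stage_time > MAX_STAGE_TIME)
		return MAX_STAGE_TIME;
	return stage_time;
}

int main(void)
{
	printf("%ld %ld %ld\n", sanitize_stage_time(0),
	       sanitize_stage_time(2), sanitize_stage_time(99999));
	return 0;
}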
| @@ -7165,13 +7186,14 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) | |||
| 7165 | { | 7186 | { |
| 7166 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 7187 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
| 7167 | volatile u32 int_reg; | 7188 | volatile u32 int_reg; |
| 7189 | volatile u64 maskval; | ||
| 7168 | 7190 | ||
| 7169 | ENTER; | 7191 | ENTER; |
| 7170 | ipr_cmd->job_step = ipr_ioafp_identify_hrrq; | 7192 | ipr_cmd->job_step = ipr_ioafp_identify_hrrq; |
| 7171 | ipr_init_ioa_mem(ioa_cfg); | 7193 | ipr_init_ioa_mem(ioa_cfg); |
| 7172 | 7194 | ||
| 7173 | ioa_cfg->allow_interrupts = 1; | 7195 | ioa_cfg->allow_interrupts = 1; |
| 7174 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); | 7196 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); |
| 7175 | 7197 | ||
| 7176 | if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { | 7198 | if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { |
| 7177 | writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), | 7199 | writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), |
| @@ -7183,9 +7205,12 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) | |||
| 7183 | /* Enable destructive diagnostics on IOA */ | 7205 | /* Enable destructive diagnostics on IOA */ |
| 7184 | writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); | 7206 | writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); |
| 7185 | 7207 | ||
| 7186 | writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); | 7208 | if (ioa_cfg->sis64) { |
| 7187 | if (ioa_cfg->sis64) | 7209 | maskval = IPR_PCII_IPL_STAGE_CHANGE; |
| 7188 | writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg); | 7210 | maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; |
| 7211 | writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); | ||
| 7212 | } else | ||
| 7213 | writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); | ||
| 7189 | 7214 | ||
| 7190 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | 7215 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); |
| 7191 | 7216 | ||
| @@ -7332,12 +7357,12 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) | |||
| 7332 | rc = pci_restore_state(ioa_cfg->pdev); | 7357 | rc = pci_restore_state(ioa_cfg->pdev); |
| 7333 | 7358 | ||
| 7334 | if (rc != PCIBIOS_SUCCESSFUL) { | 7359 | if (rc != PCIBIOS_SUCCESSFUL) { |
| 7335 | ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); | 7360 | ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); |
| 7336 | return IPR_RC_JOB_CONTINUE; | 7361 | return IPR_RC_JOB_CONTINUE; |
| 7337 | } | 7362 | } |
| 7338 | 7363 | ||
| 7339 | if (ipr_set_pcix_cmd_reg(ioa_cfg)) { | 7364 | if (ipr_set_pcix_cmd_reg(ioa_cfg)) { |
| 7340 | ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); | 7365 | ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); |
| 7341 | return IPR_RC_JOB_CONTINUE; | 7366 | return IPR_RC_JOB_CONTINUE; |
| 7342 | } | 7367 | } |
| 7343 | 7368 | ||
| @@ -7364,7 +7389,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) | |||
| 7364 | } | 7389 | } |
| 7365 | } | 7390 | } |
| 7366 | 7391 | ||
| 7367 | ENTER; | 7392 | LEAVE; |
| 7368 | return IPR_RC_JOB_CONTINUE; | 7393 | return IPR_RC_JOB_CONTINUE; |
| 7369 | } | 7394 | } |
| 7370 | 7395 | ||
| @@ -7406,7 +7431,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) | |||
| 7406 | 7431 | ||
| 7407 | if (rc != PCIBIOS_SUCCESSFUL) { | 7432 | if (rc != PCIBIOS_SUCCESSFUL) { |
| 7408 | pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev); | 7433 | pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev); |
| 7409 | ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); | 7434 | ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); |
| 7410 | rc = IPR_RC_JOB_CONTINUE; | 7435 | rc = IPR_RC_JOB_CONTINUE; |
| 7411 | } else { | 7436 | } else { |
| 7412 | ipr_cmd->job_step = ipr_reset_bist_done; | 7437 | ipr_cmd->job_step = ipr_reset_bist_done; |
| @@ -7665,7 +7690,7 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) | |||
| 7665 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 7690 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
| 7666 | 7691 | ||
| 7667 | do { | 7692 | do { |
| 7668 | ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 7693 | ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
| 7669 | 7694 | ||
| 7670 | if (ioa_cfg->reset_cmd != ipr_cmd) { | 7695 | if (ioa_cfg->reset_cmd != ipr_cmd) { |
| 7671 | /* | 7696 | /* |
| @@ -8048,13 +8073,13 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) | |||
| 8048 | ioarcb->u.sis64_addr_data.data_ioadl_addr = | 8073 | ioarcb->u.sis64_addr_data.data_ioadl_addr = |
| 8049 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); | 8074 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); |
| 8050 | ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = | 8075 | ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = |
| 8051 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa)); | 8076 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64)); |
| 8052 | } else { | 8077 | } else { |
| 8053 | ioarcb->write_ioadl_addr = | 8078 | ioarcb->write_ioadl_addr = |
| 8054 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); | 8079 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); |
| 8055 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; | 8080 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; |
| 8056 | ioarcb->ioasa_host_pci_addr = | 8081 | ioarcb->ioasa_host_pci_addr = |
| 8057 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); | 8082 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa)); |
| 8058 | } | 8083 | } |
| 8059 | ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); | 8084 | ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); |
| 8060 | ipr_cmd->cmd_index = i; | 8085 | ipr_cmd->cmd_index = i; |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 4c267b5e0b96..9ecd2259eb39 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
| @@ -244,6 +244,7 @@ | |||
| 244 | #define IPR_RUNTIME_RESET 0x40000000 | 244 | #define IPR_RUNTIME_RESET 0x40000000 |
| 245 | 245 | ||
| 246 | #define IPR_IPL_INIT_MIN_STAGE_TIME 5 | 246 | #define IPR_IPL_INIT_MIN_STAGE_TIME 5 |
| 247 | #define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15 | ||
| 247 | #define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 | 248 | #define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 |
| 248 | #define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 | 249 | #define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 |
| 249 | #define IPR_IPL_INIT_STAGE_MASK 0xff000000 | 250 | #define IPR_IPL_INIT_STAGE_MASK 0xff000000 |
| @@ -613,7 +614,7 @@ struct ipr_auto_sense { | |||
| 613 | __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)]; | 614 | __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)]; |
| 614 | }; | 615 | }; |
| 615 | 616 | ||
| 616 | struct ipr_ioasa { | 617 | struct ipr_ioasa_hdr { |
| 617 | __be32 ioasc; | 618 | __be32 ioasc; |
| 618 | #define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) | 619 | #define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) |
| 619 | #define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) | 620 | #define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) |
| @@ -645,6 +646,25 @@ struct ipr_ioasa { | |||
| 645 | #define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) | 646 | #define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) |
| 646 | #define IPR_FIELD_POINTER_MASK 0x0000ffff | 647 | #define IPR_FIELD_POINTER_MASK 0x0000ffff |
| 647 | 648 | ||
| 649 | }__attribute__((packed, aligned (4))); | ||
| 650 | |||
| 651 | struct ipr_ioasa { | ||
| 652 | struct ipr_ioasa_hdr hdr; | ||
| 653 | |||
| 654 | union { | ||
| 655 | struct ipr_ioasa_vset vset; | ||
| 656 | struct ipr_ioasa_af_dasd dasd; | ||
| 657 | struct ipr_ioasa_gpdd gpdd; | ||
| 658 | struct ipr_ioasa_gata gata; | ||
| 659 | } u; | ||
| 660 | |||
| 661 | struct ipr_auto_sense auto_sense; | ||
| 662 | }__attribute__((packed, aligned (4))); | ||
| 663 | |||
| 664 | struct ipr_ioasa64 { | ||
| 665 | struct ipr_ioasa_hdr hdr; | ||
| 666 | u8 fd_res_path[8]; | ||
| 667 | |||
| 648 | union { | 668 | union { |
| 649 | struct ipr_ioasa_vset vset; | 669 | struct ipr_ioasa_vset vset; |
| 650 | struct ipr_ioasa_af_dasd dasd; | 670 | struct ipr_ioasa_af_dasd dasd; |
| @@ -804,7 +824,7 @@ struct ipr_hostrcb_array_data_entry_enhanced { | |||
| 804 | }__attribute__((packed, aligned (4))); | 824 | }__attribute__((packed, aligned (4))); |
| 805 | 825 | ||
| 806 | struct ipr_hostrcb_type_ff_error { | 826 | struct ipr_hostrcb_type_ff_error { |
| 807 | __be32 ioa_data[502]; | 827 | __be32 ioa_data[758]; |
| 808 | }__attribute__((packed, aligned (4))); | 828 | }__attribute__((packed, aligned (4))); |
| 809 | 829 | ||
| 810 | struct ipr_hostrcb_type_01_error { | 830 | struct ipr_hostrcb_type_01_error { |
| @@ -1181,7 +1201,7 @@ struct ipr_resource_entry { | |||
| 1181 | u8 flags; | 1201 | u8 flags; |
| 1182 | __be16 res_flags; | 1202 | __be16 res_flags; |
| 1183 | 1203 | ||
| 1184 | __be32 type; | 1204 | u8 type; |
| 1185 | 1205 | ||
| 1186 | u8 qmodel; | 1206 | u8 qmodel; |
| 1187 | struct ipr_std_inq_data std_inq_data; | 1207 | struct ipr_std_inq_data std_inq_data; |
| @@ -1464,7 +1484,10 @@ struct ipr_cmnd { | |||
| 1464 | struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; | 1484 | struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; |
| 1465 | struct ipr_ata64_ioadl ata_ioadl; | 1485 | struct ipr_ata64_ioadl ata_ioadl; |
| 1466 | } i; | 1486 | } i; |
| 1467 | struct ipr_ioasa ioasa; | 1487 | union { |
| 1488 | struct ipr_ioasa ioasa; | ||
| 1489 | struct ipr_ioasa64 ioasa64; | ||
| 1490 | } s; | ||
| 1468 | struct list_head queue; | 1491 | struct list_head queue; |
| 1469 | struct scsi_cmnd *scsi_cmd; | 1492 | struct scsi_cmnd *scsi_cmd; |
| 1470 | struct ata_queued_cmd *qc; | 1493 | struct ata_queued_cmd *qc; |
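Taken together, the ipr.h hunks split the old struct ipr_ioasa into a shared struct ipr_ioasa_hdr plus two full layouts, the legacy ipr_ioasa and the new ipr_ioasa64 with its 8-byte fd_res_path, and overlay them in struct ipr_cmnd as the union 's'. That shared prefix is what makes the ipr_cmd->s.ioasa.hdr.ioasc reads in the ipr.c hunks format-independent. A hedged sketch of the idea, with simplified field names that are not the driver's:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up field names; only the shape matters: a common header followed by
 * format-specific data, overlaid in one union inside the command block. */
struct ioasa_hdr { uint32_t ioasc; uint32_t ilid; };
struct ioasa     { struct ioasa_hdr hdr; uint32_t data[24]; };
struct ioasa64   { struct ioasa_hdr hdr; uint8_t fd_res_path[8]; uint32_t data[24]; };

struct cmnd {
	union {
		struct ioasa ioasa;
		struct ioasa64 ioasa64;
	} s;
};

int main(void)
{
	/* Both layouts begin with the same header, so the IOASC sits at the
	 * same offset whichever format the adapter actually filled in. */
	assert(offsetof(struct cmnd, s.ioasa.hdr.ioasc) ==
	       offsetof(struct cmnd, s.ioasa64.hdr.ioasc));

	/* Common code can therefore read cmd->s.ioasa.hdr.ioasc; only
	 * format-specific fields (fd_res_path here) force a choice of member. */
	printf("shared header verified\n");
	return 0;
}

Only paths that need format-specific data, such as the resource path on SIS-64 adapters, have to pick the right union member.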
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index bf55d3057413..fec47de72535 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
| @@ -601,10 +601,8 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | |||
| 601 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); | 601 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); |
| 602 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); | 602 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); |
| 603 | 603 | ||
| 604 | if (sk_sleep(sock->sk)) { | 604 | sock->sk->sk_err = EIO; |
| 605 | sock->sk->sk_err = EIO; | 605 | wake_up_interruptible(sk_sleep(sock->sk)); |
| 606 | wake_up_interruptible(sk_sleep(sock->sk)); | ||
| 607 | } | ||
| 608 | 606 | ||
| 609 | iscsi_conn_stop(cls_conn, flag); | 607 | iscsi_conn_stop(cls_conn, flag); |
| 610 | iscsi_sw_tcp_release_conn(conn); | 608 | iscsi_sw_tcp_release_conn(conn); |
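The iscsi_tcp hunk drops the NULL test on sk_sleep() and unconditionally records EIO on the socket before waking anything sleeping on its wait queue. The general shape, record the error first and then wake the sleepers so they re-check the condition when they run, translated into a small pthread analogy (not the kernel API; every name here is illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Analogy only: waiters sleep on a condition variable the way socket
 * readers sleep on the sk_sleep() wait queue; 'err' stands in for sk_err. */
struct conn {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int err;
};

/* Stop path: record the error first, then wake everything sleeping on it. */
static void conn_stop(struct conn *c)
{
	pthread_mutex_lock(&c->lock);
	c->err = EIO;
	pthread_cond_broadcast(&c->wait);
	pthread_mutex_unlock(&c->lock);
}

/* Waiter: sleeps until the stop path flags an error, then re-checks it. */
static void *waiter(void *arg)
{
	struct conn *c = arg;

	pthread_mutex_lock(&c->lock);
	while (!c->err)
		pthread_cond_wait(&c->wait, &c->lock);
	pthread_mutex_unlock(&c->lock);

	printf("woken, err=%d\n", c->err);
	return NULL;
}

int main(void)
{
	struct conn c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
		.err = 0,
	};
	pthread_t t;

	pthread_create(&t, NULL, waiter, &c);
	conn_stop(&c);
	pthread_join(&t, NULL);
	return 0;
}

Build with cc -pthread; the waiter loops on the flag, so a wakeup without an error set would simply put it back to sleep.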
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 9798c2c06b93..1c027a97d8b9 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
| @@ -492,19 +492,20 @@ void scsi_target_reap(struct scsi_target *starget) | |||
| 492 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 492 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
| 493 | unsigned long flags; | 493 | unsigned long flags; |
| 494 | enum scsi_target_state state; | 494 | enum scsi_target_state state; |
| 495 | int empty; | 495 | int empty = 0; |
| 496 | 496 | ||
| 497 | spin_lock_irqsave(shost->host_lock, flags); | 497 | spin_lock_irqsave(shost->host_lock, flags); |
| 498 | state = starget->state; | 498 | state = starget->state; |
| 499 | empty = --starget->reap_ref == 0 && | 499 | if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { |
| 500 | list_empty(&starget->devices) ? 1 : 0; | 500 | empty = 1; |
| 501 | starget->state = STARGET_DEL; | ||
| 502 | } | ||
| 501 | spin_unlock_irqrestore(shost->host_lock, flags); | 503 | spin_unlock_irqrestore(shost->host_lock, flags); |
| 502 | 504 | ||
| 503 | if (!empty) | 505 | if (!empty) |
| 504 | return; | 506 | return; |
| 505 | 507 | ||
| 506 | BUG_ON(state == STARGET_DEL); | 508 | BUG_ON(state == STARGET_DEL); |
| 507 | starget->state = STARGET_DEL; | ||
| 508 | if (state == STARGET_CREATED) | 509 | if (state == STARGET_CREATED) |
| 509 | scsi_target_destroy(starget); | 510 | scsi_target_destroy(starget); |
| 510 | else | 511 | else |
