Diffstat (limited to 'drivers/scsi')
82 files changed, 2361 insertions, 1794 deletions
diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c
index c690c2b89e41..acf292736b4e 100644
--- a/drivers/scsi/53c7xx.c
+++ b/drivers/scsi/53c7xx.c
@@ -3451,12 +3451,12 @@ create_cmd (Scsi_Cmnd *cmd) {
3451 | for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4, | 3451 | for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4, |
3452 | cmd_dataout += 4, ++i) { | 3452 | cmd_dataout += 4, ++i) { |
3453 | u32 vbuf = cmd->use_sg | 3453 | u32 vbuf = cmd->use_sg |
3454 | ? (u32)page_address(((struct scatterlist *)cmd->buffer)[i].page)+ | 3454 | ? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+ |
3455 | ((struct scatterlist *)cmd->buffer)[i].offset | 3455 | ((struct scatterlist *)cmd->request_buffer)[i].offset |
3456 | : (u32)(cmd->request_buffer); | 3456 | : (u32)(cmd->request_buffer); |
3457 | u32 bbuf = virt_to_bus((void *)vbuf); | 3457 | u32 bbuf = virt_to_bus((void *)vbuf); |
3458 | u32 count = cmd->use_sg ? | 3458 | u32 count = cmd->use_sg ? |
3459 | ((struct scatterlist *)cmd->buffer)[i].length : | 3459 | ((struct scatterlist *)cmd->request_buffer)[i].length : |
3460 | cmd->request_bufflen; | 3460 | cmd->request_bufflen; |
3461 | 3461 | ||
3462 | /* | 3462 | /* |
@@ -5417,7 +5417,7 @@ insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
5417 | 5417 | ||
5418 | if ((buffers = cmd->use_sg)) { | 5418 | if ((buffers = cmd->use_sg)) { |
5419 | for (offset = 0, | 5419 | for (offset = 0, |
5420 | segment = (struct scatterlist *) cmd->buffer; | 5420 | segment = (struct scatterlist *) cmd->request_buffer; |
5421 | buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) && | 5421 | buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) && |
5422 | (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length))))); | 5422 | (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length))))); |
5423 | --buffers, offset += segment->length, ++segment) | 5423 | --buffers, offset += segment->length, ++segment) |
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 8a4659e94105..bdc6bb262bce 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -911,7 +911,7 @@ static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
911 | sp->SCp.ptr = | 911 | sp->SCp.ptr = |
912 | (char *) virt_to_phys(sp->request_buffer); | 912 | (char *) virt_to_phys(sp->request_buffer); |
913 | } else { | 913 | } else { |
914 | sp->SCp.buffer = (struct scatterlist *) sp->buffer; | 914 | sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; |
915 | sp->SCp.buffers_residual = sp->use_sg - 1; | 915 | sp->SCp.buffers_residual = sp->use_sg - 1; |
916 | sp->SCp.this_residual = sp->SCp.buffer->length; | 916 | sp->SCp.this_residual = sp->SCp.buffer->length; |
917 | if (esp->dma_mmu_get_scsi_sgl) | 917 | if (esp->dma_mmu_get_scsi_sgl) |
@@ -2152,29 +2152,23 @@ static int esp_do_data_finale(struct NCR_ESP *esp,
2152 | */ | 2152 | */ |
2153 | static int esp_should_clear_sync(Scsi_Cmnd *sp) | 2153 | static int esp_should_clear_sync(Scsi_Cmnd *sp) |
2154 | { | 2154 | { |
2155 | unchar cmd1 = sp->cmnd[0]; | 2155 | unchar cmd = sp->cmnd[0]; |
2156 | unchar cmd2 = sp->data_cmnd[0]; | ||
2157 | 2156 | ||
2158 | /* These cases are for spinning up a disk and | 2157 | /* These cases are for spinning up a disk and |
2159 | * waiting for that spinup to complete. | 2158 | * waiting for that spinup to complete. |
2160 | */ | 2159 | */ |
2161 | if(cmd1 == START_STOP || | 2160 | if(cmd == START_STOP) |
2162 | cmd2 == START_STOP) | ||
2163 | return 0; | 2161 | return 0; |
2164 | 2162 | ||
2165 | if(cmd1 == TEST_UNIT_READY || | 2163 | if(cmd == TEST_UNIT_READY) |
2166 | cmd2 == TEST_UNIT_READY) | ||
2167 | return 0; | 2164 | return 0; |
2168 | 2165 | ||
2169 | /* One more special case for SCSI tape drives, | 2166 | /* One more special case for SCSI tape drives, |
2170 | * this is what is used to probe the device for | 2167 | * this is what is used to probe the device for |
2171 | * completion of a rewind or tape load operation. | 2168 | * completion of a rewind or tape load operation. |
2172 | */ | 2169 | */ |
2173 | if(sp->device->type == TYPE_TAPE) { | 2170 | if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE) |
2174 | if(cmd1 == MODE_SENSE || | 2171 | return 0; |
2175 | cmd2 == MODE_SENSE) | ||
2176 | return 0; | ||
2177 | } | ||
2178 | 2172 | ||
2179 | return 1; | 2173 | return 1; |
2180 | } | 2174 | } |
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index a06f547e87f7..d05681f9d81a 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -114,7 +114,7 @@ MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
114 | MODULE_LICENSE("GPL"); | 114 | MODULE_LICENSE("GPL"); |
115 | module_param(NCR_D700, charp, 0); | 115 | module_param(NCR_D700, charp, 0); |
116 | 116 | ||
117 | static __u8 __initdata id_array[2*(MCA_MAX_SLOT_NR + 1)] = | 117 | static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] = |
118 | { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 }; | 118 | { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 }; |
119 | 119 | ||
120 | #ifdef MODULE | 120 | #ifdef MODULE |
@@ -173,7 +173,7 @@ struct NCR_D700_private {
173 | char pad; | 173 | char pad; |
174 | }; | 174 | }; |
175 | 175 | ||
176 | static int | 176 | static int __devinit |
177 | NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, | 177 | NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, |
178 | int slot, u32 region, int differential) | 178 | int slot, u32 region, int differential) |
179 | { | 179 | { |
@@ -243,7 +243,7 @@ NCR_D700_intr(int irq, void *data, struct pt_regs *regs)
243 | * essentially connectecd to the MCA bus independently, it is easier | 243 | * essentially connectecd to the MCA bus independently, it is easier |
244 | * to set them up as two separate host adapters, rather than one | 244 | * to set them up as two separate host adapters, rather than one |
245 | * adapter with two channels */ | 245 | * adapter with two channels */ |
246 | static int | 246 | static int __devinit |
247 | NCR_D700_probe(struct device *dev) | 247 | NCR_D700_probe(struct device *dev) |
248 | { | 248 | { |
249 | struct NCR_D700_private *p; | 249 | struct NCR_D700_private *p; |
@@ -329,7 +329,7 @@ NCR_D700_probe(struct device *dev)
329 | for (i = 0; i < 2; i++) { | 329 | for (i = 0; i < 2; i++) { |
330 | int err; | 330 | int err; |
331 | 331 | ||
332 | if ((err = NCR_D700_probe_one(p, i, slot, irq, | 332 | if ((err = NCR_D700_probe_one(p, i, irq, slot, |
333 | offset_addr + (0x80 * i), | 333 | offset_addr + (0x80 * i), |
334 | differential)) != 0) | 334 | differential)) != 0) |
335 | printk("D700: SIOP%d: probe failed, error = %d\n", | 335 | printk("D700: SIOP%d: probe failed, error = %d\n", |
@@ -349,7 +349,7 @@ NCR_D700_probe(struct device *dev)
349 | return 0; | 349 | return 0; |
350 | } | 350 | } |
351 | 351 | ||
352 | static void | 352 | static void __devexit |
353 | NCR_D700_remove_one(struct Scsi_Host *host) | 353 | NCR_D700_remove_one(struct Scsi_Host *host) |
354 | { | 354 | { |
355 | scsi_remove_host(host); | 355 | scsi_remove_host(host); |
@@ -359,7 +359,7 @@ NCR_D700_remove_one(struct Scsi_Host *host)
359 | release_region(host->base, 64); | 359 | release_region(host->base, 64); |
360 | } | 360 | } |
361 | 361 | ||
362 | static int | 362 | static int __devexit |
363 | NCR_D700_remove(struct device *dev) | 363 | NCR_D700_remove(struct device *dev) |
364 | { | 364 | { |
365 | struct NCR_D700_private *p = dev_get_drvdata(dev); | 365 | struct NCR_D700_private *p = dev_get_drvdata(dev); |
@@ -380,7 +380,7 @@ static struct mca_driver NCR_D700_driver = {
380 | .name = "NCR_D700", | 380 | .name = "NCR_D700", |
381 | .bus = &mca_bus_type, | 381 | .bus = &mca_bus_type, |
382 | .probe = NCR_D700_probe, | 382 | .probe = NCR_D700_probe, |
383 | .remove = NCR_D700_remove, | 383 | .remove = __devexit_p(NCR_D700_remove), |
384 | }, | 384 | }, |
385 | }; | 385 | }; |
386 | 386 | ||
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 36e63f82d9f8..f974869ea323 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -551,6 +551,11 @@ struct aha152x_hostdata {
551 | struct aha152x_scdata { | 551 | struct aha152x_scdata { |
552 | Scsi_Cmnd *next; /* next sc in queue */ | 552 | Scsi_Cmnd *next; /* next sc in queue */ |
553 | struct semaphore *sem; /* semaphore to block on */ | 553 | struct semaphore *sem; /* semaphore to block on */ |
554 | unsigned char cmd_len; | ||
555 | unsigned char cmnd[MAX_COMMAND_SIZE]; | ||
556 | unsigned short use_sg; | ||
557 | unsigned request_bufflen; | ||
558 | void *request_buffer; | ||
554 | }; | 559 | }; |
555 | 560 | ||
556 | 561 | ||
@@ -1006,11 +1011,20 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int p
1006 | return FAILED; | 1011 | return FAILED; |
1007 | } | 1012 | } |
1008 | } else { | 1013 | } else { |
1014 | struct aha152x_scdata *sc; | ||
1015 | |||
1009 | SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); | 1016 | SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); |
1010 | if(SCpnt->host_scribble==0) { | 1017 | if(SCpnt->host_scribble==0) { |
1011 | printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt)); | 1018 | printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt)); |
1012 | return FAILED; | 1019 | return FAILED; |
1013 | } | 1020 | } |
1021 | |||
1022 | sc = SCDATA(SCpnt); | ||
1023 | memcpy(sc->cmnd, SCpnt->cmnd, sizeof(sc->cmnd)); | ||
1024 | sc->request_buffer = SCpnt->request_buffer; | ||
1025 | sc->request_bufflen = SCpnt->request_bufflen; | ||
1026 | sc->use_sg = SCpnt->use_sg; | ||
1027 | sc->cmd_len = SCpnt->cmd_len; | ||
1014 | } | 1028 | } |
1015 | 1029 | ||
1016 | SCNEXT(SCpnt) = NULL; | 1030 | SCNEXT(SCpnt) = NULL; |
@@ -1165,6 +1179,10 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1165 | DECLARE_MUTEX_LOCKED(sem); | 1179 | DECLARE_MUTEX_LOCKED(sem); |
1166 | struct timer_list timer; | 1180 | struct timer_list timer; |
1167 | int ret, issued, disconnected; | 1181 | int ret, issued, disconnected; |
1182 | unsigned char old_cmd_len = SCpnt->cmd_len; | ||
1183 | unsigned short old_use_sg = SCpnt->use_sg; | ||
1184 | void *old_buffer = SCpnt->request_buffer; | ||
1185 | unsigned old_bufflen = SCpnt->request_bufflen; | ||
1168 | unsigned long flags; | 1186 | unsigned long flags; |
1169 | 1187 | ||
1170 | #if defined(AHA152X_DEBUG) | 1188 | #if defined(AHA152X_DEBUG) |
@@ -1198,11 +1216,11 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1198 | add_timer(&timer); | 1216 | add_timer(&timer); |
1199 | down(&sem); | 1217 | down(&sem); |
1200 | del_timer(&timer); | 1218 | del_timer(&timer); |
1201 | 1219 | ||
1202 | SCpnt->cmd_len = SCpnt->old_cmd_len; | 1220 | SCpnt->cmd_len = old_cmd_len; |
1203 | SCpnt->use_sg = SCpnt->old_use_sg; | 1221 | SCpnt->use_sg = old_use_sg; |
1204 | SCpnt->request_buffer = SCpnt->buffer; | 1222 | SCpnt->request_buffer = old_buffer; |
1205 | SCpnt->request_bufflen = SCpnt->bufflen; | 1223 | SCpnt->request_bufflen = old_bufflen; |
1206 | 1224 | ||
1207 | DO_LOCK(flags); | 1225 | DO_LOCK(flags); |
1208 | 1226 | ||
@@ -1565,6 +1583,9 @@ static void busfree_run(struct Scsi_Host *shpnt)
1565 | #endif | 1583 | #endif |
1566 | 1584 | ||
1567 | if(DONE_SC->SCp.phase & check_condition) { | 1585 | if(DONE_SC->SCp.phase & check_condition) { |
1586 | struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC; | ||
1587 | struct aha152x_scdata *sc = SCDATA(cmd); | ||
1588 | |||
1568 | #if 0 | 1589 | #if 0 |
1569 | if(HOSTDATA(shpnt)->debug & debug_eh) { | 1590 | if(HOSTDATA(shpnt)->debug & debug_eh) { |
1570 | printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC)); | 1591 | printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC)); |
@@ -1573,13 +1594,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
1573 | #endif | 1594 | #endif |
1574 | 1595 | ||
1575 | /* restore old command */ | 1596 | /* restore old command */ |
1576 | memcpy((void *) DONE_SC->cmnd, (void *) DONE_SC->data_cmnd, sizeof(DONE_SC->data_cmnd)); | 1597 | memcpy(cmd->cmnd, sc->cmnd, sizeof(sc->cmnd)); |
1577 | DONE_SC->request_buffer = DONE_SC->buffer; | 1598 | cmd->request_buffer = sc->request_buffer; |
1578 | DONE_SC->request_bufflen = DONE_SC->bufflen; | 1599 | cmd->request_bufflen = sc->request_bufflen; |
1579 | DONE_SC->use_sg = DONE_SC->old_use_sg; | 1600 | cmd->use_sg = sc->use_sg; |
1580 | DONE_SC->cmd_len = DONE_SC->old_cmd_len; | 1601 | cmd->cmd_len = sc->cmd_len; |
1581 | 1602 | ||
1582 | DONE_SC->SCp.Status = 0x02; | 1603 | cmd->SCp.Status = 0x02; |
1583 | 1604 | ||
1584 | HOSTDATA(shpnt)->commands--; | 1605 | HOSTDATA(shpnt)->commands--; |
1585 | if (!HOSTDATA(shpnt)->commands) | 1606 | if (!HOSTDATA(shpnt)->commands) |
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 15f6cd4279b7..904c25fb4ba4 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -940,14 +940,8 @@ static void ahci_host_intr(struct ata_port *ap)
940 | return; | 940 | return; |
941 | 941 | ||
942 | /* ignore interim PIO setup fis interrupts */ | 942 | /* ignore interim PIO setup fis interrupts */ |
943 | if (ata_tag_valid(ap->active_tag)) { | 943 | if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS)) |
944 | struct ata_queued_cmd *qc = | 944 | return; |
945 | ata_qc_from_tag(ap, ap->active_tag); | ||
946 | |||
947 | if (qc && qc->tf.protocol == ATA_PROT_PIO && | ||
948 | (status & PORT_IRQ_PIOS_FIS)) | ||
949 | return; | ||
950 | } | ||
951 | 945 | ||
952 | if (ata_ratelimit()) | 946 | if (ata_ratelimit()) |
953 | ata_port_printk(ap, KERN_INFO, "spurious interrupt " | 947 | ata_port_printk(ap, KERN_INFO, "spurious interrupt " |
@@ -1052,7 +1046,7 @@ static void ahci_thaw(struct ata_port *ap)
1052 | 1046 | ||
1053 | static void ahci_error_handler(struct ata_port *ap) | 1047 | static void ahci_error_handler(struct ata_port *ap) |
1054 | { | 1048 | { |
1055 | if (!(ap->flags & ATA_FLAG_FROZEN)) { | 1049 | if (!(ap->pflags & ATA_PFLAG_FROZEN)) { |
1056 | /* restart engine */ | 1050 | /* restart engine */ |
1057 | ahci_stop_engine(ap); | 1051 | ahci_stop_engine(ap); |
1058 | ahci_start_engine(ap); | 1052 | ahci_start_engine(ap); |
@@ -1323,6 +1317,17 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1323 | if (!printed_version++) | 1317 | if (!printed_version++) |
1324 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 1318 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
1325 | 1319 | ||
1320 | /* JMicron-specific fixup: make sure we're in AHCI mode */ | ||
1321 | /* This is protected from races with ata_jmicron by the pci probe | ||
1322 | locking */ | ||
1323 | if (pdev->vendor == PCI_VENDOR_ID_JMICRON) { | ||
1324 | /* AHCI enable, AHCI on function 0 */ | ||
1325 | pci_write_config_byte(pdev, 0x41, 0xa1); | ||
1326 | /* Function 1 is the PATA controller */ | ||
1327 | if (PCI_FUNC(pdev->devfn)) | ||
1328 | return -ENODEV; | ||
1329 | } | ||
1330 | |||
1326 | rc = pci_enable_device(pdev); | 1331 | rc = pci_enable_device(pdev); |
1327 | if (rc) | 1332 | if (rc) |
1328 | return rc; | 1333 | return rc; |
@@ -1378,10 +1383,6 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1378 | if (have_msi) | 1383 | if (have_msi) |
1379 | hpriv->flags |= AHCI_FLAG_MSI; | 1384 | hpriv->flags |= AHCI_FLAG_MSI; |
1380 | 1385 | ||
1381 | /* JMicron-specific fixup: make sure we're in AHCI mode */ | ||
1382 | if (pdev->vendor == 0x197b) | ||
1383 | pci_write_config_byte(pdev, 0x41, 0xa1); | ||
1384 | |||
1385 | /* initialize adapter */ | 1386 | /* initialize adapter */ |
1386 | rc = ahci_host_init(probe_ent); | 1387 | rc = ahci_host_init(probe_ent); |
1387 | if (rc) | 1388 | if (rc) |
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index a1e8ca758594..653818d2f802 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -7289,7 +7289,7 @@ ahd_reset_cmds_pending(struct ahd_softc *ahd)
7289 | ahd->flags &= ~AHD_UPDATE_PEND_CMDS; | 7289 | ahd->flags &= ~AHD_UPDATE_PEND_CMDS; |
7290 | } | 7290 | } |
7291 | 7291 | ||
7292 | void | 7292 | static void |
7293 | ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) | 7293 | ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) |
7294 | { | 7294 | { |
7295 | cam_status ostat; | 7295 | cam_status ostat; |
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index b244c7124179..998999c0a972 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -243,25 +243,6 @@ ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
243 | static uint32_t aic79xx_no_reset; | 243 | static uint32_t aic79xx_no_reset; |
244 | 244 | ||
245 | /* | 245 | /* |
246 | * Certain PCI motherboards will scan PCI devices from highest to lowest, | ||
247 | * others scan from lowest to highest, and they tend to do all kinds of | ||
248 | * strange things when they come into contact with PCI bridge chips. The | ||
249 | * net result of all this is that the PCI card that is actually used to boot | ||
250 | * the machine is very hard to detect. Most motherboards go from lowest | ||
251 | * PCI slot number to highest, and the first SCSI controller found is the | ||
252 | * one you boot from. The only exceptions to this are when a controller | ||
253 | * has its BIOS disabled. So, we by default sort all of our SCSI controllers | ||
254 | * from lowest PCI slot number to highest PCI slot number. We also force | ||
255 | * all controllers with their BIOS disabled to the end of the list. This | ||
256 | * works on *almost* all computers. Where it doesn't work, we have this | ||
257 | * option. Setting this option to non-0 will reverse the order of the sort | ||
258 | * to highest first, then lowest, but will still leave cards with their BIOS | ||
259 | * disabled at the very end. That should fix everyone up unless there are | ||
260 | * really strange cirumstances. | ||
261 | */ | ||
262 | static uint32_t aic79xx_reverse_scan; | ||
263 | |||
264 | /* | ||
265 | * Should we force EXTENDED translation on a controller. | 246 | * Should we force EXTENDED translation on a controller. |
266 | * 0 == Use whatever is in the SEEPROM or default to off | 247 | * 0 == Use whatever is in the SEEPROM or default to off |
267 | * 1 == Use whatever is in the SEEPROM or default to on | 248 | * 1 == Use whatever is in the SEEPROM or default to on |
@@ -350,7 +331,6 @@ MODULE_PARM_DESC(aic79xx,
350 | " periodically to prevent tag starvation.\n" | 331 | " periodically to prevent tag starvation.\n" |
351 | " This may be required by some older disk\n" | 332 | " This may be required by some older disk\n" |
352 | " or drives/RAID arrays.\n" | 333 | " or drives/RAID arrays.\n" |
353 | " reverse_scan Sort PCI devices highest Bus/Slot to lowest\n" | ||
354 | " tag_info:<tag_str> Set per-target tag depth\n" | 334 | " tag_info:<tag_str> Set per-target tag depth\n" |
355 | " global_tag_depth:<int> Global tag depth for all targets on all buses\n" | 335 | " global_tag_depth:<int> Global tag depth for all targets on all buses\n" |
356 | " slewrate:<slewrate_list>Set the signal slew rate (0-15).\n" | 336 | " slewrate:<slewrate_list>Set the signal slew rate (0-15).\n" |
@@ -1031,7 +1011,6 @@ aic79xx_setup(char *s)
1031 | #ifdef AHD_DEBUG | 1011 | #ifdef AHD_DEBUG |
1032 | { "debug", &ahd_debug }, | 1012 | { "debug", &ahd_debug }, |
1033 | #endif | 1013 | #endif |
1034 | { "reverse_scan", &aic79xx_reverse_scan }, | ||
1035 | { "periodic_otag", &aic79xx_periodic_otag }, | 1014 | { "periodic_otag", &aic79xx_periodic_otag }, |
1036 | { "pci_parity", &aic79xx_pci_parity }, | 1015 | { "pci_parity", &aic79xx_pci_parity }, |
1037 | { "seltime", &aic79xx_seltime }, | 1016 | { "seltime", &aic79xx_seltime }, |
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 9e871de23835..601340d84410 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -93,7 +93,6 @@
93 | #endif | 93 | #endif |
94 | 94 | ||
95 | /********************************** Misc Macros *******************************/ | 95 | /********************************** Misc Macros *******************************/ |
96 | #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) | ||
97 | #define powerof2(x) ((((x)-1)&(x))==0) | 96 | #define powerof2(x) ((((x)-1)&(x))==0) |
98 | 97 | ||
99 | /************************* Forward Declarations *******************************/ | 98 | /************************* Forward Declarations *******************************/ |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index debf3e2a0798..aa4be8a31415 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -353,7 +353,6 @@ MODULE_PARM_DESC(aic7xxx,
353 | " periodically to prevent tag starvation.\n" | 353 | " periodically to prevent tag starvation.\n" |
354 | " This may be required by some older disk\n" | 354 | " This may be required by some older disk\n" |
355 | " drives or RAID arrays.\n" | 355 | " drives or RAID arrays.\n" |
356 | " reverse_scan Sort PCI devices highest Bus/Slot to lowest\n" | ||
357 | " tag_info:<tag_str> Set per-target tag depth\n" | 356 | " tag_info:<tag_str> Set per-target tag depth\n" |
358 | " global_tag_depth:<int> Global tag depth for every target\n" | 357 | " global_tag_depth:<int> Global tag depth for every target\n" |
359 | " on every bus\n" | 358 | " on every bus\n" |
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile
index 8c91fda6482c..b98c5c1056c3 100644
--- a/drivers/scsi/aic7xxx/aicasm/Makefile
+++ b/drivers/scsi/aic7xxx/aicasm/Makefile
@@ -14,6 +14,8 @@ LIBS= -ldb
14 | clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG) | 14 | clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG) |
15 | # Override default kernel CFLAGS. This is a userland app. | 15 | # Override default kernel CFLAGS. This is a userland app. |
16 | AICASM_CFLAGS:= -I/usr/include -I. | 16 | AICASM_CFLAGS:= -I/usr/include -I. |
17 | LEX= flex | ||
18 | YACC= bison | ||
17 | YFLAGS= -d | 19 | YFLAGS= -d |
18 | 20 | ||
19 | NOMAN= noman | 21 | NOMAN= noman |
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
index 06d7601cdf56..d006a8cb4a74 100644
--- a/drivers/scsi/arm/Kconfig
+++ b/drivers/scsi/arm/Kconfig
@@ -69,6 +69,7 @@ comment "The following drivers are not fully supported"
69 | config SCSI_CUMANA_1 | 69 | config SCSI_CUMANA_1 |
70 | tristate "CumanaSCSI I support (EXPERIMENTAL)" | 70 | tristate "CumanaSCSI I support (EXPERIMENTAL)" |
71 | depends on ARCH_ACORN && EXPERIMENTAL && SCSI | 71 | depends on ARCH_ACORN && EXPERIMENTAL && SCSI |
72 | select SCSI_SPI_ATTRS | ||
72 | help | 73 | help |
73 | This enables support for the Cumana SCSI I card. If you have an | 74 | This enables support for the Cumana SCSI I card. If you have an |
74 | Acorn system with one of these, say Y. If unsure, say N. | 75 | Acorn system with one of these, say Y. If unsure, say N. |
@@ -76,6 +77,7 @@ config SCSI_CUMANA_1
76 | config SCSI_ECOSCSI | 77 | config SCSI_ECOSCSI |
77 | tristate "EcoScsi support (EXPERIMENTAL)" | 78 | tristate "EcoScsi support (EXPERIMENTAL)" |
78 | depends on ARCH_ACORN && EXPERIMENTAL && (ARCH_ARC || ARCH_A5K) && SCSI | 79 | depends on ARCH_ACORN && EXPERIMENTAL && (ARCH_ARC || ARCH_A5K) && SCSI |
80 | select SCSI_SPI_ATTRS | ||
79 | help | 81 | help |
80 | This enables support for the EcoSCSI card -- a small card that sits | 82 | This enables support for the EcoSCSI card -- a small card that sits |
81 | in the Econet socket. If you have an Acorn system with one of these, | 83 | in the Econet socket. If you have an Acorn system with one of these, |
@@ -84,6 +86,7 @@ config SCSI_ECOSCSI
84 | config SCSI_OAK1 | 86 | config SCSI_OAK1 |
85 | tristate "Oak SCSI support (EXPERIMENTAL)" | 87 | tristate "Oak SCSI support (EXPERIMENTAL)" |
86 | depends on ARCH_ACORN && EXPERIMENTAL && SCSI | 88 | depends on ARCH_ACORN && EXPERIMENTAL && SCSI |
89 | select SCSI_SPI_ATTRS | ||
87 | help | 90 | help |
88 | This enables support for the Oak SCSI card. If you have an Acorn | 91 | This enables support for the Oak SCSI card. If you have an Acorn |
89 | system with one of these, say Y. If unsure, say N. | 92 | system with one of these, say Y. If unsure, say N. |
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 3e1053f111dc..4cf7afc31cc7 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2427,7 +2427,7 @@ int fas216_eh_abort(Scsi_Cmnd *SCpnt)
2427 | info->stats.aborts += 1; | 2427 | info->stats.aborts += 1; |
2428 | 2428 | ||
2429 | printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no); | 2429 | printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no); |
2430 | __scsi_print_command(SCpnt->data_cmnd); | 2430 | __scsi_print_command(SCpnt->cmnd); |
2431 | 2431 | ||
2432 | print_debug_list(); | 2432 | print_debug_list(); |
2433 | fas216_dumpstate(info); | 2433 | fas216_dumpstate(info); |
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
index 6dd544a5eb56..8c2600ffc6af 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/scsi.h
@@ -74,7 +74,7 @@ static inline void init_SCp(Scsi_Cmnd *SCpnt)
74 | unsigned long len = 0; | 74 | unsigned long len = 0; |
75 | int buf; | 75 | int buf; |
76 | 76 | ||
77 | SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->buffer; | 77 | SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer; |
78 | SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1; | 78 | SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1; |
79 | SCpnt->SCp.ptr = (char *) | 79 | SCpnt->SCp.ptr = (char *) |
80 | (page_address(SCpnt->SCp.buffer->page) + | 80 | (page_address(SCpnt->SCp.buffer->page) + |
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 94b1261a259d..a9bb3cb7e89b 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -105,9 +105,6 @@ enum {
105 | PIIX_FLAG_SCR = (1 << 26), /* SCR available */ | 105 | PIIX_FLAG_SCR = (1 << 26), /* SCR available */ |
106 | PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */ | 106 | PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */ |
107 | PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */ | 107 | PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */ |
108 | PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */ | ||
109 | /* ICH6/7 use different scheme for map value */ | ||
110 | PIIX_FLAG_COMBINED_ICH6 = PIIX_FLAG_COMBINED | (1 << 30), | ||
111 | 108 | ||
112 | /* combined mode. if set, PATA is channel 0. | 109 | /* combined mode. if set, PATA is channel 0. |
113 | * if clear, PATA is channel 1. | 110 | * if clear, PATA is channel 1. |
@@ -126,6 +123,8 @@ enum {
126 | ich6_sata = 4, | 123 | ich6_sata = 4, |
127 | ich6_sata_ahci = 5, | 124 | ich6_sata_ahci = 5, |
128 | ich6m_sata_ahci = 6, | 125 | ich6m_sata_ahci = 6, |
126 | ich7m_sata_ahci = 7, | ||
127 | ich8_sata_ahci = 8, | ||
129 | 128 | ||
130 | /* constants for mapping table */ | 129 | /* constants for mapping table */ |
131 | P0 = 0, /* port 0 */ | 130 | P0 = 0, /* port 0 */ |
@@ -141,11 +140,19 @@
141 | 140 | ||
142 | struct piix_map_db { | 141 | struct piix_map_db { |
143 | const u32 mask; | 142 | const u32 mask; |
143 | const u16 port_enable; | ||
144 | const int present_shift; | ||
144 | const int map[][4]; | 145 | const int map[][4]; |
145 | }; | 146 | }; |
146 | 147 | ||
148 | struct piix_host_priv { | ||
149 | const int *map; | ||
150 | const struct piix_map_db *map_db; | ||
151 | }; | ||
152 | |||
147 | static int piix_init_one (struct pci_dev *pdev, | 153 | static int piix_init_one (struct pci_dev *pdev, |
148 | const struct pci_device_id *ent); | 154 | const struct pci_device_id *ent); |
155 | static void piix_host_stop(struct ata_host_set *host_set); | ||
149 | static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); | 156 | static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); |
150 | static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); | 157 | static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); |
151 | static void piix_pata_error_handler(struct ata_port *ap); | 158 | static void piix_pata_error_handler(struct ata_port *ap); |
@@ -182,15 +189,15 @@ static const struct pci_device_id piix_pci_tbl[] = {
182 | /* 82801GB/GR/GH (ICH7, identical to ICH6) */ | 189 | /* 82801GB/GR/GH (ICH7, identical to ICH6) */ |
183 | { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, | 190 | { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, |
184 | /* 2801GBM/GHM (ICH7M, identical to ICH6M) */ | 191 | /* 2801GBM/GHM (ICH7M, identical to ICH6M) */ |
185 | { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci }, | 192 | { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich7m_sata_ahci }, |
186 | /* Enterprise Southbridge 2 (where's the datasheet?) */ | 193 | /* Enterprise Southbridge 2 (where's the datasheet?) */ |
187 | { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, | 194 | { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, |
188 | /* SATA Controller 1 IDE (ICH8, no datasheet yet) */ | 195 | /* SATA Controller 1 IDE (ICH8, no datasheet yet) */ |
189 | { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, | 196 | { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, |
190 | /* SATA Controller 2 IDE (ICH8, ditto) */ | 197 | /* SATA Controller 2 IDE (ICH8, ditto) */ |
191 | { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, | 198 | { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, |
192 | /* Mobile SATA Controller IDE (ICH8M, ditto) */ | 199 | /* Mobile SATA Controller IDE (ICH8M, ditto) */ |
193 | { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci }, | 200 | { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, |
194 | 201 | ||
195 | { } /* terminate list */ | 202 | { } /* terminate list */ |
196 | }; | 203 | }; |
@@ -254,7 +261,7 @@ static const struct ata_port_operations piix_pata_ops = {
254 | 261 | ||
255 | .port_start = ata_port_start, | 262 | .port_start = ata_port_start, |
256 | .port_stop = ata_port_stop, | 263 | .port_stop = ata_port_stop, |
257 | .host_stop = ata_host_stop, | 264 | .host_stop = piix_host_stop, |
258 | }; | 265 | }; |
259 | 266 | ||
260 | static const struct ata_port_operations piix_sata_ops = { | 267 | static const struct ata_port_operations piix_sata_ops = { |
@@ -284,11 +291,13 @@ static const struct ata_port_operations piix_sata_ops = {
284 | 291 | ||
285 | .port_start = ata_port_start, | 292 | .port_start = ata_port_start, |
286 | .port_stop = ata_port_stop, | 293 | .port_stop = ata_port_stop, |
287 | .host_stop = ata_host_stop, | 294 | .host_stop = piix_host_stop, |
288 | }; | 295 | }; |
289 | 296 | ||
290 | static struct piix_map_db ich5_map_db = { | 297 | static const struct piix_map_db ich5_map_db = { |
291 | .mask = 0x7, | 298 | .mask = 0x7, |
299 | .port_enable = 0x3, | ||
300 | .present_shift = 4, | ||
292 | .map = { | 301 | .map = { |
293 | /* PM PS SM SS MAP */ | 302 | /* PM PS SM SS MAP */ |
294 | { P0, NA, P1, NA }, /* 000b */ | 303 | { P0, NA, P1, NA }, /* 000b */ |
@@ -302,8 +311,10 @@ static struct piix_map_db ich5_map_db = {
302 | }, | 311 | }, |
303 | }; | 312 | }; |
304 | 313 | ||
305 | static struct piix_map_db ich6_map_db = { | 314 | static const struct piix_map_db ich6_map_db = { |
306 | .mask = 0x3, | 315 | .mask = 0x3, |
316 | .port_enable = 0xf, | ||
317 | .present_shift = 4, | ||
307 | .map = { | 318 | .map = { |
308 | /* PM PS SM SS MAP */ | 319 | /* PM PS SM SS MAP */ |
309 | { P0, P2, P1, P3 }, /* 00b */ | 320 | { P0, P2, P1, P3 }, /* 00b */ |
@@ -313,8 +324,10 @@ static struct piix_map_db ich6_map_db = {
313 | }, | 324 | }, |
314 | }; | 325 | }; |
315 | 326 | ||
316 | static struct piix_map_db ich6m_map_db = { | 327 | static const struct piix_map_db ich6m_map_db = { |
317 | .mask = 0x3, | 328 | .mask = 0x3, |
329 | .port_enable = 0x5, | ||
330 | .present_shift = 4, | ||
318 | .map = { | 331 | .map = { |
319 | /* PM PS SM SS MAP */ | 332 | /* PM PS SM SS MAP */ |
320 | { P0, P2, RV, RV }, /* 00b */ | 333 | { P0, P2, RV, RV }, /* 00b */ |
@@ -324,6 +337,47 @@ static struct piix_map_db ich6m_map_db = {
324 | }, | 337 | }, |
325 | }; | 338 | }; |
326 | 339 | ||
340 | static const struct piix_map_db ich7m_map_db = { | ||
341 | .mask = 0x3, | ||
342 | .port_enable = 0x5, | ||
343 | .present_shift = 4, | ||
344 | |||
345 | /* Map 01b isn't specified in the doc but some notebooks use | ||
346 | * it anyway. ATM, the only case spotted carries subsystem ID | ||
347 | * 1025:0107. This is the only difference from ich6m. | ||
348 | */ | ||
349 | .map = { | ||
350 | /* PM PS SM SS MAP */ | ||
351 | { P0, P2, RV, RV }, /* 00b */ | ||
352 | { IDE, IDE, P1, P3 }, /* 01b */ | ||
353 | { P0, P2, IDE, IDE }, /* 10b */ | ||
354 | { RV, RV, RV, RV }, | ||
355 | }, | ||
356 | }; | ||
357 | |||
358 | static const struct piix_map_db ich8_map_db = { | ||
359 | .mask = 0x3, | ||
360 | .port_enable = 0x3, | ||
361 | .present_shift = 8, | ||
362 | .map = { | ||
363 | /* PM PS SM SS MAP */ | ||
364 | { P0, NA, P1, NA }, /* 00b (hardwired) */ | ||
365 | { RV, RV, RV, RV }, | ||
366 | { RV, RV, RV, RV }, /* 10b (never) */ | ||
367 | { RV, RV, RV, RV }, | ||
368 | }, | ||
369 | }; | ||
370 | |||
371 | static const struct piix_map_db *piix_map_db_table[] = { | ||
372 | [ich5_sata] = &ich5_map_db, | ||
373 | [esb_sata] = &ich5_map_db, | ||
374 | [ich6_sata] = &ich6_map_db, | ||
375 | [ich6_sata_ahci] = &ich6_map_db, | ||
376 | [ich6m_sata_ahci] = &ich6m_map_db, | ||
377 | [ich7m_sata_ahci] = &ich7m_map_db, | ||
378 | [ich8_sata_ahci] = &ich8_map_db, | ||
379 | }; | ||
380 | |||
327 | static struct ata_port_info piix_port_info[] = { | 381 | static struct ata_port_info piix_port_info[] = { |
328 | /* piix4_pata */ | 382 | /* piix4_pata */ |
329 | { | 383 | { |
@@ -356,63 +410,82 @@ static struct ata_port_info piix_port_info[] = {
356 | /* ich5_sata */ | 410 | /* ich5_sata */ |
357 | { | 411 | { |
358 | .sht = &piix_sht, | 412 | .sht = &piix_sht, |
359 | .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | | 413 | .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | |
360 | PIIX_FLAG_CHECKINTR, | 414 | PIIX_FLAG_IGNORE_PCS, |
361 | .pio_mask = 0x1f, /* pio0-4 */ | 415 | .pio_mask = 0x1f, /* pio0-4 */ |
362 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 416 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
363 | .udma_mask = 0x7f, /* udma0-6 */ | 417 | .udma_mask = 0x7f, /* udma0-6 */ |
364 | .port_ops = &piix_sata_ops, | 418 | .port_ops = &piix_sata_ops, |
365 | .private_data = &ich5_map_db, | ||
366 | }, | 419 | }, |
367 | 420 | ||
368 | /* i6300esb_sata */ | 421 | /* i6300esb_sata */ |
369 | { | 422 | { |
370 | .sht = &piix_sht, | 423 | .sht = &piix_sht, |
371 | .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | | 424 | .host_flags = ATA_FLAG_SATA | |
372 | PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS, | 425 | PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS, |
373 | .pio_mask = 0x1f, /* pio0-4 */ | 426 | .pio_mask = 0x1f, /* pio0-4 */ |
374 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 427 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
375 | .udma_mask = 0x7f, /* udma0-6 */ | 428 | .udma_mask = 0x7f, /* udma0-6 */ |
376 | .port_ops = &piix_sata_ops, | 429 | .port_ops = &piix_sata_ops, |
377 | .private_data = &ich5_map_db, | ||
378 | }, | 430 | }, |
379 | 431 | ||
380 | /* ich6_sata */ | 432 | /* ich6_sata */ |
381 | { | 433 | { |
382 | .sht = &piix_sht, | 434 | .sht = &piix_sht, |
383 | .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | | 435 | .host_flags = ATA_FLAG_SATA | |
384 | PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR, | 436 | PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR, |
385 | .pio_mask = 0x1f, /* pio0-4 */ | 437 | .pio_mask = 0x1f, /* pio0-4 */ |
386 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 438 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
387 | .udma_mask = 0x7f, /* udma0-6 */ | 439 | .udma_mask = 0x7f, /* udma0-6 */ |
388 | .port_ops = &piix_sata_ops, | 440 | .port_ops = &piix_sata_ops, |
389 | .private_data = &ich6_map_db, | ||
390 | }, | 441 | }, |
391 | 442 | ||
392 | /* ich6_sata_ahci */ | 443 | /* ich6_sata_ahci */ |
393 | { | 444 | { |
394 | .sht = &piix_sht, | 445 | .sht = &piix_sht, |
395 | .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | | 446 | .host_flags = ATA_FLAG_SATA | |
396 | PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | | 447 | PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | |
397 | PIIX_FLAG_AHCI, | 448 | PIIX_FLAG_AHCI, |
398 | .pio_mask = 0x1f, /* pio0-4 */ | 449 | .pio_mask = 0x1f, /* pio0-4 */ |
399 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 450 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
400 | .udma_mask = 0x7f, /* udma0-6 */ | 451 | .udma_mask = 0x7f, /* udma0-6 */ |
401 | .port_ops = &piix_sata_ops, | 452 | .port_ops = &piix_sata_ops, |
402 | .private_data = &ich6_map_db, | ||
403 | }, | 453 | }, |
404 | 454 | ||
405 | /* ich6m_sata_ahci */ | 455 | /* ich6m_sata_ahci */ |
406 | { | 456 | { |
407 | .sht = &piix_sht, | 457 | .sht = &piix_sht, |
408 | .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | | 458 | .host_flags = ATA_FLAG_SATA | |
459 | PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | | ||
460 | PIIX_FLAG_AHCI, | ||
461 | .pio_mask = 0x1f, /* pio0-4 */ | ||
462 | .mwdma_mask = 0x07, /* mwdma0-2 */ | ||
463 | .udma_mask = 0x7f, /* udma0-6 */ | ||
464 | .port_ops = &piix_sata_ops, | ||
465 | }, | ||
466 | |||
467 | /* ich7m_sata_ahci */ | ||
468 | { | ||
469 | .sht = &piix_sht, | ||
470 | .host_flags = ATA_FLAG_SATA | | ||
471 | PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | | ||
472 | PIIX_FLAG_AHCI, | ||
473 | .pio_mask = 0x1f, /* pio0-4 */ | ||
474 | .mwdma_mask = 0x07, /* mwdma0-2 */ | ||
475 | .udma_mask = 0x7f, /* udma0-6 */ | ||
476 | .port_ops = &piix_sata_ops, | ||
477 | }, | ||
478 | |||
479 | /* ich8_sata_ahci */ | ||
480 | { | ||
481 | .sht = &piix_sht, | ||
482 | .host_flags = ATA_FLAG_SATA | | ||
409 | PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | | 483 | PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | |
410 | PIIX_FLAG_AHCI, | 484 | PIIX_FLAG_AHCI, |
411 | .pio_mask = 0x1f, /* pio0-4 */ | 485 | .pio_mask = 0x1f, /* pio0-4 */ |
412 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 486 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
413 | .udma_mask = 0x7f, /* udma0-6 */ | 487 | .udma_mask = 0x7f, /* udma0-6 */ |
414 | .port_ops = &piix_sata_ops, | 488 | .port_ops = &piix_sata_ops, |
415 | .private_data = &ich6m_map_db, | ||
416 | }, | 489 | }, |
417 | }; | 490 | }; |
418 | 491 | ||
@@ -427,6 +500,11 @@ MODULE_LICENSE("GPL");
427 | MODULE_DEVICE_TABLE(pci, piix_pci_tbl); | 500 | MODULE_DEVICE_TABLE(pci, piix_pci_tbl); |
428 | MODULE_VERSION(DRV_VERSION); | 501 | MODULE_VERSION(DRV_VERSION); |
429 | 502 | ||
503 | static int force_pcs = 0; | ||
504 | module_param(force_pcs, int, 0444); | ||
505 | MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around " | ||
506 | "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)"); | ||
507 | |||
430 | /** | 508 | /** |
431 | * piix_pata_cbl_detect - Probe host controller cable detect info | 509 | * piix_pata_cbl_detect - Probe host controller cable detect info |
432 | * @ap: Port for which cable detect info is desired | 510 | * @ap: Port for which cable detect info is desired |
@@ -491,74 +569,83 @@ static void piix_pata_error_handler(struct ata_port *ap)
491 | } | 569 | } |
492 | 570 | ||
493 | /** | 571 | /** |
494 | * piix_sata_prereset - prereset for SATA host controller | 572 | * piix_sata_present_mask - determine present mask for SATA host controller |
495 | * @ap: Target port | 573 | * @ap: Target port |
496 | * | 574 | * |
497 | * Reads and configures SATA PCI device's PCI config register | 575 | * Reads SATA PCI device's PCI config register Port Configuration |
498 | * Port Configuration and Status (PCS) to determine port and | 576 | * and Status (PCS) to determine port and device availability. |
499 | * device availability. Return -ENODEV to skip reset if no | ||
500 | * device is present. | ||
501 | * | 577 | * |
502 | * LOCKING: | 578 | * LOCKING: |
503 | * None (inherited from caller). | 579 | * None (inherited from caller). |
504 | * | 580 | * |
505 | * RETURNS: | 581 | * RETURNS: |
506 | * 0 if device is present, -ENODEV otherwise. | 582 | * determined present_mask |
507 | */ | 583 | */ |
508 | static int piix_sata_prereset(struct ata_port *ap) | 584 | static unsigned int piix_sata_present_mask(struct ata_port *ap) |
509 | { | 585 | { |
510 | struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); | 586 | struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); |
511 | const unsigned int *map = ap->host_set->private_data; | 587 | struct piix_host_priv *hpriv = ap->host_set->private_data; |
588 | const unsigned int *map = hpriv->map; | ||
512 | int base = 2 * ap->hard_port_no; | 589 | int base = 2 * ap->hard_port_no; |
513 | unsigned int present_mask = 0; | 590 | unsigned int present_mask = 0; |
514 | int port, i; | 591 | int port, i; |
515 | u8 pcs; | 592 | u16 pcs; |
516 | 593 | ||
517 | pci_read_config_byte(pdev, ICH5_PCS, &pcs); | 594 | pci_read_config_word(pdev, ICH5_PCS, &pcs); |
518 | DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base); | 595 | DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base); |
519 | 596 | ||
520 | /* enable all ports on this ap and wait for them to settle */ | ||
521 | for (i = 0; i < 2; i++) { | ||
522 | port = map[base + i]; | ||
523 | if (port >= 0) | ||
524 | pcs |= 1 << port; | ||
525 | } | ||
526 | |||
527 | pci_write_config_byte(pdev, ICH5_PCS, pcs); | ||
528 | msleep(100); | ||
529 | |||
530 | /* let's see which devices are present */ | ||
531 | pci_read_config_byte(pdev, ICH5_PCS, &pcs); | ||
532 | |||
533 | for (i = 0; i < 2; i++) { | 597 | for (i = 0; i < 2; i++) { |
534 | port = map[base + i]; | 598 | port = map[base + i]; |
535 | if (port < 0) | 599 | if (port < 0) |
536 | continue; | 600 | continue; |
537 | if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port)) | 601 | if ((ap->flags & PIIX_FLAG_IGNORE_PCS) || |
602 | (pcs & 1 << (hpriv->map_db->present_shift + port))) | ||
538 | present_mask |= 1 << i; | 603 | present_mask |= 1 << i; |
539 | else | ||
540 | pcs &= ~(1 << port); | ||
541 | } | 604 | } |
542 | 605 | ||
543 | /* disable offline ports on non-AHCI controllers */ | ||
544 | if (!(ap->flags & PIIX_FLAG_AHCI)) | ||
545 | pci_write_config_byte(pdev, ICH5_PCS, pcs); | ||
546 | |||
547 | DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", | 606 | DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", |
548 | ap->id, pcs, present_mask); | 607 | ap->id, pcs, present_mask); |
549 | 608 | ||
550 | if (!present_mask) { | 609 | return present_mask; |
551 | ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n"); | 610 | } |
552 | ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; | 611 | |
553 | return 0; | 612 | /** |
613 | * piix_sata_softreset - reset SATA host port via ATA SRST | ||
614 | * @ap: port to reset | ||
615 | * @classes: resulting classes of attached devices | ||
616 | * | ||
617 | * Reset SATA host port via ATA SRST. On controllers with | ||
618 | * reliable PCS present bits, the bits are used to determine | ||
619 | * device presence. | ||
620 | * | ||
621 | * LOCKING: | ||
622 | * Kernel thread context (may sleep) | ||
623 | * | ||
624 | * RETURNS: | ||
625 | * 0 on success, -errno otherwise. | ||
626 | */ | ||
627 | static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes) | ||
628 | { | ||
629 | unsigned int present_mask; | ||
630 | int i, rc; | ||
631 | |||
632 | present_mask = piix_sata_present_mask(ap); | ||
633 | |||
634 | rc = ata_std_softreset(ap, classes); | ||
635 | if (rc) | ||
636 | return rc; | ||
637 | |||
638 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | ||
639 | if (!(present_mask & (1 << i))) | ||
640 | classes[i] = ATA_DEV_NONE; | ||
554 | } | 641 | } |
555 | 642 | ||
556 | return ata_std_prereset(ap); | 643 | return 0; |
557 | } | 644 | } |
558 | 645 | ||
559 | static void piix_sata_error_handler(struct ata_port *ap) | 646 | static void piix_sata_error_handler(struct ata_port *ap) |
560 | { | 647 | { |
561 | ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL, | 648 | ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL, |
562 | ata_std_postreset); | 649 | ata_std_postreset); |
563 | } | 650 | } |
564 | 651 | ||
@@ -761,10 +848,40 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
761 | return no_piix_dma; | 848 | return no_piix_dma; |
762 | } | 849 | } |
763 | 850 | ||
851 | static void __devinit piix_init_pcs(struct pci_dev *pdev, | ||
852 | struct ata_port_info *pinfo, | ||
853 | const struct piix_map_db *map_db) | ||
854 | { | ||
855 | u16 pcs, new_pcs; | ||
856 | |||
857 | pci_read_config_word(pdev, ICH5_PCS, &pcs); | ||
858 | |||
859 | new_pcs = pcs | map_db->port_enable; | ||
860 | |||
861 | if (new_pcs != pcs) { | ||
862 | DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs); | ||
863 | pci_write_config_word(pdev, ICH5_PCS, new_pcs); | ||
864 | msleep(150); | ||
865 | } | ||
866 | |||
867 | if (force_pcs == 1) { | ||
868 | dev_printk(KERN_INFO, &pdev->dev, | ||
869 | "force ignoring PCS (0x%x)\n", new_pcs); | ||
870 | pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS; | ||
871 | pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS; | ||
872 | } else if (force_pcs == 2) { | ||
873 | dev_printk(KERN_INFO, &pdev->dev, | ||
874 | "force honoring PCS (0x%x)\n", new_pcs); | ||
875 | pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS; | ||
876 | pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS; | ||
877 | } | ||
878 | } | ||
879 | |||
764 | static void __devinit piix_init_sata_map(struct pci_dev *pdev, | 880 | static void __devinit piix_init_sata_map(struct pci_dev *pdev, |
765 | struct ata_port_info *pinfo) | 881 | struct ata_port_info *pinfo, |
882 | const struct piix_map_db *map_db) | ||
766 | { | 883 | { |
767 | struct piix_map_db *map_db = pinfo[0].private_data; | 884 | struct piix_host_priv *hpriv = pinfo[0].private_data; |
768 | const unsigned int *map; | 885 | const unsigned int *map; |
769 | int i, invalid_map = 0; | 886 | int i, invalid_map = 0; |
770 | u8 map_value; | 887 | u8 map_value; |
@@ -788,6 +905,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
788 | case IDE: | 905 | case IDE: |
789 | WARN_ON((i & 1) || map[i + 1] != IDE); | 906 | WARN_ON((i & 1) || map[i + 1] != IDE); |
790 | pinfo[i / 2] = piix_port_info[ich5_pata]; | 907 | pinfo[i / 2] = piix_port_info[ich5_pata]; |
908 | pinfo[i / 2].private_data = hpriv; | ||
791 | i++; | 909 | i++; |
792 | printk(" IDE IDE"); | 910 | printk(" IDE IDE"); |
793 | break; | 911 | break; |
@@ -805,8 +923,8 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
805 | dev_printk(KERN_ERR, &pdev->dev, | 923 | dev_printk(KERN_ERR, &pdev->dev, |
806 | "invalid MAP value %u\n", map_value); | 924 | "invalid MAP value %u\n", map_value); |
807 | 925 | ||
808 | pinfo[0].private_data = (void *)map; | 926 | hpriv->map = map; |
809 | pinfo[1].private_data = (void *)map; | 927 | hpriv->map_db = map_db; |
810 | } | 928 | } |
811 | 929 | ||
812 | /** | 930 | /** |
@@ -829,6 +947,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
829 | static int printed_version; | 947 | static int printed_version; |
830 | struct ata_port_info port_info[2]; | 948 | struct ata_port_info port_info[2]; |
831 | struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] }; | 949 | struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] }; |
950 | struct piix_host_priv *hpriv; | ||
832 | unsigned long host_flags; | 951 | unsigned long host_flags; |
833 | 952 | ||
834 | if (!printed_version++) | 953 | if (!printed_version++) |
@@ -839,8 +958,14 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
839 | if (!in_module_init) | 958 | if (!in_module_init) |
840 | return -ENODEV; | 959 | return -ENODEV; |
841 | 960 | ||
961 | hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); | ||
962 | if (!hpriv) | ||
963 | return -ENOMEM; | ||
964 | |||
842 | port_info[0] = piix_port_info[ent->driver_data]; | 965 | port_info[0] = piix_port_info[ent->driver_data]; |
843 | port_info[1] = piix_port_info[ent->driver_data]; | 966 | port_info[1] = piix_port_info[ent->driver_data]; |
967 | port_info[0].private_data = hpriv; | ||
968 | port_info[1].private_data = hpriv; | ||
844 | 969 | ||
845 | host_flags = port_info[0].host_flags; | 970 | host_flags = port_info[0].host_flags; |
846 | 971 | ||
@@ -855,8 +980,12 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
855 | } | 980 | } |
856 | 981 | ||
857 | /* Initialize SATA map */ | 982 | /* Initialize SATA map */ |
858 | if (host_flags & ATA_FLAG_SATA) | 983 | if (host_flags & ATA_FLAG_SATA) { |
859 | piix_init_sata_map(pdev, port_info); | 984 | piix_init_sata_map(pdev, port_info, |
985 | piix_map_db_table[ent->driver_data]); | ||
986 | piix_init_pcs(pdev, port_info, | ||
987 | piix_map_db_table[ent->driver_data]); | ||
988 | } | ||
860 | 989 | ||
861 | /* On ICH5, some BIOSen disable the interrupt using the | 990 | /* On ICH5, some BIOSen disable the interrupt using the |
862 | * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. | 991 | * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. |
@@ -879,6 +1008,13 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
879 | return ata_pci_init_one(pdev, ppinfo, 2); | 1008 | return ata_pci_init_one(pdev, ppinfo, 2); |
880 | } | 1009 | } |
881 | 1010 | ||
1011 | static void piix_host_stop(struct ata_host_set *host_set) | ||
1012 | { | ||
1013 | if (host_set->next == NULL) | ||
1014 | kfree(host_set->private_data); | ||
1015 | ata_host_stop(host_set); | ||
1016 | } | ||
1017 | |||
882 | static int __init piix_init(void) | 1018 | static int __init piix_init(void) |
883 | { | 1019 | { |
884 | int rc; | 1020 | int rc; |
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 007a14e5c3fd..e397129c90d1 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -507,7 +507,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
507 | */ | 507 | */ |
508 | 508 | ||
509 | if (cmd->use_sg) { | 509 | if (cmd->use_sg) { |
510 | cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; | 510 | cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; |
511 | cmd->SCp.buffers_residual = cmd->use_sg - 1; | 511 | cmd->SCp.buffers_residual = cmd->use_sg - 1; |
512 | cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+ | 512 | cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+ |
513 | cmd->SCp.buffer->offset; | 513 | cmd->SCp.buffer->offset; |
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index dddd2acce76f..61f6024b61ba 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -5,6 +5,7 @@
5 | * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) | 5 | * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) |
6 | * by D. Gilbert and aeb (20020609) | 6 | * by D. Gilbert and aeb (20020609) |
7 | * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025 | 7 | * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025 |
8 | * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702 | ||
8 | */ | 9 | */ |
9 | 10 | ||
10 | #include <linux/blkdev.h> | 11 | #include <linux/blkdev.h> |
@@ -36,55 +37,56 @@ static const char * cdb_byte0_names[] = {
36 | /* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", | 37 | /* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", |
37 | /* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, | 38 | /* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, |
38 | "Reasssign Blocks", | 39 | "Reasssign Blocks", |
39 | /* 08-0d */ "Read (6)", NULL, "Write (6)", "Seek (6)", NULL, NULL, | 40 | /* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL, |
40 | /* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", | 41 | /* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", |
41 | /* 13-16 */ "Verify (6)", "Recover Buffered Data", "Mode Select (6)", | 42 | /* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)", |
42 | "Reserve (6)", | 43 | "Reserve(6)", |
43 | /* 17-1a */ "Release (6)", "Copy", "Erase", "Mode Sense (6)", | 44 | /* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)", |
44 | /* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", | 45 | /* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", |
45 | /* 1e-1f */ "Prevent/Allow Medium Removal", NULL, | 46 | /* 1e-1f */ "Prevent/Allow Medium Removal", NULL, |
46 | /* 20-22 */ NULL, NULL, NULL, | 47 | /* 20-22 */ NULL, NULL, NULL, |
47 | /* 23-28 */ "Read Format Capacities", "Set Window", | 48 | /* 23-28 */ "Read Format Capacities", "Set Window", |
48 | "Read Capacity (10)", NULL, NULL, "Read (10)", | 49 | "Read Capacity(10)", NULL, NULL, "Read(10)", |
49 | /* 29-2d */ "Read Generation", "Write (10)", "Seek (10)", "Erase (10)", | 50 | /* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)", |
50 | "Read updated block", | 51 | "Read updated block", |
51 | /* 2e-31 */ "Write Verify (10)", "Verify (10)", "Search High", "Search Equal", | 52 | /* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal", |
52 | /* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", | 53 | /* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", |
53 | /* 35-37 */ "Synchronize Cache (10)", "Lock/Unlock Cache (10)", | 54 | /* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)", |
54 | "Read Defect Data(10)", | 55 | "Read Defect Data(10)", |
55 | /* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", | 56 | /* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", |
56 | "Read Buffer", | 57 | "Read Buffer", |
57 | /* 3d-3f */ "Update Block", "Read Long (10)", "Write Long (10)", | 58 | /* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)", |
58 | /* 40-41 */ "Change Definition", "Write Same (10)", | 59 | /* 40-41 */ "Change Definition", "Write Same(10)", |
59 | /* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", | 60 | /* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", |
60 | "Play audio (10)", "Get configuration", "Play audio msf", | 61 | "Play audio(10)", "Get configuration", "Play audio msf", |
61 | "Play audio track/index", | 62 | "Play audio track/index", |
62 | /* 49-4f */ "Play track relative (10)", "Get event status notification", | 63 | /* 49-4f */ "Play track relative(10)", "Get event status notification", |
63 | "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", | 64 | "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", |
64 | NULL, | 65 | NULL, |
65 | /* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info", | 66 | /* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info", |
66 | "Reserve track", "Send OPC info", "Mode Select (10)", | 67 | "Reserve track", "Send OPC info", "Mode Select(10)", |
67 | /* 56-5b */ "Reserve (10)", "Release (10)", "Repair track", "Read master cue", | 68 | /* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue", |
68 | "Mode Sense (10)", "Close track/session", | 69 | "Mode Sense(10)", "Close track/session", |
69 | /* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in", | 70 | /* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in", |
70 | "Persistent reserve out", | 71 | "Persistent reserve out", |
71 | /* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 72 | /* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
72 | /* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 73 | /* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
73 | /* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 74 | /* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
74 | /* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length", | 75 | /* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length", |
75 | /* 80-84 */ "Xdwrite (16)", "Rebuild (16)", "Regenerate (16)", "Extended copy", | 76 | /* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy", |
76 | "Receive copy results", | 77 | "Receive copy results", |
77 | /* 85-89 */ "Memory Export In (16)", "Access control in", "Access control out", | 78 | /* 85-89 */ "ATA command pass through(16)", "Access control in", |
78 | "Read (16)", "Memory Export Out (16)", | 79 | "Access control out", "Read(16)", "Memory Export Out(16)", |
79 | /* 8a-8f */ "Write (16)", NULL, "Read attributes", "Write attributes", | 80 | /* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes", |
80 | "Write and verify (16)", "Verify (16)", | 81 | "Write and verify(16)", "Verify(16)", |
81 | /* 90-94 */ "Pre-fetch (16)", "Synchronize cache (16)", | 82 | /* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)", |
82 | "Lock/unlock cache (16)", "Write same (16)", NULL, | 83 | "Lock/unlock cache(16)", "Write same(16)", NULL, |
83 | /* 95-99 */ NULL, NULL, NULL, NULL, NULL, | 84 | /* 95-99 */ NULL, NULL, NULL, NULL, NULL, |
84 | /* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in (16)", | 85 | /* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in(16)", |
85 | "Service action out (16)", | 86 | "Service action out(16)", |
86 | /* a0-a5 */ "Report luns", "Blank", "Send event", "Maintenance in", | 87 | /* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank", |
87 | "Maintenance out", "Move medium/play audio(12)", | 88 | "Security protocol in", "Maintenance in", "Maintenance out", |
89 | "Move medium/play audio(12)", | ||
88 | /* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)", | 90 | /* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)", |
89 | "Play track relative(12)", | 91 | "Play track relative(12)", |
90 | /* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance", | 92 | /* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance", |
@@ -92,12 +94,12 @@ static const char * cdb_byte0_names[] = { | |||
92 | /* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)", | 94 | /* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)", |
93 | /* b2-b4 */ "Search data low(12)", "Set limits(12)", | 95 | /* b2-b4 */ "Search data low(12)", "Set limits(12)", |
94 | "Read element status attached", | 96 | "Read element status attached", |
95 | /* b5-b6 */ "Request volume element address", "Send volume tag, set streaming", | 97 | /* b5-b6 */ "Security protocol out", "Send volume tag, set streaming", |
96 | /* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf", | 98 | /* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf", |
97 | /* ba-bc */ "Redundancy group (in), Scan", | 99 | /* ba-bc */ "Redundancy group (in), Scan", |
98 | "Redundancy group (out), Set cd-rom speed", "Spare in, Play cd", | 100 | "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd", |
99 | /* bd-bf */ "Spare out, Mechanism status", "Volume set in, Read cd", | 101 | /* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd", |
100 | "Volume set out, Send DVD structure", | 102 | "Volume set (out), Send DVD structure", |
101 | }; | 103 | }; |
102 | 104 | ||
103 | struct value_name_pair { | 105 | struct value_name_pair { |
@@ -112,6 +114,7 @@ static const struct value_name_pair maint_in_arr[] = { | |||
112 | {0xc, "Report supported operation codes"}, | 114 | {0xc, "Report supported operation codes"}, |
113 | {0xd, "Report supported task management functions"}, | 115 | {0xd, "Report supported task management functions"}, |
114 | {0xe, "Report priority"}, | 116 | {0xe, "Report priority"}, |
117 | {0xf, "Report timestamp"}, | ||
115 | }; | 118 | }; |
116 | #define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) | 119 | #define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) |
117 | 120 | ||
@@ -120,6 +123,7 @@ static const struct value_name_pair maint_out_arr[] = { | |||
120 | {0xa, "Set target port groups"}, | 123 | {0xa, "Set target port groups"}, |
121 | {0xb, "Change aliases"}, | 124 | {0xb, "Change aliases"}, |
122 | {0xe, "Set priority"}, | 125 | {0xe, "Set priority"}, |
126 | {0xf, "Set timestamp"}, | ||
123 | }; | 127 | }; |
124 | #define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) | 128 | #define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) |
125 | 129 | ||
@@ -427,6 +431,7 @@ static struct error_info additional[] = | |||
427 | {0x001A, "Rewind operation in progress"}, | 431 | {0x001A, "Rewind operation in progress"}, |
428 | {0x001B, "Set capacity operation in progress"}, | 432 | {0x001B, "Set capacity operation in progress"}, |
429 | {0x001C, "Verify operation in progress"}, | 433 | {0x001C, "Verify operation in progress"}, |
434 | {0x001D, "ATA pass through information available"}, | ||
430 | 435 | ||
431 | {0x0100, "No index/sector signal"}, | 436 | {0x0100, "No index/sector signal"}, |
432 | 437 | ||
@@ -438,7 +443,7 @@ static struct error_info additional[] = | |||
438 | 443 | ||
439 | {0x0400, "Logical unit not ready, cause not reportable"}, | 444 | {0x0400, "Logical unit not ready, cause not reportable"}, |
440 | {0x0401, "Logical unit is in process of becoming ready"}, | 445 | {0x0401, "Logical unit is in process of becoming ready"}, |
441 | {0x0402, "Logical unit not ready, initializing cmd. required"}, | 446 | {0x0402, "Logical unit not ready, initializing command required"}, |
442 | {0x0403, "Logical unit not ready, manual intervention required"}, | 447 | {0x0403, "Logical unit not ready, manual intervention required"}, |
443 | {0x0404, "Logical unit not ready, format in progress"}, | 448 | {0x0404, "Logical unit not ready, format in progress"}, |
444 | {0x0405, "Logical unit not ready, rebuild in progress"}, | 449 | {0x0405, "Logical unit not ready, rebuild in progress"}, |
@@ -478,6 +483,9 @@ static struct error_info additional[] = | |||
478 | {0x0B00, "Warning"}, | 483 | {0x0B00, "Warning"}, |
479 | {0x0B01, "Warning - specified temperature exceeded"}, | 484 | {0x0B01, "Warning - specified temperature exceeded"}, |
480 | {0x0B02, "Warning - enclosure degraded"}, | 485 | {0x0B02, "Warning - enclosure degraded"}, |
486 | {0x0B03, "Warning - background self-test failed"}, | ||
487 | {0x0B04, "Warning - background pre-scan detected medium error"}, | ||
488 | {0x0B05, "Warning - background medium scan detected medium error"}, | ||
481 | 489 | ||
482 | {0x0C00, "Write error"}, | 490 | {0x0C00, "Write error"}, |
483 | {0x0C01, "Write error - recovered with auto reallocation"}, | 491 | {0x0C01, "Write error - recovered with auto reallocation"}, |
@@ -493,6 +501,7 @@ static struct error_info additional[] = | |||
493 | {0x0C0B, "Auxiliary memory write error"}, | 501 | {0x0C0B, "Auxiliary memory write error"}, |
494 | {0x0C0C, "Write error - unexpected unsolicited data"}, | 502 | {0x0C0C, "Write error - unexpected unsolicited data"}, |
495 | {0x0C0D, "Write error - not enough unsolicited data"}, | 503 | {0x0C0D, "Write error - not enough unsolicited data"}, |
504 | {0x0C0F, "Defects in error window"}, | ||
496 | 505 | ||
497 | {0x0D00, "Error detected by third party temporary initiator"}, | 506 | {0x0D00, "Error detected by third party temporary initiator"}, |
498 | {0x0D01, "Third party device failure"}, | 507 | {0x0D01, "Third party device failure"}, |
@@ -504,11 +513,12 @@ static struct error_info additional[] = | |||
504 | {0x0E00, "Invalid information unit"}, | 513 | {0x0E00, "Invalid information unit"}, |
505 | {0x0E01, "Information unit too short"}, | 514 | {0x0E01, "Information unit too short"}, |
506 | {0x0E02, "Information unit too long"}, | 515 | {0x0E02, "Information unit too long"}, |
516 | {0x0E03, "Invalid field in command information unit"}, | ||
507 | 517 | ||
508 | {0x1000, "Id CRC or ECC error"}, | 518 | {0x1000, "Id CRC or ECC error"}, |
509 | {0x1001, "Data block guard check failed"}, | 519 | {0x1001, "Logical block guard check failed"}, |
510 | {0x1002, "Data block application tag check failed"}, | 520 | {0x1002, "Logical block application tag check failed"}, |
511 | {0x1003, "Data block reference tag check failed"}, | 521 | {0x1003, "Logical block reference tag check failed"}, |
512 | 522 | ||
513 | {0x1100, "Unrecovered read error"}, | 523 | {0x1100, "Unrecovered read error"}, |
514 | {0x1101, "Read retries exhausted"}, | 524 | {0x1101, "Read retries exhausted"}, |
@@ -530,6 +540,7 @@ static struct error_info additional[] = | |||
530 | {0x1111, "Read error - loss of streaming"}, | 540 | {0x1111, "Read error - loss of streaming"}, |
531 | {0x1112, "Auxiliary memory read error"}, | 541 | {0x1112, "Auxiliary memory read error"}, |
532 | {0x1113, "Read error - failed retransmission request"}, | 542 | {0x1113, "Read error - failed retransmission request"}, |
543 | {0x1114, "Read error - lba marked bad by application client"}, | ||
533 | 544 | ||
534 | {0x1200, "Address mark not found for id field"}, | 545 | {0x1200, "Address mark not found for id field"}, |
535 | 546 | ||
@@ -610,11 +621,14 @@ static struct error_info additional[] = | |||
610 | {0x2100, "Logical block address out of range"}, | 621 | {0x2100, "Logical block address out of range"}, |
611 | {0x2101, "Invalid element address"}, | 622 | {0x2101, "Invalid element address"}, |
612 | {0x2102, "Invalid address for write"}, | 623 | {0x2102, "Invalid address for write"}, |
624 | {0x2103, "Invalid write crossing layer jump"}, | ||
613 | 625 | ||
614 | {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, | 626 | {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, |
615 | 627 | ||
616 | {0x2400, "Invalid field in cdb"}, | 628 | {0x2400, "Invalid field in cdb"}, |
617 | {0x2401, "CDB decryption error"}, | 629 | {0x2401, "CDB decryption error"}, |
630 | {0x2402, "Obsolete"}, | ||
631 | {0x2403, "Obsolete"}, | ||
618 | {0x2404, "Security audit value frozen"}, | 632 | {0x2404, "Security audit value frozen"}, |
619 | {0x2405, "Security working key frozen"}, | 633 | {0x2405, "Security working key frozen"}, |
620 | {0x2406, "Nonce not unique"}, | 634 | {0x2406, "Nonce not unique"}, |
@@ -637,7 +651,10 @@ static struct error_info additional[] = | |||
637 | {0x260C, "Invalid operation for copy source or destination"}, | 651 | {0x260C, "Invalid operation for copy source or destination"}, |
638 | {0x260D, "Copy segment granularity violation"}, | 652 | {0x260D, "Copy segment granularity violation"}, |
639 | {0x260E, "Invalid parameter while port is enabled"}, | 653 | {0x260E, "Invalid parameter while port is enabled"}, |
640 | {0x260F, "Invalid data-out buffer integrity"}, | 654 | {0x260F, "Invalid data-out buffer integrity check value"}, |
655 | {0x2610, "Data decryption key fail limit reached"}, | ||
656 | {0x2611, "Incomplete key-associated data set"}, | ||
657 | {0x2612, "Vendor specific key reference not found"}, | ||
641 | 658 | ||
642 | {0x2700, "Write protected"}, | 659 | {0x2700, "Write protected"}, |
643 | {0x2701, "Hardware write protected"}, | 660 | {0x2701, "Hardware write protected"}, |
@@ -649,6 +666,7 @@ static struct error_info additional[] = | |||
649 | 666 | ||
650 | {0x2800, "Not ready to ready change, medium may have changed"}, | 667 | {0x2800, "Not ready to ready change, medium may have changed"}, |
651 | {0x2801, "Import or export element accessed"}, | 668 | {0x2801, "Import or export element accessed"}, |
669 | {0x2802, "Format-layer may have changed"}, | ||
652 | 670 | ||
653 | {0x2900, "Power on, reset, or bus device reset occurred"}, | 671 | {0x2900, "Power on, reset, or bus device reset occurred"}, |
654 | {0x2901, "Power on occurred"}, | 672 | {0x2901, "Power on occurred"}, |
@@ -669,6 +687,11 @@ static struct error_info additional[] = | |||
669 | {0x2A07, "Implicit asymmetric access state transition failed"}, | 687 | {0x2A07, "Implicit asymmetric access state transition failed"}, |
670 | {0x2A08, "Priority changed"}, | 688 | {0x2A08, "Priority changed"}, |
671 | {0x2A09, "Capacity data has changed"}, | 689 | {0x2A09, "Capacity data has changed"}, |
690 | {0x2A10, "Timestamp changed"}, | ||
691 | {0x2A11, "Data encryption parameters changed by another i_t nexus"}, | ||
692 | {0x2A12, "Data encryption parameters changed by vendor specific " | ||
693 | "event"}, | ||
694 | {0x2A13, "Data encryption key instance counter has changed"}, | ||
672 | 695 | ||
673 | {0x2B00, "Copy cannot execute since host cannot disconnect"}, | 696 | {0x2B00, "Copy cannot execute since host cannot disconnect"}, |
674 | 697 | ||
@@ -690,6 +713,7 @@ static struct error_info additional[] = | |||
690 | {0x2E00, "Insufficient time for operation"}, | 713 | {0x2E00, "Insufficient time for operation"}, |
691 | 714 | ||
692 | {0x2F00, "Commands cleared by another initiator"}, | 715 | {0x2F00, "Commands cleared by another initiator"}, |
716 | {0x2F01, "Commands cleared by power loss notification"}, | ||
693 | 717 | ||
694 | {0x3000, "Incompatible medium installed"}, | 718 | {0x3000, "Incompatible medium installed"}, |
695 | {0x3001, "Cannot read medium - unknown format"}, | 719 | {0x3001, "Cannot read medium - unknown format"}, |
@@ -702,7 +726,8 @@ static struct error_info additional[] = | |||
702 | {0x3008, "Cannot write - application code mismatch"}, | 726 | {0x3008, "Cannot write - application code mismatch"}, |
703 | {0x3009, "Current session not fixated for append"}, | 727 | {0x3009, "Current session not fixated for append"}, |
704 | {0x300A, "Cleaning request rejected"}, | 728 | {0x300A, "Cleaning request rejected"}, |
705 | {0x300C, "WORM medium, overwrite attempted"}, | 729 | {0x300C, "WORM medium - overwrite attempted"}, |
730 | {0x300D, "WORM medium - integrity check"}, | ||
706 | {0x3010, "Medium not formatted"}, | 731 | {0x3010, "Medium not formatted"}, |
707 | 732 | ||
708 | {0x3100, "Medium format corrupted"}, | 733 | {0x3100, "Medium format corrupted"}, |
@@ -790,6 +815,9 @@ static struct error_info additional[] = | |||
790 | {0x3F0F, "Echo buffer overwritten"}, | 815 | {0x3F0F, "Echo buffer overwritten"}, |
791 | {0x3F10, "Medium loadable"}, | 816 | {0x3F10, "Medium loadable"}, |
792 | {0x3F11, "Medium auxiliary memory accessible"}, | 817 | {0x3F11, "Medium auxiliary memory accessible"}, |
818 | {0x3F12, "iSCSI IP address added"}, | ||
819 | {0x3F13, "iSCSI IP address removed"}, | ||
820 | {0x3F14, "iSCSI IP address changed"}, | ||
793 | /* | 821 | /* |
794 | * {0x40NN, "Ram failure"}, | 822 | * {0x40NN, "Ram failure"}, |
795 | * {0x40NN, "Diagnostic failure on component nn"}, | 823 | * {0x40NN, "Diagnostic failure on component nn"}, |
@@ -799,6 +827,7 @@ static struct error_info additional[] = | |||
799 | {0x4300, "Message error"}, | 827 | {0x4300, "Message error"}, |
800 | 828 | ||
801 | {0x4400, "Internal target failure"}, | 829 | {0x4400, "Internal target failure"}, |
830 | {0x4471, "ATA device failed set features"}, | ||
802 | 831 | ||
803 | {0x4500, "Select or reselect failure"}, | 832 | {0x4500, "Select or reselect failure"}, |
804 | 833 | ||
@@ -807,9 +836,10 @@ static struct error_info additional[] = | |||
807 | {0x4700, "Scsi parity error"}, | 836 | {0x4700, "Scsi parity error"}, |
808 | {0x4701, "Data phase CRC error detected"}, | 837 | {0x4701, "Data phase CRC error detected"}, |
809 | {0x4702, "Scsi parity error detected during st data phase"}, | 838 | {0x4702, "Scsi parity error detected during st data phase"}, |
810 | {0x4703, "Information unit CRC error detected"}, | 839 | {0x4703, "Information unit iuCRC error detected"}, |
811 | {0x4704, "Asynchronous information protection error detected"}, | 840 | {0x4704, "Asynchronous information protection error detected"}, |
812 | {0x4705, "Protocol service CRC error"}, | 841 | {0x4705, "Protocol service CRC error"}, |
842 | {0x4706, "Phy test function in progress"}, | ||
813 | {0x477f, "Some commands cleared by iSCSI Protocol event"}, | 843 | {0x477f, "Some commands cleared by iSCSI Protocol event"}, |
814 | 844 | ||
815 | {0x4800, "Initiator detected error message received"}, | 845 | {0x4800, "Initiator detected error message received"}, |
@@ -844,6 +874,8 @@ static struct error_info additional[] = | |||
844 | {0x5300, "Media load or eject failed"}, | 874 | {0x5300, "Media load or eject failed"}, |
845 | {0x5301, "Unload tape failure"}, | 875 | {0x5301, "Unload tape failure"}, |
846 | {0x5302, "Medium removal prevented"}, | 876 | {0x5302, "Medium removal prevented"}, |
877 | {0x5303, "Medium removal prevented by data transfer element"}, | ||
878 | {0x5304, "Medium thread or unthread failure"}, | ||
847 | 879 | ||
848 | {0x5400, "Scsi to host system interface failure"}, | 880 | {0x5400, "Scsi to host system interface failure"}, |
849 | 881 | ||
@@ -855,6 +887,7 @@ static struct error_info additional[] = | |||
855 | {0x5505, "Insufficient access control resources"}, | 887 | {0x5505, "Insufficient access control resources"}, |
856 | {0x5506, "Auxiliary memory out of space"}, | 888 | {0x5506, "Auxiliary memory out of space"}, |
857 | {0x5507, "Quota error"}, | 889 | {0x5507, "Quota error"}, |
890 | {0x5508, "Maximum number of supplemental decryption keys exceeded"}, | ||
858 | 891 | ||
859 | {0x5700, "Unable to recover table-of-contents"}, | 892 | {0x5700, "Unable to recover table-of-contents"}, |
860 | 893 | ||
@@ -1004,6 +1037,7 @@ static struct error_info additional[] = | |||
1004 | {0x6708, "Assign failure occurred"}, | 1037 | {0x6708, "Assign failure occurred"}, |
1005 | {0x6709, "Multiply assigned logical unit"}, | 1038 | {0x6709, "Multiply assigned logical unit"}, |
1006 | {0x670A, "Set target port groups command failed"}, | 1039 | {0x670A, "Set target port groups command failed"}, |
1040 | {0x670B, "ATA device feature not enabled"}, | ||
1007 | 1041 | ||
1008 | {0x6800, "Logical unit not configured"}, | 1042 | {0x6800, "Logical unit not configured"}, |
1009 | 1043 | ||
@@ -1030,6 +1064,8 @@ static struct error_info additional[] = | |||
1030 | {0x6F03, "Read of scrambled sector without authentication"}, | 1064 | {0x6F03, "Read of scrambled sector without authentication"}, |
1031 | {0x6F04, "Media region code is mismatched to logical unit region"}, | 1065 | {0x6F04, "Media region code is mismatched to logical unit region"}, |
1032 | {0x6F05, "Drive region must be permanent/region reset count error"}, | 1066 | {0x6F05, "Drive region must be permanent/region reset count error"}, |
1067 | {0x6F06, "Insufficient block count for binding nonce recording"}, | ||
1068 | {0x6F07, "Conflict in binding nonce recording"}, | ||
1033 | /* | 1069 | /* |
1034 | * {0x70NN, "Decompression exception short algorithm id of nn"}, | 1070 | * {0x70NN, "Decompression exception short algorithm id of nn"}, |
1035 | */ | 1071 | */ |
@@ -1041,6 +1077,8 @@ static struct error_info additional[] = | |||
1041 | {0x7203, "Session fixation error - incomplete track in session"}, | 1077 | {0x7203, "Session fixation error - incomplete track in session"}, |
1042 | {0x7204, "Empty or partially written reserved track"}, | 1078 | {0x7204, "Empty or partially written reserved track"}, |
1043 | {0x7205, "No more track reservations allowed"}, | 1079 | {0x7205, "No more track reservations allowed"}, |
1080 | {0x7206, "RMZ extension is not allowed"}, | ||
1081 | {0x7207, "No more test zone extensions are allowed"}, | ||
1044 | 1082 | ||
1045 | {0x7300, "Cd control error"}, | 1083 | {0x7300, "Cd control error"}, |
1046 | {0x7301, "Power calibration area almost full"}, | 1084 | {0x7301, "Power calibration area almost full"}, |
@@ -1049,6 +1087,18 @@ static struct error_info additional[] = | |||
1049 | {0x7304, "Program memory area update failure"}, | 1087 | {0x7304, "Program memory area update failure"}, |
1050 | {0x7305, "Program memory area is full"}, | 1088 | {0x7305, "Program memory area is full"}, |
1051 | {0x7306, "RMA/PMA is almost full"}, | 1089 | {0x7306, "RMA/PMA is almost full"}, |
1090 | {0x7310, "Current power calibration area almost full"}, | ||
1091 | {0x7311, "Current power calibration area is full"}, | ||
1092 | {0x7317, "RDZ is full"}, | ||
1093 | |||
1094 | {0x7400, "Security error"}, | ||
1095 | {0x7401, "Unable to decrypt data"}, | ||
1096 | {0x7402, "Unencrypted data encountered while decrypting"}, | ||
1097 | {0x7403, "Incorrect data encryption key"}, | ||
1098 | {0x7404, "Cryptographic integrity validation failed"}, | ||
1099 | {0x7405, "Error decrypting data"}, | ||
1100 | {0x7471, "Logical unit access not authorized"}, | ||
1101 | |||
1052 | {0, NULL} | 1102 | {0, NULL} |
1053 | }; | 1103 | }; |
1054 | 1104 | ||
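For context on how the additional[] table above is consumed: each entry packs an ASC/ASCQ pair as (asc << 8) | ascq and maps it to a description string, and the sense-formatting code simply scans the table for a matching code. The following is a minimal, self-contained C sketch of that lookup, not the kernel's actual helper; the function name lookup_additional() and the three sample entries are illustrative only.

#include <stdio.h>

struct error_info {
	unsigned short code12;		/* (asc << 8) | ascq */
	const char *text;
};

/* A few entries in the style of the additional[] table above. */
static const struct error_info additional[] = {
	{0x001D, "ATA pass through information available"},
	{0x0B03, "Warning - background self-test failed"},
	{0x2F01, "Commands cleared by power loss notification"},
	{0, NULL}
};

/* Hypothetical helper: return the text for an ASC/ASCQ pair, or NULL. */
static const char *lookup_additional(unsigned char asc, unsigned char ascq)
{
	unsigned short code = (unsigned short)((asc << 8) | ascq);
	const struct error_info *ei;

	for (ei = additional; ei->text; ei++)
		if (ei->code12 == code)
			return ei->text;
	return NULL;
}

int main(void)
{
	/* 0x00/0x1d is one of the entries added by this patch. */
	printf("%s\n", lookup_additional(0x00, 0x1d));
	return 0;
}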
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c index 10573c24a50b..5630868c1b25 100644 --- a/drivers/scsi/esp.c +++ b/drivers/scsi/esp.c | |||
@@ -1146,7 +1146,7 @@ static struct sbus_dev sun4_esp_dev; | |||
1146 | static int __init esp_sun4_probe(struct scsi_host_template *tpnt) | 1146 | static int __init esp_sun4_probe(struct scsi_host_template *tpnt) |
1147 | { | 1147 | { |
1148 | if (sun4_esp_physaddr) { | 1148 | if (sun4_esp_physaddr) { |
1149 | memset(&sun4_esp_dev, 0, sizeof(esp_dev)); | 1149 | memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev)); |
1150 | sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; | 1150 | sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; |
1151 | sun4_esp_dev.irqs[0] = 4; | 1151 | sun4_esp_dev.irqs[0] = 4; |
1152 | sun4_esp_dev.resource[0].start = sun4_esp_physaddr; | 1152 | sun4_esp_dev.resource[0].start = sun4_esp_physaddr; |
@@ -1162,6 +1162,7 @@ static int __init esp_sun4_probe(struct scsi_host_template *tpnt) | |||
1162 | 1162 | ||
1163 | static int __devexit esp_sun4_remove(void) | 1163 | static int __devexit esp_sun4_remove(void) |
1164 | { | 1164 | { |
1165 | struct of_device *dev = &sun4_esp_dev.ofdev; | ||
1165 | struct esp *esp = dev_get_drvdata(&dev->dev); | 1166 | struct esp *esp = dev_get_drvdata(&dev->dev); |
1166 | 1167 | ||
1167 | return esp_remove_common(esp); | 1168 | return esp_remove_common(esp); |
@@ -1397,7 +1398,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp) | |||
1397 | sp->SCp.ptr = NULL; | 1398 | sp->SCp.ptr = NULL; |
1398 | } | 1399 | } |
1399 | } else { | 1400 | } else { |
1400 | sp->SCp.buffer = (struct scatterlist *) sp->buffer; | 1401 | sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; |
1401 | sp->SCp.buffers_residual = sbus_map_sg(esp->sdev, | 1402 | sp->SCp.buffers_residual = sbus_map_sg(esp->sdev, |
1402 | sp->SCp.buffer, | 1403 | sp->SCp.buffer, |
1403 | sp->use_sg, | 1404 | sp->use_sg, |
@@ -1410,7 +1411,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp) | |||
1410 | static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp) | 1411 | static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp) |
1411 | { | 1412 | { |
1412 | if (sp->use_sg) { | 1413 | if (sp->use_sg) { |
1413 | sbus_unmap_sg(esp->sdev, sp->buffer, sp->use_sg, | 1414 | sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg, |
1414 | sp->sc_data_direction); | 1415 | sp->sc_data_direction); |
1415 | } else if (sp->request_bufflen) { | 1416 | } else if (sp->request_bufflen) { |
1416 | sbus_unmap_single(esp->sdev, | 1417 | sbus_unmap_single(esp->sdev, |
@@ -2754,18 +2755,15 @@ static int esp_do_data_finale(struct esp *esp) | |||
2754 | */ | 2755 | */ |
2755 | static int esp_should_clear_sync(struct scsi_cmnd *sp) | 2756 | static int esp_should_clear_sync(struct scsi_cmnd *sp) |
2756 | { | 2757 | { |
2757 | u8 cmd1 = sp->cmnd[0]; | 2758 | u8 cmd = sp->cmnd[0]; |
2758 | u8 cmd2 = sp->data_cmnd[0]; | ||
2759 | 2759 | ||
2760 | /* These cases are for spinning up a disk and | 2760 | /* These cases are for spinning up a disk and |
2761 | * waiting for that spinup to complete. | 2761 | * waiting for that spinup to complete. |
2762 | */ | 2762 | */ |
2763 | if (cmd1 == START_STOP || | 2763 | if (cmd == START_STOP) |
2764 | cmd2 == START_STOP) | ||
2765 | return 0; | 2764 | return 0; |
2766 | 2765 | ||
2767 | if (cmd1 == TEST_UNIT_READY || | 2766 | if (cmd == TEST_UNIT_READY) |
2768 | cmd2 == TEST_UNIT_READY) | ||
2769 | return 0; | 2767 | return 0; |
2770 | 2768 | ||
2771 | /* One more special case for SCSI tape drives, | 2769 | /* One more special case for SCSI tape drives, |
@@ -2773,8 +2771,7 @@ static int esp_should_clear_sync(struct scsi_cmnd *sp) | |||
2773 | * completion of a rewind or tape load operation. | 2771 | * completion of a rewind or tape load operation. |
2774 | */ | 2772 | */ |
2775 | if (sp->device->type == TYPE_TAPE) { | 2773 | if (sp->device->type == TYPE_TAPE) { |
2776 | if (cmd1 == MODE_SENSE || | 2774 | if (cmd == MODE_SENSE) |
2777 | cmd2 == MODE_SENSE) | ||
2778 | return 0; | 2775 | return 0; |
2779 | } | 2776 | } |
2780 | 2777 | ||
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index ab2f8b267908..bcb3444f1dcf 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c | |||
@@ -45,10 +45,6 @@ static char driver_name[] = "hptiop"; | |||
45 | static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; | 45 | static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; |
46 | static const char driver_ver[] = "v1.0 (060426)"; | 46 | static const char driver_ver[] = "v1.0 (060426)"; |
47 | 47 | ||
48 | static DEFINE_SPINLOCK(hptiop_hba_list_lock); | ||
49 | static LIST_HEAD(hptiop_hba_list); | ||
50 | static int hptiop_cdev_major = -1; | ||
51 | |||
52 | static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); | 48 | static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); |
53 | static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); | 49 | static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); |
54 | static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); | 50 | static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); |
@@ -577,7 +573,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba) | |||
577 | if (atomic_xchg(&hba->resetting, 1) == 0) { | 573 | if (atomic_xchg(&hba->resetting, 1) == 0) { |
578 | atomic_inc(&hba->reset_count); | 574 | atomic_inc(&hba->reset_count); |
579 | writel(IOPMU_INBOUND_MSG0_RESET, | 575 | writel(IOPMU_INBOUND_MSG0_RESET, |
580 | &hba->iop->outbound_msgaddr0); | 576 | &hba->iop->inbound_msgaddr0); |
581 | hptiop_pci_posting_flush(hba->iop); | 577 | hptiop_pci_posting_flush(hba->iop); |
582 | } | 578 | } |
583 | 579 | ||
@@ -620,532 +616,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, | |||
620 | return queue_depth; | 616 | return queue_depth; |
621 | } | 617 | } |
622 | 618 | ||
623 | struct hptiop_getinfo { | ||
624 | char __user *buffer; | ||
625 | loff_t buflength; | ||
626 | loff_t bufoffset; | ||
627 | loff_t buffillen; | ||
628 | loff_t filpos; | ||
629 | }; | ||
630 | |||
631 | static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo, | ||
632 | char *data, int datalen) | ||
633 | { | ||
634 | if (pinfo->filpos < pinfo->bufoffset) { | ||
635 | if (pinfo->filpos + datalen <= pinfo->bufoffset) { | ||
636 | pinfo->filpos += datalen; | ||
637 | return; | ||
638 | } else { | ||
639 | data += (pinfo->bufoffset - pinfo->filpos); | ||
640 | datalen -= (pinfo->bufoffset - pinfo->filpos); | ||
641 | pinfo->filpos = pinfo->bufoffset; | ||
642 | } | ||
643 | } | ||
644 | |||
645 | pinfo->filpos += datalen; | ||
646 | if (pinfo->buffillen == pinfo->buflength) | ||
647 | return; | ||
648 | |||
649 | if (pinfo->buflength - pinfo->buffillen < datalen) | ||
650 | datalen = pinfo->buflength - pinfo->buffillen; | ||
651 | |||
652 | if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen)) | ||
653 | return; | ||
654 | |||
655 | pinfo->buffillen += datalen; | ||
656 | } | ||
657 | |||
658 | static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...) | ||
659 | { | ||
660 | va_list args; | ||
661 | char buf[128]; | ||
662 | int len; | ||
663 | |||
664 | va_start(args, fmt); | ||
665 | len = vsnprintf(buf, sizeof(buf), fmt, args); | ||
666 | va_end(args); | ||
667 | hptiop_copy_mem_info(pinfo, buf, len); | ||
668 | return len; | ||
669 | } | ||
670 | |||
671 | static void hptiop_ioctl_done(struct hpt_ioctl_k *arg) | ||
672 | { | ||
673 | arg->done = NULL; | ||
674 | wake_up(&arg->hba->ioctl_wq); | ||
675 | } | ||
676 | |||
677 | static void hptiop_do_ioctl(struct hpt_ioctl_k *arg) | ||
678 | { | ||
679 | struct hptiop_hba *hba = arg->hba; | ||
680 | u32 val; | ||
681 | struct hpt_iop_request_ioctl_command __iomem *req; | ||
682 | int ioctl_retry = 0; | ||
683 | |||
684 | dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no); | ||
685 | |||
686 | /* | ||
687 | * check (in + out) buff size from application. | ||
688 | * outbuf must be dword aligned. | ||
689 | */ | ||
690 | if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size > | ||
691 | hba->max_request_size | ||
692 | - sizeof(struct hpt_iop_request_header) | ||
693 | - 4 * sizeof(u32)) { | ||
694 | dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n", | ||
695 | hba->host->host_no, | ||
696 | arg->inbuf_size, arg->outbuf_size); | ||
697 | arg->result = HPT_IOCTL_RESULT_FAILED; | ||
698 | return; | ||
699 | } | ||
700 | |||
701 | retry: | ||
702 | spin_lock_irq(hba->host->host_lock); | ||
703 | |||
704 | val = readl(&hba->iop->inbound_queue); | ||
705 | if (val == IOPMU_QUEUE_EMPTY) { | ||
706 | spin_unlock_irq(hba->host->host_lock); | ||
707 | dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no); | ||
708 | arg->result = -1; | ||
709 | return; | ||
710 | } | ||
711 | |||
712 | req = (struct hpt_iop_request_ioctl_command __iomem *) | ||
713 | ((unsigned long)hba->iop + val); | ||
714 | |||
715 | writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code), | ||
716 | &req->ioctl_code); | ||
717 | writel(arg->inbuf_size, &req->inbuf_size); | ||
718 | writel(arg->outbuf_size, &req->outbuf_size); | ||
719 | |||
720 | /* | ||
721 | * use the buffer on the IOP local memory first, then copy it | ||
722 | * back to host. | ||
723 | * the caller's request buffer shoudl be little-endian. | ||
724 | */ | ||
725 | if (arg->inbuf_size) | ||
726 | memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size); | ||
727 | |||
728 | /* correct the controller ID for IOP */ | ||
729 | if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO || | ||
730 | arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 || | ||
731 | arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO) | ||
732 | && arg->inbuf_size >= sizeof(u32)) | ||
733 | writel(0, req->buf); | ||
734 | |||
735 | writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type); | ||
736 | writel(0, &req->header.flags); | ||
737 | writel(offsetof(struct hpt_iop_request_ioctl_command, buf) | ||
738 | + arg->inbuf_size, &req->header.size); | ||
739 | writel((u32)(unsigned long)arg, &req->header.context); | ||
740 | writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0, | ||
741 | &req->header.context_hi32); | ||
742 | writel(IOP_RESULT_PENDING, &req->header.result); | ||
743 | |||
744 | arg->result = HPT_IOCTL_RESULT_FAILED; | ||
745 | arg->done = hptiop_ioctl_done; | ||
746 | |||
747 | writel(val, &hba->iop->inbound_queue); | ||
748 | hptiop_pci_posting_flush(hba->iop); | ||
749 | |||
750 | spin_unlock_irq(hba->host->host_lock); | ||
751 | |||
752 | wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ); | ||
753 | |||
754 | if (arg->done != NULL) { | ||
755 | hptiop_reset_hba(hba); | ||
756 | if (ioctl_retry++ < 3) | ||
757 | goto retry; | ||
758 | } | ||
759 | |||
760 | dprintk("hpt_iop_ioctl %x result %d\n", | ||
761 | arg->ioctl_code, arg->result); | ||
762 | } | ||
763 | |||
764 | static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf, | ||
765 | u32 insize, void *outbuf, u32 outsize) | ||
766 | { | ||
767 | struct hpt_ioctl_k arg; | ||
768 | arg.hba = hba; | ||
769 | arg.ioctl_code = code; | ||
770 | arg.inbuf = inbuf; | ||
771 | arg.outbuf = outbuf; | ||
772 | arg.inbuf_size = insize; | ||
773 | arg.outbuf_size = outsize; | ||
774 | arg.bytes_returned = NULL; | ||
775 | hptiop_do_ioctl(&arg); | ||
776 | return arg.result; | ||
777 | } | ||
778 | |||
779 | static inline int hpt_id_valid(__le32 id) | ||
780 | { | ||
781 | return id != 0 && id != cpu_to_le32(0xffffffff); | ||
782 | } | ||
783 | |||
784 | static int hptiop_get_controller_info(struct hptiop_hba *hba, | ||
785 | struct hpt_controller_info *pinfo) | ||
786 | { | ||
787 | int id = 0; | ||
788 | |||
789 | return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO, | ||
790 | &id, sizeof(int), pinfo, sizeof(*pinfo)); | ||
791 | } | ||
792 | |||
793 | |||
794 | static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus, | ||
795 | struct hpt_channel_info *pinfo) | ||
796 | { | ||
797 | u32 ids[2]; | ||
798 | |||
799 | ids[0] = 0; | ||
800 | ids[1] = bus; | ||
801 | return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO, | ||
802 | ids, sizeof(ids), pinfo, sizeof(*pinfo)); | ||
803 | |||
804 | } | ||
805 | |||
806 | static int hptiop_get_logical_devices(struct hptiop_hba *hba, | ||
807 | __le32 *pids, int maxcount) | ||
808 | { | ||
809 | int i; | ||
810 | u32 count = maxcount - 1; | ||
811 | |||
812 | if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES, | ||
813 | &count, sizeof(u32), | ||
814 | pids, sizeof(u32) * maxcount)) | ||
815 | return -1; | ||
816 | |||
817 | maxcount = le32_to_cpu(pids[0]); | ||
818 | for (i = 0; i < maxcount; i++) | ||
819 | pids[i] = pids[i+1]; | ||
820 | |||
821 | return maxcount; | ||
822 | } | ||
823 | |||
824 | static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id, | ||
825 | struct hpt_logical_device_info_v3 *pinfo) | ||
826 | { | ||
827 | return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3, | ||
828 | &id, sizeof(u32), | ||
829 | pinfo, sizeof(*pinfo)); | ||
830 | } | ||
831 | |||
832 | static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo) | ||
833 | { | ||
834 | static char s[64]; | ||
835 | u32 flags = le32_to_cpu(devinfo->u.array.flags); | ||
836 | u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress); | ||
837 | u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress); | ||
838 | |||
839 | if (flags & ARRAY_FLAG_DISABLED) | ||
840 | return "Disabled"; | ||
841 | else if (flags & ARRAY_FLAG_TRANSFORMING) | ||
842 | sprintf(s, "Expanding/Migrating %d.%d%%%s%s", | ||
843 | trans_prog / 100, | ||
844 | trans_prog % 100, | ||
845 | (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))? | ||
846 | ", Critical" : "", | ||
847 | ((flags & ARRAY_FLAG_NEEDINITIALIZING) && | ||
848 | !(flags & ARRAY_FLAG_REBUILDING) && | ||
849 | !(flags & ARRAY_FLAG_INITIALIZING))? | ||
850 | ", Unintialized" : ""); | ||
851 | else if ((flags & ARRAY_FLAG_BROKEN) && | ||
852 | devinfo->u.array.array_type != AT_RAID6) | ||
853 | return "Critical"; | ||
854 | else if (flags & ARRAY_FLAG_REBUILDING) | ||
855 | sprintf(s, | ||
856 | (flags & ARRAY_FLAG_NEEDINITIALIZING)? | ||
857 | "%sBackground initializing %d.%d%%" : | ||
858 | "%sRebuilding %d.%d%%", | ||
859 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
860 | reb_prog / 100, | ||
861 | reb_prog % 100); | ||
862 | else if (flags & ARRAY_FLAG_VERIFYING) | ||
863 | sprintf(s, "%sVerifying %d.%d%%", | ||
864 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
865 | reb_prog / 100, | ||
866 | reb_prog % 100); | ||
867 | else if (flags & ARRAY_FLAG_INITIALIZING) | ||
868 | sprintf(s, "%sForground initializing %d.%d%%", | ||
869 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
870 | reb_prog / 100, | ||
871 | reb_prog % 100); | ||
872 | else if (flags & ARRAY_FLAG_NEEDTRANSFORM) | ||
873 | sprintf(s,"%s%s%s", "Need Expanding/Migrating", | ||
874 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
875 | ((flags & ARRAY_FLAG_NEEDINITIALIZING) && | ||
876 | !(flags & ARRAY_FLAG_REBUILDING) && | ||
877 | !(flags & ARRAY_FLAG_INITIALIZING))? | ||
878 | ", Unintialized" : ""); | ||
879 | else if (flags & ARRAY_FLAG_NEEDINITIALIZING && | ||
880 | !(flags & ARRAY_FLAG_REBUILDING) && | ||
881 | !(flags & ARRAY_FLAG_INITIALIZING)) | ||
882 | sprintf(s,"%sUninitialized", | ||
883 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : ""); | ||
884 | else if ((flags & ARRAY_FLAG_NEEDBUILDING) || | ||
885 | (flags & ARRAY_FLAG_BROKEN)) | ||
886 | return "Critical"; | ||
887 | else | ||
888 | return "Normal"; | ||
889 | return s; | ||
890 | } | ||
891 | |||
892 | static void hptiop_dump_devinfo(struct hptiop_hba *hba, | ||
893 | struct hptiop_getinfo *pinfo, __le32 id, int indent) | ||
894 | { | ||
895 | struct hpt_logical_device_info_v3 devinfo; | ||
896 | int i; | ||
897 | u64 capacity; | ||
898 | |||
899 | for (i = 0; i < indent; i++) | ||
900 | hptiop_copy_info(pinfo, "\t"); | ||
901 | |||
902 | if (hptiop_get_device_info_v3(hba, id, &devinfo)) { | ||
903 | hptiop_copy_info(pinfo, "unknown\n"); | ||
904 | return; | ||
905 | } | ||
906 | |||
907 | switch (devinfo.type) { | ||
908 | |||
909 | case LDT_DEVICE: { | ||
910 | struct hd_driveid *driveid; | ||
911 | u32 flags = le32_to_cpu(devinfo.u.device.flags); | ||
912 | |||
913 | driveid = (struct hd_driveid *)devinfo.u.device.ident; | ||
914 | /* model[] is 40 chars long, but we just want 20 chars here */ | ||
915 | driveid->model[20] = 0; | ||
916 | |||
917 | if (indent) | ||
918 | if (flags & DEVICE_FLAG_DISABLED) | ||
919 | hptiop_copy_info(pinfo,"Missing\n"); | ||
920 | else | ||
921 | hptiop_copy_info(pinfo, "CH%d %s\n", | ||
922 | devinfo.u.device.path_id + 1, | ||
923 | driveid->model); | ||
924 | else { | ||
925 | capacity = le64_to_cpu(devinfo.capacity) * 512; | ||
926 | do_div(capacity, 1000000); | ||
927 | hptiop_copy_info(pinfo, | ||
928 | "CH%d %s, %lluMB, %s %s%s%s%s\n", | ||
929 | devinfo.u.device.path_id + 1, | ||
930 | driveid->model, | ||
931 | capacity, | ||
932 | (flags & DEVICE_FLAG_DISABLED)? | ||
933 | "Disabled" : "Normal", | ||
934 | devinfo.u.device.read_ahead_enabled? | ||
935 | "[RA]" : "", | ||
936 | devinfo.u.device.write_cache_enabled? | ||
937 | "[WC]" : "", | ||
938 | devinfo.u.device.TCQ_enabled? | ||
939 | "[TCQ]" : "", | ||
940 | devinfo.u.device.NCQ_enabled? | ||
941 | "[NCQ]" : "" | ||
942 | ); | ||
943 | } | ||
944 | break; | ||
945 | } | ||
946 | |||
947 | case LDT_ARRAY: | ||
948 | if (devinfo.target_id != INVALID_TARGET_ID) | ||
949 | hptiop_copy_info(pinfo, "[DISK %d_%d] ", | ||
950 | devinfo.vbus_id, devinfo.target_id); | ||
951 | |||
952 | capacity = le64_to_cpu(devinfo.capacity) * 512; | ||
953 | do_div(capacity, 1000000); | ||
954 | hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n", | ||
955 | devinfo.u.array.name, | ||
956 | devinfo.u.array.array_type==AT_RAID0? "RAID0" : | ||
957 | devinfo.u.array.array_type==AT_RAID1? "RAID1" : | ||
958 | devinfo.u.array.array_type==AT_RAID5? "RAID5" : | ||
959 | devinfo.u.array.array_type==AT_RAID6? "RAID6" : | ||
960 | devinfo.u.array.array_type==AT_JBOD? "JBOD" : | ||
961 | "unknown", | ||
962 | capacity, | ||
963 | get_array_status(&devinfo)); | ||
964 | for (i = 0; i < devinfo.u.array.ndisk; i++) { | ||
965 | if (hpt_id_valid(devinfo.u.array.members[i])) { | ||
966 | if (cpu_to_le16(1<<i) & | ||
967 | devinfo.u.array.critical_members) | ||
968 | hptiop_copy_info(pinfo, "\t*"); | ||
969 | hptiop_dump_devinfo(hba, pinfo, | ||
970 | devinfo.u.array.members[i], indent+1); | ||
971 | } | ||
972 | else | ||
973 | hptiop_copy_info(pinfo, "\tMissing\n"); | ||
974 | } | ||
975 | if (id == devinfo.u.array.transform_source) { | ||
976 | hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n"); | ||
977 | hptiop_dump_devinfo(hba, pinfo, | ||
978 | devinfo.u.array.transform_target, indent+1); | ||
979 | } | ||
980 | break; | ||
981 | } | ||
982 | } | ||
983 | |||
984 | static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf) | 619 | static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf) |
985 | { | 620 | { |
986 | return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); | 621 | return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); |
987 | } | 622 | } |
988 | 623 | ||
989 | static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf, | ||
990 | size_t count, loff_t *ppos) | ||
991 | { | ||
992 | struct hptiop_hba *hba = filp->private_data; | ||
993 | struct hptiop_getinfo info; | ||
994 | int i, j, ndev; | ||
995 | struct hpt_controller_info con_info; | ||
996 | struct hpt_channel_info chan_info; | ||
997 | __le32 ids[32]; | ||
998 | |||
999 | info.buffer = buf; | ||
1000 | info.buflength = count; | ||
1001 | info.bufoffset = ppos ? *ppos : 0; | ||
1002 | info.filpos = 0; | ||
1003 | info.buffillen = 0; | ||
1004 | |||
1005 | if (hptiop_get_controller_info(hba, &con_info)) | ||
1006 | return -EIO; | ||
1007 | |||
1008 | for (i = 0; i < con_info.num_buses; i++) { | ||
1009 | if (hptiop_get_channel_info(hba, i, &chan_info) == 0) { | ||
1010 | if (hpt_id_valid(chan_info.devices[0])) | ||
1011 | hptiop_dump_devinfo(hba, &info, | ||
1012 | chan_info.devices[0], 0); | ||
1013 | if (hpt_id_valid(chan_info.devices[1])) | ||
1014 | hptiop_dump_devinfo(hba, &info, | ||
1015 | chan_info.devices[1], 0); | ||
1016 | } | ||
1017 | } | ||
1018 | |||
1019 | ndev = hptiop_get_logical_devices(hba, ids, | ||
1020 | sizeof(ids) / sizeof(ids[0])); | ||
1021 | |||
1022 | /* | ||
1023 | * if hptiop_get_logical_devices fails, ndev==-1 and it just | ||
1024 | * output nothing here | ||
1025 | */ | ||
1026 | for (j = 0; j < ndev; j++) | ||
1027 | hptiop_dump_devinfo(hba, &info, ids[j], 0); | ||
1028 | |||
1029 | if (ppos) | ||
1030 | *ppos += info.buffillen; | ||
1031 | |||
1032 | return info.buffillen; | ||
1033 | } | ||
1034 | |||
1035 | static int hptiop_cdev_ioctl(struct inode *inode, struct file *file, | ||
1036 | unsigned int cmd, unsigned long arg) | ||
1037 | { | ||
1038 | struct hptiop_hba *hba = file->private_data; | ||
1039 | struct hpt_ioctl_u ioctl_u; | ||
1040 | struct hpt_ioctl_k ioctl_k; | ||
1041 | u32 bytes_returned; | ||
1042 | int err = -EINVAL; | ||
1043 | |||
1044 | if (copy_from_user(&ioctl_u, | ||
1045 | (void __user *)arg, sizeof(struct hpt_ioctl_u))) | ||
1046 | return -EINVAL; | ||
1047 | |||
1048 | if (ioctl_u.magic != HPT_IOCTL_MAGIC) | ||
1049 | return -EINVAL; | ||
1050 | |||
1051 | ioctl_k.ioctl_code = ioctl_u.ioctl_code; | ||
1052 | ioctl_k.inbuf = NULL; | ||
1053 | ioctl_k.inbuf_size = ioctl_u.inbuf_size; | ||
1054 | ioctl_k.outbuf = NULL; | ||
1055 | ioctl_k.outbuf_size = ioctl_u.outbuf_size; | ||
1056 | ioctl_k.hba = hba; | ||
1057 | ioctl_k.bytes_returned = &bytes_returned; | ||
1058 | |||
1059 | /* verify user buffer */ | ||
1060 | if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ, | ||
1061 | ioctl_u.inbuf, ioctl_k.inbuf_size)) || | ||
1062 | (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE, | ||
1063 | ioctl_u.outbuf, ioctl_k.outbuf_size)) || | ||
1064 | (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE, | ||
1065 | ioctl_u.bytes_returned, sizeof(u32))) || | ||
1066 | ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) { | ||
1067 | |||
1068 | dprintk("scsi%d: got bad user address\n", hba->host->host_no); | ||
1069 | return -EINVAL; | ||
1070 | } | ||
1071 | |||
1072 | /* map buffer to kernel. */ | ||
1073 | if (ioctl_k.inbuf_size) { | ||
1074 | ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL); | ||
1075 | if (!ioctl_k.inbuf) { | ||
1076 | dprintk("scsi%d: fail to alloc inbuf\n", | ||
1077 | hba->host->host_no); | ||
1078 | err = -ENOMEM; | ||
1079 | goto err_exit; | ||
1080 | } | ||
1081 | |||
1082 | if (copy_from_user(ioctl_k.inbuf, | ||
1083 | ioctl_u.inbuf, ioctl_k.inbuf_size)) { | ||
1084 | goto err_exit; | ||
1085 | } | ||
1086 | } | ||
1087 | |||
1088 | if (ioctl_k.outbuf_size) { | ||
1089 | ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL); | ||
1090 | if (!ioctl_k.outbuf) { | ||
1091 | dprintk("scsi%d: fail to alloc outbuf\n", | ||
1092 | hba->host->host_no); | ||
1093 | err = -ENOMEM; | ||
1094 | goto err_exit; | ||
1095 | } | ||
1096 | } | ||
1097 | |||
1098 | hptiop_do_ioctl(&ioctl_k); | ||
1099 | |||
1100 | if (ioctl_k.result == HPT_IOCTL_RESULT_OK) { | ||
1101 | if (ioctl_k.outbuf_size && | ||
1102 | copy_to_user(ioctl_u.outbuf, | ||
1103 | ioctl_k.outbuf, ioctl_k.outbuf_size)) | ||
1104 | goto err_exit; | ||
1105 | |||
1106 | if (ioctl_u.bytes_returned && | ||
1107 | copy_to_user(ioctl_u.bytes_returned, | ||
1108 | &bytes_returned, sizeof(u32))) | ||
1109 | goto err_exit; | ||
1110 | |||
1111 | err = 0; | ||
1112 | } | ||
1113 | |||
1114 | err_exit: | ||
1115 | kfree(ioctl_k.inbuf); | ||
1116 | kfree(ioctl_k.outbuf); | ||
1117 | |||
1118 | return err; | ||
1119 | } | ||
1120 | |||
1121 | static int hptiop_cdev_open(struct inode *inode, struct file *file) | ||
1122 | { | ||
1123 | struct hptiop_hba *hba; | ||
1124 | unsigned i = 0, minor = iminor(inode); | ||
1125 | int ret = -ENODEV; | ||
1126 | |||
1127 | spin_lock(&hptiop_hba_list_lock); | ||
1128 | list_for_each_entry(hba, &hptiop_hba_list, link) { | ||
1129 | if (i == minor) { | ||
1130 | file->private_data = hba; | ||
1131 | ret = 0; | ||
1132 | goto out; | ||
1133 | } | ||
1134 | i++; | ||
1135 | } | ||
1136 | |||
1137 | out: | ||
1138 | spin_unlock(&hptiop_hba_list_lock); | ||
1139 | return ret; | ||
1140 | } | ||
1141 | |||
1142 | static struct file_operations hptiop_cdev_fops = { | ||
1143 | .owner = THIS_MODULE, | ||
1144 | .read = hptiop_cdev_read, | ||
1145 | .ioctl = hptiop_cdev_ioctl, | ||
1146 | .open = hptiop_cdev_open, | ||
1147 | }; | ||
1148 | |||
1149 | static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf) | 624 | static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf) |
1150 | { | 625 | { |
1151 | struct Scsi_Host *host = class_to_shost(class_dev); | 626 | struct Scsi_Host *host = class_to_shost(class_dev); |
@@ -1296,19 +771,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, | |||
1296 | goto unmap_pci_bar; | 771 | goto unmap_pci_bar; |
1297 | } | 772 | } |
1298 | 773 | ||
1299 | if (scsi_add_host(host, &pcidev->dev)) { | ||
1300 | printk(KERN_ERR "scsi%d: scsi_add_host failed\n", | ||
1301 | hba->host->host_no); | ||
1302 | goto unmap_pci_bar; | ||
1303 | } | ||
1304 | |||
1305 | pci_set_drvdata(pcidev, host); | 774 | pci_set_drvdata(pcidev, host); |
1306 | 775 | ||
1307 | if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, | 776 | if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, |
1308 | driver_name, hba)) { | 777 | driver_name, hba)) { |
1309 | printk(KERN_ERR "scsi%d: request irq %d failed\n", | 778 | printk(KERN_ERR "scsi%d: request irq %d failed\n", |
1310 | hba->host->host_no, pcidev->irq); | 779 | hba->host->host_no, pcidev->irq); |
1311 | goto remove_scsi_host; | 780 | goto unmap_pci_bar; |
1312 | } | 781 | } |
1313 | 782 | ||
1314 | /* Allocate request mem */ | 783 | /* Allocate request mem */ |
@@ -1355,9 +824,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, | |||
1355 | if (hptiop_initialize_iop(hba)) | 824 | if (hptiop_initialize_iop(hba)) |
1356 | goto free_request_mem; | 825 | goto free_request_mem; |
1357 | 826 | ||
1358 | spin_lock(&hptiop_hba_list_lock); | 827 | if (scsi_add_host(host, &pcidev->dev)) { |
1359 | list_add_tail(&hba->link, &hptiop_hba_list); | 828 | printk(KERN_ERR "scsi%d: scsi_add_host failed\n", |
1360 | spin_unlock(&hptiop_hba_list_lock); | 829 | hba->host->host_no); |
830 | goto free_request_mem; | ||
831 | } | ||
832 | |||
1361 | 833 | ||
1362 | scsi_scan_host(host); | 834 | scsi_scan_host(host); |
1363 | 835 | ||
@@ -1372,9 +844,6 @@ free_request_mem: | |||
1372 | free_request_irq: | 844 | free_request_irq: |
1373 | free_irq(hba->pcidev->irq, hba); | 845 | free_irq(hba->pcidev->irq, hba); |
1374 | 846 | ||
1375 | remove_scsi_host: | ||
1376 | scsi_remove_host(host); | ||
1377 | |||
1378 | unmap_pci_bar: | 847 | unmap_pci_bar: |
1379 | iounmap(hba->iop); | 848 | iounmap(hba->iop); |
1380 | 849 | ||
@@ -1422,10 +891,6 @@ static void hptiop_remove(struct pci_dev *pcidev) | |||
1422 | 891 | ||
1423 | scsi_remove_host(host); | 892 | scsi_remove_host(host); |
1424 | 893 | ||
1425 | spin_lock(&hptiop_hba_list_lock); | ||
1426 | list_del_init(&hba->link); | ||
1427 | spin_unlock(&hptiop_hba_list_lock); | ||
1428 | |||
1429 | hptiop_shutdown(pcidev); | 894 | hptiop_shutdown(pcidev); |
1430 | 895 | ||
1431 | free_irq(hba->pcidev->irq, hba); | 896 | free_irq(hba->pcidev->irq, hba); |
@@ -1462,27 +927,12 @@ static struct pci_driver hptiop_pci_driver = { | |||
1462 | 927 | ||
1463 | static int __init hptiop_module_init(void) | 928 | static int __init hptiop_module_init(void) |
1464 | { | 929 | { |
1465 | int error; | ||
1466 | |||
1467 | printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); | 930 | printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); |
1468 | 931 | return pci_register_driver(&hptiop_pci_driver); | |
1469 | error = pci_register_driver(&hptiop_pci_driver); | ||
1470 | if (error < 0) | ||
1471 | return error; | ||
1472 | |||
1473 | hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops); | ||
1474 | if (hptiop_cdev_major < 0) { | ||
1475 | printk(KERN_WARNING "unable to register hptiop device.\n"); | ||
1476 | return hptiop_cdev_major; | ||
1477 | } | ||
1478 | |||
1479 | return 0; | ||
1480 | } | 932 | } |
1481 | 933 | ||
1482 | static void __exit hptiop_module_exit(void) | 934 | static void __exit hptiop_module_exit(void) |
1483 | { | 935 | { |
1484 | dprintk("hptiop_module_exit\n"); | ||
1485 | unregister_chrdev(hptiop_cdev_major, "hptiop"); | ||
1486 | pci_unregister_driver(&hptiop_pci_driver); | 936 | pci_unregister_driver(&hptiop_pci_driver); |
1487 | } | 937 | } |
1488 | 938 | ||
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c index 7eed0b098171..6aeb5f003c3c 100644 --- a/drivers/scsi/ibmvscsi/iseries_vscsi.c +++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c | |||
@@ -81,7 +81,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue, | |||
81 | int rc; | 81 | int rc; |
82 | 82 | ||
83 | single_host_data = hostdata; | 83 | single_host_data = hostdata; |
84 | rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, 0); | 84 | rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests); |
85 | if (rc < 0) { | 85 | if (rc < 0) { |
86 | printk("viopath_open failed with rc %d in open_event_path\n", | 86 | printk("viopath_open failed with rc %d in open_event_path\n", |
87 | rc); | 87 | rc); |
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c index 242b8873b333..ed22b96580c6 100644 --- a/drivers/scsi/ibmvscsi/rpa_vscsi.c +++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c | |||
@@ -238,6 +238,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue, | |||
238 | if (rc == 2) { | 238 | if (rc == 2) { |
239 | /* Adapter is good, but other end is not ready */ | 239 | /* Adapter is good, but other end is not ready */ |
240 | printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); | 240 | printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); |
241 | retrc = 0; | ||
241 | } else if (rc != 0) { | 242 | } else if (rc != 0) { |
242 | printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc); | 243 | printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc); |
243 | goto reg_crq_failed; | 244 | goto reg_crq_failed; |
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index f7b5d7372d26..94d1de55607f 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c | |||
@@ -517,7 +517,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive) | |||
517 | /* No more interrupts */ | 517 | /* No more interrupts */ |
518 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) | 518 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) |
519 | printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); | 519 | printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); |
520 | local_irq_enable(); | 520 | local_irq_enable_in_hardirq(); |
521 | if (status.b.check) | 521 | if (status.b.check) |
522 | rq->errors++; | 522 | rq->errors++; |
523 | idescsi_end_request (drive, 1, 0); | 523 | idescsi_end_request (drive, 1, 0); |
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 848fb2aa4ca3..058f094f945a 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -43,13 +43,10 @@ | |||
43 | 43 | ||
44 | #include "iscsi_tcp.h" | 44 | #include "iscsi_tcp.h" |
45 | 45 | ||
46 | #define ISCSI_TCP_VERSION "1.0-595" | ||
47 | |||
48 | MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " | 46 | MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " |
49 | "Alex Aizman <itn780@yahoo.com>"); | 47 | "Alex Aizman <itn780@yahoo.com>"); |
50 | MODULE_DESCRIPTION("iSCSI/TCP data-path"); | 48 | MODULE_DESCRIPTION("iSCSI/TCP data-path"); |
51 | MODULE_LICENSE("GPL"); | 49 | MODULE_LICENSE("GPL"); |
52 | MODULE_VERSION(ISCSI_TCP_VERSION); | ||
53 | /* #define DEBUG_TCP */ | 50 | /* #define DEBUG_TCP */ |
54 | #define DEBUG_ASSERT | 51 | #define DEBUG_ASSERT |
55 | 52 | ||
@@ -185,11 +182,19 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn) | |||
185 | * must be called with session lock | 182 | * must be called with session lock |
186 | */ | 183 | */ |
187 | static void | 184 | static void |
188 | __iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | 185 | iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) |
189 | { | 186 | { |
190 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 187 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
188 | struct iscsi_r2t_info *r2t; | ||
191 | struct scsi_cmnd *sc; | 189 | struct scsi_cmnd *sc; |
192 | 190 | ||
191 | /* flush ctask's r2t queues */ | ||
192 | while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { | ||
193 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, | ||
194 | sizeof(void*)); | ||
195 | debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); | ||
196 | } | ||
197 | |||
193 | sc = ctask->sc; | 198 | sc = ctask->sc; |
194 | if (unlikely(!sc)) | 199 | if (unlikely(!sc)) |
195 | return; | 200 | return; |
@@ -374,6 +379,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
374 | spin_unlock(&session->lock); | 379 | spin_unlock(&session->lock); |
375 | return 0; | 380 | return 0; |
376 | } | 381 | } |
382 | |||
377 | rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); | 383 | rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); |
378 | BUG_ON(!rc); | 384 | BUG_ON(!rc); |
379 | 385 | ||
@@ -399,7 +405,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
399 | tcp_ctask->exp_r2tsn = r2tsn + 1; | 405 | tcp_ctask->exp_r2tsn = r2tsn + 1; |
400 | tcp_ctask->xmstate |= XMSTATE_SOL_HDR; | 406 | tcp_ctask->xmstate |= XMSTATE_SOL_HDR; |
401 | __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); | 407 | __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); |
402 | __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); | 408 | list_move_tail(&ctask->running, &conn->xmitqueue); |
403 | 409 | ||
404 | scsi_queue_work(session->host, &conn->xmitwork); | 410 | scsi_queue_work(session->host, &conn->xmitwork); |
405 | conn->r2t_pdus_cnt++; | 411 | conn->r2t_pdus_cnt++; |
@@ -477,6 +483,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | |||
477 | case ISCSI_OP_SCSI_DATA_IN: | 483 | case ISCSI_OP_SCSI_DATA_IN: |
478 | tcp_conn->in.ctask = session->cmds[itt]; | 484 | tcp_conn->in.ctask = session->cmds[itt]; |
479 | rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); | 485 | rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); |
486 | if (rc) | ||
487 | return rc; | ||
480 | /* fall through */ | 488 | /* fall through */ |
481 | case ISCSI_OP_SCSI_CMD_RSP: | 489 | case ISCSI_OP_SCSI_CMD_RSP: |
482 | tcp_conn->in.ctask = session->cmds[itt]; | 490 | tcp_conn->in.ctask = session->cmds[itt]; |
@@ -484,7 +492,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | |||
484 | goto copy_hdr; | 492 | goto copy_hdr; |
485 | 493 | ||
486 | spin_lock(&session->lock); | 494 | spin_lock(&session->lock); |
487 | __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); | 495 | iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); |
488 | rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); | 496 | rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); |
489 | spin_unlock(&session->lock); | 497 | spin_unlock(&session->lock); |
490 | break; | 498 | break; |
@@ -500,13 +508,28 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | |||
500 | break; | 508 | break; |
501 | case ISCSI_OP_LOGIN_RSP: | 509 | case ISCSI_OP_LOGIN_RSP: |
502 | case ISCSI_OP_TEXT_RSP: | 510 | case ISCSI_OP_TEXT_RSP: |
503 | case ISCSI_OP_LOGOUT_RSP: | ||
504 | case ISCSI_OP_NOOP_IN: | ||
505 | case ISCSI_OP_REJECT: | 511 | case ISCSI_OP_REJECT: |
506 | case ISCSI_OP_ASYNC_EVENT: | 512 | case ISCSI_OP_ASYNC_EVENT: |
513 | /* | ||
514 | * It is possible that we could get a PDU with a buffer larger | ||
515 | * than 8K, but there are no targets that currently do this. | ||
516 | * For now we fail until we find a vendor that needs it | ||
517 | */ | ||
518 | if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH < | ||
519 | tcp_conn->in.datalen) { | ||
520 | printk(KERN_ERR "iscsi_tcp: received buffer of len %u " | ||
521 | "but conn buffer is only %u (opcode %0x)\n", | ||
522 | tcp_conn->in.datalen, | ||
523 | DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode); | ||
524 | rc = ISCSI_ERR_PROTO; | ||
525 | break; | ||
526 | } | ||
527 | |||
507 | if (tcp_conn->in.datalen) | 528 | if (tcp_conn->in.datalen) |
508 | goto copy_hdr; | 529 | goto copy_hdr; |
509 | /* fall through */ | 530 | /* fall through */ |
531 | case ISCSI_OP_LOGOUT_RSP: | ||
532 | case ISCSI_OP_NOOP_IN: | ||
510 | case ISCSI_OP_SCSI_TMFUNC_RSP: | 533 | case ISCSI_OP_SCSI_TMFUNC_RSP: |
511 | rc = iscsi_complete_pdu(conn, hdr, NULL, 0); | 534 | rc = iscsi_complete_pdu(conn, hdr, NULL, 0); |
512 | break; | 535 | break; |
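The reordered switch also gains a length guard for control PDUs: their payloads are now received into a fixed-size buffer, so anything larger is rejected up front. The check in isolation (a sketch; it assumes, as the hunk does, that the receive buffer is DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH bytes):

	if (tcp_conn->in.datalen > DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH) {
		/* data segment would overflow the fixed receive buffer */
		rc = ISCSI_ERR_PROTO;
		break;
	}

Note that LOGOUT_RSP and NOOP_IN move below the data-length fall-through, so they now complete directly without taking the copy path.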
@@ -523,7 +546,7 @@ copy_hdr: | |||
523 | * skbs to complete the command then we have to copy the header | 546 | * skbs to complete the command then we have to copy the header |
524 | * for later use | 547 | * for later use |
525 | */ | 548 | */ |
526 | if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy < | 549 | if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <= |
527 | (tcp_conn->in.datalen + tcp_conn->in.padding + | 550 | (tcp_conn->in.datalen + tcp_conn->in.padding + |
528 | (conn->datadgst_en ? 4 : 0))) { | 551 | (conn->datadgst_en ? 4 : 0))) { |
529 | debug_tcp("Copying header for later use. in.copy %d in.datalen" | 552 | debug_tcp("Copying header for later use. in.copy %d in.datalen" |
@@ -614,9 +637,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask, | |||
614 | * byte counters. | 637 | * byte counters. |
615 | **/ | 638 | **/ |
616 | static inline int | 639 | static inline int |
617 | iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) | 640 | iscsi_tcp_copy(struct iscsi_conn *conn) |
618 | { | 641 | { |
619 | void *buf = tcp_conn->data; | 642 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
620 | int buf_size = tcp_conn->in.datalen; | 643 | int buf_size = tcp_conn->in.datalen; |
621 | int buf_left = buf_size - tcp_conn->data_copied; | 644 | int buf_left = buf_size - tcp_conn->data_copied; |
622 | int size = min(tcp_conn->in.copy, buf_left); | 645 | int size = min(tcp_conn->in.copy, buf_left); |
@@ -627,7 +650,7 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) | |||
627 | BUG_ON(size <= 0); | 650 | BUG_ON(size <= 0); |
628 | 651 | ||
629 | rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, | 652 | rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, |
630 | (char*)buf + tcp_conn->data_copied, size); | 653 | (char*)conn->data + tcp_conn->data_copied, size); |
631 | BUG_ON(rc); | 654 | BUG_ON(rc); |
632 | 655 | ||
633 | tcp_conn->in.offset += size; | 656 | tcp_conn->in.offset += size; |
@@ -745,10 +768,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) | |||
745 | done: | 768 | done: |
746 | /* check for non-exceptional status */ | 769 | /* check for non-exceptional status */ |
747 | if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { | 770 | if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { |
748 | debug_scsi("done [sc %lx res %d itt 0x%x]\n", | 771 | debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n", |
749 | (long)sc, sc->result, ctask->itt); | 772 | (long)sc, sc->result, ctask->itt, |
773 | tcp_conn->in.hdr->flags); | ||
750 | spin_lock(&conn->session->lock); | 774 | spin_lock(&conn->session->lock); |
751 | __iscsi_ctask_cleanup(conn, ctask); | 775 | iscsi_tcp_cleanup_ctask(conn, ctask); |
752 | __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); | 776 | __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); |
753 | spin_unlock(&conn->session->lock); | 777 | spin_unlock(&conn->session->lock); |
754 | } | 778 | } |
@@ -769,26 +793,25 @@ iscsi_data_recv(struct iscsi_conn *conn) | |||
769 | break; | 793 | break; |
770 | case ISCSI_OP_SCSI_CMD_RSP: | 794 | case ISCSI_OP_SCSI_CMD_RSP: |
771 | spin_lock(&conn->session->lock); | 795 | spin_lock(&conn->session->lock); |
772 | __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); | 796 | iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); |
773 | spin_unlock(&conn->session->lock); | 797 | spin_unlock(&conn->session->lock); |
774 | case ISCSI_OP_TEXT_RSP: | 798 | case ISCSI_OP_TEXT_RSP: |
775 | case ISCSI_OP_LOGIN_RSP: | 799 | case ISCSI_OP_LOGIN_RSP: |
776 | case ISCSI_OP_NOOP_IN: | ||
777 | case ISCSI_OP_ASYNC_EVENT: | 800 | case ISCSI_OP_ASYNC_EVENT: |
778 | case ISCSI_OP_REJECT: | 801 | case ISCSI_OP_REJECT: |
779 | /* | 802 | /* |
780 | * Collect data segment to the connection's data | 803 | * Collect data segment to the connection's data |
781 | * placeholder | 804 | * placeholder |
782 | */ | 805 | */ |
783 | if (iscsi_tcp_copy(tcp_conn)) { | 806 | if (iscsi_tcp_copy(conn)) { |
784 | rc = -EAGAIN; | 807 | rc = -EAGAIN; |
785 | goto exit; | 808 | goto exit; |
786 | } | 809 | } |
787 | 810 | ||
788 | rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data, | 811 | rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data, |
789 | tcp_conn->in.datalen); | 812 | tcp_conn->in.datalen); |
790 | if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) | 813 | if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) |
791 | iscsi_recv_digest_update(tcp_conn, tcp_conn->data, | 814 | iscsi_recv_digest_update(tcp_conn, conn->data, |
792 | tcp_conn->in.datalen); | 815 | tcp_conn->in.datalen); |
793 | break; | 816 | break; |
794 | default: | 817 | default: |
@@ -843,7 +866,7 @@ more: | |||
843 | if (rc == -EAGAIN) | 866 | if (rc == -EAGAIN) |
844 | goto nomore; | 867 | goto nomore; |
845 | else { | 868 | else { |
846 | iscsi_conn_failure(conn, rc); | 869 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
847 | return 0; | 870 | return 0; |
848 | } | 871 | } |
849 | } | 872 | } |
@@ -897,7 +920,7 @@ more: | |||
897 | if (rc) { | 920 | if (rc) { |
898 | if (rc == -EAGAIN) | 921 | if (rc == -EAGAIN) |
899 | goto again; | 922 | goto again; |
900 | iscsi_conn_failure(conn, rc); | 923 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
901 | return 0; | 924 | return 0; |
902 | } | 925 | } |
903 | tcp_conn->in.copy -= tcp_conn->in.padding; | 926 | tcp_conn->in.copy -= tcp_conn->in.padding; |
@@ -1028,9 +1051,8 @@ iscsi_conn_set_callbacks(struct iscsi_conn *conn) | |||
1028 | } | 1051 | } |
1029 | 1052 | ||
1030 | static void | 1053 | static void |
1031 | iscsi_conn_restore_callbacks(struct iscsi_conn *conn) | 1054 | iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn) |
1032 | { | 1055 | { |
1033 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1034 | struct sock *sk = tcp_conn->sock->sk; | 1056 | struct sock *sk = tcp_conn->sock->sk; |
1035 | 1057 | ||
1036 | /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ | 1058 | /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ |
@@ -1308,7 +1330,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) | |||
1308 | ctask->imm_count - | 1330 | ctask->imm_count - |
1309 | ctask->unsol_count; | 1331 | ctask->unsol_count; |
1310 | 1332 | ||
1311 | debug_scsi("cmd [itt %x total %d imm %d imm_data %d " | 1333 | debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d " |
1312 | "r2t_data %d]\n", | 1334 | "r2t_data %d]\n", |
1313 | ctask->itt, ctask->total_length, ctask->imm_count, | 1335 | ctask->itt, ctask->total_length, ctask->imm_count, |
1314 | ctask->unsol_count, tcp_ctask->r2t_data_count); | 1336 | ctask->unsol_count, tcp_ctask->r2t_data_count); |
@@ -1636,7 +1658,7 @@ handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
1636 | } | 1658 | } |
1637 | solicit_again: | 1659 | solicit_again: |
1638 | /* | 1660 | /* |
1639 | * send Data-Out whitnin this R2T sequence. | 1661 | * send Data-Out within this R2T sequence. |
1640 | */ | 1662 | */ |
1641 | if (!r2t->data_count) | 1663 | if (!r2t->data_count) |
1642 | goto data_out_done; | 1664 | goto data_out_done; |
@@ -1731,7 +1753,7 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
1731 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 1753 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
1732 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 1754 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1733 | struct iscsi_data_task *dtask = tcp_ctask->dtask; | 1755 | struct iscsi_data_task *dtask = tcp_ctask->dtask; |
1734 | int sent, rc; | 1756 | int sent = 0, rc; |
1735 | 1757 | ||
1736 | tcp_ctask->xmstate &= ~XMSTATE_W_PAD; | 1758 | tcp_ctask->xmstate &= ~XMSTATE_W_PAD; |
1737 | iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, | 1759 | iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, |
@@ -1900,27 +1922,32 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
1900 | tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; | 1922 | tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; |
1901 | /* initial operational parameters */ | 1923 | /* initial operational parameters */ |
1902 | tcp_conn->hdr_size = sizeof(struct iscsi_hdr); | 1924 | tcp_conn->hdr_size = sizeof(struct iscsi_hdr); |
1903 | tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; | ||
1904 | |||
1905 | /* allocate initial PDU receive place holder */ | ||
1906 | if (tcp_conn->data_size <= PAGE_SIZE) | ||
1907 | tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL); | ||
1908 | else | ||
1909 | tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL, | ||
1910 | get_order(tcp_conn->data_size)); | ||
1911 | if (!tcp_conn->data) | ||
1912 | goto max_recv_dlenght_alloc_fail; | ||
1913 | 1925 | ||
1914 | return cls_conn; | 1926 | return cls_conn; |
1915 | 1927 | ||
1916 | max_recv_dlenght_alloc_fail: | ||
1917 | kfree(tcp_conn); | ||
1918 | tcp_conn_alloc_fail: | 1928 | tcp_conn_alloc_fail: |
1919 | iscsi_conn_teardown(cls_conn); | 1929 | iscsi_conn_teardown(cls_conn); |
1920 | return NULL; | 1930 | return NULL; |
1921 | } | 1931 | } |
1922 | 1932 | ||
1923 | static void | 1933 | static void |
1934 | iscsi_tcp_release_conn(struct iscsi_conn *conn) | ||
1935 | { | ||
1936 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1937 | |||
1938 | if (!tcp_conn->sock) | ||
1939 | return; | ||
1940 | |||
1941 | sock_hold(tcp_conn->sock->sk); | ||
1942 | iscsi_conn_restore_callbacks(tcp_conn); | ||
1943 | sock_put(tcp_conn->sock->sk); | ||
1944 | |||
1945 | sock_release(tcp_conn->sock); | ||
1946 | tcp_conn->sock = NULL; | ||
1947 | conn->recv_lock = NULL; | ||
1948 | } | ||
1949 | |||
1950 | static void | ||
1924 | iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) | 1951 | iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) |
1925 | { | 1952 | { |
1926 | struct iscsi_conn *conn = cls_conn->dd_data; | 1953 | struct iscsi_conn *conn = cls_conn->dd_data; |
@@ -1930,6 +1957,7 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) | |||
1930 | if (conn->hdrdgst_en || conn->datadgst_en) | 1957 | if (conn->hdrdgst_en || conn->datadgst_en) |
1931 | digest = 1; | 1958 | digest = 1; |
1932 | 1959 | ||
1960 | iscsi_tcp_release_conn(conn); | ||
1933 | iscsi_conn_teardown(cls_conn); | 1961 | iscsi_conn_teardown(cls_conn); |
1934 | 1962 | ||
1935 | /* now free tcp_conn */ | 1963 | /* now free tcp_conn */ |
@@ -1944,15 +1972,18 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) | |||
1944 | crypto_free_tfm(tcp_conn->data_rx_tfm); | 1972 | crypto_free_tfm(tcp_conn->data_rx_tfm); |
1945 | } | 1973 | } |
1946 | 1974 | ||
1947 | /* free conn->data, size = MaxRecvDataSegmentLength */ | ||
1948 | if (tcp_conn->data_size <= PAGE_SIZE) | ||
1949 | kfree(tcp_conn->data); | ||
1950 | else | ||
1951 | free_pages((unsigned long)tcp_conn->data, | ||
1952 | get_order(tcp_conn->data_size)); | ||
1953 | kfree(tcp_conn); | 1975 | kfree(tcp_conn); |
1954 | } | 1976 | } |
1955 | 1977 | ||
1978 | static void | ||
1979 | iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | ||
1980 | { | ||
1981 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
1982 | |||
1983 | iscsi_conn_stop(cls_conn, flag); | ||
1984 | iscsi_tcp_release_conn(conn); | ||
1985 | } | ||
1986 | |||
1956 | static int | 1987 | static int |
1957 | iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, | 1988 | iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, |
1958 | struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, | 1989 | struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, |
@@ -2001,52 +2032,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, | |||
2001 | return 0; | 2032 | return 0; |
2002 | } | 2033 | } |
2003 | 2034 | ||
2004 | static void | ||
2005 | iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | ||
2006 | { | ||
2007 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
2008 | struct iscsi_r2t_info *r2t; | ||
2009 | |||
2010 | /* flush ctask's r2t queues */ | ||
2011 | while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) | ||
2012 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, | ||
2013 | sizeof(void*)); | ||
2014 | |||
2015 | __iscsi_ctask_cleanup(conn, ctask); | ||
2016 | } | ||
2017 | |||
2018 | static void | ||
2019 | iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn) | ||
2020 | { | ||
2021 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
2022 | struct sock *sk; | ||
2023 | |||
2024 | if (!tcp_conn->sock) | ||
2025 | return; | ||
2026 | |||
2027 | sk = tcp_conn->sock->sk; | ||
2028 | write_lock_bh(&sk->sk_callback_lock); | ||
2029 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); | ||
2030 | write_unlock_bh(&sk->sk_callback_lock); | ||
2031 | } | ||
2032 | |||
2033 | static void | ||
2034 | iscsi_tcp_terminate_conn(struct iscsi_conn *conn) | ||
2035 | { | ||
2036 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
2037 | |||
2038 | if (!tcp_conn->sock) | ||
2039 | return; | ||
2040 | |||
2041 | sock_hold(tcp_conn->sock->sk); | ||
2042 | iscsi_conn_restore_callbacks(conn); | ||
2043 | sock_put(tcp_conn->sock->sk); | ||
2044 | |||
2045 | sock_release(tcp_conn->sock); | ||
2046 | tcp_conn->sock = NULL; | ||
2047 | conn->recv_lock = NULL; | ||
2048 | } | ||
2049 | |||
2050 | /* called with host lock */ | 2035 | /* called with host lock */ |
2051 | static void | 2036 | static void |
2052 | iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, | 2037 | iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, |
@@ -2057,6 +2042,7 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, | |||
2057 | iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, | 2042 | iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, |
2058 | sizeof(struct iscsi_hdr)); | 2043 | sizeof(struct iscsi_hdr)); |
2059 | tcp_mtask->xmstate = XMSTATE_IMM_HDR; | 2044 | tcp_mtask->xmstate = XMSTATE_IMM_HDR; |
2045 | tcp_mtask->sent = 0; | ||
2060 | 2046 | ||
2061 | if (mtask->data_count) | 2047 | if (mtask->data_count) |
2062 | iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, | 2048 | iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, |
@@ -2138,39 +2124,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, | |||
2138 | int value; | 2124 | int value; |
2139 | 2125 | ||
2140 | switch(param) { | 2126 | switch(param) { |
2141 | case ISCSI_PARAM_MAX_RECV_DLENGTH: { | ||
2142 | char *saveptr = tcp_conn->data; | ||
2143 | gfp_t flags = GFP_KERNEL; | ||
2144 | |||
2145 | sscanf(buf, "%d", &value); | ||
2146 | if (tcp_conn->data_size >= value) { | ||
2147 | iscsi_set_param(cls_conn, param, buf, buflen); | ||
2148 | break; | ||
2149 | } | ||
2150 | |||
2151 | spin_lock_bh(&session->lock); | ||
2152 | if (conn->stop_stage == STOP_CONN_RECOVER) | ||
2153 | flags = GFP_ATOMIC; | ||
2154 | spin_unlock_bh(&session->lock); | ||
2155 | |||
2156 | if (value <= PAGE_SIZE) | ||
2157 | tcp_conn->data = kmalloc(value, flags); | ||
2158 | else | ||
2159 | tcp_conn->data = (void*)__get_free_pages(flags, | ||
2160 | get_order(value)); | ||
2161 | if (tcp_conn->data == NULL) { | ||
2162 | tcp_conn->data = saveptr; | ||
2163 | return -ENOMEM; | ||
2164 | } | ||
2165 | if (tcp_conn->data_size <= PAGE_SIZE) | ||
2166 | kfree(saveptr); | ||
2167 | else | ||
2168 | free_pages((unsigned long)saveptr, | ||
2169 | get_order(tcp_conn->data_size)); | ||
2170 | iscsi_set_param(cls_conn, param, buf, buflen); | ||
2171 | tcp_conn->data_size = value; | ||
2172 | break; | ||
2173 | } | ||
2174 | case ISCSI_PARAM_HDRDGST_EN: | 2127 | case ISCSI_PARAM_HDRDGST_EN: |
2175 | iscsi_set_param(cls_conn, param, buf, buflen); | 2128 | iscsi_set_param(cls_conn, param, buf, buflen); |
2176 | tcp_conn->hdr_size = sizeof(struct iscsi_hdr); | 2129 | tcp_conn->hdr_size = sizeof(struct iscsi_hdr); |
@@ -2361,8 +2314,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) | |||
2361 | } | 2314 | } |
2362 | 2315 | ||
2363 | static struct scsi_host_template iscsi_sht = { | 2316 | static struct scsi_host_template iscsi_sht = { |
2364 | .name = "iSCSI Initiator over TCP/IP, v" | 2317 | .name = "iSCSI Initiator over TCP/IP", |
2365 | ISCSI_TCP_VERSION, | ||
2366 | .queuecommand = iscsi_queuecommand, | 2318 | .queuecommand = iscsi_queuecommand, |
2367 | .change_queue_depth = iscsi_change_queue_depth, | 2319 | .change_queue_depth = iscsi_change_queue_depth, |
2368 | .can_queue = ISCSI_XMIT_CMDS_MAX - 1, | 2320 | .can_queue = ISCSI_XMIT_CMDS_MAX - 1, |
@@ -2414,10 +2366,7 @@ static struct iscsi_transport iscsi_tcp_transport = { | |||
2414 | .get_conn_param = iscsi_tcp_conn_get_param, | 2366 | .get_conn_param = iscsi_tcp_conn_get_param, |
2415 | .get_session_param = iscsi_session_get_param, | 2367 | .get_session_param = iscsi_session_get_param, |
2416 | .start_conn = iscsi_conn_start, | 2368 | .start_conn = iscsi_conn_start, |
2417 | .stop_conn = iscsi_conn_stop, | 2369 | .stop_conn = iscsi_tcp_conn_stop, |
2418 | /* these are called as part of conn recovery */ | ||
2419 | .suspend_conn_recv = iscsi_tcp_suspend_conn_rx, | ||
2420 | .terminate_conn = iscsi_tcp_terminate_conn, | ||
2421 | /* IO */ | 2370 | /* IO */ |
2422 | .send_pdu = iscsi_conn_send_pdu, | 2371 | .send_pdu = iscsi_conn_send_pdu, |
2423 | .get_stats = iscsi_conn_get_stats, | 2372 | .get_stats = iscsi_conn_get_stats, |
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 808302832e68..6a4ee704e46e 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h | |||
@@ -78,8 +78,6 @@ struct iscsi_tcp_conn { | |||
78 | char hdrext[4*sizeof(__u16) + | 78 | char hdrext[4*sizeof(__u16) + |
79 | sizeof(__u32)]; | 79 | sizeof(__u32)]; |
80 | int data_copied; | 80 | int data_copied; |
81 | char *data; /* data placeholder */ | ||
82 | int data_size; /* actual recv_dlength */ | ||
83 | int stop_stage; /* conn_stop() flag: * | 81 | int stop_stage; /* conn_stop() flag: * |
84 | * stop to recover, * | 82 | * stop to recover, * |
85 | * stop to terminate */ | 83 | * stop to terminate */ |
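With the per-connection placeholder gone from iscsi_tcp_conn, the buffer the earlier hunks copy into (conn->data) presumably lives on the generic struct iscsi_conn and is managed by libiscsi at DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH bytes; roughly (layout assumed, only the field used by the hunks above is shown):

struct iscsi_conn {
	/* ... */
	char	*data;	/* generic PDU receive buffer, owned by libiscsi */
	/* ... */
};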
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c index 3fd8a96f2af3..bfac4441d89f 100644 --- a/drivers/scsi/jazz_esp.c +++ b/drivers/scsi/jazz_esp.c | |||
@@ -257,7 +257,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp) | |||
257 | static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) | 257 | static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) |
258 | { | 258 | { |
259 | int sz = sp->use_sg - 1; | 259 | int sz = sp->use_sg - 1; |
260 | struct scatterlist *sg = (struct scatterlist *)sp->buffer; | 260 | struct scatterlist *sg = (struct scatterlist *)sp->request_buffer; |
261 | 261 | ||
262 | while(sz >= 0) { | 262 | while(sz >= 0) { |
263 | vdma_free(sg[sz].dma_address); | 263 | vdma_free(sg[sz].dma_address); |
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 1c960ac1617f..427b73a3886a 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
@@ -61,9 +61,9 @@ | |||
61 | #include "libata.h" | 61 | #include "libata.h" |
62 | 62 | ||
63 | /* debounce timing parameters in msecs { interval, duration, timeout } */ | 63 | /* debounce timing parameters in msecs { interval, duration, timeout } */ |
64 | const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 }; | 64 | const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; |
65 | const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 }; | 65 | const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; |
66 | const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 }; | 66 | const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; |
67 | 67 | ||
68 | static unsigned int ata_dev_init_params(struct ata_device *dev, | 68 | static unsigned int ata_dev_init_params(struct ata_device *dev, |
69 | u16 heads, u16 sectors); | 69 | u16 heads, u16 sectors); |
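The renamed tables keep the same {interval, duration, timeout} layout in milliseconds; callers now pick one by situation rather than by the old boot/EH naming. An illustrative fragment (not from the patch):

	/* hotplug: poll every 25ms, require 500ms of stable SStatus, give up after 2s */
	rc = sata_phy_resume(ap, sata_deb_timing_hotplug);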
@@ -907,7 +907,7 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data, | |||
907 | { | 907 | { |
908 | int rc; | 908 | int rc; |
909 | 909 | ||
910 | if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK) | 910 | if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) |
911 | return; | 911 | return; |
912 | 912 | ||
913 | PREPARE_WORK(&ap->port_task, fn, data); | 913 | PREPARE_WORK(&ap->port_task, fn, data); |
@@ -938,7 +938,7 @@ void ata_port_flush_task(struct ata_port *ap) | |||
938 | DPRINTK("ENTER\n"); | 938 | DPRINTK("ENTER\n"); |
939 | 939 | ||
940 | spin_lock_irqsave(ap->lock, flags); | 940 | spin_lock_irqsave(ap->lock, flags); |
941 | ap->flags |= ATA_FLAG_FLUSH_PORT_TASK; | 941 | ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK; |
942 | spin_unlock_irqrestore(ap->lock, flags); | 942 | spin_unlock_irqrestore(ap->lock, flags); |
943 | 943 | ||
944 | DPRINTK("flush #1\n"); | 944 | DPRINTK("flush #1\n"); |
@@ -957,7 +957,7 @@ void ata_port_flush_task(struct ata_port *ap) | |||
957 | } | 957 | } |
958 | 958 | ||
959 | spin_lock_irqsave(ap->lock, flags); | 959 | spin_lock_irqsave(ap->lock, flags); |
960 | ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK; | 960 | ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK; |
961 | spin_unlock_irqrestore(ap->lock, flags); | 961 | spin_unlock_irqrestore(ap->lock, flags); |
962 | 962 | ||
963 | if (ata_msg_ctl(ap)) | 963 | if (ata_msg_ctl(ap)) |
@@ -1009,7 +1009,7 @@ unsigned ata_exec_internal(struct ata_device *dev, | |||
1009 | spin_lock_irqsave(ap->lock, flags); | 1009 | spin_lock_irqsave(ap->lock, flags); |
1010 | 1010 | ||
1011 | /* no internal command while frozen */ | 1011 | /* no internal command while frozen */ |
1012 | if (ap->flags & ATA_FLAG_FROZEN) { | 1012 | if (ap->pflags & ATA_PFLAG_FROZEN) { |
1013 | spin_unlock_irqrestore(ap->lock, flags); | 1013 | spin_unlock_irqrestore(ap->lock, flags); |
1014 | return AC_ERR_SYSTEM; | 1014 | return AC_ERR_SYSTEM; |
1015 | } | 1015 | } |
@@ -1256,10 +1256,15 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | |||
1256 | swap_buf_le16(id, ATA_ID_WORDS); | 1256 | swap_buf_le16(id, ATA_ID_WORDS); |
1257 | 1257 | ||
1258 | /* sanity check */ | 1258 | /* sanity check */ |
1259 | if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) { | 1259 | rc = -EINVAL; |
1260 | rc = -EINVAL; | 1260 | reason = "device reports illegal type"; |
1261 | reason = "device reports illegal type"; | 1261 | |
1262 | goto err_out; | 1262 | if (class == ATA_DEV_ATA) { |
1263 | if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) | ||
1264 | goto err_out; | ||
1265 | } else { | ||
1266 | if (ata_id_is_ata(id)) | ||
1267 | goto err_out; | ||
1263 | } | 1268 | } |
1264 | 1269 | ||
1265 | if (post_reset && class == ATA_DEV_ATA) { | 1270 | if (post_reset && class == ATA_DEV_ATA) { |
@@ -1325,6 +1330,19 @@ static void ata_dev_config_ncq(struct ata_device *dev, | |||
1325 | snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); | 1330 | snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); |
1326 | } | 1331 | } |
1327 | 1332 | ||
1333 | static void ata_set_port_max_cmd_len(struct ata_port *ap) | ||
1334 | { | ||
1335 | int i; | ||
1336 | |||
1337 | if (ap->host) { | ||
1338 | ap->host->max_cmd_len = 0; | ||
1339 | for (i = 0; i < ATA_MAX_DEVICES; i++) | ||
1340 | ap->host->max_cmd_len = max_t(unsigned int, | ||
1341 | ap->host->max_cmd_len, | ||
1342 | ap->device[i].cdb_len); | ||
1343 | } | ||
1344 | } | ||
1345 | |||
1328 | /** | 1346 | /** |
1329 | * ata_dev_configure - Configure the specified ATA/ATAPI device | 1347 | * ata_dev_configure - Configure the specified ATA/ATAPI device |
1330 | * @dev: Target device to configure | 1348 | * @dev: Target device to configure |
@@ -1344,7 +1362,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info) | |||
1344 | struct ata_port *ap = dev->ap; | 1362 | struct ata_port *ap = dev->ap; |
1345 | const u16 *id = dev->id; | 1363 | const u16 *id = dev->id; |
1346 | unsigned int xfer_mask; | 1364 | unsigned int xfer_mask; |
1347 | int i, rc; | 1365 | int rc; |
1348 | 1366 | ||
1349 | if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { | 1367 | if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { |
1350 | ata_dev_printk(dev, KERN_INFO, | 1368 | ata_dev_printk(dev, KERN_INFO, |
@@ -1404,7 +1422,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info) | |||
1404 | ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); | 1422 | ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); |
1405 | 1423 | ||
1406 | /* print device info to dmesg */ | 1424 | /* print device info to dmesg */ |
1407 | if (ata_msg_info(ap)) | 1425 | if (ata_msg_drv(ap) && print_info) |
1408 | ata_dev_printk(dev, KERN_INFO, "ATA-%d, " | 1426 | ata_dev_printk(dev, KERN_INFO, "ATA-%d, " |
1409 | "max %s, %Lu sectors: %s %s\n", | 1427 | "max %s, %Lu sectors: %s %s\n", |
1410 | ata_id_major_version(id), | 1428 | ata_id_major_version(id), |
@@ -1427,7 +1445,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info) | |||
1427 | } | 1445 | } |
1428 | 1446 | ||
1429 | /* print device info to dmesg */ | 1447 | /* print device info to dmesg */ |
1430 | if (ata_msg_info(ap)) | 1448 | if (ata_msg_drv(ap) && print_info) |
1431 | ata_dev_printk(dev, KERN_INFO, "ATA-%d, " | 1449 | ata_dev_printk(dev, KERN_INFO, "ATA-%d, " |
1432 | "max %s, %Lu sectors: CHS %u/%u/%u\n", | 1450 | "max %s, %Lu sectors: CHS %u/%u/%u\n", |
1433 | ata_id_major_version(id), | 1451 | ata_id_major_version(id), |
@@ -1439,7 +1457,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info) | |||
1439 | 1457 | ||
1440 | if (dev->id[59] & 0x100) { | 1458 | if (dev->id[59] & 0x100) { |
1441 | dev->multi_count = dev->id[59] & 0xff; | 1459 | dev->multi_count = dev->id[59] & 0xff; |
1442 | if (ata_msg_info(ap)) | 1460 | if (ata_msg_drv(ap) && print_info) |
1443 | ata_dev_printk(dev, KERN_INFO, | 1461 | ata_dev_printk(dev, KERN_INFO, |
1444 | "ata%u: dev %u multi count %u\n", | 1462 | "ata%u: dev %u multi count %u\n", |
1445 | ap->id, dev->devno, dev->multi_count); | 1463 | ap->id, dev->devno, dev->multi_count); |
@@ -1468,21 +1486,17 @@ int ata_dev_configure(struct ata_device *dev, int print_info) | |||
1468 | } | 1486 | } |
1469 | 1487 | ||
1470 | /* print device info to dmesg */ | 1488 | /* print device info to dmesg */ |
1471 | if (ata_msg_info(ap)) | 1489 | if (ata_msg_drv(ap) && print_info) |
1472 | ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n", | 1490 | ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n", |
1473 | ata_mode_string(xfer_mask), | 1491 | ata_mode_string(xfer_mask), |
1474 | cdb_intr_string); | 1492 | cdb_intr_string); |
1475 | } | 1493 | } |
1476 | 1494 | ||
1477 | ap->host->max_cmd_len = 0; | 1495 | ata_set_port_max_cmd_len(ap); |
1478 | for (i = 0; i < ATA_MAX_DEVICES; i++) | ||
1479 | ap->host->max_cmd_len = max_t(unsigned int, | ||
1480 | ap->host->max_cmd_len, | ||
1481 | ap->device[i].cdb_len); | ||
1482 | 1496 | ||
1483 | /* limit bridge transfers to udma5, 200 sectors */ | 1497 | /* limit bridge transfers to udma5, 200 sectors */ |
1484 | if (ata_dev_knobble(dev)) { | 1498 | if (ata_dev_knobble(dev)) { |
1485 | if (ata_msg_info(ap)) | 1499 | if (ata_msg_drv(ap) && print_info) |
1486 | ata_dev_printk(dev, KERN_INFO, | 1500 | ata_dev_printk(dev, KERN_INFO, |
1487 | "applying bridge limits\n"); | 1501 | "applying bridge limits\n"); |
1488 | dev->udma_mask &= ATA_UDMA5; | 1502 | dev->udma_mask &= ATA_UDMA5; |
@@ -2137,7 +2151,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) | |||
2137 | * return error code and failing device on failure. | 2151 | * return error code and failing device on failure. |
2138 | */ | 2152 | */ |
2139 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2153 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
2140 | if (ata_dev_enabled(&ap->device[i])) { | 2154 | if (ata_dev_ready(&ap->device[i])) { |
2141 | ap->ops->set_mode(ap); | 2155 | ap->ops->set_mode(ap); |
2142 | break; | 2156 | break; |
2143 | } | 2157 | } |
@@ -2203,7 +2217,8 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) | |||
2203 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2217 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
2204 | dev = &ap->device[i]; | 2218 | dev = &ap->device[i]; |
2205 | 2219 | ||
2206 | if (!ata_dev_enabled(dev)) | 2220 | /* don't update suspended devices' xfer mode */ |
2221 | if (!ata_dev_ready(dev)) | ||
2207 | continue; | 2222 | continue; |
2208 | 2223 | ||
2209 | rc = ata_dev_set_mode(dev); | 2224 | rc = ata_dev_set_mode(dev); |
@@ -2579,7 +2594,7 @@ static void ata_wait_spinup(struct ata_port *ap) | |||
2579 | 2594 | ||
2580 | /* first, debounce phy if SATA */ | 2595 | /* first, debounce phy if SATA */ |
2581 | if (ap->cbl == ATA_CBL_SATA) { | 2596 | if (ap->cbl == ATA_CBL_SATA) { |
2582 | rc = sata_phy_debounce(ap, sata_deb_timing_eh); | 2597 | rc = sata_phy_debounce(ap, sata_deb_timing_hotplug); |
2583 | 2598 | ||
2584 | /* if debounced successfully and offline, no need to wait */ | 2599 | /* if debounced successfully and offline, no need to wait */ |
2585 | if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap)) | 2600 | if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap)) |
@@ -2615,16 +2630,17 @@ static void ata_wait_spinup(struct ata_port *ap) | |||
2615 | int ata_std_prereset(struct ata_port *ap) | 2630 | int ata_std_prereset(struct ata_port *ap) |
2616 | { | 2631 | { |
2617 | struct ata_eh_context *ehc = &ap->eh_context; | 2632 | struct ata_eh_context *ehc = &ap->eh_context; |
2618 | const unsigned long *timing; | 2633 | const unsigned long *timing = sata_ehc_deb_timing(ehc); |
2619 | int rc; | 2634 | int rc; |
2620 | 2635 | ||
2621 | /* hotplug? */ | 2636 | /* handle link resume & hotplug spinup */ |
2622 | if (ehc->i.flags & ATA_EHI_HOTPLUGGED) { | 2637 | if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && |
2623 | if (ap->flags & ATA_FLAG_HRST_TO_RESUME) | 2638 | (ap->flags & ATA_FLAG_HRST_TO_RESUME)) |
2624 | ehc->i.action |= ATA_EH_HARDRESET; | 2639 | ehc->i.action |= ATA_EH_HARDRESET; |
2625 | if (ap->flags & ATA_FLAG_SKIP_D2H_BSY) | 2640 | |
2626 | ata_wait_spinup(ap); | 2641 | if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) && |
2627 | } | 2642 | (ap->flags & ATA_FLAG_SKIP_D2H_BSY)) |
2643 | ata_wait_spinup(ap); | ||
2628 | 2644 | ||
2629 | /* if we're about to do hardreset, nothing more to do */ | 2645 | /* if we're about to do hardreset, nothing more to do */ |
2630 | if (ehc->i.action & ATA_EH_HARDRESET) | 2646 | if (ehc->i.action & ATA_EH_HARDRESET) |
@@ -2632,11 +2648,6 @@ int ata_std_prereset(struct ata_port *ap) | |||
2632 | 2648 | ||
2633 | /* if SATA, resume phy */ | 2649 | /* if SATA, resume phy */ |
2634 | if (ap->cbl == ATA_CBL_SATA) { | 2650 | if (ap->cbl == ATA_CBL_SATA) { |
2635 | if (ap->flags & ATA_FLAG_LOADING) | ||
2636 | timing = sata_deb_timing_boot; | ||
2637 | else | ||
2638 | timing = sata_deb_timing_eh; | ||
2639 | |||
2640 | rc = sata_phy_resume(ap, timing); | 2651 | rc = sata_phy_resume(ap, timing); |
2641 | if (rc && rc != -EOPNOTSUPP) { | 2652 | if (rc && rc != -EOPNOTSUPP) { |
2642 | /* phy resume failed */ | 2653 | /* phy resume failed */ |
@@ -2724,6 +2735,8 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes) | |||
2724 | */ | 2735 | */ |
2725 | int sata_std_hardreset(struct ata_port *ap, unsigned int *class) | 2736 | int sata_std_hardreset(struct ata_port *ap, unsigned int *class) |
2726 | { | 2737 | { |
2738 | struct ata_eh_context *ehc = &ap->eh_context; | ||
2739 | const unsigned long *timing = sata_ehc_deb_timing(ehc); | ||
2727 | u32 scontrol; | 2740 | u32 scontrol; |
2728 | int rc; | 2741 | int rc; |
2729 | 2742 | ||
@@ -2738,7 +2751,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class) | |||
2738 | if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) | 2751 | if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) |
2739 | return rc; | 2752 | return rc; |
2740 | 2753 | ||
2741 | scontrol = (scontrol & 0x0f0) | 0x302; | 2754 | scontrol = (scontrol & 0x0f0) | 0x304; |
2742 | 2755 | ||
2743 | if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) | 2756 | if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) |
2744 | return rc; | 2757 | return rc; |
@@ -2761,7 +2774,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class) | |||
2761 | msleep(1); | 2774 | msleep(1); |
2762 | 2775 | ||
2763 | /* bring phy back */ | 2776 | /* bring phy back */ |
2764 | sata_phy_resume(ap, sata_deb_timing_eh); | 2777 | sata_phy_resume(ap, timing); |
2765 | 2778 | ||
2766 | /* TODO: phy layer with polling, timeouts, etc. */ | 2779 | /* TODO: phy layer with polling, timeouts, etc. */ |
2767 | if (ata_port_offline(ap)) { | 2780 | if (ata_port_offline(ap)) { |
@@ -4285,7 +4298,7 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) | |||
4285 | unsigned int i; | 4298 | unsigned int i; |
4286 | 4299 | ||
4287 | /* no command while frozen */ | 4300 | /* no command while frozen */ |
4288 | if (unlikely(ap->flags & ATA_FLAG_FROZEN)) | 4301 | if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) |
4289 | return NULL; | 4302 | return NULL; |
4290 | 4303 | ||
4291 | /* the last tag is reserved for internal command. */ | 4304 | /* the last tag is reserved for internal command. */ |
@@ -4407,7 +4420,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
4407 | * taken care of. | 4420 | * taken care of. |
4408 | */ | 4421 | */ |
4409 | if (ap->ops->error_handler) { | 4422 | if (ap->ops->error_handler) { |
4410 | WARN_ON(ap->flags & ATA_FLAG_FROZEN); | 4423 | WARN_ON(ap->pflags & ATA_PFLAG_FROZEN); |
4411 | 4424 | ||
4412 | if (unlikely(qc->err_mask)) | 4425 | if (unlikely(qc->err_mask)) |
4413 | qc->flags |= ATA_QCFLAG_FAILED; | 4426 | qc->flags |= ATA_QCFLAG_FAILED; |
@@ -5001,86 +5014,120 @@ int ata_flush_cache(struct ata_device *dev) | |||
5001 | return 0; | 5014 | return 0; |
5002 | } | 5015 | } |
5003 | 5016 | ||
5004 | static int ata_standby_drive(struct ata_device *dev) | 5017 | static int ata_host_set_request_pm(struct ata_host_set *host_set, |
5018 | pm_message_t mesg, unsigned int action, | ||
5019 | unsigned int ehi_flags, int wait) | ||
5005 | { | 5020 | { |
5006 | unsigned int err_mask; | 5021 | unsigned long flags; |
5022 | int i, rc; | ||
5007 | 5023 | ||
5008 | err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1); | 5024 | for (i = 0; i < host_set->n_ports; i++) { |
5009 | if (err_mask) { | 5025 | struct ata_port *ap = host_set->ports[i]; |
5010 | ata_dev_printk(dev, KERN_ERR, "failed to standby drive " | ||
5011 | "(err_mask=0x%x)\n", err_mask); | ||
5012 | return -EIO; | ||
5013 | } | ||
5014 | 5026 | ||
5015 | return 0; | 5027 | /* Previous resume operation might still be in |
5016 | } | 5028 | * progress. Wait for PM_PENDING to clear. |
5029 | */ | ||
5030 | if (ap->pflags & ATA_PFLAG_PM_PENDING) { | ||
5031 | ata_port_wait_eh(ap); | ||
5032 | WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); | ||
5033 | } | ||
5017 | 5034 | ||
5018 | static int ata_start_drive(struct ata_device *dev) | 5035 | /* request PM ops to EH */ |
5019 | { | 5036 | spin_lock_irqsave(ap->lock, flags); |
5020 | unsigned int err_mask; | ||
5021 | 5037 | ||
5022 | err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE); | 5038 | ap->pm_mesg = mesg; |
5023 | if (err_mask) { | 5039 | if (wait) { |
5024 | ata_dev_printk(dev, KERN_ERR, "failed to start drive " | 5040 | rc = 0; |
5025 | "(err_mask=0x%x)\n", err_mask); | 5041 | ap->pm_result = &rc; |
5026 | return -EIO; | 5042 | } |
5043 | |||
5044 | ap->pflags |= ATA_PFLAG_PM_PENDING; | ||
5045 | ap->eh_info.action |= action; | ||
5046 | ap->eh_info.flags |= ehi_flags; | ||
5047 | |||
5048 | ata_port_schedule_eh(ap); | ||
5049 | |||
5050 | spin_unlock_irqrestore(ap->lock, flags); | ||
5051 | |||
5052 | /* wait and check result */ | ||
5053 | if (wait) { | ||
5054 | ata_port_wait_eh(ap); | ||
5055 | WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); | ||
5056 | if (rc) | ||
5057 | return rc; | ||
5058 | } | ||
5027 | } | 5059 | } |
5028 | 5060 | ||
5029 | return 0; | 5061 | return 0; |
5030 | } | 5062 | } |
5031 | 5063 | ||
5032 | /** | 5064 | /** |
5033 | * ata_device_resume - wakeup a previously suspended devices | 5065 | * ata_host_set_suspend - suspend host_set |
5034 | * @dev: the device to resume | 5066 | * @host_set: host_set to suspend |
5067 | * @mesg: PM message | ||
5035 | * | 5068 | * |
5036 | * Kick the drive back into action, by sending it an idle immediate | 5069 | * Suspend @host_set. Actual operation is performed by EH. This |
5037 | * command and making sure its transfer mode matches between drive | 5070 | * function requests EH to perform PM operations and waits for EH |
5038 | * and host. | 5071 | * to finish. |
5072 | * | ||
5073 | * LOCKING: | ||
5074 | * Kernel thread context (may sleep). | ||
5039 | * | 5075 | * |
5076 | * RETURNS: | ||
5077 | * 0 on success, -errno on failure. | ||
5040 | */ | 5078 | */ |
5041 | int ata_device_resume(struct ata_device *dev) | 5079 | int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg) |
5042 | { | 5080 | { |
5043 | struct ata_port *ap = dev->ap; | 5081 | int i, j, rc; |
5044 | 5082 | ||
5045 | if (ap->flags & ATA_FLAG_SUSPENDED) { | 5083 | rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1); |
5046 | struct ata_device *failed_dev; | 5084 | if (rc) |
5085 | goto fail; | ||
5047 | 5086 | ||
5048 | ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); | 5087 | /* EH is quiescent now. Fail if we have any ready device. |
5049 | ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000); | 5088 | * This happens if hotplug occurs between completion of device |
5089 | * suspension and here. | ||
5090 | */ | ||
5091 | for (i = 0; i < host_set->n_ports; i++) { | ||
5092 | struct ata_port *ap = host_set->ports[i]; | ||
5093 | |||
5094 | for (j = 0; j < ATA_MAX_DEVICES; j++) { | ||
5095 | struct ata_device *dev = &ap->device[j]; | ||
5050 | 5096 | ||
5051 | ap->flags &= ~ATA_FLAG_SUSPENDED; | 5097 | if (ata_dev_ready(dev)) { |
5052 | while (ata_set_mode(ap, &failed_dev)) | 5098 | ata_port_printk(ap, KERN_WARNING, |
5053 | ata_dev_disable(failed_dev); | 5099 | "suspend failed, device %d " |
5100 | "still active\n", dev->devno); | ||
5101 | rc = -EBUSY; | ||
5102 | goto fail; | ||
5103 | } | ||
5104 | } | ||
5054 | } | 5105 | } |
5055 | if (!ata_dev_enabled(dev)) | ||
5056 | return 0; | ||
5057 | if (dev->class == ATA_DEV_ATA) | ||
5058 | ata_start_drive(dev); | ||
5059 | 5106 | ||
5107 | host_set->dev->power.power_state = mesg; | ||
5060 | return 0; | 5108 | return 0; |
5109 | |||
5110 | fail: | ||
5111 | ata_host_set_resume(host_set); | ||
5112 | return rc; | ||
5061 | } | 5113 | } |
5062 | 5114 | ||
5063 | /** | 5115 | /** |
5064 | * ata_device_suspend - prepare a device for suspend | 5116 | * ata_host_set_resume - resume host_set |
5065 | * @dev: the device to suspend | 5117 | * @host_set: host_set to resume |
5066 | * @state: target power management state | ||
5067 | * | 5118 | * |
5068 | * Flush the cache on the drive, if appropriate, then issue a | 5119 | * Resume @host_set. Actual operation is performed by EH. This |
5069 | * standbynow command. | 5120 | * function requests EH to perform PM operations and returns. |
5121 | * Note that all resume operations are performed in parallel. | ||
5122 | * | ||
5123 | * LOCKING: | ||
5124 | * Kernel thread context (may sleep). | ||
5070 | */ | 5125 | */ |
5071 | int ata_device_suspend(struct ata_device *dev, pm_message_t state) | 5126 | void ata_host_set_resume(struct ata_host_set *host_set) |
5072 | { | 5127 | { |
5073 | struct ata_port *ap = dev->ap; | 5128 | ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET, |
5074 | 5129 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); | |
5075 | if (!ata_dev_enabled(dev)) | 5130 | host_set->dev->power.power_state = PMSG_ON; |
5076 | return 0; | ||
5077 | if (dev->class == ATA_DEV_ATA) | ||
5078 | ata_flush_cache(dev); | ||
5079 | |||
5080 | if (state.event != PM_EVENT_FREEZE) | ||
5081 | ata_standby_drive(dev); | ||
5082 | ap->flags |= ATA_FLAG_SUSPENDED; | ||
5083 | return 0; | ||
5084 | } | 5131 | } |
5085 | 5132 | ||
5086 | /** | 5133 | /** |
@@ -5143,28 +5190,6 @@ void ata_host_stop (struct ata_host_set *host_set) | |||
5143 | iounmap(host_set->mmio_base); | 5190 | iounmap(host_set->mmio_base); |
5144 | } | 5191 | } |
5145 | 5192 | ||
5146 | |||
5147 | /** | ||
5148 | * ata_host_remove - Unregister SCSI host structure with upper layers | ||
5149 | * @ap: Port to unregister | ||
5150 | * @do_unregister: 1 if we fully unregister, 0 to just stop the port | ||
5151 | * | ||
5152 | * LOCKING: | ||
5153 | * Inherited from caller. | ||
5154 | */ | ||
5155 | |||
5156 | static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) | ||
5157 | { | ||
5158 | struct Scsi_Host *sh = ap->host; | ||
5159 | |||
5160 | DPRINTK("ENTER\n"); | ||
5161 | |||
5162 | if (do_unregister) | ||
5163 | scsi_remove_host(sh); | ||
5164 | |||
5165 | ap->ops->port_stop(ap); | ||
5166 | } | ||
5167 | |||
5168 | /** | 5193 | /** |
5169 | * ata_dev_init - Initialize an ata_device structure | 5194 | * ata_dev_init - Initialize an ata_device structure |
5170 | * @dev: Device structure to initialize | 5195 | * @dev: Device structure to initialize |
@@ -5440,6 +5465,7 @@ int ata_device_add(const struct ata_probe_ent *ent) | |||
5440 | } | 5465 | } |
5441 | 5466 | ||
5442 | if (ap->ops->error_handler) { | 5467 | if (ap->ops->error_handler) { |
5468 | struct ata_eh_info *ehi = &ap->eh_info; | ||
5443 | unsigned long flags; | 5469 | unsigned long flags; |
5444 | 5470 | ||
5445 | ata_port_probe(ap); | 5471 | ata_port_probe(ap); |
@@ -5447,10 +5473,11 @@ int ata_device_add(const struct ata_probe_ent *ent) | |||
5447 | /* kick EH for boot probing */ | 5473 | /* kick EH for boot probing */ |
5448 | spin_lock_irqsave(ap->lock, flags); | 5474 | spin_lock_irqsave(ap->lock, flags); |
5449 | 5475 | ||
5450 | ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1; | 5476 | ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1; |
5451 | ap->eh_info.action |= ATA_EH_SOFTRESET; | 5477 | ehi->action |= ATA_EH_SOFTRESET; |
5478 | ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; | ||
5452 | 5479 | ||
5453 | ap->flags |= ATA_FLAG_LOADING; | 5480 | ap->pflags |= ATA_PFLAG_LOADING; |
5454 | ata_port_schedule_eh(ap); | 5481 | ata_port_schedule_eh(ap); |
5455 | 5482 | ||
5456 | spin_unlock_irqrestore(ap->lock, flags); | 5483 | spin_unlock_irqrestore(ap->lock, flags); |
@@ -5488,8 +5515,11 @@ int ata_device_add(const struct ata_probe_ent *ent) | |||
5488 | 5515 | ||
5489 | err_out: | 5516 | err_out: |
5490 | for (i = 0; i < count; i++) { | 5517 | for (i = 0; i < count; i++) { |
5491 | ata_host_remove(host_set->ports[i], 1); | 5518 | struct ata_port *ap = host_set->ports[i]; |
5492 | scsi_host_put(host_set->ports[i]->host); | 5519 | if (ap) { |
5520 | ap->ops->port_stop(ap); | ||
5521 | scsi_host_put(ap->host); | ||
5522 | } | ||
5493 | } | 5523 | } |
5494 | err_free_ret: | 5524 | err_free_ret: |
5495 | kfree(host_set); | 5525 | kfree(host_set); |
@@ -5514,11 +5544,11 @@ void ata_port_detach(struct ata_port *ap) | |||
5514 | int i; | 5544 | int i; |
5515 | 5545 | ||
5516 | if (!ap->ops->error_handler) | 5546 | if (!ap->ops->error_handler) |
5517 | return; | 5547 | goto skip_eh; |
5518 | 5548 | ||
5519 | /* tell EH we're leaving & flush EH */ | 5549 | /* tell EH we're leaving & flush EH */ |
5520 | spin_lock_irqsave(ap->lock, flags); | 5550 | spin_lock_irqsave(ap->lock, flags); |
5521 | ap->flags |= ATA_FLAG_UNLOADING; | 5551 | ap->pflags |= ATA_PFLAG_UNLOADING; |
5522 | spin_unlock_irqrestore(ap->lock, flags); | 5552 | spin_unlock_irqrestore(ap->lock, flags); |
5523 | 5553 | ||
5524 | ata_port_wait_eh(ap); | 5554 | ata_port_wait_eh(ap); |
@@ -5550,6 +5580,7 @@ void ata_port_detach(struct ata_port *ap) | |||
5550 | cancel_delayed_work(&ap->hotplug_task); | 5580 | cancel_delayed_work(&ap->hotplug_task); |
5551 | flush_workqueue(ata_aux_wq); | 5581 | flush_workqueue(ata_aux_wq); |
5552 | 5582 | ||
5583 | skip_eh: | ||
5553 | /* remove the associated SCSI host */ | 5584 | /* remove the associated SCSI host */ |
5554 | scsi_remove_host(ap->host); | 5585 | scsi_remove_host(ap->host); |
5555 | } | 5586 | } |
@@ -5618,7 +5649,7 @@ int ata_scsi_release(struct Scsi_Host *host) | |||
5618 | DPRINTK("ENTER\n"); | 5649 | DPRINTK("ENTER\n"); |
5619 | 5650 | ||
5620 | ap->ops->port_disable(ap); | 5651 | ap->ops->port_disable(ap); |
5621 | ata_host_remove(ap, 0); | 5652 | ap->ops->port_stop(ap); |
5622 | 5653 | ||
5623 | DPRINTK("EXIT\n"); | 5654 | DPRINTK("EXIT\n"); |
5624 | return 1; | 5655 | return 1; |
@@ -5723,20 +5754,55 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) | |||
5723 | return (tmp == bits->val) ? 1 : 0; | 5754 | return (tmp == bits->val) ? 1 : 0; |
5724 | } | 5755 | } |
5725 | 5756 | ||
5726 | int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state) | 5757 | void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state) |
5727 | { | 5758 | { |
5728 | pci_save_state(pdev); | 5759 | pci_save_state(pdev); |
5729 | pci_disable_device(pdev); | 5760 | |
5730 | pci_set_power_state(pdev, PCI_D3hot); | 5761 | if (state.event == PM_EVENT_SUSPEND) { |
5731 | return 0; | 5762 | pci_disable_device(pdev); |
5763 | pci_set_power_state(pdev, PCI_D3hot); | ||
5764 | } | ||
5732 | } | 5765 | } |
5733 | 5766 | ||
5734 | int ata_pci_device_resume(struct pci_dev *pdev) | 5767 | void ata_pci_device_do_resume(struct pci_dev *pdev) |
5735 | { | 5768 | { |
5736 | pci_set_power_state(pdev, PCI_D0); | 5769 | pci_set_power_state(pdev, PCI_D0); |
5737 | pci_restore_state(pdev); | 5770 | pci_restore_state(pdev); |
5738 | pci_enable_device(pdev); | 5771 | pci_enable_device(pdev); |
5739 | pci_set_master(pdev); | 5772 | pci_set_master(pdev); |
5773 | } | ||
5774 | |||
5775 | int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state) | ||
5776 | { | ||
5777 | struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); | ||
5778 | int rc = 0; | ||
5779 | |||
5780 | rc = ata_host_set_suspend(host_set, state); | ||
5781 | if (rc) | ||
5782 | return rc; | ||
5783 | |||
5784 | if (host_set->next) { | ||
5785 | rc = ata_host_set_suspend(host_set->next, state); | ||
5786 | if (rc) { | ||
5787 | ata_host_set_resume(host_set); | ||
5788 | return rc; | ||
5789 | } | ||
5790 | } | ||
5791 | |||
5792 | ata_pci_device_do_suspend(pdev, state); | ||
5793 | |||
5794 | return 0; | ||
5795 | } | ||
5796 | |||
5797 | int ata_pci_device_resume(struct pci_dev *pdev) | ||
5798 | { | ||
5799 | struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); | ||
5800 | |||
5801 | ata_pci_device_do_resume(pdev); | ||
5802 | ata_host_set_resume(host_set); | ||
5803 | if (host_set->next) | ||
5804 | ata_host_set_resume(host_set->next); | ||
5805 | |||
5740 | return 0; | 5806 | return 0; |
5741 | } | 5807 | } |
5742 | #endif /* CONFIG_PCI */ | 5808 | #endif /* CONFIG_PCI */ |
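The do_suspend/do_resume split lets a driver with controller-specific PM work keep the generic PCI handling while adding its own steps. A hypothetical LLDD resume hook (driver and function names are made up; the helper calls mirror the ones defined above):

static int foo_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);

	ata_pci_device_do_resume(pdev);		/* D0, restore config space, bus mastering */
	/* controller-specific re-initialization would go here */
	ata_host_set_resume(host_set);		/* ask EH to bring the ports back up */
	return 0;
}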
@@ -5842,9 +5908,9 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | |||
5842 | * Do not depend on ABI/API stability. | 5908 | * Do not depend on ABI/API stability. |
5843 | */ | 5909 | */ |
5844 | 5910 | ||
5845 | EXPORT_SYMBOL_GPL(sata_deb_timing_boot); | 5911 | EXPORT_SYMBOL_GPL(sata_deb_timing_normal); |
5846 | EXPORT_SYMBOL_GPL(sata_deb_timing_eh); | 5912 | EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); |
5847 | EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst); | 5913 | EXPORT_SYMBOL_GPL(sata_deb_timing_long); |
5848 | EXPORT_SYMBOL_GPL(ata_std_bios_param); | 5914 | EXPORT_SYMBOL_GPL(ata_std_bios_param); |
5849 | EXPORT_SYMBOL_GPL(ata_std_ports); | 5915 | EXPORT_SYMBOL_GPL(ata_std_ports); |
5850 | EXPORT_SYMBOL_GPL(ata_device_add); | 5916 | EXPORT_SYMBOL_GPL(ata_device_add); |
@@ -5916,6 +5982,8 @@ EXPORT_SYMBOL_GPL(sata_scr_write); | |||
5916 | EXPORT_SYMBOL_GPL(sata_scr_write_flush); | 5982 | EXPORT_SYMBOL_GPL(sata_scr_write_flush); |
5917 | EXPORT_SYMBOL_GPL(ata_port_online); | 5983 | EXPORT_SYMBOL_GPL(ata_port_online); |
5918 | EXPORT_SYMBOL_GPL(ata_port_offline); | 5984 | EXPORT_SYMBOL_GPL(ata_port_offline); |
5985 | EXPORT_SYMBOL_GPL(ata_host_set_suspend); | ||
5986 | EXPORT_SYMBOL_GPL(ata_host_set_resume); | ||
5919 | EXPORT_SYMBOL_GPL(ata_id_string); | 5987 | EXPORT_SYMBOL_GPL(ata_id_string); |
5920 | EXPORT_SYMBOL_GPL(ata_id_c_string); | 5988 | EXPORT_SYMBOL_GPL(ata_id_c_string); |
5921 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); | 5989 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); |
@@ -5930,14 +5998,14 @@ EXPORT_SYMBOL_GPL(ata_pci_host_stop); | |||
5930 | EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); | 5998 | EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); |
5931 | EXPORT_SYMBOL_GPL(ata_pci_init_one); | 5999 | EXPORT_SYMBOL_GPL(ata_pci_init_one); |
5932 | EXPORT_SYMBOL_GPL(ata_pci_remove_one); | 6000 | EXPORT_SYMBOL_GPL(ata_pci_remove_one); |
6001 | EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); | ||
6002 | EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); | ||
5933 | EXPORT_SYMBOL_GPL(ata_pci_device_suspend); | 6003 | EXPORT_SYMBOL_GPL(ata_pci_device_suspend); |
5934 | EXPORT_SYMBOL_GPL(ata_pci_device_resume); | 6004 | EXPORT_SYMBOL_GPL(ata_pci_device_resume); |
5935 | EXPORT_SYMBOL_GPL(ata_pci_default_filter); | 6005 | EXPORT_SYMBOL_GPL(ata_pci_default_filter); |
5936 | EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); | 6006 | EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); |
5937 | #endif /* CONFIG_PCI */ | 6007 | #endif /* CONFIG_PCI */ |
5938 | 6008 | ||
5939 | EXPORT_SYMBOL_GPL(ata_device_suspend); | ||
5940 | EXPORT_SYMBOL_GPL(ata_device_resume); | ||
5941 | EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); | 6009 | EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); |
5942 | EXPORT_SYMBOL_GPL(ata_scsi_device_resume); | 6010 | EXPORT_SYMBOL_GPL(ata_scsi_device_resume); |
5943 | 6011 | ||
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c index bf5a72aca8a4..29f59345305d 100644 --- a/drivers/scsi/libata-eh.c +++ b/drivers/scsi/libata-eh.c | |||
@@ -47,6 +47,8 @@ | |||
47 | 47 | ||
48 | static void __ata_port_freeze(struct ata_port *ap); | 48 | static void __ata_port_freeze(struct ata_port *ap); |
49 | static void ata_eh_finish(struct ata_port *ap); | 49 | static void ata_eh_finish(struct ata_port *ap); |
50 | static void ata_eh_handle_port_suspend(struct ata_port *ap); | ||
51 | static void ata_eh_handle_port_resume(struct ata_port *ap); | ||
50 | 52 | ||
51 | static void ata_ering_record(struct ata_ering *ering, int is_io, | 53 | static void ata_ering_record(struct ata_ering *ering, int is_io, |
52 | unsigned int err_mask) | 54 | unsigned int err_mask) |
@@ -190,7 +192,6 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) | |||
190 | void ata_scsi_error(struct Scsi_Host *host) | 192 | void ata_scsi_error(struct Scsi_Host *host) |
191 | { | 193 | { |
192 | struct ata_port *ap = ata_shost_to_port(host); | 194 | struct ata_port *ap = ata_shost_to_port(host); |
193 | spinlock_t *ap_lock = ap->lock; | ||
194 | int i, repeat_cnt = ATA_EH_MAX_REPEAT; | 195 | int i, repeat_cnt = ATA_EH_MAX_REPEAT; |
195 | unsigned long flags; | 196 | unsigned long flags; |
196 | 197 | ||
@@ -217,7 +218,7 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
217 | struct scsi_cmnd *scmd, *tmp; | 218 | struct scsi_cmnd *scmd, *tmp; |
218 | int nr_timedout = 0; | 219 | int nr_timedout = 0; |
219 | 220 | ||
220 | spin_lock_irqsave(ap_lock, flags); | 221 | spin_lock_irqsave(ap->lock, flags); |
221 | 222 | ||
222 | list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { | 223 | list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { |
223 | struct ata_queued_cmd *qc; | 224 | struct ata_queued_cmd *qc; |
@@ -256,43 +257,49 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
256 | if (nr_timedout) | 257 | if (nr_timedout) |
257 | __ata_port_freeze(ap); | 258 | __ata_port_freeze(ap); |
258 | 259 | ||
259 | spin_unlock_irqrestore(ap_lock, flags); | 260 | spin_unlock_irqrestore(ap->lock, flags); |
260 | } else | 261 | } else |
261 | spin_unlock_wait(ap_lock); | 262 | spin_unlock_wait(ap->lock); |
262 | 263 | ||
263 | repeat: | 264 | repeat: |
264 | /* invoke error handler */ | 265 | /* invoke error handler */ |
265 | if (ap->ops->error_handler) { | 266 | if (ap->ops->error_handler) { |
267 | /* process port resume request */ | ||
268 | ata_eh_handle_port_resume(ap); | ||
269 | |||
266 | /* fetch & clear EH info */ | 270 | /* fetch & clear EH info */ |
267 | spin_lock_irqsave(ap_lock, flags); | 271 | spin_lock_irqsave(ap->lock, flags); |
268 | 272 | ||
269 | memset(&ap->eh_context, 0, sizeof(ap->eh_context)); | 273 | memset(&ap->eh_context, 0, sizeof(ap->eh_context)); |
270 | ap->eh_context.i = ap->eh_info; | 274 | ap->eh_context.i = ap->eh_info; |
271 | memset(&ap->eh_info, 0, sizeof(ap->eh_info)); | 275 | memset(&ap->eh_info, 0, sizeof(ap->eh_info)); |
272 | 276 | ||
273 | ap->flags |= ATA_FLAG_EH_IN_PROGRESS; | 277 | ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; |
274 | ap->flags &= ~ATA_FLAG_EH_PENDING; | 278 | ap->pflags &= ~ATA_PFLAG_EH_PENDING; |
275 | 279 | ||
276 | spin_unlock_irqrestore(ap_lock, flags); | 280 | spin_unlock_irqrestore(ap->lock, flags); |
277 | 281 | ||
278 | /* invoke EH. if unloading, just finish failed qcs */ | 282 | /* invoke EH, skip if unloading or suspended */ |
279 | if (!(ap->flags & ATA_FLAG_UNLOADING)) | 283 | if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) |
280 | ap->ops->error_handler(ap); | 284 | ap->ops->error_handler(ap); |
281 | else | 285 | else |
282 | ata_eh_finish(ap); | 286 | ata_eh_finish(ap); |
283 | 287 | ||
288 | /* process port suspend request */ | ||
289 | ata_eh_handle_port_suspend(ap); | ||
290 | |||
284 | /* Exception might have happened after ->error_handler | 291 | /* Exception might have happened after ->error_handler |
285 | * recovered the port but before this point. Repeat | 292 | * recovered the port but before this point. Repeat |
286 | * EH in such case. | 293 | * EH in such case. |
287 | */ | 294 | */ |
288 | spin_lock_irqsave(ap_lock, flags); | 295 | spin_lock_irqsave(ap->lock, flags); |
289 | 296 | ||
290 | if (ap->flags & ATA_FLAG_EH_PENDING) { | 297 | if (ap->pflags & ATA_PFLAG_EH_PENDING) { |
291 | if (--repeat_cnt) { | 298 | if (--repeat_cnt) { |
292 | ata_port_printk(ap, KERN_INFO, | 299 | ata_port_printk(ap, KERN_INFO, |
293 | "EH pending after completion, " | 300 | "EH pending after completion, " |
294 | "repeating EH (cnt=%d)\n", repeat_cnt); | 301 | "repeating EH (cnt=%d)\n", repeat_cnt); |
295 | spin_unlock_irqrestore(ap_lock, flags); | 302 | spin_unlock_irqrestore(ap->lock, flags); |
296 | goto repeat; | 303 | goto repeat; |
297 | } | 304 | } |
298 | ata_port_printk(ap, KERN_ERR, "EH pending after %d " | 305 | ata_port_printk(ap, KERN_ERR, "EH pending after %d " |
@@ -302,14 +309,14 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
302 | /* this run is complete, make sure EH info is clear */ | 309 | /* this run is complete, make sure EH info is clear */ |
303 | memset(&ap->eh_info, 0, sizeof(ap->eh_info)); | 310 | memset(&ap->eh_info, 0, sizeof(ap->eh_info)); |
304 | 311 | ||
305 | /* Clear host_eh_scheduled while holding ap_lock such | 312 | /* Clear host_eh_scheduled while holding ap->lock such |
306 | * that if exception occurs after this point but | 313 | * that if exception occurs after this point but |
307 | * before EH completion, SCSI midlayer will | 314 | * before EH completion, SCSI midlayer will |
308 | * re-initiate EH. | 315 | * re-initiate EH. |
309 | */ | 316 | */ |
310 | host->host_eh_scheduled = 0; | 317 | host->host_eh_scheduled = 0; |
311 | 318 | ||
312 | spin_unlock_irqrestore(ap_lock, flags); | 319 | spin_unlock_irqrestore(ap->lock, flags); |
313 | } else { | 320 | } else { |
314 | WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL); | 321 | WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL); |
315 | ap->ops->eng_timeout(ap); | 322 | ap->ops->eng_timeout(ap); |
@@ -321,24 +328,23 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
321 | scsi_eh_flush_done_q(&ap->eh_done_q); | 328 | scsi_eh_flush_done_q(&ap->eh_done_q); |
322 | 329 | ||
323 | /* clean up */ | 330 | /* clean up */ |
324 | spin_lock_irqsave(ap_lock, flags); | 331 | spin_lock_irqsave(ap->lock, flags); |
325 | 332 | ||
326 | if (ap->flags & ATA_FLAG_LOADING) { | 333 | if (ap->pflags & ATA_PFLAG_LOADING) |
327 | ap->flags &= ~ATA_FLAG_LOADING; | 334 | ap->pflags &= ~ATA_PFLAG_LOADING; |
328 | } else { | 335 | else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) |
329 | if (ap->flags & ATA_FLAG_SCSI_HOTPLUG) | 336 | queue_work(ata_aux_wq, &ap->hotplug_task); |
330 | queue_work(ata_aux_wq, &ap->hotplug_task); | ||
331 | if (ap->flags & ATA_FLAG_RECOVERED) | ||
332 | ata_port_printk(ap, KERN_INFO, "EH complete\n"); | ||
333 | } | ||
334 | 337 | ||
335 | ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED); | 338 | if (ap->pflags & ATA_PFLAG_RECOVERED) |
339 | ata_port_printk(ap, KERN_INFO, "EH complete\n"); | ||
340 | |||
341 | ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); | ||
336 | 342 | ||
337 | /* tell wait_eh that we're done */ | 343 | /* tell wait_eh that we're done */ |
338 | ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS; | 344 | ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; |
339 | wake_up_all(&ap->eh_wait_q); | 345 | wake_up_all(&ap->eh_wait_q); |
340 | 346 | ||
341 | spin_unlock_irqrestore(ap_lock, flags); | 347 | spin_unlock_irqrestore(ap->lock, flags); |
342 | 348 | ||
343 | DPRINTK("EXIT\n"); | 349 | DPRINTK("EXIT\n"); |
344 | } | 350 | } |
@@ -360,7 +366,7 @@ void ata_port_wait_eh(struct ata_port *ap) | |||
360 | retry: | 366 | retry: |
361 | spin_lock_irqsave(ap->lock, flags); | 367 | spin_lock_irqsave(ap->lock, flags); |
362 | 368 | ||
363 | while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) { | 369 | while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { |
364 | prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); | 370 | prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); |
365 | spin_unlock_irqrestore(ap->lock, flags); | 371 | spin_unlock_irqrestore(ap->lock, flags); |
366 | schedule(); | 372 | schedule(); |
@@ -489,7 +495,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc) | |||
489 | WARN_ON(!ap->ops->error_handler); | 495 | WARN_ON(!ap->ops->error_handler); |
490 | 496 | ||
491 | qc->flags |= ATA_QCFLAG_FAILED; | 497 | qc->flags |= ATA_QCFLAG_FAILED; |
492 | qc->ap->flags |= ATA_FLAG_EH_PENDING; | 498 | qc->ap->pflags |= ATA_PFLAG_EH_PENDING; |
493 | 499 | ||
494 | /* The following will fail if timeout has already expired. | 500 | /* The following will fail if timeout has already expired. |
495 | * ata_scsi_error() takes care of such scmds on EH entry. | 501 | * ata_scsi_error() takes care of such scmds on EH entry. |
@@ -513,7 +519,7 @@ void ata_port_schedule_eh(struct ata_port *ap) | |||
513 | { | 519 | { |
514 | WARN_ON(!ap->ops->error_handler); | 520 | WARN_ON(!ap->ops->error_handler); |
515 | 521 | ||
516 | ap->flags |= ATA_FLAG_EH_PENDING; | 522 | ap->pflags |= ATA_PFLAG_EH_PENDING; |
517 | scsi_schedule_eh(ap->host); | 523 | scsi_schedule_eh(ap->host); |
518 | 524 | ||
519 | DPRINTK("port EH scheduled\n"); | 525 | DPRINTK("port EH scheduled\n"); |
@@ -578,7 +584,7 @@ static void __ata_port_freeze(struct ata_port *ap) | |||
578 | if (ap->ops->freeze) | 584 | if (ap->ops->freeze) |
579 | ap->ops->freeze(ap); | 585 | ap->ops->freeze(ap); |
580 | 586 | ||
581 | ap->flags |= ATA_FLAG_FROZEN; | 587 | ap->pflags |= ATA_PFLAG_FROZEN; |
582 | 588 | ||
583 | DPRINTK("ata%u port frozen\n", ap->id); | 589 | DPRINTK("ata%u port frozen\n", ap->id); |
584 | } | 590 | } |
@@ -646,7 +652,7 @@ void ata_eh_thaw_port(struct ata_port *ap) | |||
646 | 652 | ||
647 | spin_lock_irqsave(ap->lock, flags); | 653 | spin_lock_irqsave(ap->lock, flags); |
648 | 654 | ||
649 | ap->flags &= ~ATA_FLAG_FROZEN; | 655 | ap->pflags &= ~ATA_PFLAG_FROZEN; |
650 | 656 | ||
651 | if (ap->ops->thaw) | 657 | if (ap->ops->thaw) |
652 | ap->ops->thaw(ap); | 658 | ap->ops->thaw(ap); |
@@ -731,7 +737,7 @@ static void ata_eh_detach_dev(struct ata_device *dev) | |||
731 | 737 | ||
732 | if (ata_scsi_offline_dev(dev)) { | 738 | if (ata_scsi_offline_dev(dev)) { |
733 | dev->flags |= ATA_DFLAG_DETACHED; | 739 | dev->flags |= ATA_DFLAG_DETACHED; |
734 | ap->flags |= ATA_FLAG_SCSI_HOTPLUG; | 740 | ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; |
735 | } | 741 | } |
736 | 742 | ||
737 | /* clear per-dev EH actions */ | 743 | /* clear per-dev EH actions */ |
@@ -758,10 +764,29 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, | |||
758 | unsigned int action) | 764 | unsigned int action) |
759 | { | 765 | { |
760 | unsigned long flags; | 766 | unsigned long flags; |
767 | struct ata_eh_info *ehi = &ap->eh_info; | ||
768 | struct ata_eh_context *ehc = &ap->eh_context; | ||
761 | 769 | ||
762 | spin_lock_irqsave(ap->lock, flags); | 770 | spin_lock_irqsave(ap->lock, flags); |
763 | ata_eh_clear_action(dev, &ap->eh_info, action); | 771 | |
764 | ap->flags |= ATA_FLAG_RECOVERED; | 772 | /* Reset is represented by combination of actions and EHI |
773 | * flags. Suck in all related bits before clearing eh_info to | ||
774 | * avoid losing requested action. | ||
775 | */ | ||
776 | if (action & ATA_EH_RESET_MASK) { | ||
777 | ehc->i.action |= ehi->action & ATA_EH_RESET_MASK; | ||
778 | ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK; | ||
779 | |||
780 | /* make sure all reset actions are cleared & clear EHI flags */ | ||
781 | action |= ATA_EH_RESET_MASK; | ||
782 | ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK; | ||
783 | } | ||
784 | |||
785 | ata_eh_clear_action(dev, ehi, action); | ||
786 | |||
787 | if (!(ehc->i.flags & ATA_EHI_QUIET)) | ||
788 | ap->pflags |= ATA_PFLAG_RECOVERED; | ||
789 | |||
765 | spin_unlock_irqrestore(ap->lock, flags); | 790 | spin_unlock_irqrestore(ap->lock, flags); |
766 | } | 791 | } |
767 | 792 | ||
@@ -780,6 +805,12 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, | |||
780 | static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, | 805 | static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, |
781 | unsigned int action) | 806 | unsigned int action) |
782 | { | 807 | { |
808 | /* if reset is complete, clear all reset actions & reset modifier */ | ||
809 | if (action & ATA_EH_RESET_MASK) { | ||
810 | action |= ATA_EH_RESET_MASK; | ||
811 | ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK; | ||
812 | } | ||
813 | |||
783 | ata_eh_clear_action(dev, &ap->eh_context.i, action); | 814 | ata_eh_clear_action(dev, &ap->eh_context.i, action); |
784 | } | 815 | } |
785 | 816 | ||
@@ -1027,7 +1058,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap) | |||
1027 | int tag, rc; | 1058 | int tag, rc; |
1028 | 1059 | ||
1029 | /* if frozen, we can't do much */ | 1060 | /* if frozen, we can't do much */ |
1030 | if (ap->flags & ATA_FLAG_FROZEN) | 1061 | if (ap->pflags & ATA_PFLAG_FROZEN) |
1031 | return; | 1062 | return; |
1032 | 1063 | ||
1033 | /* is it NCQ device error? */ | 1064 | /* is it NCQ device error? */ |
@@ -1266,8 +1297,6 @@ static int ata_eh_speed_down(struct ata_device *dev, int is_io, | |||
1266 | static void ata_eh_autopsy(struct ata_port *ap) | 1297 | static void ata_eh_autopsy(struct ata_port *ap) |
1267 | { | 1298 | { |
1268 | struct ata_eh_context *ehc = &ap->eh_context; | 1299 | struct ata_eh_context *ehc = &ap->eh_context; |
1269 | unsigned int action = ehc->i.action; | ||
1270 | struct ata_device *failed_dev = NULL; | ||
1271 | unsigned int all_err_mask = 0; | 1300 | unsigned int all_err_mask = 0; |
1272 | int tag, is_io = 0; | 1301 | int tag, is_io = 0; |
1273 | u32 serror; | 1302 | u32 serror; |
@@ -1275,13 +1304,16 @@ static void ata_eh_autopsy(struct ata_port *ap) | |||
1275 | 1304 | ||
1276 | DPRINTK("ENTER\n"); | 1305 | DPRINTK("ENTER\n"); |
1277 | 1306 | ||
1307 | if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) | ||
1308 | return; | ||
1309 | |||
1278 | /* obtain and analyze SError */ | 1310 | /* obtain and analyze SError */ |
1279 | rc = sata_scr_read(ap, SCR_ERROR, &serror); | 1311 | rc = sata_scr_read(ap, SCR_ERROR, &serror); |
1280 | if (rc == 0) { | 1312 | if (rc == 0) { |
1281 | ehc->i.serror |= serror; | 1313 | ehc->i.serror |= serror; |
1282 | ata_eh_analyze_serror(ap); | 1314 | ata_eh_analyze_serror(ap); |
1283 | } else if (rc != -EOPNOTSUPP) | 1315 | } else if (rc != -EOPNOTSUPP) |
1284 | action |= ATA_EH_HARDRESET; | 1316 | ehc->i.action |= ATA_EH_HARDRESET; |
1285 | 1317 | ||
1286 | /* analyze NCQ failure */ | 1318 | /* analyze NCQ failure */ |
1287 | ata_eh_analyze_ncq_error(ap); | 1319 | ata_eh_analyze_ncq_error(ap); |
@@ -1302,7 +1334,7 @@ static void ata_eh_autopsy(struct ata_port *ap) | |||
1302 | qc->err_mask |= ehc->i.err_mask; | 1334 | qc->err_mask |= ehc->i.err_mask; |
1303 | 1335 | ||
1304 | /* analyze TF */ | 1336 | /* analyze TF */ |
1305 | action |= ata_eh_analyze_tf(qc, &qc->result_tf); | 1337 | ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); |
1306 | 1338 | ||
1307 | /* DEV errors are probably spurious in case of ATA_BUS error */ | 1339 | /* DEV errors are probably spurious in case of ATA_BUS error */ |
1308 | if (qc->err_mask & AC_ERR_ATA_BUS) | 1340 | if (qc->err_mask & AC_ERR_ATA_BUS) |
@@ -1316,38 +1348,35 @@ static void ata_eh_autopsy(struct ata_port *ap) | |||
1316 | /* SENSE_VALID trumps dev/unknown error and revalidation */ | 1348 | /* SENSE_VALID trumps dev/unknown error and revalidation */ |
1317 | if (qc->flags & ATA_QCFLAG_SENSE_VALID) { | 1349 | if (qc->flags & ATA_QCFLAG_SENSE_VALID) { |
1318 | qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); | 1350 | qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); |
1319 | action &= ~ATA_EH_REVALIDATE; | 1351 | ehc->i.action &= ~ATA_EH_REVALIDATE; |
1320 | } | 1352 | } |
1321 | 1353 | ||
1322 | /* accumulate error info */ | 1354 | /* accumulate error info */ |
1323 | failed_dev = qc->dev; | 1355 | ehc->i.dev = qc->dev; |
1324 | all_err_mask |= qc->err_mask; | 1356 | all_err_mask |= qc->err_mask; |
1325 | if (qc->flags & ATA_QCFLAG_IO) | 1357 | if (qc->flags & ATA_QCFLAG_IO) |
1326 | is_io = 1; | 1358 | is_io = 1; |
1327 | } | 1359 | } |
1328 | 1360 | ||
1329 | /* enforce default EH actions */ | 1361 | /* enforce default EH actions */ |
1330 | if (ap->flags & ATA_FLAG_FROZEN || | 1362 | if (ap->pflags & ATA_PFLAG_FROZEN || |
1331 | all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) | 1363 | all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) |
1332 | action |= ATA_EH_SOFTRESET; | 1364 | ehc->i.action |= ATA_EH_SOFTRESET; |
1333 | else if (all_err_mask) | 1365 | else if (all_err_mask) |
1334 | action |= ATA_EH_REVALIDATE; | 1366 | ehc->i.action |= ATA_EH_REVALIDATE; |
1335 | 1367 | ||
1336 | /* if we have offending qcs and the associated failed device */ | 1368 | /* if we have offending qcs and the associated failed device */ |
1337 | if (failed_dev) { | 1369 | if (ehc->i.dev) { |
1338 | /* speed down */ | 1370 | /* speed down */ |
1339 | action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask); | 1371 | ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io, |
1372 | all_err_mask); | ||
1340 | 1373 | ||
1341 | /* perform per-dev EH action only on the offending device */ | 1374 | /* perform per-dev EH action only on the offending device */ |
1342 | ehc->i.dev_action[failed_dev->devno] |= | 1375 | ehc->i.dev_action[ehc->i.dev->devno] |= |
1343 | action & ATA_EH_PERDEV_MASK; | 1376 | ehc->i.action & ATA_EH_PERDEV_MASK; |
1344 | action &= ~ATA_EH_PERDEV_MASK; | 1377 | ehc->i.action &= ~ATA_EH_PERDEV_MASK; |
1345 | } | 1378 | } |
1346 | 1379 | ||
1347 | /* record autopsy result */ | ||
1348 | ehc->i.dev = failed_dev; | ||
1349 | ehc->i.action = action; | ||
1350 | |||
1351 | DPRINTK("EXIT\n"); | 1380 | DPRINTK("EXIT\n"); |
1352 | } | 1381 | } |
1353 | 1382 | ||
@@ -1385,7 +1414,7 @@ static void ata_eh_report(struct ata_port *ap) | |||
1385 | return; | 1414 | return; |
1386 | 1415 | ||
1387 | frozen = ""; | 1416 | frozen = ""; |
1388 | if (ap->flags & ATA_FLAG_FROZEN) | 1417 | if (ap->pflags & ATA_PFLAG_FROZEN) |
1389 | frozen = " frozen"; | 1418 | frozen = " frozen"; |
1390 | 1419 | ||
1391 | if (ehc->i.dev) { | 1420 | if (ehc->i.dev) { |
@@ -1465,11 +1494,14 @@ static int ata_eh_reset(struct ata_port *ap, int classify, | |||
1465 | struct ata_eh_context *ehc = &ap->eh_context; | 1494 | struct ata_eh_context *ehc = &ap->eh_context; |
1466 | unsigned int *classes = ehc->classes; | 1495 | unsigned int *classes = ehc->classes; |
1467 | int tries = ATA_EH_RESET_TRIES; | 1496 | int tries = ATA_EH_RESET_TRIES; |
1468 | int verbose = !(ap->flags & ATA_FLAG_LOADING); | 1497 | int verbose = !(ehc->i.flags & ATA_EHI_QUIET); |
1469 | unsigned int action; | 1498 | unsigned int action; |
1470 | ata_reset_fn_t reset; | 1499 | ata_reset_fn_t reset; |
1471 | int i, did_followup_srst, rc; | 1500 | int i, did_followup_srst, rc; |
1472 | 1501 | ||
1502 | /* about to reset */ | ||
1503 | ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); | ||
1504 | |||
1473 | /* Determine which reset to use and record in ehc->i.action. | 1505 | /* Determine which reset to use and record in ehc->i.action. |
1474 | * prereset() may examine and modify it. | 1506 | * prereset() may examine and modify it. |
1475 | */ | 1507 | */ |
@@ -1518,8 +1550,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, | |||
1518 | ata_port_printk(ap, KERN_INFO, "%s resetting port\n", | 1550 | ata_port_printk(ap, KERN_INFO, "%s resetting port\n", |
1519 | reset == softreset ? "soft" : "hard"); | 1551 | reset == softreset ? "soft" : "hard"); |
1520 | 1552 | ||
1521 | /* reset */ | 1553 | /* mark that this EH session started with reset */ |
1522 | ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); | ||
1523 | ehc->i.flags |= ATA_EHI_DID_RESET; | 1554 | ehc->i.flags |= ATA_EHI_DID_RESET; |
1524 | 1555 | ||
1525 | rc = ata_do_reset(ap, reset, classes); | 1556 | rc = ata_do_reset(ap, reset, classes); |
@@ -1582,7 +1613,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, | |||
1582 | postreset(ap, classes); | 1613 | postreset(ap, classes); |
1583 | 1614 | ||
1584 | /* reset successful, schedule revalidation */ | 1615 | /* reset successful, schedule revalidation */ |
1585 | ata_eh_done(ap, NULL, ATA_EH_RESET_MASK); | 1616 | ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); |
1586 | ehc->i.action |= ATA_EH_REVALIDATE; | 1617 | ehc->i.action |= ATA_EH_REVALIDATE; |
1587 | } | 1618 | } |
1588 | 1619 | ||
@@ -1605,7 +1636,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap, | |||
1605 | dev = &ap->device[i]; | 1636 | dev = &ap->device[i]; |
1606 | action = ata_eh_dev_action(dev); | 1637 | action = ata_eh_dev_action(dev); |
1607 | 1638 | ||
1608 | if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) { | 1639 | if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) { |
1609 | if (ata_port_offline(ap)) { | 1640 | if (ata_port_offline(ap)) { |
1610 | rc = -EIO; | 1641 | rc = -EIO; |
1611 | break; | 1642 | break; |
@@ -1636,7 +1667,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap, | |||
1636 | } | 1667 | } |
1637 | 1668 | ||
1638 | spin_lock_irqsave(ap->lock, flags); | 1669 | spin_lock_irqsave(ap->lock, flags); |
1639 | ap->flags |= ATA_FLAG_SCSI_HOTPLUG; | 1670 | ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; |
1640 | spin_unlock_irqrestore(ap->lock, flags); | 1671 | spin_unlock_irqrestore(ap->lock, flags); |
1641 | } | 1672 | } |
1642 | } | 1673 | } |
@@ -1648,6 +1679,164 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap, | |||
1648 | return rc; | 1679 | return rc; |
1649 | } | 1680 | } |
1650 | 1681 | ||
1682 | /** | ||
1683 | * ata_eh_suspend - handle suspend EH action | ||
1684 | * @ap: target host port | ||
1685 | * @r_failed_dev: result parameter to indicate failing device | ||
1686 | * | ||
1687 | * Handle suspend EH action. Disk devices are spun down and | ||
1688 | * other types of devices are just marked suspended. Once | ||
1689 | * suspended, no EH action to the device is allowed until it is | ||
1690 | * resumed. | ||
1691 | * | ||
1692 | * LOCKING: | ||
1693 | * Kernel thread context (may sleep). | ||
1694 | * | ||
1695 | * RETURNS: | ||
1696 | * 0 on success, -errno otherwise | ||
1697 | */ | ||
1698 | static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev) | ||
1699 | { | ||
1700 | struct ata_device *dev; | ||
1701 | int i, rc = 0; | ||
1702 | |||
1703 | DPRINTK("ENTER\n"); | ||
1704 | |||
1705 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | ||
1706 | unsigned long flags; | ||
1707 | unsigned int action, err_mask; | ||
1708 | |||
1709 | dev = &ap->device[i]; | ||
1710 | action = ata_eh_dev_action(dev); | ||
1711 | |||
1712 | if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND)) | ||
1713 | continue; | ||
1714 | |||
1715 | WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED); | ||
1716 | |||
1717 | ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND); | ||
1718 | |||
1719 | if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) { | ||
1720 | /* flush cache */ | ||
1721 | rc = ata_flush_cache(dev); | ||
1722 | if (rc) | ||
1723 | break; | ||
1724 | |||
1725 | /* spin down */ | ||
1726 | err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1); | ||
1727 | if (err_mask) { | ||
1728 | ata_dev_printk(dev, KERN_ERR, "failed to " | ||
1729 | "spin down (err_mask=0x%x)\n", | ||
1730 | err_mask); | ||
1731 | rc = -EIO; | ||
1732 | break; | ||
1733 | } | ||
1734 | } | ||
1735 | |||
1736 | spin_lock_irqsave(ap->lock, flags); | ||
1737 | dev->flags |= ATA_DFLAG_SUSPENDED; | ||
1738 | spin_unlock_irqrestore(ap->lock, flags); | ||
1739 | |||
1740 | ata_eh_done(ap, dev, ATA_EH_SUSPEND); | ||
1741 | } | ||
1742 | |||
1743 | if (rc) | ||
1744 | *r_failed_dev = dev; | ||
1745 | |||
1746 | DPRINTK("EXIT\n"); | ||
1747 | return 0; | ||
1748 | } | ||
1749 | |||
1750 | /** | ||
1751 | * ata_eh_prep_resume - prep for resume EH action | ||
1752 | * @ap: target host port | ||
1753 | * | ||
1754 | * Clear SUSPENDED in preparation for scheduled resume actions. | ||
1755 | * This allows other parts of EH to access the devices being | ||
1756 | * resumed. | ||
1757 | * | ||
1758 | * LOCKING: | ||
1759 | * Kernel thread context (may sleep). | ||
1760 | */ | ||
1761 | static void ata_eh_prep_resume(struct ata_port *ap) | ||
1762 | { | ||
1763 | struct ata_device *dev; | ||
1764 | unsigned long flags; | ||
1765 | int i; | ||
1766 | |||
1767 | DPRINTK("ENTER\n"); | ||
1768 | |||
1769 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | ||
1770 | unsigned int action; | ||
1771 | |||
1772 | dev = &ap->device[i]; | ||
1773 | action = ata_eh_dev_action(dev); | ||
1774 | |||
1775 | if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME)) | ||
1776 | continue; | ||
1777 | |||
1778 | spin_lock_irqsave(ap->lock, flags); | ||
1779 | dev->flags &= ~ATA_DFLAG_SUSPENDED; | ||
1780 | spin_unlock_irqrestore(ap->lock, flags); | ||
1781 | } | ||
1782 | |||
1783 | DPRINTK("EXIT\n"); | ||
1784 | } | ||
1785 | |||
1786 | /** | ||
1787 | * ata_eh_resume - handle resume EH action | ||
1788 | * @ap: target host port | ||
1789 | * @r_failed_dev: result parameter to indicate failing device | ||
1790 | * | ||
1791 | * Handle resume EH action. Target devices are already reset and | ||
1792 | * revalidated. Spinning up is the only operation left. | ||
1793 | * | ||
1794 | * LOCKING: | ||
1795 | * Kernel thread context (may sleep). | ||
1796 | * | ||
1797 | * RETURNS: | ||
1798 | * 0 on success, -errno otherwise | ||
1799 | */ | ||
1800 | static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev) | ||
1801 | { | ||
1802 | struct ata_device *dev; | ||
1803 | int i, rc = 0; | ||
1804 | |||
1805 | DPRINTK("ENTER\n"); | ||
1806 | |||
1807 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | ||
1808 | unsigned int action, err_mask; | ||
1809 | |||
1810 | dev = &ap->device[i]; | ||
1811 | action = ata_eh_dev_action(dev); | ||
1812 | |||
1813 | if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME)) | ||
1814 | continue; | ||
1815 | |||
1816 | ata_eh_about_to_do(ap, dev, ATA_EH_RESUME); | ||
1817 | |||
1818 | if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) { | ||
1819 | err_mask = ata_do_simple_cmd(dev, | ||
1820 | ATA_CMD_IDLEIMMEDIATE); | ||
1821 | if (err_mask) { | ||
1822 | ata_dev_printk(dev, KERN_ERR, "failed to " | ||
1823 | "spin up (err_mask=0x%x)\n", | ||
1824 | err_mask); | ||
1825 | rc = -EIO; | ||
1826 | break; | ||
1827 | } | ||
1828 | } | ||
1829 | |||
1830 | ata_eh_done(ap, dev, ATA_EH_RESUME); | ||
1831 | } | ||
1832 | |||
1833 | if (rc) | ||
1834 | *r_failed_dev = dev; | ||
1835 | |||
1836 | DPRINTK("EXIT\n"); | ||
1837 | return 0; | ||
1838 | } | ||
1839 | |||
1651 | static int ata_port_nr_enabled(struct ata_port *ap) | 1840 | static int ata_port_nr_enabled(struct ata_port *ap) |
1652 | { | 1841 | { |
1653 | int i, cnt = 0; | 1842 | int i, cnt = 0; |
@@ -1673,7 +1862,20 @@ static int ata_eh_skip_recovery(struct ata_port *ap) | |||
1673 | struct ata_eh_context *ehc = &ap->eh_context; | 1862 | struct ata_eh_context *ehc = &ap->eh_context; |
1674 | int i; | 1863 | int i; |
1675 | 1864 | ||
1676 | if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap)) | 1865 | /* skip if all possible devices are suspended */ |
1866 | for (i = 0; i < ata_port_max_devices(ap); i++) { | ||
1867 | struct ata_device *dev = &ap->device[i]; | ||
1868 | |||
1869 | if (!(dev->flags & ATA_DFLAG_SUSPENDED)) | ||
1870 | break; | ||
1871 | } | ||
1872 | |||
1873 | if (i == ata_port_max_devices(ap)) | ||
1874 | return 1; | ||
1875 | |||
1876 | /* thaw frozen port, resume link and recover failed devices */ | ||
1877 | if ((ap->pflags & ATA_PFLAG_FROZEN) || | ||
1878 | (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap)) | ||
1677 | return 0; | 1879 | return 0; |
1678 | 1880 | ||
1679 | /* skip if class codes for all vacant slots are ATA_DEV_NONE */ | 1881 | /* skip if class codes for all vacant slots are ATA_DEV_NONE */ |
@@ -1744,9 +1946,12 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
1744 | rc = 0; | 1946 | rc = 0; |
1745 | 1947 | ||
1746 | /* if UNLOADING, finish immediately */ | 1948 | /* if UNLOADING, finish immediately */ |
1747 | if (ap->flags & ATA_FLAG_UNLOADING) | 1949 | if (ap->pflags & ATA_PFLAG_UNLOADING) |
1748 | goto out; | 1950 | goto out; |
1749 | 1951 | ||
1952 | /* prep for resume */ | ||
1953 | ata_eh_prep_resume(ap); | ||
1954 | |||
1750 | /* skip EH if possible. */ | 1955 | /* skip EH if possible. */ |
1751 | if (ata_eh_skip_recovery(ap)) | 1956 | if (ata_eh_skip_recovery(ap)) |
1752 | ehc->i.action = 0; | 1957 | ehc->i.action = 0; |
@@ -1774,6 +1979,11 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
1774 | if (rc) | 1979 | if (rc) |
1775 | goto dev_fail; | 1980 | goto dev_fail; |
1776 | 1981 | ||
1982 | /* resume devices */ | ||
1983 | rc = ata_eh_resume(ap, &dev); | ||
1984 | if (rc) | ||
1985 | goto dev_fail; | ||
1986 | |||
1777 | /* configure transfer mode if the port has been reset */ | 1987 | /* configure transfer mode if the port has been reset */ |
1778 | if (ehc->i.flags & ATA_EHI_DID_RESET) { | 1988 | if (ehc->i.flags & ATA_EHI_DID_RESET) { |
1779 | rc = ata_set_mode(ap, &dev); | 1989 | rc = ata_set_mode(ap, &dev); |
@@ -1783,6 +1993,11 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
1783 | } | 1993 | } |
1784 | } | 1994 | } |
1785 | 1995 | ||
1996 | /* suspend devices */ | ||
1997 | rc = ata_eh_suspend(ap, &dev); | ||
1998 | if (rc) | ||
1999 | goto dev_fail; | ||
2000 | |||
1786 | goto out; | 2001 | goto out; |
1787 | 2002 | ||
1788 | dev_fail: | 2003 | dev_fail: |
@@ -1908,11 +2123,124 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
1908 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, | 2123 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, |
1909 | ata_postreset_fn_t postreset) | 2124 | ata_postreset_fn_t postreset) |
1910 | { | 2125 | { |
1911 | if (!(ap->flags & ATA_FLAG_LOADING)) { | 2126 | ata_eh_autopsy(ap); |
1912 | ata_eh_autopsy(ap); | 2127 | ata_eh_report(ap); |
1913 | ata_eh_report(ap); | ||
1914 | } | ||
1915 | |||
1916 | ata_eh_recover(ap, prereset, softreset, hardreset, postreset); | 2128 | ata_eh_recover(ap, prereset, softreset, hardreset, postreset); |
1917 | ata_eh_finish(ap); | 2129 | ata_eh_finish(ap); |
1918 | } | 2130 | } |
2131 | |||
2132 | /** | ||
2133 | * ata_eh_handle_port_suspend - perform port suspend operation | ||
2134 | * @ap: port to suspend | ||
2135 | * | ||
2136 | * Suspend @ap. | ||
2137 | * | ||
2138 | * LOCKING: | ||
2139 | * Kernel thread context (may sleep). | ||
2140 | */ | ||
2141 | static void ata_eh_handle_port_suspend(struct ata_port *ap) | ||
2142 | { | ||
2143 | unsigned long flags; | ||
2144 | int rc = 0; | ||
2145 | |||
2146 | /* are we suspending? */ | ||
2147 | spin_lock_irqsave(ap->lock, flags); | ||
2148 | if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || | ||
2149 | ap->pm_mesg.event == PM_EVENT_ON) { | ||
2150 | spin_unlock_irqrestore(ap->lock, flags); | ||
2151 | return; | ||
2152 | } | ||
2153 | spin_unlock_irqrestore(ap->lock, flags); | ||
2154 | |||
2155 | WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); | ||
2156 | |||
2157 | /* suspend */ | ||
2158 | ata_eh_freeze_port(ap); | ||
2159 | |||
2160 | if (ap->ops->port_suspend) | ||
2161 | rc = ap->ops->port_suspend(ap, ap->pm_mesg); | ||
2162 | |||
2163 | /* report result */ | ||
2164 | spin_lock_irqsave(ap->lock, flags); | ||
2165 | |||
2166 | ap->pflags &= ~ATA_PFLAG_PM_PENDING; | ||
2167 | if (rc == 0) | ||
2168 | ap->pflags |= ATA_PFLAG_SUSPENDED; | ||
2169 | else | ||
2170 | ata_port_schedule_eh(ap); | ||
2171 | |||
2172 | if (ap->pm_result) { | ||
2173 | *ap->pm_result = rc; | ||
2174 | ap->pm_result = NULL; | ||
2175 | } | ||
2176 | |||
2177 | spin_unlock_irqrestore(ap->lock, flags); | ||
2178 | |||
2179 | return; | ||
2180 | } | ||
2181 | |||
2182 | /** | ||
2183 | * ata_eh_handle_port_resume - perform port resume operation | ||
2184 | * @ap: port to resume | ||
2185 | * | ||
2186 | * Resume @ap. | ||
2187 | * | ||
2188 | * This function also waits up to one second until all devices | ||
2189 | * hanging off this port request resume EH action. This is to | ||
2190 | * prevent invoking EH, and thus resetting, multiple times on resume. | ||
2191 | * | ||
2192 | * On DPM resume, where some of the devices might not be resumed | ||
2193 | * together, this may delay port resume by up to one second, but such | ||
2194 | * DPM resumes are rare and a one-second delay isn't too bad. | ||
2195 | * | ||
2196 | * LOCKING: | ||
2197 | * Kernel thread context (may sleep). | ||
2198 | */ | ||
2199 | static void ata_eh_handle_port_resume(struct ata_port *ap) | ||
2200 | { | ||
2201 | unsigned long timeout; | ||
2202 | unsigned long flags; | ||
2203 | int i, rc = 0; | ||
2204 | |||
2205 | /* are we resuming? */ | ||
2206 | spin_lock_irqsave(ap->lock, flags); | ||
2207 | if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || | ||
2208 | ap->pm_mesg.event != PM_EVENT_ON) { | ||
2209 | spin_unlock_irqrestore(ap->lock, flags); | ||
2210 | return; | ||
2211 | } | ||
2212 | spin_unlock_irqrestore(ap->lock, flags); | ||
2213 | |||
2214 | /* spurious? */ | ||
2215 | if (!(ap->pflags & ATA_PFLAG_SUSPENDED)) | ||
2216 | goto done; | ||
2217 | |||
2218 | if (ap->ops->port_resume) | ||
2219 | rc = ap->ops->port_resume(ap); | ||
2220 | |||
2221 | /* give devices time to request EH */ | ||
2222 | timeout = jiffies + HZ; /* 1s max */ | ||
2223 | while (1) { | ||
2224 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | ||
2225 | struct ata_device *dev = &ap->device[i]; | ||
2226 | unsigned int action = ata_eh_dev_action(dev); | ||
2227 | |||
2228 | if ((dev->flags & ATA_DFLAG_SUSPENDED) && | ||
2229 | !(action & ATA_EH_RESUME)) | ||
2230 | break; | ||
2231 | } | ||
2232 | |||
2233 | if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout)) | ||
2234 | break; | ||
2235 | msleep(10); | ||
2236 | } | ||
2237 | |||
2238 | done: | ||
2239 | spin_lock_irqsave(ap->lock, flags); | ||
2240 | ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); | ||
2241 | if (ap->pm_result) { | ||
2242 | *ap->pm_result = rc; | ||
2243 | ap->pm_result = NULL; | ||
2244 | } | ||
2245 | spin_unlock_irqrestore(ap->lock, flags); | ||
2246 | } | ||
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c index 2915bca691e8..e92c31d698ff 100644 --- a/drivers/scsi/libata-scsi.c +++ b/drivers/scsi/libata-scsi.c | |||
@@ -397,20 +397,129 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf) | |||
397 | } | 397 | } |
398 | } | 398 | } |
399 | 399 | ||
400 | int ata_scsi_device_resume(struct scsi_device *sdev) | 400 | /** |
401 | * ata_scsi_device_suspend - suspend ATA device associated with sdev | ||
402 | * @sdev: the SCSI device to suspend | ||
403 | * @state: target power management state | ||
404 | * | ||
405 | * Request suspend EH action on the ATA device associated with | ||
406 | * @sdev and wait for the operation to complete. | ||
407 | * | ||
408 | * LOCKING: | ||
409 | * Kernel thread context (may sleep). | ||
410 | * | ||
411 | * RETURNS: | ||
412 | * 0 on success, -errno otherwise. | ||
413 | */ | ||
414 | int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state) | ||
401 | { | 415 | { |
402 | struct ata_port *ap = ata_shost_to_port(sdev->host); | 416 | struct ata_port *ap = ata_shost_to_port(sdev->host); |
403 | struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); | 417 | struct ata_device *dev = ata_scsi_find_dev(ap, sdev); |
418 | unsigned long flags; | ||
419 | unsigned int action; | ||
420 | int rc = 0; | ||
421 | |||
422 | if (!dev) | ||
423 | goto out; | ||
424 | |||
425 | spin_lock_irqsave(ap->lock, flags); | ||
426 | |||
427 | /* wait for the previous resume to complete */ | ||
428 | while (dev->flags & ATA_DFLAG_SUSPENDED) { | ||
429 | spin_unlock_irqrestore(ap->lock, flags); | ||
430 | ata_port_wait_eh(ap); | ||
431 | spin_lock_irqsave(ap->lock, flags); | ||
432 | } | ||
433 | |||
434 | /* if @sdev is already detached, nothing to do */ | ||
435 | if (sdev->sdev_state == SDEV_OFFLINE || | ||
436 | sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL) | ||
437 | goto out_unlock; | ||
438 | |||
439 | /* request suspend */ | ||
440 | action = ATA_EH_SUSPEND; | ||
441 | if (state.event != PM_EVENT_SUSPEND) | ||
442 | action |= ATA_EH_PM_FREEZE; | ||
443 | ap->eh_info.dev_action[dev->devno] |= action; | ||
444 | ap->eh_info.flags |= ATA_EHI_QUIET; | ||
445 | ata_port_schedule_eh(ap); | ||
446 | |||
447 | spin_unlock_irqrestore(ap->lock, flags); | ||
448 | |||
449 | /* wait for EH to do the job */ | ||
450 | ata_port_wait_eh(ap); | ||
451 | |||
452 | spin_lock_irqsave(ap->lock, flags); | ||
453 | |||
454 | /* If @sdev is still attached but the associated ATA device | ||
455 | * isn't suspended, the operation failed. | ||
456 | */ | ||
457 | if (sdev->sdev_state != SDEV_OFFLINE && | ||
458 | sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL && | ||
459 | !(dev->flags & ATA_DFLAG_SUSPENDED)) | ||
460 | rc = -EIO; | ||
404 | 461 | ||
405 | return ata_device_resume(dev); | 462 | out_unlock: |
463 | spin_unlock_irqrestore(ap->lock, flags); | ||
464 | out: | ||
465 | if (rc == 0) | ||
466 | sdev->sdev_gendev.power.power_state = state; | ||
467 | return rc; | ||
406 | } | 468 | } |
407 | 469 | ||
408 | int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state) | 470 | /** |
471 | * ata_scsi_device_resume - resume ATA device associated with sdev | ||
472 | * @sdev: the SCSI device to resume | ||
473 | * | ||
474 | * Request resume EH action on the ATA device associated with | ||
475 | * @sdev and return immediately. This enables parallel | ||
476 | * wakeup/spinup of devices. | ||
477 | * | ||
478 | * LOCKING: | ||
479 | * Kernel thread context (may sleep). | ||
480 | * | ||
481 | * RETURNS: | ||
482 | * 0. | ||
483 | */ | ||
484 | int ata_scsi_device_resume(struct scsi_device *sdev) | ||
409 | { | 485 | { |
410 | struct ata_port *ap = ata_shost_to_port(sdev->host); | 486 | struct ata_port *ap = ata_shost_to_port(sdev->host); |
411 | struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); | 487 | struct ata_device *dev = ata_scsi_find_dev(ap, sdev); |
488 | struct ata_eh_info *ehi = &ap->eh_info; | ||
489 | unsigned long flags; | ||
490 | unsigned int action; | ||
412 | 491 | ||
413 | return ata_device_suspend(dev, state); | 492 | if (!dev) |
493 | goto out; | ||
494 | |||
495 | spin_lock_irqsave(ap->lock, flags); | ||
496 | |||
497 | /* if @sdev is already detached, nothing to do */ | ||
498 | if (sdev->sdev_state == SDEV_OFFLINE || | ||
499 | sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL) | ||
500 | goto out_unlock; | ||
501 | |||
502 | /* request resume */ | ||
503 | action = ATA_EH_RESUME; | ||
504 | if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND) | ||
505 | __ata_ehi_hotplugged(ehi); | ||
506 | else | ||
507 | action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET; | ||
508 | ehi->dev_action[dev->devno] |= action; | ||
509 | |||
510 | /* We don't want autopsy and verbose EH messages. Disable | ||
511 | * those if we're the only device on this link. | ||
512 | */ | ||
513 | if (ata_port_max_devices(ap) == 1) | ||
514 | ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; | ||
515 | |||
516 | ata_port_schedule_eh(ap); | ||
517 | |||
518 | out_unlock: | ||
519 | spin_unlock_irqrestore(ap->lock, flags); | ||
520 | out: | ||
521 | sdev->sdev_gendev.power.power_state = PMSG_ON; | ||
522 | return 0; | ||
414 | } | 523 | } |
415 | 524 | ||
416 | /** | 525 | /** |
@@ -2244,6 +2353,19 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) | |||
2244 | ata_gen_ata_desc_sense(qc); | 2353 | ata_gen_ata_desc_sense(qc); |
2245 | } | 2354 | } |
2246 | 2355 | ||
2356 | /* SCSI EH automatically locks door if sdev->locked is | ||
2357 | * set. Sometimes door lock request continues to | ||
2358 | * fail, for example, when no media is present. This | ||
2359 | * creates a loop - SCSI EH issues door lock which | ||
2360 | * fails and gets invoked again to acquire sense data | ||
2361 | * for the failed command. | ||
2362 | * | ||
2363 | * If door lock fails, always clear sdev->locked to | ||
2364 | * avoid this infinite loop. | ||
2365 | */ | ||
2366 | if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) | ||
2367 | qc->dev->sdev->locked = 0; | ||
2368 | |||
2247 | qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; | 2369 | qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; |
2248 | qc->scsidone(cmd); | 2370 | qc->scsidone(cmd); |
2249 | ata_qc_free(qc); | 2371 | ata_qc_free(qc); |
@@ -2930,7 +3052,7 @@ void ata_scsi_hotplug(void *data) | |||
2930 | struct ata_port *ap = data; | 3052 | struct ata_port *ap = data; |
2931 | int i; | 3053 | int i; |
2932 | 3054 | ||
2933 | if (ap->flags & ATA_FLAG_UNLOADING) { | 3055 | if (ap->pflags & ATA_PFLAG_UNLOADING) { |
2934 | DPRINTK("ENTER/EXIT - unloading\n"); | 3056 | DPRINTK("ENTER/EXIT - unloading\n"); |
2935 | return; | 3057 | return; |
2936 | } | 3058 | } |
@@ -3011,6 +3133,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, | |||
3011 | if (dev) { | 3133 | if (dev) { |
3012 | ap->eh_info.probe_mask |= 1 << dev->devno; | 3134 | ap->eh_info.probe_mask |= 1 << dev->devno; |
3013 | ap->eh_info.action |= ATA_EH_SOFTRESET; | 3135 | ap->eh_info.action |= ATA_EH_SOFTRESET; |
3136 | ap->eh_info.flags |= ATA_EHI_RESUME_LINK; | ||
3014 | } else | 3137 | } else |
3015 | rc = -EINVAL; | 3138 | rc = -EINVAL; |
3016 | } | 3139 | } |
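The libata-scsi.c suspend/resume hooks above translate the PM event into per-device EH action bits before scheduling EH: a plain suspend flushes the cache and spins the disk down, a freeze skips the spin-down, and a resume from full suspend is treated like a hotplug so the device gets reset and revalidated. The small sketch below restates only that bit-selection step; the enum and the EH_*-style constants are invented stand-ins for the real ATA_EH_* flags, and the hotplug case is reduced to a boolean.

#include <stdio.h>

enum pm_event { PM_EVENT_ON, PM_EVENT_SUSPEND, PM_EVENT_FREEZE };

#define EH_SUSPEND   (1u << 0)
#define EH_RESUME    (1u << 1)
#define EH_PM_FREEZE (1u << 2)
#define EH_SOFTRESET (1u << 3)

/* suspend: spin the disk down unless we are only freezing */
static unsigned int suspend_action(enum pm_event ev)
{
	unsigned int action = EH_SUSPEND;

	if (ev != PM_EVENT_SUSPEND)	/* freeze etc.: skip the spin-down */
		action |= EH_PM_FREEZE;
	return action;
}

/* resume: previous full suspend is handled like a hotplug event,
 * otherwise only a soft reset plus the freeze-style resume is asked for
 */
static unsigned int resume_action(enum pm_event prev, int *hotplug)
{
	unsigned int action = EH_RESUME;

	*hotplug = 0;
	if (prev == PM_EVENT_SUSPEND)
		*hotplug = 1;
	else
		action |= EH_PM_FREEZE | EH_SOFTRESET;
	return action;
}

int main(void)
{
	int hotplug;

	printf("suspend: 0x%x\n", suspend_action(PM_EVENT_SUSPEND));
	printf("freeze:  0x%x\n", suspend_action(PM_EVENT_FREEZE));
	printf("resume:  0x%x hotplug=%d\n",
	       resume_action(PM_EVENT_SUSPEND, &hotplug), hotplug);
	return 0;
}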
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 7e6e031cc41b..5884cd26d53a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -189,6 +189,7 @@ static void iscsi_complete_command(struct iscsi_session *session, | |||
189 | { | 189 | { |
190 | struct scsi_cmnd *sc = ctask->sc; | 190 | struct scsi_cmnd *sc = ctask->sc; |
191 | 191 | ||
192 | ctask->state = ISCSI_TASK_COMPLETED; | ||
192 | ctask->sc = NULL; | 193 | ctask->sc = NULL; |
193 | list_del_init(&ctask->running); | 194 | list_del_init(&ctask->running); |
194 | __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); | 195 | __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); |
@@ -275,6 +276,25 @@ out: | |||
275 | return rc; | 276 | return rc; |
276 | } | 277 | } |
277 | 278 | ||
279 | static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | ||
280 | { | ||
281 | struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr; | ||
282 | |||
283 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; | ||
284 | conn->tmfrsp_pdus_cnt++; | ||
285 | |||
286 | if (conn->tmabort_state != TMABORT_INITIAL) | ||
287 | return; | ||
288 | |||
289 | if (tmf->response == ISCSI_TMF_RSP_COMPLETE) | ||
290 | conn->tmabort_state = TMABORT_SUCCESS; | ||
291 | else if (tmf->response == ISCSI_TMF_RSP_NO_TASK) | ||
292 | conn->tmabort_state = TMABORT_NOT_FOUND; | ||
293 | else | ||
294 | conn->tmabort_state = TMABORT_FAILED; | ||
295 | wake_up(&conn->ehwait); | ||
296 | } | ||
297 | |||
278 | /** | 298 | /** |
279 | * __iscsi_complete_pdu - complete pdu | 299 | * __iscsi_complete_pdu - complete pdu |
280 | * @conn: iscsi conn | 300 | * @conn: iscsi conn |
@@ -340,6 +360,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
340 | 360 | ||
341 | switch(opcode) { | 361 | switch(opcode) { |
342 | case ISCSI_OP_LOGOUT_RSP: | 362 | case ISCSI_OP_LOGOUT_RSP: |
363 | if (datalen) { | ||
364 | rc = ISCSI_ERR_PROTO; | ||
365 | break; | ||
366 | } | ||
343 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; | 367 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; |
344 | /* fall through */ | 368 | /* fall through */ |
345 | case ISCSI_OP_LOGIN_RSP: | 369 | case ISCSI_OP_LOGIN_RSP: |
@@ -348,7 +372,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
348 | * login related PDU's exp_statsn is handled in | 372 | * login related PDU's exp_statsn is handled in |
349 | * userspace | 373 | * userspace |
350 | */ | 374 | */ |
351 | rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); | 375 | if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) |
376 | rc = ISCSI_ERR_CONN_FAILED; | ||
352 | list_del(&mtask->running); | 377 | list_del(&mtask->running); |
353 | if (conn->login_mtask != mtask) | 378 | if (conn->login_mtask != mtask) |
354 | __kfifo_put(session->mgmtpool.queue, | 379 | __kfifo_put(session->mgmtpool.queue, |
@@ -360,25 +385,17 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
360 | break; | 385 | break; |
361 | } | 386 | } |
362 | 387 | ||
363 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; | 388 | iscsi_tmf_rsp(conn, hdr); |
364 | conn->tmfrsp_pdus_cnt++; | ||
365 | if (conn->tmabort_state == TMABORT_INITIAL) { | ||
366 | conn->tmabort_state = | ||
367 | ((struct iscsi_tm_rsp *)hdr)-> | ||
368 | response == ISCSI_TMF_RSP_COMPLETE ? | ||
369 | TMABORT_SUCCESS:TMABORT_FAILED; | ||
370 | /* unblock eh_abort() */ | ||
371 | wake_up(&conn->ehwait); | ||
372 | } | ||
373 | break; | 389 | break; |
374 | case ISCSI_OP_NOOP_IN: | 390 | case ISCSI_OP_NOOP_IN: |
375 | if (hdr->ttt != ISCSI_RESERVED_TAG) { | 391 | if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) { |
376 | rc = ISCSI_ERR_PROTO; | 392 | rc = ISCSI_ERR_PROTO; |
377 | break; | 393 | break; |
378 | } | 394 | } |
379 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; | 395 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; |
380 | 396 | ||
381 | rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); | 397 | if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) |
398 | rc = ISCSI_ERR_CONN_FAILED; | ||
382 | list_del(&mtask->running); | 399 | list_del(&mtask->running); |
383 | if (conn->login_mtask != mtask) | 400 | if (conn->login_mtask != mtask) |
384 | __kfifo_put(session->mgmtpool.queue, | 401 | __kfifo_put(session->mgmtpool.queue, |
@@ -391,14 +408,21 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
391 | } else if (itt == ISCSI_RESERVED_TAG) { | 408 | } else if (itt == ISCSI_RESERVED_TAG) { |
392 | switch(opcode) { | 409 | switch(opcode) { |
393 | case ISCSI_OP_NOOP_IN: | 410 | case ISCSI_OP_NOOP_IN: |
394 | if (!datalen) { | 411 | if (datalen) { |
395 | rc = iscsi_check_assign_cmdsn(session, | ||
396 | (struct iscsi_nopin*)hdr); | ||
397 | if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) | ||
398 | rc = iscsi_recv_pdu(conn->cls_conn, | ||
399 | hdr, NULL, 0); | ||
400 | } else | ||
401 | rc = ISCSI_ERR_PROTO; | 412 | rc = ISCSI_ERR_PROTO; |
413 | break; | ||
414 | } | ||
415 | |||
416 | rc = iscsi_check_assign_cmdsn(session, | ||
417 | (struct iscsi_nopin*)hdr); | ||
418 | if (rc) | ||
419 | break; | ||
420 | |||
421 | if (hdr->ttt == ISCSI_RESERVED_TAG) | ||
422 | break; | ||
423 | |||
424 | if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0)) | ||
425 | rc = ISCSI_ERR_CONN_FAILED; | ||
402 | break; | 426 | break; |
403 | case ISCSI_OP_REJECT: | 427 | case ISCSI_OP_REJECT: |
404 | /* we need something like iscsi_reject_rsp() */ | 428 | /* we need something like iscsi_reject_rsp() */ |
@@ -568,20 +592,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) | |||
568 | } | 592 | } |
569 | 593 | ||
570 | /* process command queue */ | 594 | /* process command queue */ |
571 | while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask, | 595 | spin_lock_bh(&conn->session->lock); |
572 | sizeof(void*))) { | 596 | while (!list_empty(&conn->xmitqueue)) { |
573 | /* | 597 | /* |
574 | * iscsi tcp may readd the task to the xmitqueue to send | 598 | * iscsi tcp may readd the task to the xmitqueue to send |
575 | * write data | 599 | * write data |
576 | */ | 600 | */ |
577 | spin_lock_bh(&conn->session->lock); | 601 | conn->ctask = list_entry(conn->xmitqueue.next, |
578 | if (list_empty(&conn->ctask->running)) | 602 | struct iscsi_cmd_task, running); |
579 | list_add_tail(&conn->ctask->running, &conn->run_list); | 603 | conn->ctask->state = ISCSI_TASK_RUNNING; |
604 | list_move_tail(conn->xmitqueue.next, &conn->run_list); | ||
580 | spin_unlock_bh(&conn->session->lock); | 605 | spin_unlock_bh(&conn->session->lock); |
606 | |||
581 | rc = tt->xmit_cmd_task(conn, conn->ctask); | 607 | rc = tt->xmit_cmd_task(conn, conn->ctask); |
582 | if (rc) | 608 | if (rc) |
583 | goto again; | 609 | goto again; |
610 | spin_lock_bh(&conn->session->lock); | ||
584 | } | 611 | } |
612 | spin_unlock_bh(&conn->session->lock); | ||
585 | /* done with this ctask */ | 613 | /* done with this ctask */ |
586 | conn->ctask = NULL; | 614 | conn->ctask = NULL; |
587 | 615 | ||
@@ -691,6 +719,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | |||
691 | sc->SCp.phase = session->age; | 719 | sc->SCp.phase = session->age; |
692 | sc->SCp.ptr = (char *)ctask; | 720 | sc->SCp.ptr = (char *)ctask; |
693 | 721 | ||
722 | ctask->state = ISCSI_TASK_PENDING; | ||
694 | ctask->mtask = NULL; | 723 | ctask->mtask = NULL; |
695 | ctask->conn = conn; | 724 | ctask->conn = conn; |
696 | ctask->sc = sc; | 725 | ctask->sc = sc; |
@@ -700,7 +729,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | |||
700 | 729 | ||
701 | session->tt->init_cmd_task(ctask); | 730 | session->tt->init_cmd_task(ctask); |
702 | 731 | ||
703 | __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); | 732 | list_add_tail(&ctask->running, &conn->xmitqueue); |
704 | debug_scsi( | 733 | debug_scsi( |
705 | "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", | 734 | "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", |
706 | sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", | 735 | sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", |
@@ -977,31 +1006,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc, | |||
977 | /* | 1006 | /* |
978 | * xmit mutex and session lock must be held | 1007 | * xmit mutex and session lock must be held |
979 | */ | 1008 | */ |
980 | #define iscsi_remove_task(tasktype) \ | 1009 | static struct iscsi_mgmt_task * |
981 | static struct iscsi_##tasktype * \ | 1010 | iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt) |
982 | iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt) \ | 1011 | { |
983 | { \ | 1012 | int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); |
984 | int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); \ | 1013 | struct iscsi_mgmt_task *task; |
985 | struct iscsi_##tasktype *task; \ | ||
986 | \ | ||
987 | debug_scsi("searching %d tasks\n", nr_tasks); \ | ||
988 | \ | ||
989 | for (i = 0; i < nr_tasks; i++) { \ | ||
990 | __kfifo_get(fifo, (void*)&task, sizeof(void*)); \ | ||
991 | debug_scsi("check task %u\n", task->itt); \ | ||
992 | \ | ||
993 | if (task->itt == itt) { \ | ||
994 | debug_scsi("matched task\n"); \ | ||
995 | return task; \ | ||
996 | } \ | ||
997 | \ | ||
998 | __kfifo_put(fifo, (void*)&task, sizeof(void*)); \ | ||
999 | } \ | ||
1000 | return NULL; \ | ||
1001 | } | ||
1002 | 1014 | ||
1003 | iscsi_remove_task(mgmt_task); | 1015 | debug_scsi("searching %d tasks\n", nr_tasks); |
1004 | iscsi_remove_task(cmd_task); | 1016 | |
1017 | for (i = 0; i < nr_tasks; i++) { | ||
1018 | __kfifo_get(fifo, (void*)&task, sizeof(void*)); | ||
1019 | debug_scsi("check task %u\n", task->itt); | ||
1020 | |||
1021 | if (task->itt == itt) { | ||
1022 | debug_scsi("matched task\n"); | ||
1023 | return task; | ||
1024 | } | ||
1025 | |||
1026 | __kfifo_put(fifo, (void*)&task, sizeof(void*)); | ||
1027 | } | ||
1028 | return NULL; | ||
1029 | } | ||
1005 | 1030 | ||
1006 | static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask) | 1031 | static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask) |
1007 | { | 1032 | { |
@@ -1027,12 +1052,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | |||
1027 | { | 1052 | { |
1028 | struct scsi_cmnd *sc; | 1053 | struct scsi_cmnd *sc; |
1029 | 1054 | ||
1030 | conn->session->tt->cleanup_cmd_task(conn, ctask); | ||
1031 | iscsi_ctask_mtask_cleanup(ctask); | ||
1032 | |||
1033 | sc = ctask->sc; | 1055 | sc = ctask->sc; |
1034 | if (!sc) | 1056 | if (!sc) |
1035 | return; | 1057 | return; |
1058 | |||
1059 | conn->session->tt->cleanup_cmd_task(conn, ctask); | ||
1060 | iscsi_ctask_mtask_cleanup(ctask); | ||
1061 | |||
1036 | sc->result = err; | 1062 | sc->result = err; |
1037 | sc->resid = sc->request_bufflen; | 1063 | sc->resid = sc->request_bufflen; |
1038 | iscsi_complete_command(conn->session, ctask); | 1064 | iscsi_complete_command(conn->session, ctask); |
@@ -1043,7 +1069,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1043 | struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; | 1069 | struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; |
1044 | struct iscsi_conn *conn = ctask->conn; | 1070 | struct iscsi_conn *conn = ctask->conn; |
1045 | struct iscsi_session *session = conn->session; | 1071 | struct iscsi_session *session = conn->session; |
1046 | struct iscsi_cmd_task *pending_ctask; | ||
1047 | int rc; | 1072 | int rc; |
1048 | 1073 | ||
1049 | conn->eh_abort_cnt++; | 1074 | conn->eh_abort_cnt++; |
@@ -1061,8 +1086,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1061 | goto failed; | 1086 | goto failed; |
1062 | 1087 | ||
1063 | /* ctask completed before time out */ | 1088 | /* ctask completed before time out */ |
1064 | if (!ctask->sc) | 1089 | if (!ctask->sc) { |
1065 | goto success; | 1090 | spin_unlock_bh(&session->lock); |
1091 | debug_scsi("sc completed while abort in progress\n"); | ||
1092 | goto success_rel_mutex; | ||
1093 | } | ||
1066 | 1094 | ||
1067 | /* what should we do here ? */ | 1095 | /* what should we do here ? */ |
1068 | if (conn->ctask == ctask) { | 1096 | if (conn->ctask == ctask) { |
@@ -1071,17 +1099,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1071 | goto failed; | 1099 | goto failed; |
1072 | } | 1100 | } |
1073 | 1101 | ||
1074 | /* check for the easy pending cmd abort */ | 1102 | if (ctask->state == ISCSI_TASK_PENDING) |
1075 | pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt); | 1103 | goto success_cleanup; |
1076 | if (pending_ctask) { | ||
1077 | /* iscsi_tcp queues write transfers on the xmitqueue */ | ||
1078 | if (list_empty(&pending_ctask->running)) { | ||
1079 | debug_scsi("found pending task\n"); | ||
1080 | goto success; | ||
1081 | } else | ||
1082 | __kfifo_put(conn->xmitqueue, (void*)&pending_ctask, | ||
1083 | sizeof(void*)); | ||
1084 | } | ||
1085 | 1104 | ||
1086 | conn->tmabort_state = TMABORT_INITIAL; | 1105 | conn->tmabort_state = TMABORT_INITIAL; |
1087 | 1106 | ||
@@ -1089,25 +1108,31 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1089 | rc = iscsi_exec_abort_task(sc, ctask); | 1108 | rc = iscsi_exec_abort_task(sc, ctask); |
1090 | spin_lock_bh(&session->lock); | 1109 | spin_lock_bh(&session->lock); |
1091 | 1110 | ||
1092 | iscsi_ctask_mtask_cleanup(ctask); | ||
1093 | if (rc || sc->SCp.phase != session->age || | 1111 | if (rc || sc->SCp.phase != session->age || |
1094 | session->state != ISCSI_STATE_LOGGED_IN) | 1112 | session->state != ISCSI_STATE_LOGGED_IN) |
1095 | goto failed; | 1113 | goto failed; |
1114 | iscsi_ctask_mtask_cleanup(ctask); | ||
1096 | 1115 | ||
1097 | /* ctask completed before tmf abort response */ | 1116 | switch (conn->tmabort_state) { |
1098 | if (!ctask->sc) { | 1117 | case TMABORT_SUCCESS: |
1099 | debug_scsi("sc completed while abort in progress\n"); | 1118 | goto success_cleanup; |
1100 | goto success; | 1119 | case TMABORT_NOT_FOUND: |
1101 | } | 1120 | if (!ctask->sc) { |
1102 | 1121 | /* ctask completed before tmf abort response */ | |
1103 | if (conn->tmabort_state != TMABORT_SUCCESS) { | 1122 | spin_unlock_bh(&session->lock); |
1123 | debug_scsi("sc completed while abort in progress\n"); | ||
1124 | goto success_rel_mutex; | ||
1125 | } | ||
1126 | /* fall through */ | ||
1127 | default: | ||
1128 | /* timedout or failed */ | ||
1104 | spin_unlock_bh(&session->lock); | 1129 | spin_unlock_bh(&session->lock); |
1105 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 1130 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
1106 | spin_lock_bh(&session->lock); | 1131 | spin_lock_bh(&session->lock); |
1107 | goto failed; | 1132 | goto failed; |
1108 | } | 1133 | } |
1109 | 1134 | ||
1110 | success: | 1135 | success_cleanup: |
1111 | debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); | 1136 | debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); |
1112 | spin_unlock_bh(&session->lock); | 1137 | spin_unlock_bh(&session->lock); |
1113 | 1138 | ||
@@ -1121,6 +1146,7 @@ success: | |||
1121 | spin_unlock(&session->lock); | 1146 | spin_unlock(&session->lock); |
1122 | write_unlock_bh(conn->recv_lock); | 1147 | write_unlock_bh(conn->recv_lock); |
1123 | 1148 | ||
1149 | success_rel_mutex: | ||
1124 | mutex_unlock(&conn->xmitmutex); | 1150 | mutex_unlock(&conn->xmitmutex); |
1125 | return SUCCESS; | 1151 | return SUCCESS; |
1126 | 1152 | ||
@@ -1263,6 +1289,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, | |||
1263 | if (cmd_task_size) | 1289 | if (cmd_task_size) |
1264 | ctask->dd_data = &ctask[1]; | 1290 | ctask->dd_data = &ctask[1]; |
1265 | ctask->itt = cmd_i; | 1291 | ctask->itt = cmd_i; |
1292 | INIT_LIST_HEAD(&ctask->running); | ||
1266 | } | 1293 | } |
1267 | 1294 | ||
1268 | spin_lock_init(&session->lock); | 1295 | spin_lock_init(&session->lock); |
@@ -1282,6 +1309,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, | |||
1282 | if (mgmt_task_size) | 1309 | if (mgmt_task_size) |
1283 | mtask->dd_data = &mtask[1]; | 1310 | mtask->dd_data = &mtask[1]; |
1284 | mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; | 1311 | mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; |
1312 | INIT_LIST_HEAD(&mtask->running); | ||
1285 | } | 1313 | } |
1286 | 1314 | ||
1287 | if (scsi_add_host(shost, NULL)) | 1315 | if (scsi_add_host(shost, NULL)) |
@@ -1322,15 +1350,18 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) | |||
1322 | { | 1350 | { |
1323 | struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); | 1351 | struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); |
1324 | struct iscsi_session *session = iscsi_hostdata(shost->hostdata); | 1352 | struct iscsi_session *session = iscsi_hostdata(shost->hostdata); |
1353 | struct module *owner = cls_session->transport->owner; | ||
1325 | 1354 | ||
1326 | scsi_remove_host(shost); | 1355 | scsi_remove_host(shost); |
1327 | 1356 | ||
1328 | iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); | 1357 | iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); |
1329 | iscsi_pool_free(&session->cmdpool, (void**)session->cmds); | 1358 | iscsi_pool_free(&session->cmdpool, (void**)session->cmds); |
1330 | 1359 | ||
1360 | kfree(session->targetname); | ||
1361 | |||
1331 | iscsi_destroy_session(cls_session); | 1362 | iscsi_destroy_session(cls_session); |
1332 | scsi_host_put(shost); | 1363 | scsi_host_put(shost); |
1333 | module_put(cls_session->transport->owner); | 1364 | module_put(owner); |
1334 | } | 1365 | } |
1335 | EXPORT_SYMBOL_GPL(iscsi_session_teardown); | 1366 | EXPORT_SYMBOL_GPL(iscsi_session_teardown); |
1336 | 1367 | ||
@@ -1361,12 +1392,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
1361 | conn->tmabort_state = TMABORT_INITIAL; | 1392 | conn->tmabort_state = TMABORT_INITIAL; |
1362 | INIT_LIST_HEAD(&conn->run_list); | 1393 | INIT_LIST_HEAD(&conn->run_list); |
1363 | INIT_LIST_HEAD(&conn->mgmt_run_list); | 1394 | INIT_LIST_HEAD(&conn->mgmt_run_list); |
1364 | 1395 | INIT_LIST_HEAD(&conn->xmitqueue); | |
1365 | /* initialize general xmit PDU commands queue */ | ||
1366 | conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), | ||
1367 | GFP_KERNEL, NULL); | ||
1368 | if (conn->xmitqueue == ERR_PTR(-ENOMEM)) | ||
1369 | goto xmitqueue_alloc_fail; | ||
1370 | 1396 | ||
1371 | /* initialize general immediate & non-immediate PDU commands queue */ | 1397 | /* initialize general immediate & non-immediate PDU commands queue */ |
1372 | conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), | 1398 | conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), |
@@ -1394,7 +1420,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
1394 | data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); | 1420 | data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); |
1395 | if (!data) | 1421 | if (!data) |
1396 | goto login_mtask_data_alloc_fail; | 1422 | goto login_mtask_data_alloc_fail; |
1397 | conn->login_mtask->data = data; | 1423 | conn->login_mtask->data = conn->data = data; |
1398 | 1424 | ||
1399 | init_timer(&conn->tmabort_timer); | 1425 | init_timer(&conn->tmabort_timer); |
1400 | mutex_init(&conn->xmitmutex); | 1426 | mutex_init(&conn->xmitmutex); |
@@ -1410,8 +1436,6 @@ login_mtask_alloc_fail: | |||
1410 | mgmtqueue_alloc_fail: | 1436 | mgmtqueue_alloc_fail: |
1411 | kfifo_free(conn->immqueue); | 1437 | kfifo_free(conn->immqueue); |
1412 | immqueue_alloc_fail: | 1438 | immqueue_alloc_fail: |
1413 | kfifo_free(conn->xmitqueue); | ||
1414 | xmitqueue_alloc_fail: | ||
1415 | iscsi_destroy_conn(cls_conn); | 1439 | iscsi_destroy_conn(cls_conn); |
1416 | return NULL; | 1440 | return NULL; |
1417 | } | 1441 | } |
@@ -1432,12 +1456,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) | |||
1432 | 1456 | ||
1433 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); | 1457 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); |
1434 | mutex_lock(&conn->xmitmutex); | 1458 | mutex_lock(&conn->xmitmutex); |
1435 | if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) { | ||
1436 | if (session->tt->suspend_conn_recv) | ||
1437 | session->tt->suspend_conn_recv(conn); | ||
1438 | |||
1439 | session->tt->terminate_conn(conn); | ||
1440 | } | ||
1441 | 1459 | ||
1442 | spin_lock_bh(&session->lock); | 1460 | spin_lock_bh(&session->lock); |
1443 | conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; | 1461 | conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; |
@@ -1474,7 +1492,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) | |||
1474 | } | 1492 | } |
1475 | 1493 | ||
1476 | spin_lock_bh(&session->lock); | 1494 | spin_lock_bh(&session->lock); |
1477 | kfree(conn->login_mtask->data); | 1495 | kfree(conn->data); |
1496 | kfree(conn->persistent_address); | ||
1478 | __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, | 1497 | __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, |
1479 | sizeof(void*)); | 1498 | sizeof(void*)); |
1480 | list_del(&conn->item); | 1499 | list_del(&conn->item); |
@@ -1489,7 +1508,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) | |||
1489 | session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; | 1508 | session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; |
1490 | spin_unlock_bh(&session->lock); | 1509 | spin_unlock_bh(&session->lock); |
1491 | 1510 | ||
1492 | kfifo_free(conn->xmitqueue); | ||
1493 | kfifo_free(conn->immqueue); | 1511 | kfifo_free(conn->immqueue); |
1494 | kfifo_free(conn->mgmtqueue); | 1512 | kfifo_free(conn->mgmtqueue); |
1495 | 1513 | ||
@@ -1572,7 +1590,7 @@ static void fail_all_commands(struct iscsi_conn *conn) | |||
1572 | struct iscsi_cmd_task *ctask, *tmp; | 1590 | struct iscsi_cmd_task *ctask, *tmp; |
1573 | 1591 | ||
1574 | /* flush pending */ | 1592 | /* flush pending */ |
1575 | while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) { | 1593 | list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) { |
1576 | debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc, | 1594 | debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc, |
1577 | ctask->itt); | 1595 | ctask->itt); |
1578 | fail_command(conn, ctask, DID_BUS_BUSY << 16); | 1596 | fail_command(conn, ctask, DID_BUS_BUSY << 16); |
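Note: the hunks above convert the iSCSI xmit queue from a kfifo of task pointers to a plain list_head, so pending command tasks can be walked and unlinked safely during error handling. A minimal sketch of the same pattern, with illustrative names rather than the driver's exact structures:

    #include <linux/list.h>

    struct ctask {
            struct list_head running;       /* linked into conn->xmitqueue */
            int itt;
    };

    static void flush_pending(struct list_head *xmitqueue)
    {
            struct ctask *ctask, *tmp;

            /* _safe variant: the current entry may be deleted inside the loop */
            list_for_each_entry_safe(ctask, tmp, xmitqueue, running) {
                    list_del_init(&ctask->running);
                    /* fail or complete the task here */
            }
    }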
@@ -1615,8 +1633,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, | |||
1615 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); | 1633 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); |
1616 | spin_unlock_bh(&session->lock); | 1634 | spin_unlock_bh(&session->lock); |
1617 | 1635 | ||
1618 | if (session->tt->suspend_conn_recv) | 1636 | write_lock_bh(conn->recv_lock); |
1619 | session->tt->suspend_conn_recv(conn); | 1637 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); |
1638 | write_unlock_bh(conn->recv_lock); | ||
1620 | 1639 | ||
1621 | mutex_lock(&conn->xmitmutex); | 1640 | mutex_lock(&conn->xmitmutex); |
1622 | /* | 1641 | /* |
@@ -1635,7 +1654,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, | |||
1635 | } | 1654 | } |
1636 | } | 1655 | } |
1637 | 1656 | ||
1638 | session->tt->terminate_conn(conn); | ||
1639 | /* | 1657 | /* |
1640 | * flush queues. | 1658 | * flush queues. |
1641 | */ | 1659 | */ |
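Note: the recovery path above no longer calls the transport's suspend_conn_recv/terminate_conn hooks; libiscsi now suspends the receive side itself by setting a bit under the connection's recv_lock. A hedged sketch of that idiom (structure layout simplified, and the bit number is a stand-in for ISCSI_SUSPEND_BIT from the iSCSI headers):

    #include <linux/spinlock.h>
    #include <linux/bitops.h>

    #define RX_SUSPEND_BIT 0        /* placeholder for ISCSI_SUSPEND_BIT */

    struct conn_sketch {
            rwlock_t *recv_lock;            /* held for read by the rx path */
            unsigned long suspend_rx;
    };

    static void suspend_rx(struct conn_sketch *conn)
    {
            /* taking the write lock guarantees no PDU is being processed
             * while the suspend bit flips */
            write_lock_bh(conn->recv_lock);
            set_bit(RX_SUSPEND_BIT, &conn->suspend_rx);
            write_unlock_bh(conn->recv_lock);
    }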
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index f81691fcf177..d44f9aac6b8f 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -21,10 +21,12 @@ | |||
21 | 21 | ||
22 | struct lpfc_sli2_slim; | 22 | struct lpfc_sli2_slim; |
23 | 23 | ||
24 | #define LPFC_MAX_TARGET 256 /* max targets supported */ | ||
25 | #define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els req */ | ||
26 | #define LPFC_MAX_NS_RETRY 3 /* max NameServer retries */ | ||
27 | 24 | ||
25 | #define LPFC_MAX_TARGET 256 /* max number of targets supported */ | ||
26 | #define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els | ||
27 | requests */ | ||
28 | #define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact | ||
29 | the NameServer before giving up. */ | ||
28 | #define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */ | 30 | #define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */ |
29 | #define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */ | 31 | #define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */ |
30 | #define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */ | 32 | #define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */ |
@@ -41,7 +43,6 @@ struct lpfc_sli2_slim; | |||
41 | (( (u64)(high)<<16 ) << 16)|( (u64)(low)))) | 43 | (( (u64)(high)<<16 ) << 16)|( (u64)(low)))) |
42 | /* Provide maximum configuration definitions. */ | 44 | /* Provide maximum configuration definitions. */ |
43 | #define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */ | 45 | #define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */ |
44 | #define MAX_FCP_TARGET 256 /* max num of FCP targets supported */ | ||
45 | #define FC_MAX_ADPTMSG 64 | 46 | #define FC_MAX_ADPTMSG 64 |
46 | 47 | ||
47 | #define MAX_HBAEVT 32 | 48 | #define MAX_HBAEVT 32 |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index b62a72dfab29..d384c16f4a87 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -219,8 +219,18 @@ lpfc_issue_lip(struct Scsi_Host *host) | |||
219 | return -ENOMEM; | 219 | return -ENOMEM; |
220 | 220 | ||
221 | memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); | 221 | memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); |
222 | lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); | 222 | pmboxq->mb.mbxCommand = MBX_DOWN_LINK; |
223 | mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); | 223 | pmboxq->mb.mbxOwner = OWN_HOST; |
224 | |||
225 | mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); | ||
226 | |||
227 | if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { | ||
228 | memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); | ||
229 | lpfc_init_link(phba, pmboxq, phba->cfg_topology, | ||
230 | phba->cfg_link_speed); | ||
231 | mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, | ||
232 | phba->fc_ratov * 2); | ||
233 | } | ||
224 | 234 | ||
225 | if (mbxstatus == MBX_TIMEOUT) | 235 | if (mbxstatus == MBX_TIMEOUT) |
226 | pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | 236 | pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
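Note: lpfc_issue_lip now brings the link down with an explicit MBX_DOWN_LINK mailbox command and only re-runs lpfc_init_link if that succeeded, instead of issuing the init directly. A rough sketch of the two-step sequence, assuming the lpfc mailbox types and helpers from the driver headers (error handling trimmed):

    /* sketch only; struct lpfc_hba and LPFC_MBOXQ_t come from lpfc headers */
    static int issue_lip_sketch(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
    {
            int rc;

            /* step 1: take the link down */
            memset(pmboxq, 0, sizeof(*pmboxq));
            pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
            pmboxq->mb.mbxOwner = OWN_HOST;
            rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
            if (rc != MBX_SUCCESS || pmboxq->mb.mbxStatus)
                    return rc;

            /* step 2: re-initialize the link, which generates the LIP */
            memset(pmboxq, 0, sizeof(*pmboxq));
            lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed);
            return lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
    }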
@@ -233,51 +243,53 @@ lpfc_issue_lip(struct Scsi_Host *host) | |||
233 | return 0; | 243 | return 0; |
234 | } | 244 | } |
235 | 245 | ||
236 | static ssize_t | 246 | static int |
237 | lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) | 247 | lpfc_selective_reset(struct lpfc_hba *phba) |
238 | { | 248 | { |
239 | struct Scsi_Host *host = class_to_shost(cdev); | 249 | struct completion online_compl; |
240 | struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; | 250 | int status = 0; |
241 | return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); | 251 | |
252 | init_completion(&online_compl); | ||
253 | lpfc_workq_post_event(phba, &status, &online_compl, | ||
254 | LPFC_EVT_OFFLINE); | ||
255 | wait_for_completion(&online_compl); | ||
256 | |||
257 | if (status != 0) | ||
258 | return -EIO; | ||
259 | |||
260 | init_completion(&online_compl); | ||
261 | lpfc_workq_post_event(phba, &status, &online_compl, | ||
262 | LPFC_EVT_ONLINE); | ||
263 | wait_for_completion(&online_compl); | ||
264 | |||
265 | if (status != 0) | ||
266 | return -EIO; | ||
267 | |||
268 | return 0; | ||
242 | } | 269 | } |
243 | 270 | ||
244 | static ssize_t | 271 | static ssize_t |
245 | lpfc_board_online_show(struct class_device *cdev, char *buf) | 272 | lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count) |
246 | { | 273 | { |
247 | struct Scsi_Host *host = class_to_shost(cdev); | 274 | struct Scsi_Host *host = class_to_shost(cdev); |
248 | struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; | 275 | struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; |
276 | int status = -EINVAL; | ||
249 | 277 | ||
250 | if (phba->fc_flag & FC_OFFLINE_MODE) | 278 | if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) |
251 | return snprintf(buf, PAGE_SIZE, "0\n"); | 279 | status = lpfc_selective_reset(phba); |
280 | |||
281 | if (status == 0) | ||
282 | return strlen(buf); | ||
252 | else | 283 | else |
253 | return snprintf(buf, PAGE_SIZE, "1\n"); | 284 | return status; |
254 | } | 285 | } |
255 | 286 | ||
256 | static ssize_t | 287 | static ssize_t |
257 | lpfc_board_online_store(struct class_device *cdev, const char *buf, | 288 | lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) |
258 | size_t count) | ||
259 | { | 289 | { |
260 | struct Scsi_Host *host = class_to_shost(cdev); | 290 | struct Scsi_Host *host = class_to_shost(cdev); |
261 | struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; | 291 | struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; |
262 | struct completion online_compl; | 292 | return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); |
263 | int val=0, status=0; | ||
264 | |||
265 | if (sscanf(buf, "%d", &val) != 1) | ||
266 | return -EINVAL; | ||
267 | |||
268 | init_completion(&online_compl); | ||
269 | |||
270 | if (val) | ||
271 | lpfc_workq_post_event(phba, &status, &online_compl, | ||
272 | LPFC_EVT_ONLINE); | ||
273 | else | ||
274 | lpfc_workq_post_event(phba, &status, &online_compl, | ||
275 | LPFC_EVT_OFFLINE); | ||
276 | wait_for_completion(&online_compl); | ||
277 | if (!status) | ||
278 | return strlen(buf); | ||
279 | else | ||
280 | return -EIO; | ||
281 | } | 293 | } |
282 | 294 | ||
283 | static ssize_t | 295 | static ssize_t |
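Note: the board_online attribute is replaced by a write-only issue_reset attribute; writing the keyword "selective" takes the HBA offline and back online through the driver work queue. From userland this would typically look like echo selective > /sys/class/scsi_host/hostN/issue_reset (exact path depends on the host number). A hedged sketch of the keyword-matching store handler, with the class_device plumbing omitted:

    /* illustrative handler shape; lpfc_selective_reset is the driver's helper */
    static ssize_t issue_reset_sketch(struct lpfc_hba *phba, const char *buf)
    {
            /* only the "selective" keyword is recognized */
            if (strncmp(buf, "selective", sizeof("selective") - 1) != 0)
                    return -EINVAL;

            if (lpfc_selective_reset(phba) != 0)
                    return -EIO;

            return strlen(buf);     /* report the whole write as consumed */
    }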
@@ -532,10 +544,9 @@ static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, | |||
532 | NULL); | 544 | NULL); |
533 | static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show, | 545 | static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show, |
534 | NULL); | 546 | NULL); |
535 | static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR, | ||
536 | lpfc_board_online_show, lpfc_board_online_store); | ||
537 | static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, | 547 | static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, |
538 | lpfc_board_mode_show, lpfc_board_mode_store); | 548 | lpfc_board_mode_show, lpfc_board_mode_store); |
549 | static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); | ||
539 | 550 | ||
540 | static int lpfc_poll = 0; | 551 | static int lpfc_poll = 0; |
541 | module_param(lpfc_poll, int, 0); | 552 | module_param(lpfc_poll, int, 0); |
@@ -695,12 +706,12 @@ LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands " | |||
695 | "during discovery"); | 706 | "during discovery"); |
696 | 707 | ||
697 | /* | 708 | /* |
698 | # lpfc_max_luns: maximum number of LUNs per target driver will support | 709 | # lpfc_max_luns: maximum allowed LUN. |
699 | # Value range is [1,32768]. Default value is 256. | 710 | # Value range is [0,65535]. Default value is 255. |
700 | # NOTE: The SCSI layer will scan each target for this many luns | 711 | # NOTE: The SCSI layer might probe all allowed LUN on some old targets. |
701 | */ | 712 | */ |
702 | LPFC_ATTR_R(max_luns, 256, 1, 32768, | 713 | LPFC_ATTR_R(max_luns, 255, 0, 65535, |
703 | "Maximum number of LUNs per target driver will support"); | 714 | "Maximum allowed LUN"); |
704 | 715 | ||
705 | /* | 716 | /* |
706 | # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. | 717 | # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. |
@@ -739,8 +750,8 @@ struct class_device_attribute *lpfc_host_attrs[] = { | |||
739 | &class_device_attr_lpfc_max_luns, | 750 | &class_device_attr_lpfc_max_luns, |
740 | &class_device_attr_nport_evt_cnt, | 751 | &class_device_attr_nport_evt_cnt, |
741 | &class_device_attr_management_version, | 752 | &class_device_attr_management_version, |
742 | &class_device_attr_board_online, | ||
743 | &class_device_attr_board_mode, | 753 | &class_device_attr_board_mode, |
754 | &class_device_attr_issue_reset, | ||
744 | &class_device_attr_lpfc_poll, | 755 | &class_device_attr_lpfc_poll, |
745 | &class_device_attr_lpfc_poll_tmo, | 756 | &class_device_attr_lpfc_poll_tmo, |
746 | NULL, | 757 | NULL, |
@@ -873,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count) | |||
873 | phba->sysfs_mbox.mbox == NULL ) { | 884 | phba->sysfs_mbox.mbox == NULL ) { |
874 | sysfs_mbox_idle(phba); | 885 | sysfs_mbox_idle(phba); |
875 | spin_unlock_irq(host->host_lock); | 886 | spin_unlock_irq(host->host_lock); |
876 | return -EINVAL; | 887 | return -EAGAIN; |
877 | } | 888 | } |
878 | } | 889 | } |
879 | 890 | ||
@@ -989,14 +1000,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) | |||
989 | spin_unlock_irq(phba->host->host_lock); | 1000 | spin_unlock_irq(phba->host->host_lock); |
990 | rc = lpfc_sli_issue_mbox_wait (phba, | 1001 | rc = lpfc_sli_issue_mbox_wait (phba, |
991 | phba->sysfs_mbox.mbox, | 1002 | phba->sysfs_mbox.mbox, |
992 | phba->fc_ratov * 2); | 1003 | lpfc_mbox_tmo_val(phba, |
1004 | phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); | ||
993 | spin_lock_irq(phba->host->host_lock); | 1005 | spin_lock_irq(phba->host->host_lock); |
994 | } | 1006 | } |
995 | 1007 | ||
996 | if (rc != MBX_SUCCESS) { | 1008 | if (rc != MBX_SUCCESS) { |
997 | sysfs_mbox_idle(phba); | 1009 | sysfs_mbox_idle(phba); |
998 | spin_unlock_irq(host->host_lock); | 1010 | spin_unlock_irq(host->host_lock); |
999 | return -ENODEV; | 1011 | return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; |
1000 | } | 1012 | } |
1001 | phba->sysfs_mbox.state = SMBOX_READING; | 1013 | phba->sysfs_mbox.state = SMBOX_READING; |
1002 | } | 1014 | } |
@@ -1005,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) | |||
1005 | printk(KERN_WARNING "mbox_read: Bad State\n"); | 1017 | printk(KERN_WARNING "mbox_read: Bad State\n"); |
1006 | sysfs_mbox_idle(phba); | 1018 | sysfs_mbox_idle(phba); |
1007 | spin_unlock_irq(host->host_lock); | 1019 | spin_unlock_irq(host->host_lock); |
1008 | return -EINVAL; | 1020 | return -EAGAIN; |
1009 | } | 1021 | } |
1010 | 1022 | ||
1011 | memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); | 1023 | memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); |
@@ -1199,8 +1211,10 @@ lpfc_get_stats(struct Scsi_Host *shost) | |||
1199 | struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; | 1211 | struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; |
1200 | struct lpfc_sli *psli = &phba->sli; | 1212 | struct lpfc_sli *psli = &phba->sli; |
1201 | struct fc_host_statistics *hs = &phba->link_stats; | 1213 | struct fc_host_statistics *hs = &phba->link_stats; |
1214 | struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; | ||
1202 | LPFC_MBOXQ_t *pmboxq; | 1215 | LPFC_MBOXQ_t *pmboxq; |
1203 | MAILBOX_t *pmb; | 1216 | MAILBOX_t *pmb; |
1217 | unsigned long seconds; | ||
1204 | int rc = 0; | 1218 | int rc = 0; |
1205 | 1219 | ||
1206 | pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 1220 | pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
@@ -1261,22 +1275,103 @@ lpfc_get_stats(struct Scsi_Host *shost) | |||
1261 | hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; | 1275 | hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; |
1262 | hs->error_frames = pmb->un.varRdLnk.crcCnt; | 1276 | hs->error_frames = pmb->un.varRdLnk.crcCnt; |
1263 | 1277 | ||
1278 | hs->link_failure_count -= lso->link_failure_count; | ||
1279 | hs->loss_of_sync_count -= lso->loss_of_sync_count; | ||
1280 | hs->loss_of_signal_count -= lso->loss_of_signal_count; | ||
1281 | hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; | ||
1282 | hs->invalid_tx_word_count -= lso->invalid_tx_word_count; | ||
1283 | hs->invalid_crc_count -= lso->invalid_crc_count; | ||
1284 | hs->error_frames -= lso->error_frames; | ||
1285 | |||
1264 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 1286 | if (phba->fc_topology == TOPOLOGY_LOOP) { |
1265 | hs->lip_count = (phba->fc_eventTag >> 1); | 1287 | hs->lip_count = (phba->fc_eventTag >> 1); |
1288 | hs->lip_count -= lso->link_events; | ||
1266 | hs->nos_count = -1; | 1289 | hs->nos_count = -1; |
1267 | } else { | 1290 | } else { |
1268 | hs->lip_count = -1; | 1291 | hs->lip_count = -1; |
1269 | hs->nos_count = (phba->fc_eventTag >> 1); | 1292 | hs->nos_count = (phba->fc_eventTag >> 1); |
1293 | hs->nos_count -= lso->link_events; | ||
1270 | } | 1294 | } |
1271 | 1295 | ||
1272 | hs->dumped_frames = -1; | 1296 | hs->dumped_frames = -1; |
1273 | 1297 | ||
1274 | /* FIX ME */ | 1298 | seconds = get_seconds(); |
1275 | /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/ | 1299 | if (seconds < psli->stats_start) |
1300 | hs->seconds_since_last_reset = seconds + | ||
1301 | ((unsigned long)-1 - psli->stats_start); | ||
1302 | else | ||
1303 | hs->seconds_since_last_reset = seconds - psli->stats_start; | ||
1276 | 1304 | ||
1277 | return hs; | 1305 | return hs; |
1278 | } | 1306 | } |
1279 | 1307 | ||
1308 | static void | ||
1309 | lpfc_reset_stats(struct Scsi_Host *shost) | ||
1310 | { | ||
1311 | struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; | ||
1312 | struct lpfc_sli *psli = &phba->sli; | ||
1313 | struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; | ||
1314 | LPFC_MBOXQ_t *pmboxq; | ||
1315 | MAILBOX_t *pmb; | ||
1316 | int rc = 0; | ||
1317 | |||
1318 | pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
1319 | if (!pmboxq) | ||
1320 | return; | ||
1321 | memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); | ||
1322 | |||
1323 | pmb = &pmboxq->mb; | ||
1324 | pmb->mbxCommand = MBX_READ_STATUS; | ||
1325 | pmb->mbxOwner = OWN_HOST; | ||
1326 | pmb->un.varWords[0] = 0x1; /* reset request */ | ||
1327 | pmboxq->context1 = NULL; | ||
1328 | |||
1329 | if ((phba->fc_flag & FC_OFFLINE_MODE) || | ||
1330 | (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) | ||
1331 | rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); | ||
1332 | else | ||
1333 | rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); | ||
1334 | |||
1335 | if (rc != MBX_SUCCESS) { | ||
1336 | if (rc == MBX_TIMEOUT) | ||
1337 | pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
1338 | else | ||
1339 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
1340 | return; | ||
1341 | } | ||
1342 | |||
1343 | memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); | ||
1344 | pmb->mbxCommand = MBX_READ_LNK_STAT; | ||
1345 | pmb->mbxOwner = OWN_HOST; | ||
1346 | pmboxq->context1 = NULL; | ||
1347 | |||
1348 | if ((phba->fc_flag & FC_OFFLINE_MODE) || | ||
1349 | (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) | ||
1350 | rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); | ||
1351 | else | ||
1352 | rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); | ||
1353 | |||
1354 | if (rc != MBX_SUCCESS) { | ||
1355 | if (rc == MBX_TIMEOUT) | ||
1356 | pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
1357 | else | ||
1358 | mempool_free( pmboxq, phba->mbox_mem_pool); | ||
1359 | return; | ||
1360 | } | ||
1361 | |||
1362 | lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; | ||
1363 | lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; | ||
1364 | lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; | ||
1365 | lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; | ||
1366 | lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; | ||
1367 | lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; | ||
1368 | lso->error_frames = pmb->un.varRdLnk.crcCnt; | ||
1369 | lso->link_events = (phba->fc_eventTag >> 1); | ||
1370 | |||
1371 | psli->stats_start = get_seconds(); | ||
1372 | |||
1373 | return; | ||
1374 | } | ||
1280 | 1375 | ||
1281 | /* | 1376 | /* |
1282 | * The LPFC driver treats linkdown handling as target loss events so there | 1377 | * The LPFC driver treats linkdown handling as target loss events so there |
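Note: lpfc_get_stats now reports counters relative to the snapshot stored by the new lpfc_reset_stats (psli->lnk_stat_offsets), and seconds_since_last_reset is derived from get_seconds() with an explicit guard for the counter wrapping past its start value. A small standalone sketch of that wrap-tolerant subtraction, with illustrative names:

    /* seconds elapsed since 'start', tolerating one wrap of the counter;
     * mirrors the branch used in lpfc_get_stats */
    static unsigned long elapsed_since(unsigned long now, unsigned long start)
    {
            if (now < start)
                    return now + ((unsigned long)-1 - start);
            return now - start;
    }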
@@ -1420,8 +1515,7 @@ struct fc_function_template lpfc_transport_functions = { | |||
1420 | */ | 1515 | */ |
1421 | 1516 | ||
1422 | .get_fc_host_stats = lpfc_get_stats, | 1517 | .get_fc_host_stats = lpfc_get_stats, |
1423 | 1518 | .reset_fc_host_stats = lpfc_reset_stats, | |
1424 | /* the LPFC driver doesn't support resetting stats yet */ | ||
1425 | 1519 | ||
1426 | .dd_fcrport_size = sizeof(struct lpfc_rport_data), | 1520 | .dd_fcrport_size = sizeof(struct lpfc_rport_data), |
1427 | .show_rport_maxframe_size = 1, | 1521 | .show_rport_maxframe_size = 1, |
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index ee22173fce43..2a176467f71b 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -127,6 +127,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); | |||
127 | void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); | 127 | void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); |
128 | void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); | 128 | void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); |
129 | LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); | 129 | LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); |
130 | int lpfc_mbox_tmo_val(struct lpfc_hba *, int); | ||
130 | 131 | ||
131 | int lpfc_mem_alloc(struct lpfc_hba *); | 132 | int lpfc_mem_alloc(struct lpfc_hba *); |
132 | void lpfc_mem_free(struct lpfc_hba *); | 133 | void lpfc_mem_free(struct lpfc_hba *); |
@@ -147,6 +148,7 @@ int lpfc_sli_hba_setup(struct lpfc_hba *); | |||
147 | int lpfc_sli_hba_down(struct lpfc_hba *); | 148 | int lpfc_sli_hba_down(struct lpfc_hba *); |
148 | int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); | 149 | int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); |
149 | int lpfc_sli_handle_mb_event(struct lpfc_hba *); | 150 | int lpfc_sli_handle_mb_event(struct lpfc_hba *); |
151 | int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); | ||
150 | int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, | 152 | int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, |
151 | struct lpfc_sli_ring *, uint32_t); | 153 | struct lpfc_sli_ring *, uint32_t); |
152 | void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); | 154 | void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index b65ee57af53e..bbb7310210b0 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba, | |||
131 | } | 131 | } |
132 | 132 | ||
133 | ct_unsol_event_exit_piocbq: | 133 | ct_unsol_event_exit_piocbq: |
134 | list_del(&head); | ||
134 | if (pmbuf) { | 135 | if (pmbuf) { |
135 | list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { | 136 | list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { |
136 | lpfc_mbuf_free(phba, matp->virt, matp->phys); | 137 | lpfc_mbuf_free(phba, matp->virt, matp->phys); |
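Note: the added list_del(&head) detaches the on-stack list head before lpfc_ct_unsol_event returns, so the surrounding list is not left pointing at a dead stack frame. A generic sketch of the pattern, names illustrative:

    #include <linux/list.h>

    static void walk_once(struct list_head *chain)
    {
            struct list_head head;

            /* temporarily link a stack head into an existing chain */
            list_add(&head, chain);

            /* ... iterate from 'head' over the chained buffers ... */

            /* detach before returning: the chain must never reference
             * stack memory after this function exits */
            list_del(&head);
    }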
@@ -481,7 +482,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | |||
481 | if (CTrsp->CommandResponse.bits.CmdRsp == | 482 | if (CTrsp->CommandResponse.bits.CmdRsp == |
482 | be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { | 483 | be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { |
483 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, | 484 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, |
484 | "%d:0239 NameServer Rsp " | 485 | "%d:0208 NameServer Rsp " |
485 | "Data: x%x\n", | 486 | "Data: x%x\n", |
486 | phba->brd_no, | 487 | phba->brd_no, |
487 | phba->fc_flag); | 488 | phba->fc_flag); |
@@ -588,13 +589,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) | |||
588 | 589 | ||
589 | lpfc_decode_firmware_rev(phba, fwrev, 0); | 590 | lpfc_decode_firmware_rev(phba, fwrev, 0); |
590 | 591 | ||
591 | if (phba->Port[0]) { | 592 | sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, |
592 | sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName, | 593 | fwrev, lpfc_release_version); |
593 | phba->Port, fwrev, lpfc_release_version); | 594 | return; |
594 | } else { | ||
595 | sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, | ||
596 | fwrev, lpfc_release_version); | ||
597 | } | ||
598 | } | 595 | } |
599 | 596 | ||
600 | /* | 597 | /* |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4126fd87956f..3567de613162 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -648,33 +648,32 @@ lpfc_more_plogi(struct lpfc_hba * phba) | |||
648 | } | 648 | } |
649 | 649 | ||
650 | static struct lpfc_nodelist * | 650 | static struct lpfc_nodelist * |
651 | lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | 651 | lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp, |
652 | struct lpfc_nodelist *ndlp) | 652 | struct lpfc_nodelist *ndlp) |
653 | { | 653 | { |
654 | struct lpfc_nodelist *new_ndlp; | 654 | struct lpfc_nodelist *new_ndlp; |
655 | struct lpfc_dmabuf *pcmd, *prsp; | ||
656 | uint32_t *lp; | 655 | uint32_t *lp; |
657 | struct serv_parm *sp; | 656 | struct serv_parm *sp; |
658 | uint8_t name[sizeof (struct lpfc_name)]; | 657 | uint8_t name[sizeof (struct lpfc_name)]; |
659 | uint32_t rc; | 658 | uint32_t rc; |
660 | 659 | ||
661 | pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; | ||
662 | prsp = (struct lpfc_dmabuf *) pcmd->list.next; | ||
663 | lp = (uint32_t *) prsp->virt; | 660 | lp = (uint32_t *) prsp->virt; |
664 | sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); | 661 | sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); |
662 | memset(name, 0, sizeof (struct lpfc_name)); | ||
665 | 663 | ||
666 | /* Now we to find out if the NPort we are logging into, matches the WWPN | 664 | /* Now we to find out if the NPort we are logging into, matches the WWPN |
667 | * we have for that ndlp. If not, we have some work to do. | 665 | * we have for that ndlp. If not, we have some work to do. |
668 | */ | 666 | */ |
669 | new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName); | 667 | new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName); |
670 | 668 | ||
671 | memset(name, 0, sizeof (struct lpfc_name)); | 669 | if (new_ndlp == ndlp) |
672 | rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); | ||
673 | if (!rc || (new_ndlp == ndlp)) { | ||
674 | return ndlp; | 670 | return ndlp; |
675 | } | ||
676 | 671 | ||
677 | if (!new_ndlp) { | 672 | if (!new_ndlp) { |
673 | rc = | ||
674 | memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); | ||
675 | if (!rc) | ||
676 | return ndlp; | ||
678 | new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); | 677 | new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); |
679 | if (!new_ndlp) | 678 | if (!new_ndlp) |
680 | return ndlp; | 679 | return ndlp; |
@@ -683,17 +682,21 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | |||
683 | } | 682 | } |
684 | 683 | ||
685 | lpfc_unreg_rpi(phba, new_ndlp); | 684 | lpfc_unreg_rpi(phba, new_ndlp); |
686 | new_ndlp->nlp_prev_state = ndlp->nlp_state; | ||
687 | new_ndlp->nlp_DID = ndlp->nlp_DID; | 685 | new_ndlp->nlp_DID = ndlp->nlp_DID; |
688 | new_ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; | 686 | new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; |
689 | lpfc_nlp_list(phba, new_ndlp, NLP_PLOGI_LIST); | 687 | new_ndlp->nlp_state = ndlp->nlp_state; |
688 | lpfc_nlp_list(phba, new_ndlp, ndlp->nlp_flag & NLP_LIST_MASK); | ||
690 | 689 | ||
691 | /* Move this back to NPR list */ | 690 | /* Move this back to NPR list */ |
692 | lpfc_unreg_rpi(phba, ndlp); | 691 | if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { |
693 | ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ | 692 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); |
694 | ndlp->nlp_state = NLP_STE_NPR_NODE; | 693 | } |
695 | lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); | 694 | else { |
696 | 695 | lpfc_unreg_rpi(phba, ndlp); | |
696 | ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ | ||
697 | ndlp->nlp_state = NLP_STE_NPR_NODE; | ||
698 | lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); | ||
699 | } | ||
697 | return new_ndlp; | 700 | return new_ndlp; |
698 | } | 701 | } |
699 | 702 | ||
@@ -703,6 +706,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | |||
703 | { | 706 | { |
704 | IOCB_t *irsp; | 707 | IOCB_t *irsp; |
705 | struct lpfc_nodelist *ndlp; | 708 | struct lpfc_nodelist *ndlp; |
709 | struct lpfc_dmabuf *prsp; | ||
706 | int disc, rc, did, type; | 710 | int disc, rc, did, type; |
707 | 711 | ||
708 | 712 | ||
@@ -769,7 +773,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | |||
769 | } | 773 | } |
770 | } else { | 774 | } else { |
771 | /* Good status, call state machine */ | 775 | /* Good status, call state machine */ |
772 | ndlp = lpfc_plogi_confirm_nport(phba, cmdiocb, ndlp); | 776 | prsp = list_entry(((struct lpfc_dmabuf *) |
777 | cmdiocb->context2)->list.next, | ||
778 | struct lpfc_dmabuf, list); | ||
779 | ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp); | ||
773 | rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, | 780 | rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, |
774 | NLP_EVT_CMPL_PLOGI); | 781 | NLP_EVT_CMPL_PLOGI); |
775 | } | 782 | } |
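Note: lpfc_plogi_confirm_nport now takes the response DMA buffer directly, and the completion handler fetches it as the second entry on the command buffer's list (command payload first, response payload next). A short sketch of that list_entry lookup, assuming the same buffer layout:

    #include <linux/list.h>

    /* minimal stand-in for the driver's lpfc_dmabuf descriptor */
    struct dmabuf_sketch {
            struct list_head list;
            void *virt;
    };

    /* given the command buffer, the response buffer is the next list entry */
    static struct dmabuf_sketch *response_of(struct dmabuf_sketch *pcmd)
    {
            return list_entry(pcmd->list.next, struct dmabuf_sketch, list);
    }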
@@ -1841,9 +1848,12 @@ static void | |||
1841 | lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | 1848 | lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, |
1842 | struct lpfc_iocbq * rspiocb) | 1849 | struct lpfc_iocbq * rspiocb) |
1843 | { | 1850 | { |
1851 | IOCB_t *irsp; | ||
1844 | struct lpfc_nodelist *ndlp; | 1852 | struct lpfc_nodelist *ndlp; |
1845 | LPFC_MBOXQ_t *mbox = NULL; | 1853 | LPFC_MBOXQ_t *mbox = NULL; |
1846 | 1854 | ||
1855 | irsp = &rspiocb->iocb; | ||
1856 | |||
1847 | ndlp = (struct lpfc_nodelist *) cmdiocb->context1; | 1857 | ndlp = (struct lpfc_nodelist *) cmdiocb->context1; |
1848 | if (cmdiocb->context_un.mbox) | 1858 | if (cmdiocb->context_un.mbox) |
1849 | mbox = cmdiocb->context_un.mbox; | 1859 | mbox = cmdiocb->context_un.mbox; |
@@ -1886,9 +1896,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | |||
1886 | mempool_free( mbox, phba->mbox_mem_pool); | 1896 | mempool_free( mbox, phba->mbox_mem_pool); |
1887 | } else { | 1897 | } else { |
1888 | mempool_free( mbox, phba->mbox_mem_pool); | 1898 | mempool_free( mbox, phba->mbox_mem_pool); |
1889 | if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { | 1899 | /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ |
1890 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); | 1900 | if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && |
1891 | ndlp = NULL; | 1901 | ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || |
1902 | (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || | ||
1903 | (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) { | ||
1904 | if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { | ||
1905 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); | ||
1906 | ndlp = NULL; | ||
1907 | } | ||
1892 | } | 1908 | } |
1893 | } | 1909 | } |
1894 | } | 1910 | } |
@@ -2832,7 +2848,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
2832 | 2848 | ||
2833 | /* Xmit ELS RPS ACC response tag <ulpIoTag> */ | 2849 | /* Xmit ELS RPS ACC response tag <ulpIoTag> */ |
2834 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 2850 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
2835 | "%d:0128 Xmit ELS RPS ACC response tag x%x " | 2851 | "%d:0118 Xmit ELS RPS ACC response tag x%x " |
2836 | "Data: x%x x%x x%x x%x x%x\n", | 2852 | "Data: x%x x%x x%x x%x x%x\n", |
2837 | phba->brd_no, | 2853 | phba->brd_no, |
2838 | elsiocb->iocb.ulpIoTag, | 2854 | elsiocb->iocb.ulpIoTag, |
@@ -2941,7 +2957,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, | |||
2941 | 2957 | ||
2942 | /* Xmit ELS RPL ACC response tag <ulpIoTag> */ | 2958 | /* Xmit ELS RPL ACC response tag <ulpIoTag> */ |
2943 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 2959 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
2944 | "%d:0128 Xmit ELS RPL ACC response tag x%x " | 2960 | "%d:0120 Xmit ELS RPL ACC response tag x%x " |
2945 | "Data: x%x x%x x%x x%x x%x\n", | 2961 | "Data: x%x x%x x%x x%x x%x\n", |
2946 | phba->brd_no, | 2962 | phba->brd_no, |
2947 | elsiocb->iocb.ulpIoTag, | 2963 | elsiocb->iocb.ulpIoTag, |
@@ -3102,7 +3118,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | |||
3102 | struct lpfc_nodelist *ndlp, *next_ndlp; | 3118 | struct lpfc_nodelist *ndlp, *next_ndlp; |
3103 | 3119 | ||
3104 | /* FAN received */ | 3120 | /* FAN received */ |
3105 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n", | 3121 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n", |
3106 | phba->brd_no); | 3122 | phba->brd_no); |
3107 | 3123 | ||
3108 | icmd = &cmdiocb->iocb; | 3124 | icmd = &cmdiocb->iocb; |
@@ -3282,10 +3298,9 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba) | |||
3282 | } else | 3298 | } else |
3283 | lpfc_sli_release_iocbq(phba, piocb); | 3299 | lpfc_sli_release_iocbq(phba, piocb); |
3284 | } | 3300 | } |
3285 | if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) { | 3301 | if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) |
3286 | phba->els_tmofunc.expires = jiffies + HZ * timeout; | 3302 | mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout); |
3287 | add_timer(&phba->els_tmofunc); | 3303 | |
3288 | } | ||
3289 | spin_unlock_irq(phba->host->host_lock); | 3304 | spin_unlock_irq(phba->host->host_lock); |
3290 | } | 3305 | } |
3291 | 3306 | ||
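Note: re-arming the ELS timeout now uses mod_timer() instead of setting expires and calling add_timer() again; mod_timer() works whether or not the timer is already pending, avoiding the double-add hazard when the handler races with other users of the same timer. A minimal sketch:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void rearm(struct timer_list *tmo, unsigned long timeout_secs)
    {
            /* safe for both pending and idle timers, unlike add_timer() */
            mod_timer(tmo, jiffies + HZ * timeout_secs);
    }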
@@ -3442,6 +3457,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, | |||
3442 | if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { | 3457 | if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { |
3443 | ndlp->nlp_type |= NLP_FABRIC; | 3458 | ndlp->nlp_type |= NLP_FABRIC; |
3444 | } | 3459 | } |
3460 | ndlp->nlp_state = NLP_STE_UNUSED_NODE; | ||
3461 | lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST); | ||
3445 | } | 3462 | } |
3446 | 3463 | ||
3447 | phba->fc_stat.elsRcvFrame++; | 3464 | phba->fc_stat.elsRcvFrame++; |
@@ -3463,13 +3480,14 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, | |||
3463 | rjt_err = 1; | 3480 | rjt_err = 1; |
3464 | break; | 3481 | break; |
3465 | } | 3482 | } |
3483 | ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp); | ||
3466 | lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); | 3484 | lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); |
3467 | break; | 3485 | break; |
3468 | case ELS_CMD_FLOGI: | 3486 | case ELS_CMD_FLOGI: |
3469 | phba->fc_stat.elsRcvFLOGI++; | 3487 | phba->fc_stat.elsRcvFLOGI++; |
3470 | lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode); | 3488 | lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode); |
3471 | if (newnode) { | 3489 | if (newnode) { |
3472 | mempool_free( ndlp, phba->nlp_mem_pool); | 3490 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); |
3473 | } | 3491 | } |
3474 | break; | 3492 | break; |
3475 | case ELS_CMD_LOGO: | 3493 | case ELS_CMD_LOGO: |
@@ -3492,7 +3510,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, | |||
3492 | phba->fc_stat.elsRcvRSCN++; | 3510 | phba->fc_stat.elsRcvRSCN++; |
3493 | lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode); | 3511 | lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode); |
3494 | if (newnode) { | 3512 | if (newnode) { |
3495 | mempool_free( ndlp, phba->nlp_mem_pool); | 3513 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); |
3496 | } | 3514 | } |
3497 | break; | 3515 | break; |
3498 | case ELS_CMD_ADISC: | 3516 | case ELS_CMD_ADISC: |
@@ -3535,28 +3553,28 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, | |||
3535 | phba->fc_stat.elsRcvLIRR++; | 3553 | phba->fc_stat.elsRcvLIRR++; |
3536 | lpfc_els_rcv_lirr(phba, elsiocb, ndlp); | 3554 | lpfc_els_rcv_lirr(phba, elsiocb, ndlp); |
3537 | if (newnode) { | 3555 | if (newnode) { |
3538 | mempool_free( ndlp, phba->nlp_mem_pool); | 3556 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); |
3539 | } | 3557 | } |
3540 | break; | 3558 | break; |
3541 | case ELS_CMD_RPS: | 3559 | case ELS_CMD_RPS: |
3542 | phba->fc_stat.elsRcvRPS++; | 3560 | phba->fc_stat.elsRcvRPS++; |
3543 | lpfc_els_rcv_rps(phba, elsiocb, ndlp); | 3561 | lpfc_els_rcv_rps(phba, elsiocb, ndlp); |
3544 | if (newnode) { | 3562 | if (newnode) { |
3545 | mempool_free( ndlp, phba->nlp_mem_pool); | 3563 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); |
3546 | } | 3564 | } |
3547 | break; | 3565 | break; |
3548 | case ELS_CMD_RPL: | 3566 | case ELS_CMD_RPL: |
3549 | phba->fc_stat.elsRcvRPL++; | 3567 | phba->fc_stat.elsRcvRPL++; |
3550 | lpfc_els_rcv_rpl(phba, elsiocb, ndlp); | 3568 | lpfc_els_rcv_rpl(phba, elsiocb, ndlp); |
3551 | if (newnode) { | 3569 | if (newnode) { |
3552 | mempool_free( ndlp, phba->nlp_mem_pool); | 3570 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); |
3553 | } | 3571 | } |
3554 | break; | 3572 | break; |
3555 | case ELS_CMD_RNID: | 3573 | case ELS_CMD_RNID: |
3556 | phba->fc_stat.elsRcvRNID++; | 3574 | phba->fc_stat.elsRcvRNID++; |
3557 | lpfc_els_rcv_rnid(phba, elsiocb, ndlp); | 3575 | lpfc_els_rcv_rnid(phba, elsiocb, ndlp); |
3558 | if (newnode) { | 3576 | if (newnode) { |
3559 | mempool_free( ndlp, phba->nlp_mem_pool); | 3577 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); |
3560 | } | 3578 | } |
3561 | break; | 3579 | break; |
3562 | default: | 3580 | default: |
@@ -3568,7 +3586,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, | |||
3568 | "%d:0115 Unknown ELS command x%x received from " | 3586 | "%d:0115 Unknown ELS command x%x received from " |
3569 | "NPORT x%x\n", phba->brd_no, cmd, did); | 3587 | "NPORT x%x\n", phba->brd_no, cmd, did); |
3570 | if (newnode) { | 3588 | if (newnode) { |
3571 | mempool_free( ndlp, phba->nlp_mem_pool); | 3589 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); |
3572 | } | 3590 | } |
3573 | break; | 3591 | break; |
3574 | } | 3592 | } |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index adb086009ae0..b2f1552f1848 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -1084,7 +1084,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba, | |||
1084 | fc_remote_port_rolechg(rport, rport_ids.roles); | 1084 | fc_remote_port_rolechg(rport, rport_ids.roles); |
1085 | 1085 | ||
1086 | if ((rport->scsi_target_id != -1) && | 1086 | if ((rport->scsi_target_id != -1) && |
1087 | (rport->scsi_target_id < MAX_FCP_TARGET)) { | 1087 | (rport->scsi_target_id < LPFC_MAX_TARGET)) { |
1088 | ndlp->nlp_sid = rport->scsi_target_id; | 1088 | ndlp->nlp_sid = rport->scsi_target_id; |
1089 | } | 1089 | } |
1090 | 1090 | ||
@@ -1313,7 +1313,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) | |||
1313 | if ((rport_add == mapped) && | 1313 | if ((rport_add == mapped) && |
1314 | ((!nlp->rport) || | 1314 | ((!nlp->rport) || |
1315 | (nlp->rport->scsi_target_id == -1) || | 1315 | (nlp->rport->scsi_target_id == -1) || |
1316 | (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) { | 1316 | (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) { |
1317 | nlp->nlp_state = NLP_STE_UNMAPPED_NODE; | 1317 | nlp->nlp_state = NLP_STE_UNMAPPED_NODE; |
1318 | spin_lock_irq(phba->host->host_lock); | 1318 | spin_lock_irq(phba->host->host_lock); |
1319 | nlp->nlp_flag |= NLP_TGT_NO_SCSIID; | 1319 | nlp->nlp_flag |= NLP_TGT_NO_SCSIID; |
@@ -1557,6 +1557,8 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1557 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | 1557 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
1558 | } | 1558 | } |
1559 | } | 1559 | } |
1560 | |||
1561 | spin_lock_irq(phba->host->host_lock); | ||
1560 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { | 1562 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { |
1561 | if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && | 1563 | if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && |
1562 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { | 1564 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { |
@@ -1569,6 +1571,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1569 | mempool_free(mb, phba->mbox_mem_pool); | 1571 | mempool_free(mb, phba->mbox_mem_pool); |
1570 | } | 1572 | } |
1571 | } | 1573 | } |
1574 | spin_unlock_irq(phba->host->host_lock); | ||
1572 | 1575 | ||
1573 | lpfc_els_abort(phba,ndlp,0); | 1576 | lpfc_els_abort(phba,ndlp,0); |
1574 | spin_lock_irq(phba->host->host_lock); | 1577 | spin_lock_irq(phba->host->host_lock); |
@@ -1782,7 +1785,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) | |||
1782 | /* LOG change to REGLOGIN */ | 1785 | /* LOG change to REGLOGIN */ |
1783 | /* FIND node DID reglogin */ | 1786 | /* FIND node DID reglogin */ |
1784 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 1787 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, |
1785 | "%d:0931 FIND node DID reglogin" | 1788 | "%d:0901 FIND node DID reglogin" |
1786 | " Data: x%p x%x x%x x%x\n", | 1789 | " Data: x%p x%x x%x x%x\n", |
1787 | phba->brd_no, | 1790 | phba->brd_no, |
1788 | ndlp, ndlp->nlp_DID, | 1791 | ndlp, ndlp->nlp_DID, |
@@ -1805,7 +1808,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) | |||
1805 | /* LOG change to PRLI */ | 1808 | /* LOG change to PRLI */ |
1806 | /* FIND node DID prli */ | 1809 | /* FIND node DID prli */ |
1807 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 1810 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, |
1808 | "%d:0931 FIND node DID prli " | 1811 | "%d:0902 FIND node DID prli " |
1809 | "Data: x%p x%x x%x x%x\n", | 1812 | "Data: x%p x%x x%x x%x\n", |
1810 | phba->brd_no, | 1813 | phba->brd_no, |
1811 | ndlp, ndlp->nlp_DID, | 1814 | ndlp, ndlp->nlp_DID, |
@@ -1828,7 +1831,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) | |||
1828 | /* LOG change to NPR */ | 1831 | /* LOG change to NPR */ |
1829 | /* FIND node DID npr */ | 1832 | /* FIND node DID npr */ |
1830 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 1833 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, |
1831 | "%d:0931 FIND node DID npr " | 1834 | "%d:0903 FIND node DID npr " |
1832 | "Data: x%p x%x x%x x%x\n", | 1835 | "Data: x%p x%x x%x x%x\n", |
1833 | phba->brd_no, | 1836 | phba->brd_no, |
1834 | ndlp, ndlp->nlp_DID, | 1837 | ndlp, ndlp->nlp_DID, |
@@ -1851,7 +1854,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) | |||
1851 | /* LOG change to UNUSED */ | 1854 | /* LOG change to UNUSED */ |
1852 | /* FIND node DID unused */ | 1855 | /* FIND node DID unused */ |
1853 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 1856 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, |
1854 | "%d:0931 FIND node DID unused " | 1857 | "%d:0905 FIND node DID unused " |
1855 | "Data: x%p x%x x%x x%x\n", | 1858 | "Data: x%p x%x x%x x%x\n", |
1856 | phba->brd_no, | 1859 | phba->brd_no, |
1857 | ndlp, ndlp->nlp_DID, | 1860 | ndlp, ndlp->nlp_DID, |
@@ -2335,7 +2338,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) | |||
2335 | initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 2338 | initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
2336 | if (!initlinkmbox) { | 2339 | if (!initlinkmbox) { |
2337 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 2340 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2338 | "%d:0226 Device Discovery " | 2341 | "%d:0206 Device Discovery " |
2339 | "completion error\n", | 2342 | "completion error\n", |
2340 | phba->brd_no); | 2343 | phba->brd_no); |
2341 | phba->hba_state = LPFC_HBA_ERROR; | 2344 | phba->hba_state = LPFC_HBA_ERROR; |
@@ -2365,7 +2368,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) | |||
2365 | if (!clearlambox) { | 2368 | if (!clearlambox) { |
2366 | clrlaerr = 1; | 2369 | clrlaerr = 1; |
2367 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 2370 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2368 | "%d:0226 Device Discovery " | 2371 | "%d:0207 Device Discovery " |
2369 | "completion error\n", | 2372 | "completion error\n", |
2370 | phba->brd_no); | 2373 | phba->brd_no); |
2371 | phba->hba_state = LPFC_HBA_ERROR; | 2374 | phba->hba_state = LPFC_HBA_ERROR; |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 81755a3f7c68..f6948ffe689a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -71,6 +71,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba) | |||
71 | uint16_t offset = 0; | 71 | uint16_t offset = 0; |
72 | static char licensed[56] = | 72 | static char licensed[56] = |
73 | "key unlock for use with gnu public licensed code only\0"; | 73 | "key unlock for use with gnu public licensed code only\0"; |
74 | static int init_key = 1; | ||
74 | 75 | ||
75 | pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 76 | pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
76 | if (!pmb) { | 77 | if (!pmb) { |
@@ -82,10 +83,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba) | |||
82 | phba->hba_state = LPFC_INIT_MBX_CMDS; | 83 | phba->hba_state = LPFC_INIT_MBX_CMDS; |
83 | 84 | ||
84 | if (lpfc_is_LC_HBA(phba->pcidev->device)) { | 85 | if (lpfc_is_LC_HBA(phba->pcidev->device)) { |
85 | uint32_t *ptext = (uint32_t *) licensed; | 86 | if (init_key) { |
87 | uint32_t *ptext = (uint32_t *) licensed; | ||
86 | 88 | ||
87 | for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) | 89 | for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) |
88 | *ptext = cpu_to_be32(*ptext); | 90 | *ptext = cpu_to_be32(*ptext); |
91 | init_key = 0; | ||
92 | } | ||
89 | 93 | ||
90 | lpfc_read_nv(phba, pmb); | 94 | lpfc_read_nv(phba, pmb); |
91 | memset((char*)mb->un.varRDnvp.rsvd3, 0, | 95 | memset((char*)mb->un.varRDnvp.rsvd3, 0, |
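Note: the licensed-key string is byte-swapped in place, so repeating the swap on every lpfc_config_port_prep call would scramble it the second time through; the static init_key flag makes the conversion happen exactly once. A tiny standalone sketch of the one-shot initialization idiom (array is an illustrative stand-in for the 56-byte key):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static u32 key_words[14];

    static void swap_key_once(void)
    {
            static int init_key = 1;        /* persists across calls */
            int i;

            if (!init_key)
                    return;
            for (i = 0; i < 14; i++)
                    key_words[i] = cpu_to_be32(key_words[i]);
            init_key = 0;
    }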
@@ -405,19 +409,26 @@ lpfc_config_port_post(struct lpfc_hba * phba) | |||
405 | } | 409 | } |
406 | /* MBOX buffer will be freed in mbox compl */ | 410 | /* MBOX buffer will be freed in mbox compl */ |
407 | 411 | ||
408 | i = 0; | 412 | return (0); |
413 | } | ||
414 | |||
415 | static int | ||
416 | lpfc_discovery_wait(struct lpfc_hba *phba) | ||
417 | { | ||
418 | int i = 0; | ||
419 | |||
409 | while ((phba->hba_state != LPFC_HBA_READY) || | 420 | while ((phba->hba_state != LPFC_HBA_READY) || |
410 | (phba->num_disc_nodes) || (phba->fc_prli_sent) || | 421 | (phba->num_disc_nodes) || (phba->fc_prli_sent) || |
411 | ((phba->fc_map_cnt == 0) && (i<2)) || | 422 | ((phba->fc_map_cnt == 0) && (i<2)) || |
412 | (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) { | 423 | (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) { |
413 | /* Check every second for 30 retries. */ | 424 | /* Check every second for 30 retries. */ |
414 | i++; | 425 | i++; |
415 | if (i > 30) { | 426 | if (i > 30) { |
416 | break; | 427 | return -ETIMEDOUT; |
417 | } | 428 | } |
418 | if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) { | 429 | if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) { |
419 | /* The link is down. Set linkdown timeout */ | 430 | /* The link is down. Set linkdown timeout */ |
420 | break; | 431 | return -ETIMEDOUT; |
421 | } | 432 | } |
422 | 433 | ||
423 | /* Delay for 1 second to give discovery time to complete. */ | 434 | /* Delay for 1 second to give discovery time to complete. */ |
@@ -425,12 +436,7 @@ lpfc_config_port_post(struct lpfc_hba * phba) | |||
425 | 436 | ||
426 | } | 437 | } |
427 | 438 | ||
428 | /* Since num_disc_nodes keys off of PLOGI, delay a bit to let | 439 | return 0; |
429 | * any potential PRLIs to flush thru the SLI sub-system. | ||
430 | */ | ||
431 | msleep(50); | ||
432 | |||
433 | return (0); | ||
434 | } | 440 | } |
435 | 441 | ||
436 | /************************************************************************/ | 442 | /************************************************************************/ |
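Note: the discovery wait is pulled out of lpfc_config_port_post into lpfc_discovery_wait(), which polls once per second for up to 30 seconds and now reports -ETIMEDOUT instead of silently falling through. A compact sketch of a bounded polling loop of that shape (the condition callback is illustrative):

    #include <linux/delay.h>
    #include <linux/errno.h>

    static int wait_until(int (*done)(void *), void *arg, int max_secs)
    {
            int i;

            for (i = 0; i < max_secs; i++) {
                    if (done(arg))
                            return 0;
                    msleep(1000);   /* give discovery a second to progress */
            }
            return -ETIMEDOUT;
    }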
@@ -1339,7 +1345,8 @@ lpfc_offline(struct lpfc_hba * phba) | |||
1339 | struct lpfc_sli_ring *pring; | 1345 | struct lpfc_sli_ring *pring; |
1340 | struct lpfc_sli *psli; | 1346 | struct lpfc_sli *psli; |
1341 | unsigned long iflag; | 1347 | unsigned long iflag; |
1342 | int i = 0; | 1348 | int i; |
1349 | int cnt = 0; | ||
1343 | 1350 | ||
1344 | if (!phba) | 1351 | if (!phba) |
1345 | return 0; | 1352 | return 0; |
@@ -1348,20 +1355,31 @@ lpfc_offline(struct lpfc_hba * phba) | |||
1348 | return 0; | 1355 | return 0; |
1349 | 1356 | ||
1350 | psli = &phba->sli; | 1357 | psli = &phba->sli; |
1351 | pring = &psli->ring[psli->fcp_ring]; | ||
1352 | 1358 | ||
1353 | lpfc_linkdown(phba); | 1359 | lpfc_linkdown(phba); |
1360 | lpfc_sli_flush_mbox_queue(phba); | ||
1354 | 1361 | ||
1355 | /* The linkdown event takes 30 seconds to timeout. */ | 1362 | for (i = 0; i < psli->num_rings; i++) { |
1356 | while (pring->txcmplq_cnt) { | 1363 | pring = &psli->ring[i]; |
1357 | mdelay(10); | 1364 | /* The linkdown event takes 30 seconds to timeout. */ |
1358 | if (i++ > 3000) | 1365 | while (pring->txcmplq_cnt) { |
1359 | break; | 1366 | mdelay(10); |
1367 | if (cnt++ > 3000) { | ||
1368 | lpfc_printf_log(phba, | ||
1369 | KERN_WARNING, LOG_INIT, | ||
1370 | "%d:0466 Outstanding IO when " | ||
1371 | "bringing Adapter offline\n", | ||
1372 | phba->brd_no); | ||
1373 | break; | ||
1374 | } | ||
1375 | } | ||
1360 | } | 1376 | } |
1361 | 1377 | ||
1378 | |||
1362 | /* stop all timers associated with this hba */ | 1379 | /* stop all timers associated with this hba */ |
1363 | lpfc_stop_timer(phba); | 1380 | lpfc_stop_timer(phba); |
1364 | phba->work_hba_events = 0; | 1381 | phba->work_hba_events = 0; |
1382 | phba->work_ha = 0; | ||
1365 | 1383 | ||
1366 | lpfc_printf_log(phba, | 1384 | lpfc_printf_log(phba, |
1367 | KERN_WARNING, | 1385 | KERN_WARNING, |
@@ -1599,7 +1617,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
1599 | goto out_free_iocbq; | 1617 | goto out_free_iocbq; |
1600 | } | 1618 | } |
1601 | 1619 | ||
1602 | /* We can rely on a queue depth attribute only after SLI HBA setup */ | 1620 | /* |
1621 | * Set initial can_queue value since 0 is no longer supported and | ||
1622 | * scsi_add_host will fail. This will be adjusted later based on the | ||
1623 | * max xri value determined in hba setup. | ||
1624 | */ | ||
1603 | host->can_queue = phba->cfg_hba_queue_depth - 10; | 1625 | host->can_queue = phba->cfg_hba_queue_depth - 10; |
1604 | 1626 | ||
1605 | /* Tell the midlayer we support 16 byte commands */ | 1627 | /* Tell the midlayer we support 16 byte commands */ |
@@ -1639,6 +1661,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
1639 | goto out_free_irq; | 1661 | goto out_free_irq; |
1640 | } | 1662 | } |
1641 | 1663 | ||
1664 | /* | ||
1665 | * hba setup may have changed the hba_queue_depth so we need to adjust | ||
1666 | * the value of can_queue. | ||
1667 | */ | ||
1668 | host->can_queue = phba->cfg_hba_queue_depth - 10; | ||
1669 | |||
1670 | lpfc_discovery_wait(phba); | ||
1671 | |||
1642 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 1672 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
1643 | spin_lock_irq(phba->host->host_lock); | 1673 | spin_lock_irq(phba->host->host_lock); |
1644 | lpfc_poll_start_timer(phba); | 1674 | lpfc_poll_start_timer(phba); |
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index e42f22aaf71b..4d016c2a1b26 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba) | |||
651 | 651 | ||
652 | return mbq; | 652 | return mbq; |
653 | } | 653 | } |
654 | |||
655 | int | ||
656 | lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) | ||
657 | { | ||
658 | switch (cmd) { | ||
659 | case MBX_WRITE_NV: /* 0x03 */ | ||
660 | case MBX_UPDATE_CFG: /* 0x1B */ | ||
661 | case MBX_DOWN_LOAD: /* 0x1C */ | ||
662 | case MBX_DEL_LD_ENTRY: /* 0x1D */ | ||
663 | case MBX_LOAD_AREA: /* 0x81 */ | ||
664 | case MBX_FLASH_WR_ULA: /* 0x98 */ | ||
665 | case MBX_LOAD_EXP_ROM: /* 0x9C */ | ||
666 | return LPFC_MBOX_TMO_FLASH_CMD; | ||
667 | } | ||
668 | return LPFC_MBOX_TMO; | ||
669 | } | ||
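Note: lpfc_mbox_tmo_val() centralizes per-command mailbox timeouts: flash and firmware-download commands get the longer LPFC_MBOX_TMO_FLASH_CMD, everything else LPFC_MBOX_TMO. The sysfs_mbox_read change earlier in this patch is the first caller; it scales the value by HZ before passing it to the wait helper. A hedged usage sketch following that call site, with the lpfc types assumed from the driver headers:

    /* mirrors the sysfs_mbox_read call site */
    static int issue_with_tmo(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
    {
            int tmo = lpfc_mbox_tmo_val(phba, pmboxq->mb.mbxCommand);

            return lpfc_sli_issue_mbox_wait(phba, pmboxq, tmo * HZ);
    }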
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 07017658ac56..066292d3995a 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c | |||
@@ -133,6 +133,11 @@ lpfc_mem_free(struct lpfc_hba * phba) | |||
133 | 133 | ||
134 | pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); | 134 | pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); |
135 | pci_pool_destroy(phba->lpfc_mbuf_pool); | 135 | pci_pool_destroy(phba->lpfc_mbuf_pool); |
136 | |||
137 | /* Free the iocb lookup array */ | ||
138 | kfree(psli->iocbq_lookup); | ||
139 | psli->iocbq_lookup = NULL; | ||
140 | |||
136 | } | 141 | } |
137 | 142 | ||
138 | void * | 143 | void * |
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 27d60ad897cd..20449a8dd53d 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, | |||
179 | 179 | ||
180 | /* Abort outstanding I/O on NPort <nlp_DID> */ | 180 | /* Abort outstanding I/O on NPort <nlp_DID> */ |
181 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, | 181 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, |
182 | "%d:0201 Abort outstanding I/O on NPort x%x " | 182 | "%d:0205 Abort outstanding I/O on NPort x%x " |
183 | "Data: x%x x%x x%x\n", | 183 | "Data: x%x x%x x%x\n", |
184 | phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, | 184 | phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, |
185 | ndlp->nlp_state, ndlp->nlp_rpi); | 185 | ndlp->nlp_state, ndlp->nlp_rpi); |
@@ -393,6 +393,20 @@ lpfc_rcv_plogi(struct lpfc_hba * phba, | |||
393 | mbox->context2 = ndlp; | 393 | mbox->context2 = ndlp; |
394 | ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); | 394 | ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); |
395 | 395 | ||
396 | /* | ||
397 | * If there is an outstanding PLOGI issued, abort it before | ||
398 | * sending ACC rsp for received PLOGI. If pending plogi | ||
399 | * is not canceled here, the plogi will be rejected by | ||
400 | * remote port and will be retried. On a configuration with | ||
401 | * single discovery thread, this will cause a huge delay in | ||
402 | * discovery. Also this will cause multiple state machines | ||
403 | * running in parallel for this node. | ||
404 | */ | ||
405 | if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) { | ||
406 | /* software abort outstanding PLOGI */ | ||
407 | lpfc_els_abort(phba, ndlp, 1); | ||
408 | } | ||
409 | |||
396 | lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); | 410 | lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); |
397 | return 1; | 411 | return 1; |
398 | 412 | ||
@@ -1110,6 +1124,17 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba, | |||
1110 | phba->brd_no, | 1124 | phba->brd_no, |
1111 | did, mb->mbxStatus, phba->hba_state); | 1125 | did, mb->mbxStatus, phba->hba_state); |
1112 | 1126 | ||
1127 | /* | ||
1128 | * If RegLogin failed due to lack of HBA resources do not | ||
1129 | * retry discovery. | ||
1130 | */ | ||
1131 | if (mb->mbxStatus == MBXERR_RPI_FULL) { | ||
1132 | ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; | ||
1133 | ndlp->nlp_state = NLP_STE_UNUSED_NODE; | ||
1134 | lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST); | ||
1135 | return ndlp->nlp_state; | ||
1136 | } | ||
1137 | |||
1113 | /* Put ndlp in npr list set plogi timer for 1 sec */ | 1138 | /* Put ndlp in npr list set plogi timer for 1 sec */ |
1114 | mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); | 1139 | mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); |
1115 | spin_lock_irq(phba->host->host_lock); | 1140 | spin_lock_irq(phba->host->host_lock); |
@@ -1590,7 +1615,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba, | |||
1590 | 1615 | ||
1591 | lpfc_rcv_padisc(phba, ndlp, cmdiocb); | 1616 | lpfc_rcv_padisc(phba, ndlp, cmdiocb); |
1592 | 1617 | ||
1593 | if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { | 1618 | /* |
1619 | * Do not start discovery if discovery is about to start | ||
1620 | * or discovery in progress for this node. Starting discovery | ||
1621 | * here will affect the counting of discovery threads. | ||
1622 | */ | ||
1623 | if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) && | ||
1624 | (ndlp->nlp_flag & NLP_NPR_2B_DISC)){ | ||
1594 | if (ndlp->nlp_flag & NLP_NPR_ADISC) { | 1625 | if (ndlp->nlp_flag & NLP_NPR_ADISC) { |
1595 | ndlp->nlp_prev_state = NLP_STE_NPR_NODE; | 1626 | ndlp->nlp_prev_state = NLP_STE_NPR_NODE; |
1596 | ndlp->nlp_state = NLP_STE_ADISC_ISSUE; | 1627 | ndlp->nlp_state = NLP_STE_ADISC_ISSUE; |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index aea1ee472f3d..a8816a8738f8 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/delay.h> | ||
24 | 25 | ||
25 | #include <scsi/scsi.h> | 26 | #include <scsi/scsi.h> |
26 | #include <scsi/scsi_device.h> | 27 | #include <scsi/scsi_device.h> |
@@ -153,22 +154,6 @@ static void | |||
153 | lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) | 154 | lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) |
154 | { | 155 | { |
155 | unsigned long iflag = 0; | 156 | unsigned long iflag = 0; |
156 | /* | ||
157 | * There are only two special cases to consider. (1) the scsi command | ||
158 | * requested scatter-gather usage or (2) the scsi command allocated | ||
159 | * a request buffer, but did not request use_sg. There is a third | ||
160 | * case, but it does not require resource deallocation. | ||
161 | */ | ||
162 | if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { | ||
163 | dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, | ||
164 | psb->seg_cnt, psb->pCmd->sc_data_direction); | ||
165 | } else { | ||
166 | if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { | ||
167 | dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, | ||
168 | psb->pCmd->request_bufflen, | ||
169 | psb->pCmd->sc_data_direction); | ||
170 | } | ||
171 | } | ||
172 | 157 | ||
173 | spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); | 158 | spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); |
174 | psb->pCmd = NULL; | 159 | psb->pCmd = NULL; |
@@ -282,6 +267,27 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) | |||
282 | } | 267 | } |
283 | 268 | ||
284 | static void | 269 | static void |
270 | lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) | ||
271 | { | ||
272 | /* | ||
273 | * There are only two special cases to consider. (1) the scsi command | ||
274 | * requested scatter-gather usage or (2) the scsi command allocated | ||
275 | * a request buffer, but did not request use_sg. There is a third | ||
276 | * case, but it does not require resource deallocation. | ||
277 | */ | ||
278 | if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { | ||
279 | dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, | ||
280 | psb->seg_cnt, psb->pCmd->sc_data_direction); | ||
281 | } else { | ||
282 | if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { | ||
283 | dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, | ||
284 | psb->pCmd->request_bufflen, | ||
285 | psb->pCmd->sc_data_direction); | ||
286 | } | ||
287 | } | ||
288 | } | ||
289 | |||
290 | static void | ||
285 | lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) | 291 | lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) |
286 | { | 292 | { |
287 | struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; | 293 | struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; |
@@ -454,6 +460,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
454 | cmd->scsi_done(cmd); | 460 | cmd->scsi_done(cmd); |
455 | 461 | ||
456 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { | 462 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
463 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | ||
457 | lpfc_release_scsi_buf(phba, lpfc_cmd); | 464 | lpfc_release_scsi_buf(phba, lpfc_cmd); |
458 | return; | 465 | return; |
459 | } | 466 | } |
@@ -511,6 +518,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
511 | } | 518 | } |
512 | } | 519 | } |
513 | 520 | ||
521 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | ||
514 | lpfc_release_scsi_buf(phba, lpfc_cmd); | 522 | lpfc_release_scsi_buf(phba, lpfc_cmd); |
515 | } | 523 | } |
516 | 524 | ||
@@ -609,6 +617,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd, | |||
609 | static int | 617 | static int |
610 | lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, | 618 | lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, |
611 | struct lpfc_scsi_buf *lpfc_cmd, | 619 | struct lpfc_scsi_buf *lpfc_cmd, |
620 | unsigned int lun, | ||
612 | uint8_t task_mgmt_cmd) | 621 | uint8_t task_mgmt_cmd) |
613 | { | 622 | { |
614 | struct lpfc_sli *psli; | 623 | struct lpfc_sli *psli; |
@@ -627,8 +636,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, | |||
627 | piocb = &piocbq->iocb; | 636 | piocb = &piocbq->iocb; |
628 | 637 | ||
629 | fcp_cmnd = lpfc_cmd->fcp_cmnd; | 638 | fcp_cmnd = lpfc_cmd->fcp_cmnd; |
630 | int_to_scsilun(lpfc_cmd->pCmd->device->lun, | 639 | int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun); |
631 | &lpfc_cmd->fcp_cmnd->fcp_lun); | ||
632 | fcp_cmnd->fcpCntl2 = task_mgmt_cmd; | 640 | fcp_cmnd->fcpCntl2 = task_mgmt_cmd; |
633 | 641 | ||
634 | piocb->ulpCommand = CMD_FCP_ICMND64_CR; | 642 | piocb->ulpCommand = CMD_FCP_ICMND64_CR; |
@@ -655,14 +663,16 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, | |||
655 | 663 | ||
656 | static int | 664 | static int |
657 | lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, | 665 | lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, |
658 | unsigned tgt_id, struct lpfc_rport_data *rdata) | 666 | unsigned tgt_id, unsigned int lun, |
667 | struct lpfc_rport_data *rdata) | ||
659 | { | 668 | { |
660 | struct lpfc_iocbq *iocbq; | 669 | struct lpfc_iocbq *iocbq; |
661 | struct lpfc_iocbq *iocbqrsp; | 670 | struct lpfc_iocbq *iocbqrsp; |
662 | int ret; | 671 | int ret; |
663 | 672 | ||
664 | lpfc_cmd->rdata = rdata; | 673 | lpfc_cmd->rdata = rdata; |
665 | ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); | 674 | ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun, |
675 | FCP_TARGET_RESET); | ||
666 | if (!ret) | 676 | if (!ret) |
667 | return FAILED; | 677 | return FAILED; |
668 | 678 | ||
@@ -822,6 +832,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
822 | return 0; | 832 | return 0; |
823 | 833 | ||
824 | out_host_busy_free_buf: | 834 | out_host_busy_free_buf: |
835 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | ||
825 | lpfc_release_scsi_buf(phba, lpfc_cmd); | 836 | lpfc_release_scsi_buf(phba, lpfc_cmd); |
826 | out_host_busy: | 837 | out_host_busy: |
827 | return SCSI_MLQUEUE_HOST_BUSY; | 838 | return SCSI_MLQUEUE_HOST_BUSY; |
@@ -831,6 +842,21 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
831 | return 0; | 842 | return 0; |
832 | } | 843 | } |
833 | 844 | ||
845 | static void | ||
846 | lpfc_block_error_handler(struct scsi_cmnd *cmnd) | ||
847 | { | ||
848 | struct Scsi_Host *shost = cmnd->device->host; | ||
849 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); | ||
850 | |||
851 | spin_lock_irq(shost->host_lock); | ||
852 | while (rport->port_state == FC_PORTSTATE_BLOCKED) { | ||
853 | spin_unlock_irq(shost->host_lock); | ||
854 | msleep(1000); | ||
855 | spin_lock_irq(shost->host_lock); | ||
856 | } | ||
857 | spin_unlock_irq(shost->host_lock); | ||
858 | return; | ||
859 | } | ||
834 | 860 | ||
835 | static int | 861 | static int |
836 | lpfc_abort_handler(struct scsi_cmnd *cmnd) | 862 | lpfc_abort_handler(struct scsi_cmnd *cmnd) |
@@ -845,6 +871,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
845 | unsigned int loop_count = 0; | 871 | unsigned int loop_count = 0; |
846 | int ret = SUCCESS; | 872 | int ret = SUCCESS; |
847 | 873 | ||
874 | lpfc_block_error_handler(cmnd); | ||
848 | spin_lock_irq(shost->host_lock); | 875 | spin_lock_irq(shost->host_lock); |
849 | 876 | ||
850 | lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; | 877 | lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; |
@@ -947,6 +974,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) | |||
947 | int ret = FAILED; | 974 | int ret = FAILED; |
948 | int cnt, loopcnt; | 975 | int cnt, loopcnt; |
949 | 976 | ||
977 | lpfc_block_error_handler(cmnd); | ||
950 | spin_lock_irq(shost->host_lock); | 978 | spin_lock_irq(shost->host_lock); |
951 | /* | 979 | /* |
952 | * If target is not in a MAPPED state, delay the reset until | 980 | * If target is not in a MAPPED state, delay the reset until |
@@ -969,12 +997,12 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) | |||
969 | if (lpfc_cmd == NULL) | 997 | if (lpfc_cmd == NULL) |
970 | goto out; | 998 | goto out; |
971 | 999 | ||
972 | lpfc_cmd->pCmd = cmnd; | ||
973 | lpfc_cmd->timeout = 60; | 1000 | lpfc_cmd->timeout = 60; |
974 | lpfc_cmd->scsi_hba = phba; | 1001 | lpfc_cmd->scsi_hba = phba; |
975 | lpfc_cmd->rdata = rdata; | 1002 | lpfc_cmd->rdata = rdata; |
976 | 1003 | ||
977 | ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); | 1004 | ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun, |
1005 | FCP_LUN_RESET); | ||
978 | if (!ret) | 1006 | if (!ret) |
979 | goto out_free_scsi_buf; | 1007 | goto out_free_scsi_buf; |
980 | 1008 | ||
@@ -1001,7 +1029,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) | |||
1001 | cmd_status = iocbqrsp->iocb.ulpStatus; | 1029 | cmd_status = iocbqrsp->iocb.ulpStatus; |
1002 | 1030 | ||
1003 | lpfc_sli_release_iocbq(phba, iocbqrsp); | 1031 | lpfc_sli_release_iocbq(phba, iocbqrsp); |
1004 | lpfc_release_scsi_buf(phba, lpfc_cmd); | ||
1005 | 1032 | ||
1006 | /* | 1033 | /* |
1007 | * All outstanding txcmplq I/Os should have been aborted by the device. | 1034 | * All outstanding txcmplq I/Os should have been aborted by the device. |
@@ -1040,6 +1067,8 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) | |||
1040 | } | 1067 | } |
1041 | 1068 | ||
1042 | out_free_scsi_buf: | 1069 | out_free_scsi_buf: |
1070 | lpfc_release_scsi_buf(phba, lpfc_cmd); | ||
1071 | |||
1043 | lpfc_printf_log(phba, KERN_ERR, LOG_FCP, | 1072 | lpfc_printf_log(phba, KERN_ERR, LOG_FCP, |
1044 | "%d:0713 SCSI layer issued LUN reset (%d, %d) " | 1073 | "%d:0713 SCSI layer issued LUN reset (%d, %d) " |
1045 | "Data: x%x x%x x%x\n", | 1074 | "Data: x%x x%x x%x\n", |
@@ -1062,6 +1091,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) | |||
1062 | int cnt, loopcnt; | 1091 | int cnt, loopcnt; |
1063 | struct lpfc_scsi_buf * lpfc_cmd; | 1092 | struct lpfc_scsi_buf * lpfc_cmd; |
1064 | 1093 | ||
1094 | lpfc_block_error_handler(cmnd); | ||
1065 | spin_lock_irq(shost->host_lock); | 1095 | spin_lock_irq(shost->host_lock); |
1066 | 1096 | ||
1067 | lpfc_cmd = lpfc_get_scsi_buf(phba); | 1097 | lpfc_cmd = lpfc_get_scsi_buf(phba); |
@@ -1070,7 +1100,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) | |||
1070 | 1100 | ||
1071 | /* The lpfc_cmd storage is reused. Set all loop invariants. */ | 1101 | /* The lpfc_cmd storage is reused. Set all loop invariants. */ |
1072 | lpfc_cmd->timeout = 60; | 1102 | lpfc_cmd->timeout = 60; |
1073 | lpfc_cmd->pCmd = cmnd; | ||
1074 | lpfc_cmd->scsi_hba = phba; | 1103 | lpfc_cmd->scsi_hba = phba; |
1075 | 1104 | ||
1076 | /* | 1105 | /* |
@@ -1078,7 +1107,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) | |||
1078 | * targets known to the driver. Should any target reset | 1107 | * targets known to the driver. Should any target reset |
1079 | * fail, this routine returns failure to the midlayer. | 1108 | * fail, this routine returns failure to the midlayer. |
1080 | */ | 1109 | */ |
1081 | for (i = 0; i < MAX_FCP_TARGET; i++) { | 1110 | for (i = 0; i < LPFC_MAX_TARGET; i++) { |
1082 | /* Search the mapped list for this target ID */ | 1111 | /* Search the mapped list for this target ID */ |
1083 | match = 0; | 1112 | match = 0; |
1084 | list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { | 1113 | list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { |
@@ -1090,11 +1119,11 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) | |||
1090 | if (!match) | 1119 | if (!match) |
1091 | continue; | 1120 | continue; |
1092 | 1121 | ||
1093 | ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, | 1122 | ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun, |
1094 | i, ndlp->rport->dd_data); | 1123 | ndlp->rport->dd_data); |
1095 | if (ret != SUCCESS) { | 1124 | if (ret != SUCCESS) { |
1096 | lpfc_printf_log(phba, KERN_ERR, LOG_FCP, | 1125 | lpfc_printf_log(phba, KERN_ERR, LOG_FCP, |
1097 | "%d:0713 Bus Reset on target %d failed\n", | 1126 | "%d:0700 Bus Reset on target %d failed\n", |
1098 | phba->brd_no, i); | 1127 | phba->brd_no, i); |
1099 | err_count++; | 1128 | err_count++; |
1100 | } | 1129 | } |
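Annotation: three related lpfc_scsi.c changes. DMA unmapping is split out of lpfc_release_scsi_buf() into the new lpfc_scsi_unprep_dma_buf(), so the reset handlers can borrow a scsi_buf that never had a command mapped; the task-management helpers take an explicit lun instead of reading it from a borrowed scsi_cmnd; and each error-handler entry point first calls lpfc_block_error_handler() to wait out an rport in FC_PORTSTATE_BLOCKED. A rough sketch of the resulting ordering in an error handler; the function itself and the way phba is passed in are illustrative, not taken from the driver:

static int
example_reset_handler(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	struct lpfc_scsi_buf *lpfc_cmd;

	lpfc_block_error_handler(cmnd);		/* wait for the rport to unblock */

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		return FAILED;

	/* ... build a task management IOCB with an explicit LUN and issue
	 * it with lpfc_sli_issue_iocb_wait() ... */

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);	/* undo any DMA mapping */
	lpfc_release_scsi_buf(phba, lpfc_cmd);		/* back onto the free list */
	return SUCCESS;
}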
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index bb69a7a1ec59..70f4d5a1348e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -191,35 +191,12 @@ static int | |||
191 | lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, | 191 | lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, |
192 | struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) | 192 | struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) |
193 | { | 193 | { |
194 | uint16_t iotag; | ||
195 | |||
196 | list_add_tail(&piocb->list, &pring->txcmplq); | 194 | list_add_tail(&piocb->list, &pring->txcmplq); |
197 | pring->txcmplq_cnt++; | 195 | pring->txcmplq_cnt++; |
198 | if (unlikely(pring->ringno == LPFC_ELS_RING)) | 196 | if (unlikely(pring->ringno == LPFC_ELS_RING)) |
199 | mod_timer(&phba->els_tmofunc, | 197 | mod_timer(&phba->els_tmofunc, |
200 | jiffies + HZ * (phba->fc_ratov << 1)); | 198 | jiffies + HZ * (phba->fc_ratov << 1)); |
201 | 199 | ||
202 | if (pring->fast_lookup) { | ||
203 | /* Setup fast lookup based on iotag for completion */ | ||
204 | iotag = piocb->iocb.ulpIoTag; | ||
205 | if (iotag && (iotag < pring->fast_iotag)) | ||
206 | *(pring->fast_lookup + iotag) = piocb; | ||
207 | else { | ||
208 | |||
209 | /* Cmd ring <ringno> put: iotag <iotag> greater then | ||
210 | configured max <fast_iotag> wd0 <icmd> */ | ||
211 | lpfc_printf_log(phba, | ||
212 | KERN_ERR, | ||
213 | LOG_SLI, | ||
214 | "%d:0316 Cmd ring %d put: iotag x%x " | ||
215 | "greater then configured max x%x " | ||
216 | "wd0 x%x\n", | ||
217 | phba->brd_no, | ||
218 | pring->ringno, iotag, | ||
219 | pring->fast_iotag, | ||
220 | *(((uint32_t *)(&piocb->iocb)) + 7)); | ||
221 | } | ||
222 | } | ||
223 | return (0); | 200 | return (0); |
224 | } | 201 | } |
225 | 202 | ||
@@ -343,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) | |||
343 | kfree(old_arr); | 320 | kfree(old_arr); |
344 | return iotag; | 321 | return iotag; |
345 | } | 322 | } |
346 | } | 323 | } else |
324 | spin_unlock_irq(phba->host->host_lock); | ||
347 | 325 | ||
348 | lpfc_printf_log(phba, KERN_ERR,LOG_SLI, | 326 | lpfc_printf_log(phba, KERN_ERR,LOG_SLI, |
349 | "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", | 327 | "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", |
@@ -601,7 +579,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba) | |||
601 | /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus | 579 | /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus |
602 | <status> */ | 580 | <status> */ |
603 | lpfc_printf_log(phba, | 581 | lpfc_printf_log(phba, |
604 | KERN_ERR, | 582 | KERN_WARNING, |
605 | LOG_MBOX | LOG_SLI, | 583 | LOG_MBOX | LOG_SLI, |
606 | "%d:0304 Stray Mailbox Interrupt " | 584 | "%d:0304 Stray Mailbox Interrupt " |
607 | "mbxCommand x%x mbxStatus x%x\n", | 585 | "mbxCommand x%x mbxStatus x%x\n", |
@@ -992,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) | |||
992 | * resources need to be recovered. | 970 | * resources need to be recovered. |
993 | */ | 971 | */ |
994 | if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { | 972 | if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { |
995 | printk(KERN_INFO "%s: IOCB cmd 0x%x processed." | 973 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
996 | " Skipping completion\n", __FUNCTION__, | 974 | "%d:0314 IOCB cmd 0x%x" |
997 | irsp->ulpCommand); | 975 | " processed. Skipping" |
976 | " completion", phba->brd_no, | ||
977 | irsp->ulpCommand); | ||
998 | break; | 978 | break; |
999 | } | 979 | } |
1000 | 980 | ||
@@ -1127,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, | |||
1127 | if (unlikely(irsp->ulpStatus)) { | 1107 | if (unlikely(irsp->ulpStatus)) { |
1128 | /* Rsp ring <ringno> error: IOCB */ | 1108 | /* Rsp ring <ringno> error: IOCB */ |
1129 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | 1109 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
1130 | "%d:0326 Rsp Ring %d error: IOCB Data: " | 1110 | "%d:0336 Rsp Ring %d error: IOCB Data: " |
1131 | "x%x x%x x%x x%x x%x x%x x%x x%x\n", | 1111 | "x%x x%x x%x x%x x%x x%x x%x x%x\n", |
1132 | phba->brd_no, pring->ringno, | 1112 | phba->brd_no, pring->ringno, |
1133 | irsp->un.ulpWord[0], irsp->un.ulpWord[1], | 1113 | irsp->un.ulpWord[0], irsp->un.ulpWord[1], |
@@ -1145,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, | |||
1145 | * resources need to be recovered. | 1125 | * resources need to be recovered. |
1146 | */ | 1126 | */ |
1147 | if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { | 1127 | if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { |
1148 | printk(KERN_INFO "%s: IOCB cmd 0x%x processed. " | 1128 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
1149 | "Skipping completion\n", __FUNCTION__, | 1129 | "%d:0333 IOCB cmd 0x%x" |
1150 | irsp->ulpCommand); | 1130 | " processed. Skipping" |
1131 | " completion\n", phba->brd_no, | ||
1132 | irsp->ulpCommand); | ||
1151 | break; | 1133 | break; |
1152 | } | 1134 | } |
1153 | 1135 | ||
@@ -1178,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, | |||
1178 | } else { | 1160 | } else { |
1179 | /* Unknown IOCB command */ | 1161 | /* Unknown IOCB command */ |
1180 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 1162 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
1181 | "%d:0321 Unknown IOCB command " | 1163 | "%d:0334 Unknown IOCB command " |
1182 | "Data: x%x, x%x x%x x%x x%x\n", | 1164 | "Data: x%x, x%x x%x x%x x%x\n", |
1183 | phba->brd_no, type, irsp->ulpCommand, | 1165 | phba->brd_no, type, irsp->ulpCommand, |
1184 | irsp->ulpStatus, irsp->ulpIoTag, | 1166 | irsp->ulpStatus, irsp->ulpIoTag, |
@@ -1261,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, | |||
1261 | lpfc_printf_log(phba, | 1243 | lpfc_printf_log(phba, |
1262 | KERN_ERR, | 1244 | KERN_ERR, |
1263 | LOG_SLI, | 1245 | LOG_SLI, |
1264 | "%d:0312 Ring %d handler: portRspPut %d " | 1246 | "%d:0303 Ring %d handler: portRspPut %d " |
1265 | "is bigger then rsp ring %d\n", | 1247 | "is bigger then rsp ring %d\n", |
1266 | phba->brd_no, | 1248 | phba->brd_no, |
1267 | pring->ringno, portRspPut, portRspMax); | 1249 | pring->ringno, portRspPut, portRspMax); |
@@ -1406,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, | |||
1406 | lpfc_printf_log(phba, | 1388 | lpfc_printf_log(phba, |
1407 | KERN_ERR, | 1389 | KERN_ERR, |
1408 | LOG_SLI, | 1390 | LOG_SLI, |
1409 | "%d:0321 Unknown IOCB command " | 1391 | "%d:0335 Unknown IOCB command " |
1410 | "Data: x%x x%x x%x x%x\n", | 1392 | "Data: x%x x%x x%x x%x\n", |
1411 | phba->brd_no, | 1393 | phba->brd_no, |
1412 | irsp->ulpCommand, | 1394 | irsp->ulpCommand, |
@@ -1422,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, | |||
1422 | next_iocb, | 1404 | next_iocb, |
1423 | &saveq->list, | 1405 | &saveq->list, |
1424 | list) { | 1406 | list) { |
1407 | list_del(&rspiocbp->list); | ||
1425 | lpfc_sli_release_iocbq(phba, | 1408 | lpfc_sli_release_iocbq(phba, |
1426 | rspiocbp); | 1409 | rspiocbp); |
1427 | } | 1410 | } |
1428 | } | 1411 | } |
1429 | |||
1430 | lpfc_sli_release_iocbq(phba, saveq); | 1412 | lpfc_sli_release_iocbq(phba, saveq); |
1431 | } | 1413 | } |
1432 | } | 1414 | } |
@@ -1570,8 +1552,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask) | |||
1570 | 1552 | ||
1571 | void lpfc_reset_barrier(struct lpfc_hba * phba) | 1553 | void lpfc_reset_barrier(struct lpfc_hba * phba) |
1572 | { | 1554 | { |
1573 | uint32_t * resp_buf; | 1555 | uint32_t __iomem *resp_buf; |
1574 | uint32_t * mbox_buf; | 1556 | uint32_t __iomem *mbox_buf; |
1575 | volatile uint32_t mbox; | 1557 | volatile uint32_t mbox; |
1576 | uint32_t hc_copy; | 1558 | uint32_t hc_copy; |
1577 | int i; | 1559 | int i; |
@@ -1587,7 +1569,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba) | |||
1587 | * Tell the other part of the chip to suspend temporarily all | 1569 | * Tell the other part of the chip to suspend temporarily all |
1588 | * its DMA activity. | 1570 | * its DMA activity. |
1589 | */ | 1571 | */ |
1590 | resp_buf = (uint32_t *)phba->MBslimaddr; | 1572 | resp_buf = phba->MBslimaddr; |
1591 | 1573 | ||
1592 | /* Disable the error attention */ | 1574 | /* Disable the error attention */ |
1593 | hc_copy = readl(phba->HCregaddr); | 1575 | hc_copy = readl(phba->HCregaddr); |
@@ -1605,7 +1587,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba) | |||
1605 | ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; | 1587 | ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; |
1606 | 1588 | ||
1607 | writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); | 1589 | writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); |
1608 | mbox_buf = (uint32_t *)phba->MBslimaddr; | 1590 | mbox_buf = phba->MBslimaddr; |
1609 | writel(mbox, mbox_buf); | 1591 | writel(mbox, mbox_buf); |
1610 | 1592 | ||
1611 | for (i = 0; | 1593 | for (i = 0; |
@@ -1734,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba) | |||
1734 | phba->fc_myDID = 0; | 1716 | phba->fc_myDID = 0; |
1735 | phba->fc_prevDID = 0; | 1717 | phba->fc_prevDID = 0; |
1736 | 1718 | ||
1737 | psli->sli_flag = 0; | ||
1738 | |||
1739 | /* Turn off parity checking and serr during the physical reset */ | 1719 | /* Turn off parity checking and serr during the physical reset */ |
1740 | pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); | 1720 | pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); |
1741 | pci_write_config_word(phba->pcidev, PCI_COMMAND, | 1721 | pci_write_config_word(phba->pcidev, PCI_COMMAND, |
1742 | (cfg_value & | 1722 | (cfg_value & |
1743 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); | 1723 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); |
1744 | 1724 | ||
1745 | psli->sli_flag &= ~LPFC_SLI2_ACTIVE; | 1725 | psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); |
1746 | /* Now toggle INITFF bit in the Host Control Register */ | 1726 | /* Now toggle INITFF bit in the Host Control Register */ |
1747 | writel(HC_INITFF, phba->HCregaddr); | 1727 | writel(HC_INITFF, phba->HCregaddr); |
1748 | mdelay(1); | 1728 | mdelay(1); |
@@ -1783,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) | |||
1783 | 1763 | ||
1784 | /* Restart HBA */ | 1764 | /* Restart HBA */ |
1785 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | 1765 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
1786 | "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no, | 1766 | "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no, |
1787 | phba->hba_state, psli->sli_flag); | 1767 | phba->hba_state, psli->sli_flag); |
1788 | 1768 | ||
1789 | word0 = 0; | 1769 | word0 = 0; |
@@ -1805,7 +1785,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) | |||
1805 | skip_post = 0; | 1785 | skip_post = 0; |
1806 | word0 = 0; /* This is really setting up word1 */ | 1786 | word0 = 0; /* This is really setting up word1 */ |
1807 | } | 1787 | } |
1808 | to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t); | 1788 | to_slim = phba->MBslimaddr + sizeof (uint32_t); |
1809 | writel(*(uint32_t *) mb, to_slim); | 1789 | writel(*(uint32_t *) mb, to_slim); |
1810 | readl(to_slim); /* flush */ | 1790 | readl(to_slim); /* flush */ |
1811 | 1791 | ||
@@ -1815,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) | |||
1815 | 1795 | ||
1816 | spin_unlock_irq(phba->host->host_lock); | 1796 | spin_unlock_irq(phba->host->host_lock); |
1817 | 1797 | ||
1798 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); | ||
1799 | psli->stats_start = get_seconds(); | ||
1800 | |||
1818 | if (skip_post) | 1801 | if (skip_post) |
1819 | mdelay(100); | 1802 | mdelay(100); |
1820 | else | 1803 | else |
@@ -1925,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) | |||
1925 | } | 1908 | } |
1926 | 1909 | ||
1927 | while (resetcount < 2 && !done) { | 1910 | while (resetcount < 2 && !done) { |
1911 | spin_lock_irq(phba->host->host_lock); | ||
1912 | phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; | ||
1913 | spin_unlock_irq(phba->host->host_lock); | ||
1928 | phba->hba_state = LPFC_STATE_UNKNOWN; | 1914 | phba->hba_state = LPFC_STATE_UNKNOWN; |
1929 | lpfc_sli_brdrestart(phba); | 1915 | lpfc_sli_brdrestart(phba); |
1930 | msleep(2500); | 1916 | msleep(2500); |
@@ -1932,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) | |||
1932 | if (rc) | 1918 | if (rc) |
1933 | break; | 1919 | break; |
1934 | 1920 | ||
1921 | spin_lock_irq(phba->host->host_lock); | ||
1922 | phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | ||
1923 | spin_unlock_irq(phba->host->host_lock); | ||
1935 | resetcount++; | 1924 | resetcount++; |
1936 | 1925 | ||
1937 | /* Call pre CONFIG_PORT mailbox command initialization. A value of 0 | 1926 | /* Call pre CONFIG_PORT mailbox command initialization. A value of 0 |
@@ -2217,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) | |||
2217 | return (MBX_NOT_FINISHED); | 2206 | return (MBX_NOT_FINISHED); |
2218 | } | 2207 | } |
2219 | /* timeout active mbox command */ | 2208 | /* timeout active mbox command */ |
2220 | mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO); | 2209 | mod_timer(&psli->mbox_tmo, (jiffies + |
2210 | (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); | ||
2221 | } | 2211 | } |
2222 | 2212 | ||
2223 | /* Mailbox cmd <cmd> issue */ | 2213 | /* Mailbox cmd <cmd> issue */ |
@@ -2277,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) | |||
2277 | break; | 2267 | break; |
2278 | 2268 | ||
2279 | case MBX_POLL: | 2269 | case MBX_POLL: |
2280 | i = 0; | ||
2281 | psli->mbox_active = NULL; | 2270 | psli->mbox_active = NULL; |
2282 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { | 2271 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { |
2283 | /* First read mbox status word */ | 2272 | /* First read mbox status word */ |
@@ -2291,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) | |||
2291 | /* Read the HBA Host Attention Register */ | 2280 | /* Read the HBA Host Attention Register */ |
2292 | ha_copy = readl(phba->HAregaddr); | 2281 | ha_copy = readl(phba->HAregaddr); |
2293 | 2282 | ||
2283 | i = lpfc_mbox_tmo_val(phba, mb->mbxCommand); | ||
2284 | i *= 1000; /* Convert to ms */ | ||
2285 | |||
2294 | /* Wait for command to complete */ | 2286 | /* Wait for command to complete */ |
2295 | while (((word0 & OWN_CHIP) == OWN_CHIP) || | 2287 | while (((word0 & OWN_CHIP) == OWN_CHIP) || |
2296 | (!(ha_copy & HA_MBATT) && | 2288 | (!(ha_copy & HA_MBATT) && |
2297 | (phba->hba_state > LPFC_WARM_START))) { | 2289 | (phba->hba_state > LPFC_WARM_START))) { |
2298 | if (i++ >= 100) { | 2290 | if (i-- <= 0) { |
2299 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | 2291 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
2300 | spin_unlock_irqrestore(phba->host->host_lock, | 2292 | spin_unlock_irqrestore(phba->host->host_lock, |
2301 | drvr_flag); | 2293 | drvr_flag); |
@@ -2313,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) | |||
2313 | 2305 | ||
2314 | /* Can be in interrupt context, do not sleep */ | 2306 | /* Can be in interrupt context, do not sleep */ |
2315 | /* (or might be called with interrupts disabled) */ | 2307 | /* (or might be called with interrupts disabled) */ |
2316 | mdelay(i); | 2308 | mdelay(1); |
2317 | 2309 | ||
2318 | spin_lock_irqsave(phba->host->host_lock, drvr_flag); | 2310 | spin_lock_irqsave(phba->host->host_lock, drvr_flag); |
2319 | 2311 | ||
@@ -2659,8 +2651,6 @@ lpfc_sli_hba_down(struct lpfc_hba * phba) | |||
2659 | 2651 | ||
2660 | INIT_LIST_HEAD(&(pring->txq)); | 2652 | INIT_LIST_HEAD(&(pring->txq)); |
2661 | 2653 | ||
2662 | kfree(pring->fast_lookup); | ||
2663 | pring->fast_lookup = NULL; | ||
2664 | } | 2654 | } |
2665 | 2655 | ||
2666 | spin_unlock_irqrestore(phba->host->host_lock, flags); | 2656 | spin_unlock_irqrestore(phba->host->host_lock, flags); |
@@ -3030,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, | |||
3030 | 3020 | ||
3031 | if (timeleft == 0) { | 3021 | if (timeleft == 0) { |
3032 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 3022 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
3033 | "%d:0329 IOCB wait timeout error - no " | 3023 | "%d:0338 IOCB wait timeout error - no " |
3034 | "wake response Data x%x\n", | 3024 | "wake response Data x%x\n", |
3035 | phba->brd_no, timeout); | 3025 | phba->brd_no, timeout); |
3036 | retval = IOCB_TIMEDOUT; | 3026 | retval = IOCB_TIMEDOUT; |
@@ -3110,6 +3100,24 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, | |||
3110 | return retval; | 3100 | return retval; |
3111 | } | 3101 | } |
3112 | 3102 | ||
3103 | int | ||
3104 | lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) | ||
3105 | { | ||
3106 | int i = 0; | ||
3107 | |||
3108 | while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) { | ||
3109 | if (i++ > LPFC_MBOX_TMO * 1000) | ||
3110 | return 1; | ||
3111 | |||
3112 | if (lpfc_sli_handle_mb_event(phba) == 0) | ||
3113 | i = 0; | ||
3114 | |||
3115 | msleep(1); | ||
3116 | } | ||
3117 | |||
3118 | return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; | ||
3119 | } | ||
3120 | |||
3113 | irqreturn_t | 3121 | irqreturn_t |
3114 | lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs) | 3122 | lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs) |
3115 | { | 3123 | { |
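Annotation: the mailbox path now uses a per-command timeout via lpfc_mbox_tmo_val() (defined outside these hunks) instead of the fixed LPFC_MBOX_TMO, the MBX_POLL loop counts that timeout down in 1 ms steps with mdelay(1), and the new lpfc_sli_flush_mbox_queue() lets teardown wait for an active mailbox command to drain. A sketch of what the timeout helper is expected to look like; which opcodes qualify for the long flash timeout is an assumption here, only the two constants come from this patch:

static int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, int mbxCommand)
{
	/* Flash write/erase commands may spawn several long erases, so they
	 * get the 300 second timeout; everything else keeps the 30 s default. */
	if (mbox_cmd_writes_flash(mbxCommand))	/* hypothetical predicate */
		return LPFC_MBOX_TMO_FLASH_CMD;
	return LPFC_MBOX_TMO;
}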
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index a52d6c6cf083..e26de6809358 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h | |||
@@ -135,8 +135,6 @@ struct lpfc_sli_ring { | |||
135 | uint32_t fast_iotag; /* max fastlookup based iotag */ | 135 | uint32_t fast_iotag; /* max fastlookup based iotag */ |
136 | uint32_t iotag_ctr; /* keeps track of the next iotag to use */ | 136 | uint32_t iotag_ctr; /* keeps track of the next iotag to use */ |
137 | uint32_t iotag_max; /* max iotag value to use */ | 137 | uint32_t iotag_max; /* max iotag value to use */ |
138 | struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by | ||
139 | iotag */ | ||
140 | struct list_head txq; | 138 | struct list_head txq; |
141 | uint16_t txq_cnt; /* current length of queue */ | 139 | uint16_t txq_cnt; /* current length of queue */ |
142 | uint16_t txq_max; /* max length */ | 140 | uint16_t txq_max; /* max length */ |
@@ -174,6 +172,18 @@ struct lpfc_sli_stat { | |||
174 | uint32_t mbox_busy; /* Mailbox cmd busy */ | 172 | uint32_t mbox_busy; /* Mailbox cmd busy */ |
175 | }; | 173 | }; |
176 | 174 | ||
175 | /* Structure to store link status values when port stats are reset */ | ||
176 | struct lpfc_lnk_stat { | ||
177 | uint32_t link_failure_count; | ||
178 | uint32_t loss_of_sync_count; | ||
179 | uint32_t loss_of_signal_count; | ||
180 | uint32_t prim_seq_protocol_err_count; | ||
181 | uint32_t invalid_tx_word_count; | ||
182 | uint32_t invalid_crc_count; | ||
183 | uint32_t error_frames; | ||
184 | uint32_t link_events; | ||
185 | }; | ||
186 | |||
177 | /* Structure used to hold SLI information */ | 187 | /* Structure used to hold SLI information */ |
178 | struct lpfc_sli { | 188 | struct lpfc_sli { |
179 | uint32_t num_rings; | 189 | uint32_t num_rings; |
@@ -203,6 +213,8 @@ struct lpfc_sli { | |||
203 | struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ | 213 | struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ |
204 | size_t iocbq_lookup_len; /* current lengs of the array */ | 214 | size_t iocbq_lookup_len; /* current lengs of the array */ |
205 | uint16_t last_iotag; /* last allocated IOTAG */ | 215 | uint16_t last_iotag; /* last allocated IOTAG */ |
216 | unsigned long stats_start; /* in seconds */ | ||
217 | struct lpfc_lnk_stat lnk_stat_offsets; | ||
206 | }; | 218 | }; |
207 | 219 | ||
208 | /* Given a pointer to the start of the ring, and the slot number of | 220 | /* Given a pointer to the start of the ring, and the slot number of |
@@ -213,3 +225,9 @@ struct lpfc_sli { | |||
213 | 225 | ||
214 | #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox | 226 | #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox |
215 | command */ | 227 | command */ |
228 | #define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write | ||
229 | * or erase cmds. This is especially | ||
230 | * long because of the potential of | ||
231 | * multiple flash erases that can be | ||
232 | * spawned. | ||
233 | */ | ||
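Annotation: struct lpfc_sli grows a snapshot of the link statistics (lnk_stat_offsets) plus the time it was taken (stats_start), which lpfc_sli_brdrestart() now resets. The expected use, sketched below, is that a statistics reset stores the running hardware counters as offsets and later reports subtract them; the reporting callback and the fc_host_statistics consumer are assumptions, they are not part of these hunks:

static void
report_link_failures_since_reset(struct lpfc_sli *psli,
				 uint64_t hw_link_failures,
				 struct fc_host_statistics *stats)
{
	/* Hardware counters run from power-on; subtracting the snapshot
	 * taken when the user last reset the statistics yields the
	 * per-interval value. */
	stats->link_failure_count =
		hw_link_failures - psli->lnk_stat_offsets.link_failure_count;
	stats->seconds_since_last_reset = get_seconds() - psli->stats_start;
}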
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 6b737568b831..c7091ea29f3f 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.1.6" | 21 | #define LPFC_DRIVER_VERSION "8.1.9" |
22 | 22 | ||
23 | #define LPFC_DRIVER_NAME "lpfc" | 23 | #define LPFC_DRIVER_NAME "lpfc" |
24 | 24 | ||
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c index 93edaa8696cf..89ef34df5a1d 100644 --- a/drivers/scsi/mac53c94.c +++ b/drivers/scsi/mac53c94.c | |||
@@ -378,7 +378,7 @@ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd) | |||
378 | int nseg; | 378 | int nseg; |
379 | 379 | ||
380 | total = 0; | 380 | total = 0; |
381 | scl = (struct scatterlist *) cmd->buffer; | 381 | scl = (struct scatterlist *) cmd->request_buffer; |
382 | nseg = pci_map_sg(state->pdev, scl, cmd->use_sg, | 382 | nseg = pci_map_sg(state->pdev, scl, cmd->use_sg, |
383 | cmd->sc_data_direction); | 383 | cmd->sc_data_direction); |
384 | for (i = 0; i < nseg; ++i) { | 384 | for (i = 0; i < nseg; ++i) { |
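Annotation: this is the same mechanical conversion applied to several drivers in this series (mac53c94 here, and mesh, pluto and qlogicpti below): the scatter-gather list is always taken from cmd->request_buffer now that the old cmd->buffer field is on its way out. The access pattern the converted code relies on, as a sketch with the actual mapping calls elided:

static void map_data_sketch(struct scsi_cmnd *cmd)
{
	if (cmd->use_sg) {
		/* request_buffer holds use_sg scatterlist entries */
		struct scatterlist *sg =
			(struct scatterlist *) cmd->request_buffer;
		/* ... pci_map_sg()/dma_map_sg() over sg[0..use_sg) ... */
	} else {
		/* request_buffer is a flat buffer of request_bufflen bytes */
		void *buf = cmd->request_buffer;
		/* ... pci_map_single()/dma_map_single() on buf ... */
	}
}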
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h index 4675343228ad..8cd0bd1d0f7c 100644 --- a/drivers/scsi/megaraid/mega_common.h +++ b/drivers/scsi/megaraid/mega_common.h | |||
@@ -37,6 +37,12 @@ | |||
37 | #define LSI_MAX_CHANNELS 16 | 37 | #define LSI_MAX_CHANNELS 16 |
38 | #define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) | 38 | #define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) |
39 | 39 | ||
40 | #define HBA_SIGNATURE_64_BIT 0x299 | ||
41 | #define PCI_CONF_AMISIG64 0xa4 | ||
42 | |||
43 | #define MEGA_SCSI_INQ_EVPD 1 | ||
44 | #define MEGA_INVALID_FIELD_IN_CDB 0x24 | ||
45 | |||
40 | 46 | ||
41 | /** | 47 | /** |
42 | * scb_t - scsi command control block | 48 | * scb_t - scsi command control block |
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h index bdaee144a1c3..b8aa34202ec3 100644 --- a/drivers/scsi/megaraid/megaraid_ioctl.h +++ b/drivers/scsi/megaraid/megaraid_ioctl.h | |||
@@ -132,6 +132,10 @@ typedef struct uioc { | |||
132 | /* Driver Data: */ | 132 | /* Driver Data: */ |
133 | void __user * user_data; | 133 | void __user * user_data; |
134 | uint32_t user_data_len; | 134 | uint32_t user_data_len; |
135 | |||
136 | /* 64bit alignment */ | ||
137 | uint32_t pad_for_64bit_align; | ||
138 | |||
135 | mraid_passthru_t __user *user_pthru; | 139 | mraid_passthru_t __user *user_pthru; |
136 | 140 | ||
137 | mraid_passthru_t *pthru32; | 141 | mraid_passthru_t *pthru32; |
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 92715130ac09..cd982c877da0 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | * | 11 | * |
12 | * FILE : megaraid_mbox.c | 12 | * FILE : megaraid_mbox.c |
13 | * Version : v2.20.4.8 (Apr 11 2006) | 13 | * Version : v2.20.4.9 (Jul 16 2006) |
14 | * | 14 | * |
15 | * Authors: | 15 | * Authors: |
16 | * Atul Mukker <Atul.Mukker@lsil.com> | 16 | * Atul Mukker <Atul.Mukker@lsil.com> |
@@ -720,6 +720,7 @@ megaraid_init_mbox(adapter_t *adapter) | |||
720 | struct pci_dev *pdev; | 720 | struct pci_dev *pdev; |
721 | mraid_device_t *raid_dev; | 721 | mraid_device_t *raid_dev; |
722 | int i; | 722 | int i; |
723 | uint32_t magic64; | ||
723 | 724 | ||
724 | 725 | ||
725 | adapter->ito = MBOX_TIMEOUT; | 726 | adapter->ito = MBOX_TIMEOUT; |
@@ -863,12 +864,33 @@ megaraid_init_mbox(adapter_t *adapter) | |||
863 | 864 | ||
864 | // Set the DMA mask to 64-bit. All supported controllers as capable of | 865 | // Set the DMA mask to 64-bit. All supported controllers as capable of |
865 | // DMA in this range | 866 | // DMA in this range |
866 | if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) { | 867 | pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64); |
867 | 868 | ||
868 | con_log(CL_ANN, (KERN_WARNING | 869 | if (((magic64 == HBA_SIGNATURE_64_BIT) && |
869 | "megaraid: could not set DMA mask for 64-bit.\n")); | 870 | ((adapter->pdev->subsystem_device != |
871 | PCI_SUBSYS_ID_MEGARAID_SATA_150_6) || | ||
872 | (adapter->pdev->subsystem_device != | ||
873 | PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) || | ||
874 | (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && | ||
875 | adapter->pdev->device == PCI_DEVICE_ID_VERDE) || | ||
876 | (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && | ||
877 | adapter->pdev->device == PCI_DEVICE_ID_DOBSON) || | ||
878 | (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && | ||
879 | adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) || | ||
880 | (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && | ||
881 | adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) || | ||
882 | (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && | ||
883 | adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) { | ||
884 | if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) { | ||
885 | con_log(CL_ANN, (KERN_WARNING | ||
886 | "megaraid: DMA mask for 64-bit failed\n")); | ||
870 | 887 | ||
871 | goto out_free_sysfs_res; | 888 | if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) { |
889 | con_log(CL_ANN, (KERN_WARNING | ||
890 | "megaraid: 32-bit DMA mask failed\n")); | ||
891 | goto out_free_sysfs_res; | ||
892 | } | ||
893 | } | ||
872 | } | 894 | } |
873 | 895 | ||
874 | // setup tasklet for DPC | 896 | // setup tasklet for DPC |
@@ -1622,6 +1644,14 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) | |||
1622 | rdev->last_disp |= (1L << SCP2CHANNEL(scp)); | 1644 | rdev->last_disp |= (1L << SCP2CHANNEL(scp)); |
1623 | } | 1645 | } |
1624 | 1646 | ||
1647 | if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) { | ||
1648 | scp->sense_buffer[0] = 0x70; | ||
1649 | scp->sense_buffer[2] = ILLEGAL_REQUEST; | ||
1650 | scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB; | ||
1651 | scp->result = CHECK_CONDITION << 1; | ||
1652 | return NULL; | ||
1653 | } | ||
1654 | |||
1625 | /* Fall through */ | 1655 | /* Fall through */ |
1626 | 1656 | ||
1627 | case READ_CAPACITY: | 1657 | case READ_CAPACITY: |
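Annotation: two separate megaraid_mbox changes. The 64-bit DMA mask is now only requested when the adapter advertises the 64-bit signature in PCI config space or is one of the listed devices, with a 32-bit fallback before failing the probe; note that the subsystem-device test combines `!=` comparisons with `||`, which evaluates true for every device, so presumably `&&` was intended. Separately, INQUIRY with the EVPD bit set is rejected up front with ILLEGAL REQUEST / INVALID FIELD IN CDB sense data instead of being passed to the firmware. A condensed sketch of the DMA-mask fallback pattern, with the probe's error label replaced by a plain return:

static int set_dma_mask_sketch(struct pci_dev *pdev)
{
	/* Prefer 64-bit addressing, drop back to 32-bit, and only fail the
	 * probe when neither mask is accepted. */
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0 &&
	    pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
		return -ENODEV;
	return 0;
}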
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h index 868fb0ec93e7..2b5a3285f799 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.h +++ b/drivers/scsi/megaraid/megaraid_mbox.h | |||
@@ -21,8 +21,8 @@ | |||
21 | #include "megaraid_ioctl.h" | 21 | #include "megaraid_ioctl.h" |
22 | 22 | ||
23 | 23 | ||
24 | #define MEGARAID_VERSION "2.20.4.8" | 24 | #define MEGARAID_VERSION "2.20.4.9" |
25 | #define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" | 25 | #define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)" |
26 | 26 | ||
27 | 27 | ||
28 | /* | 28 | /* |
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index e8f534fb336b..d85b9a8f1b8d 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | * | 11 | * |
12 | * FILE : megaraid_mm.c | 12 | * FILE : megaraid_mm.c |
13 | * Version : v2.20.2.6 (Mar 7 2005) | 13 | * Version : v2.20.2.7 (Jul 16 2006) |
14 | * | 14 | * |
15 | * Common management module | 15 | * Common management module |
16 | */ | 16 | */ |
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h index 3d9e67d6849d..c8762b2b8ed1 100644 --- a/drivers/scsi/megaraid/megaraid_mm.h +++ b/drivers/scsi/megaraid/megaraid_mm.h | |||
@@ -27,9 +27,9 @@ | |||
27 | #include "megaraid_ioctl.h" | 27 | #include "megaraid_ioctl.h" |
28 | 28 | ||
29 | 29 | ||
30 | #define LSI_COMMON_MOD_VERSION "2.20.2.6" | 30 | #define LSI_COMMON_MOD_VERSION "2.20.2.7" |
31 | #define LSI_COMMON_MOD_EXT_VERSION \ | 31 | #define LSI_COMMON_MOD_EXT_VERSION \ |
32 | "(Release Date: Mon Mar 7 00:01:03 EST 2005)" | 32 | "(Release Date: Sun Jul 16 00:01:03 EST 2006)" |
33 | 33 | ||
34 | 34 | ||
35 | #define LSI_DBGLVL dbglevel | 35 | #define LSI_DBGLVL dbglevel |
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index c88717727be8..5572981a9f92 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c | |||
@@ -1268,7 +1268,7 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd) | |||
1268 | if (cmd->use_sg > 0) { | 1268 | if (cmd->use_sg > 0) { |
1269 | int nseg; | 1269 | int nseg; |
1270 | total = 0; | 1270 | total = 0; |
1271 | scl = (struct scatterlist *) cmd->buffer; | 1271 | scl = (struct scatterlist *) cmd->request_buffer; |
1272 | off = ms->data_ptr; | 1272 | off = ms->data_ptr; |
1273 | nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg, | 1273 | nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg, |
1274 | cmd->sc_data_direction); | 1274 | cmd->sc_data_direction); |
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c index d1f38c32aa15..efc8fff1d250 100644 --- a/drivers/scsi/pdc_adma.c +++ b/drivers/scsi/pdc_adma.c | |||
@@ -183,7 +183,8 @@ static struct ata_port_info adma_port_info[] = { | |||
183 | { | 183 | { |
184 | .sht = &adma_ata_sht, | 184 | .sht = &adma_ata_sht, |
185 | .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | | 185 | .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | |
186 | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO, | 186 | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | |
187 | ATA_FLAG_PIO_POLLING, | ||
187 | .pio_mask = 0x10, /* pio4 */ | 188 | .pio_mask = 0x10, /* pio4 */ |
188 | .udma_mask = 0x1f, /* udma0-4 */ | 189 | .udma_mask = 0x1f, /* udma0-4 */ |
189 | .port_ops = &adma_ata_ops, | 190 | .port_ops = &adma_ata_ops, |
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c index 7abf64d1bfc9..0bd9c60e6455 100644 --- a/drivers/scsi/pluto.c +++ b/drivers/scsi/pluto.c | |||
@@ -169,8 +169,6 @@ int __init pluto_detect(struct scsi_host_template *tpnt) | |||
169 | SCpnt->request->rq_status = RQ_SCSI_BUSY; | 169 | SCpnt->request->rq_status = RQ_SCSI_BUSY; |
170 | 170 | ||
171 | SCpnt->done = pluto_detect_done; | 171 | SCpnt->done = pluto_detect_done; |
172 | SCpnt->bufflen = 256; | ||
173 | SCpnt->buffer = fcs[i].inquiry; | ||
174 | SCpnt->request_bufflen = 256; | 172 | SCpnt->request_bufflen = 256; |
175 | SCpnt->request_buffer = fcs[i].inquiry; | 173 | SCpnt->request_buffer = fcs[i].inquiry; |
176 | PLD(("set up %d %08lx\n", i, (long)SCpnt)) | 174 | PLD(("set up %d %08lx\n", i, (long)SCpnt)) |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 139ea0e27fd7..0930260aec2c 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -487,6 +487,7 @@ typedef struct { | |||
487 | #define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */ | 487 | #define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */ |
488 | #define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ | 488 | #define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ |
489 | /* used. */ | 489 | /* used. */ |
490 | #define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */ | ||
490 | #define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ | 491 | #define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ |
491 | #define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ | 492 | #define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ |
492 | #define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */ | 493 | #define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */ |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9758dba95542..859649160caa 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -3063,6 +3063,7 @@ qla2x00_update_fcports(scsi_qla_host_t *ha) | |||
3063 | int | 3063 | int |
3064 | qla2x00_abort_isp(scsi_qla_host_t *ha) | 3064 | qla2x00_abort_isp(scsi_qla_host_t *ha) |
3065 | { | 3065 | { |
3066 | int rval; | ||
3066 | unsigned long flags = 0; | 3067 | unsigned long flags = 0; |
3067 | uint16_t cnt; | 3068 | uint16_t cnt; |
3068 | srb_t *sp; | 3069 | srb_t *sp; |
@@ -3119,6 +3120,16 @@ qla2x00_abort_isp(scsi_qla_host_t *ha) | |||
3119 | 3120 | ||
3120 | ha->isp_abort_cnt = 0; | 3121 | ha->isp_abort_cnt = 0; |
3121 | clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); | 3122 | clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); |
3123 | |||
3124 | if (ha->eft) { | ||
3125 | rval = qla2x00_trace_control(ha, TC_ENABLE, | ||
3126 | ha->eft_dma, EFT_NUM_BUFFERS); | ||
3127 | if (rval) { | ||
3128 | qla_printk(KERN_WARNING, ha, | ||
3129 | "Unable to reinitialize EFT " | ||
3130 | "(%d).\n", rval); | ||
3131 | } | ||
3132 | } | ||
3122 | } else { /* failed the ISP abort */ | 3133 | } else { /* failed the ISP abort */ |
3123 | ha->flags.online = 1; | 3134 | ha->flags.online = 1; |
3124 | if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { | 3135 | if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { |
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 2b60a27eff0b..c5b3c610a32a 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -471,6 +471,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, | |||
471 | mrk24->nport_handle = cpu_to_le16(loop_id); | 471 | mrk24->nport_handle = cpu_to_le16(loop_id); |
472 | mrk24->lun[1] = LSB(lun); | 472 | mrk24->lun[1] = LSB(lun); |
473 | mrk24->lun[2] = MSB(lun); | 473 | mrk24->lun[2] = MSB(lun); |
474 | host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); | ||
474 | } else { | 475 | } else { |
475 | SET_TARGET_ID(ha, mrk->target, loop_id); | 476 | SET_TARGET_ID(ha, mrk->target, loop_id); |
476 | mrk->lun = cpu_to_le16(lun); | 477 | mrk->lun = cpu_to_le16(lun); |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 795bf15b1b8f..de0613135f70 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -587,6 +587,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
587 | DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " | 587 | DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " |
588 | "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); | 588 | "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); |
589 | break; | 589 | break; |
590 | |||
591 | case MBA_TRACE_NOTIFICATION: | ||
592 | DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", | ||
593 | ha->host_no, mb[1], mb[2])); | ||
594 | break; | ||
590 | } | 595 | } |
591 | } | 596 | } |
592 | 597 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ec7ebb6037e6..65cbe2f5eea2 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -744,7 +744,6 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
744 | { | 744 | { |
745 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); | 745 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); |
746 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 746 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
747 | srb_t *sp; | ||
748 | int ret; | 747 | int ret; |
749 | unsigned int id, lun; | 748 | unsigned int id, lun; |
750 | unsigned long serial; | 749 | unsigned long serial; |
@@ -755,8 +754,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
755 | lun = cmd->device->lun; | 754 | lun = cmd->device->lun; |
756 | serial = cmd->serial_number; | 755 | serial = cmd->serial_number; |
757 | 756 | ||
758 | sp = (srb_t *) CMD_SP(cmd); | 757 | if (!fcport) |
759 | if (!sp || !fcport) | ||
760 | return ret; | 758 | return ret; |
761 | 759 | ||
762 | qla_printk(KERN_INFO, ha, | 760 | qla_printk(KERN_INFO, ha, |
@@ -875,7 +873,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
875 | { | 873 | { |
876 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); | 874 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); |
877 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 875 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
878 | srb_t *sp; | ||
879 | int ret; | 876 | int ret; |
880 | unsigned int id, lun; | 877 | unsigned int id, lun; |
881 | unsigned long serial; | 878 | unsigned long serial; |
@@ -886,8 +883,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
886 | lun = cmd->device->lun; | 883 | lun = cmd->device->lun; |
887 | serial = cmd->serial_number; | 884 | serial = cmd->serial_number; |
888 | 885 | ||
889 | sp = (srb_t *) CMD_SP(cmd); | 886 | if (!fcport) |
890 | if (!sp || !fcport) | ||
891 | return ret; | 887 | return ret; |
892 | 888 | ||
893 | qla_printk(KERN_INFO, ha, | 889 | qla_printk(KERN_INFO, ha, |
@@ -936,7 +932,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
936 | { | 932 | { |
937 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); | 933 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); |
938 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 934 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
939 | srb_t *sp; | ||
940 | int ret; | 935 | int ret; |
941 | unsigned int id, lun; | 936 | unsigned int id, lun; |
942 | unsigned long serial; | 937 | unsigned long serial; |
@@ -947,8 +942,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
947 | lun = cmd->device->lun; | 942 | lun = cmd->device->lun; |
948 | serial = cmd->serial_number; | 943 | serial = cmd->serial_number; |
949 | 944 | ||
950 | sp = (srb_t *) CMD_SP(cmd); | 945 | if (!fcport) |
951 | if (!sp || !fcport) | ||
952 | return ret; | 946 | return ret; |
953 | 947 | ||
954 | qla_printk(KERN_INFO, ha, | 948 | qla_printk(KERN_INFO, ha, |
@@ -2244,9 +2238,6 @@ qla2x00_do_dpc(void *data) | |||
2244 | 2238 | ||
2245 | next_loopid = 0; | 2239 | next_loopid = 0; |
2246 | list_for_each_entry(fcport, &ha->fcports, list) { | 2240 | list_for_each_entry(fcport, &ha->fcports, list) { |
2247 | if (fcport->port_type != FCT_TARGET) | ||
2248 | continue; | ||
2249 | |||
2250 | /* | 2241 | /* |
2251 | * If the port is not ONLINE then try to login | 2242 | * If the port is not ONLINE then try to login |
2252 | * to it if we haven't run out of retries. | 2243 | * to it if we haven't run out of retries. |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index d2d683440659..971259032ef7 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,9 +7,9 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.01.05-k3" | 10 | #define QLA2XXX_VERSION "8.01.07-k1" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 1 | 13 | #define QLA_DRIVER_MINOR_VER 1 |
14 | #define QLA_DRIVER_PATCH_VER 5 | 14 | #define QLA_DRIVER_PATCH_VER 7 |
15 | #define QLA_DRIVER_BETA_VER 0 | 15 | #define QLA_DRIVER_BETA_VER 0 |
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 69e0551a81d2..5b2f0741a55b 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -874,7 +874,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, | |||
874 | if (Cmnd->use_sg) { | 874 | if (Cmnd->use_sg) { |
875 | int sg_count; | 875 | int sg_count; |
876 | 876 | ||
877 | sg = (struct scatterlist *) Cmnd->buffer; | 877 | sg = (struct scatterlist *) Cmnd->request_buffer; |
878 | sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction); | 878 | sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction); |
879 | 879 | ||
880 | ds = cmd->dataseg; | 880 | ds = cmd->dataseg; |
@@ -1278,7 +1278,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti) | |||
1278 | 1278 | ||
1279 | if (Cmnd->use_sg) { | 1279 | if (Cmnd->use_sg) { |
1280 | sbus_unmap_sg(qpti->sdev, | 1280 | sbus_unmap_sg(qpti->sdev, |
1281 | (struct scatterlist *)Cmnd->buffer, | 1281 | (struct scatterlist *)Cmnd->request_buffer, |
1282 | Cmnd->use_sg, | 1282 | Cmnd->use_sg, |
1283 | Cmnd->sc_data_direction); | 1283 | Cmnd->sc_data_direction); |
1284 | } else { | 1284 | } else { |
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index 1053c7c76b7d..fa38a413d16b 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c | |||
@@ -1961,8 +1961,7 @@ comreset_retry: | |||
1961 | timeout = jiffies + msecs_to_jiffies(200); | 1961 | timeout = jiffies + msecs_to_jiffies(200); |
1962 | do { | 1962 | do { |
1963 | sata_scr_read(ap, SCR_STATUS, &sstatus); | 1963 | sata_scr_read(ap, SCR_STATUS, &sstatus); |
1964 | sstatus &= 0x3; | 1964 | if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) |
1965 | if ((sstatus == 3) || (sstatus == 0)) | ||
1966 | break; | 1965 | break; |
1967 | 1966 | ||
1968 | __msleep(1, can_sleep); | 1967 | __msleep(1, can_sleep); |
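Annotation: the COMRESET retry loop in sata_mv now tests the DET bits of a freshly read SStatus on each pass instead of masking the stored value in place, so the full register remains available after the loop. For reference, a small helper expressing what the loop is waiting for; the helper itself is not part of the driver:

static inline int det_settled_sketch(u32 sstatus)
{
	u32 det = sstatus & 0x3;	/* low bits of the SStatus DET field */

	/* 0: no device detected, 3: device present and phy communication
	 * established -- in either case the polling can stop. */
	return (det == 0) || (det == 3);
}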
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c index 64631bd38952..4776f4e55839 100644 --- a/drivers/scsi/sata_promise.c +++ b/drivers/scsi/sata_promise.c | |||
@@ -269,8 +269,15 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = { | |||
269 | { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 269 | { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
270 | board_20619 }, | 270 | board_20619 }, |
271 | 271 | ||
272 | /* TODO: remove all associated board_20771 code, as it completely | ||
273 | * duplicates board_2037x code, unless reason for separation can be | ||
274 | * divined. | ||
275 | */ | ||
276 | #if 0 | ||
272 | { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 277 | { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
273 | board_20771 }, | 278 | board_20771 }, |
279 | #endif | ||
280 | |||
274 | { } /* terminate list */ | 281 | { } /* terminate list */ |
275 | }; | 282 | }; |
276 | 283 | ||
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c index 7aabb45c35e5..d0a85073ebf7 100644 --- a/drivers/scsi/sata_sil.c +++ b/drivers/scsi/sata_sil.c | |||
@@ -109,6 +109,7 @@ enum { | |||
109 | }; | 109 | }; |
110 | 110 | ||
111 | static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 111 | static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); |
112 | static int sil_pci_device_resume(struct pci_dev *pdev); | ||
112 | static void sil_dev_config(struct ata_port *ap, struct ata_device *dev); | 113 | static void sil_dev_config(struct ata_port *ap, struct ata_device *dev); |
113 | static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); | 114 | static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); |
114 | static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 115 | static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); |
@@ -160,6 +161,8 @@ static struct pci_driver sil_pci_driver = { | |||
160 | .id_table = sil_pci_tbl, | 161 | .id_table = sil_pci_tbl, |
161 | .probe = sil_init_one, | 162 | .probe = sil_init_one, |
162 | .remove = ata_pci_remove_one, | 163 | .remove = ata_pci_remove_one, |
164 | .suspend = ata_pci_device_suspend, | ||
165 | .resume = sil_pci_device_resume, | ||
163 | }; | 166 | }; |
164 | 167 | ||
165 | static struct scsi_host_template sil_sht = { | 168 | static struct scsi_host_template sil_sht = { |
@@ -178,6 +181,8 @@ static struct scsi_host_template sil_sht = { | |||
178 | .slave_configure = ata_scsi_slave_config, | 181 | .slave_configure = ata_scsi_slave_config, |
179 | .slave_destroy = ata_scsi_slave_destroy, | 182 | .slave_destroy = ata_scsi_slave_destroy, |
180 | .bios_param = ata_std_bios_param, | 183 | .bios_param = ata_std_bios_param, |
184 | .suspend = ata_scsi_device_suspend, | ||
185 | .resume = ata_scsi_device_resume, | ||
181 | }; | 186 | }; |
182 | 187 | ||
183 | static const struct ata_port_operations sil_ops = { | 188 | static const struct ata_port_operations sil_ops = { |
@@ -370,7 +375,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) | |||
370 | * during hardreset makes controllers with broken SIEN | 375 | * during hardreset makes controllers with broken SIEN |
371 | * repeat probing needlessly. | 376 | * repeat probing needlessly. |
372 | */ | 377 | */ |
373 | if (!(ap->flags & ATA_FLAG_FROZEN)) { | 378 | if (!(ap->pflags & ATA_PFLAG_FROZEN)) { |
374 | ata_ehi_hotplugged(&ap->eh_info); | 379 | ata_ehi_hotplugged(&ap->eh_info); |
375 | ap->eh_info.serror |= serror; | 380 | ap->eh_info.serror |= serror; |
376 | } | 381 | } |
@@ -561,6 +566,52 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) | |||
561 | } | 566 | } |
562 | } | 567 | } |
563 | 568 | ||
569 | static void sil_init_controller(struct pci_dev *pdev, | ||
570 | int n_ports, unsigned long host_flags, | ||
571 | void __iomem *mmio_base) | ||
572 | { | ||
573 | u8 cls; | ||
574 | u32 tmp; | ||
575 | int i; | ||
576 | |||
577 | /* Initialize FIFO PCI bus arbitration */ | ||
578 | cls = sil_get_device_cache_line(pdev); | ||
579 | if (cls) { | ||
580 | cls >>= 3; | ||
581 | cls++; /* cls = (line_size/8)+1 */ | ||
582 | for (i = 0; i < n_ports; i++) | ||
583 | writew(cls << 8 | cls, | ||
584 | mmio_base + sil_port[i].fifo_cfg); | ||
585 | } else | ||
586 | dev_printk(KERN_WARNING, &pdev->dev, | ||
587 | "cache line size not set. Driver may not function\n"); | ||
588 | |||
589 | /* Apply R_ERR on DMA activate FIS errata workaround */ | ||
590 | if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) { | ||
591 | int cnt; | ||
592 | |||
593 | for (i = 0, cnt = 0; i < n_ports; i++) { | ||
594 | tmp = readl(mmio_base + sil_port[i].sfis_cfg); | ||
595 | if ((tmp & 0x3) != 0x01) | ||
596 | continue; | ||
597 | if (!cnt) | ||
598 | dev_printk(KERN_INFO, &pdev->dev, | ||
599 | "Applying R_ERR on DMA activate " | ||
600 | "FIS errata fix\n"); | ||
601 | writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg); | ||
602 | cnt++; | ||
603 | } | ||
604 | } | ||
605 | |||
606 | if (n_ports == 4) { | ||
607 | /* flip the magic "make 4 ports work" bit */ | ||
608 | tmp = readl(mmio_base + sil_port[2].bmdma); | ||
609 | if ((tmp & SIL_INTR_STEERING) == 0) | ||
610 | writel(tmp | SIL_INTR_STEERING, | ||
611 | mmio_base + sil_port[2].bmdma); | ||
612 | } | ||
613 | } | ||
614 | |||
564 | static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 615 | static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) |
565 | { | 616 | { |
566 | static int printed_version; | 617 | static int printed_version; |
@@ -570,8 +621,6 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
570 | int rc; | 621 | int rc; |
571 | unsigned int i; | 622 | unsigned int i; |
572 | int pci_dev_busy = 0; | 623 | int pci_dev_busy = 0; |
573 | u32 tmp; | ||
574 | u8 cls; | ||
575 | 624 | ||
576 | if (!printed_version++) | 625 | if (!printed_version++) |
577 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 626 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
@@ -630,42 +679,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
630 | ata_std_ports(&probe_ent->port[i]); | 679 | ata_std_ports(&probe_ent->port[i]); |
631 | } | 680 | } |
632 | 681 | ||
633 | /* Initialize FIFO PCI bus arbitration */ | 682 | sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags, |
634 | cls = sil_get_device_cache_line(pdev); | 683 | mmio_base); |
635 | if (cls) { | ||
636 | cls >>= 3; | ||
637 | cls++; /* cls = (line_size/8)+1 */ | ||
638 | for (i = 0; i < probe_ent->n_ports; i++) | ||
639 | writew(cls << 8 | cls, | ||
640 | mmio_base + sil_port[i].fifo_cfg); | ||
641 | } else | ||
642 | dev_printk(KERN_WARNING, &pdev->dev, | ||
643 | "cache line size not set. Driver may not function\n"); | ||
644 | |||
645 | /* Apply R_ERR on DMA activate FIS errata workaround */ | ||
646 | if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) { | ||
647 | int cnt; | ||
648 | |||
649 | for (i = 0, cnt = 0; i < probe_ent->n_ports; i++) { | ||
650 | tmp = readl(mmio_base + sil_port[i].sfis_cfg); | ||
651 | if ((tmp & 0x3) != 0x01) | ||
652 | continue; | ||
653 | if (!cnt) | ||
654 | dev_printk(KERN_INFO, &pdev->dev, | ||
655 | "Applying R_ERR on DMA activate " | ||
656 | "FIS errata fix\n"); | ||
657 | writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg); | ||
658 | cnt++; | ||
659 | } | ||
660 | } | ||
661 | |||
662 | if (ent->driver_data == sil_3114) { | ||
663 | /* flip the magic "make 4 ports work" bit */ | ||
664 | tmp = readl(mmio_base + sil_port[2].bmdma); | ||
665 | if ((tmp & SIL_INTR_STEERING) == 0) | ||
666 | writel(tmp | SIL_INTR_STEERING, | ||
667 | mmio_base + sil_port[2].bmdma); | ||
668 | } | ||
669 | 684 | ||
670 | pci_set_master(pdev); | 685 | pci_set_master(pdev); |
671 | 686 | ||
@@ -685,6 +700,18 @@ err_out: | |||
685 | return rc; | 700 | return rc; |
686 | } | 701 | } |
687 | 702 | ||
703 | static int sil_pci_device_resume(struct pci_dev *pdev) | ||
704 | { | ||
705 | struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); | ||
706 | |||
707 | ata_pci_device_do_resume(pdev); | ||
708 | sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags, | ||
709 | host_set->mmio_base); | ||
710 | ata_host_set_resume(host_set); | ||
711 | |||
712 | return 0; | ||
713 | } | ||
714 | |||
688 | static int __init sil_init(void) | 715 | static int __init sil_init(void) |
689 | { | 716 | { |
690 | return pci_module_init(&sil_pci_driver); | 717 | return pci_module_init(&sil_pci_driver); |
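
Editorial note on the sata_sil.c hunks above: the chip programming that previously lived only in sil_init_one() is factored into sil_init_controller() so that the new sil_pci_device_resume() can redo it after the controller loses its register state across suspend, before handing the ports back to libata with ata_host_set_resume(). The stand-alone C sketch below only illustrates that probe/resume split; init_controller(), probe() and resume() are illustrative names, not libata APIs.

#include <stdio.h>

/* User-space analogue of the refactor above: setup that used to run only at
 * probe time is factored out so resume can repeat it after the hardware has
 * forgotten its configuration across a suspend cycle. */
static void init_controller(int n_ports)
{
	for (int i = 0; i < n_ports; i++)
		printf("program port %d registers\n", i);
}

static void probe(int n_ports)
{
	init_controller(n_ports);	/* first-time chip setup */
	puts("register ports with the midlayer");
}

static void resume(int n_ports)
{
	puts("restore PCI config space, re-enable device");
	init_controller(n_ports);	/* chip state was lost while suspended */
	puts("tell the midlayer to resume the ports");
}

int main(void)
{
	probe(2);
	resume(2);
	return 0;
}
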
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c index 07a1c6a8a414..3f368c7d3ef9 100644 --- a/drivers/scsi/sata_sil24.c +++ b/drivers/scsi/sata_sil24.c | |||
@@ -92,6 +92,7 @@ enum { | |||
92 | HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */ | 92 | HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */ |
93 | HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */ | 93 | HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */ |
94 | HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */ | 94 | HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */ |
95 | HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */ | ||
95 | 96 | ||
96 | /* | 97 | /* |
97 | * Port registers | 98 | * Port registers |
@@ -338,6 +339,7 @@ static int sil24_port_start(struct ata_port *ap); | |||
338 | static void sil24_port_stop(struct ata_port *ap); | 339 | static void sil24_port_stop(struct ata_port *ap); |
339 | static void sil24_host_stop(struct ata_host_set *host_set); | 340 | static void sil24_host_stop(struct ata_host_set *host_set); |
340 | static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | 341 | static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
342 | static int sil24_pci_device_resume(struct pci_dev *pdev); | ||
341 | 343 | ||
342 | static const struct pci_device_id sil24_pci_tbl[] = { | 344 | static const struct pci_device_id sil24_pci_tbl[] = { |
343 | { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, | 345 | { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, |
@@ -353,6 +355,8 @@ static struct pci_driver sil24_pci_driver = { | |||
353 | .id_table = sil24_pci_tbl, | 355 | .id_table = sil24_pci_tbl, |
354 | .probe = sil24_init_one, | 356 | .probe = sil24_init_one, |
355 | .remove = ata_pci_remove_one, /* safe? */ | 357 | .remove = ata_pci_remove_one, /* safe? */ |
358 | .suspend = ata_pci_device_suspend, | ||
359 | .resume = sil24_pci_device_resume, | ||
356 | }; | 360 | }; |
357 | 361 | ||
358 | static struct scsi_host_template sil24_sht = { | 362 | static struct scsi_host_template sil24_sht = { |
@@ -372,6 +376,8 @@ static struct scsi_host_template sil24_sht = { | |||
372 | .slave_configure = ata_scsi_slave_config, | 376 | .slave_configure = ata_scsi_slave_config, |
373 | .slave_destroy = ata_scsi_slave_destroy, | 377 | .slave_destroy = ata_scsi_slave_destroy, |
374 | .bios_param = ata_std_bios_param, | 378 | .bios_param = ata_std_bios_param, |
379 | .suspend = ata_scsi_device_suspend, | ||
380 | .resume = ata_scsi_device_resume, | ||
375 | }; | 381 | }; |
376 | 382 | ||
377 | static const struct ata_port_operations sil24_ops = { | 383 | static const struct ata_port_operations sil24_ops = { |
@@ -607,7 +613,7 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class) | |||
607 | /* SStatus oscillates between zero and valid status after | 613 | /* SStatus oscillates between zero and valid status after |
608 | * DEV_RST, debounce it. | 614 | * DEV_RST, debounce it. |
609 | */ | 615 | */ |
610 | rc = sata_phy_debounce(ap, sata_deb_timing_before_fsrst); | 616 | rc = sata_phy_debounce(ap, sata_deb_timing_long); |
611 | if (rc) { | 617 | if (rc) { |
612 | reason = "PHY debouncing failed"; | 618 | reason = "PHY debouncing failed"; |
613 | goto err; | 619 | goto err; |
@@ -988,6 +994,64 @@ static void sil24_host_stop(struct ata_host_set *host_set) | |||
988 | kfree(hpriv); | 994 | kfree(hpriv); |
989 | } | 995 | } |
990 | 996 | ||
997 | static void sil24_init_controller(struct pci_dev *pdev, int n_ports, | ||
998 | unsigned long host_flags, | ||
999 | void __iomem *host_base, | ||
1000 | void __iomem *port_base) | ||
1001 | { | ||
1002 | u32 tmp; | ||
1003 | int i; | ||
1004 | |||
1005 | /* GPIO off */ | ||
1006 | writel(0, host_base + HOST_FLASH_CMD); | ||
1007 | |||
1008 | /* clear global reset & mask interrupts during initialization */ | ||
1009 | writel(0, host_base + HOST_CTRL); | ||
1010 | |||
1011 | /* init ports */ | ||
1012 | for (i = 0; i < n_ports; i++) { | ||
1013 | void __iomem *port = port_base + i * PORT_REGS_SIZE; | ||
1014 | |||
1015 | /* Initial PHY setting */ | ||
1016 | writel(0x20c, port + PORT_PHY_CFG); | ||
1017 | |||
1018 | /* Clear port RST */ | ||
1019 | tmp = readl(port + PORT_CTRL_STAT); | ||
1020 | if (tmp & PORT_CS_PORT_RST) { | ||
1021 | writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); | ||
1022 | tmp = ata_wait_register(port + PORT_CTRL_STAT, | ||
1023 | PORT_CS_PORT_RST, | ||
1024 | PORT_CS_PORT_RST, 10, 100); | ||
1025 | if (tmp & PORT_CS_PORT_RST) | ||
1026 | dev_printk(KERN_ERR, &pdev->dev, | ||
1027 | "failed to clear port RST\n"); | ||
1028 | } | ||
1029 | |||
1030 | /* Configure IRQ WoC */ | ||
1031 | if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC) | ||
1032 | writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT); | ||
1033 | else | ||
1034 | writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); | ||
1035 | |||
1036 | /* Zero error counters. */ | ||
1037 | writel(0x8000, port + PORT_DECODE_ERR_THRESH); | ||
1038 | writel(0x8000, port + PORT_CRC_ERR_THRESH); | ||
1039 | writel(0x8000, port + PORT_HSHK_ERR_THRESH); | ||
1040 | writel(0x0000, port + PORT_DECODE_ERR_CNT); | ||
1041 | writel(0x0000, port + PORT_CRC_ERR_CNT); | ||
1042 | writel(0x0000, port + PORT_HSHK_ERR_CNT); | ||
1043 | |||
1044 | /* Always use 64bit activation */ | ||
1045 | writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); | ||
1046 | |||
1047 | /* Clear port multiplier enable and resume bits */ | ||
1048 | writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); | ||
1049 | } | ||
1050 | |||
1051 | /* Turn on interrupts */ | ||
1052 | writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL); | ||
1053 | } | ||
1054 | |||
991 | static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 1055 | static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
992 | { | 1056 | { |
993 | static int printed_version = 0; | 1057 | static int printed_version = 0; |
@@ -1042,7 +1106,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1042 | 1106 | ||
1043 | probe_ent->irq = pdev->irq; | 1107 | probe_ent->irq = pdev->irq; |
1044 | probe_ent->irq_flags = IRQF_SHARED; | 1108 | probe_ent->irq_flags = IRQF_SHARED; |
1045 | probe_ent->mmio_base = port_base; | ||
1046 | probe_ent->private_data = hpriv; | 1109 | probe_ent->private_data = hpriv; |
1047 | 1110 | ||
1048 | hpriv->host_base = host_base; | 1111 | hpriv->host_base = host_base; |
@@ -1076,9 +1139,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1076 | } | 1139 | } |
1077 | } | 1140 | } |
1078 | 1141 | ||
1079 | /* GPIO off */ | ||
1080 | writel(0, host_base + HOST_FLASH_CMD); | ||
1081 | |||
1082 | /* Apply workaround for completion IRQ loss on PCI-X errata */ | 1142 | /* Apply workaround for completion IRQ loss on PCI-X errata */ |
1083 | if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) { | 1143 | if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) { |
1084 | tmp = readl(host_base + HOST_CTRL); | 1144 | tmp = readl(host_base + HOST_CTRL); |
@@ -1090,56 +1150,18 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1090 | probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC; | 1150 | probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC; |
1091 | } | 1151 | } |
1092 | 1152 | ||
1093 | /* clear global reset & mask interrupts during initialization */ | ||
1094 | writel(0, host_base + HOST_CTRL); | ||
1095 | |||
1096 | for (i = 0; i < probe_ent->n_ports; i++) { | 1153 | for (i = 0; i < probe_ent->n_ports; i++) { |
1097 | void __iomem *port = port_base + i * PORT_REGS_SIZE; | 1154 | unsigned long portu = |
1098 | unsigned long portu = (unsigned long)port; | 1155 | (unsigned long)port_base + i * PORT_REGS_SIZE; |
1099 | 1156 | ||
1100 | probe_ent->port[i].cmd_addr = portu; | 1157 | probe_ent->port[i].cmd_addr = portu; |
1101 | probe_ent->port[i].scr_addr = portu + PORT_SCONTROL; | 1158 | probe_ent->port[i].scr_addr = portu + PORT_SCONTROL; |
1102 | 1159 | ||
1103 | ata_std_ports(&probe_ent->port[i]); | 1160 | ata_std_ports(&probe_ent->port[i]); |
1104 | |||
1105 | /* Initial PHY setting */ | ||
1106 | writel(0x20c, port + PORT_PHY_CFG); | ||
1107 | |||
1108 | /* Clear port RST */ | ||
1109 | tmp = readl(port + PORT_CTRL_STAT); | ||
1110 | if (tmp & PORT_CS_PORT_RST) { | ||
1111 | writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); | ||
1112 | tmp = ata_wait_register(port + PORT_CTRL_STAT, | ||
1113 | PORT_CS_PORT_RST, | ||
1114 | PORT_CS_PORT_RST, 10, 100); | ||
1115 | if (tmp & PORT_CS_PORT_RST) | ||
1116 | dev_printk(KERN_ERR, &pdev->dev, | ||
1117 | "failed to clear port RST\n"); | ||
1118 | } | ||
1119 | |||
1120 | /* Configure IRQ WoC */ | ||
1121 | if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) | ||
1122 | writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT); | ||
1123 | else | ||
1124 | writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); | ||
1125 | |||
1126 | /* Zero error counters. */ | ||
1127 | writel(0x8000, port + PORT_DECODE_ERR_THRESH); | ||
1128 | writel(0x8000, port + PORT_CRC_ERR_THRESH); | ||
1129 | writel(0x8000, port + PORT_HSHK_ERR_THRESH); | ||
1130 | writel(0x0000, port + PORT_DECODE_ERR_CNT); | ||
1131 | writel(0x0000, port + PORT_CRC_ERR_CNT); | ||
1132 | writel(0x0000, port + PORT_HSHK_ERR_CNT); | ||
1133 | |||
1134 | /* Always use 64bit activation */ | ||
1135 | writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); | ||
1136 | |||
1137 | /* Clear port multiplier enable and resume bits */ | ||
1138 | writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); | ||
1139 | } | 1161 | } |
1140 | 1162 | ||
1141 | /* Turn on interrupts */ | 1163 | sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags, |
1142 | writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL); | 1164 | host_base, port_base); |
1143 | 1165 | ||
1144 | pci_set_master(pdev); | 1166 | pci_set_master(pdev); |
1145 | 1167 | ||
@@ -1162,6 +1184,25 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1162 | return rc; | 1184 | return rc; |
1163 | } | 1185 | } |
1164 | 1186 | ||
1187 | static int sil24_pci_device_resume(struct pci_dev *pdev) | ||
1188 | { | ||
1189 | struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); | ||
1190 | struct sil24_host_priv *hpriv = host_set->private_data; | ||
1191 | |||
1192 | ata_pci_device_do_resume(pdev); | ||
1193 | |||
1194 | if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) | ||
1195 | writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL); | ||
1196 | |||
1197 | sil24_init_controller(pdev, host_set->n_ports, | ||
1198 | host_set->ports[0]->flags, | ||
1199 | hpriv->host_base, hpriv->port_base); | ||
1200 | |||
1201 | ata_host_set_resume(host_set); | ||
1202 | |||
1203 | return 0; | ||
1204 | } | ||
1205 | |||
1165 | static int __init sil24_init(void) | 1206 | static int __init sil24_init(void) |
1166 | { | 1207 | { |
1167 | return pci_module_init(&sil24_pci_driver); | 1208 | return pci_module_init(&sil24_pci_driver); |
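
The sil24_init_controller() hunk above relies on ata_wait_register() to poll PORT_CTRL_STAT until PORT_CS_PORT_RST clears or a timeout expires, then re-tests the bit to detect the timeout case. Below is a stand-alone, user-space analogue of that polling idiom (poll while (value & mask) == val, return the last value read); wait_register() and the fake register are illustrative only, not the libata implementation.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Poll *reg until (*reg & mask) != val, or until timeout_ms expires.
 * Returns the last value read; the caller re-tests the mask to detect
 * a timeout, mirroring how the hunk above uses ata_wait_register(). */
static uint32_t wait_register(volatile uint32_t *reg, uint32_t mask,
			      uint32_t val, unsigned interval_ms,
			      unsigned timeout_ms)
{
	struct timespec ts = { 0, (long)interval_ms * 1000000L };
	uint32_t tmp = *reg;
	unsigned waited = 0;

	while ((tmp & mask) == val && waited < timeout_ms) {
		nanosleep(&ts, NULL);
		waited += interval_ms;
		tmp = *reg;
	}
	return tmp;
}

int main(void)
{
	volatile uint32_t fake_port_stat = 0x1;	/* pretend the RST bit is stuck */
	uint32_t tmp = wait_register(&fake_port_stat, 0x1, 0x1, 10, 100);

	if (tmp & 0x1)
		fprintf(stderr, "failed to clear port RST (timed out)\n");
	return 0;
}
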
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c index 03baec2191bf..a3727af8b9c1 100644 --- a/drivers/scsi/sata_via.c +++ b/drivers/scsi/sata_via.c | |||
@@ -74,8 +74,10 @@ enum { | |||
74 | static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 74 | static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); |
75 | static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); | 75 | static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); |
76 | static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 76 | static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); |
77 | static void vt6420_error_handler(struct ata_port *ap); | ||
77 | 78 | ||
78 | static const struct pci_device_id svia_pci_tbl[] = { | 79 | static const struct pci_device_id svia_pci_tbl[] = { |
80 | { 0x1106, 0x0591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, | ||
79 | { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, | 81 | { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, |
80 | { 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 }, | 82 | { 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 }, |
81 | 83 | ||
@@ -107,7 +109,38 @@ static struct scsi_host_template svia_sht = { | |||
107 | .bios_param = ata_std_bios_param, | 109 | .bios_param = ata_std_bios_param, |
108 | }; | 110 | }; |
109 | 111 | ||
110 | static const struct ata_port_operations svia_sata_ops = { | 112 | static const struct ata_port_operations vt6420_sata_ops = { |
113 | .port_disable = ata_port_disable, | ||
114 | |||
115 | .tf_load = ata_tf_load, | ||
116 | .tf_read = ata_tf_read, | ||
117 | .check_status = ata_check_status, | ||
118 | .exec_command = ata_exec_command, | ||
119 | .dev_select = ata_std_dev_select, | ||
120 | |||
121 | .bmdma_setup = ata_bmdma_setup, | ||
122 | .bmdma_start = ata_bmdma_start, | ||
123 | .bmdma_stop = ata_bmdma_stop, | ||
124 | .bmdma_status = ata_bmdma_status, | ||
125 | |||
126 | .qc_prep = ata_qc_prep, | ||
127 | .qc_issue = ata_qc_issue_prot, | ||
128 | .data_xfer = ata_pio_data_xfer, | ||
129 | |||
130 | .freeze = ata_bmdma_freeze, | ||
131 | .thaw = ata_bmdma_thaw, | ||
132 | .error_handler = vt6420_error_handler, | ||
133 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
134 | |||
135 | .irq_handler = ata_interrupt, | ||
136 | .irq_clear = ata_bmdma_irq_clear, | ||
137 | |||
138 | .port_start = ata_port_start, | ||
139 | .port_stop = ata_port_stop, | ||
140 | .host_stop = ata_host_stop, | ||
141 | }; | ||
142 | |||
143 | static const struct ata_port_operations vt6421_sata_ops = { | ||
111 | .port_disable = ata_port_disable, | 144 | .port_disable = ata_port_disable, |
112 | 145 | ||
113 | .tf_load = ata_tf_load, | 146 | .tf_load = ata_tf_load, |
@@ -141,13 +174,13 @@ static const struct ata_port_operations svia_sata_ops = { | |||
141 | .host_stop = ata_host_stop, | 174 | .host_stop = ata_host_stop, |
142 | }; | 175 | }; |
143 | 176 | ||
144 | static struct ata_port_info svia_port_info = { | 177 | static struct ata_port_info vt6420_port_info = { |
145 | .sht = &svia_sht, | 178 | .sht = &svia_sht, |
146 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, | 179 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, |
147 | .pio_mask = 0x1f, | 180 | .pio_mask = 0x1f, |
148 | .mwdma_mask = 0x07, | 181 | .mwdma_mask = 0x07, |
149 | .udma_mask = 0x7f, | 182 | .udma_mask = 0x7f, |
150 | .port_ops = &svia_sata_ops, | 183 | .port_ops = &vt6420_sata_ops, |
151 | }; | 184 | }; |
152 | 185 | ||
153 | MODULE_AUTHOR("Jeff Garzik"); | 186 | MODULE_AUTHOR("Jeff Garzik"); |
@@ -170,6 +203,81 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | |||
170 | outl(val, ap->ioaddr.scr_addr + (4 * sc_reg)); | 203 | outl(val, ap->ioaddr.scr_addr + (4 * sc_reg)); |
171 | } | 204 | } |
172 | 205 | ||
206 | /** | ||
207 | * vt6420_prereset - prereset for vt6420 | ||
208 | * @ap: target ATA port | ||
209 | * | ||
210 | * SCR registers on vt6420 are pieces of shit and may hang the | ||
211 | * whole machine completely if accessed with the wrong timing. | ||
212 | * To avoid such catastrophe, vt6420 doesn't provide generic SCR | ||
213 | * access operations, but uses SStatus and SControl only during | ||
214 | * boot probing in controlled way. | ||
215 | * | ||
216 | * As the old (pre EH update) probing code is proven to work, we | ||
217 | * strictly follow the access pattern. | ||
218 | * | ||
219 | * LOCKING: | ||
220 | * Kernel thread context (may sleep) | ||
221 | * | ||
222 | * RETURNS: | ||
223 | * 0 on success, -errno otherwise. | ||
224 | */ | ||
225 | static int vt6420_prereset(struct ata_port *ap) | ||
226 | { | ||
227 | struct ata_eh_context *ehc = &ap->eh_context; | ||
228 | unsigned long timeout = jiffies + (HZ * 5); | ||
229 | u32 sstatus, scontrol; | ||
230 | int online; | ||
231 | |||
232 | /* don't do any SCR stuff if we're not loading */ | ||
233 | 	if (!(ap->pflags & ATA_PFLAG_LOADING)) | ||
234 | goto skip_scr; | ||
235 | |||
236 | /* Resume phy. This is the old resume sequence from | ||
237 | * __sata_phy_reset(). | ||
238 | */ | ||
239 | svia_scr_write(ap, SCR_CONTROL, 0x300); | ||
240 | svia_scr_read(ap, SCR_CONTROL); /* flush */ | ||
241 | |||
242 | /* wait for phy to become ready, if necessary */ | ||
243 | do { | ||
244 | msleep(200); | ||
245 | if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1) | ||
246 | break; | ||
247 | } while (time_before(jiffies, timeout)); | ||
248 | |||
249 | /* open code sata_print_link_status() */ | ||
250 | sstatus = svia_scr_read(ap, SCR_STATUS); | ||
251 | scontrol = svia_scr_read(ap, SCR_CONTROL); | ||
252 | |||
253 | online = (sstatus & 0xf) == 0x3; | ||
254 | |||
255 | ata_port_printk(ap, KERN_INFO, | ||
256 | "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n", | ||
257 | online ? "up" : "down", sstatus, scontrol); | ||
258 | |||
259 | /* SStatus is read one more time */ | ||
260 | svia_scr_read(ap, SCR_STATUS); | ||
261 | |||
262 | if (!online) { | ||
263 | /* tell EH to bail */ | ||
264 | ehc->i.action &= ~ATA_EH_RESET_MASK; | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | skip_scr: | ||
269 | /* wait for !BSY */ | ||
270 | ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); | ||
271 | |||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | static void vt6420_error_handler(struct ata_port *ap) | ||
276 | { | ||
277 | return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, | ||
278 | NULL, ata_std_postreset); | ||
279 | } | ||
280 | |||
173 | static const unsigned int svia_bar_sizes[] = { | 281 | static const unsigned int svia_bar_sizes[] = { |
174 | 8, 4, 8, 4, 16, 256 | 282 | 8, 4, 8, 4, 16, 256 |
175 | }; | 283 | }; |
@@ -210,7 +318,7 @@ static void vt6421_init_addrs(struct ata_probe_ent *probe_ent, | |||
210 | static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev) | 318 | static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev) |
211 | { | 319 | { |
212 | struct ata_probe_ent *probe_ent; | 320 | struct ata_probe_ent *probe_ent; |
213 | struct ata_port_info *ppi = &svia_port_info; | 321 | struct ata_port_info *ppi = &vt6420_port_info; |
214 | 322 | ||
215 | probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); | 323 | probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); |
216 | if (!probe_ent) | 324 | if (!probe_ent) |
@@ -239,7 +347,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev) | |||
239 | 347 | ||
240 | probe_ent->sht = &svia_sht; | 348 | probe_ent->sht = &svia_sht; |
241 | probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY; | 349 | probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY; |
242 | probe_ent->port_ops = &svia_sata_ops; | 350 | probe_ent->port_ops = &vt6421_sata_ops; |
243 | probe_ent->n_ports = N_PORTS; | 351 | probe_ent->n_ports = N_PORTS; |
244 | probe_ent->irq = pdev->irq; | 352 | probe_ent->irq = pdev->irq; |
245 | probe_ent->irq_flags = IRQF_SHARED; | 353 | probe_ent->irq_flags = IRQF_SHARED; |
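
vt6420_prereset() above treats (SStatus & 0xf), the SATA DET field, as its link-state indicator: it keeps polling while the field reads 0x1 (device detected, phy not yet established) and declares the link up once it reads 0x3. The small stand-alone decoder below is for illustration only; the value meanings follow the standard SStatus DET definition, not anything driver-specific.

#include <stdio.h>
#include <stdint.h>

/* Decode the SATA SStatus DET field (bits 3:0) the way vt6420_prereset()
 * uses it: 0x1 means keep polling, 0x3 means the link is up. */
static const char *sstatus_det(uint32_t sstatus)
{
	switch (sstatus & 0xf) {
	case 0x0: return "no device detected";
	case 0x1: return "device detected, phy not established";
	case 0x3: return "device detected, phy established (link up)";
	case 0x4: return "phy offline";
	default:  return "reserved/unknown";
	}
}

int main(void)
{
	uint32_t samples[] = { 0x0, 0x1, 0x113, 0x4 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("SStatus %03x: %s\n", (unsigned)samples[i],
		       sstatus_det(samples[i]));
	return 0;
}
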
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c index 916fe6fba756..ad37871594f5 100644 --- a/drivers/scsi/sata_vsc.c +++ b/drivers/scsi/sata_vsc.c | |||
@@ -297,7 +297,7 @@ static const struct ata_port_operations vsc_sata_ops = { | |||
297 | .bmdma_status = ata_bmdma_status, | 297 | .bmdma_status = ata_bmdma_status, |
298 | .qc_prep = ata_qc_prep, | 298 | .qc_prep = ata_qc_prep, |
299 | .qc_issue = ata_qc_issue_prot, | 299 | .qc_issue = ata_qc_issue_prot, |
300 | .data_xfer = ata_pio_data_xfer, | 300 | .data_xfer = ata_mmio_data_xfer, |
301 | .freeze = ata_bmdma_freeze, | 301 | .freeze = ata_bmdma_freeze, |
302 | .thaw = ata_bmdma_thaw, | 302 | .thaw = ata_bmdma_thaw, |
303 | .error_handler = ata_bmdma_error_handler, | 303 | .error_handler = ata_bmdma_error_handler, |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 2ab7df0dcfe8..b332caddd5b3 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -346,7 +346,7 @@ void scsi_log_send(struct scsi_cmnd *cmd) | |||
346 | if (level > 3) { | 346 | if (level > 3) { |
347 | printk(KERN_INFO "buffer = 0x%p, bufflen = %d," | 347 | printk(KERN_INFO "buffer = 0x%p, bufflen = %d," |
348 | " done = 0x%p, queuecommand 0x%p\n", | 348 | " done = 0x%p, queuecommand 0x%p\n", |
349 | cmd->buffer, cmd->bufflen, | 349 | cmd->request_buffer, cmd->request_bufflen, |
350 | cmd->done, | 350 | cmd->done, |
351 | sdev->host->hostt->queuecommand); | 351 | sdev->host->hostt->queuecommand); |
352 | 352 | ||
@@ -661,11 +661,6 @@ void __scsi_done(struct scsi_cmnd *cmd) | |||
661 | */ | 661 | */ |
662 | int scsi_retry_command(struct scsi_cmnd *cmd) | 662 | int scsi_retry_command(struct scsi_cmnd *cmd) |
663 | { | 663 | { |
664 | /* | ||
665 | * Restore the SCSI command state. | ||
666 | */ | ||
667 | scsi_setup_cmd_retry(cmd); | ||
668 | |||
669 | /* | 664 | /* |
670 | * Zero the sense information from the last time we tried | 665 | * Zero the sense information from the last time we tried |
671 | * this command. | 666 | * this command. |
@@ -711,10 +706,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd) | |||
711 | "Notifying upper driver of completion " | 706 | "Notifying upper driver of completion " |
712 | "(result %x)\n", cmd->result)); | 707 | "(result %x)\n", cmd->result)); |
713 | 708 | ||
714 | /* | ||
715 | * We can get here with use_sg=0, causing a panic in the upper level | ||
716 | */ | ||
717 | cmd->use_sg = cmd->old_use_sg; | ||
718 | cmd->done(cmd); | 709 | cmd->done(cmd); |
719 | } | 710 | } |
720 | EXPORT_SYMBOL(scsi_finish_command); | 711 | EXPORT_SYMBOL(scsi_finish_command); |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 9c63b00773c4..a80303c6b3fd 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -286,7 +286,7 @@ static int inquiry_evpd_83(unsigned char * arr, int target_dev_id, | |||
286 | int dev_id_num, const char * dev_id_str, | 286 | int dev_id_num, const char * dev_id_str, |
287 | int dev_id_str_len); | 287 | int dev_id_str_len); |
288 | static int inquiry_evpd_88(unsigned char * arr, int target_dev_id); | 288 | static int inquiry_evpd_88(unsigned char * arr, int target_dev_id); |
289 | static void do_create_driverfs_files(void); | 289 | static int do_create_driverfs_files(void); |
290 | static void do_remove_driverfs_files(void); | 290 | static void do_remove_driverfs_files(void); |
291 | 291 | ||
292 | static int sdebug_add_adapter(void); | 292 | static int sdebug_add_adapter(void); |
@@ -2487,19 +2487,22 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp, | |||
2487 | DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, | 2487 | DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, |
2488 | sdebug_add_host_store); | 2488 | sdebug_add_host_store); |
2489 | 2489 | ||
2490 | static void do_create_driverfs_files(void) | 2490 | static int do_create_driverfs_files(void) |
2491 | { | 2491 | { |
2492 | driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); | 2492 | int ret; |
2493 | driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); | 2493 | |
2494 | driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); | 2494 | ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); |
2495 | driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); | 2495 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); |
2496 | driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); | 2496 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); |
2497 | driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); | 2497 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); |
2498 | driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); | 2498 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); |
2499 | driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); | 2499 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); |
2500 | driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); | 2500 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); |
2501 | driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); | 2501 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); |
2502 | driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); | 2502 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); |
2503 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); | ||
2504 | ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); | ||
2505 | return ret; | ||
2503 | } | 2506 | } |
2504 | 2507 | ||
2505 | static void do_remove_driverfs_files(void) | 2508 | static void do_remove_driverfs_files(void) |
@@ -2522,6 +2525,7 @@ static int __init scsi_debug_init(void) | |||
2522 | unsigned int sz; | 2525 | unsigned int sz; |
2523 | int host_to_add; | 2526 | int host_to_add; |
2524 | int k; | 2527 | int k; |
2528 | int ret; | ||
2525 | 2529 | ||
2526 | if (scsi_debug_dev_size_mb < 1) | 2530 | if (scsi_debug_dev_size_mb < 1) |
2527 | scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ | 2531 | scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ |
@@ -2560,12 +2564,32 @@ static int __init scsi_debug_init(void) | |||
2560 | if (scsi_debug_num_parts > 0) | 2564 | if (scsi_debug_num_parts > 0) |
2561 | sdebug_build_parts(fake_storep); | 2565 | sdebug_build_parts(fake_storep); |
2562 | 2566 | ||
2563 | init_all_queued(); | 2567 | ret = device_register(&pseudo_primary); |
2568 | if (ret < 0) { | ||
2569 | printk(KERN_WARNING "scsi_debug: device_register error: %d\n", | ||
2570 | ret); | ||
2571 | goto free_vm; | ||
2572 | } | ||
2573 | ret = bus_register(&pseudo_lld_bus); | ||
2574 | if (ret < 0) { | ||
2575 | printk(KERN_WARNING "scsi_debug: bus_register error: %d\n", | ||
2576 | ret); | ||
2577 | goto dev_unreg; | ||
2578 | } | ||
2579 | ret = driver_register(&sdebug_driverfs_driver); | ||
2580 | if (ret < 0) { | ||
2581 | printk(KERN_WARNING "scsi_debug: driver_register error: %d\n", | ||
2582 | ret); | ||
2583 | goto bus_unreg; | ||
2584 | } | ||
2585 | ret = do_create_driverfs_files(); | ||
2586 | if (ret < 0) { | ||
2587 | printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n", | ||
2588 | ret); | ||
2589 | goto del_files; | ||
2590 | } | ||
2564 | 2591 | ||
2565 | device_register(&pseudo_primary); | 2592 | init_all_queued(); |
2566 | bus_register(&pseudo_lld_bus); | ||
2567 | driver_register(&sdebug_driverfs_driver); | ||
2568 | do_create_driverfs_files(); | ||
2569 | 2593 | ||
2570 | sdebug_driver_template.proc_name = (char *)sdebug_proc_name; | 2594 | sdebug_driver_template.proc_name = (char *)sdebug_proc_name; |
2571 | 2595 | ||
@@ -2585,6 +2609,18 @@ static int __init scsi_debug_init(void) | |||
2585 | scsi_debug_add_host); | 2609 | scsi_debug_add_host); |
2586 | } | 2610 | } |
2587 | return 0; | 2611 | return 0; |
2612 | |||
2613 | del_files: | ||
2614 | do_remove_driverfs_files(); | ||
2615 | driver_unregister(&sdebug_driverfs_driver); | ||
2616 | bus_unreg: | ||
2617 | bus_unregister(&pseudo_lld_bus); | ||
2618 | dev_unreg: | ||
2619 | device_unregister(&pseudo_primary); | ||
2620 | free_vm: | ||
2621 | vfree(fake_storep); | ||
2622 | |||
2623 | return ret; | ||
2588 | } | 2624 | } |
2589 | 2625 | ||
2590 | static void __exit scsi_debug_exit(void) | 2626 | static void __exit scsi_debug_exit(void) |
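
The reworked scsi_debug_init() above now checks the return value of every registration step and unwinds the earlier steps through a chain of labels (del_files, bus_unreg, dev_unreg, free_vm), so a failure at step N undoes steps N-1..1 in reverse order. A minimal stand-alone example of that goto-unwinding idiom follows; the step_*/undo_* functions are placeholders, not kernel APIs.

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* pretend this registration fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

/* Each label undoes everything registered before the step that failed. */
static int init(void)
{
	int ret;

	ret = step_a();
	if (ret < 0)
		goto out;
	ret = step_b();
	if (ret < 0)
		goto undo_a;
	ret = step_c();
	if (ret < 0)
		goto undo_b;
	return 0;

undo_b:
	undo_b();
undo_a:
	undo_a();
out:
	return ret;
}

int main(void)
{
	printf("init() = %d\n", init());
	return 0;
}
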
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 6683d596234a..3d355d054612 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -460,19 +460,71 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) | |||
460 | * Return value: | 460 | * Return value: |
461 | * SUCCESS or FAILED or NEEDS_RETRY | 461 | * SUCCESS or FAILED or NEEDS_RETRY |
462 | **/ | 462 | **/ |
463 | static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) | 463 | static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, |
464 | int cmnd_size, int timeout, int copy_sense) | ||
464 | { | 465 | { |
465 | struct scsi_device *sdev = scmd->device; | 466 | struct scsi_device *sdev = scmd->device; |
466 | struct Scsi_Host *shost = sdev->host; | 467 | struct Scsi_Host *shost = sdev->host; |
467 | DECLARE_COMPLETION(done); | 468 | int old_result = scmd->result; |
469 | DECLARE_COMPLETION_ONSTACK(done); | ||
468 | unsigned long timeleft; | 470 | unsigned long timeleft; |
469 | unsigned long flags; | 471 | unsigned long flags; |
472 | unsigned char old_cmnd[MAX_COMMAND_SIZE]; | ||
473 | enum dma_data_direction old_data_direction; | ||
474 | unsigned short old_use_sg; | ||
475 | unsigned char old_cmd_len; | ||
476 | unsigned old_bufflen; | ||
477 | void *old_buffer; | ||
470 | int rtn; | 478 | int rtn; |
471 | 479 | ||
480 | /* | ||
481 | * We need saved copies of a number of fields - this is because | ||
482 | * error handling may need to overwrite these with different values | ||
483 | * to run different commands, and once error handling is complete, | ||
484 | * we will need to restore these values prior to running the actual | ||
485 | * command. | ||
486 | */ | ||
487 | old_buffer = scmd->request_buffer; | ||
488 | old_bufflen = scmd->request_bufflen; | ||
489 | memcpy(old_cmnd, scmd->cmnd, sizeof(scmd->cmnd)); | ||
490 | old_data_direction = scmd->sc_data_direction; | ||
491 | old_cmd_len = scmd->cmd_len; | ||
492 | old_use_sg = scmd->use_sg; | ||
493 | |||
494 | memset(scmd->cmnd, 0, sizeof(scmd->cmnd)); | ||
495 | memcpy(scmd->cmnd, cmnd, cmnd_size); | ||
496 | |||
497 | if (copy_sense) { | ||
498 | int gfp_mask = GFP_ATOMIC; | ||
499 | |||
500 | if (shost->hostt->unchecked_isa_dma) | ||
501 | gfp_mask |= __GFP_DMA; | ||
502 | |||
503 | scmd->sc_data_direction = DMA_FROM_DEVICE; | ||
504 | scmd->request_bufflen = 252; | ||
505 | scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask); | ||
506 | if (!scmd->request_buffer) | ||
507 | return FAILED; | ||
508 | } else { | ||
509 | scmd->request_buffer = NULL; | ||
510 | scmd->request_bufflen = 0; | ||
511 | scmd->sc_data_direction = DMA_NONE; | ||
512 | } | ||
513 | |||
514 | scmd->underflow = 0; | ||
515 | scmd->use_sg = 0; | ||
516 | scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); | ||
517 | |||
472 | if (sdev->scsi_level <= SCSI_2) | 518 | if (sdev->scsi_level <= SCSI_2) |
473 | scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | | 519 | scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | |
474 | (sdev->lun << 5 & 0xe0); | 520 | (sdev->lun << 5 & 0xe0); |
475 | 521 | ||
522 | /* | ||
523 | * Zero the sense buffer. The scsi spec mandates that any | ||
524 | * untransferred sense data should be interpreted as being zero. | ||
525 | */ | ||
526 | memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); | ||
527 | |||
476 | shost->eh_action = &done; | 528 | shost->eh_action = &done; |
477 | 529 | ||
478 | spin_lock_irqsave(shost->host_lock, flags); | 530 | spin_lock_irqsave(shost->host_lock, flags); |
@@ -522,6 +574,29 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) | |||
522 | rtn = FAILED; | 574 | rtn = FAILED; |
523 | } | 575 | } |
524 | 576 | ||
577 | |||
578 | /* | ||
579 | * Last chance to have valid sense data. | ||
580 | */ | ||
581 | if (copy_sense) { | ||
582 | if (!SCSI_SENSE_VALID(scmd)) { | ||
583 | memcpy(scmd->sense_buffer, scmd->request_buffer, | ||
584 | sizeof(scmd->sense_buffer)); | ||
585 | } | ||
586 | kfree(scmd->request_buffer); | ||
587 | } | ||
588 | |||
589 | |||
590 | /* | ||
591 | * Restore original data | ||
592 | */ | ||
593 | scmd->request_buffer = old_buffer; | ||
594 | scmd->request_bufflen = old_bufflen; | ||
595 | memcpy(scmd->cmnd, old_cmnd, sizeof(scmd->cmnd)); | ||
596 | scmd->sc_data_direction = old_data_direction; | ||
597 | scmd->cmd_len = old_cmd_len; | ||
598 | scmd->use_sg = old_use_sg; | ||
599 | scmd->result = old_result; | ||
525 | return rtn; | 600 | return rtn; |
526 | } | 601 | } |
527 | 602 | ||
@@ -537,56 +612,9 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) | |||
537 | static int scsi_request_sense(struct scsi_cmnd *scmd) | 612 | static int scsi_request_sense(struct scsi_cmnd *scmd) |
538 | { | 613 | { |
539 | static unsigned char generic_sense[6] = | 614 | static unsigned char generic_sense[6] = |
540 | {REQUEST_SENSE, 0, 0, 0, 252, 0}; | 615 | {REQUEST_SENSE, 0, 0, 0, 252, 0}; |
541 | unsigned char *scsi_result; | ||
542 | int saved_result; | ||
543 | int rtn; | ||
544 | |||
545 | memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); | ||
546 | |||
547 | scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0)); | ||
548 | |||
549 | |||
550 | if (unlikely(!scsi_result)) { | ||
551 | printk(KERN_ERR "%s: cannot allocate scsi_result.\n", | ||
552 | __FUNCTION__); | ||
553 | return FAILED; | ||
554 | } | ||
555 | |||
556 | /* | ||
557 | * zero the sense buffer. some host adapters automatically always | ||
558 | * request sense, so it is not a good idea that | ||
559 | * scmd->request_buffer and scmd->sense_buffer point to the same | ||
560 | * address (db). 0 is not a valid sense code. | ||
561 | */ | ||
562 | memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); | ||
563 | memset(scsi_result, 0, 252); | ||
564 | 616 | ||
565 | saved_result = scmd->result; | 617 | return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1); |
566 | scmd->request_buffer = scsi_result; | ||
567 | scmd->request_bufflen = 252; | ||
568 | scmd->use_sg = 0; | ||
569 | scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); | ||
570 | scmd->sc_data_direction = DMA_FROM_DEVICE; | ||
571 | scmd->underflow = 0; | ||
572 | |||
573 | rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT); | ||
574 | |||
575 | /* last chance to have valid sense data */ | ||
576 | if(!SCSI_SENSE_VALID(scmd)) { | ||
577 | memcpy(scmd->sense_buffer, scmd->request_buffer, | ||
578 | sizeof(scmd->sense_buffer)); | ||
579 | } | ||
580 | |||
581 | kfree(scsi_result); | ||
582 | |||
583 | /* | ||
584 | * when we eventually call scsi_finish, we really wish to complete | ||
585 | * the original request, so let's restore the original data. (db) | ||
586 | */ | ||
587 | scsi_setup_cmd_retry(scmd); | ||
588 | scmd->result = saved_result; | ||
589 | return rtn; | ||
590 | } | 618 | } |
591 | 619 | ||
592 | /** | 620 | /** |
@@ -605,12 +633,6 @@ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) | |||
605 | { | 633 | { |
606 | scmd->device->host->host_failed--; | 634 | scmd->device->host->host_failed--; |
607 | scmd->eh_eflags = 0; | 635 | scmd->eh_eflags = 0; |
608 | |||
609 | /* | ||
610 | * set this back so that the upper level can correctly free up | ||
611 | * things. | ||
612 | */ | ||
613 | scsi_setup_cmd_retry(scmd); | ||
614 | list_move_tail(&scmd->eh_entry, done_q); | 636 | list_move_tail(&scmd->eh_entry, done_q); |
615 | } | 637 | } |
616 | EXPORT_SYMBOL(scsi_eh_finish_cmd); | 638 | EXPORT_SYMBOL(scsi_eh_finish_cmd); |
@@ -715,47 +737,23 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd) | |||
715 | { | 737 | { |
716 | static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; | 738 | static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; |
717 | int retry_cnt = 1, rtn; | 739 | int retry_cnt = 1, rtn; |
718 | int saved_result; | ||
719 | 740 | ||
720 | retry_tur: | 741 | retry_tur: |
721 | memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); | 742 | rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0); |
722 | |||
723 | /* | ||
724 | * zero the sense buffer. the scsi spec mandates that any | ||
725 | * untransferred sense data should be interpreted as being zero. | ||
726 | */ | ||
727 | memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); | ||
728 | |||
729 | saved_result = scmd->result; | ||
730 | scmd->request_buffer = NULL; | ||
731 | scmd->request_bufflen = 0; | ||
732 | scmd->use_sg = 0; | ||
733 | scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); | ||
734 | scmd->underflow = 0; | ||
735 | scmd->sc_data_direction = DMA_NONE; | ||
736 | 743 | ||
737 | rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT); | ||
738 | |||
739 | /* | ||
740 | * when we eventually call scsi_finish, we really wish to complete | ||
741 | * the original request, so let's restore the original data. (db) | ||
742 | */ | ||
743 | scsi_setup_cmd_retry(scmd); | ||
744 | scmd->result = saved_result; | ||
745 | |||
746 | /* | ||
747 | * hey, we are done. let's look to see what happened. | ||
748 | */ | ||
749 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", | 744 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", |
750 | __FUNCTION__, scmd, rtn)); | 745 | __FUNCTION__, scmd, rtn)); |
751 | if (rtn == SUCCESS) | 746 | |
752 | return 0; | 747 | switch (rtn) { |
753 | else if (rtn == NEEDS_RETRY) { | 748 | case NEEDS_RETRY: |
754 | if (retry_cnt--) | 749 | if (retry_cnt--) |
755 | goto retry_tur; | 750 | goto retry_tur; |
751 | /*FALLTHRU*/ | ||
752 | case SUCCESS: | ||
756 | return 0; | 753 | return 0; |
754 | default: | ||
755 | return 1; | ||
757 | } | 756 | } |
758 | return 1; | ||
759 | } | 757 | } |
760 | 758 | ||
761 | /** | 759 | /** |
@@ -837,44 +835,16 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) | |||
837 | static int scsi_eh_try_stu(struct scsi_cmnd *scmd) | 835 | static int scsi_eh_try_stu(struct scsi_cmnd *scmd) |
838 | { | 836 | { |
839 | static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; | 837 | static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; |
840 | int rtn; | ||
841 | int saved_result; | ||
842 | |||
843 | if (!scmd->device->allow_restart) | ||
844 | return 1; | ||
845 | |||
846 | memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); | ||
847 | |||
848 | /* | ||
849 | * zero the sense buffer. the scsi spec mandates that any | ||
850 | * untransferred sense data should be interpreted as being zero. | ||
851 | */ | ||
852 | memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); | ||
853 | 838 | ||
854 | saved_result = scmd->result; | 839 | if (scmd->device->allow_restart) { |
855 | scmd->request_buffer = NULL; | 840 | int rtn; |
856 | scmd->request_bufflen = 0; | ||
857 | scmd->use_sg = 0; | ||
858 | scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); | ||
859 | scmd->underflow = 0; | ||
860 | scmd->sc_data_direction = DMA_NONE; | ||
861 | 841 | ||
862 | rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT); | 842 | rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, |
863 | 843 | START_UNIT_TIMEOUT, 0); | |
864 | /* | 844 | if (rtn == SUCCESS) |
865 | * when we eventually call scsi_finish, we really wish to complete | 845 | return 0; |
866 | * the original request, so let's restore the original data. (db) | 846 | } |
867 | */ | ||
868 | scsi_setup_cmd_retry(scmd); | ||
869 | scmd->result = saved_result; | ||
870 | 847 | ||
871 | /* | ||
872 | * hey, we are done. let's look to see what happened. | ||
873 | */ | ||
874 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", | ||
875 | __FUNCTION__, scmd, rtn)); | ||
876 | if (rtn == SUCCESS) | ||
877 | return 0; | ||
878 | return 1; | 848 | return 1; |
879 | } | 849 | } |
880 | 850 | ||
@@ -1684,8 +1654,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag) | |||
1684 | 1654 | ||
1685 | scmd->scsi_done = scsi_reset_provider_done_command; | 1655 | scmd->scsi_done = scsi_reset_provider_done_command; |
1686 | scmd->done = NULL; | 1656 | scmd->done = NULL; |
1687 | scmd->buffer = NULL; | ||
1688 | scmd->bufflen = 0; | ||
1689 | scmd->request_buffer = NULL; | 1657 | scmd->request_buffer = NULL; |
1690 | scmd->request_bufflen = 0; | 1658 | scmd->request_bufflen = 0; |
1691 | 1659 | ||
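
The scsi_error.c hunks above fold the per-caller save/overwrite/restore dance into scsi_send_eh_cmnd() itself: it snapshots the command fields it is about to clobber (CDB, buffer, length, direction, use_sg, result), runs the error-handling command, optionally copies sense data back, and restores the originals so the eventual retry of the real command is untouched. The stand-alone sketch below shows only that save/restore pattern; struct cmd and run_eh_cmnd() are illustrative stand-ins, not SCSI midlayer types.

#include <string.h>
#include <stdio.h>

struct cmd {
	unsigned char cdb[16];
	void *buffer;
	unsigned bufflen;
};

/* Borrow the command to run an alternate CDB, then put everything back. */
static void run_eh_cmnd(struct cmd *c, const unsigned char *cdb, int len)
{
	struct cmd saved = *c;			/* save what we will clobber */

	memset(c->cdb, 0, sizeof(c->cdb));
	memcpy(c->cdb, cdb, len);
	c->buffer = NULL;
	c->bufflen = 0;
	printf("issue EH command 0x%02x\n", c->cdb[0]);

	*c = saved;				/* restore before the real retry */
}

int main(void)
{
	struct cmd c = { .cdb = { 0x28 }, .buffer = &c, .bufflen = 512 };
	static const unsigned char tur[6] = { 0x00, 0, 0, 0, 0, 0 };

	run_eh_cmnd(&c, tur, 6);
	printf("original opcode restored: 0x%02x, bufflen %u\n",
	       c.cdb[0], c.bufflen);
	return 0;
}
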
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index a89c4115cfba..32293f451669 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c | |||
@@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, | |||
110 | sshdr.asc, sshdr.ascq); | 110 | sshdr.asc, sshdr.ascq); |
111 | break; | 111 | break; |
112 | case NOT_READY: /* This happens if there is no disc in drive */ | 112 | case NOT_READY: /* This happens if there is no disc in drive */ |
113 | if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) { | 113 | if (sdev->removable) |
114 | printk(KERN_INFO "Device not ready. Make sure" | ||
115 | " there is a disc in the drive.\n"); | ||
116 | break; | 114 | break; |
117 | } | ||
118 | case UNIT_ATTENTION: | 115 | case UNIT_ATTENTION: |
119 | if (sdev->removable) { | 116 | if (sdev->removable) { |
120 | sdev->changed = 1; | 117 | sdev->changed = 1; |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 08af9aae7df3..077c1c691210 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -436,60 +436,16 @@ EXPORT_SYMBOL_GPL(scsi_execute_async); | |||
436 | * | 436 | * |
437 | * Arguments: cmd - command that is ready to be queued. | 437 | * Arguments: cmd - command that is ready to be queued. |
438 | * | 438 | * |
439 | * Returns: Nothing | ||
440 | * | ||
441 | * Notes: This function has the job of initializing a number of | 439 | * Notes: This function has the job of initializing a number of |
442 | * fields related to error handling. Typically this will | 440 | * fields related to error handling. Typically this will |
443 | * be called once for each command, as required. | 441 | * be called once for each command, as required. |
444 | */ | 442 | */ |
445 | static int scsi_init_cmd_errh(struct scsi_cmnd *cmd) | 443 | static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) |
446 | { | 444 | { |
447 | cmd->serial_number = 0; | 445 | cmd->serial_number = 0; |
448 | |||
449 | memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer); | 446 | memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer); |
450 | |||
451 | if (cmd->cmd_len == 0) | 447 | if (cmd->cmd_len == 0) |
452 | cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); | 448 | cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); |
453 | |||
454 | /* | ||
455 | * We need saved copies of a number of fields - this is because | ||
456 | * error handling may need to overwrite these with different values | ||
457 | * to run different commands, and once error handling is complete, | ||
458 | * we will need to restore these values prior to running the actual | ||
459 | * command. | ||
460 | */ | ||
461 | cmd->old_use_sg = cmd->use_sg; | ||
462 | cmd->old_cmd_len = cmd->cmd_len; | ||
463 | cmd->sc_old_data_direction = cmd->sc_data_direction; | ||
464 | cmd->old_underflow = cmd->underflow; | ||
465 | memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd)); | ||
466 | cmd->buffer = cmd->request_buffer; | ||
467 | cmd->bufflen = cmd->request_bufflen; | ||
468 | |||
469 | return 1; | ||
470 | } | ||
471 | |||
472 | /* | ||
473 | * Function: scsi_setup_cmd_retry() | ||
474 | * | ||
475 | * Purpose: Restore the command state for a retry | ||
476 | * | ||
477 | * Arguments: cmd - command to be restored | ||
478 | * | ||
479 | * Returns: Nothing | ||
480 | * | ||
481 | * Notes: Immediately prior to retrying a command, we need | ||
482 | * to restore certain fields that we saved above. | ||
483 | */ | ||
484 | void scsi_setup_cmd_retry(struct scsi_cmnd *cmd) | ||
485 | { | ||
486 | memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd)); | ||
487 | cmd->request_buffer = cmd->buffer; | ||
488 | cmd->request_bufflen = cmd->bufflen; | ||
489 | cmd->use_sg = cmd->old_use_sg; | ||
490 | cmd->cmd_len = cmd->old_cmd_len; | ||
491 | cmd->sc_data_direction = cmd->sc_old_data_direction; | ||
492 | cmd->underflow = cmd->old_underflow; | ||
493 | } | 449 | } |
494 | 450 | ||
495 | void scsi_device_unbusy(struct scsi_device *sdev) | 451 | void scsi_device_unbusy(struct scsi_device *sdev) |
@@ -807,22 +763,13 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index) | |||
807 | */ | 763 | */ |
808 | static void scsi_release_buffers(struct scsi_cmnd *cmd) | 764 | static void scsi_release_buffers(struct scsi_cmnd *cmd) |
809 | { | 765 | { |
810 | struct request *req = cmd->request; | ||
811 | |||
812 | /* | ||
813 | * Free up any indirection buffers we allocated for DMA purposes. | ||
814 | */ | ||
815 | if (cmd->use_sg) | 766 | if (cmd->use_sg) |
816 | scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); | 767 | scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); |
817 | else if (cmd->request_buffer != req->buffer) | ||
818 | kfree(cmd->request_buffer); | ||
819 | 768 | ||
820 | /* | 769 | /* |
821 | * Zero these out. They now point to freed memory, and it is | 770 | * Zero these out. They now point to freed memory, and it is |
822 | * dangerous to hang onto the pointers. | 771 | * dangerous to hang onto the pointers. |
823 | */ | 772 | */ |
824 | cmd->buffer = NULL; | ||
825 | cmd->bufflen = 0; | ||
826 | cmd->request_buffer = NULL; | 773 | cmd->request_buffer = NULL; |
827 | cmd->request_bufflen = 0; | 774 | cmd->request_bufflen = 0; |
828 | } | 775 | } |
@@ -858,7 +805,7 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd) | |||
858 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | 805 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) |
859 | { | 806 | { |
860 | int result = cmd->result; | 807 | int result = cmd->result; |
861 | int this_count = cmd->bufflen; | 808 | int this_count = cmd->request_bufflen; |
862 | request_queue_t *q = cmd->device->request_queue; | 809 | request_queue_t *q = cmd->device->request_queue; |
863 | struct request *req = cmd->request; | 810 | struct request *req = cmd->request; |
864 | int clear_errors = 1; | 811 | int clear_errors = 1; |
@@ -866,28 +813,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
866 | int sense_valid = 0; | 813 | int sense_valid = 0; |
867 | int sense_deferred = 0; | 814 | int sense_deferred = 0; |
868 | 815 | ||
869 | /* | 816 | scsi_release_buffers(cmd); |
870 | * Free up any indirection buffers we allocated for DMA purposes. | ||
871 | * For the case of a READ, we need to copy the data out of the | ||
872 | * bounce buffer and into the real buffer. | ||
873 | */ | ||
874 | if (cmd->use_sg) | ||
875 | scsi_free_sgtable(cmd->buffer, cmd->sglist_len); | ||
876 | else if (cmd->buffer != req->buffer) { | ||
877 | if (rq_data_dir(req) == READ) { | ||
878 | unsigned long flags; | ||
879 | char *to = bio_kmap_irq(req->bio, &flags); | ||
880 | memcpy(to, cmd->buffer, cmd->bufflen); | ||
881 | bio_kunmap_irq(to, &flags); | ||
882 | } | ||
883 | kfree(cmd->buffer); | ||
884 | } | ||
885 | 817 | ||
886 | if (result) { | 818 | if (result) { |
887 | sense_valid = scsi_command_normalize_sense(cmd, &sshdr); | 819 | sense_valid = scsi_command_normalize_sense(cmd, &sshdr); |
888 | if (sense_valid) | 820 | if (sense_valid) |
889 | sense_deferred = scsi_sense_is_deferred(&sshdr); | 821 | sense_deferred = scsi_sense_is_deferred(&sshdr); |
890 | } | 822 | } |
823 | |||
891 | if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ | 824 | if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ |
892 | req->errors = result; | 825 | req->errors = result; |
893 | if (result) { | 826 | if (result) { |
@@ -908,15 +841,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
908 | } | 841 | } |
909 | 842 | ||
910 | /* | 843 | /* |
911 | * Zero these out. They now point to freed memory, and it is | ||
912 | * dangerous to hang onto the pointers. | ||
913 | */ | ||
914 | cmd->buffer = NULL; | ||
915 | cmd->bufflen = 0; | ||
916 | cmd->request_buffer = NULL; | ||
917 | cmd->request_bufflen = 0; | ||
918 | |||
919 | /* | ||
920 | * Next deal with any sectors which we were able to correctly | 844 | * Next deal with any sectors which we were able to correctly |
921 | * handle. | 845 | * handle. |
922 | */ | 846 | */ |
@@ -1012,7 +936,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
1012 | if (!(req->flags & REQ_QUIET)) { | 936 | if (!(req->flags & REQ_QUIET)) { |
1013 | scmd_printk(KERN_INFO, cmd, | 937 | scmd_printk(KERN_INFO, cmd, |
1014 | "Volume overflow, CDB: "); | 938 | "Volume overflow, CDB: "); |
1015 | __scsi_print_command(cmd->data_cmnd); | 939 | __scsi_print_command(cmd->cmnd); |
1016 | scsi_print_sense("", cmd); | 940 | scsi_print_sense("", cmd); |
1017 | } | 941 | } |
1018 | /* See SSC3rXX or current. */ | 942 | /* See SSC3rXX or current. */ |
@@ -1143,7 +1067,7 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd) | |||
1143 | * successfully. Since this is a REQ_BLOCK_PC command the | 1067 | * successfully. Since this is a REQ_BLOCK_PC command the |
1144 | * caller should check the request's errors value | 1068 | * caller should check the request's errors value |
1145 | */ | 1069 | */ |
1146 | scsi_io_completion(cmd, cmd->bufflen); | 1070 | scsi_io_completion(cmd, cmd->request_bufflen); |
1147 | } | 1071 | } |
1148 | 1072 | ||
1149 | static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd) | 1073 | static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd) |
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index e2fbe9a9d5a9..ae24c85aaeea 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h | |||
@@ -57,7 +57,6 @@ extern int scsi_eh_scmd_add(struct scsi_cmnd *, int); | |||
57 | 57 | ||
58 | /* scsi_lib.c */ | 58 | /* scsi_lib.c */ |
59 | extern int scsi_maybe_unblock_host(struct scsi_device *sdev); | 59 | extern int scsi_maybe_unblock_host(struct scsi_device *sdev); |
60 | extern void scsi_setup_cmd_retry(struct scsi_cmnd *cmd); | ||
61 | extern void scsi_device_unbusy(struct scsi_device *sdev); | 60 | extern void scsi_device_unbusy(struct scsi_device *sdev); |
62 | extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason); | 61 | extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason); |
63 | extern void scsi_next_command(struct scsi_cmnd *cmd); | 62 | extern void scsi_next_command(struct scsi_cmnd *cmd); |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 7b9e8fa1a4e0..2ecd14188574 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #define ISCSI_SESSION_ATTRS 11 | 34 | #define ISCSI_SESSION_ATTRS 11 |
35 | #define ISCSI_CONN_ATTRS 11 | 35 | #define ISCSI_CONN_ATTRS 11 |
36 | #define ISCSI_HOST_ATTRS 0 | 36 | #define ISCSI_HOST_ATTRS 0 |
37 | #define ISCSI_TRANSPORT_VERSION "1.1-646" | ||
37 | 38 | ||
38 | struct iscsi_internal { | 39 | struct iscsi_internal { |
39 | int daemon_pid; | 40 | int daemon_pid; |
@@ -634,13 +635,13 @@ mempool_zone_get_skb(struct mempool_zone *zone) | |||
634 | } | 635 | } |
635 | 636 | ||
636 | static int | 637 | static int |
637 | iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb) | 638 | iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp) |
638 | { | 639 | { |
639 | unsigned long flags; | 640 | unsigned long flags; |
640 | int rc; | 641 | int rc; |
641 | 642 | ||
642 | skb_get(skb); | 643 | skb_get(skb); |
643 | rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL); | 644 | rc = netlink_broadcast(nls, skb, 0, 1, gfp); |
644 | if (rc < 0) { | 645 | if (rc < 0) { |
645 | mempool_free(skb, zone->pool); | 646 | mempool_free(skb, zone->pool); |
646 | printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc); | 647 | printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc); |
@@ -749,7 +750,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) | |||
749 | ev->r.connerror.cid = conn->cid; | 750 | ev->r.connerror.cid = conn->cid; |
750 | ev->r.connerror.sid = iscsi_conn_get_sid(conn); | 751 | ev->r.connerror.sid = iscsi_conn_get_sid(conn); |
751 | 752 | ||
752 | iscsi_broadcast_skb(conn->z_error, skb); | 753 | iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC); |
753 | 754 | ||
754 | dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", | 755 | dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", |
755 | error); | 756 | error); |
@@ -895,7 +896,7 @@ int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn) | |||
895 | * this will occur if the daemon is not up, so we just warn | 896 | * this will occur if the daemon is not up, so we just warn |
896 | * the user and when the daemon is restarted it will handle it | 897 | * the user and when the daemon is restarted it will handle it |
897 | */ | 898 | */ |
898 | rc = iscsi_broadcast_skb(conn->z_pdu, skb); | 899 | rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); |
899 | if (rc < 0) | 900 | if (rc < 0) |
900 | dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " | 901 | dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " |
901 | "session destruction event. Check iscsi daemon\n"); | 902 | "session destruction event. Check iscsi daemon\n"); |
@@ -958,7 +959,7 @@ int iscsi_if_create_session_done(struct iscsi_cls_conn *conn) | |||
958 | * this will occur if the daemon is not up, so we just warn | 959 | * this will occur if the daemon is not up, so we just warn |
959 | * the user and when the daemon is restarted it will handle it | 960 | * the user and when the daemon is restarted it will handle it |
960 | */ | 961 | */ |
961 | rc = iscsi_broadcast_skb(conn->z_pdu, skb); | 962 | rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); |
962 | if (rc < 0) | 963 | if (rc < 0) |
963 | dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " | 964 | dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " |
964 | "session creation event. Check iscsi daemon\n"); | 965 | "session creation event. Check iscsi daemon\n"); |
@@ -1613,6 +1614,9 @@ static __init int iscsi_transport_init(void) | |||
1613 | { | 1614 | { |
1614 | int err; | 1615 | int err; |
1615 | 1616 | ||
1617 | printk(KERN_INFO "Loading iSCSI transport class v%s.", | ||
1618 | ISCSI_TRANSPORT_VERSION); | ||
1619 | |||
1616 | err = class_register(&iscsi_transport_class); | 1620 | err = class_register(&iscsi_transport_class); |
1617 | if (err) | 1621 | if (err) |
1618 | return err; | 1622 | return err; |
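Defining the version string once and using it both in the load-time banner and in MODULE_VERSION() (added at the bottom of the file) makes the same value visible in dmesg, in modinfo and under /sys/module/. A generic sketch of the idiom, with hypothetical names:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/module.h>

	#define EXAMPLE_TRANSPORT_VERSION "1.1-646"

	static int __init example_transport_init(void)
	{
		printk(KERN_INFO "Loading example transport class v%s.\n",
		       EXAMPLE_TRANSPORT_VERSION);
		return 0;
	}

	static void __exit example_transport_exit(void)
	{
	}

	module_init(example_transport_init);
	module_exit(example_transport_exit);
	MODULE_LICENSE("GPL");
	MODULE_VERSION(EXAMPLE_TRANSPORT_VERSION);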
@@ -1678,3 +1682,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, " | |||
1678 | "Alex Aizman <itn780@yahoo.com>"); | 1682 | "Alex Aizman <itn780@yahoo.com>"); |
1679 | MODULE_DESCRIPTION("iSCSI Transport Interface"); | 1683 | MODULE_DESCRIPTION("iSCSI Transport Interface"); |
1680 | MODULE_LICENSE("GPL"); | 1684 | MODULE_LICENSE("GPL"); |
1685 | MODULE_VERSION(ISCSI_TRANSPORT_VERSION); | ||
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index dd075627e605..5a625c3fddae 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
@@ -41,6 +41,7 @@ struct sas_host_attrs { | |||
41 | struct mutex lock; | 41 | struct mutex lock; |
42 | u32 next_target_id; | 42 | u32 next_target_id; |
43 | u32 next_expander_id; | 43 | u32 next_expander_id; |
44 | int next_port_id; | ||
44 | }; | 45 | }; |
45 | #define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data) | 46 | #define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data) |
46 | 47 | ||
@@ -146,6 +147,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev, | |||
146 | mutex_init(&sas_host->lock); | 147 | mutex_init(&sas_host->lock); |
147 | sas_host->next_target_id = 0; | 148 | sas_host->next_target_id = 0; |
148 | sas_host->next_expander_id = 0; | 149 | sas_host->next_expander_id = 0; |
150 | sas_host->next_port_id = 0; | ||
149 | return 0; | 151 | return 0; |
150 | } | 152 | } |
151 | 153 | ||
@@ -327,7 +329,7 @@ sas_phy_protocol_attr(identify.target_port_protocols, | |||
327 | sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", | 329 | sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", |
328 | unsigned long long); | 330 | unsigned long long); |
329 | sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); | 331 | sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); |
330 | //sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8); | 332 | //sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int); |
331 | sas_phy_linkspeed_attr(negotiated_linkrate); | 333 | sas_phy_linkspeed_attr(negotiated_linkrate); |
332 | sas_phy_linkspeed_attr(minimum_linkrate_hw); | 334 | sas_phy_linkspeed_attr(minimum_linkrate_hw); |
333 | sas_phy_linkspeed_attr(minimum_linkrate); | 335 | sas_phy_linkspeed_attr(minimum_linkrate); |
@@ -590,6 +592,38 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id) | |||
590 | } | 592 | } |
591 | EXPORT_SYMBOL(sas_port_alloc); | 593 | EXPORT_SYMBOL(sas_port_alloc); |
592 | 594 | ||
595 | /** sas_port_alloc_num - allocate and initialize a SAS port structure | ||
596 | * | ||
597 | * @parent: parent device | ||
598 | * | ||
599 | * Allocates a SAS port structure and a number to go with it. This | ||
600 | * interface is really for adapters where the port number has no | ||
601 | * meaning, so the sas class should manage them. It will be added to | ||
602 | * the device tree below the device specified by @parent which must be | ||
603 | * either a Scsi_Host or a sas_expander_device. | ||
604 | * | ||
605 | * Returns %NULL on error | ||
606 | */ | ||
607 | struct sas_port *sas_port_alloc_num(struct device *parent) | ||
608 | { | ||
609 | int index; | ||
610 | struct Scsi_Host *shost = dev_to_shost(parent); | ||
611 | struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); | ||
612 | |||
613 | /* FIXME: use idr for this eventually */ | ||
614 | mutex_lock(&sas_host->lock); | ||
615 | if (scsi_is_sas_expander_device(parent)) { | ||
616 | struct sas_rphy *rphy = dev_to_rphy(parent); | ||
617 | struct sas_expander_device *exp = rphy_to_expander_device(rphy); | ||
618 | |||
619 | index = exp->next_port_id++; | ||
620 | } else | ||
621 | index = sas_host->next_port_id++; | ||
622 | mutex_unlock(&sas_host->lock); | ||
623 | return sas_port_alloc(parent, index); | ||
624 | } | ||
625 | EXPORT_SYMBOL(sas_port_alloc_num); | ||
626 | |||
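sas_port_alloc_num() is meant for LLDDs whose hardware has no meaningful port number of its own: it picks the next free index under the sas_host lock and then defers to sas_port_alloc(). A hedged sketch of how a driver's discovery path might use it (example_attach_port and its arguments are hypothetical):

	#include <scsi/scsi_host.h>
	#include <scsi/scsi_transport_sas.h>

	/* Sketch: allocate a numbered port below the host, register it and
	 * attach one phy.  A real driver would also undo this with
	 * sas_port_delete() when the link goes away. */
	static struct sas_port *example_attach_port(struct Scsi_Host *shost,
						    struct sas_phy *phy)
	{
		struct sas_port *port;

		port = sas_port_alloc_num(&shost->shost_gendev);
		if (!port)
			return NULL;

		if (sas_port_add(port)) {
			sas_port_free(port);
			return NULL;
		}

		sas_port_add_phy(port, phy);
		return port;
	}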
593 | /** | 627 | /** |
594 | * sas_port_add - add a SAS port to the device hierarchy | 628 | * sas_port_add - add a SAS port to the device hierarchy |
595 | * | 629 | * |
@@ -658,6 +692,13 @@ void sas_port_delete(struct sas_port *port) | |||
658 | } | 692 | } |
659 | mutex_unlock(&port->phy_list_mutex); | 693 | mutex_unlock(&port->phy_list_mutex); |
660 | 694 | ||
695 | if (port->is_backlink) { | ||
696 | struct device *parent = port->dev.parent; | ||
697 | |||
698 | sysfs_remove_link(&port->dev.kobj, parent->bus_id); | ||
699 | port->is_backlink = 0; | ||
700 | } | ||
701 | |||
661 | transport_remove_device(dev); | 702 | transport_remove_device(dev); |
662 | device_del(dev); | 703 | device_del(dev); |
663 | transport_destroy_device(dev); | 704 | transport_destroy_device(dev); |
@@ -733,6 +774,19 @@ void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy) | |||
733 | } | 774 | } |
734 | EXPORT_SYMBOL(sas_port_delete_phy); | 775 | EXPORT_SYMBOL(sas_port_delete_phy); |
735 | 776 | ||
777 | void sas_port_mark_backlink(struct sas_port *port) | ||
778 | { | ||
779 | struct device *parent = port->dev.parent->parent->parent; | ||
780 | |||
781 | if (port->is_backlink) | ||
782 | return; | ||
783 | port->is_backlink = 1; | ||
784 | sysfs_create_link(&port->dev.kobj, &parent->kobj, | ||
785 | parent->bus_id); | ||
786 | |||
787 | } | ||
788 | EXPORT_SYMBOL(sas_port_mark_backlink); | ||
789 | |||
736 | /* | 790 | /* |
737 | * SAS remote PHY attributes. | 791 | * SAS remote PHY attributes. |
738 | */ | 792 | */ |
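sas_port_mark_backlink() is for a port that points back toward a parent device, for example an expander's uplink: it creates a sysfs link in the port's directory named after that parent's bus_id, and the matching cleanup added to sas_port_delete() above removes the link again. A hedged sketch of where discovery code might call it (the names and the attached_to_parent test are hypothetical):

	#include <scsi/scsi_transport_sas.h>

	/* Sketch: if this port's attached device is the one we arrived from,
	 * mark it as a backlink so sysfs shows the path back to the parent. */
	static void example_note_uplink(struct sas_port *port, int attached_to_parent)
	{
		if (attached_to_parent)
			sas_port_mark_backlink(port);
	}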
@@ -1140,7 +1194,7 @@ int sas_rphy_add(struct sas_rphy *rphy) | |||
1140 | 1194 | ||
1141 | if (identify->device_type == SAS_END_DEVICE && | 1195 | if (identify->device_type == SAS_END_DEVICE && |
1142 | rphy->scsi_target_id != -1) { | 1196 | rphy->scsi_target_id != -1) { |
1143 | scsi_scan_target(&rphy->dev, parent->port_identifier, | 1197 | scsi_scan_target(&rphy->dev, 0, |
1144 | rphy->scsi_target_id, ~0, 0); | 1198 | rphy->scsi_target_id, ~0, 0); |
1145 | } | 1199 | } |
1146 | 1200 | ||
@@ -1242,15 +1296,13 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel, | |||
1242 | 1296 | ||
1243 | mutex_lock(&sas_host->lock); | 1297 | mutex_lock(&sas_host->lock); |
1244 | list_for_each_entry(rphy, &sas_host->rphy_list, list) { | 1298 | list_for_each_entry(rphy, &sas_host->rphy_list, list) { |
1245 | struct sas_port *parent = dev_to_sas_port(rphy->dev.parent); | ||
1246 | |||
1247 | if (rphy->identify.device_type != SAS_END_DEVICE || | 1299 | if (rphy->identify.device_type != SAS_END_DEVICE || |
1248 | rphy->scsi_target_id == -1) | 1300 | rphy->scsi_target_id == -1) |
1249 | continue; | 1301 | continue; |
1250 | 1302 | ||
1251 | if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) && | 1303 | if ((channel == SCAN_WILD_CARD || channel == 0) && |
1252 | (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { | 1304 | (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { |
1253 | scsi_scan_target(&rphy->dev, parent->port_identifier, | 1305 | scsi_scan_target(&rphy->dev, 0, |
1254 | rphy->scsi_target_id, lun, 1); | 1306 | rphy->scsi_target_id, lun, 1); |
1255 | } | 1307 | } |
1256 | } | 1308 | } |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 3225d31449e1..98bd3aab9739 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -502,8 +502,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt) | |||
502 | SCpnt->cmnd[4] = (unsigned char) this_count; | 502 | SCpnt->cmnd[4] = (unsigned char) this_count; |
503 | SCpnt->cmnd[5] = 0; | 503 | SCpnt->cmnd[5] = 0; |
504 | } | 504 | } |
505 | SCpnt->request_bufflen = SCpnt->bufflen = | 505 | SCpnt->request_bufflen = this_count * sdp->sector_size; |
506 | this_count * sdp->sector_size; | ||
507 | 506 | ||
508 | /* | 507 | /* |
509 | * We shouldn't disconnect in the middle of a sector, so with a dumb | 508 | * We shouldn't disconnect in the middle of a sector, so with a dumb |
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c index 3f312a84c6a7..2679ea8bff1a 100644 --- a/drivers/scsi/seagate.c +++ b/drivers/scsi/seagate.c | |||
@@ -1002,7 +1002,7 @@ connect_loop: | |||
1002 | } | 1002 | } |
1003 | #endif | 1003 | #endif |
1004 | 1004 | ||
1005 | buffer = (struct scatterlist *) SCint->buffer; | 1005 | buffer = (struct scatterlist *) SCint->request_buffer; |
1006 | len = buffer->length; | 1006 | len = buffer->length; |
1007 | data = page_address(buffer->page) + buffer->offset; | 1007 | data = page_address(buffer->page) + buffer->offset; |
1008 | } else { | 1008 | } else { |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 65eef33846bb..34f9343ed0af 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -18,8 +18,8 @@ | |||
18 | * | 18 | * |
19 | */ | 19 | */ |
20 | 20 | ||
21 | static int sg_version_num = 30533; /* 2 digits for each component */ | 21 | static int sg_version_num = 30534; /* 2 digits for each component */ |
22 | #define SG_VERSION_STR "3.5.33" | 22 | #define SG_VERSION_STR "3.5.34" |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: | 25 | * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: |
@@ -60,7 +60,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */ | |||
60 | 60 | ||
61 | #ifdef CONFIG_SCSI_PROC_FS | 61 | #ifdef CONFIG_SCSI_PROC_FS |
62 | #include <linux/proc_fs.h> | 62 | #include <linux/proc_fs.h> |
63 | static char *sg_version_date = "20050908"; | 63 | static char *sg_version_date = "20060818"; |
64 | 64 | ||
65 | static int sg_proc_init(void); | 65 | static int sg_proc_init(void); |
66 | static void sg_proc_cleanup(void); | 66 | static void sg_proc_cleanup(void); |
@@ -1164,7 +1164,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type) | |||
1164 | len = vma->vm_end - sa; | 1164 | len = vma->vm_end - sa; |
1165 | len = (len < sg->length) ? len : sg->length; | 1165 | len = (len < sg->length) ? len : sg->length; |
1166 | if (offset < len) { | 1166 | if (offset < len) { |
1167 | page = sg->page; | 1167 | page = virt_to_page(page_address(sg->page) + offset); |
1168 | get_page(page); /* increment page count */ | 1168 | get_page(page); /* increment page count */ |
1169 | break; | 1169 | break; |
1170 | } | 1170 | } |
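The sg_vma_nopage() change matters when a scatterlist element spans more than one page: returning sg->page unconditionally always mapped the element's first page, so the faulting page is now derived from the element's base address plus the offset into it. A hedged sketch of the idea, assuming lowmem pages (as sg's buffers are):

	#include <linux/mm.h>
	#include <linux/scatterlist.h>

	/* Sketch: return the page that actually backs @offset bytes into a
	 * physically contiguous, lowmem scatterlist element. */
	static struct page *sg_elem_page(struct scatterlist *sg, unsigned long offset)
	{
		void *base = page_address(sg->page);	/* lowmem only */

		return virt_to_page(base + offset);
	}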
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index fd94408577e5..fae6e95a6298 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c | |||
@@ -360,7 +360,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt) | |||
360 | "mismatch count %d, bytes %d\n", | 360 | "mismatch count %d, bytes %d\n", |
361 | size, SCpnt->request_bufflen); | 361 | size, SCpnt->request_bufflen); |
362 | if (SCpnt->request_bufflen > size) | 362 | if (SCpnt->request_bufflen > size) |
363 | SCpnt->request_bufflen = SCpnt->bufflen = size; | 363 | SCpnt->request_bufflen = size; |
364 | } | 364 | } |
365 | } | 365 | } |
366 | 366 | ||
@@ -387,8 +387,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt) | |||
387 | 387 | ||
388 | if (this_count > 0xffff) { | 388 | if (this_count > 0xffff) { |
389 | this_count = 0xffff; | 389 | this_count = 0xffff; |
390 | SCpnt->request_bufflen = SCpnt->bufflen = | 390 | SCpnt->request_bufflen = this_count * s_size; |
391 | this_count * s_size; | ||
392 | } | 391 | } |
393 | 392 | ||
394 | SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; | 393 | SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 756ceb93ddc8..7f669b600677 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -368,7 +368,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) | |||
368 | SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], | 368 | SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], |
369 | SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); | 369 | SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); |
370 | if (cmdstatp->have_sense) | 370 | if (cmdstatp->have_sense) |
371 | __scsi_print_sense("st", SRpnt->sense, SCSI_SENSE_BUFFERSIZE); | 371 | __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); |
372 | } ) /* end DEB */ | 372 | } ) /* end DEB */ |
373 | if (!debugging) { /* Abnormal conditions for tape */ | 373 | if (!debugging) { /* Abnormal conditions for tape */ |
374 | if (!cmdstatp->have_sense) | 374 | if (!cmdstatp->have_sense) |
@@ -384,9 +384,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) | |||
384 | scode != VOLUME_OVERFLOW && | 384 | scode != VOLUME_OVERFLOW && |
385 | SRpnt->cmd[0] != MODE_SENSE && | 385 | SRpnt->cmd[0] != MODE_SENSE && |
386 | SRpnt->cmd[0] != TEST_UNIT_READY) { | 386 | SRpnt->cmd[0] != TEST_UNIT_READY) { |
387 | printk(KERN_WARNING "%s: Error with sense data: ", name); | 387 | |
388 | __scsi_print_sense("st", SRpnt->sense, | 388 | __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); |
389 | SCSI_SENSE_BUFFERSIZE); | ||
390 | } | 389 | } |
391 | } | 390 | } |
392 | 391 | ||
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 2ebe0d663899..2f8073b73bf3 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c | |||
@@ -517,7 +517,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd) | |||
517 | */ | 517 | */ |
518 | 518 | ||
519 | if (cmd->use_sg) { | 519 | if (cmd->use_sg) { |
520 | cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; | 520 | cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; |
521 | cmd->SCp.buffers_residual = cmd->use_sg - 1; | 521 | cmd->SCp.buffers_residual = cmd->use_sg - 1; |
522 | cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer); | 522 | cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer); |
523 | cmd->SCp.this_residual = cmd->SCp.buffer->length; | 523 | cmd->SCp.this_residual = cmd->SCp.buffer->length; |
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index 1f328cae5c05..6b60536ac92b 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c | |||
@@ -347,7 +347,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp) | |||
347 | static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) | 347 | static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) |
348 | { | 348 | { |
349 | int sz = sp->use_sg - 1; | 349 | int sz = sp->use_sg - 1; |
350 | struct scatterlist *sg = (struct scatterlist *)sp->buffer; | 350 | struct scatterlist *sg = (struct scatterlist *)sp->request_buffer; |
351 | 351 | ||
352 | while(sz >= 0) { | 352 | while(sz >= 0) { |
353 | dvma_unmap((char *)sg[sz].dma_address); | 353 | dvma_unmap((char *)sg[sz].dma_address); |
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 8c505076c0eb..739d3ef46a40 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
@@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = { | |||
2084 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, | 2084 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, |
2085 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | 2085 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, |
2086 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, | 2086 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, |
2087 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | 2087 | PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, |
2088 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, | 2088 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, |
2089 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | 2089 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, |
2090 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, | 2090 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, |
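Qualifying the 53C1510 entry with a PCI class and mask makes sym2 bind only when the chip presents itself as a plain SCSI storage controller; in its other personality the device presumably reports a different class and is left to another driver. A small sketch of such a class-qualified match entry (vendor/device values are deliberately fake):

	#include <linux/module.h>
	#include <linux/pci.h>

	/* Sketch: the class and class_mask fields follow the sub-IDs; with a
	 * mask of 0xffff00 the base class and subclass must match while the
	 * programming-interface byte is ignored. */
	static struct pci_device_id example_id_table[] = {
		{ 0x1234 /* vendor */, 0x5678 /* device */,
		  PCI_ANY_ID, PCI_ANY_ID,
		  PCI_CLASS_STORAGE_SCSI << 8, 0xffff00, 0UL },
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, example_id_table);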
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index 680f38ab60d8..2083454db511 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c | |||
@@ -373,7 +373,7 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd, | |||
373 | */ | 373 | */ |
374 | 374 | ||
375 | if (cmd->use_sg) { | 375 | if (cmd->use_sg) { |
376 | cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; | 376 | cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; |
377 | cmd->SCp.buffers_residual = cmd->use_sg - 1; | 377 | cmd->SCp.buffers_residual = cmd->use_sg - 1; |
378 | cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + | 378 | cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + |
379 | cmd->SCp.buffer->offset; | 379 | cmd->SCp.buffer->offset; |
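The wd33c93.c hunk, together with the seagate.c, sun3_NCR5380.c and sun3x_esp.c hunks above, is the same mechanical conversion: when use_sg is non-zero the scatterlist is reached through cmd->request_buffer rather than the removed cmd->buffer field. A hedged sketch of the resulting setup idiom in an old-style driver (field names as in this tree's struct scsi_cmnd):

	#include <linux/mm.h>
	#include <scsi/scsi_cmnd.h>

	/* Sketch: seed the per-command scratch pointers from request_buffer,
	 * treating it as a scatterlist when use_sg != 0 and as a plain
	 * virtual buffer otherwise. */
	static void example_init_scp(struct scsi_cmnd *cmd)
	{
		if (cmd->use_sg) {
			cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
			cmd->SCp.buffers_residual = cmd->use_sg - 1;
			cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
				       cmd->SCp.buffer->offset;
			cmd->SCp.this_residual = cmd->SCp.buffer->length;
		} else {
			cmd->SCp.buffer = NULL;
			cmd->SCp.buffers_residual = 0;
			cmd->SCp.ptr = (char *) cmd->request_buffer;
			cmd->SCp.this_residual = cmd->request_bufflen;
		}
	}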