Diffstat (limited to 'drivers/scsi')
 drivers/scsi/.gitignore | 2
 drivers/scsi/3w-9xxx.c | 1
 drivers/scsi/53c700.c | 11
 drivers/scsi/BusLogic.c | 2
 drivers/scsi/Kconfig | 32
 drivers/scsi/Makefile | 5
 drivers/scsi/NCR5380.c | 23
 drivers/scsi/a2091.c | 36
 drivers/scsi/a3000.c | 15
 drivers/scsi/aacraid/aachba.c | 400
 drivers/scsi/aacraid/aacraid.h | 335
 drivers/scsi/aacraid/commctrl.c | 112
 drivers/scsi/aacraid/comminit.c | 4
 drivers/scsi/aacraid/commsup.c | 394
 drivers/scsi/aacraid/dpcsup.c | 10
 drivers/scsi/aacraid/linit.c | 242
 drivers/scsi/aacraid/rx.c | 6
 drivers/scsi/advansys.c | 14
 drivers/scsi/aha152x.c | 38
 drivers/scsi/aha1542.c | 49
 drivers/scsi/aha1740.c | 2
 drivers/scsi/aic7xxx/Makefile | 45
 drivers/scsi/aic7xxx/aic79xx_osm.c | 6
 drivers/scsi/aic7xxx/aic7xxx_osm.c | 6
 drivers/scsi/aic7xxx_old.c | 11
 drivers/scsi/aic94xx/aic94xx_dev.c | 6
 drivers/scsi/aic94xx/aic94xx_dump.c | 4
 drivers/scsi/aic94xx/aic94xx_hwi.c | 2
 drivers/scsi/aic94xx/aic94xx_hwi.h | 3
 drivers/scsi/aic94xx/aic94xx_init.c | 190
 drivers/scsi/aic94xx/aic94xx_scb.c | 6
 drivers/scsi/aic94xx/aic94xx_sds.c | 389
 drivers/scsi/aic94xx/aic94xx_sds.h | 121
 drivers/scsi/aic94xx/aic94xx_task.c | 50
 drivers/scsi/aic94xx/aic94xx_tmf.c | 12
 drivers/scsi/arcmsr/arcmsr_hba.c | 6
 drivers/scsi/atari_NCR5380.c | 24
 drivers/scsi/atp870u.c | 102
 drivers/scsi/ch.c | 215
 drivers/scsi/constants.c | 3
 drivers/scsi/dc395x.c | 16
 drivers/scsi/dpt_i2o.c | 5
 drivers/scsi/eata.c | 4
 drivers/scsi/eata_pio.c | 13
 drivers/scsi/fd_mcs.c | 36
 drivers/scsi/gdth.c | 22
 drivers/scsi/hosts.c | 4
 drivers/scsi/hptiop.c | 593
 drivers/scsi/hptiop.h | 124
 drivers/scsi/ibmvscsi/ibmvscsi.c | 155
 drivers/scsi/ibmvscsi/ibmvstgt.c | 2
 drivers/scsi/imm.c | 13
 drivers/scsi/in2000.c | 10
 drivers/scsi/ipr.c | 2
 drivers/scsi/ips.c | 326
 drivers/scsi/ips.h | 32
 drivers/scsi/iscsi_tcp.c | 2075
 drivers/scsi/iscsi_tcp.h | 134
 drivers/scsi/libiscsi.c | 1091
 drivers/scsi/libsas/Kconfig | 9
 drivers/scsi/libsas/Makefile | 4
 drivers/scsi/libsas/sas_ata.c | 2
 drivers/scsi/libsas/sas_discover.c | 2
 drivers/scsi/libsas/sas_expander.c | 35
 drivers/scsi/libsas/sas_host_smp.c | 274
 drivers/scsi/libsas/sas_internal.h | 16
 drivers/scsi/libsas/sas_scsi_host.c | 9
 drivers/scsi/libsas/sas_task.c | 36
 drivers/scsi/libsrp.c | 23
 drivers/scsi/lpfc/lpfc.h | 53
 drivers/scsi/lpfc/lpfc_attr.c | 217
 drivers/scsi/lpfc/lpfc_crtn.h | 33
 drivers/scsi/lpfc/lpfc_ct.c | 304
 drivers/scsi/lpfc/lpfc_debugfs.c | 157
 drivers/scsi/lpfc/lpfc_disc.h | 2
 drivers/scsi/lpfc/lpfc_els.c | 623
 drivers/scsi/lpfc/lpfc_hbadisc.c | 372
 drivers/scsi/lpfc/lpfc_hw.h | 112
 drivers/scsi/lpfc/lpfc_init.c | 429
 drivers/scsi/lpfc/lpfc_logmsg.h | 1
 drivers/scsi/lpfc/lpfc_mbox.c | 32
 drivers/scsi/lpfc/lpfc_mem.c | 1
 drivers/scsi/lpfc/lpfc_nportdisc.c | 160
 drivers/scsi/lpfc/lpfc_scsi.c | 54
 drivers/scsi/lpfc/lpfc_scsi.h | 1
 drivers/scsi/lpfc/lpfc_sli.c | 534
 drivers/scsi/lpfc/lpfc_sli.h | 12
 drivers/scsi/lpfc/lpfc_version.h | 6
 drivers/scsi/lpfc/lpfc_vport.c | 93
 drivers/scsi/lpfc/lpfc_vport.h | 2
 drivers/scsi/megaraid.c | 2
 drivers/scsi/megaraid/megaraid_mbox.c | 6
 drivers/scsi/megaraid/megaraid_sas.c | 572
 drivers/scsi/megaraid/megaraid_sas.h | 18
 drivers/scsi/ncr53c8xx.c | 3
 drivers/scsi/pcmcia/Kconfig | 3
 drivers/scsi/pcmcia/nsp_cs.c | 54
 drivers/scsi/ppa.c | 12
 drivers/scsi/psi240i.c | 689
 drivers/scsi/psi240i.h | 315
 drivers/scsi/psi_chip.h | 195
 drivers/scsi/qla1280.c | 4
 drivers/scsi/qla2xxx/Makefile | 2
 drivers/scsi/qla2xxx/qla_attr.c | 54
 drivers/scsi/qla2xxx/qla_dbg.c | 37
 drivers/scsi/qla2xxx/qla_dbg.h | 19
 drivers/scsi/qla2xxx/qla_def.h | 52
 drivers/scsi/qla2xxx/qla_dfs.c | 175
 drivers/scsi/qla2xxx/qla_fw.h | 30
 drivers/scsi/qla2xxx/qla_gbl.h | 51
 drivers/scsi/qla2xxx/qla_init.c | 97
 drivers/scsi/qla2xxx/qla_isr.c | 120
 drivers/scsi/qla2xxx/qla_mbx.c | 430
 drivers/scsi/qla2xxx/qla_mid.c | 46
 drivers/scsi/qla2xxx/qla_os.c | 80
 drivers/scsi/qla2xxx/qla_sup.c | 18
 drivers/scsi/qla2xxx/qla_version.h | 2
 drivers/scsi/qla4xxx/ql4_init.c | 4
 drivers/scsi/qla4xxx/ql4_isr.c | 11
 drivers/scsi/qla4xxx/ql4_os.c | 7
 drivers/scsi/qlogicpti.c | 31
 drivers/scsi/scsi.c | 282
 drivers/scsi/scsi_debug.c | 37
 drivers/scsi/scsi_devinfo.c | 34
 drivers/scsi/scsi_error.c | 131
 drivers/scsi/scsi_ioctl.c | 26
 drivers/scsi/scsi_lib.c | 117
 drivers/scsi/scsi_netlink.c | 19
 drivers/scsi/scsi_proc.c | 110
 drivers/scsi/scsi_scan.c | 36
 drivers/scsi/scsi_sysfs.c | 1
 drivers/scsi/scsi_tgt_if.c | 2
 drivers/scsi/scsi_tgt_lib.c | 30
 drivers/scsi/scsi_transport_fc.c | 102
 drivers/scsi/scsi_transport_iscsi.c | 330
 drivers/scsi/scsi_transport_sas.c | 41
 drivers/scsi/scsi_transport_spi.c | 258
 drivers/scsi/scsi_transport_srp.c | 13
 drivers/scsi/scsicam.c | 35
 drivers/scsi/sd.c | 44
 drivers/scsi/seagate.c | 1667
 drivers/scsi/sg.c | 24
 drivers/scsi/sgiwd93.c | 1
 drivers/scsi/sr.c | 35
 drivers/scsi/sr.h | 4
 drivers/scsi/sr_ioctl.c | 48
 drivers/scsi/st.c | 9
 drivers/scsi/sun3_NCR5380.c | 24
 drivers/scsi/sym53c416.c | 16
 drivers/scsi/sym53c8xx_2/sym_glue.c | 22
 drivers/scsi/tmscsim.c | 6
 drivers/scsi/u14-34f.c | 4
 drivers/scsi/ultrastor.c | 15
 drivers/scsi/wd33c93.c | 10
 drivers/scsi/wd7000.c | 12
 155 files changed, 9359 insertions(+), 8524 deletions(-)
diff --git a/drivers/scsi/.gitignore b/drivers/scsi/.gitignore
index b385af314356..c89ae9a04399 100644
--- a/drivers/scsi/.gitignore
+++ b/drivers/scsi/.gitignore
@@ -1,3 +1 @@
 53c700_d.h
-53c7xx_d.h
-53c7xx_u.h
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index afb262b4be15..1c244832c6c8 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2010,6 +2010,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
 	}
 
 	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
 
 	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
 	    || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 71ff3fbfce12..f4c4fe90240a 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -608,7 +608,8 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
 		scsi_print_sense("53c700", SCp);
 
 #endif
-		dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
+		dma_unmap_single(hostdata->dev, slot->dma_handle,
+				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 		/* restore the old result if the request sense was
 		 * successful */
 		if (result == 0)
@@ -1010,7 +1011,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 			cmnd[1] = (SCp->device->lun & 0x7) << 5;
 			cmnd[2] = 0;
 			cmnd[3] = 0;
-			cmnd[4] = sizeof(SCp->sense_buffer);
+			cmnd[4] = SCSI_SENSE_BUFFERSIZE;
 			cmnd[5] = 0;
 			/* Here's a quiet hack: the
 			 * REQUEST_SENSE command is six bytes,
@@ -1024,14 +1025,14 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 			SCp->cmd_len = 6; /* command length for
 					   * REQUEST_SENSE */
 			slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
-			slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
-			slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
+			slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+			slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
 			slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
 			slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
 			slot->SG[1].pAddr = 0;
 			slot->resume_offset = hostdata->pScript;
 			dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
-			dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
+			dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 
 			/* queue the command for reissue */
 			slot->state = NCR_700_SLOT_QUEUED;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 49e1ffa4b2ff..ead47c143ce0 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2947,7 +2947,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
 		}
 	}
 	memcpy(CCB->CDB, CDB, CDB_Length);
-	CCB->SenseDataLength = sizeof(Command->sense_buffer);
+	CCB->SenseDataLength = SCSI_SENSE_BUFFERSIZE;
 	CCB->SenseDataPointer = pci_map_single(HostAdapter->PCI_Device, Command->sense_buffer, CCB->SenseDataLength, PCI_DMA_FROMDEVICE);
 	CCB->Command = Command;
 	Command->scsi_done = CompletionRoutine;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 184c7ae78519..3e161cd66463 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -341,7 +341,7 @@ config ISCSI_TCP
 	  The userspace component needed to initialize the driver, documentation,
 	  and sample configuration files can be found here:
 
-	  http://linux-iscsi.sf.net
+	  http://open-iscsi.org
 
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
@@ -573,10 +573,10 @@ config SCSI_ARCMSR_AER
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 
 config SCSI_HPTIOP
-	tristate "HighPoint RocketRAID 3xxx Controller support"
+	tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
 	depends on SCSI && PCI
 	help
-	  This option enables support for HighPoint RocketRAID 3xxx
+	  This option enables support for HighPoint RocketRAID 3xxx/4xxx
 	  controllers.
 
 	  To compile this driver as a module, choose M here; the module
@@ -1288,17 +1288,6 @@ config SCSI_PAS16
 	  To compile this driver as a module, choose M here: the
 	  module will be called pas16.
 
-config SCSI_PSI240I
-	tristate "PSI240i support"
-	depends on ISA && SCSI
-	help
-	  This is support for the PSI240i EIDE interface card which acts as a
-	  SCSI host adapter.  Please read the SCSI-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called psi240i.
-
 config SCSI_QLOGIC_FAS
 	tristate "Qlogic FAS SCSI support"
 	depends on ISA && SCSI
@@ -1359,21 +1348,6 @@ config SCSI_LPFC
 	  This lpfc driver supports the Emulex LightPulse
 	  Family of Fibre Channel PCI host adapters.
 
-config SCSI_SEAGATE
-	tristate "Seagate ST-02 and Future Domain TMC-8xx SCSI support"
-	depends on X86 && ISA && SCSI
-	select CHECK_SIGNATURE
-	---help---
-	  These are 8-bit SCSI controllers; the ST-01 is also supported by
-	  this driver.  It is explained in section 3.9 of the SCSI-HOWTO,
-	  available from <http://www.tldp.org/docs.html#howto>.  If it
-	  doesn't work out of the box, you may have to change some macros at
-	  compiletime, which are described in <file:drivers/scsi/seagate.c>.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called seagate.
-
-# definitely looks not 64bit safe:
 config SCSI_SIM710
 	tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
 	depends on (EISA || MCA) && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2e6129f13d38..93e1428d03fc 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -16,9 +16,8 @@
 
 CFLAGS_aha152x.o =   -DAHA152X_STAT -DAUTOCONF
 CFLAGS_gdth.o    = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS
-CFLAGS_seagate.o =   -DARBITRATE -DPARITY -DSEAGATE_USE_ASM
 
-subdir-$(CONFIG_PCMCIA)	+= pcmcia
+obj-$(CONFIG_PCMCIA)	+= pcmcia/
 
 obj-$(CONFIG_SCSI)	+= scsi_mod.o
 obj-$(CONFIG_SCSI_TGT)	+= scsi_tgt.o
@@ -59,7 +58,6 @@ obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
 obj-$(CONFIG_BVME6000_SCSI)	+= 53c700.o bvme6000_scsi.o
 obj-$(CONFIG_SCSI_SIM710)	+= 53c700.o sim710.o
 obj-$(CONFIG_SCSI_ADVANSYS)	+= advansys.o
-obj-$(CONFIG_SCSI_PSI240I)	+= psi240i.o
 obj-$(CONFIG_SCSI_BUSLOGIC)	+= BusLogic.o
 obj-$(CONFIG_SCSI_DPT_I2O)	+= dpt_i2o.o
 obj-$(CONFIG_SCSI_U14_34F)	+= u14-34f.o
@@ -90,7 +88,6 @@ obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
 obj-$(CONFIG_SCSI_QLA_ISCSI)	+= qla4xxx/
 obj-$(CONFIG_SCSI_LPFC)		+= lpfc/
 obj-$(CONFIG_SCSI_PAS16)	+= pas16.o
-obj-$(CONFIG_SCSI_SEAGATE)	+= seagate.o
 obj-$(CONFIG_SCSI_T128)		+= t128.o
 obj-$(CONFIG_SCSI_DMX3191D)	+= dmx3191d.o
 obj-$(CONFIG_SCSI_DTC3280)	+= dtc.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 2597209183d0..eeddbd19eba5 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -295,16 +295,16 @@ static __inline__ void initialize_SCp(Scsi_Cmnd * cmd)
 	 * various queues are valid.
 	 */
 
-	if (cmd->use_sg) {
-		cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
-		cmd->SCp.buffers_residual = cmd->use_sg - 1;
+	if (scsi_bufflen(cmd)) {
+		cmd->SCp.buffer = scsi_sglist(cmd);
+		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
 		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 	} else {
 		cmd->SCp.buffer = NULL;
 		cmd->SCp.buffers_residual = 0;
-		cmd->SCp.ptr = (char *) cmd->request_buffer;
-		cmd->SCp.this_residual = cmd->request_bufflen;
+		cmd->SCp.ptr = NULL;
+		cmd->SCp.this_residual = 0;
 	}
 }
 
@@ -932,7 +932,7 @@ static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags)
  * @instance: adapter to remove
  */
 
-static void __devexit NCR5380_exit(struct Scsi_Host *instance)
+static void NCR5380_exit(struct Scsi_Host *instance)
 {
 	struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
 
@@ -975,14 +975,14 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
 	case WRITE_6:
 	case WRITE_10:
 		hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
-		hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;
+		hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
 		hostdata->pendingw++;
 		break;
 	case READ:
 	case READ_6:
 	case READ_10:
 		hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
-		hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;
+		hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
 		hostdata->pendingr++;
 		break;
 	}
@@ -1157,16 +1157,17 @@ static void NCR5380_main(struct work_struct *work)
  *	Locks: takes the needed instance locks
  */
 
-static irqreturn_t NCR5380_intr(int irq, void *dev_id)
+static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
 {
 	NCR5380_local_declare();
-	struct Scsi_Host *instance = (struct Scsi_Host *)dev_id;
+	struct Scsi_Host *instance = dev_id;
 	struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
 	int done;
 	unsigned char basr;
 	unsigned long flags;
 
-	dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n", irq));
+	dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n",
+		instance->irq));
 
 	do {
 		done = 1;
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index b7c5385e2efe..23f27c9c9895 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -73,18 +73,9 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 	}
 
 	if (!dir_in) {
 		/* copy to bounce buffer for a write */
-		if (cmd->use_sg)
-#if 0
-			panic ("scsi%ddma: incomplete s/g support",
-			       instance->host_no);
-#else
 		memcpy (HDATA(instance)->dma_bounce_buffer,
 			cmd->SCp.ptr, cmd->SCp.this_residual);
-#endif
-		else
-			memcpy (HDATA(instance)->dma_bounce_buffer,
-				cmd->request_buffer, cmd->request_bufflen);
 	}
 }
 
@@ -144,30 +135,13 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 
 	/* copy from a bounce buffer, if necessary */
 	if (status && HDATA(instance)->dma_bounce_buffer) {
-		if (SCpnt && SCpnt->use_sg) {
-#if 0
-			panic ("scsi%d: incomplete s/g support",
-			       instance->host_no);
-#else
-			if( HDATA(instance)->dma_dir )
+		if( HDATA(instance)->dma_dir )
 			memcpy (SCpnt->SCp.ptr,
 				HDATA(instance)->dma_bounce_buffer,
 				SCpnt->SCp.this_residual);
 		kfree (HDATA(instance)->dma_bounce_buffer);
 		HDATA(instance)->dma_bounce_buffer = NULL;
 		HDATA(instance)->dma_bounce_len = 0;
-
-#endif
-		} else {
-			if (HDATA(instance)->dma_dir && SCpnt)
-				memcpy (SCpnt->request_buffer,
-					HDATA(instance)->dma_bounce_buffer,
-					SCpnt->request_bufflen);
-
-			kfree (HDATA(instance)->dma_bounce_buffer);
-			HDATA(instance)->dma_bounce_buffer = NULL;
-			HDATA(instance)->dma_bounce_len = 0;
-		}
 	}
 }
 
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 796f1c4d772e..d7255c8bf281 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -70,12 +70,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 
 	if (!dir_in) {
 		/* copy to bounce buffer for a write */
-		if (cmd->use_sg) {
-			memcpy (HDATA(a3000_host)->dma_bounce_buffer,
-				cmd->SCp.ptr, cmd->SCp.this_residual);
-		} else
-			memcpy (HDATA(a3000_host)->dma_bounce_buffer,
-				cmd->request_buffer, cmd->request_bufflen);
+		memcpy (HDATA(a3000_host)->dma_bounce_buffer,
+			cmd->SCp.ptr, cmd->SCp.this_residual);
 	}
 
 	addr = virt_to_bus(HDATA(a3000_host)->dma_bounce_buffer);
@@ -146,7 +142,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 
 	/* copy from a bounce buffer, if necessary */
 	if (status && HDATA(instance)->dma_bounce_buffer) {
-		if (SCpnt && SCpnt->use_sg) {
+		if (SCpnt) {
 			if (HDATA(instance)->dma_dir && SCpnt)
 				memcpy (SCpnt->SCp.ptr,
 					HDATA(instance)->dma_bounce_buffer,
@@ -155,11 +151,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 			HDATA(instance)->dma_bounce_buffer = NULL;
 			HDATA(instance)->dma_bounce_len = 0;
 		} else {
-			if (HDATA(instance)->dma_dir && SCpnt)
-				memcpy (SCpnt->request_buffer,
-					HDATA(instance)->dma_bounce_buffer,
-					SCpnt->request_bufflen);
-
 			kfree (HDATA(instance)->dma_bounce_buffer);
 			HDATA(instance)->dma_bounce_buffer = NULL;
 			HDATA(instance)->dma_bounce_len = 0;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a77ab8d693d4..d7235f42cf5f 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -31,9 +31,9 @@
 #include <linux/slab.h>
 #include <linux/completion.h>
 #include <linux/blkdev.h>
-#include <linux/dma-mapping.h>
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
+#include <linux/highmem.h> /* For flush_kernel_dcache_page */
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -56,54 +56,54 @@
 /*
  *	Sense codes
  */
 
 #define SENCODE_NO_SENSE			0x00
 #define SENCODE_END_OF_DATA			0x00
 #define SENCODE_BECOMING_READY			0x04
 #define SENCODE_INIT_CMD_REQUIRED		0x04
 #define SENCODE_PARAM_LIST_LENGTH_ERROR		0x1A
 #define SENCODE_INVALID_COMMAND			0x20
 #define SENCODE_LBA_OUT_OF_RANGE		0x21
 #define SENCODE_INVALID_CDB_FIELD		0x24
 #define SENCODE_LUN_NOT_SUPPORTED		0x25
 #define SENCODE_INVALID_PARAM_FIELD		0x26
 #define SENCODE_PARAM_NOT_SUPPORTED		0x26
 #define SENCODE_PARAM_VALUE_INVALID		0x26
 #define SENCODE_RESET_OCCURRED			0x29
 #define SENCODE_LUN_NOT_SELF_CONFIGURED_YET	0x3E
 #define SENCODE_INQUIRY_DATA_CHANGED		0x3F
 #define SENCODE_SAVING_PARAMS_NOT_SUPPORTED	0x39
 #define SENCODE_DIAGNOSTIC_FAILURE		0x40
 #define SENCODE_INTERNAL_TARGET_FAILURE		0x44
 #define SENCODE_INVALID_MESSAGE_ERROR		0x49
 #define SENCODE_LUN_FAILED_SELF_CONFIG		0x4c
 #define SENCODE_OVERLAPPED_COMMAND		0x4E
 
 /*
  *	Additional sense codes
  */
 
 #define ASENCODE_NO_SENSE			0x00
 #define ASENCODE_END_OF_DATA			0x05
 #define ASENCODE_BECOMING_READY			0x01
 #define ASENCODE_INIT_CMD_REQUIRED		0x02
 #define ASENCODE_PARAM_LIST_LENGTH_ERROR	0x00
 #define ASENCODE_INVALID_COMMAND		0x00
 #define ASENCODE_LBA_OUT_OF_RANGE		0x00
 #define ASENCODE_INVALID_CDB_FIELD		0x00
 #define ASENCODE_LUN_NOT_SUPPORTED		0x00
 #define ASENCODE_INVALID_PARAM_FIELD		0x00
 #define ASENCODE_PARAM_NOT_SUPPORTED		0x01
 #define ASENCODE_PARAM_VALUE_INVALID		0x02
 #define ASENCODE_RESET_OCCURRED			0x00
 #define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET	0x00
 #define ASENCODE_INQUIRY_DATA_CHANGED		0x03
 #define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED	0x00
 #define ASENCODE_DIAGNOSTIC_FAILURE		0x80
 #define ASENCODE_INTERNAL_TARGET_FAILURE	0x00
 #define ASENCODE_INVALID_MESSAGE_ERROR		0x00
 #define ASENCODE_LUN_FAILED_SELF_CONFIG		0x00
 #define ASENCODE_OVERLAPPED_COMMAND		0x00
 
 #define BYTE0(x) (unsigned char)(x)
 #define BYTE1(x) (unsigned char)((x) >> 8)
@@ -115,8 +115,8 @@
  *----------------------------------------------------------------------------*/
 /* SCSI inquiry data */
 struct inquiry_data {
 	u8 inqd_pdt;	/* Peripheral qualifier | Peripheral Device Type */
 	u8 inqd_dtq;	/* RMB | Device Type Qualifier */
 	u8 inqd_ver;	/* ISO version | ECMA version | ANSI-approved version */
 	u8 inqd_rdf;	/* AENC | TrmIOP | Response data format */
 	u8 inqd_len;	/* Additional length (n-4) */
@@ -130,7 +130,7 @@ struct inquiry_data {
 /*
  *	M O D U L E   G L O B A L S
  */
 
 static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
 static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
 static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
@@ -141,9 +141,10 @@ static char *aac_get_status_string(u32 status);
 
 /*
  *	Non dasd selection is handled entirely in aachba now
  */
 
 static int nondasd = -1;
+static int aac_cache = 0;
 static int dacmode = -1;
 
 int aac_commit = -1;
@@ -152,6 +153,8 @@ int aif_timeout = 120;
 
 module_param(nondasd, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
+module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n\tbit 0 - Disable FUA in WRITE SCSI commands\n\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n\tbit 2 - Disable only if Battery not protecting Cache");
 module_param(dacmode, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
 module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
@@ -179,7 +182,7 @@ MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health che
 
 int aac_check_reset = 1;
 module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter.");
+MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter. a value of -1 forces the reset to adapters programmed to ignore it.");
 
 int expose_physicals = -1;
 module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
@@ -193,12 +196,12 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
 				  struct fib *fibptr) {
 	struct scsi_device *device;
 
-	if (unlikely(!scsicmd || !scsicmd->scsi_done )) {
+	if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
 		dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
 		aac_fib_complete(fibptr);
 		aac_fib_free(fibptr);
 		return 0;
 	}
 	scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
 	device = scsicmd->device;
 	if (unlikely(!device || !scsi_device_online(device))) {
@@ -240,7 +243,7 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
 			    FsaNormal,
 			    1, 1,
 			    NULL, NULL);
-	if (status < 0 ) {
+	if (status < 0) {
 		printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
 	} else {
 		struct aac_get_config_status_resp *reply
@@ -264,10 +267,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
 		struct aac_commit_config * dinfo;
 		aac_fib_init(fibptr);
 		dinfo = (struct aac_commit_config *) fib_data(fibptr);
 
 		dinfo->command = cpu_to_le32(VM_ContainerConfig);
 		dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
 
 		status = aac_fib_send(ContainerCommand,
 			    fibptr,
 			    sizeof (struct aac_commit_config),
@@ -293,7 +296,7 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
 int aac_get_containers(struct aac_dev *dev)
 {
 	struct fsa_dev_info *fsa_dev_ptr;
 	u32 index;
 	int status = 0;
 	struct fib * fibptr;
 	struct aac_get_container_count *dinfo;
@@ -363,6 +366,7 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
 	if (buf && transfer_len > 0)
 		memcpy(buf + offset, data, transfer_len);
 
+	flush_kernel_dcache_page(kmap_atomic_to_page(buf - sg->offset));
 	kunmap_atomic(buf - sg->offset, KM_IRQ0);
 
 }
@@ -395,7 +399,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
 		do {
 			*dp++ = (*sp) ? *sp++ : ' ';
 		} while (--count > 0);
 		aac_internal_transfer(scsicmd, d,
 			offsetof(struct inquiry_data, inqd_pid), sizeof(d));
 		}
 	}
@@ -431,13 +435,13 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 	dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
 
 	status = aac_fib_send(ContainerCommand,
 		  cmd_fibcontext,
 		  sizeof (struct aac_get_name),
 		  FsaNormal,
 		  0, 1,
-		  (fib_callback) get_container_name_callback,
+		  (fib_callback)get_container_name_callback,
 		  (void *) scsicmd);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
@@ -445,7 +449,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 		return 0;
 	}
 
 	printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
 	aac_fib_complete(cmd_fibcontext);
 	aac_fib_free(cmd_fibcontext);
@@ -652,42 +656,47 @@ struct scsi_inq {
  *	@a:	string to copy from
  *	@b:	string to copy to
  *
  *	Copy a String from one location to another
  *	without copying \0
  */
 
 static void inqstrcpy(char *a, char *b)
 {
 
-	while(*a != (char)0)
+	while (*a != (char)0)
 		*b++ = *a++;
 }
 
 static char *container_types[] = {
 	"None",
 	"Volume",
 	"Mirror",
 	"Stripe",
 	"RAID5",
 	"SSRW",
 	"SSRO",
 	"Morph",
 	"Legacy",
 	"RAID4",
 	"RAID10",
 	"RAID00",
 	"V-MIRRORS",
 	"PSEUDO R4",
 	"RAID50",
 	"RAID5D",
 	"RAID5D0",
 	"RAID1E",
 	"RAID6",
 	"RAID60",
 	"Unknown"
 };
 
-
+char * get_container_type(unsigned tindex)
+{
+	if (tindex >= ARRAY_SIZE(container_types))
+		tindex = ARRAY_SIZE(container_types) - 1;
+	return container_types[tindex];
+}
 
 /*	Function: setinqstr
  *
@@ -707,16 +716,21 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 
 	if (dev->supplement_adapter_info.AdapterTypeText[0]) {
 		char * cp = dev->supplement_adapter_info.AdapterTypeText;
-		int c = sizeof(str->vid);
-		while (*cp && *cp != ' ' && --c)
-			++cp;
-		c = *cp;
-		*cp = '\0';
-		inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
-			   str->vid);
-		*cp = c;
-		while (*cp && *cp != ' ')
-			++cp;
+		int c;
+		if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
+			inqstrcpy("SMC", str->vid);
+		else {
+			c = sizeof(str->vid);
+			while (*cp && *cp != ' ' && --c)
+				++cp;
+			c = *cp;
+			*cp = '\0';
+			inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
+				   str->vid);
+			*cp = c;
+			while (*cp && *cp != ' ')
+				++cp;
+		}
 		while (*cp == ' ')
 			++cp;
 		/* last six chars reserved for vol type */
@@ -898,9 +912,8 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
 			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
 			  0, 0);
 		memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
-		       (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(cmd->sense_buffer))
-		       ? sizeof(cmd->sense_buffer)
-		       : sizeof(dev->fsa_dev[cid].sense_data));
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
 		cmd->scsi_done(cmd);
 		return 1;
 	}
@@ -981,7 +994,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
 	aac_fib_init(fib);
 	readcmd = (struct aac_read *) fib_data(fib);
 	readcmd->command = cpu_to_le32(VM_CtBlockRead);
-	readcmd->cid = cpu_to_le16(scmd_id(cmd));
+	readcmd->cid = cpu_to_le32(scmd_id(cmd));
 	readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
 	readcmd->count = cpu_to_le32(count * 512);
 
@@ -1013,7 +1026,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
 	writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
 	writecmd->count = cpu_to_le32(count<<9);
 	writecmd->cid = cpu_to_le16(scmd_id(cmd));
-	writecmd->flags = fua ?
+	writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
+		(((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
 		cpu_to_le16(IO_TYPE_WRITE|IO_SUREWRITE) :
 		cpu_to_le16(IO_TYPE_WRITE);
 	writecmd->bpTotal = 0;
@@ -1072,7 +1086,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
 	aac_fib_init(fib);
 	writecmd = (struct aac_write *) fib_data(fib);
 	writecmd->command = cpu_to_le32(VM_CtBlockWrite);
-	writecmd->cid = cpu_to_le16(scmd_id(cmd));
+	writecmd->cid = cpu_to_le32(scmd_id(cmd));
 	writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
 	writecmd->count = cpu_to_le32(count * 512);
 	writecmd->sg.count = cpu_to_le32(1);
@@ -1190,6 +1204,15 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
 			(fib_callback) aac_srb_callback, (void *) cmd);
 }
 
+static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
+{
+	if ((sizeof(dma_addr_t) > 4) &&
+	 (num_physpages > (0xFFFFFFFFULL >> PAGE_SHIFT)) &&
+	 (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
+		return FAILED;
+	return aac_scsi_32(fib, cmd);
+}
+
 int aac_get_adapter_info(struct aac_dev* dev)
 {
 	struct fib* fibptr;
@@ -1207,11 +1230,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
 	memset(info,0,sizeof(*info));
 
 	rcode = aac_fib_send(RequestAdapterInfo,
 			 fibptr,
 			 sizeof(*info),
 			 FsaNormal,
 			 -1, 1, /* First `interrupt' command uses special wait */
 			 NULL,
 			 NULL);
 
 	if (rcode < 0) {
@@ -1222,29 +1245,29 @@ int aac_get_adapter_info(struct aac_dev* dev)
 	memcpy(&dev->adapter_info, info, sizeof(*info));
 
 	if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
-		struct aac_supplement_adapter_info * info;
+		struct aac_supplement_adapter_info * sinfo;
 
 		aac_fib_init(fibptr);
 
-		info = (struct aac_supplement_adapter_info *) fib_data(fibptr);
+		sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
 
-		memset(info,0,sizeof(*info));
+		memset(sinfo,0,sizeof(*sinfo));
 
 		rcode = aac_fib_send(RequestSupplementAdapterInfo,
 				 fibptr,
-				 sizeof(*info),
+				 sizeof(*sinfo),
 				 FsaNormal,
 				 1, 1,
 				 NULL,
 				 NULL);
 
 		if (rcode >= 0)
-			memcpy(&dev->supplement_adapter_info, info, sizeof(*info));
+			memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
 	}
 
 
 	/*
 	 *	GetBusInfo
 	 */
 
 	aac_fib_init(fibptr);
@@ -1267,6 +1290,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
 			 1, 1,
 			 NULL, NULL);
 
+	/* reasoned default */
+	dev->maximum_num_physicals = 16;
 	if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
 		dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
 		dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
@@ -1276,7 +1301,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
 		char buffer[16];
 		tmp = le32_to_cpu(dev->adapter_info.kernelrev);
 		printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
 			dev->name,
 			dev->id,
 			tmp>>24,
 			(tmp>>16)&0xff,
@@ -1305,19 +1330,21 @@ int aac_get_adapter_info(struct aac_dev* dev)
 		  (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
 		  dev->supplement_adapter_info.VpdInfo.Tsid);
 	}
-	if (!aac_check_reset ||
+	if (!aac_check_reset || ((aac_check_reset != 1) &&
 	  (dev->supplement_adapter_info.SupportedOptions2 &
-	  le32_to_cpu(AAC_OPTION_IGNORE_RESET))) {
+	  AAC_OPTION_IGNORE_RESET))) {
 		printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
 		  dev->name, dev->id);
 	}
 	}
 
+	dev->cache_protected = 0;
+	dev->jbod = ((dev->supplement_adapter_info.FeatureBits &
+		AAC_FEATURE_JBOD) != 0);
 	dev->nondasd_support = 0;
 	dev->raid_scsi_mode = 0;
-	if(dev->adapter_info.options & AAC_OPT_NONDASD){
+	if(dev->adapter_info.options & AAC_OPT_NONDASD)
 		dev->nondasd_support = 1;
-	}
 
 	/*
 	 * If the firmware supports ROMB RAID/SCSI mode and we are currently
@@ -1338,11 +1365,10 @@ int aac_get_adapter_info(struct aac_dev* dev)
 	if (dev->raid_scsi_mode != 0)
 		printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
 			dev->name, dev->id);
 
-	if(nondasd != -1) {
+	if (nondasd != -1)
 		dev->nondasd_support = (nondasd!=0);
-	}
-	if(dev->nondasd_support != 0){
+	if(dev->nondasd_support != 0) {
 		printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
 	}
 
@@ -1371,12 +1397,14 @@ int aac_get_adapter_info(struct aac_dev* dev)
 			rcode = -ENOMEM;
 		}
 	}
 	/*
 	 * Deal with configuring for the individualized limits of each packet
 	 * interface.
 	 */
 	dev->a_ops.adapter_scsi = (dev->dac_support)
-				? aac_scsi_64
+	  ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
+				? aac_scsi_32_64
+				: aac_scsi_64)
 				: aac_scsi_32;
 	if (dev->raw_io_interface) {
 		dev->a_ops.adapter_bounds = (dev->raw_io_64)
@@ -1393,8 +1421,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
 	if (dev->dac_support) {
 		dev->a_ops.adapter_read = aac_read_block64;
 		dev->a_ops.adapter_write = aac_write_block64;
 		/*
 		 *	38 scatter gather elements
 		 */
 		dev->scsi_host_ptr->sg_tablesize =
 			(dev->max_fib_size -
@@ -1498,9 +1526,8 @@ static void io_callback(void *context, struct fib * fibptr)
 			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
 			  0, 0);
 		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
-		       (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
-		       ? sizeof(scsicmd->sense_buffer)
-		       : sizeof(dev->fsa_dev[cid].sense_data));
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
 	}
 	aac_fib_complete(fibptr);
 	aac_fib_free(fibptr);
@@ -1524,7 +1551,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
 	case READ_6:
 		dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
 
 		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
 			(scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
 		count = scsicmd->cmnd[4];
 
@@ -1534,32 +1561,32 @@ static int aac_read(struct scsi_cmnd * scsicmd)
 	case READ_16:
 		dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
 
 		lba =	((u64)scsicmd->cmnd[2] << 56) |
 			((u64)scsicmd->cmnd[3] << 48) |
 			((u64)scsicmd->cmnd[4] << 40) |
 			((u64)scsicmd->cmnd[5] << 32) |
 			((u64)scsicmd->cmnd[6] << 24) |
 			(scsicmd->cmnd[7] << 16) |
 			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
 		count = (scsicmd->cmnd[10] << 24) |
 			(scsicmd->cmnd[11] << 16) |
 			(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
 		break;
 	case READ_12:
 		dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
 
 		lba = ((u64)scsicmd->cmnd[2] << 24) |
 			(scsicmd->cmnd[3] << 16) |
 			(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
 		count = (scsicmd->cmnd[6] << 24) |
 			(scsicmd->cmnd[7] << 16) |
 			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
 		break;
 	default:
 		dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
 
 		lba = ((u64)scsicmd->cmnd[2] << 24) |
 			(scsicmd->cmnd[3] << 16) |
 			(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
 		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
 		break;
@@ -1584,7 +1611,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
 		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 		return 0;
 	}
 
 	printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
 	/*
 	 * For some reason, the Fib didn't queue, return QUEUE_FULL
@@ -1619,11 +1646,11 @@ static int aac_write(struct scsi_cmnd * scsicmd)
 	} else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
 		dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
 
 		lba =	((u64)scsicmd->cmnd[2] << 56) |
 			((u64)scsicmd->cmnd[3] << 48) |
 			((u64)scsicmd->cmnd[4] << 40) |
 			((u64)scsicmd->cmnd[5] << 32) |
 			((u64)scsicmd->cmnd[6] << 24) |
 			(scsicmd->cmnd[7] << 16) |
 			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
 		count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
@@ -1712,8 +1739,8 @@ static void synchronize_callback(void *context, struct fib *fibptr)
 			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
 			  0, 0);
 		memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
-		       min(sizeof(dev->fsa_dev[cid].sense_data),
-			   sizeof(cmd->sense_buffer)));
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
 	}
 
 	aac_fib_complete(fibptr);
@@ -1798,7 +1825,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
 	if (active)
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 
-	aac = (struct aac_dev *)scsicmd->device->host->hostdata;
+	aac = (struct aac_dev *)sdev->host->hostdata;
 	if (aac->in_reset)
 		return SCSI_MLQUEUE_HOST_BUSY;
 
@@ -1850,14 +1877,14 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
  *	Emulate a SCSI command and queue the required request for the
  *	aacraid firmware.
  */
 
 int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 {
 	u32 cid;
 	struct Scsi_Host *host = scsicmd->device->host;
 	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
 	struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
 
 	if (fsa_dev_ptr == NULL)
 		return -1;
 	/*
@@ -1898,7 +1925,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 			}
 		}
 	} else {  /* check for physical non-dasd devices */
-		if ((dev->nondasd_support == 1) || expose_physicals) {
+		if (dev->nondasd_support || expose_physicals ||
+				dev->jbod) {
 			if (dev->in_reset)
 				return -1;
 			return aac_send_srb_fib(scsicmd);
@@ -1913,7 +1941,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 	 * else Command for the controller itself
 	 */
 	else if ((scsicmd->cmnd[0] != INQUIRY) &&	/* only INQUIRY & TUR cmnd supported for controller */
 		(scsicmd->cmnd[0] != TEST_UNIT_READY))
 	{
 		dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
 		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
@@ -1922,9 +1950,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 			    SENCODE_INVALID_COMMAND,
 			    ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
 		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
-		  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
-		    ? sizeof(scsicmd->sense_buffer)
-		    : sizeof(dev->fsa_dev[cid].sense_data));
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
 		scsicmd->scsi_done(scsicmd);
 		return 0;
 	}
@@ -1939,7 +1966,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 		dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
 		memset(&inq_data, 0, sizeof (struct inquiry_data));

-		if (scsicmd->cmnd[1] & 0x1 ) {
+		if (scsicmd->cmnd[1] & 0x1) {
 			char *arr = (char *)&inq_data;

 			/* EVPD bit set */
@@ -1974,10 +2001,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 					    ASENCODE_NO_SENSE, 0, 7, 2, 0);
 				memcpy(scsicmd->sense_buffer,
 				       &dev->fsa_dev[cid].sense_data,
-				       (sizeof(dev->fsa_dev[cid].sense_data) >
-					 sizeof(scsicmd->sense_buffer))
-					? sizeof(scsicmd->sense_buffer)
-					: sizeof(dev->fsa_dev[cid].sense_data));
+				       min_t(size_t,
+					     sizeof(dev->fsa_dev[cid].sense_data),
+					     SCSI_SENSE_BUFFERSIZE));
 			}
 			scsicmd->scsi_done(scsicmd);
 			return 0;
@@ -2092,7 +2118,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 		mode_buf[2] = 0;	/* Device-specific param,
 					   bit 8: 0/1 = write enabled/protected
 					   bit 4: 0/1 = FUA enabled */
-		if (dev->raw_io_interface)
+		if (dev->raw_io_interface && ((aac_cache & 5) != 1))
 			mode_buf[2] = 0x10;
 		mode_buf[3] = 0;	/* Block descriptor length */
 		if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
@@ -2100,7 +2126,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 			mode_buf[0] = 6;
 			mode_buf[4] = 8;
 			mode_buf[5] = 1;
-			mode_buf[6] = 0x04; /* WCE */
+			mode_buf[6] = ((aac_cache & 6) == 2)
+				? 0 : 0x04; /* WCE */
 			mode_buf_length = 7;
 			if (mode_buf_length > scsicmd->cmnd[4])
 				mode_buf_length = scsicmd->cmnd[4];
@@ -2123,7 +2150,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 		mode_buf[3] = 0;	/* Device-specific param,
 					   bit 8: 0/1 = write enabled/protected
 					   bit 4: 0/1 = FUA enabled */
-		if (dev->raw_io_interface)
+		if (dev->raw_io_interface && ((aac_cache & 5) != 1))
 			mode_buf[3] = 0x10;
 		mode_buf[4] = 0;	/* reserved */
 		mode_buf[5] = 0;	/* reserved */
@@ -2134,7 +2161,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 			mode_buf[1] = 9;
 			mode_buf[8] = 8;
 			mode_buf[9] = 1;
-			mode_buf[10] = 0x04; /* WCE */
+			mode_buf[10] = ((aac_cache & 6) == 2)
+				? 0 : 0x04; /* WCE */
 			mode_buf_length = 11;
 			if (mode_buf_length > scsicmd->cmnd[8])
 				mode_buf_length = scsicmd->cmnd[8];
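
Both MODE SENSE paths (the 6- and 10-byte forms) now derive the reported caching bits from the aac_cache module parameter. Judging from the masks used here, bit 1 (value 2) suppresses the advertised write-cache-enable (WCE) bit, and the FUA indication additionally requires raw_io_interface; this reading is inferred from the tests above rather than documented in this hunk. A compilable sketch of the two decisions:

/* Sketch only: flag meanings are inferred from the masks in the
 * hunks above; aac_cache is the driver's cache-control parameter. */
static int mode_sense_wce(int aac_cache)
{
	return ((aac_cache & 6) == 2) ? 0 : 0x04;	/* WCE bit */
}

static int mode_sense_fua(int aac_cache, int raw_io_interface)
{
	return (raw_io_interface && ((aac_cache & 5) != 1))
		? 0x10 : 0;				/* FUA bit */
}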
@@ -2179,7 +2207,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 		return 0;
 	}

 	switch (scsicmd->cmnd[0])
 	{
 		case READ_6:
 		case READ_10:
@@ -2192,11 +2220,11 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 			 *	corresponds to a container. Needed to convert
 			 *	containers to /dev/sd device names
 			 */

 			if (scsicmd->request->rq_disk)
 				strlcpy(fsa_dev_ptr[cid].devname,
 					scsicmd->request->rq_disk->disk_name,
 					min(sizeof(fsa_dev_ptr[cid].devname),
 					    sizeof(scsicmd->request->rq_disk->disk_name) + 1));

 			return aac_read(scsicmd);
@@ -2210,9 +2238,16 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 			return aac_write(scsicmd);

 		case SYNCHRONIZE_CACHE:
+			if (((aac_cache & 6) == 6) && dev->cache_protected) {
+				scsicmd->result = DID_OK << 16 |
+					COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+				scsicmd->scsi_done(scsicmd);
+				return 0;
+			}
 			/* Issue FIB to tell Firmware to flush it's cache */
-			return aac_synchronize(scsicmd);
-
+			if ((aac_cache & 6) != 2)
+				return aac_synchronize(scsicmd);
+			/* FALLTHRU */
 		default:
 			/*
 			 * Unhandled commands
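
The reworked SYNCHRONIZE_CACHE case has three outcomes, driven by aac_cache and the firmware-reported cache_protected flag: complete immediately with GOOD status when the cache is protected and the user has opted to trust it, issue the flush FIB in the ordinary case, and fall through to the unhandled-command (CHECK CONDITION) path when cache handling is disabled. A small decision sketch under those assumed flag meanings:

enum sync_action { SYNC_COMPLETE_GOOD, SYNC_FLUSH_FIB, SYNC_REJECT };

/* Sketch of the SYNCHRONIZE_CACHE decision above; the aac_cache
 * bit meanings are inferred from the masks, not documented here. */
static enum sync_action sync_cache_action(int aac_cache, int cache_protected)
{
	if (((aac_cache & 6) == 6) && cache_protected)
		return SYNC_COMPLETE_GOOD;	/* protected cache, skip flush */
	if ((aac_cache & 6) != 2)
		return SYNC_FLUSH_FIB;		/* ask firmware to flush */
	return SYNC_REJECT;			/* falls through to default */
}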
@@ -2223,9 +2258,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 				    ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
 				    ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
 			memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
-			  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
-			    ? sizeof(scsicmd->sense_buffer)
-			    : sizeof(dev->fsa_dev[cid].sense_data));
+				min_t(size_t,
+				      sizeof(dev->fsa_dev[cid].sense_data),
+				      SCSI_SENSE_BUFFERSIZE));
 			scsicmd->scsi_done(scsicmd);
 			return 0;
 	}
@@ -2243,7 +2278,7 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
 		return -EFAULT;
 	if (qd.cnum == -1)
 		qd.cnum = qd.id;
 	else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
 	{
 		if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
 			return -EINVAL;
@@ -2370,7 +2405,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)

 	scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
 	/*
 	 *	Calculate resid for sg
 	 */

 	scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
@@ -2385,10 +2420,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 	if (le32_to_cpu(srbreply->status) != ST_OK){
 		int len;
 		printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
-		len = (le32_to_cpu(srbreply->sense_data_size) >
-				sizeof(scsicmd->sense_buffer)) ?
-				sizeof(scsicmd->sense_buffer) :
-				le32_to_cpu(srbreply->sense_data_size);
+		len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+			    SCSI_SENSE_BUFFERSIZE);
 		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
 		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
 	}
@@ -2412,7 +2445,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 	case WRITE_12:
 	case READ_16:
 	case WRITE_16:
-		if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
+		if (le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow) {
 			printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
 		} else {
 			printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
@@ -2481,26 +2514,23 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 		printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
 			le32_to_cpu(srbreply->srb_status) & 0x3F,
 			aac_get_status_string(
 				le32_to_cpu(srbreply->srb_status) & 0x3F),
 			scsicmd->cmnd[0],
 			le32_to_cpu(srbreply->scsi_status));
 #endif
 		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
 		break;
 	}
-	if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){  // Check Condition
+	if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) {
 		int len;
 		scsicmd->result |= SAM_STAT_CHECK_CONDITION;
-		len = (le32_to_cpu(srbreply->sense_data_size) >
-				sizeof(scsicmd->sense_buffer)) ?
-				sizeof(scsicmd->sense_buffer) :
-				le32_to_cpu(srbreply->sense_data_size);
+		len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+			    SCSI_SENSE_BUFFERSIZE);
 #ifdef AAC_DETAILED_STATUS_INFO
 		printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
 			le32_to_cpu(srbreply->status), len);
 #endif
 		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
-
 	}
 	/*
 	 * OR in the scsi status (already shifted up a bit)
@@ -2517,7 +2547,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
  * aac_send_scb_fib
  * @scsicmd: the scsi command block
  *
  * This routine will form a FIB and fill in the aac_srb from the
  * scsicmd passed in.
  */

@@ -2731,7 +2761,7 @@ static struct aac_srb_status_info srb_status_info[] = {
 	{ SRB_STATUS_ERROR_RECOVERY,	"Error Recovery"},
 	{ SRB_STATUS_NOT_STARTED,	"Not Started"},
 	{ SRB_STATUS_NOT_IN_USE,	"Not In Use"},
 	{ SRB_STATUS_FORCE_ABORT,	"Force Abort"},
 	{ SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
 	{ 0xff,				"Unknown Error"}
 };
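
srb_status_info is a plain code-to-string table terminated by the 0xff "Unknown Error" sentinel; aac_get_status_string, used in the SRB ERROR printk earlier, presumably resolves a masked srb_status against it. A minimal sketch of such a lookup (the driver's actual helper may differ in detail):

struct aac_srb_status_info {
	unsigned int	status;
	char		*str;
};

/* Walk the table linearly; the 0xff sentinel guarantees termination
 * and doubles as the fallback entry. */
static char *srb_status_string(const struct aac_srb_status_info *tbl,
			       unsigned int status)
{
	while (tbl->status != 0xff && tbl->status != status)
		tbl++;
	return tbl->str;
}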
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 9abba8b90f70..3195d29f2177 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1,4 +1,4 @@
-#if (!defined(dprintk))
+#ifndef dprintk
 # define dprintk(x)
 #endif
 /* eg: if (nblank(dprintk(x))) */
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/

 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 2449
+# define AAC_DRIVER_BUILD 2455
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -50,9 +50,9 @@ struct diskparm
 /*
  *	Firmware constants
  */

 #define		CT_NONE			0
 #define		CT_OK			218
 #define		FT_FILESYS	8	/* ADAPTEC's "FSA"(tm) filesystem */
 #define		FT_DRIVE	9	/* physical disk - addressable in scsi by bus/id/lun */

@@ -107,12 +107,12 @@ struct user_sgentryraw {

 struct sgmap {
 	__le32		count;
 	struct sgentry	sg[1];
 };

 struct user_sgmap {
 	u32		count;
 	struct user_sgentry	sg[1];
 };

 struct sgmap64 {
@@ -137,18 +137,18 @@ struct user_sgmapraw {

 struct creation_info
 {
 	u8		buildnum;	/* e.g., 588 */
 	u8		usec;		/* e.g., 588 */
 	u8		via;		/* e.g., 1 = FSU,
 					 *	 2 = API
 					 */
 	u8		year;		/* e.g., 1997 = 97 */
 	__le32		date;		/*
 					 * unsigned	Month		:4;	// 1 - 12
 					 * unsigned	Day		:6;	// 1 - 32
 					 * unsigned	Hour		:6;	// 0 - 23
 					 * unsigned	Minute		:6;	// 0 - 60
 					 * unsigned	Second		:6;	// 0 - 60
 					 */
 	__le32		serial[2];	/* e.g., 0x1DEADB0BFAFAF001 */
 };
@@ -184,7 +184,7 @@ struct creation_info
 /*
  *	Set the queues on a 16 byte alignment
  */

 #define QUEUE_ALIGNMENT	16

 /*
@@ -203,9 +203,9 @@ struct aac_entry {
  *	The adapter assumes the ProducerIndex and ConsumerIndex are grouped
  *	adjacently and in that order.
  */

 struct aac_qhdr {
 	__le64 header_addr;/* Address to hand the adapter to access
 			      to this queue head */
 	__le32 *producer; /* The producer index for this queue (host address) */
 	__le32 *consumer; /* The consumer index for this queue (host address) */
@@ -215,7 +215,7 @@ struct aac_qhdr {
  *	Define all the events which the adapter would like to notify
  *	the host of.
  */

 #define		HostNormCmdQue		1	/* Change in host normal priority command queue */
 #define		HostHighCmdQue		2	/* Change in host high priority command queue */
 #define		HostNormRespQue		3	/* Change in host normal priority response queue */
@@ -286,17 +286,17 @@ struct aac_fibhdr {
 	u8 StructType;		/* Type FIB */
 	u8 Flags;		/* Flags for FIB */
 	__le16 Size;		/* Size of this FIB in bytes */
 	__le16 SenderSize;	/* Size of the FIB in the sender
 				   (for response sizing) */
 	__le32 SenderFibAddress;  /* Host defined data in the FIB */
 	__le32 ReceiverFibAddress;/* Logical address of this FIB for
 				     the adapter */
 	u32 SenderData;		/* Place holder for the sender to store data */
 	union {
 		struct {
 			__le32 _ReceiverTimeStart;	/* Timestamp for
 							   receipt of fib */
 			__le32 _ReceiverTimeDone;	/* Timestamp for
 							   completion of fib */
 		} _s;
 	} _u;
@@ -311,7 +311,7 @@ struct hw_fib {
  *	FIB commands
  */

 #define		TestCommandResponse		1
 #define		TestAdapterCommand		2
 /*
  *	Lowlevel and comm commands
@@ -350,10 +350,6 @@ struct hw_fib {
 #define		ContainerCommand64		501
 #define		ContainerRawIo			502
 /*
- *	Cluster Commands
- */
-#define		ClusterCommand	 		550
-/*
  *	Scsi Port commands (scsi passthrough)
  */
 #define		ScsiPortCommand			600
@@ -375,19 +371,19 @@ struct hw_fib {
  */

 enum fib_xfer_state {
 	HostOwned			= (1<<0),
 	AdapterOwned			= (1<<1),
 	FibInitialized			= (1<<2),
 	FibEmpty			= (1<<3),
 	AllocatedFromPool		= (1<<4),
 	SentFromHost			= (1<<5),
 	SentFromAdapter			= (1<<6),
 	ResponseExpected		= (1<<7),
 	NoResponseExpected		= (1<<8),
 	AdapterProcessed		= (1<<9),
 	HostProcessed			= (1<<10),
 	HighPriority			= (1<<11),
 	NormalPriority			= (1<<12),
 	Async				= (1<<13),
 	AsyncIo				= (1<<13),	// rpbfix: remove with new regime
 	PageFileIo			= (1<<14),	// rpbfix: remove with new regime
@@ -420,7 +416,7 @@ struct aac_init
 	__le32	AdapterFibAlign;
 	__le32	printfbuf;
 	__le32	printfbufsiz;
 	__le32	HostPhysMemPages;   /* number of 4k pages of host
 				       physical memory */
 	__le32	HostElapsedSeconds; /* number of seconds since 1970. */
 	/*
@@ -481,7 +477,7 @@ struct adapter_ops

 struct aac_driver_ident
 {
 	int	(*init)(struct aac_dev *dev);
 	char *	name;
 	char *	vname;
 	char *	model;
@@ -489,7 +485,7 @@ struct aac_driver_ident
 	int	quirks;
 };
 /*
  * Some adapter firmware needs communication memory
  * below 2gig. This tells the init function to set the
  * dma mask such that fib memory will be allocated where the
  * adapter firmware can get to it.
@@ -521,33 +517,39 @@ struct aac_driver_ident
 #define AAC_QUIRK_17SG	0x0010

 /*
+ * Some adapter firmware does not support 64 bit scsi passthrough
+ * commands.
+ */
+#define AAC_QUIRK_SCSI_32	0x0020
+
+/*
  *	The adapter interface specs all queues to be located in the same
  *	physically contigous block. The host structure that defines the
  *	commuication queues will assume they are each a separate physically
  *	contigous memory region that will support them all being one big
  *	contigous block.
  *	There is a command and response queue for each level and direction of
  *	commuication. These regions are accessed by both the host and adapter.
  */

 struct aac_queue {
 	u64			logical;	/*address we give the adapter */
 	struct aac_entry	*base;		/*system virtual address */
 	struct aac_qhdr		headers;	/*producer,consumer q headers*/
 	u32			entries;	/*Number of queue entries */
 	wait_queue_head_t	qfull;		/*Event to wait on if q full */
 	wait_queue_head_t	cmdready;	/*Cmd ready from the adapter */
 	/* This is only valid for adapter to host command queues. */
 	spinlock_t		*lock;		/* Spinlock for this queue must take this lock before accessing the lock */
 	spinlock_t		lockdata;	/* Actual lock (used only on one side of the lock) */
 	struct list_head	cmdq;		/* A queue of FIBs which need to be prcessed by the FS thread. This is */
 						/* only valid for command queues which receive entries from the adapter. */
 	u32			numpending;	/* Number of entries on outstanding queue. */
 	struct aac_dev		*dev;		/* Back pointer to adapter structure */
 };

 /*
  *	Message queues. The order here is important, see also the
  *	queue type ordering
  */

@@ -559,12 +561,12 @@ struct aac_queue_block
 /*
  *	SaP1 Message Unit Registers
  */

 struct sa_drawbridge_CSR {
 				/*	Offset	|  Name */
 	__le32	reserved[10];	/*	00h-27h |  Reserved */
 	u8	LUT_Offset;	/*	28h	|  Lookup Table Offset */
 	u8	reserved1[3];	/*	29h-2bh	|  Reserved */
 	__le32	LUT_Data;	/*	2ch	|  Looup Table Data */
 	__le32	reserved2[26];	/*	30h-97h	|  Reserved */
 	__le16	PRICLEARIRQ;	/*	98h	|  Primary Clear Irq */
@@ -583,8 +585,8 @@ struct sa_drawbridge_CSR {
 	__le32	MAILBOX5;	/*	bch	|  Scratchpad 5 */
 	__le32	MAILBOX6;	/*	c0h	|  Scratchpad 6 */
 	__le32	MAILBOX7;	/*	c4h	|  Scratchpad 7 */
 	__le32	ROM_Setup_Data;	/*	c8h	|  Rom Setup and Data */
 	__le32	ROM_Control_Addr;/*	cch	|  Rom Control and Address */
 	__le32	reserved3[12];	/*	d0h-ffh	|  reserved */
 	__le32	LUT[64];	/*	100h-1ffh|  Lookup Table Entries */
 };
@@ -597,7 +599,7 @@ struct sa_drawbridge_CSR {
 #define Mailbox5	SaDbCSR.MAILBOX5
 #define Mailbox6	SaDbCSR.MAILBOX6
 #define Mailbox7	SaDbCSR.MAILBOX7

 #define DoorbellReg_p SaDbCSR.PRISETIRQ
 #define DoorbellReg_s SaDbCSR.SECSETIRQ
 #define DoorbellClrReg_p SaDbCSR.PRICLEARIRQ
@@ -611,19 +613,19 @@ struct sa_drawbridge_CSR {
 #define DOORBELL_5	0x0020
 #define DOORBELL_6	0x0040


 #define PrintfReady	DOORBELL_5
 #define PrintfDone	DOORBELL_5

 struct sa_registers {
 	struct sa_drawbridge_CSR	SaDbCSR;		/* 98h - c4h */
 };


 #define Sa_MINIPORT_REVISION			1

 #define sa_readw(AEP, CSR)		readl(&((AEP)->regs.sa->CSR))
 #define sa_readl(AEP, CSR)		readl(&((AEP)->regs.sa->CSR))
 #define sa_writew(AEP, CSR, value)	writew(value, &((AEP)->regs.sa->CSR))
 #define sa_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.sa->CSR))

@@ -640,21 +642,21 @@ struct rx_mu_registers {
 	__le32	IMRx[2];    /* 1310h  | 10h  | Inbound Message Registers */
 	__le32	OMRx[2];    /* 1318h  | 18h  | Outbound Message Registers */
 	__le32	IDR;	    /* 1320h  | 20h  | Inbound Doorbell Register */
 	__le32	IISR;	    /* 1324h  | 24h  | Inbound Interrupt
 				Status Register */
 	__le32	IIMR;	    /* 1328h  | 28h  | Inbound Interrupt
 				Mask Register */
 	__le32	ODR;	    /* 132Ch  | 2Ch  | Outbound Doorbell Register */
 	__le32	OISR;	    /* 1330h  | 30h  | Outbound Interrupt
 				Status Register */
 	__le32	OIMR;	    /* 1334h  | 34h  | Outbound Interrupt
 				Mask Register */
 	__le32	reserved2;  /* 1338h  | 38h  | Reserved */
 	__le32	reserved3;  /* 133Ch  | 3Ch  | Reserved */
 	__le32	InboundQueue;/* 1340h  | 40h  | Inbound Queue Port relative to firmware */
 	__le32	OutboundQueue;/*1344h  | 44h  | Outbound Queue Port relative to firmware */
 			    /* * Must access through ATU Inbound
 				 Translation Window */
 };

 struct rx_inbound {
@@ -710,12 +712,12 @@ struct rkt_registers {
 typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);

 struct aac_fib_context {
 	s16			type;		// used for verification of structure
 	s16			size;
 	u32			unique;		// unique value representing this context
 	ulong			jiffies;	// used for cleanup - dmb changed to ulong
 	struct list_head	next;		// used to link context's into a linked list
 	struct semaphore	wait_sem;	// this is used to wait for the next fib to arrive.
 	int			wait;		// Set to true when thread is in WaitForSingleObject
 	unsigned long		count;		// total number of FIBs on FibList
 	struct list_head	fib_list;	// this holds fibs and their attachd hw_fibs
@@ -734,9 +736,9 @@ struct sense_data {
 	u8	EOM:1;		/* End Of Medium - reserved for random access devices */
 	u8	filemark:1;	/* Filemark - reserved for random access devices */

 	u8	information[4];	/* for direct-access devices, contains the unsigned
 				 * logical block address or residue associated with
 				 * the sense key
 				 */
 	u8	add_sense_len;	/* number of additional sense bytes to follow this field */
 	u8	cmnd_info[4];	/* not used */
@@ -746,7 +748,7 @@ struct sense_data {
 	u8	bit_ptr:3;	/* indicates which byte of the CDB or parameter data
 				 * was in error
 				 */
 	u8	BPV:1;		/* bit pointer valid (BPV): 1- indicates that
 				 * the bit_ptr field has valid value
 				 */
 	u8	reserved2:2;
@@ -780,24 +782,24 @@ struct fib {
 	/*
 	 *	The Adapter that this I/O is destined for.
 	 */
 	struct aac_dev		*dev;
 	/*
 	 *	This is the event the sendfib routine will wait on if the
 	 *	caller did not pass one and this is synch io.
 	 */
 	struct semaphore	event_wait;
 	spinlock_t		event_lock;

 	u32			done;	/* gets set to 1 when fib is complete */
 	fib_callback		callback;
 	void			*callback_data;
 	u32			flags; // u32 dmb was ulong
 	/*
 	 *	And for the internal issue/reply queues (we may be able
 	 *	to merge these two)
 	 */
 	struct list_head	fiblink;
 	void			*data;
 	struct hw_fib		*hw_fib_va;	/* Actual shared object */
 	dma_addr_t		hw_fib_pa;	/* physical address of hw_fib*/
 };
@@ -807,7 +809,7 @@ struct fib {
  *
  *	This is returned by the RequestAdapterInfo block
  */

 struct aac_adapter_info
 {
 	__le32	platform;
@@ -826,7 +828,7 @@ struct aac_adapter_info
 	__le32	biosrev;
 	__le32	biosbuild;
 	__le32	cluster;
 	__le32	clusterchannelmask;
 	__le32	serial[2];
 	__le32	battery;
 	__le32	options;
@@ -863,9 +865,10 @@ struct aac_supplement_adapter_info
 	__le32	SupportedOptions2;
 	__le32	ReservedGrowth[1];
 };
-#define AAC_FEATURE_FALCON	0x00000010
-#define AAC_OPTION_MU_RESET	0x00000001
-#define AAC_OPTION_IGNORE_RESET	0x00000002
+#define AAC_FEATURE_FALCON	cpu_to_le32(0x00000010)
+#define AAC_FEATURE_JBOD	cpu_to_le32(0x08000000)
+#define AAC_OPTION_MU_RESET	cpu_to_le32(0x00000001)
+#define AAC_OPTION_IGNORE_RESET	cpu_to_le32(0x00000002)
 #define AAC_SIS_VERSION_V3	3
 #define AAC_SIS_SLOT_UNKNOWN	0xFF

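
Wrapping the option and feature masks in cpu_to_le32() makes them directly comparable against the little-endian fields they are tested with (e.g. SupportedOptions2), so each use site needs no le32_to_cpu(); on big-endian hosts the byte swap is folded into the constant at compile time. A sketch of the intended usage pattern (the field name and stand-in types here are assumptions for illustration):

#include <stdint.h>

typedef uint32_t __le32;		/* stand-in for the kernel type */
#define cpu_to_le32(x) (x)		/* identity on a little-endian host */

#define AAC_FEATURE_JBOD	cpu_to_le32(0x08000000)

/* With the swap baked into the constant, the test is a plain AND: */
static int supports_jbod(__le32 supported_options2)
{
	return (supported_options2 & AAC_FEATURE_JBOD) != 0;
}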
@@ -916,13 +919,13 @@ struct aac_bus_info_response {
 #define AAC_OPT_HOST_TIME_FIB		cpu_to_le32(1<<4)
 #define AAC_OPT_RAID50			cpu_to_le32(1<<5)
 #define AAC_OPT_4GB_WINDOW		cpu_to_le32(1<<6)
 #define AAC_OPT_SCSI_UPGRADEABLE	cpu_to_le32(1<<7)
 #define AAC_OPT_SOFT_ERR_REPORT		cpu_to_le32(1<<8)
 #define AAC_OPT_SUPPORTED_RECONDITION	cpu_to_le32(1<<9)
 #define AAC_OPT_SGMAP_HOST64		cpu_to_le32(1<<10)
 #define AAC_OPT_ALARM			cpu_to_le32(1<<11)
 #define AAC_OPT_NONDASD			cpu_to_le32(1<<12)
 #define AAC_OPT_SCSI_MANAGED		cpu_to_le32(1<<13)
 #define AAC_OPT_RAID_SCSI_MODE		cpu_to_le32(1<<14)
 #define AAC_OPT_SUPPLEMENT_ADAPTER_INFO	cpu_to_le32(1<<16)
 #define AAC_OPT_NEW_COMM		cpu_to_le32(1<<17)
@@ -942,7 +945,7 @@ struct aac_dev

 	/*
 	 *	Map for 128 fib objects (64k)
 	 */
 	dma_addr_t		hw_fib_pa;
 	struct hw_fib		*hw_fib_va;
 	struct hw_fib		*aif_base_va;
@@ -953,24 +956,24 @@ struct aac_dev

 	struct fib		*free_fib;
 	spinlock_t		fib_lock;

 	struct aac_queue_block *queues;
 	/*
 	 *	The user API will use an IOCTL to register itself to receive
 	 *	FIBs from the adapter.  The following list is used to keep
 	 *	track of all the threads that have requested these FIBs.  The
 	 *	mutex is used to synchronize access to all data associated
 	 *	with the adapter fibs.
 	 */
 	struct list_head	fib_list;

 	struct adapter_ops	a_ops;
 	unsigned long		fsrev;		/* Main driver's revision number */

 	unsigned		base_size;	/* Size of mapped in region */
 	struct aac_init		*init;		/* Holds initialization info to communicate with adapter */
 	dma_addr_t		init_pa;	/* Holds physical address of the init struct */

 	struct pci_dev		*pdev;		/* Our PCI interface */
 	void *			printfbuf;	/* pointer to buffer used for printf's from the adapter */
 	void *			comm_addr;	/* Base address of Comm area */
@@ -984,11 +987,11 @@ struct aac_dev
 	struct fsa_dev_info	*fsa_dev;
 	struct task_struct	*thread;
 	int			cardtype;

 	/*
 	 *	The following is the device specific extension.
 	 */
-#if (!defined(AAC_MIN_FOOTPRINT_SIZE))
+#ifndef AAC_MIN_FOOTPRINT_SIZE
 #	define AAC_MIN_FOOTPRINT_SIZE 8192
 #endif
 	union
@@ -1009,7 +1012,9 @@ struct aac_dev
 	/* These are in adapter info but they are in the io flow so
 	 * lets break them out so we don't have to do an AND to check them
 	 */
 	u8			nondasd_support;
+	u8			jbod;
+	u8			cache_protected;
 	u8			dac_support;
 	u8			raid_scsi_mode;
 	u8			comm_interface;
@@ -1066,18 +1071,19 @@ struct aac_dev
 	(dev)->a_ops.adapter_comm(dev, comm)

 #define FIB_CONTEXT_FLAG_TIMED_OUT		(0x00000001)
+#define FIB_CONTEXT_FLAG			(0x00000002)

 /*
  *	Define the command values
  */

 #define		Null			0
 #define		GetAttributes		1
 #define		SetAttributes		2
 #define		Lookup			3
 #define		ReadLink		4
 #define		Read			5
 #define		Write			6
 #define		Create			7
 #define		MakeDirectory		8
 #define		SymbolicLink		9
@@ -1173,19 +1179,19 @@ struct aac_dev

 struct aac_read
 {
 	__le32		command;
 	__le32		cid;
 	__le32		block;
 	__le32		count;
 	struct sgmap	sg;	// Must be last in struct because it is variable
 };

 struct aac_read64
 {
 	__le32		command;
 	__le16		cid;
 	__le16		sector_count;
 	__le32		block;
 	__le16		pad;
 	__le16		flags;
 	struct sgmap64	sg;	// Must be last in struct because it is variable
@@ -1193,26 +1199,26 @@ struct aac_read64

 struct aac_read_reply
 {
 	__le32		status;
 	__le32		count;
 };

 struct aac_write
 {
 	__le32		command;
 	__le32		cid;
 	__le32		block;
 	__le32		count;
 	__le32		stable;	// Not used
 	struct sgmap	sg;	// Must be last in struct because it is variable
 };

 struct aac_write64
 {
 	__le32		command;
 	__le16		cid;
 	__le16		sector_count;
 	__le32		block;
 	__le16		pad;
 	__le16		flags;
 #define	IO_TYPE_WRITE 0x00000000
@@ -1223,7 +1229,7 @@ struct aac_write64
 struct aac_write_reply
 {
 	__le32		status;
 	__le32		count;
 	__le32		committed;
 };

@@ -1326,10 +1332,10 @@ struct aac_srb_reply
 #define SRB_NoDataXfer		 0x0000
 #define SRB_DisableDisconnect	 0x0004
 #define SRB_DisableSynchTransfer 0x0008
 #define SRB_BypassFrozenQueue	 0x0010
 #define SRB_DisableAutosense	 0x0020
 #define SRB_DataIn		 0x0040
 #define SRB_DataOut		 0x0080

 /*
  * SRB Functions - set in aac_srb->function
@@ -1352,7 +1358,7 @@ struct aac_srb_reply
 #define	SRBF_RemoveDevice	0x0016
 #define	SRBF_DomainValidation	0x0017

 /*
  * SRB SCSI Status - set in aac_srb->scsi_status
  */
 #define SRB_STATUS_PENDING		0x00
@@ -1511,17 +1517,17 @@ struct aac_get_container_count_resp {
  */

 struct aac_mntent {
 	__le32			oid;
 	u8			name[16];	/* if applicable */
 	struct creation_info	create_info;	/* if applicable */
 	__le32			capacity;
 	__le32			vol;		/* substrate structure */
 	__le32			obj;		/* FT_FILESYS, etc. */
 	__le32			state;		/* unready for mounting,
 						   readonly, etc. */
 	union aac_contentinfo	fileinfo;	/* Info specific to content
 						   manager (eg, filesystem) */
 	__le32			altoid;		/* != oid <==> snapshot or
 						   broken mirror exists */
 	__le32			capacityhigh;
 };
@@ -1538,7 +1544,7 @@ struct aac_query_mount {

 struct aac_mount {
 	__le32		status;
 	__le32		type;		/* should be same as that requested */
 	__le32		count;
 	struct aac_mntent mnt[1];
 };
@@ -1608,7 +1614,7 @@ struct aac_delete_disk {
 	u32	disknum;
 	u32	cnum;
 };

 struct fib_ioctl
 {
 	u32	fibctx;
@@ -1622,10 +1628,10 @@ struct revision
 	__le32	version;
 	__le32	build;
 };


 /*
  *	Ugly - non Linux like ioctl coding for back compat.
  */

 #define CTL_CODE(function, method) (			\
@@ -1633,7 +1639,7 @@ struct revision
 )

 /*
  *	Define the method codes for how buffers are passed for I/O and FS
  *	controls
  */

@@ -1644,15 +1650,15 @@ struct revision
  *	Filesystem ioctls
  */

 #define FSACTL_SENDFIB				CTL_CODE(2050, METHOD_BUFFERED)
 #define FSACTL_SEND_RAW_SRB			CTL_CODE(2067, METHOD_BUFFERED)
 #define FSACTL_DELETE_DISK			0x163
 #define FSACTL_QUERY_DISK			0x173
 #define FSACTL_OPEN_GET_ADAPTER_FIB		CTL_CODE(2100, METHOD_BUFFERED)
 #define FSACTL_GET_NEXT_ADAPTER_FIB		CTL_CODE(2101, METHOD_BUFFERED)
 #define FSACTL_CLOSE_GET_ADAPTER_FIB		CTL_CODE(2102, METHOD_BUFFERED)
 #define FSACTL_MINIPORT_REV_CHECK		CTL_CODE(2107, METHOD_BUFFERED)
 #define FSACTL_GET_PCI_INFO			CTL_CODE(2119, METHOD_BUFFERED)
 #define FSACTL_FORCE_DELETE_DISK		CTL_CODE(2120, METHOD_NEITHER)
 #define FSACTL_GET_CONTAINERS			2131
 #define FSACTL_SEND_LARGE_FIB			CTL_CODE(2138, METHOD_BUFFERED)
@@ -1661,7 +1667,7 @@ struct revision
 struct aac_common
 {
 	/*
 	 *	If this value is set to 1 then interrupt moderation will occur
 	 *	in the base commuication support.
 	 */
 	u32 irq_mod;
@@ -1690,11 +1696,11 @@ extern struct aac_common aac_config;
  *	The following macro is used when sending and receiving FIBs. It is
  *	only used for debugging.
  */

 #ifdef DBG
 #define	FIB_COUNTER_INCREMENT(counter)		(counter)++
 #else
 #define	FIB_COUNTER_INCREMENT(counter)
 #endif

 /*
@@ -1726,17 +1732,17 @@ extern struct aac_common aac_config;
  *
  *	The adapter reports is present state through the phase.  Only
  *	a single phase should be ever be set.  Each phase can have multiple
  *	phase status bits to provide more detailed information about the
  *	state of the board.  Care should be taken to ensure that any phase
  *	status bits that are set when changing the phase are also valid
  *	for the new phase or be cleared out.  Adapter software (monitor,
  *	iflash, kernel) is responsible for properly maintining the phase
  *	status mailbox when it is running.
- *
- *	MONKER_API Phases
  *
- *	Phases are bit oriented.  It is NOT valid to have multiple bits set
- */
+ *	MONKER_API Phases
+ *
+ *	Phases are bit oriented.  It is NOT valid to have multiple bits set
+ */

 #define	SELF_TEST_FAILED		0x00000004
 #define	MONITOR_PANIC			0x00000020
@@ -1759,16 +1765,22 @@ extern struct aac_common aac_config;
  *	For FIB communication, we need all of the following things
  *	to send back to the user.
  */

 #define		AifCmdEventNotify	1	/* Notify of event */
 #define			AifEnConfigChange	3	/* Adapter configuration change */
 #define			AifEnContainerChange	4	/* Container configuration change */
 #define			AifEnDeviceFailure	5	/* SCSI device failed */
+#define			AifEnEnclosureManagement 13	/* EM_DRIVE_* */
+#define				EM_DRIVE_INSERTION	31
+#define				EM_DRIVE_REMOVAL	32
+#define			AifEnBatteryEvent	14	/* Change in Battery State */
 #define			AifEnAddContainer	15	/* A new array was created */
 #define			AifEnDeleteContainer	16	/* A container was deleted */
 #define			AifEnExpEvent		23	/* Firmware Event Log */
 #define			AifExeFirmwarePanic	3	/* Firmware Event Panic */
 #define			AifHighPriority		3	/* Highest Priority Event */
+#define			AifEnAddJBOD		30	/* JBOD created */
+#define			AifEnDeleteJBOD		31	/* JBOD deleted */

 #define		AifCmdJobProgress	2	/* Progress report */
 #define			AifJobCtrZero	101	/* Array Zero progress */
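
The new AifEn* codes extend the adapter-initiated FIB (AIF) event set with enclosure-management drive insertion/removal, battery state changes, and JBOD add/delete. A hedged sketch of how a dispatcher might react to them (the driver's real handler lives in the .c files of this series and is considerably more involved):

/* Illustrative dispatch over the AifEn* codes defined above. */
static void handle_aif_event(int event, int sub_event)
{
	switch (event) {
	case 13:	/* AifEnEnclosureManagement */
		if (sub_event == 31 || sub_event == 32)
			/* EM_DRIVE_INSERTION / EM_DRIVE_REMOVAL:
			 * schedule a rescan of the affected channel */;
		break;
	case 30:	/* AifEnAddJBOD */
	case 31:	/* AifEnDeleteJBOD */
		/* expose or remove the physical device */;
		break;
	}
}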
@@ -1780,11 +1792,11 @@ extern struct aac_common aac_config;
 #define		AifDenVolumeExtendComplete 201 /* A volume extend completed */
 #define		AifReqJobList		100	/* Gets back complete job list */
 #define		AifReqJobsForCtr	101	/* Gets back jobs for specific container */
 #define		AifReqJobsForScsi	102	/* Gets back jobs for specific SCSI device */
 #define		AifReqJobReport		103	/* Gets back a specific job report or list of them */
 #define		AifReqTerminateJob	104	/* Terminates job */
 #define		AifReqSuspendJob	105	/* Suspends a job */
 #define		AifReqResumeJob		106	/* Resumes a job */
 #define		AifReqSendAPIReport	107	/* API generic report requests */
 #define		AifReqAPIJobStart	108	/* Start a job from the API */
 #define		AifReqAPIJobUpdate	109	/* Update a job report from the API */
@@ -1803,8 +1815,8 @@ struct aac_aifcmd {
 };

 /**
  *	Convert capacity to cylinders
  *	accounting for the fact capacity could be a 64 bit value
  *
  */
 static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
@@ -1861,6 +1873,7 @@ int aac_probe_container(struct aac_dev *dev, int cid);
 int _aac_rx_init(struct aac_dev *dev);
 int aac_rx_select_comm(struct aac_dev *dev, int comm);
 int aac_rx_deliver_producer(struct fib * fib);
+char * get_container_type(unsigned type);
 extern int numacb;
 extern int acbsize;
 extern char aac_driver_version[];
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 1e6d7a9c75bf..851a7e599c50 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -48,13 +48,13 @@
  * ioctl_send_fib	-	send a FIB from userspace
  * @dev:	adapter is being processed
  * @arg:	arguments to the ioctl call
  *
  * This routine sends a fib to the adapter on behalf of a user level
  * program.
  */
 # define AAC_DEBUG_PREAMBLE	KERN_INFO
 # define AAC_DEBUG_POSTAMBLE
 
 static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
 {
 	struct hw_fib * kfib;
@@ -71,7 +71,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
 	if(fibptr == NULL) {
 		return -ENOMEM;
 	}
 
 	kfib = fibptr->hw_fib_va;
 	/*
 	 * First copy in the header so that we can check the size field.
@@ -109,7 +109,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
 	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
 		aac_adapter_interrupt(dev);
 		/*
 		 * Since we didn't really send a fib, zero out the state to allow
 		 * cleanup code not to assert.
 		 */
 		kfib->header.XferState = 0;
@@ -169,7 +169,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
 
 		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
 		fibctx->size = sizeof(struct aac_fib_context);
 		/*
 		 * Yes yes, I know this could be an index, but we have a
 		 * better guarantee of uniqueness for the locked loop below.
 		 * Without the aid of a persistent history, this also helps
@@ -189,7 +189,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
 		INIT_LIST_HEAD(&fibctx->fib_list);
 		fibctx->jiffies = jiffies/HZ;
 		/*
 		 * Now add this context onto the adapter's
 		 * AdapterFibContext list.
 		 */
 		spin_lock_irqsave(&dev->fib_lock, flags);
@@ -207,12 +207,12 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
 		}
 		list_add_tail(&fibctx->next, &dev->fib_list);
 		spin_unlock_irqrestore(&dev->fib_lock, flags);
 		if (copy_to_user(arg, &fibctx->unique,
 			sizeof(fibctx->unique))) {
 			status = -EFAULT;
 		} else {
 			status = 0;
 		}
 	}
 	return status;
 }
@@ -221,8 +221,8 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
  * next_getadapter_fib	-	get the next fib
  * @dev: adapter to use
  * @arg: ioctl argument
  *
  * This routine will get the next Fib, if available, from the AdapterFibContext
  * passed in from the user.
  */
 
@@ -234,7 +234,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
 	int status;
 	struct list_head * entry;
 	unsigned long flags;
 
 	if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
 		return -EFAULT;
 	/*
@@ -243,6 +243,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
 	 * Search the list of AdapterFibContext addresses on the adapter
 	 * to be sure this is a valid address
 	 */
+	spin_lock_irqsave(&dev->fib_lock, flags);
 	entry = dev->fib_list.next;
 	fibctx = NULL;
 
@@ -251,37 +252,37 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
 		/*
 		 * Extract the AdapterFibContext from the Input parameters.
 		 */
 		if (fibctx->unique == f.fibctx) { /* We found a winner */
 			break;
 		}
 		entry = entry->next;
 		fibctx = NULL;
 	}
 	if (!fibctx) {
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
 		dprintk ((KERN_INFO "Fib Context not found\n"));
 		return -EINVAL;
 	}
 
 	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
 		 (fibctx->size != sizeof(struct aac_fib_context))) {
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
 		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
 		return -EINVAL;
 	}
 	status = 0;
-	spin_lock_irqsave(&dev->fib_lock, flags);
 	/*
 	 * If there are no fibs to send back, then either wait or return
 	 * -EAGAIN
 	 */
 return_fib:
 	if (!list_empty(&fibctx->fib_list)) {
-		struct list_head * entry;
 		/*
 		 * Pull the next fib from the fibs
 		 */
 		entry = fibctx->fib_list.next;
 		list_del(entry);
 
 		fib = list_entry(entry, struct fib, fiblink);
 		fibctx->count--;
 		spin_unlock_irqrestore(&dev->fib_lock, flags);
@@ -289,7 +290,7 @@ return_fib:
 			kfree(fib->hw_fib_va);
 			kfree(fib);
 			return -EFAULT;
 		}
 		/*
 		 * Free the space occupied by this copy of the fib.
 		 */
@@ -318,7 +319,7 @@ return_fib:
 			}
 		} else {
 			status = -EAGAIN;
 		}
 	}
 	fibctx->jiffies = jiffies/HZ;
 	return status;
@@ -327,7 +328,9 @@ return_fib:
 int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
 {
 	struct fib *fib;
+	unsigned long flags;
 
+	spin_lock_irqsave(&dev->fib_lock, flags);
 	/*
 	 * First free any FIBs that have not been consumed.
 	 */
@@ -350,6 +353,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
 	 * Remove the Context from the AdapterFibContext List
 	 */
 	list_del(&fibctx->next);
+	spin_unlock_irqrestore(&dev->fib_lock, flags);
 	/*
 	 * Invalidate context
 	 */
@@ -368,7 +372,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
  *
  * This routine will close down the fibctx passed in from the user.
  */
 
 static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
 {
 	struct aac_fib_context *fibctx;
@@ -415,8 +419,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
  * @arg: ioctl arguments
  *
  * This routine returns the driver version.
  * Under Linux, there have been no version incompatibilities, so this is
  * simple!
  */
 
 static int check_revision(struct aac_dev *dev, void __user *arg)
@@ -426,12 +430,12 @@ static int check_revision(struct aac_dev *dev, void __user *arg)
 	u32 version;
 
 	response.compat = 1;
 	version = (simple_strtol(driver_version,
		&driver_version, 10) << 24) | 0x00000400;
 	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
 	version += simple_strtol(driver_version + 1, NULL, 10);
 	response.version = cpu_to_le32(version);
-#	if (defined(AAC_DRIVER_BUILD))
+#	ifdef AAC_DRIVER_BUILD
 	response.build = cpu_to_le32(AAC_DRIVER_BUILD);
 #	else
 	response.build = cpu_to_le32(9999);
@@ -464,7 +468,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 	u32 data_dir;
 	void __user *sg_user[32];
 	void *sg_list[32];
 	u32 sg_indx = 0;
 	u32 byte_count = 0;
 	u32 actual_fibsize64, actual_fibsize = 0;
 	int i;
@@ -475,7 +479,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		return -EBUSY;
 	}
 	if (!capable(CAP_SYS_ADMIN)){
 		dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
 		return -EPERM;
 	}
 	/*
@@ -490,7 +494,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 
 	memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
 	if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
 		dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
 		rcode = -EFAULT;
 		goto cleanup;
 	}
@@ -507,7 +511,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		goto cleanup;
 	}
 	if(copy_from_user(user_srbcmd, user_srb,fibsize)){
 		dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
 		rcode = -EFAULT;
 		goto cleanup;
 	}
@@ -518,15 +522,15 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 	// Fix up srb for endian and force some values
 
 	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);	// Force this
 	srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
 	srbcmd->id = cpu_to_le32(user_srbcmd->id);
 	srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
 	srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
 	srbcmd->flags = cpu_to_le32(flags);
 	srbcmd->retry_limit = 0; // Obsolete parameter
 	srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
 	memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
 
 	switch (flags & (SRB_DataIn | SRB_DataOut)) {
 	case SRB_DataOut:
 		data_dir = DMA_TO_DEVICE;
@@ -582,7 +586,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			void* p;
 			/* Does this really need to be GFP_DMA? */
 			p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
-			if(p == 0) {
+			if(!p) {
 				dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 				  upsg->sg[i].count,i,upsg->count));
 				rcode = -ENOMEM;
@@ -594,7 +598,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			sg_list[i] = p; // save so we can clean up later
 			sg_indx = i;
 
-			if( flags & SRB_DataOut ){
+			if (flags & SRB_DataOut) {
 				if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
 					dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
 					rcode = -EFAULT;
@@ -626,7 +630,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			void* p;
 			/* Does this really need to be GFP_DMA? */
 			p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
-			if(p == 0) {
+			if(!p) {
 				kfree (usg);
 				dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 				  usg->sg[i].count,i,usg->count));
@@ -637,7 +641,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			sg_list[i] = p; // save so we can clean up later
 			sg_indx = i;
 
-			if( flags & SRB_DataOut ){
+			if (flags & SRB_DataOut) {
 				if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
 					kfree (usg);
 					dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
@@ -668,7 +672,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			void* p;
 			/* Does this really need to be GFP_DMA? */
 			p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
-			if(p == 0) {
+			if(!p) {
 				dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 				  usg->sg[i].count,i,usg->count));
 				rcode = -ENOMEM;
@@ -680,7 +684,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			sg_list[i] = p; // save so we can clean up later
 			sg_indx = i;
 
-			if( flags & SRB_DataOut ){
+			if (flags & SRB_DataOut) {
 				if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
 					dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
 					rcode = -EFAULT;
@@ -698,7 +702,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			dma_addr_t addr;
 			void* p;
 			p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
-			if(p == 0) {
+			if (!p) {
 				dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 				  upsg->sg[i].count, i, upsg->count));
 				rcode = -ENOMEM;
@@ -708,7 +712,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			sg_list[i] = p; // save so we can clean up later
 			sg_indx = i;
 
-			if( flags & SRB_DataOut ){
+			if (flags & SRB_DataOut) {
 				if(copy_from_user(p, sg_user[i],
 					upsg->sg[i].count)) {
 					dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
@@ -734,19 +738,19 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 	}
 
 	if (status != 0){
 		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
 		rcode = -ENXIO;
 		goto cleanup;
 	}
 
-	if( flags & SRB_DataIn ) {
+	if (flags & SRB_DataIn) {
 		for(i = 0 ; i <= sg_indx; i++){
 			byte_count = le32_to_cpu(
 			  (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
 			      ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
 			      : srbcmd->sg.sg[i].count);
 			if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
 				dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
 				rcode = -EFAULT;
 				goto cleanup;
 
@@ -756,7 +760,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 
 	reply = (struct aac_srb_reply *) fib_data(srbfib);
 	if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
 		dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
 		rcode = -EFAULT;
 		goto cleanup;
 	}
@@ -775,34 +779,34 @@ cleanup:
 }
 
 struct aac_pci_info {
 	u32 bus;
 	u32 slot;
 };
 
 
 static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
 {
 	struct aac_pci_info pci_info;
 
 	pci_info.bus = dev->pdev->bus->number;
 	pci_info.slot = PCI_SLOT(dev->pdev->devfn);
 
 	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
 		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
 		return -EFAULT;
 	}
 	return 0;
 }
 
 
 int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
 {
 	int status;
 
 	/*
 	 * HBA gets first crack
 	 */
 
 	status = aac_dev_ioctl(dev, cmd, arg);
 	if(status != -ENOTTY)
 		return status;
@@ -832,7 +836,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
 		break;
 	default:
 		status = -ENOTTY;
 		break;
 	}
 	return status;
 }
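The net effect of the locking changes above is that the fib_list walk in next_getadapter_fib() and the teardown in aac_close_fib_context() now run entirely under dev->fib_lock, with the lock dropped on every early-exit path. A condensed sketch of the corrected lookup pattern (the function name and return convention here are illustrative, not the driver's):

	static int fibctx_is_registered(struct aac_dev *dev, u32 unique)
	{
		struct aac_fib_context *fibctx;
		unsigned long flags;
		int found = 0;

		spin_lock_irqsave(&dev->fib_lock, flags);	/* walk under the lock */
		list_for_each_entry(fibctx, &dev->fib_list, next) {
			if (fibctx->unique == unique) {
				found = 1;
				break;
			}
		}
		spin_unlock_irqrestore(&dev->fib_lock, flags);	/* both paths unlock */
		return found;
	}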
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 8736813a0296..89cc8b7b42a2 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -301,10 +301,10 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
 		0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
 		(status[0] == 0x00000001)) {
-		if (status[1] & AAC_OPT_NEW_COMM_64)
+		if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
 			dev->raw_io_64 = 1;
 		if (dev->a_ops.adapter_comm &&
-			(status[1] & AAC_OPT_NEW_COMM))
+			(status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)))
 			dev->comm_interface = AAC_COMM_MESSAGE;
 		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
 		    (status[2] > dev->base_size)) {
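The two le32_to_cpu() additions above make the feature-bit tests endian-safe: the status words come back from the adapter in little-endian byte order, so either the value or the mask has to be byte-swapped before the AND, and swapping the compile-time constant is free. A standalone sketch of the equivalence (the flag value here is illustrative, not the real option bit):

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	#define AAC_OPT_NEW_COMM (1u << 6)	/* illustrative bit only */

	int main(void)
	{
		uint32_t wire = htole32(AAC_OPT_NEW_COMM);	/* word as the adapter sends it */

		/* Swapping the mask (as the patch does) and swapping the
		 * value give the same truth value on any host byte order. */
		printf("%d %d\n",
		       !!(wire & htole32(AAC_OPT_NEW_COMM)),
		       !!(le32toh(wire) & AAC_OPT_NEW_COMM));
		return 0;
	}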
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index abce48ccc85b..81b36923e0ef 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -56,7 +56,7 @@
  * Allocate and map the shared PCI space for the FIB blocks used to
  * talk to the Adaptec firmware.
  */
 
 static int fib_map_alloc(struct aac_dev *dev)
 {
 	dprintk((KERN_INFO
@@ -109,14 +109,16 @@ int aac_fib_setup(struct aac_dev * dev)
 	}
 	if (i<0)
 		return -ENOMEM;
 
 	hw_fib = dev->hw_fib_va;
 	hw_fib_pa = dev->hw_fib_pa;
 	memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
 	/*
 	 *	Initialise the fibs
 	 */
-	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
+	for (i = 0, fibptr = &dev->fibs[i];
+		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+		i++, fibptr++)
 	{
 		fibptr->dev = dev;
 		fibptr->hw_fib_va = hw_fib;
@@ -148,13 +150,13 @@ int aac_fib_setup(struct aac_dev * dev)
  * Allocate a fib from the adapter fib pool. If the pool is empty we
  * return NULL.
  */
 
 struct fib *aac_fib_alloc(struct aac_dev *dev)
 {
 	struct fib * fibptr;
 	unsigned long flags;
 	spin_lock_irqsave(&dev->fib_lock, flags);
 	fibptr = dev->free_fib;
 	if(!fibptr){
 		spin_unlock_irqrestore(&dev->fib_lock, flags);
 		return fibptr;
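aac_fib_alloc() above is a plain free-list pop: dev->free_fib chains idle fibs together, and the head is only touched under fib_lock, so an empty pool simply yields NULL. A minimal sketch of the pattern, assuming (as aac_fib_free() suggests) that struct fib's next pointer links the free list:

	struct fib *aac_fib_alloc_sketch(struct aac_dev *dev)
	{
		struct fib *fibptr;
		unsigned long flags;

		spin_lock_irqsave(&dev->fib_lock, flags);
		fibptr = dev->free_fib;			/* head of the free list */
		if (fibptr)
			dev->free_fib = fibptr->next;	/* pop it off */
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;				/* NULL when the pool is empty */
	}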
@@ -171,6 +173,7 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
 	 *	each I/O
 	 */
 	fibptr->hw_fib_va->header.XferState = 0;
+	fibptr->flags = 0;
 	fibptr->callback = NULL;
 	fibptr->callback_data = NULL;
 
@@ -183,7 +186,7 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
  *
  * Frees up a fib and places it on the appropriate queue
  */
 
 void aac_fib_free(struct fib *fibptr)
 {
 	unsigned long flags;
@@ -204,10 +207,10 @@ void aac_fib_free(struct fib *fibptr)
 /**
  * aac_fib_init - initialise a fib
  * @fibptr: The fib to initialize
  *
  * Set up the generic fib fields ready for use
  */
 
 void aac_fib_init(struct fib *fibptr)
 {
 	struct hw_fib *hw_fib = fibptr->hw_fib_va;
@@ -227,12 +230,12 @@ void aac_fib_init(struct fib *fibptr)
  * Will deallocate and return to the free pool the FIB pointed to by the
  * caller.
  */
 
 static void fib_dealloc(struct fib * fibptr)
 {
 	struct hw_fib *hw_fib = fibptr->hw_fib_va;
 	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
 	hw_fib->header.XferState = 0;
 }
 
 /*
@@ -241,7 +244,7 @@ static void fib_dealloc(struct fib * fibptr)
  * these routines and are the only routines which have a knowledge of the
  * how these queues are implemented.
  */
 
 /**
  * aac_get_entry - get a queue entry
  * @dev: Adapter
@@ -254,7 +257,7 @@ static void fib_dealloc(struct fib * fibptr)
  * is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is
  * returned.
  */
 
 static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
 {
 	struct aac_queue * q;
@@ -279,26 +282,27 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
 			idx = ADAP_NORM_RESP_ENTRIES;
 		}
 		if (idx != le32_to_cpu(*(q->headers.consumer)))
 			*nonotify = 1;
 	}
 
 	if (qid == AdapNormCmdQueue) {
 		if (*index >= ADAP_NORM_CMD_ENTRIES)
 			*index = 0; /* Wrap to front of the Producer Queue. */
 	} else {
 		if (*index >= ADAP_NORM_RESP_ENTRIES)
 			*index = 0; /* Wrap to front of the Producer Queue. */
 	}
 
-	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
+	/* Queue is full */
+	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
 		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
 				qid, q->numpending);
 		return 0;
 	} else {
 		*entry = q->base + *index;
 		return 1;
 	}
 }
 
 /**
  * aac_queue_get - get the next free QE
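The full-queue test in aac_get_entry() is the classic one-slot-sacrificed circular buffer: the producer index wraps to zero past the end of the array, and the queue is declared full one entry before the producer would catch up with the consumer, so "full" and "empty" stay distinguishable without a separate element count. A standalone sketch of the invariant (ENTRIES is illustrative; the driver uses ADAP_NORM_*_ENTRIES):

	#include <stdio.h>

	#define ENTRIES 8	/* illustrative ring size */

	static int ring_full(unsigned producer, unsigned consumer)
	{
		return (producer + 1) % ENTRIES == consumer;	/* one slot left empty */
	}

	static int ring_empty(unsigned producer, unsigned consumer)
	{
		return producer == consumer;
	}

	int main(void)
	{
		/* producer sits one behind the consumer: full; equal: empty */
		printf("%d %d\n", ring_full(2, 3), ring_empty(3, 3));	/* prints: 1 1 */
		return 0;
	}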
@@ -320,31 +324,29 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
 {
 	struct aac_entry * entry = NULL;
 	int map = 0;
 
 	if (qid == AdapNormCmdQueue) {
 		/* if no entries wait for some if caller wants to */
-		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
-		{
+		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
 			printk(KERN_ERR "GetEntries failed\n");
 		}
 		/*
 		 *	Setup queue entry with a command, status and fib mapped
 		 */
 		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
 		map = 1;
 	} else {
-		while(!aac_get_entry(dev, qid, &entry, index, nonotify))
-		{
+		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
 			/* if no entries wait for some if caller wants to */
 		}
 		/*
 		 *	Setup queue entry with command, status and fib mapped
 		 */
 		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
 		entry->addr = hw_fib->header.SenderFibAddress;
			/* Restore adapters pointer to the FIB */
 		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter now where to find its data */
 		map = 0;
 	}
 	/*
 	 *	If MapFib is true than we need to map the Fib and put pointers
@@ -356,8 +358,8 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
 }
 
 /*
  *	Define the highest level of host to adapter communication routines.
  *	These routines will support host to adapter FS commuication. These
  *	routines have no knowledge of the commuication method used. This level
  *	sends and receives FIBs. This level has no knowledge of how these FIBs
  *	get passed back and forth.
@@ -379,7 +381,7 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
  *	an event to wait on must be supplied. This event will be set when a
  *	response FIB is received from the adapter.
  */
 
 int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
@@ -392,16 +394,17 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
 		return -EBUSY;
 	/*
 	 *	There are 5 cases with the wait and reponse requested flags.
 	 *	The only invalid cases are if the caller requests to wait and
 	 *	does not request a response and if the caller does not want a
 	 *	response and the Fib is not allocated from pool. If a response
 	 *	is not requesed the Fib will just be deallocaed by the DPC
 	 *	routine when the response comes back from the adapter. No
 	 *	further processing will be done besides deleting the Fib. We
 	 *	will have a debug mode where the adapter can notify the host
 	 *	it had a problem and the host can log that fact.
 	 */
+	fibptr->flags = 0;
 	if (wait && !reply) {
 		return -EINVAL;
 	} else if (!wait && reply) {
@@ -413,7 +416,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	} else if (wait && reply) {
 		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
 		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
 	}
 	/*
 	 *	Map the fib into 32bits by using the fib number
 	 */
@@ -436,7 +439,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
 	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
 		return -EMSGSIZE;
 	}
 	/*
 	 *	Get a queue entry connect the FIB to it and send an notify
 	 *	the adapter a command is ready.
@@ -450,10 +453,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	if (!wait) {
 		fibptr->callback = callback;
 		fibptr->callback_data = callback_data;
+		fibptr->flags = FIB_CONTEXT_FLAG;
 	}
 
 	fibptr->done = 0;
-	fibptr->flags = 0;
 
 	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
 
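Taken together, the two flags hunks above change when a fib is tagged: flags is now cleared once at the top of aac_fib_send(), and FIB_CONTEXT_FLAG is set only for asynchronous sends, marking fibs that still have a callback outstanding so later timeout and reset handling can identify them. A tiny standalone model of the resulting lifecycle (the flag value and function name are illustrative):

	#include <stdio.h>

	#define FIB_CONTEXT_FLAG (1u << 1)	/* illustrative value */

	struct fib { unsigned flags; int done; };

	/* Models the flag handling after the patch: clear once up front,
	 * tag only asynchronous sends, never clear again afterwards. */
	static void fib_send_flags(struct fib *fibptr, int wait)
	{
		fibptr->flags = 0;			/* reset per send */
		if (!wait)
			fibptr->flags = FIB_CONTEXT_FLAG;	/* callback pending */
		fibptr->done = 0;			/* no longer clobbers flags */
	}

	int main(void)
	{
		struct fib f;

		fib_send_flags(&f, 0);			/* async send */
		printf("%d\n", !!(f.flags & FIB_CONTEXT_FLAG));	/* prints: 1 */
		return 0;
	}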
@@ -473,9 +476,9 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	aac_adapter_deliver(fibptr);
 
 	/*
 	 *	If the caller wanted us to wait for response wait now.
 	 */
 
 	if (wait) {
 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
 		/* Only set for first known interruptable command */
@@ -522,7 +525,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 		}
 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
 		BUG_ON(fibptr->done == 0);
 
 		if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
 			return -ETIMEDOUT;
 		return 0;
@@ -537,15 +540,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	return 0;
 }
 
 /**
  *	aac_consumer_get	-	get the top of the queue
  *	@dev: Adapter
  *	@q: Queue
  *	@entry: Return entry
  *
  *	Will return a pointer to the entry on the top of the queue requested that
  *	we are a consumer of, and return the address of the queue entry. It does
  *	not change the state of the queue.
  */
 
 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
@@ -560,10 +563,10 @@ int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entr
 		 *	the end of the queue, else we just use the entry
 		 *	pointed to by the header index
 		 */
 		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
 			index = 0;
 		else
 			index = le32_to_cpu(*q->headers.consumer);
 		*entry = q->base + index;
 		status = 1;
 	}
@@ -587,12 +590,12 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
 
 	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
 		wasfull = 1;
 
 	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
 		*q->headers.consumer = cpu_to_le32(1);
 	else
 		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
 
 	if (wasfull) {
 		switch (qid) {
 
@@ -608,7 +611,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
 		}
 		aac_adapter_notify(dev, notify);
 	}
 }
 
 /**
  *	aac_fib_adapter_complete	-	complete adapter issued fib
@@ -630,32 +633,32 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 	if (hw_fib->header.XferState == 0) {
 		if (dev->comm_interface == AAC_COMM_MESSAGE)
 			kfree (hw_fib);
 		return 0;
 	}
 	/*
 	 *	If we plan to do anything check the structure type first.
 	 */
-	if ( hw_fib->header.StructType != FIB_MAGIC ) {
+	if (hw_fib->header.StructType != FIB_MAGIC) {
 		if (dev->comm_interface == AAC_COMM_MESSAGE)
 			kfree (hw_fib);
 		return -EINVAL;
 	}
 	/*
 	 *	This block handles the case where the adapter had sent us a
 	 *	command and we have finished processing the command. We
 	 *	call completeFib when we are done processing the command
 	 *	and want to send a response back to the adapter. This will
 	 *	send the completed cdb to the adapter.
 	 */
 	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
 		if (dev->comm_interface == AAC_COMM_MESSAGE) {
 			kfree (hw_fib);
 		} else {
 			u32 index;
 			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
 			if (size) {
 				size += sizeof(struct aac_fibhdr);
 				if (size > le16_to_cpu(hw_fib->header.SenderSize))
 					return -EMSGSIZE;
 				hw_fib->header.Size = cpu_to_le16(size);
 			}
@@ -667,12 +670,11 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 			if (!(nointr & (int)aac_config.irq_mod))
 				aac_adapter_notify(dev, AdapNormRespQueue);
 		}
+	} else {
+		printk(KERN_WARNING "aac_fib_adapter_complete: "
+			"Unknown xferstate detected.\n");
+		BUG();
 	}
-	else
-	{
-		printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
-		BUG();
-	}
 	return 0;
 }
 
@@ -682,7 +684,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
  *
  *	Will do all necessary work to complete a FIB.
  */
 
 int aac_fib_complete(struct fib *fibptr)
 {
 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
@@ -692,15 +694,15 @@ int aac_fib_complete(struct fib *fibptr)
 	 */
 
 	if (hw_fib->header.XferState == 0)
 		return 0;
 	/*
 	 *	If we plan to do anything check the structure type first.
 	 */
 
 	if (hw_fib->header.StructType != FIB_MAGIC)
 		return -EINVAL;
 	/*
 	 *	This block completes a cdb which orginated on the host and we
 	 *	just need to deallocate the cdb or reinit it. At this point the
 	 *	command is complete that we had sent to the adapter and this
 	 *	cdb could be reused.
@@ -721,7 +723,7 @@ int aac_fib_complete(struct fib *fibptr)
 		fib_dealloc(fibptr);
 	} else {
 		BUG();
 	}
 	return 0;
 }
 
@@ -741,7 +743,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
 {
 	int length = val & 0xffff;
 	int level = (val >> 16) & 0xffff;
 
 	/*
 	 *	The size of the printfbuf is set in port.c
 	 *	There is no variable or define for it
@@ -755,7 +757,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
 		else
 			printk(KERN_INFO "%s:%s", dev->name, cp);
 	}
 	memset(cp, 0, 256);
 }
 
 
@@ -773,20 +775,20 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 {
 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
 	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
-	u32 container;
+	u32 channel, id, lun, container;
 	struct scsi_device *device;
 	enum {
 		NOTHING,
 		DELETE,
 		ADD,
 		CHANGE
-	} device_config_needed;
+	} device_config_needed = NOTHING;
 
 	/* Sniff for container changes */
 
 	if (!dev || !dev->fsa_dev)
 		return;
-	container = (u32)-1;
+	container = channel = id = lun = (u32)-1;
 
 	/*
 	 *	We have set this up to try and minimize the number of
@@ -796,13 +798,13 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 	 */
 	switch (le32_to_cpu(aifcmd->command)) {
 	case AifCmdDriverNotify:
-		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
+		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
 		/*
 		 *	Morph or Expand complete
 		 */
 		case AifDenMorphComplete:
 		case AifDenVolumeExtendComplete:
-			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
 			if (container >= dev->maximum_num_containers)
 				break;
 
@@ -814,9 +816,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 			 */
 
 			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
 				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
 				if (device) {
 					dev->fsa_dev[container].config_needed = CHANGE;
@@ -835,25 +837,29 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 			if (container >= dev->maximum_num_containers)
 				break;
 			if ((dev->fsa_dev[container].config_waiting_on ==
-			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
+			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
 				dev->fsa_dev[container].config_waiting_on = 0;
 		} else for (container = 0;
 		    container < dev->maximum_num_containers; ++container) {
 			if ((dev->fsa_dev[container].config_waiting_on ==
-			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
+			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
 				dev->fsa_dev[container].config_waiting_on = 0;
 		}
 		break;
 
 	case AifCmdEventNotify:
-		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
+		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
+		case AifEnBatteryEvent:
+			dev->cache_protected =
+				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
+			break;
 		/*
 		 *	Add an Array.
 		 */
 		case AifEnAddContainer:
-			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
 			if (container >= dev->maximum_num_containers)
 				break;
 			dev->fsa_dev[container].config_needed = ADD;
@@ -866,7 +872,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 		 *	Delete an Array.
 		 */
 		case AifEnDeleteContainer:
-			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
 			if (container >= dev->maximum_num_containers)
 				break;
 			dev->fsa_dev[container].config_needed = DELETE;
@@ -880,7 +886,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 		 * waiting on something else, setup to wait on a Config Change.
 		 */
 		case AifEnContainerChange:
-			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
 			if (container >= dev->maximum_num_containers)
 				break;
 			if (dev->fsa_dev[container].config_waiting_on &&
@@ -895,6 +901,60 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 		case AifEnConfigChange:
 			break;
 
+		case AifEnAddJBOD:
+		case AifEnDeleteJBOD:
+			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+			if ((container >> 28))
+				break;
+			channel = (container >> 24) & 0xF;
+			if (channel >= dev->maximum_num_channels)
+				break;
+			id = container & 0xFFFF;
+			if (id >= dev->maximum_num_physicals)
+				break;
+			lun = (container >> 16) & 0xFF;
+			channel = aac_phys_to_logical(channel);
+			device_config_needed =
+			  (((__le32 *)aifcmd->data)[0] ==
+			    cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
+			break;
+
+		case AifEnEnclosureManagement:
+			/*
+			 * If in JBOD mode, automatic exposure of new
+			 * physical target to be suppressed until configured.
+			 */
+			if (dev->jbod)
+				break;
+			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
+			case EM_DRIVE_INSERTION:
+			case EM_DRIVE_REMOVAL:
+				container = le32_to_cpu(
+					((__le32 *)aifcmd->data)[2]);
+				if ((container >> 28))
+					break;
+				channel = (container >> 24) & 0xF;
+				if (channel >= dev->maximum_num_channels)
+					break;
+				id = container & 0xFFFF;
+				lun = (container >> 16) & 0xFF;
+				if (id >= dev->maximum_num_physicals) {
+					/* legacy dev_t ? */
+					if ((0x2000 <= id) || lun || channel ||
+					  ((channel = (id >> 7) & 0x3F) >=
+					  dev->maximum_num_channels))
+						break;
+					lun = (id >> 4) & 7;
+					id &= 0xF;
+				}
+				channel = aac_phys_to_logical(channel);
+				device_config_needed =
+				  (((__le32 *)aifcmd->data)[3]
+				    == cpu_to_le32(EM_DRIVE_INSERTION)) ?
+					ADD : DELETE;
+				break;
+			}
+			break;
 		}
 
 		/*
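The AifEnAddJBOD/AifEnDeleteJBOD handler added above unpacks a single 32-bit word into a SCSI address: the top nibble must be zero, then channel, LUN, and target id are carved out of fixed bit fields before the physical channel is remapped with aac_phys_to_logical(). A standalone sketch of that unpacking:

	#include <stdint.h>
	#include <stdio.h>

	/* Field layout used by the handler above:
	 *   [31:28] reserved (must be zero)   [27:24] channel
	 *   [23:16] lun                       [15:0]  id
	 */
	int main(void)
	{
		uint32_t w = 0x01020003;	/* example: channel 1, lun 2, id 3 */

		if (w >> 28)			/* reserved bits set: reject */
			return 1;
		printf("channel=%u lun=%u id=%u\n",
		       (w >> 24) & 0xF, (w >> 16) & 0xFF, w & 0xFFFF);
		return 0;
	}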
@@ -905,13 +965,13 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 			if (container >= dev->maximum_num_containers)
 				break;
 			if ((dev->fsa_dev[container].config_waiting_on ==
-			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
+			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
 				dev->fsa_dev[container].config_waiting_on = 0;
 		} else for (container = 0;
 		    container < dev->maximum_num_containers; ++container) {
 			if ((dev->fsa_dev[container].config_waiting_on ==
-			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
+			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
 				dev->fsa_dev[container].config_waiting_on = 0;
 		}
@@ -926,9 +986,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 	 *	wait for a container change.
 	 */
 
-	if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
-	 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
-	  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
+	if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
+	    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
+	     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
 		for (container = 0;
 		    container < dev->maximum_num_containers;
 		    ++container) {
@@ -943,9 +1003,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 				jiffies;
 		}
 	}
-	if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
-	 && (((u32 *)aifcmd->data)[6] == 0)
-	 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
+	if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
+	    ((__le32 *)aifcmd->data)[6] == 0 &&
+	    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
 		for (container = 0;
 		    container < dev->maximum_num_containers;
 		    ++container) {
@@ -963,7 +1023,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 		break;
 	}
 
-	device_config_needed = NOTHING;
+	if (device_config_needed == NOTHING)
 	for (container = 0; container < dev->maximum_num_containers;
 	    ++container) {
 		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
@@ -972,6 +1032,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 			device_config_needed =
 				dev->fsa_dev[container].config_needed;
 			dev->fsa_dev[container].config_needed = NOTHING;
+			channel = CONTAINER_TO_CHANNEL(container);
+			id = CONTAINER_TO_ID(container);
+			lun = CONTAINER_TO_LUN(container);
 			break;
 		}
 	}
@@ -995,34 +1058,56 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 	/*
 	 * force reload of disk info via aac_probe_container
 	 */
-	if ((device_config_needed == CHANGE)
-	 && (dev->fsa_dev[container].valid == 1))
-		dev->fsa_dev[container].valid = 2;
-	if ((device_config_needed == CHANGE) ||
-			(device_config_needed == ADD))
+	if ((channel == CONTAINER_CHANNEL) &&
+	    (device_config_needed != NOTHING)) {
+		if (dev->fsa_dev[container].valid == 1)
+			dev->fsa_dev[container].valid = 2;
 		aac_probe_container(dev, container);
-	device = scsi_device_lookup(dev->scsi_host_ptr,
-		CONTAINER_TO_CHANNEL(container),
-		CONTAINER_TO_ID(container),
-		CONTAINER_TO_LUN(container));
+	}
+	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
 	if (device) {
 		switch (device_config_needed) {
 		case DELETE:
+			if (scsi_device_online(device)) {
+				scsi_device_set_state(device, SDEV_OFFLINE);
+				sdev_printk(KERN_INFO, device,
+					"Device offlined - %s\n",
+					(channel == CONTAINER_CHANNEL) ?
+						"array deleted" :
+						"enclosure services event");
+			}
+			break;
+		case ADD:
+			if (!scsi_device_online(device)) {
+				sdev_printk(KERN_INFO, device,
+					"Device online - %s\n",
+					(channel == CONTAINER_CHANNEL) ?
+						"array created" :
+						"enclosure services event");
+				scsi_device_set_state(device, SDEV_RUNNING);
+			}
+			/* FALLTHRU */
 		case CHANGE:
+			if ((channel == CONTAINER_CHANNEL)
+			 && (!dev->fsa_dev[container].valid)) {
+				if (!scsi_device_online(device))
+					break;
+				scsi_device_set_state(device, SDEV_OFFLINE);
+				sdev_printk(KERN_INFO, device,
+					"Device offlined - %s\n",
+					"array failed");
+				break;
+			}
 			scsi_rescan_device(&device->sdev_gendev);
 
 		default:
 			break;
 		}
 		scsi_device_put(device);
+		device_config_needed = NOTHING;
 	}
-	if (device_config_needed == ADD) {
-		scsi_add_device(dev->scsi_host_ptr,
-			CONTAINER_TO_CHANNEL(container),
-			CONTAINER_TO_ID(container),
-			CONTAINER_TO_LUN(container));
-	}
-
+	if (device_config_needed == ADD)
+		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
 }
 
 static int _aac_reset_adapter(struct aac_dev *aac, int forced)
@@ -1099,7 +1184,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1099 free_irq(aac->pdev->irq, aac); 1184 free_irq(aac->pdev->irq, aac);
1100 kfree(aac->fsa_dev); 1185 kfree(aac->fsa_dev);
1101 aac->fsa_dev = NULL; 1186 aac->fsa_dev = NULL;
1102 if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) { 1187 quirks = aac_get_driver_ident(index)->quirks;
1188 if (quirks & AAC_QUIRK_31BIT) {
1103 if (((retval = pci_set_dma_mask(aac->pdev, DMA_31BIT_MASK))) || 1189 if (((retval = pci_set_dma_mask(aac->pdev, DMA_31BIT_MASK))) ||
1104 ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_31BIT_MASK)))) 1190 ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_31BIT_MASK))))
1105 goto out; 1191 goto out;
@@ -1110,7 +1196,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1110 } 1196 }
1111 if ((retval = (*(aac_get_driver_ident(index)->init))(aac))) 1197 if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1112 goto out; 1198 goto out;
1113 if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) 1199 if (quirks & AAC_QUIRK_31BIT)
1114 if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) 1200 if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
1115 goto out; 1201 goto out;
1116 if (jafo) { 1202 if (jafo) {
@@ -1121,15 +1207,14 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1121 } 1207 }
1122 } 1208 }
1123 (void)aac_get_adapter_info(aac); 1209 (void)aac_get_adapter_info(aac);
1124 quirks = aac_get_driver_ident(index)->quirks;
1125 if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) { 1210 if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1126 host->sg_tablesize = 34; 1211 host->sg_tablesize = 34;
1127 host->max_sectors = (host->sg_tablesize * 8) + 112; 1212 host->max_sectors = (host->sg_tablesize * 8) + 112;
1128 } 1213 }
1129 if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) { 1214 if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1130 host->sg_tablesize = 17; 1215 host->sg_tablesize = 17;
1131 host->max_sectors = (host->sg_tablesize * 8) + 112; 1216 host->max_sectors = (host->sg_tablesize * 8) + 112;
1132 } 1217 }
1133 aac_get_config_status(aac, 1); 1218 aac_get_config_status(aac, 1);
1134 aac_get_containers(aac); 1219 aac_get_containers(aac);
1135 /* 1220 /*
@@ -1217,12 +1302,13 @@ int aac_reset_adapter(struct aac_dev * aac, int forced)
1217 } 1302 }
1218 1303
1219 /* Quiesce build, flush cache, write through mode */ 1304 /* Quiesce build, flush cache, write through mode */
1220 aac_send_shutdown(aac); 1305 if (forced < 2)
1306 aac_send_shutdown(aac);
1221 spin_lock_irqsave(host->host_lock, flagv); 1307 spin_lock_irqsave(host->host_lock, flagv);
1222 retval = _aac_reset_adapter(aac, forced); 1308 retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
1223 spin_unlock_irqrestore(host->host_lock, flagv); 1309 spin_unlock_irqrestore(host->host_lock, flagv);
1224 1310
1225 if (retval == -ENODEV) { 1311 if ((forced < 2) && (retval == -ENODEV)) {
1226 /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */ 1312 /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
1227 struct fib * fibctx = aac_fib_alloc(aac); 1313 struct fib * fibctx = aac_fib_alloc(aac);
1228 if (fibctx) { 1314 if (fibctx) {
@@ -1338,11 +1424,11 @@ int aac_check_health(struct aac_dev * aac)
1338 fib->data = hw_fib->data; 1424 fib->data = hw_fib->data;
1339 aif = (struct aac_aifcmd *)hw_fib->data; 1425 aif = (struct aac_aifcmd *)hw_fib->data;
1340 aif->command = cpu_to_le32(AifCmdEventNotify); 1426 aif->command = cpu_to_le32(AifCmdEventNotify);
1341 aif->seqnum = cpu_to_le32(0xFFFFFFFF); 1427 aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1342 aif->data[0] = AifEnExpEvent; 1428 ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
1343 aif->data[1] = AifExeFirmwarePanic; 1429 ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
1344 aif->data[2] = AifHighPriority; 1430 ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
1345 aif->data[3] = BlinkLED; 1431 ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
1346 1432
1347 /* 1433 /*
1348 * Put the FIB onto the 1434 * Put the FIB onto the
@@ -1372,14 +1458,14 @@ int aac_check_health(struct aac_dev * aac)
1372 1458
1373 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); 1459 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1374 1460
1375 if (!aac_check_reset || 1461 if (!aac_check_reset || ((aac_check_reset != 1) &&
1376 (aac->supplement_adapter_info.SupportedOptions2 & 1462 (aac->supplement_adapter_info.SupportedOptions2 &
1377 le32_to_cpu(AAC_OPTION_IGNORE_RESET))) 1463 AAC_OPTION_IGNORE_RESET)))
1378 goto out; 1464 goto out;
1379 host = aac->scsi_host_ptr; 1465 host = aac->scsi_host_ptr;
1380 if (aac->thread->pid != current->pid) 1466 if (aac->thread->pid != current->pid)
1381 spin_lock_irqsave(host->host_lock, flagv); 1467 spin_lock_irqsave(host->host_lock, flagv);
1382 BlinkLED = _aac_reset_adapter(aac, 0); 1468 BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
1383 if (aac->thread->pid != current->pid) 1469 if (aac->thread->pid != current->pid)
1384 spin_unlock_irqrestore(host->host_lock, flagv); 1470 spin_unlock_irqrestore(host->host_lock, flagv);
1385 return BlinkLED; 1471 return BlinkLED;
@@ -1399,7 +1485,7 @@ out:
1399 * until the queue is empty. When the queue is empty it will wait for 1485 * until the queue is empty. When the queue is empty it will wait for
1400 * more FIBs. 1486 * more FIBs.
1401 */ 1487 */
1402 1488
1403int aac_command_thread(void *data) 1489int aac_command_thread(void *data)
1404{ 1490{
1405 struct aac_dev *dev = data; 1491 struct aac_dev *dev = data;
@@ -1425,30 +1511,29 @@ int aac_command_thread(void *data)
1425 add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); 1511 add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1426 set_current_state(TASK_INTERRUPTIBLE); 1512 set_current_state(TASK_INTERRUPTIBLE);
1427 dprintk ((KERN_INFO "aac_command_thread start\n")); 1513 dprintk ((KERN_INFO "aac_command_thread start\n"));
1428 while(1) 1514 while (1) {
1429 {
1430 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags); 1515 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1431 while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) { 1516 while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
1432 struct list_head *entry; 1517 struct list_head *entry;
1433 struct aac_aifcmd * aifcmd; 1518 struct aac_aifcmd * aifcmd;
1434 1519
1435 set_current_state(TASK_RUNNING); 1520 set_current_state(TASK_RUNNING);
1436 1521
1437 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next; 1522 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
1438 list_del(entry); 1523 list_del(entry);
1439 1524
1440 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags); 1525 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1441 fib = list_entry(entry, struct fib, fiblink); 1526 fib = list_entry(entry, struct fib, fiblink);
1442 /* 1527 /*
1443 * We will process the FIB here or pass it to a 1528 * We will process the FIB here or pass it to a
1444 * worker thread that is TBD. We really can't 1529 * worker thread that is TBD. We really can't
1445 * do anything at this point since we don't have 1530 * do anything at this point since we don't have
1446 * anything defined for this thread to do. 1531 * anything defined for this thread to do.
1447 */ 1532 */
1448 hw_fib = fib->hw_fib_va; 1533 hw_fib = fib->hw_fib_va;
1449 memset(fib, 0, sizeof(struct fib)); 1534 memset(fib, 0, sizeof(struct fib));
1450 fib->type = FSAFS_NTC_FIB_CONTEXT; 1535 fib->type = FSAFS_NTC_FIB_CONTEXT;
1451 fib->size = sizeof( struct fib ); 1536 fib->size = sizeof(struct fib);
1452 fib->hw_fib_va = hw_fib; 1537 fib->hw_fib_va = hw_fib;
1453 fib->data = hw_fib->data; 1538 fib->data = hw_fib->data;
1454 fib->dev = dev; 1539 fib->dev = dev;
@@ -1462,20 +1547,19 @@ int aac_command_thread(void *data)
1462 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1547 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1463 aac_fib_adapter_complete(fib, (u16)sizeof(u32)); 1548 aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1464 } else { 1549 } else {
1465 struct list_head *entry;
1466 /* The u32 here is important and intended. We are using 1550 /* The u32 here is important and intended. We are using
1467 32bit wrapping time to fit the adapter field */ 1551 32bit wrapping time to fit the adapter field */
1468 1552
1469 u32 time_now, time_last; 1553 u32 time_now, time_last;
1470 unsigned long flagv; 1554 unsigned long flagv;
1471 unsigned num; 1555 unsigned num;
1472 struct hw_fib ** hw_fib_pool, ** hw_fib_p; 1556 struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1473 struct fib ** fib_pool, ** fib_p; 1557 struct fib ** fib_pool, ** fib_p;
1474 1558
1475 /* Sniff events */ 1559 /* Sniff events */
1476 if ((aifcmd->command == 1560 if ((aifcmd->command ==
1477 cpu_to_le32(AifCmdEventNotify)) || 1561 cpu_to_le32(AifCmdEventNotify)) ||
1478 (aifcmd->command == 1562 (aifcmd->command ==
1479 cpu_to_le32(AifCmdJobProgress))) { 1563 cpu_to_le32(AifCmdJobProgress))) {
1480 aac_handle_aif(dev, fib); 1564 aac_handle_aif(dev, fib);
1481 } 1565 }
@@ -1527,7 +1611,7 @@ int aac_command_thread(void *data)
1527 spin_lock_irqsave(&dev->fib_lock, flagv); 1611 spin_lock_irqsave(&dev->fib_lock, flagv);
1528 entry = dev->fib_list.next; 1612 entry = dev->fib_list.next;
1529 /* 1613 /*
1530 * For each Context that is on the 1614 * For each Context that is on the
1531 * fibctxList, make a copy of the 1615 * fibctxList, make a copy of the
1532 * fib, and then set the event to wake up the 1616 * fib, and then set the event to wake up the
1533 * thread that is waiting for it. 1617 * thread that is waiting for it.
@@ -1552,7 +1636,7 @@ int aac_command_thread(void *data)
1552 */ 1636 */
1553 time_last = fibctx->jiffies; 1637 time_last = fibctx->jiffies;
1554 /* 1638 /*
1555 * Has it been > 2 minutes 1639 * Has it been > 2 minutes
1556 * since the last read off 1640 * since the last read off
1557 * the queue? 1641 * the queue?
1558 */ 1642 */
@@ -1583,7 +1667,7 @@ int aac_command_thread(void *data)
1583 */ 1667 */
1584 list_add_tail(&newfib->fiblink, &fibctx->fib_list); 1668 list_add_tail(&newfib->fiblink, &fibctx->fib_list);
1585 fibctx->count++; 1669 fibctx->count++;
1586 /* 1670 /*
1587 * Set the event to wake up the 1671 * Set the event to wake up the
1588 * thread that is waiting. 1672 * thread that is waiting.
1589 */ 1673 */
@@ -1655,11 +1739,11 @@ int aac_command_thread(void *data)
1655 struct fib *fibptr; 1739 struct fib *fibptr;
1656 1740
1657 if ((fibptr = aac_fib_alloc(dev))) { 1741 if ((fibptr = aac_fib_alloc(dev))) {
1658 u32 * info; 1742 __le32 *info;
1659 1743
1660 aac_fib_init(fibptr); 1744 aac_fib_init(fibptr);
1661 1745
1662 info = (u32 *) fib_data(fibptr); 1746 info = (__le32 *) fib_data(fibptr);
1663 if (now.tv_usec > 500000) 1747 if (now.tv_usec > 500000)
1664 ++now.tv_sec; 1748 ++now.tv_sec;
1665 1749
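
The commsup.c hunks above convert the AIF payload accesses from plain u32 casts to __le32 and compare against cpu_to_le32() constants. A minimal sketch of the pattern, reusing the EM_DRIVE_INSERTION event code from the hunk:

/*
 * Sketch only: the adapter emits AIF payload words in little-endian
 * order, so the driver compares the raw __le32 value against a
 * constant folded by cpu_to_le32() at compile time, instead of
 * swapping every payload word with le32_to_cpu().  The two forms are
 * equivalent at runtime; the __le32 annotation lets sparse check it.
 */
static inline int aif_is_drive_insertion(struct aac_aifcmd *aifcmd)
{
	return ((__le32 *)aifcmd->data)[3] == cpu_to_le32(EM_DRIVE_INSERTION);
}
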
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index e6032ffc66a6..d1163ded132b 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -120,6 +120,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
120 * NOTE: we cannot touch the fib after this 120 * NOTE: we cannot touch the fib after this
121 * call, because it may have been deallocated. 121 * call, because it may have been deallocated.
122 */ 122 */
123 fib->flags = 0;
123 fib->callback(fib->callback_data, fib); 124 fib->callback(fib->callback_data, fib);
124 } else { 125 } else {
125 unsigned long flagv; 126 unsigned long flagv;
@@ -229,11 +230,9 @@ unsigned int aac_command_normal(struct aac_queue *q)
229 * all QE there are and wake up all the waiters before exiting. 230 * all QE there are and wake up all the waiters before exiting.
230 */ 231 */
231 232
232unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index) 233unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
233{ 234{
234 u32 index = le32_to_cpu(Index); 235 dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
235
236 dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, Index));
237 if ((index & 0x00000002L)) { 236 if ((index & 0x00000002L)) {
238 struct hw_fib * hw_fib; 237 struct hw_fib * hw_fib;
239 struct fib * fib; 238 struct fib * fib;
@@ -301,7 +300,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
301 300
302 if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) 301 if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
303 { 302 {
304 u32 *pstatus = (u32 *)hwfib->data; 303 __le32 *pstatus = (__le32 *)hwfib->data;
305 if (*pstatus & cpu_to_le32(0xffff0000)) 304 if (*pstatus & cpu_to_le32(0xffff0000))
306 *pstatus = cpu_to_le32(ST_OK); 305 *pstatus = cpu_to_le32(ST_OK);
307 } 306 }
@@ -315,6 +314,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
315 * NOTE: we cannot touch the fib after this 314 * NOTE: we cannot touch the fib after this
316 * call, because it may have been deallocated. 315 * call, because it may have been deallocated.
317 */ 316 */
317 fib->flags = 0;
318 fib->callback(fib->callback_data, fib); 318 fib->callback(fib->callback_data, fib);
319 } else { 319 } else {
320 unsigned long flagv; 320 unsigned long flagv;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 9dd331bc29b0..61be22774e99 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -159,27 +159,27 @@ static struct pci_device_id aac_pci_tbl[] = {
159MODULE_DEVICE_TABLE(pci, aac_pci_tbl); 159MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
160 160
161/* 161/*
162 * dmb - For now we add the number of channels to this structure. 162 * dmb - For now we add the number of channels to this structure.
163 * In the future we should add a fib that reports the number of channels 163 * In the future we should add a fib that reports the number of channels
164 * for the card. At that time we can remove the channels from here 164 * for the card. At that time we can remove the channels from here
165 */ 165 */
166static struct aac_driver_ident aac_drivers[] = { 166static struct aac_driver_ident aac_drivers[] = {
167 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 2/Si (Iguana/PERC2Si) */ 167 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
168 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Opal/PERC3Di) */ 168 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
169 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Si (SlimFast/PERC3Si */ 169 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */
170 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */ 170 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
171 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Viper/PERC3DiV) */ 171 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
172 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Lexus/PERC3DiL) */ 172 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
173 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Jaguar/PERC3DiJ) */ 173 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
174 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Dagger/PERC3DiD) */ 174 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
175 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Boxster/PERC3DiB) */ 175 { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
176 { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* catapult */ 176 { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
177 { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* tomcat */ 177 { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
178 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */ 178 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2120S (Crusader) */
179 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */ 179 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan) */
180 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan-2m) */ 180 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
181 { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S220 (Legend Crusader) */ 181 { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
182 { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S230 (Legend Vulcan) */ 182 { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
183 183
184 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */ 184 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
185 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */ 185 { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
@@ -224,8 +224,8 @@ static struct aac_driver_ident aac_drivers[] = {
224 { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */ 224 { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
225 { aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */ 225 { aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
226 226
227 { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */ 227 { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
228 { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */ 228 { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
229 { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ 229 { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
230 { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ 230 { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
231 { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */ 231 { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
@@ -239,7 +239,7 @@ static struct aac_driver_ident aac_drivers[] = {
239 * Queues a command for execution by the associated Host Adapter. 239 * Queues a command for execution by the associated Host Adapter.
240 * 240 *
241 * TODO: unify with aac_scsi_cmd(). 241 * TODO: unify with aac_scsi_cmd().
242 */ 242 */
243 243
244static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 244static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
245{ 245{
@@ -258,7 +258,7 @@ static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
258 } 258 }
259 cmd->SCp.phase = AAC_OWNER_LOWLEVEL; 259 cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
260 return (aac_scsi_cmd(cmd) ? FAILED : 0); 260 return (aac_scsi_cmd(cmd) ? FAILED : 0);
261} 261}
262 262
263/** 263/**
264 * aac_info - Returns the host adapter name 264 * aac_info - Returns the host adapter name
@@ -292,21 +292,21 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype)
292 * @capacity: the sector capacity of the disk 292 * @capacity: the sector capacity of the disk
293 * @geom: geometry block to fill in 293 * @geom: geometry block to fill in
294 * 294 *
295 * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk. 295 * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
296 * The default disk geometry is 64 heads, 32 sectors, and the appropriate 296 * The default disk geometry is 64 heads, 32 sectors, and the appropriate
297 * number of cylinders so as not to exceed drive capacity. In order for 297 * number of cylinders so as not to exceed drive capacity. In order for
298 * disks equal to or larger than 1 GB to be addressable by the BIOS 298 * disks equal to or larger than 1 GB to be addressable by the BIOS
299 * without exceeding the BIOS limitation of 1024 cylinders, Extended 299 * without exceeding the BIOS limitation of 1024 cylinders, Extended
300 * Translation should be enabled. With Extended Translation enabled, 300 * Translation should be enabled. With Extended Translation enabled,
301 * drives between 1 GB inclusive and 2 GB exclusive are given a disk 301 * drives between 1 GB inclusive and 2 GB exclusive are given a disk
302 * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive 302 * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
303 * are given a disk geometry of 255 heads and 63 sectors. However, if 303 * are given a disk geometry of 255 heads and 63 sectors. However, if
304 * the BIOS detects that the Extended Translation setting does not match 304 * the BIOS detects that the Extended Translation setting does not match
305 * the geometry in the partition table, then the translation inferred 305 * the geometry in the partition table, then the translation inferred
306 * from the partition table will be used by the BIOS, and a warning may 306 * from the partition table will be used by the BIOS, and a warning may
307 * be displayed. 307 * be displayed.
308 */ 308 */
309 309
310static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev, 310static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
311 sector_t capacity, int *geom) 311 sector_t capacity, int *geom)
312{ 312{
@@ -333,10 +333,10 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
333 333
334 param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); 334 param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
335 335
336 /* 336 /*
337 * Read the first 1024 bytes from the disk device, if the boot 337 * Read the first 1024 bytes from the disk device, if the boot
338 * sector partition table is valid, search for a partition table 338 * sector partition table is valid, search for a partition table
339 * entry whose end_head matches one of the standard geometry 339 * entry whose end_head matches one of the standard geometry
340 * translations ( 64/32, 128/32, 255/63 ). 340 * translations ( 64/32, 128/32, 255/63 ).
341 */ 341 */
342 buf = scsi_bios_ptable(bdev); 342 buf = scsi_bios_ptable(bdev);
@@ -401,30 +401,44 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
401 401
402static int aac_slave_configure(struct scsi_device *sdev) 402static int aac_slave_configure(struct scsi_device *sdev)
403{ 403{
404 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
404 if ((sdev->type == TYPE_DISK) && 405 if ((sdev->type == TYPE_DISK) &&
405 (sdev_channel(sdev) != CONTAINER_CHANNEL)) { 406 (sdev_channel(sdev) != CONTAINER_CHANNEL) &&
407 (!aac->jbod || sdev->inq_periph_qual) &&
408 (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
406 if (expose_physicals == 0) 409 if (expose_physicals == 0)
407 return -ENXIO; 410 return -ENXIO;
408 if (expose_physicals < 0) { 411 if (expose_physicals < 0)
409 struct aac_dev *aac = 412 sdev->no_uld_attach = 1;
410 (struct aac_dev *)sdev->host->hostdata;
411 if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
412 sdev->no_uld_attach = 1;
413 }
414 } 413 }
415 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && 414 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
416 (sdev_channel(sdev) == CONTAINER_CHANNEL)) { 415 (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
416 !sdev->no_uld_attach) {
417 struct scsi_device * dev; 417 struct scsi_device * dev;
418 struct Scsi_Host *host = sdev->host; 418 struct Scsi_Host *host = sdev->host;
419 unsigned num_lsu = 0; 419 unsigned num_lsu = 0;
420 unsigned num_one = 0; 420 unsigned num_one = 0;
421 unsigned depth; 421 unsigned depth;
422 unsigned cid;
422 423
424 /*
425 * Firmware has an individual device recovery time typically
426 * of 35 seconds, give us a margin.
427 */
428 if (sdev->timeout < (45 * HZ))
429 sdev->timeout = 45 * HZ;
430 for (cid = 0; cid < aac->maximum_num_containers; ++cid)
431 if (aac->fsa_dev[cid].valid)
432 ++num_lsu;
423 __shost_for_each_device(dev, host) { 433 __shost_for_each_device(dev, host) {
424 if (dev->tagged_supported && (dev->type == TYPE_DISK) && 434 if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
425 (sdev_channel(dev) == CONTAINER_CHANNEL)) 435 (!aac->raid_scsi_mode ||
426 ++num_lsu; 436 (sdev_channel(sdev) != 2)) &&
427 else 437 !dev->no_uld_attach) {
438 if ((sdev_channel(dev) != CONTAINER_CHANNEL)
439 || !aac->fsa_dev[sdev_id(dev)].valid)
440 ++num_lsu;
441 } else
428 ++num_one; 442 ++num_one;
429 } 443 }
430 if (num_lsu == 0) 444 if (num_lsu == 0)
@@ -481,9 +495,35 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
481 return sdev->queue_depth; 495 return sdev->queue_depth;
482} 496}
483 497
498static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
499{
500 struct scsi_device * sdev = to_scsi_device(dev);
501 if (sdev_channel(sdev) != CONTAINER_CHANNEL)
502 return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
503 ? "Hidden\n" : "JBOD");
504 return snprintf(buf, PAGE_SIZE, "%s\n",
505 get_container_type(((struct aac_dev *)(sdev->host->hostdata))
506 ->fsa_dev[sdev_id(sdev)].type));
507}
508
509static struct device_attribute aac_raid_level_attr = {
510 .attr = {
511 .name = "level",
512 .mode = S_IRUGO,
513 },
514 .show = aac_show_raid_level
515};
516
517static struct device_attribute *aac_dev_attrs[] = {
518 &aac_raid_level_attr,
519 NULL,
520};
521
484static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg) 522static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
485{ 523{
486 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; 524 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
525 if (!capable(CAP_SYS_RAWIO))
526 return -EPERM;
487 return aac_do_ioctl(dev, cmd, arg); 527 return aac_do_ioctl(dev, cmd, arg);
488} 528}
489 529
@@ -506,17 +546,33 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
506 break; 546 break;
507 case INQUIRY: 547 case INQUIRY:
508 case READ_CAPACITY: 548 case READ_CAPACITY:
509 case TEST_UNIT_READY:
510 /* Mark associated FIB to not complete, eh handler does this */ 549 /* Mark associated FIB to not complete, eh handler does this */
511 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { 550 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
512 struct fib * fib = &aac->fibs[count]; 551 struct fib * fib = &aac->fibs[count];
513 if (fib->hw_fib_va->header.XferState && 552 if (fib->hw_fib_va->header.XferState &&
553 (fib->flags & FIB_CONTEXT_FLAG) &&
514 (fib->callback_data == cmd)) { 554 (fib->callback_data == cmd)) {
515 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; 555 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
516 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; 556 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
517 ret = SUCCESS; 557 ret = SUCCESS;
518 } 558 }
519 } 559 }
560 break;
561 case TEST_UNIT_READY:
562 /* Mark associated FIB to not complete, eh handler does this */
563 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
564 struct scsi_cmnd * command;
565 struct fib * fib = &aac->fibs[count];
566 if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
567 (fib->flags & FIB_CONTEXT_FLAG) &&
568 ((command = fib->callback_data)) &&
569 (command->device == cmd->device)) {
570 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
571 command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
572 if (command == cmd)
573 ret = SUCCESS;
574 }
575 }
520 } 576 }
521 return ret; 577 return ret;
522} 578}
@@ -539,12 +595,13 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
539 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { 595 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
540 struct fib * fib = &aac->fibs[count]; 596 struct fib * fib = &aac->fibs[count];
541 if (fib->hw_fib_va->header.XferState && 597 if (fib->hw_fib_va->header.XferState &&
598 (fib->flags & FIB_CONTEXT_FLAG) &&
542 (fib->callback_data == cmd)) { 599 (fib->callback_data == cmd)) {
543 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; 600 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
544 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; 601 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
545 } 602 }
546 } 603 }
547 printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n", 604 printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
548 AAC_DRIVERNAME); 605 AAC_DRIVERNAME);
549 606
550 if ((count = aac_check_health(aac))) 607 if ((count = aac_check_health(aac)))
@@ -584,8 +641,11 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
584 * support a register, instead of a commanded, reset. 641 * support a register, instead of a commanded, reset.
585 */ 642 */
586 if ((aac->supplement_adapter_info.SupportedOptions2 & 643 if ((aac->supplement_adapter_info.SupportedOptions2 &
587 le32_to_cpu(AAC_OPTION_MU_RESET|AAC_OPTION_IGNORE_RESET)) == 644 AAC_OPTION_MU_RESET) &&
588 le32_to_cpu(AAC_OPTION_MU_RESET)) 645 aac_check_reset &&
646 ((aac_check_reset != 1) ||
647 (aac->supplement_adapter_info.SupportedOptions2 &
648 AAC_OPTION_IGNORE_RESET)))
589 aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */ 649 aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
590 return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */ 650 return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
591} 651}
@@ -632,8 +692,8 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
632 * Bugs: Needs locking against parallel ioctls lower down 692 * Bugs: Needs locking against parallel ioctls lower down
633 * Bugs: Needs to handle hot plugging 693 * Bugs: Needs to handle hot plugging
634 */ 694 */
635 695
636static int aac_cfg_ioctl(struct inode *inode, struct file *file, 696static int aac_cfg_ioctl(struct inode *inode, struct file *file,
637 unsigned int cmd, unsigned long arg) 697 unsigned int cmd, unsigned long arg)
638{ 698{
639 if (!capable(CAP_SYS_RAWIO)) 699 if (!capable(CAP_SYS_RAWIO))
@@ -646,7 +706,7 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
646{ 706{
647 long ret; 707 long ret;
648 lock_kernel(); 708 lock_kernel();
649 switch (cmd) { 709 switch (cmd) {
650 case FSACTL_MINIPORT_REV_CHECK: 710 case FSACTL_MINIPORT_REV_CHECK:
651 case FSACTL_SENDFIB: 711 case FSACTL_SENDFIB:
652 case FSACTL_OPEN_GET_ADAPTER_FIB: 712 case FSACTL_OPEN_GET_ADAPTER_FIB:
@@ -656,14 +716,14 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
656 case FSACTL_QUERY_DISK: 716 case FSACTL_QUERY_DISK:
657 case FSACTL_DELETE_DISK: 717 case FSACTL_DELETE_DISK:
658 case FSACTL_FORCE_DELETE_DISK: 718 case FSACTL_FORCE_DELETE_DISK:
659 case FSACTL_GET_CONTAINERS: 719 case FSACTL_GET_CONTAINERS:
660 case FSACTL_SEND_LARGE_FIB: 720 case FSACTL_SEND_LARGE_FIB:
661 ret = aac_do_ioctl(dev, cmd, (void __user *)arg); 721 ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
662 break; 722 break;
663 723
664 case FSACTL_GET_NEXT_ADAPTER_FIB: { 724 case FSACTL_GET_NEXT_ADAPTER_FIB: {
665 struct fib_ioctl __user *f; 725 struct fib_ioctl __user *f;
666 726
667 f = compat_alloc_user_space(sizeof(*f)); 727 f = compat_alloc_user_space(sizeof(*f));
668 ret = 0; 728 ret = 0;
669 if (clear_user(f, sizeof(*f))) 729 if (clear_user(f, sizeof(*f)))
@@ -676,9 +736,9 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
676 } 736 }
677 737
678 default: 738 default:
679 ret = -ENOIOCTLCMD; 739 ret = -ENOIOCTLCMD;
680 break; 740 break;
681 } 741 }
682 unlock_kernel(); 742 unlock_kernel();
683 return ret; 743 return ret;
684} 744}
@@ -735,6 +795,25 @@ static ssize_t aac_show_vendor(struct class_device *class_dev,
735 return len; 795 return len;
736} 796}
737 797
798static ssize_t aac_show_flags(struct class_device *class_dev, char *buf)
799{
800 int len = 0;
801 struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
802
803 if (nblank(dprintk(x)))
804 len = snprintf(buf, PAGE_SIZE, "dprintk\n");
805#ifdef AAC_DETAILED_STATUS_INFO
806 len += snprintf(buf + len, PAGE_SIZE - len,
807 "AAC_DETAILED_STATUS_INFO\n");
808#endif
809 if (dev->raw_io_interface && dev->raw_io_64)
810 len += snprintf(buf + len, PAGE_SIZE - len,
811 "SAI_READ_CAPACITY_16\n");
812 if (dev->jbod)
813 len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
814 return len;
815}
816
738static ssize_t aac_show_kernel_version(struct class_device *class_dev, 817static ssize_t aac_show_kernel_version(struct class_device *class_dev,
739 char *buf) 818 char *buf)
740{ 819{
@@ -742,7 +821,7 @@ static ssize_t aac_show_kernel_version(struct class_device *class_dev,
742 int len, tmp; 821 int len, tmp;
743 822
744 tmp = le32_to_cpu(dev->adapter_info.kernelrev); 823 tmp = le32_to_cpu(dev->adapter_info.kernelrev);
745 len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n", 824 len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
746 tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff, 825 tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
747 le32_to_cpu(dev->adapter_info.kernelbuild)); 826 le32_to_cpu(dev->adapter_info.kernelbuild));
748 return len; 827 return len;
@@ -755,7 +834,7 @@ static ssize_t aac_show_monitor_version(struct class_device *class_dev,
755 int len, tmp; 834 int len, tmp;
756 835
757 tmp = le32_to_cpu(dev->adapter_info.monitorrev); 836 tmp = le32_to_cpu(dev->adapter_info.monitorrev);
758 len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n", 837 len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
759 tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff, 838 tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
760 le32_to_cpu(dev->adapter_info.monitorbuild)); 839 le32_to_cpu(dev->adapter_info.monitorbuild));
761 return len; 840 return len;
@@ -768,7 +847,7 @@ static ssize_t aac_show_bios_version(struct class_device *class_dev,
768 int len, tmp; 847 int len, tmp;
769 848
770 tmp = le32_to_cpu(dev->adapter_info.biosrev); 849 tmp = le32_to_cpu(dev->adapter_info.biosrev);
771 len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n", 850 len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
772 tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff, 851 tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
773 le32_to_cpu(dev->adapter_info.biosbuild)); 852 le32_to_cpu(dev->adapter_info.biosbuild));
774 return len; 853 return len;
@@ -844,6 +923,13 @@ static struct class_device_attribute aac_vendor = {
844 }, 923 },
845 .show = aac_show_vendor, 924 .show = aac_show_vendor,
846}; 925};
926static struct class_device_attribute aac_flags = {
927 .attr = {
928 .name = "flags",
929 .mode = S_IRUGO,
930 },
931 .show = aac_show_flags,
932};
847static struct class_device_attribute aac_kernel_version = { 933static struct class_device_attribute aac_kernel_version = {
848 .attr = { 934 .attr = {
849 .name = "hba_kernel_version", 935 .name = "hba_kernel_version",
@@ -898,6 +984,7 @@ static struct class_device_attribute aac_reset = {
898static struct class_device_attribute *aac_attrs[] = { 984static struct class_device_attribute *aac_attrs[] = {
899 &aac_model, 985 &aac_model,
900 &aac_vendor, 986 &aac_vendor,
987 &aac_flags,
901 &aac_kernel_version, 988 &aac_kernel_version,
902 &aac_monitor_version, 989 &aac_monitor_version,
903 &aac_bios_version, 990 &aac_bios_version,
@@ -928,21 +1015,22 @@ static struct scsi_host_template aac_driver_template = {
928 .compat_ioctl = aac_compat_ioctl, 1015 .compat_ioctl = aac_compat_ioctl,
929#endif 1016#endif
930 .queuecommand = aac_queuecommand, 1017 .queuecommand = aac_queuecommand,
931 .bios_param = aac_biosparm, 1018 .bios_param = aac_biosparm,
932 .shost_attrs = aac_attrs, 1019 .shost_attrs = aac_attrs,
933 .slave_configure = aac_slave_configure, 1020 .slave_configure = aac_slave_configure,
934 .change_queue_depth = aac_change_queue_depth, 1021 .change_queue_depth = aac_change_queue_depth,
1022 .sdev_attrs = aac_dev_attrs,
935 .eh_abort_handler = aac_eh_abort, 1023 .eh_abort_handler = aac_eh_abort,
936 .eh_host_reset_handler = aac_eh_reset, 1024 .eh_host_reset_handler = aac_eh_reset,
937 .can_queue = AAC_NUM_IO_FIB, 1025 .can_queue = AAC_NUM_IO_FIB,
938 .this_id = MAXIMUM_NUM_CONTAINERS, 1026 .this_id = MAXIMUM_NUM_CONTAINERS,
939 .sg_tablesize = 16, 1027 .sg_tablesize = 16,
940 .max_sectors = 128, 1028 .max_sectors = 128,
941#if (AAC_NUM_IO_FIB > 256) 1029#if (AAC_NUM_IO_FIB > 256)
942 .cmd_per_lun = 256, 1030 .cmd_per_lun = 256,
943#else 1031#else
944 .cmd_per_lun = AAC_NUM_IO_FIB, 1032 .cmd_per_lun = AAC_NUM_IO_FIB,
945#endif 1033#endif
946 .use_clustering = ENABLE_CLUSTERING, 1034 .use_clustering = ENABLE_CLUSTERING,
947 .use_sg_chaining = ENABLE_SG_CHAINING, 1035 .use_sg_chaining = ENABLE_SG_CHAINING,
948 .emulated = 1, 1036 .emulated = 1,
@@ -979,18 +1067,18 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
979 goto out; 1067 goto out;
980 error = -ENODEV; 1068 error = -ENODEV;
981 1069
982 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || 1070 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
983 pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) 1071 pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
984 goto out_disable_pdev; 1072 goto out_disable_pdev;
985 /* 1073 /*
986 * If the quirk31 bit is set, the adapter needs adapter 1074 * If the quirk31 bit is set, the adapter needs adapter
987 * to driver communication memory to be allocated below 2gig 1075 * to driver communication memory to be allocated below 2gig
988 */ 1076 */
989 if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) 1077 if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
990 if (pci_set_dma_mask(pdev, DMA_31BIT_MASK) || 1078 if (pci_set_dma_mask(pdev, DMA_31BIT_MASK) ||
991 pci_set_consistent_dma_mask(pdev, DMA_31BIT_MASK)) 1079 pci_set_consistent_dma_mask(pdev, DMA_31BIT_MASK))
992 goto out_disable_pdev; 1080 goto out_disable_pdev;
993 1081
994 pci_set_master(pdev); 1082 pci_set_master(pdev);
995 1083
996 shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev)); 1084 shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
@@ -1003,7 +1091,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1003 shost->max_cmd_len = 16; 1091 shost->max_cmd_len = 16;
1004 1092
1005 aac = (struct aac_dev *)shost->hostdata; 1093 aac = (struct aac_dev *)shost->hostdata;
1006 aac->scsi_host_ptr = shost; 1094 aac->scsi_host_ptr = shost;
1007 aac->pdev = pdev; 1095 aac->pdev = pdev;
1008 aac->name = aac_driver_template.name; 1096 aac->name = aac_driver_template.name;
1009 aac->id = shost->unique_id; 1097 aac->id = shost->unique_id;
@@ -1040,7 +1128,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1040 if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) 1128 if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
1041 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) 1129 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
1042 goto out_deinit; 1130 goto out_deinit;
1043 1131
1044 aac->maximum_num_channels = aac_drivers[index].channels; 1132 aac->maximum_num_channels = aac_drivers[index].channels;
1045 error = aac_get_adapter_info(aac); 1133 error = aac_get_adapter_info(aac);
1046 if (error < 0) 1134 if (error < 0)
@@ -1049,7 +1137,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1049 /* 1137 /*
1050 * Let's override negotiations and drop the maximum SG limit to 34 1138 * Let's override negotiations and drop the maximum SG limit to 34
1051 */ 1139 */
1052 if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) && 1140 if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
1053 (aac->scsi_host_ptr->sg_tablesize > 34)) { 1141 (aac->scsi_host_ptr->sg_tablesize > 34)) {
1054 aac->scsi_host_ptr->sg_tablesize = 34; 1142 aac->scsi_host_ptr->sg_tablesize = 34;
1055 aac->scsi_host_ptr->max_sectors 1143 aac->scsi_host_ptr->max_sectors
@@ -1066,17 +1154,17 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1066 /* 1154 /*
1067 * Firmware printf works only with older firmware. 1155 * Firmware printf works only with older firmware.
1068 */ 1156 */
1069 if (aac_drivers[index].quirks & AAC_QUIRK_34SG) 1157 if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
1070 aac->printf_enabled = 1; 1158 aac->printf_enabled = 1;
1071 else 1159 else
1072 aac->printf_enabled = 0; 1160 aac->printf_enabled = 0;
1073 1161
1074 /* 1162 /*
1075 * max channel will be the physical channels plus 1 virtual channel 1163 * max channel will be the physical channels plus 1 virtual channel
1076 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL) 1164 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
1077 * physical channels are addressed by their actual physical number+1 1165 * physical channels are addressed by their actual physical number+1
1078 */ 1166 */
1079 if ((aac->nondasd_support == 1) || expose_physicals) 1167 if (aac->nondasd_support || expose_physicals || aac->jbod)
1080 shost->max_channel = aac->maximum_num_channels; 1168 shost->max_channel = aac->maximum_num_channels;
1081 else 1169 else
1082 shost->max_channel = 0; 1170 shost->max_channel = 0;
@@ -1148,10 +1236,10 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
1148 kfree(aac->queues); 1236 kfree(aac->queues);
1149 1237
1150 aac_adapter_ioremap(aac, 0); 1238 aac_adapter_ioremap(aac, 0);
1151 1239
1152 kfree(aac->fibs); 1240 kfree(aac->fibs);
1153 kfree(aac->fsa_dev); 1241 kfree(aac->fsa_dev);
1154 1242
1155 list_del(&aac->entry); 1243 list_del(&aac->entry);
1156 scsi_host_put(shost); 1244 scsi_host_put(shost);
1157 pci_disable_device(pdev); 1245 pci_disable_device(pdev);
@@ -1172,7 +1260,7 @@ static struct pci_driver aac_pci_driver = {
1172static int __init aac_init(void) 1260static int __init aac_init(void)
1173{ 1261{
1174 int error; 1262 int error;
1175 1263
1176 printk(KERN_INFO "Adaptec %s driver %s\n", 1264 printk(KERN_INFO "Adaptec %s driver %s\n",
1177 AAC_DRIVERNAME, aac_driver_version); 1265 AAC_DRIVERNAME, aac_driver_version);
1178 1266
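
The geometry rules described in the aac_biosparm() comment above reduce to a small table. A hypothetical helper, with capacity in 512-byte sectors (not the driver's code, which additionally consults the on-disk partition table):

static void aac_default_chs(sector_t capacity, int *heads, int *sectors,
			    sector_t *cylinders)
{
	*heads = 64;				/* default translation 64/32 */
	*sectors = 32;
	if (capacity >= 4194304) {		/* >= 2 GB */
		*heads = 255;
		*sectors = 63;
	} else if (capacity >= 2097152)		/* >= 1 GB */
		*heads = 128;
	/* pick cylinders so the geometry covers the whole capacity */
	*cylinders = capacity / (*heads * *sectors);
}
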
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 73eef3dc5dc6..a08bbf1fd76c 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -465,7 +465,7 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
465 u32 var; 465 u32 var;
466 466
467 if (!(dev->supplement_adapter_info.SupportedOptions2 & 467 if (!(dev->supplement_adapter_info.SupportedOptions2 &
468 le32_to_cpu(AAC_OPTION_MU_RESET)) || (bled >= 0) || (bled == -2)) { 468 AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
469 if (bled) 469 if (bled)
470 printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", 470 printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
471 dev->name, dev->id, bled); 471 dev->name, dev->id, bled);
@@ -549,7 +549,9 @@ int _aac_rx_init(struct aac_dev *dev)
549 dev->OIMR = status = rx_readb (dev, MUnit.OIMR); 549 dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
550 if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) && 550 if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
551 !aac_rx_restart_adapter(dev, 0)) 551 !aac_rx_restart_adapter(dev, 0))
552 ++restart; 552 /* Make sure the Hardware FIFO is empty */
553 while ((++restart < 512) &&
554 (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
553 /* 555 /*
554 * Check to see if the board panic'd while booting. 556 * Check to see if the board panic'd while booting.
555 */ 557 */
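
The _aac_rx_init() hunk above replaces the bare ++restart with a bounded drain of the outbound hardware FIFO, discarding completions left over from before the restart. The idiom in isolation:

/*
 * 0xFFFFFFFF is the adapter's "queue empty" marker; the 512-read cap
 * keeps a wedged FIFO from hanging initialization.  restart ends up
 * non-zero, preserving its role as the "a restart happened" flag.
 */
while ((++restart < 512) &&
       (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL))
	;	/* pop and discard stale entries */
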
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 38a1ee2eacd8..374ed025dc5a 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -8233,7 +8233,7 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
8233 if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) { 8233 if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) {
8234 ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n"); 8234 ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
8235 ASC_DBG_PRT_SENSE(2, scp->sense_buffer, 8235 ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
8236 sizeof(scp->sense_buffer)); 8236 SCSI_SENSE_BUFFERSIZE);
8237 /* 8237 /*
8238 * Note: The 'status_byte()' macro used by 8238 * Note: The 'status_byte()' macro used by
8239 * target drivers defined in scsi.h shifts the 8239 * target drivers defined in scsi.h shifts the
@@ -9136,7 +9136,7 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
9136 BUG_ON(asc_dvc_varp != &boardp->dvc_var.asc_dvc_var); 9136 BUG_ON(asc_dvc_varp != &boardp->dvc_var.asc_dvc_var);
9137 9137
9138 dma_unmap_single(boardp->dev, scp->SCp.dma_handle, 9138 dma_unmap_single(boardp->dev, scp->SCp.dma_handle,
9139 sizeof(scp->sense_buffer), DMA_FROM_DEVICE); 9139 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
9140 /* 9140 /*
9141 * 'qdonep' contains the command's ending status. 9141 * 'qdonep' contains the command's ending status.
9142 */ 9142 */
@@ -9166,7 +9166,7 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
9166 if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) { 9166 if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) {
9167 ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n"); 9167 ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
9168 ASC_DBG_PRT_SENSE(2, scp->sense_buffer, 9168 ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
9169 sizeof(scp->sense_buffer)); 9169 SCSI_SENSE_BUFFERSIZE);
9170 /* 9170 /*
9171 * Note: The 'status_byte()' macro used by 9171 * Note: The 'status_byte()' macro used by
9172 * target drivers defined in scsi.h shifts the 9172 * target drivers defined in scsi.h shifts the
@@ -9881,9 +9881,9 @@ static __le32 advansys_get_sense_buffer_dma(struct scsi_cmnd *scp)
9881{ 9881{
9882 struct asc_board *board = shost_priv(scp->device->host); 9882 struct asc_board *board = shost_priv(scp->device->host);
9883 scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer, 9883 scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
9884 sizeof(scp->sense_buffer), DMA_FROM_DEVICE); 9884 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
9885 dma_cache_sync(board->dev, scp->sense_buffer, 9885 dma_cache_sync(board->dev, scp->sense_buffer,
9886 sizeof(scp->sense_buffer), DMA_FROM_DEVICE); 9886 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
9887 return cpu_to_le32(scp->SCp.dma_handle); 9887 return cpu_to_le32(scp->SCp.dma_handle);
9888} 9888}
9889 9889
@@ -9914,7 +9914,7 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
9914 asc_scsi_q->q2.target_ix = 9914 asc_scsi_q->q2.target_ix =
9915 ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun); 9915 ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun);
9916 asc_scsi_q->q1.sense_addr = advansys_get_sense_buffer_dma(scp); 9916 asc_scsi_q->q1.sense_addr = advansys_get_sense_buffer_dma(scp);
9917 asc_scsi_q->q1.sense_len = sizeof(scp->sense_buffer); 9917 asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE;
9918 9918
9919 /* 9919 /*
9920 * If there are any outstanding requests for the current target, 9920 * If there are any outstanding requests for the current target,
@@ -10173,7 +10173,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
10173 scsiqp->target_lun = scp->device->lun; 10173 scsiqp->target_lun = scp->device->lun;
10174 10174
10175 scsiqp->sense_addr = cpu_to_le32(virt_to_bus(&scp->sense_buffer[0])); 10175 scsiqp->sense_addr = cpu_to_le32(virt_to_bus(&scp->sense_buffer[0]));
10176 scsiqp->sense_len = sizeof(scp->sense_buffer); 10176 scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
10177 10177
10178 /* Build ADV_SCSI_REQ_Q */ 10178 /* Build ADV_SCSI_REQ_Q */
10179 10179
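
The advansys.c hunks above (like the aha1542.c and aha152x.c ones that follow) replace sizeof(scp->sense_buffer) with SCSI_SENSE_BUFFERSIZE. That matters once the midlayer turns the embedded sense array into a pointer; a minimal illustration of the trap:

struct cmd_old { unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE]; };
struct cmd_new { unsigned char *sense_buffer; };
/* sizeof(((struct cmd_old *)0)->sense_buffer) == SCSI_SENSE_BUFFERSIZE (96) */
/* sizeof(((struct cmd_new *)0)->sense_buffer) == sizeof(void *), 4 or 8,   */
/* silently truncating every sense copy and DMA mapping that used sizeof(). */
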
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index ea8c69947644..6ccdc96cc480 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -260,6 +260,7 @@
260#include <scsi/scsi_dbg.h> 260#include <scsi/scsi_dbg.h>
261#include <scsi/scsi_host.h> 261#include <scsi/scsi_host.h>
262#include <scsi/scsi_transport_spi.h> 262#include <scsi/scsi_transport_spi.h>
263#include <scsi/scsi_eh.h>
263#include "aha152x.h" 264#include "aha152x.h"
264 265
265static LIST_HEAD(aha152x_host_list); 266static LIST_HEAD(aha152x_host_list);
@@ -558,9 +559,7 @@ struct aha152x_hostdata {
558struct aha152x_scdata { 559struct aha152x_scdata {
559 Scsi_Cmnd *next; /* next sc in queue */ 560 Scsi_Cmnd *next; /* next sc in queue */
560 struct completion *done;/* semaphore to block on */ 561 struct completion *done;/* semaphore to block on */
561 unsigned char aha_orig_cmd_len; 562 struct scsi_eh_save ses;
562 unsigned char aha_orig_cmnd[MAX_COMMAND_SIZE];
563 int aha_orig_resid;
564}; 563};
565 564
566/* access macros for hostdata */ 565/* access macros for hostdata */
@@ -1017,16 +1016,10 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
1017 SCp.buffers_residual : left buffers in list 1016 SCp.buffers_residual : left buffers in list
1018 SCp.phase : current state of the command */ 1017 SCp.phase : current state of the command */
1019 1018
1020 if ((phase & (check_condition|resetting)) || !scsi_sglist(SCpnt)) { 1019 if ((phase & resetting) || !scsi_sglist(SCpnt)) {
1021 if (phase & check_condition) { 1020 SCpnt->SCp.ptr = NULL;
1022 SCpnt->SCp.ptr = SCpnt->sense_buffer; 1021 SCpnt->SCp.this_residual = 0;
1023 SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer); 1022 scsi_set_resid(SCpnt, 0);
1024 scsi_set_resid(SCpnt, sizeof(SCpnt->sense_buffer));
1025 } else {
1026 SCpnt->SCp.ptr = NULL;
1027 SCpnt->SCp.this_residual = 0;
1028 scsi_set_resid(SCpnt, 0);
1029 }
1030 SCpnt->SCp.buffer = NULL; 1023 SCpnt->SCp.buffer = NULL;
1031 SCpnt->SCp.buffers_residual = 0; 1024 SCpnt->SCp.buffers_residual = 0;
1032 } else { 1025 } else {
@@ -1561,10 +1554,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
1561 } 1554 }
1562#endif 1555#endif
1563 1556
1564 /* restore old command */ 1557 scsi_eh_restore_cmnd(cmd, &sc->ses);
1565 memcpy(cmd->cmnd, sc->aha_orig_cmnd, sizeof(cmd->cmnd));
1566 cmd->cmd_len = sc->aha_orig_cmd_len;
1567 scsi_set_resid(cmd, sc->aha_orig_resid);
1568 1558
1569 cmd->SCp.Status = SAM_STAT_CHECK_CONDITION; 1559 cmd->SCp.Status = SAM_STAT_CHECK_CONDITION;
1570 1560
@@ -1587,22 +1577,10 @@ static void busfree_run(struct Scsi_Host *shpnt)
1587 DPRINTK(debug_eh, ERR_LEAD "requesting sense\n", CMDINFO(ptr)); 1577 DPRINTK(debug_eh, ERR_LEAD "requesting sense\n", CMDINFO(ptr));
1588#endif 1578#endif
1589 1579
1590 /* save old command */
1591 sc = SCDATA(ptr); 1580 sc = SCDATA(ptr);
1592 /* It was allocated in aha152x_internal_queue? */ 1581 /* It was allocated in aha152x_internal_queue? */
1593 BUG_ON(!sc); 1582 BUG_ON(!sc);
1594 memcpy(sc->aha_orig_cmnd, ptr->cmnd, 1583 scsi_eh_prep_cmnd(ptr, &sc->ses, NULL, 0, ~0);
1595 sizeof(ptr->cmnd));
1596 sc->aha_orig_cmd_len = ptr->cmd_len;
1597 sc->aha_orig_resid = scsi_get_resid(ptr);
1598
1599 ptr->cmnd[0] = REQUEST_SENSE;
1600 ptr->cmnd[1] = 0;
1601 ptr->cmnd[2] = 0;
1602 ptr->cmnd[3] = 0;
1603 ptr->cmnd[4] = sizeof(ptr->sense_buffer);
1604 ptr->cmnd[5] = 0;
1605 ptr->cmd_len = 6;
1606 1584
1607 DO_UNLOCK(flags); 1585 DO_UNLOCK(flags);
1608 aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done); 1586 aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
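The aha152x hunks above drop the driver's private CDB/length/residual copy in favor of the midlayer's scsi_eh_save helpers. A minimal sketch of that pattern follows; the struct and function names are hypothetical, and only the two scsi_eh_* calls and their arguments mirror the diff.

/* Sketch: midlayer-managed auto REQUEST SENSE.
 * scsi_eh_prep_cmnd() saves the command state into a scsi_eh_save and
 * rewrites cmd->cmnd (NULL/0 selects the default 6-byte REQUEST SENSE,
 * ~0 asks for the full sense length); scsi_eh_restore_cmnd() undoes it. */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

struct my_scdata {				/* hypothetical per-command data */
	struct scsi_eh_save ses;
};

static void my_request_sense(struct scsi_cmnd *cmd, struct my_scdata *sc)
{
	scsi_eh_prep_cmnd(cmd, &sc->ses, NULL, 0, ~0);
	/* requeue cmd internally, as aha152x_internal_queue() does */
}

static void my_sense_complete(struct scsi_cmnd *cmd, struct my_scdata *sc)
{
	scsi_eh_restore_cmnd(cmd, &sc->ses);	/* original CDB and resid back */
}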
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index bbcc2c52d79f..190568ebea3c 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -51,15 +51,6 @@
51#define SCSI_BUF_PA(address) isa_virt_to_bus(address) 51#define SCSI_BUF_PA(address) isa_virt_to_bus(address)
52#define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset) 52#define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
53 53
54static void BAD_DMA(void *address, unsigned int length)
55{
56 printk(KERN_CRIT "buf vaddress %p paddress 0x%lx length %d\n",
57 address,
58 SCSI_BUF_PA(address),
59 length);
60 panic("Buffer at physical address > 16Mb used for aha1542");
61}
62
63static void BAD_SG_DMA(Scsi_Cmnd * SCpnt, 54static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
64 struct scatterlist *sgp, 55 struct scatterlist *sgp,
65 int nseg, 56 int nseg,
@@ -545,7 +536,7 @@ static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id)
545 we will still have it in the cdb when we come back */ 536 we will still have it in the cdb when we come back */
546 if (ccb[mbo].tarstat == 2) 537 if (ccb[mbo].tarstat == 2)
547 memcpy(SCtmp->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen], 538 memcpy(SCtmp->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
548 sizeof(SCtmp->sense_buffer)); 539 SCSI_SENSE_BUFFERSIZE);
549 540
550 541
551 /* is there mail :-) */ 542 /* is there mail :-) */
@@ -597,8 +588,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
597 unchar target = SCpnt->device->id; 588 unchar target = SCpnt->device->id;
598 unchar lun = SCpnt->device->lun; 589 unchar lun = SCpnt->device->lun;
599 unsigned long flags; 590 unsigned long flags;
600 void *buff = SCpnt->request_buffer; 591 int bufflen = scsi_bufflen(SCpnt);
601 int bufflen = SCpnt->request_bufflen;
602 int mbo; 592 int mbo;
603 struct mailbox *mb; 593 struct mailbox *mb;
604 struct ccb *ccb; 594 struct ccb *ccb;
@@ -619,7 +609,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
619#if 0 609#if 0
620 /* scsi_request_sense() provides a buffer of size 256, 610 /* scsi_request_sense() provides a buffer of size 256,
621 so there is no reason to expect equality */ 611 so there is no reason to expect equality */
622 if (bufflen != sizeof(SCpnt->sense_buffer)) 612 if (bufflen != SCSI_SENSE_BUFFERSIZE)
623 printk(KERN_CRIT "aha1542: Wrong buffer length supplied " 613 printk(KERN_CRIT "aha1542: Wrong buffer length supplied "
624 "for request sense (%d)\n", bufflen); 614 "for request sense (%d)\n", bufflen);
625#endif 615#endif
@@ -689,42 +679,29 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
689 679
690 memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen); 680 memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
691 681
692 if (SCpnt->use_sg) { 682 if (bufflen) {
693 struct scatterlist *sg; 683 struct scatterlist *sg;
694 struct chain *cptr; 684 struct chain *cptr;
695#ifdef DEBUG 685#ifdef DEBUG
696 unsigned char *ptr; 686 unsigned char *ptr;
697#endif 687#endif
698 int i; 688 int i, sg_count = scsi_sg_count(SCpnt);
699 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ 689 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
700 SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA); 690 SCpnt->host_scribble = kmalloc(sizeof(*cptr)*sg_count,
691 GFP_KERNEL | GFP_DMA);
701 cptr = (struct chain *) SCpnt->host_scribble; 692 cptr = (struct chain *) SCpnt->host_scribble;
702 if (cptr == NULL) { 693 if (cptr == NULL) {
703 /* free the claimed mailbox slot */ 694 /* free the claimed mailbox slot */
704 HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL; 695 HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL;
705 return SCSI_MLQUEUE_HOST_BUSY; 696 return SCSI_MLQUEUE_HOST_BUSY;
706 } 697 }
707 scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) { 698 scsi_for_each_sg(SCpnt, sg, sg_count, i) {
708 if (sg->length == 0 || SCpnt->use_sg > 16 ||
709 (((int) sg->offset) & 1) || (sg->length & 1)) {
710 unsigned char *ptr;
711 printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
712 scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
713 printk(KERN_CRIT "%d: %p %d\n", i,
714 sg_virt(sg), sg->length);
715 };
716 printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
717 ptr = (unsigned char *) &cptr[i];
718 for (i = 0; i < 18; i++)
719 printk("%02x ", ptr[i]);
720 panic("Foooooooood fight!");
721 };
722 any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg)); 699 any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg));
723 if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD) 700 if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD)
724 BAD_SG_DMA(SCpnt, sg, SCpnt->use_sg, i); 701 BAD_SG_DMA(SCpnt, scsi_sglist(SCpnt), sg_count, i);
725 any2scsi(cptr[i].datalen, sg->length); 702 any2scsi(cptr[i].datalen, sg->length);
726 }; 703 };
727 any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain)); 704 any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
728 any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr)); 705 any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr));
729#ifdef DEBUG 706#ifdef DEBUG
730 printk("cptr %x: ", cptr); 707 printk("cptr %x: ", cptr);
@@ -735,10 +712,8 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
735 } else { 712 } else {
736 ccb[mbo].op = 0; /* SCSI Initiator Command */ 713 ccb[mbo].op = 0; /* SCSI Initiator Command */
737 SCpnt->host_scribble = NULL; 714 SCpnt->host_scribble = NULL;
738 any2scsi(ccb[mbo].datalen, bufflen); 715 any2scsi(ccb[mbo].datalen, 0);
739 if (buff && SCSI_BUF_PA(buff + bufflen - 1) > ISA_DMA_THRESHOLD) 716 any2scsi(ccb[mbo].dataptr, 0);
740 BAD_DMA(buff, bufflen);
741 any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(buff));
742 }; 717 };
743 ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7); /*SCSI Target Id */ 718 ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7); /*SCSI Target Id */
744 ccb[mbo].rsalen = 16; 719 ccb[mbo].rsalen = 16;
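The aha1542 conversion above retires use_sg/request_buffer/request_bufflen in favor of the scsi_sglist()/scsi_sg_count()/scsi_bufflen() accessors, leaving the scatterlist walk as the only data path. A sketch of the idiom, assuming a hypothetical per-entry hardware hook:

/* Sketch: accessor-based scatterlist walk. */
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static void my_program_sg_entry(int idx, unsigned long addr,
				unsigned int len);	/* hypothetical hw hook */

static void my_setup_data(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, sg_count = scsi_sg_count(cmd);

	if (!scsi_bufflen(cmd))
		return;				/* no data phase */

	scsi_for_each_sg(cmd, sg, sg_count, i)
		my_program_sg_entry(i, sg_phys(sg), sg->length);
}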
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index f6722fd46008..be58a0b097c7 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -286,7 +286,7 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
286 cdb when we come back */ 286 cdb when we come back */
287 if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) { 287 if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) {
288 memcpy(SCtmp->sense_buffer, ecbptr->sense, 288 memcpy(SCtmp->sense_buffer, ecbptr->sense,
289 sizeof(SCtmp->sense_buffer)); 289 SCSI_SENSE_BUFFERSIZE);
290 errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status); 290 errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
291 } else 291 } else
292 errstatus = 0; 292 errstatus = 0;
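Here, as in the aic7xxx, aic7xxx_old, and arcmsr hunks that follow, sizeof(SCtmp->sense_buffer) gives way to SCSI_SENSE_BUFFERSIZE: when sense_buffer later becomes a pointer to a separately allocated buffer instead of an embedded array, sizeof() would silently degrade to the pointer size. A bounded-copy sketch (helper name hypothetical):

/* Sketch: copy at most SCSI_SENSE_BUFFERSIZE bytes and zero the tail,
 * matching the aic7xxx_osm.c hunk below. */
#include <linux/kernel.h>
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>

static void my_copy_sense(struct scsi_cmnd *cmd, const u8 *hw_sense,
			  size_t hw_len)
{
	size_t n = min_t(size_t, hw_len, SCSI_SENSE_BUFFERSIZE);

	memcpy(cmd->sense_buffer, hw_sense, n);
	if (n < SCSI_SENSE_BUFFERSIZE)
		memset(cmd->sense_buffer + n, 0, SCSI_SENSE_BUFFERSIZE - n);
}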
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
index 9a6ce19a4030..e4f70c563bc2 100644
--- a/drivers/scsi/aic7xxx/Makefile
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -33,11 +33,10 @@ aic79xx-y += aic79xx_osm.o \
33 aic79xx_proc.o \ 33 aic79xx_proc.o \
34 aic79xx_osm_pci.o 34 aic79xx_osm_pci.o
35 35
36EXTRA_CFLAGS += -Idrivers/scsi 36ccflags-y += -Idrivers/scsi
37ifdef WARNINGS_BECOME_ERRORS 37ifdef WARNINGS_BECOME_ERRORS
38EXTRA_CFLAGS += -Werror 38ccflags-y += -Werror
39endif 39endif
40#EXTRA_CFLAGS += -g
41 40
42# Files generated that shall be removed upon make clean 41# Files generated that shall be removed upon make clean
43clean-files := aic7xxx_seq.h aic7xxx_reg.h aic7xxx_reg_print.c 42clean-files := aic7xxx_seq.h aic7xxx_reg.h aic7xxx_reg_print.c
@@ -46,53 +45,45 @@ clean-files += aic79xx_seq.h aic79xx_reg.h aic79xx_reg_print.c
46# Dependencies for generated files need to be listed explicitly 45# Dependencies for generated files need to be listed explicitly
47 46
48$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_seq.h 47$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_seq.h
48$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_reg.h
49$(obj)/aic79xx_core.o: $(obj)/aic79xx_seq.h 49$(obj)/aic79xx_core.o: $(obj)/aic79xx_seq.h
50$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped 50$(obj)/aic79xx_core.o: $(obj)/aic79xx_reg.h
51$(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped
52 51
53$(addprefix $(obj)/,$(aic7xxx-y)): $(obj)/aic7xxx_reg.h 52$(addprefix $(obj)/,$(aic7xxx-y)): $(obj)/aic7xxx_seq.h
54$(addprefix $(obj)/,$(aic79xx-y)): $(obj)/aic79xx_reg.h 53$(addprefix $(obj)/,$(aic79xx-y)): $(obj)/aic79xx_seq.h
55 54
56aic7xxx-gen-$(CONFIG_AIC7XXX_BUILD_FIRMWARE) := $(obj)/aic7xxx_seq.h \ 55aic7xxx-gen-$(CONFIG_AIC7XXX_BUILD_FIRMWARE) := $(obj)/aic7xxx_reg.h
57 $(obj)/aic7xxx_reg.h
58aic7xxx-gen-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) += $(obj)/aic7xxx_reg_print.c 56aic7xxx-gen-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) += $(obj)/aic7xxx_reg_print.c
59 57
60aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \ 58aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \
61 -p $(obj)/aic7xxx_reg_print.c -i aic7xxx_osm.h 59 -p $(obj)/aic7xxx_reg_print.c -i aic7xxx_osm.h
62 60
63ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y) 61ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y)
64# Create a dependency chain in generated files 62$(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
65# to avoid concurrent invocations of the single
66# rule that builds them all.
67aic7xxx_seq.h: aic7xxx_reg.h
68ifeq ($(CONFIG_AIC7XXX_REG_PRETTY_PRINT),y)
69aic7xxx_reg.h: aic7xxx_reg_print.c
70endif
71$(aic7xxx-gen-y): $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
72 $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic7xxx_reg.h \ 63 $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic7xxx_reg.h \
73 $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \ 64 $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \
74 $(src)/aic7xxx.seq 65 $(src)/aic7xxx.seq
66
67$(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h
68else
69$(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped
75endif 70endif
76 71
77aic79xx-gen-$(CONFIG_AIC79XX_BUILD_FIRMWARE) := $(obj)/aic79xx_seq.h \ 72aic79xx-gen-$(CONFIG_AIC79XX_BUILD_FIRMWARE) := $(obj)/aic79xx_reg.h
78 $(obj)/aic79xx_reg.h
79aic79xx-gen-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) += $(obj)/aic79xx_reg_print.c 73aic79xx-gen-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) += $(obj)/aic79xx_reg_print.c
80 74
81aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \ 75aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \
82 -p $(obj)/aic79xx_reg_print.c -i aic79xx_osm.h 76 -p $(obj)/aic79xx_reg_print.c -i aic79xx_osm.h
83 77
84ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y) 78ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y)
85# Create a dependency chain in generated files 79$(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
86# to avoid concurrent invocations of the single
87# rule that builds them all.
88aic79xx_seq.h: aic79xx_reg.h
89ifeq ($(CONFIG_AIC79XX_REG_PRETTY_PRINT),y)
90aic79xx_reg.h: aic79xx_reg_print.c
91endif
92$(aic79xx-gen-y): $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
93 $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic79xx_reg.h \ 80 $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic79xx_reg.h \
94 $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \ 81 $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \
95 $(src)/aic79xx.seq 82 $(src)/aic79xx.seq
83
84$(aic79xx-gen-y): $(obj)/aic79xx_seq.h
85else
86$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
96endif 87endif
97 88
98$(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl] 89$(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl]
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 2d020405480c..0e4708fd43c8 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1784,7 +1784,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
1784 if (scb->flags & SCB_SENSE) { 1784 if (scb->flags & SCB_SENSE) {
1785 sense_size = min(sizeof(struct scsi_sense_data) 1785 sense_size = min(sizeof(struct scsi_sense_data)
1786 - ahd_get_sense_residual(scb), 1786 - ahd_get_sense_residual(scb),
1787 (u_long)sizeof(cmd->sense_buffer)); 1787 (u_long)SCSI_SENSE_BUFFERSIZE);
1788 sense_offset = 0; 1788 sense_offset = 0;
1789 } else { 1789 } else {
1790 /* 1790 /*
@@ -1795,11 +1795,11 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
1795 scb->sense_data; 1795 scb->sense_data;
1796 sense_size = min_t(size_t, 1796 sense_size = min_t(size_t,
1797 scsi_4btoul(siu->sense_length), 1797 scsi_4btoul(siu->sense_length),
1798 sizeof(cmd->sense_buffer)); 1798 SCSI_SENSE_BUFFERSIZE);
1799 sense_offset = SIU_SENSE_OFFSET(siu); 1799 sense_offset = SIU_SENSE_OFFSET(siu);
1800 } 1800 }
1801 1801
1802 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); 1802 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1803 memcpy(cmd->sense_buffer, 1803 memcpy(cmd->sense_buffer,
1804 ahd_get_sense_buf(ahd, scb) 1804 ahd_get_sense_buf(ahd, scb)
1805 + sense_offset, sense_size); 1805 + sense_offset, sense_size);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 390b0fc991c5..e310e414067f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1801,12 +1801,12 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
1801 1801
1802 sense_size = min(sizeof(struct scsi_sense_data) 1802 sense_size = min(sizeof(struct scsi_sense_data)
1803 - ahc_get_sense_residual(scb), 1803 - ahc_get_sense_residual(scb),
1804 (u_long)sizeof(cmd->sense_buffer)); 1804 (u_long)SCSI_SENSE_BUFFERSIZE);
1805 memcpy(cmd->sense_buffer, 1805 memcpy(cmd->sense_buffer,
1806 ahc_get_sense_buf(ahc, scb), sense_size); 1806 ahc_get_sense_buf(ahc, scb), sense_size);
1807 if (sense_size < sizeof(cmd->sense_buffer)) 1807 if (sense_size < SCSI_SENSE_BUFFERSIZE)
1808 memset(&cmd->sense_buffer[sense_size], 0, 1808 memset(&cmd->sense_buffer[sense_size], 0,
1809 sizeof(cmd->sense_buffer) - sense_size); 1809 SCSI_SENSE_BUFFERSIZE - sense_size);
1810 cmd->result |= (DRIVER_SENSE << 24); 1810 cmd->result |= (DRIVER_SENSE << 24);
1811#ifdef AHC_DEBUG 1811#ifdef AHC_DEBUG
1812 if (ahc_debug & AHC_SHOW_SENSE) { 1812 if (ahc_debug & AHC_SHOW_SENSE) {
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 8f8db5f0aef7..bcb0b870320c 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -2696,7 +2696,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
2696 { 2696 {
2697 pci_unmap_single(p->pdev, 2697 pci_unmap_single(p->pdev,
2698 le32_to_cpu(scb->sg_list[0].address), 2698 le32_to_cpu(scb->sg_list[0].address),
2699 sizeof(cmd->sense_buffer), 2699 SCSI_SENSE_BUFFERSIZE,
2700 PCI_DMA_FROMDEVICE); 2700 PCI_DMA_FROMDEVICE);
2701 } 2701 }
2702 if (scb->flags & SCB_RECOVERY_SCB) 2702 if (scb->flags & SCB_RECOVERY_SCB)
@@ -4267,13 +4267,13 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
4267 sizeof(generic_sense)); 4267 sizeof(generic_sense));
4268 4268
4269 scb->sense_cmd[1] = (cmd->device->lun << 5); 4269 scb->sense_cmd[1] = (cmd->device->lun << 5);
4270 scb->sense_cmd[4] = sizeof(cmd->sense_buffer); 4270 scb->sense_cmd[4] = SCSI_SENSE_BUFFERSIZE;
4271 4271
4272 scb->sg_list[0].length = 4272 scb->sg_list[0].length =
4273 cpu_to_le32(sizeof(cmd->sense_buffer)); 4273 cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
4274 scb->sg_list[0].address = 4274 scb->sg_list[0].address =
4275 cpu_to_le32(pci_map_single(p->pdev, cmd->sense_buffer, 4275 cpu_to_le32(pci_map_single(p->pdev, cmd->sense_buffer,
4276 sizeof(cmd->sense_buffer), 4276 SCSI_SENSE_BUFFERSIZE,
4277 PCI_DMA_FROMDEVICE)); 4277 PCI_DMA_FROMDEVICE));
4278 4278
4279 /* 4279 /*
@@ -4296,7 +4296,7 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
4296 hscb->residual_data_count[2] = 0; 4296 hscb->residual_data_count[2] = 0;
4297 4297
4298 scb->sg_count = hscb->SG_segment_count = 1; 4298 scb->sg_count = hscb->SG_segment_count = 1;
4299 scb->sg_length = sizeof(cmd->sense_buffer); 4299 scb->sg_length = SCSI_SENSE_BUFFERSIZE;
4300 scb->tag_action = 0; 4300 scb->tag_action = 0;
4301 scb->flags |= SCB_SENSE; 4301 scb->flags |= SCB_SENSE;
4302 /* 4302 /*
@@ -10293,7 +10293,6 @@ static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
10293 aic7xxx_position(cmd) = scb->hscb->tag; 10293 aic7xxx_position(cmd) = scb->hscb->tag;
10294 cmd->scsi_done = fn; 10294 cmd->scsi_done = fn;
10295 cmd->result = DID_OK; 10295 cmd->result = DID_OK;
10296 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
10297 aic7xxx_error(cmd) = DID_OK; 10296 aic7xxx_error(cmd) = DID_OK;
10298 aic7xxx_status(cmd) = 0; 10297 aic7xxx_status(cmd) = 0;
10299 cmd->host_scribble = NULL; 10298 cmd->host_scribble = NULL;
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 3dce618bf414..72042cae7768 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -165,7 +165,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
165 if (dev->port->oob_mode != SATA_OOB_MODE) { 165 if (dev->port->oob_mode != SATA_OOB_MODE) {
166 flags |= OPEN_REQUIRED; 166 flags |= OPEN_REQUIRED;
167 if ((dev->dev_type == SATA_DEV) || 167 if ((dev->dev_type == SATA_DEV) ||
168 (dev->tproto & SAS_PROTO_STP)) { 168 (dev->tproto & SAS_PROTOCOL_STP)) {
169 struct smp_resp *rps_resp = &dev->sata_dev.rps_resp; 169 struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
170 if (rps_resp->frame_type == SMP_RESPONSE && 170 if (rps_resp->frame_type == SMP_RESPONSE &&
171 rps_resp->function == SMP_REPORT_PHY_SATA && 171 rps_resp->function == SMP_REPORT_PHY_SATA &&
@@ -193,7 +193,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
193 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags); 193 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
194 194
195 flags = 0; 195 flags = 0;
196 if (dev->tproto & SAS_PROTO_STP) 196 if (dev->tproto & SAS_PROTOCOL_STP)
197 flags |= STP_CL_POL_NO_TX; 197 flags |= STP_CL_POL_NO_TX;
198 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags); 198 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags);
199 199
@@ -201,7 +201,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
201 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF); 201 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
202 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); 202 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
203 203
204 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTO_STP)) { 204 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
205 i = asd_init_sata(dev); 205 i = asd_init_sata(dev);
206 if (i < 0) { 206 if (i < 0) {
207 asd_free_ddb(asd_ha, ddb); 207 asd_free_ddb(asd_ha, ddb);
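The SAS_PROTO_* to SAS_PROTOCOL_* edits in this file and the aic94xx files below are a mechanical rename onto enum sas_protocol; the protocol bit values are unchanged, so masks such as dev->tproto & SAS_PROTOCOL_STP still work. A small sketch with the renamed identifiers:

/* Sketch: each SAS_PROTOCOL_* value is a distinct bit, testable with
 * '&' (as in asd_init_target_ddb()) or dispatched in a switch. */
#include <scsi/sas.h>

static const char *my_proto_name(enum sas_protocol proto)
{
	switch (proto) {
	case SAS_PROTOCOL_SATA:	return "SATA";
	case SAS_PROTOCOL_SMP:	return "SMP";
	case SAS_PROTOCOL_STP:	return "STP";
	case SAS_PROTOCOL_SSP:	return "SSP";
	default:		return "unknown";
	}
}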
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c
index 6bd8e3059d27..3d8c4ff1f2ef 100644
--- a/drivers/scsi/aic94xx/aic94xx_dump.c
+++ b/drivers/scsi/aic94xx/aic94xx_dump.c
@@ -903,11 +903,11 @@ void asd_dump_frame_rcvd(struct asd_phy *phy,
903 int i; 903 int i;
904 904
905 switch ((dl->status_block[1] & 0x70) >> 3) { 905 switch ((dl->status_block[1] & 0x70) >> 3) {
906 case SAS_PROTO_STP: 906 case SAS_PROTOCOL_STP:
907 ASD_DPRINTK("STP proto device-to-host FIS:\n"); 907 ASD_DPRINTK("STP proto device-to-host FIS:\n");
908 break; 908 break;
909 default: 909 default:
910 case SAS_PROTO_SSP: 910 case SAS_PROTOCOL_SSP:
911 ASD_DPRINTK("SAS proto IDENTIFY:\n"); 911 ASD_DPRINTK("SAS proto IDENTIFY:\n");
912 break; 912 break;
913 } 913 }
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 0cd7eed9196c..098b5f39cd31 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -91,7 +91,7 @@ static int asd_init_phy(struct asd_phy *phy)
91 91
92 sas_phy->enabled = 1; 92 sas_phy->enabled = 1;
93 sas_phy->class = SAS; 93 sas_phy->class = SAS;
94 sas_phy->iproto = SAS_PROTO_ALL; 94 sas_phy->iproto = SAS_PROTOCOL_ALL;
95 sas_phy->tproto = 0; 95 sas_phy->tproto = 0;
96 sas_phy->type = PHY_TYPE_PHYSICAL; 96 sas_phy->type = PHY_TYPE_PHYSICAL;
97 sas_phy->role = PHY_ROLE_INITIATOR; 97 sas_phy->role = PHY_ROLE_INITIATOR;
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h
index 491e5d8a98bc..150f6706d23f 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.h
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.h
@@ -72,6 +72,7 @@ struct flash_struct {
72 u8 manuf; 72 u8 manuf;
73 u8 dev_id; 73 u8 dev_id;
74 u8 sec_prot; 74 u8 sec_prot;
75 u8 method;
75 76
76 u32 dir_offs; 77 u32 dir_offs;
77}; 78};
@@ -216,6 +217,8 @@ struct asd_ha_struct {
216 struct dma_pool *scb_pool; 217 struct dma_pool *scb_pool;
217 218
218 struct asd_seq_data seq; /* sequencer related */ 219 struct asd_seq_data seq; /* sequencer related */
220 u32 bios_status;
221 const struct firmware *bios_image;
219}; 222};
220 223
221/* ---------- Common macros ---------- */ 224/* ---------- Common macros ---------- */
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index b70d6e7f96e9..5d761eb67442 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -29,6 +29,7 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/firmware.h>
32 33
33#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
34 35
@@ -36,6 +37,7 @@
36#include "aic94xx_reg.h" 37#include "aic94xx_reg.h"
37#include "aic94xx_hwi.h" 38#include "aic94xx_hwi.h"
38#include "aic94xx_seq.h" 39#include "aic94xx_seq.h"
40#include "aic94xx_sds.h"
39 41
40/* The format is "version.release.patchlevel" */ 42/* The format is "version.release.patchlevel" */
41#define ASD_DRIVER_VERSION "1.0.3" 43#define ASD_DRIVER_VERSION "1.0.3"
@@ -134,7 +136,7 @@ Err:
134 return err; 136 return err;
135} 137}
136 138
137static void __devexit asd_unmap_memio(struct asd_ha_struct *asd_ha) 139static void asd_unmap_memio(struct asd_ha_struct *asd_ha)
138{ 140{
139 struct asd_ha_addrspace *io_handle; 141 struct asd_ha_addrspace *io_handle;
140 142
@@ -171,7 +173,7 @@ static int __devinit asd_map_ioport(struct asd_ha_struct *asd_ha)
171 return err; 173 return err;
172} 174}
173 175
174static void __devexit asd_unmap_ioport(struct asd_ha_struct *asd_ha) 176static void asd_unmap_ioport(struct asd_ha_struct *asd_ha)
175{ 177{
176 pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET); 178 pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET);
177} 179}
@@ -208,7 +210,7 @@ Err:
208 return err; 210 return err;
209} 211}
210 212
211static void __devexit asd_unmap_ha(struct asd_ha_struct *asd_ha) 213static void asd_unmap_ha(struct asd_ha_struct *asd_ha)
212{ 214{
213 if (asd_ha->iospace) 215 if (asd_ha->iospace)
214 asd_unmap_ioport(asd_ha); 216 asd_unmap_ioport(asd_ha);
@@ -313,6 +315,181 @@ static ssize_t asd_show_dev_pcba_sn(struct device *dev,
313} 315}
314static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL); 316static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL);
315 317
318#define FLASH_CMD_NONE 0x00
319#define FLASH_CMD_UPDATE 0x01
320#define FLASH_CMD_VERIFY 0x02
321
322struct flash_command {
323 u8 command[8];
324 int code;
325};
326
327static struct flash_command flash_command_table[] =
328{
329 {"verify", FLASH_CMD_VERIFY},
330 {"update", FLASH_CMD_UPDATE},
 331 {"", FLASH_CMD_NONE} /* Empty command string terminates the table. */
332};
333
334struct error_bios {
335 char *reason;
336 int err_code;
337};
338
339static struct error_bios flash_error_table[] =
340{
341 {"Failed to open bios image file", FAIL_OPEN_BIOS_FILE},
342 {"PCI ID mismatch", FAIL_CHECK_PCI_ID},
343 {"Checksum mismatch", FAIL_CHECK_SUM},
344 {"Unknown Error", FAIL_UNKNOWN},
345 {"Failed to verify.", FAIL_VERIFY},
346 {"Failed to reset flash chip.", FAIL_RESET_FLASH},
347 {"Failed to find flash chip type.", FAIL_FIND_FLASH_ID},
 348 {"Failed to erase flash chip.", FAIL_ERASE_FLASH},
349 {"Failed to program flash chip.", FAIL_WRITE_FLASH},
350 {"Flash in progress", FLASH_IN_PROGRESS},
351 {"Image file size Error", FAIL_FILE_SIZE},
352 {"Input parameter error", FAIL_PARAMETERS},
353 {"Out of memory", FAIL_OUT_MEMORY},
354 {"OK", 0} /* Last entry err_code = 0. */
355};
356
357static ssize_t asd_store_update_bios(struct device *dev,
358 struct device_attribute *attr,
359 const char *buf, size_t count)
360{
361 struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
362 char *cmd_ptr, *filename_ptr;
363 struct bios_file_header header, *hdr_ptr;
364 int res, i;
365 u32 csum = 0;
366 int flash_command = FLASH_CMD_NONE;
367 int err = 0;
368
369 cmd_ptr = kzalloc(count*2, GFP_KERNEL);
370
371 if (!cmd_ptr) {
372 err = FAIL_OUT_MEMORY;
373 goto out;
374 }
375
376 filename_ptr = cmd_ptr + count;
377 res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr);
378 if (res != 2) {
379 err = FAIL_PARAMETERS;
380 goto out1;
381 }
382
383 for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) {
384 if (!memcmp(flash_command_table[i].command,
385 cmd_ptr, strlen(cmd_ptr))) {
386 flash_command = flash_command_table[i].code;
387 break;
388 }
389 }
390 if (flash_command == FLASH_CMD_NONE) {
391 err = FAIL_PARAMETERS;
392 goto out1;
393 }
394
395 if (asd_ha->bios_status == FLASH_IN_PROGRESS) {
396 err = FLASH_IN_PROGRESS;
397 goto out1;
398 }
399 err = request_firmware(&asd_ha->bios_image,
400 filename_ptr,
401 &asd_ha->pcidev->dev);
402 if (err) {
403 asd_printk("Failed to load bios image file %s, error %d\n",
404 filename_ptr, err);
405 err = FAIL_OPEN_BIOS_FILE;
406 goto out1;
407 }
408
409 hdr_ptr = (struct bios_file_header *)asd_ha->bios_image->data;
410
411 if ((hdr_ptr->contrl_id.vendor != asd_ha->pcidev->vendor ||
412 hdr_ptr->contrl_id.device != asd_ha->pcidev->device) &&
413 (hdr_ptr->contrl_id.sub_vendor != asd_ha->pcidev->vendor ||
414 hdr_ptr->contrl_id.sub_device != asd_ha->pcidev->device)) {
415
416 ASD_DPRINTK("The PCI vendor or device id does not match\n");
417 ASD_DPRINTK("vendor=%x dev=%x sub_vendor=%x sub_dev=%x"
418 " pci vendor=%x pci dev=%x\n",
419 hdr_ptr->contrl_id.vendor,
420 hdr_ptr->contrl_id.device,
421 hdr_ptr->contrl_id.sub_vendor,
422 hdr_ptr->contrl_id.sub_device,
423 asd_ha->pcidev->vendor,
424 asd_ha->pcidev->device);
425 err = FAIL_CHECK_PCI_ID;
426 goto out2;
427 }
428
429 if (hdr_ptr->filelen != asd_ha->bios_image->size) {
430 err = FAIL_FILE_SIZE;
431 goto out2;
432 }
433
434 /* calculate checksum */
435 for (i = 0; i < hdr_ptr->filelen; i++)
436 csum += asd_ha->bios_image->data[i];
437
438 if ((csum & 0x0000ffff) != hdr_ptr->checksum) {
439 ASD_DPRINTK("BIOS file checksum mismatch\n");
440 err = FAIL_CHECK_SUM;
441 goto out2;
442 }
443 if (flash_command == FLASH_CMD_UPDATE) {
444 asd_ha->bios_status = FLASH_IN_PROGRESS;
445 err = asd_write_flash_seg(asd_ha,
446 &asd_ha->bios_image->data[sizeof(*hdr_ptr)],
447 0, hdr_ptr->filelen-sizeof(*hdr_ptr));
448 if (!err)
449 err = asd_verify_flash_seg(asd_ha,
450 &asd_ha->bios_image->data[sizeof(*hdr_ptr)],
451 0, hdr_ptr->filelen-sizeof(*hdr_ptr));
452 } else {
453 asd_ha->bios_status = FLASH_IN_PROGRESS;
454 err = asd_verify_flash_seg(asd_ha,
455 &asd_ha->bios_image->data[sizeof(header)],
456 0, hdr_ptr->filelen-sizeof(header));
457 }
458
459out2:
460 release_firmware(asd_ha->bios_image);
461out1:
462 kfree(cmd_ptr);
463out:
464 asd_ha->bios_status = err;
465
466 if (!err)
467 return count;
468 else
469 return -err;
470}
471
472static ssize_t asd_show_update_bios(struct device *dev,
473 struct device_attribute *attr, char *buf)
474{
475 int i;
476 struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
477
478 for (i = 0; flash_error_table[i].err_code != 0; i++) {
479 if (flash_error_table[i].err_code == asd_ha->bios_status)
480 break;
481 }
482 if (asd_ha->bios_status != FLASH_IN_PROGRESS)
483 asd_ha->bios_status = FLASH_OK;
484
485 return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
486 flash_error_table[i].err_code,
487 flash_error_table[i].reason);
488}
489
490static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
491 asd_show_update_bios, asd_store_update_bios);
492
316static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) 493static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
317{ 494{
318 int err; 495 int err;
@@ -328,9 +505,14 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
328 err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); 505 err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
329 if (err) 506 if (err)
330 goto err_biosb; 507 goto err_biosb;
508 err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
509 if (err)
510 goto err_update_bios;
331 511
332 return 0; 512 return 0;
333 513
514err_update_bios:
515 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
334err_biosb: 516err_biosb:
335 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); 517 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
336err_rev: 518err_rev:
@@ -343,6 +525,7 @@ static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
343 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); 525 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
344 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); 526 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
345 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); 527 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
528 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
346} 529}
347 530
348/* The first entry, 0, is used for dynamic ids, the rest for devices 531/* The first entry, 0, is used for dynamic ids, the rest for devices
@@ -589,6 +772,7 @@ static int __devinit asd_pci_probe(struct pci_dev *dev,
589 asd_ha->sas_ha.dev = &asd_ha->pcidev->dev; 772 asd_ha->sas_ha.dev = &asd_ha->pcidev->dev;
590 asd_ha->sas_ha.lldd_ha = asd_ha; 773 asd_ha->sas_ha.lldd_ha = asd_ha;
591 774
775 asd_ha->bios_status = FLASH_OK;
592 asd_ha->name = asd_dev->name; 776 asd_ha->name = asd_dev->name;
593 asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev)); 777 asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev));
594 778
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index db6ab1a3b81e..0febad4dd75f 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -788,12 +788,12 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
788 788
789 /* initiator port settings are in the hi nibble */ 789 /* initiator port settings are in the hi nibble */
790 if (phy->sas_phy.role == PHY_ROLE_INITIATOR) 790 if (phy->sas_phy.role == PHY_ROLE_INITIATOR)
791 control_phy->port_type = SAS_PROTO_ALL << 4; 791 control_phy->port_type = SAS_PROTOCOL_ALL << 4;
792 else if (phy->sas_phy.role == PHY_ROLE_TARGET) 792 else if (phy->sas_phy.role == PHY_ROLE_TARGET)
793 control_phy->port_type = SAS_PROTO_ALL; 793 control_phy->port_type = SAS_PROTOCOL_ALL;
794 else 794 else
795 control_phy->port_type = 795 control_phy->port_type =
796 (SAS_PROTO_ALL << 4) | SAS_PROTO_ALL; 796 (SAS_PROTOCOL_ALL << 4) | SAS_PROTOCOL_ALL;
797 797
798 /* link reset retries, this should be nominal */ 798 /* link reset retries, this should be nominal */
799 control_phy->link_reset_retries = 10; 799 control_phy->link_reset_retries = 10;
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 06509bff71f7..2a4c933eb89c 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -30,6 +30,7 @@
30 30
31#include "aic94xx.h" 31#include "aic94xx.h"
32#include "aic94xx_reg.h" 32#include "aic94xx_reg.h"
33#include "aic94xx_sds.h"
33 34
34/* ---------- OCM stuff ---------- */ 35/* ---------- OCM stuff ---------- */
35 36
@@ -1083,3 +1084,391 @@ out:
1083 kfree(flash_dir); 1084 kfree(flash_dir);
1084 return err; 1085 return err;
1085} 1086}
1087
1088/**
1089 * asd_verify_flash_seg - verify data with flash memory
1090 * @asd_ha: pointer to the host adapter structure
1091 * @src: pointer to the source data to be verified
1092 * @dest_offset: offset from flash memory
1093 * @bytes_to_verify: total bytes to verify
1094 */
1095int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
1096 void *src, u32 dest_offset, u32 bytes_to_verify)
1097{
1098 u8 *src_buf;
1099 u8 flash_char;
1100 int err;
1101 u32 nv_offset, reg, i;
1102
1103 reg = asd_ha->hw_prof.flash.bar;
1104 src_buf = NULL;
1105
1106 err = FLASH_OK;
1107 nv_offset = dest_offset;
1108 src_buf = (u8 *)src;
1109 for (i = 0; i < bytes_to_verify; i++) {
1110 flash_char = asd_read_reg_byte(asd_ha, reg + nv_offset + i);
1111 if (flash_char != src_buf[i]) {
1112 err = FAIL_VERIFY;
1113 break;
1114 }
1115 }
1116 return err;
1117}
1118
1119/**
1120 * asd_write_flash_seg - write data into flash memory
1121 * @asd_ha: pointer to the host adapter structure
1122 * @src: pointer to the source data to be written
1123 * @dest_offset: offset from flash memory
1124 * @bytes_to_write: total bytes to write
1125 */
1126int asd_write_flash_seg(struct asd_ha_struct *asd_ha,
1127 void *src, u32 dest_offset, u32 bytes_to_write)
1128{
1129 u8 *src_buf;
1130 u32 nv_offset, reg, i;
1131 int err;
1132
1133 reg = asd_ha->hw_prof.flash.bar;
1134 src_buf = NULL;
1135
1136 err = asd_check_flash_type(asd_ha);
1137 if (err) {
1138 ASD_DPRINTK("couldn't find the type of flash. err=%d\n", err);
1139 return err;
1140 }
1141
1142 nv_offset = dest_offset;
1143 err = asd_erase_nv_sector(asd_ha, nv_offset, bytes_to_write);
1144 if (err) {
1145 ASD_DPRINTK("Erase failed at offset:0x%x\n",
1146 nv_offset);
1147 return err;
1148 }
1149
1150 err = asd_reset_flash(asd_ha);
1151 if (err) {
1152 ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
1153 return err;
1154 }
1155
1156 src_buf = (u8 *)src;
1157 for (i = 0; i < bytes_to_write; i++) {
1158 /* Setup program command sequence */
1159 switch (asd_ha->hw_prof.flash.method) {
1160 case FLASH_METHOD_A:
1161 {
1162 asd_write_reg_byte(asd_ha,
1163 (reg + 0xAAA), 0xAA);
1164 asd_write_reg_byte(asd_ha,
1165 (reg + 0x555), 0x55);
1166 asd_write_reg_byte(asd_ha,
1167 (reg + 0xAAA), 0xA0);
1168 asd_write_reg_byte(asd_ha,
1169 (reg + nv_offset + i),
1170 (*(src_buf + i)));
1171 break;
1172 }
1173 case FLASH_METHOD_B:
1174 {
1175 asd_write_reg_byte(asd_ha,
1176 (reg + 0x555), 0xAA);
1177 asd_write_reg_byte(asd_ha,
1178 (reg + 0x2AA), 0x55);
1179 asd_write_reg_byte(asd_ha,
1180 (reg + 0x555), 0xA0);
1181 asd_write_reg_byte(asd_ha,
1182 (reg + nv_offset + i),
1183 (*(src_buf + i)));
1184 break;
1185 }
1186 default:
1187 break;
1188 }
1189 if (asd_chk_write_status(asd_ha,
1190 (nv_offset + i), 0) != 0) {
1191 ASD_DPRINTK("aicx: Write failed at offset:0x%x\n",
1192 reg + nv_offset + i);
1193 return FAIL_WRITE_FLASH;
1194 }
1195 }
1196
1197 err = asd_reset_flash(asd_ha);
1198 if (err) {
1199 ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
1200 return err;
1201 }
1202 return 0;
1203}
1204
1205int asd_chk_write_status(struct asd_ha_struct *asd_ha,
1206 u32 sector_addr, u8 erase_flag)
1207{
1208 u32 reg;
1209 u32 loop_cnt;
1210 u8 nv_data1, nv_data2;
1211 u8 toggle_bit1;
1212
1213 /*
 1214 * Reading DQ2 requires the sector address,
 1215 * while it is a don't-care for DQ6
1216 */
1217 reg = asd_ha->hw_prof.flash.bar;
1218
1219 for (loop_cnt = 0; loop_cnt < 50000; loop_cnt++) {
1220 nv_data1 = asd_read_reg_byte(asd_ha, reg);
1221 nv_data2 = asd_read_reg_byte(asd_ha, reg);
1222
1223 toggle_bit1 = ((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6)
1224 ^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6));
1225
1226 if (toggle_bit1 == 0) {
1227 return 0;
1228 } else {
1229 if (nv_data2 & FLASH_STATUS_BIT_MASK_DQ5) {
1230 nv_data1 = asd_read_reg_byte(asd_ha,
1231 reg);
1232 nv_data2 = asd_read_reg_byte(asd_ha,
1233 reg);
1234 toggle_bit1 =
1235 ((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6)
1236 ^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6));
1237
1238 if (toggle_bit1 == 0)
1239 return 0;
1240 }
1241 }
1242
1243 /*
1244 * ERASE is a sector-by-sector operation and requires
 1245 * more time to finish, while WRITE is a byte-by-byte
 1246 * operation and takes less time to finish.
1247 *
1248 * For some strange reason a reduced ERASE delay gives different
1249 * behaviour across different spirit boards. Hence we set
 1250 * an optimum balance of 50 us for ERASE, which works well
1251 * across all boards.
1252 */
1253 if (erase_flag) {
1254 udelay(FLASH_STATUS_ERASE_DELAY_COUNT);
1255 } else {
1256 udelay(FLASH_STATUS_WRITE_DELAY_COUNT);
1257 }
1258 }
1259 return -1;
1260}
1261
1262/**
1263 * asd_hwi_erase_nv_sector - Erase the flash memory sectors.
1264 * @asd_ha: pointer to the host adapter structure
1265 * @flash_addr: pointer to offset from flash memory
1266 * @size: total bytes to erase.
1267 */
1268int asd_erase_nv_sector(struct asd_ha_struct *asd_ha, u32 flash_addr, u32 size)
1269{
1270 u32 reg;
1271 u32 sector_addr;
1272
1273 reg = asd_ha->hw_prof.flash.bar;
1274
 1275 /* sector starting address */
1276 sector_addr = flash_addr & FLASH_SECTOR_SIZE_MASK;
1277
1278 /*
 1279 * Erasing a flash sector needs to be done in six consecutive
 1280 * write cycles.
1281 */
1282 while (sector_addr < flash_addr+size) {
1283 switch (asd_ha->hw_prof.flash.method) {
1284 case FLASH_METHOD_A:
1285 asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA);
1286 asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55);
1287 asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0x80);
1288 asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA);
1289 asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55);
1290 asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30);
1291 break;
1292 case FLASH_METHOD_B:
1293 asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
1294 asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
1295 asd_write_reg_byte(asd_ha, (reg + 0x555), 0x80);
1296 asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
1297 asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
1298 asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30);
1299 break;
1300 default:
1301 break;
1302 }
1303
1304 if (asd_chk_write_status(asd_ha, sector_addr, 1) != 0)
1305 return FAIL_ERASE_FLASH;
1306
1307 sector_addr += FLASH_SECTOR_SIZE;
1308 }
1309
1310 return 0;
1311}
1312
1313int asd_check_flash_type(struct asd_ha_struct *asd_ha)
1314{
1315 u8 manuf_id;
1316 u8 dev_id;
1317 u8 sec_prot;
1318 u32 inc;
1319 u32 reg;
1320 int err;
1321
1322 /* get Flash memory base address */
1323 reg = asd_ha->hw_prof.flash.bar;
1324
1325 /* Determine flash info */
1326 err = asd_reset_flash(asd_ha);
1327 if (err) {
1328 ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
1329 return err;
1330 }
1331
1332 asd_ha->hw_prof.flash.method = FLASH_METHOD_UNKNOWN;
1333 asd_ha->hw_prof.flash.manuf = FLASH_MANUF_ID_UNKNOWN;
1334 asd_ha->hw_prof.flash.dev_id = FLASH_DEV_ID_UNKNOWN;
1335
1336 /* Get flash info. This would most likely be AMD Am29LV family flash.
1337 * First try the sequence for word mode. It is the same as for
1338 * 008B (byte mode only), 160B (word mode) and 800D (word mode).
1339 */
1340 inc = asd_ha->hw_prof.flash.wide ? 2 : 1;
1341 asd_write_reg_byte(asd_ha, reg + 0xAAA, 0xAA);
1342 asd_write_reg_byte(asd_ha, reg + 0x555, 0x55);
1343 asd_write_reg_byte(asd_ha, reg + 0xAAA, 0x90);
1344 manuf_id = asd_read_reg_byte(asd_ha, reg);
1345 dev_id = asd_read_reg_byte(asd_ha, reg + inc);
1346 sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc);
1347 /* Get out of autoselect mode. */
1348 err = asd_reset_flash(asd_ha);
1349 if (err) {
1350 ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
1351 return err;
1352 }
1353 ASD_DPRINTK("Flash MethodA manuf_id(0x%x) dev_id(0x%x) "
1354 "sec_prot(0x%x)\n", manuf_id, dev_id, sec_prot);
1355 err = asd_reset_flash(asd_ha);
1356 if (err != 0)
1357 return err;
1358
1359 switch (manuf_id) {
1360 case FLASH_MANUF_ID_AMD:
1361 switch (sec_prot) {
1362 case FLASH_DEV_ID_AM29LV800DT:
1363 case FLASH_DEV_ID_AM29LV640MT:
1364 case FLASH_DEV_ID_AM29F800B:
1365 asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
1366 break;
1367 default:
1368 break;
1369 }
1370 break;
1371 case FLASH_MANUF_ID_ST:
1372 switch (sec_prot) {
1373 case FLASH_DEV_ID_STM29W800DT:
1374 case FLASH_DEV_ID_STM29LV640:
1375 asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
1376 break;
1377 default:
1378 break;
1379 }
1380 break;
1381 case FLASH_MANUF_ID_FUJITSU:
1382 switch (sec_prot) {
1383 case FLASH_DEV_ID_MBM29LV800TE:
1384 case FLASH_DEV_ID_MBM29DL800TA:
1385 asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
1386 break;
1387 }
1388 break;
1389 case FLASH_MANUF_ID_MACRONIX:
1390 switch (sec_prot) {
1391 case FLASH_DEV_ID_MX29LV800BT:
1392 asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
1393 break;
1394 }
1395 break;
1396 }
1397
1398 if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN) {
1399 err = asd_reset_flash(asd_ha);
1400 if (err) {
1401 ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
1402 return err;
1403 }
1404
1405 /* Issue Unlock sequence for AM29LV008BT */
1406 asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
1407 asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
1408 asd_write_reg_byte(asd_ha, (reg + 0x555), 0x90);
1409 manuf_id = asd_read_reg_byte(asd_ha, reg);
1410 dev_id = asd_read_reg_byte(asd_ha, reg + inc);
1411 sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc);
1412
1413 ASD_DPRINTK("Flash MethodB manuf_id(0x%x) dev_id(0x%x) sec_prot"
1414 "(0x%x)\n", manuf_id, dev_id, sec_prot);
1415
1416 err = asd_reset_flash(asd_ha);
1417 if (err != 0) {
1418 ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
1419 return err;
1420 }
1421
1422 switch (manuf_id) {
1423 case FLASH_MANUF_ID_AMD:
1424 switch (dev_id) {
1425 case FLASH_DEV_ID_AM29LV008BT:
1426 asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
1427 break;
1428 default:
1429 break;
1430 }
1431 break;
1432 case FLASH_MANUF_ID_ST:
1433 switch (dev_id) {
1434 case FLASH_DEV_ID_STM29008:
1435 asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
1436 break;
1437 default:
1438 break;
1439 }
1440 break;
1441 case FLASH_MANUF_ID_FUJITSU:
1442 switch (dev_id) {
1443 case FLASH_DEV_ID_MBM29LV008TA:
1444 asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
1445 break;
1446 }
1447 break;
1448 case FLASH_MANUF_ID_INTEL:
1449 switch (dev_id) {
1450 case FLASH_DEV_ID_I28LV00TAT:
1451 asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
1452 break;
1453 }
1454 break;
1455 case FLASH_MANUF_ID_MACRONIX:
1456 switch (dev_id) {
1457 case FLASH_DEV_ID_I28LV00TAT:
1458 asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
1459 break;
1460 }
1461 break;
1462 default:
1463 return FAIL_FIND_FLASH_ID;
1464 }
1465 }
1466
1467 if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN)
1468 return FAIL_FIND_FLASH_ID;
1469
1470 asd_ha->hw_prof.flash.manuf = manuf_id;
1471 asd_ha->hw_prof.flash.dev_id = dev_id;
1472 asd_ha->hw_prof.flash.sec_prot = sec_prot;
1473 return 0;
1474}
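asd_chk_write_status() above implements the AMD-style toggle-bit poll: while an embedded program or erase cycle runs, successive status reads see DQ6 alternate, and DQ5 going high signals a timeout worth one final check. The core test, reduced to a sketch with a hypothetical read callback:

/* Sketch: two status reads that agree on DQ6 mean the embedded
 * operation has finished. */
static int my_flash_op_done(u8 (*read_status)(void))
{
	u8 a = read_status();
	u8 b = read_status();

	return ((a ^ b) & 0x40) == 0;	/* 0x40: FLASH_STATUS_BIT_MASK_DQ6 */
}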
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.h b/drivers/scsi/aic94xx/aic94xx_sds.h
new file mode 100644
index 000000000000..bb9795a04dc3
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_sds.h
@@ -0,0 +1,121 @@
1/*
2 * Aic94xx SAS/SATA driver hardware interface header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Gilbert Wu <gilbert_wu@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26#ifndef _AIC94XX_SDS_H_
27#define _AIC94XX_SDS_H_
28
29enum {
30 FLASH_METHOD_UNKNOWN,
31 FLASH_METHOD_A,
32 FLASH_METHOD_B
33};
34
35#define FLASH_MANUF_ID_AMD 0x01
36#define FLASH_MANUF_ID_ST 0x20
37#define FLASH_MANUF_ID_FUJITSU 0x04
38#define FLASH_MANUF_ID_MACRONIX 0xC2
39#define FLASH_MANUF_ID_INTEL 0x89
40#define FLASH_MANUF_ID_UNKNOWN 0xFF
41
42#define FLASH_DEV_ID_AM29LV008BT 0x3E
43#define FLASH_DEV_ID_AM29LV800DT 0xDA
44#define FLASH_DEV_ID_STM29W800DT 0xD7
45#define FLASH_DEV_ID_STM29LV640 0xDE
46#define FLASH_DEV_ID_STM29008 0xEA
47#define FLASH_DEV_ID_MBM29LV800TE 0xDA
48#define FLASH_DEV_ID_MBM29DL800TA 0x4A
49#define FLASH_DEV_ID_MBM29LV008TA 0x3E
50#define FLASH_DEV_ID_AM29LV640MT 0x7E
51#define FLASH_DEV_ID_AM29F800B 0xD6
52#define FLASH_DEV_ID_MX29LV800BT 0xDA
53#define FLASH_DEV_ID_MX29LV008CT 0xDA
54#define FLASH_DEV_ID_I28LV00TAT 0x3E
55#define FLASH_DEV_ID_UNKNOWN 0xFF
56
57/* status bit mask values */
58#define FLASH_STATUS_BIT_MASK_DQ6 0x40
59#define FLASH_STATUS_BIT_MASK_DQ5 0x20
60#define FLASH_STATUS_BIT_MASK_DQ2 0x04
61
62 /* minimum value in microseconds needed for checking status */
63#define FLASH_STATUS_ERASE_DELAY_COUNT 50
64#define FLASH_STATUS_WRITE_DELAY_COUNT 25
65
66#define FLASH_SECTOR_SIZE 0x010000
67#define FLASH_SECTOR_SIZE_MASK 0xffff0000
68
69#define FLASH_OK 0x000000
70#define FAIL_OPEN_BIOS_FILE 0x000100
71#define FAIL_CHECK_PCI_ID 0x000200
72#define FAIL_CHECK_SUM 0x000300
73#define FAIL_UNKNOWN 0x000400
74#define FAIL_VERIFY 0x000500
75#define FAIL_RESET_FLASH 0x000600
76#define FAIL_FIND_FLASH_ID 0x000700
77#define FAIL_ERASE_FLASH 0x000800
78#define FAIL_WRITE_FLASH 0x000900
79#define FAIL_FILE_SIZE 0x000a00
80#define FAIL_PARAMETERS 0x000b00
81#define FAIL_OUT_MEMORY 0x000c00
82#define FLASH_IN_PROGRESS 0x001000
83
84struct controller_id {
85 u32 vendor; /* PCI Vendor ID */
86 u32 device; /* PCI Device ID */
87 u32 sub_vendor; /* PCI Subvendor ID */
88 u32 sub_device; /* PCI Subdevice ID */
89};
90
91struct image_info {
92 u32 ImageId; /* Identifies the image */
 93 u32 ImageOffset; /* Offset from the beginning of the file */
94 u32 ImageLength; /* length of the image */
95 u32 ImageChecksum; /* Image checksum */
96 u32 ImageVersion; /* Version of the image, could be build number */
97};
98
99struct bios_file_header {
100 u8 signature[32]; /* Signature/Cookie to identify the file */
101 u32 checksum; /*Entire file checksum with this field zero */
102 u32 antidote; /* Entire file checksum with this field 0xFFFFFFFF */
103 struct controller_id contrl_id; /*PCI id to identify the controller */
104 u32 filelen; /*Length of the entire file*/
105 u32 chunk_num; /*The chunk/part number for multiple Image files */
106 u32 total_chunks; /*Total number of chunks/parts in the image file */
107 u32 num_images; /* Number of images in the file */
108 u32 build_num; /* Build number of this image */
109 struct image_info image_header;
110};
111
112int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
113 void *src, u32 dest_offset, u32 bytes_to_verify);
114int asd_write_flash_seg(struct asd_ha_struct *asd_ha,
115 void *src, u32 dest_offset, u32 bytes_to_write);
116int asd_chk_write_status(struct asd_ha_struct *asd_ha,
117 u32 sector_addr, u8 erase_flag);
118int asd_check_flash_type(struct asd_ha_struct *asd_ha);
119int asd_erase_nv_sector(struct asd_ha_struct *asd_ha,
120 u32 flash_addr, u32 size);
121#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index ee0a98bffcd4..965d4bb999d9 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -187,29 +187,13 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
187 ts->buf_valid_size = 0; 187 ts->buf_valid_size = 0;
188 edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index]; 188 edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
189 r = edb->vaddr; 189 r = edb->vaddr;
190 if (task->task_proto == SAS_PROTO_SSP) { 190 if (task->task_proto == SAS_PROTOCOL_SSP) {
191 struct ssp_response_iu *iu = 191 struct ssp_response_iu *iu =
192 r + 16 + sizeof(struct ssp_frame_hdr); 192 r + 16 + sizeof(struct ssp_frame_hdr);
193 193
194 ts->residual = le32_to_cpu(*(__le32 *)r); 194 ts->residual = le32_to_cpu(*(__le32 *)r);
195 ts->resp = SAS_TASK_COMPLETE; 195
196 if (iu->datapres == 0) 196 sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu);
197 ts->stat = iu->status;
198 else if (iu->datapres == 1)
199 ts->stat = iu->resp_data[3];
200 else if (iu->datapres == 2) {
201 ts->stat = SAM_CHECK_COND;
202 ts->buf_valid_size = min((u32) SAS_STATUS_BUF_SIZE,
203 be32_to_cpu(iu->sense_data_len));
204 memcpy(ts->buf, iu->sense_data, ts->buf_valid_size);
205 if (iu->status != SAM_CHECK_COND) {
206 ASD_DPRINTK("device %llx sent sense data, but "
207 "stat(0x%x) is not CHECK_CONDITION"
208 "\n",
209 SAS_ADDR(task->dev->sas_addr),
210 iu->status);
211 }
212 }
213 } else { 197 } else {
214 struct ata_task_resp *resp = (void *) &ts->buf[0]; 198 struct ata_task_resp *resp = (void *) &ts->buf[0];
215 199
@@ -341,14 +325,14 @@ Again:
341 } 325 }
342 326
343 switch (task->task_proto) { 327 switch (task->task_proto) {
344 case SATA_PROTO: 328 case SAS_PROTOCOL_SATA:
345 case SAS_PROTO_STP: 329 case SAS_PROTOCOL_STP:
346 asd_unbuild_ata_ascb(ascb); 330 asd_unbuild_ata_ascb(ascb);
347 break; 331 break;
348 case SAS_PROTO_SMP: 332 case SAS_PROTOCOL_SMP:
349 asd_unbuild_smp_ascb(ascb); 333 asd_unbuild_smp_ascb(ascb);
350 break; 334 break;
351 case SAS_PROTO_SSP: 335 case SAS_PROTOCOL_SSP:
352 asd_unbuild_ssp_ascb(ascb); 336 asd_unbuild_ssp_ascb(ascb);
353 default: 337 default:
354 break; 338 break;
@@ -586,17 +570,17 @@ int asd_execute_task(struct sas_task *task, const int num,
586 list_for_each_entry(a, &alist, list) { 570 list_for_each_entry(a, &alist, list) {
587 t = a->uldd_task; 571 t = a->uldd_task;
588 a->uldd_timer = 1; 572 a->uldd_timer = 1;
589 if (t->task_proto & SAS_PROTO_STP) 573 if (t->task_proto & SAS_PROTOCOL_STP)
590 t->task_proto = SAS_PROTO_STP; 574 t->task_proto = SAS_PROTOCOL_STP;
591 switch (t->task_proto) { 575 switch (t->task_proto) {
592 case SATA_PROTO: 576 case SAS_PROTOCOL_SATA:
593 case SAS_PROTO_STP: 577 case SAS_PROTOCOL_STP:
594 res = asd_build_ata_ascb(a, t, gfp_flags); 578 res = asd_build_ata_ascb(a, t, gfp_flags);
595 break; 579 break;
596 case SAS_PROTO_SMP: 580 case SAS_PROTOCOL_SMP:
597 res = asd_build_smp_ascb(a, t, gfp_flags); 581 res = asd_build_smp_ascb(a, t, gfp_flags);
598 break; 582 break;
599 case SAS_PROTO_SSP: 583 case SAS_PROTOCOL_SSP:
600 res = asd_build_ssp_ascb(a, t, gfp_flags); 584 res = asd_build_ssp_ascb(a, t, gfp_flags);
601 break; 585 break;
602 default: 586 default:
@@ -633,14 +617,14 @@ out_err_unmap:
633 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 617 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
634 spin_unlock_irqrestore(&t->task_state_lock, flags); 618 spin_unlock_irqrestore(&t->task_state_lock, flags);
635 switch (t->task_proto) { 619 switch (t->task_proto) {
636 case SATA_PROTO: 620 case SAS_PROTOCOL_SATA:
637 case SAS_PROTO_STP: 621 case SAS_PROTOCOL_STP:
638 asd_unbuild_ata_ascb(a); 622 asd_unbuild_ata_ascb(a);
639 break; 623 break;
640 case SAS_PROTO_SMP: 624 case SAS_PROTOCOL_SMP:
641 asd_unbuild_smp_ascb(a); 625 asd_unbuild_smp_ascb(a);
642 break; 626 break;
643 case SAS_PROTO_SSP: 627 case SAS_PROTOCOL_SSP:
644 asd_unbuild_ssp_ascb(a); 628 asd_unbuild_ssp_ascb(a);
645 default: 629 default:
646 break; 630 break;
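The SSP completion hunk above swaps the open-coded datapres decoding for libsas' sas_ssp_task_response() (introduced by this series in libsas/sas_task.c), which fills task->task_status from the response IU, including the sense-data copy on a CHECK CONDITION. Sketched usage:

/* Sketch: completing an SSP command through the shared helper. */
#include <scsi/libsas.h>

static void my_complete_ssp(struct device *dev, struct sas_task *task,
			    struct ssp_response_iu *iu)
{
	sas_ssp_task_response(dev, task, iu);	/* sets resp/stat/buf */
}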
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index c0d0b7d7a8ce..87b2f6e6adfe 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -372,21 +372,21 @@ int asd_abort_task(struct sas_task *task)
372 scb->header.opcode = ABORT_TASK; 372 scb->header.opcode = ABORT_TASK;
373 373
374 switch (task->task_proto) { 374 switch (task->task_proto) {
375 case SATA_PROTO: 375 case SAS_PROTOCOL_SATA:
376 case SAS_PROTO_STP: 376 case SAS_PROTOCOL_STP:
377 scb->abort_task.proto_conn_rate = (1 << 5); /* STP */ 377 scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
378 break; 378 break;
379 case SAS_PROTO_SSP: 379 case SAS_PROTOCOL_SSP:
380 scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */ 380 scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
381 scb->abort_task.proto_conn_rate |= task->dev->linkrate; 381 scb->abort_task.proto_conn_rate |= task->dev->linkrate;
382 break; 382 break;
383 case SAS_PROTO_SMP: 383 case SAS_PROTOCOL_SMP:
384 break; 384 break;
385 default: 385 default:
386 break; 386 break;
387 } 387 }
388 388
389 if (task->task_proto == SAS_PROTO_SSP) { 389 if (task->task_proto == SAS_PROTOCOL_SSP) {
390 scb->abort_task.ssp_frame.frame_type = SSP_TASK; 390 scb->abort_task.ssp_frame.frame_type = SSP_TASK;
391 memcpy(scb->abort_task.ssp_frame.hashed_dest_addr, 391 memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
392 task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); 392 task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
@@ -512,7 +512,7 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
512 int res = 1; 512 int res = 1;
513 struct scb *scb; 513 struct scb *scb;
514 514
515 if (!(dev->tproto & SAS_PROTO_SSP)) 515 if (!(dev->tproto & SAS_PROTOCOL_SSP))
516 return TMF_RESP_FUNC_ESUPP; 516 return TMF_RESP_FUNC_ESUPP;
517 517
518 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); 518 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index d466a2dac1db..d80dba913a75 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -634,9 +634,9 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
634 pcmd->result = DID_OK << 16; 634 pcmd->result = DID_OK << 16;
635 if (sensebuffer) { 635 if (sensebuffer) {
636 int sense_data_length = 636 int sense_data_length =
637 sizeof(struct SENSE_DATA) < sizeof(pcmd->sense_buffer) 637 sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
638 ? sizeof(struct SENSE_DATA) : sizeof(pcmd->sense_buffer); 638 ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
639 memset(sensebuffer, 0, sizeof(pcmd->sense_buffer)); 639 memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
640 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length); 640 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
641 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS; 641 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
642 sensebuffer->Valid = 1; 642 sensebuffer->Valid = 1;
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index a9680b5e8ac6..93b61f148653 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -511,9 +511,9 @@ static inline void initialize_SCp(Scsi_Cmnd *cmd)
511 * various queues are valid. 511 * various queues are valid.
512 */ 512 */
513 513
514 if (cmd->use_sg) { 514 if (scsi_bufflen(cmd)) {
515 cmd->SCp.buffer = (struct scatterlist *)cmd->request_buffer; 515 cmd->SCp.buffer = scsi_sglist(cmd);
516 cmd->SCp.buffers_residual = cmd->use_sg - 1; 516 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
517 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); 517 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
518 cmd->SCp.this_residual = cmd->SCp.buffer->length; 518 cmd->SCp.this_residual = cmd->SCp.buffer->length;
519 /* ++roman: Try to merge some scatter-buffers if they are at 519 /* ++roman: Try to merge some scatter-buffers if they are at
@@ -523,8 +523,8 @@ static inline void initialize_SCp(Scsi_Cmnd *cmd)
523 } else { 523 } else {
524 cmd->SCp.buffer = NULL; 524 cmd->SCp.buffer = NULL;
525 cmd->SCp.buffers_residual = 0; 525 cmd->SCp.buffers_residual = 0;
526 cmd->SCp.ptr = (char *)cmd->request_buffer; 526 cmd->SCp.ptr = NULL;
527 cmd->SCp.this_residual = cmd->request_bufflen; 527 cmd->SCp.this_residual = 0;
528 } 528 }
529} 529}
530 530
@@ -936,21 +936,21 @@ static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
936 } 936 }
937# endif 937# endif
938# ifdef NCR5380_STAT_LIMIT 938# ifdef NCR5380_STAT_LIMIT
939 if (cmd->request_bufflen > NCR5380_STAT_LIMIT) 939 if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
940# endif 940# endif
941 switch (cmd->cmnd[0]) { 941 switch (cmd->cmnd[0]) {
942 case WRITE: 942 case WRITE:
943 case WRITE_6: 943 case WRITE_6:
944 case WRITE_10: 944 case WRITE_10:
945 hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase); 945 hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
946 hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen; 946 hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
947 hostdata->pendingw++; 947 hostdata->pendingw++;
948 break; 948 break;
949 case READ: 949 case READ:
950 case READ_6: 950 case READ_6:
951 case READ_10: 951 case READ_10:
952 hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase); 952 hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
953 hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen; 953 hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
954 hostdata->pendingr++; 954 hostdata->pendingr++;
955 break; 955 break;
956 } 956 }
@@ -1352,21 +1352,21 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1352static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd) 1352static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd)
1353{ 1353{
1354# ifdef NCR5380_STAT_LIMIT 1354# ifdef NCR5380_STAT_LIMIT
1355 if (cmd->request_bufflen > NCR5380_STAT_LIMIT) 1355 if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
1356# endif 1356# endif
1357 switch (cmd->cmnd[0]) { 1357 switch (cmd->cmnd[0]) {
1358 case WRITE: 1358 case WRITE:
1359 case WRITE_6: 1359 case WRITE_6:
1360 case WRITE_10: 1360 case WRITE_10:
1361 hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase); 1361 hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
1362 /*hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;*/ 1362 /*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
1363 hostdata->pendingw--; 1363 hostdata->pendingw--;
1364 break; 1364 break;
1365 case READ: 1365 case READ:
1366 case READ_6: 1366 case READ_6:
1367 case READ_10: 1367 case READ_10:
1368 hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase); 1368 hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
1369 /*hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;*/ 1369 /*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
1370 hostdata->pendingr--; 1370 hostdata->pendingr--;
1371 break; 1371 break;
1372 } 1372 }
@@ -1868,7 +1868,7 @@ static int do_abort(struct Scsi_Host *host)
1868 * the target sees, so we just handshake. 1868 * the target sees, so we just handshake.
1869 */ 1869 */
1870 1870
1871 while (!(tmp = NCR5380_read(STATUS_REG)) & SR_REQ) 1871 while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
1872 ; 1872 ;
1873 1873
1874 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); 1874 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
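The do_abort() hunk is a genuine bug fix rather than cleanup: ! binds tighter than &, so the old condition computed (!tmp) & SR_REQ. Since !tmp is only ever 0 or 1 and SR_REQ is 0x20, the AND was always zero and the busy-wait never actually waited for REQ. A tiny demonstration (SR_REQ value assumed from NCR5380.h):

    #define SR_REQ 0x20                  /* bit value from NCR5380.h */

    unsigned char tmp = 0x10;            /* status: busy, REQ clear */
    int old_cond = !tmp & SR_REQ;        /* (!tmp) & 0x20 == 0: exits at once */
    int new_cond = !(tmp & SR_REQ);      /* == 1: correctly keeps spinning */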
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index fec58cc47f1c..db6de5e6afb3 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -471,18 +471,8 @@ go_42:
471 /* 471 /*
472 * Complete the command 472 * Complete the command
473 */ 473 */
474 if (workreq->use_sg) { 474 scsi_dma_unmap(workreq);
475 pci_unmap_sg(dev->pdev, 475
476 (struct scatterlist *)workreq->request_buffer,
477 workreq->use_sg,
478 workreq->sc_data_direction);
479 } else if (workreq->request_bufflen &&
480 workreq->sc_data_direction != DMA_NONE) {
481 pci_unmap_single(dev->pdev,
482 workreq->SCp.dma_handle,
483 workreq->request_bufflen,
484 workreq->sc_data_direction);
485 }
486 spin_lock_irqsave(dev->host->host_lock, flags); 476 spin_lock_irqsave(dev->host->host_lock, flags);
487 (*workreq->scsi_done) (workreq); 477 (*workreq->scsi_done) (workreq);
488#ifdef ED_DBGP 478#ifdef ED_DBGP
@@ -624,7 +614,7 @@ static int atp870u_queuecommand(struct scsi_cmnd * req_p,
624 614
625 c = scmd_channel(req_p); 615 c = scmd_channel(req_p);
626 req_p->sense_buffer[0]=0; 616 req_p->sense_buffer[0]=0;
627 req_p->resid = 0; 617 scsi_set_resid(req_p, 0);
628 if (scmd_channel(req_p) > 1) { 618 if (scmd_channel(req_p) > 1) {
629 req_p->result = 0x00040000; 619 req_p->result = 0x00040000;
630 done(req_p); 620 done(req_p);
@@ -722,7 +712,6 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
722 unsigned short int tmpcip, w; 712 unsigned short int tmpcip, w;
723 unsigned long l, bttl = 0; 713 unsigned long l, bttl = 0;
724 unsigned int workport; 714 unsigned int workport;
725 struct scatterlist *sgpnt;
726 unsigned long sg_count; 715 unsigned long sg_count;
727 716
728 if (dev->in_snd[c] != 0) { 717 if (dev->in_snd[c] != 0) {
@@ -793,6 +782,8 @@ oktosend:
793 } 782 }
794 printk("\n"); 783 printk("\n");
795#endif 784#endif
785 l = scsi_bufflen(workreq);
786
796 if (dev->dev_id == ATP885_DEVID) { 787 if (dev->dev_id == ATP885_DEVID) {
797 j = inb(dev->baseport + 0x29) & 0xfe; 788 j = inb(dev->baseport + 0x29) & 0xfe;
798 outb(j, dev->baseport + 0x29); 789 outb(j, dev->baseport + 0x29);
@@ -800,12 +791,11 @@ oktosend:
800 } 791 }
801 792
802 if (workreq->cmnd[0] == READ_CAPACITY) { 793 if (workreq->cmnd[0] == READ_CAPACITY) {
803 if (workreq->request_bufflen > 8) { 794 if (l > 8)
804 workreq->request_bufflen = 0x08; 795 l = 8;
805 }
806 } 796 }
807 if (workreq->cmnd[0] == 0x00) { 797 if (workreq->cmnd[0] == 0x00) {
808 workreq->request_bufflen = 0; 798 l = 0;
809 } 799 }
810 800
811 tmport = workport + 0x1b; 801 tmport = workport + 0x1b;
@@ -852,40 +842,8 @@ oktosend:
852#ifdef ED_DBGP 842#ifdef ED_DBGP
853 printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,dev->id[c][target_id].devsp); 843 printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,dev->id[c][target_id].devsp);
854#endif 844#endif
855 /* 845
856 * Figure out the transfer size 846 sg_count = scsi_dma_map(workreq);
857 */
858 if (workreq->use_sg) {
859#ifdef ED_DBGP
860 printk("Using SGL\n");
861#endif
862 l = 0;
863
864 sgpnt = (struct scatterlist *) workreq->request_buffer;
865 sg_count = pci_map_sg(dev->pdev, sgpnt, workreq->use_sg,
866 workreq->sc_data_direction);
867
868 for (i = 0; i < workreq->use_sg; i++) {
869 if (sgpnt[i].length == 0 || workreq->use_sg > ATP870U_SCATTER) {
870 panic("Foooooooood fight!");
871 }
872 l += sgpnt[i].length;
873 }
874#ifdef ED_DBGP
875 printk( "send_s870: workreq->use_sg %d, sg_count %d l %8ld\n", workreq->use_sg, sg_count, l);
876#endif
877 } else if(workreq->request_bufflen && workreq->sc_data_direction != PCI_DMA_NONE) {
878#ifdef ED_DBGP
879 printk("Not using SGL\n");
880#endif
881 workreq->SCp.dma_handle = pci_map_single(dev->pdev, workreq->request_buffer,
882 workreq->request_bufflen,
883 workreq->sc_data_direction);
884 l = workreq->request_bufflen;
885#ifdef ED_DBGP
886 printk( "send_s870: workreq->use_sg %d, l %8ld\n", workreq->use_sg, l);
887#endif
888 } else l = 0;
889 /* 847 /*
890 * Write transfer size 848 * Write transfer size
891 */ 849 */
@@ -938,16 +896,16 @@ oktosend:
938 * a linear chain. 896 * a linear chain.
939 */ 897 */
940 898
941 if (workreq->use_sg) { 899 if (l) {
942 sgpnt = (struct scatterlist *) workreq->request_buffer; 900 struct scatterlist *sgpnt;
943 i = 0; 901 i = 0;
944 for (j = 0; j < workreq->use_sg; j++) { 902 scsi_for_each_sg(workreq, sgpnt, sg_count, j) {
945 bttl = sg_dma_address(&sgpnt[j]); 903 bttl = sg_dma_address(sgpnt);
946 l=sg_dma_len(&sgpnt[j]); 904 l=sg_dma_len(sgpnt);
947#ifdef ED_DBGP 905#ifdef ED_DBGP
948 printk("1. bttl %x, l %x\n",bttl, l); 906 printk("1. bttl %x, l %x\n",bttl, l);
949#endif 907#endif
950 while (l > 0x10000) { 908 while (l > 0x10000) {
951 (((u16 *) (prd))[i + 3]) = 0x0000; 909 (((u16 *) (prd))[i + 3]) = 0x0000;
952 (((u16 *) (prd))[i + 2]) = 0x0000; 910 (((u16 *) (prd))[i + 2]) = 0x0000;
953 (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl); 911 (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
@@ -965,32 +923,6 @@ oktosend:
965 printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3])); 923 printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3]));
966 printk("2. bttl %x, l %x\n",bttl, l); 924 printk("2. bttl %x, l %x\n",bttl, l);
967#endif 925#endif
968 } else {
969 /*
970 * For a linear request write a chain of blocks
971 */
972 bttl = workreq->SCp.dma_handle;
973 l = workreq->request_bufflen;
974 i = 0;
975#ifdef ED_DBGP
976 printk("3. bttl %x, l %x\n",bttl, l);
977#endif
978 while (l > 0x10000) {
979 (((u16 *) (prd))[i + 3]) = 0x0000;
980 (((u16 *) (prd))[i + 2]) = 0x0000;
981 (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
982 l -= 0x10000;
983 bttl += 0x10000;
984 i += 0x04;
985 }
986 (((u16 *) (prd))[i + 3]) = cpu_to_le16(0x8000);
987 (((u16 *) (prd))[i + 2]) = cpu_to_le16(l);
988 (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
989#ifdef ED_DBGP
990 printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3]));
991 printk("4. bttl %x, l %x\n",bttl, l);
992#endif
993
994 } 926 }
995 tmpcip += 4; 927 tmpcip += 4;
996#ifdef ED_DBGP 928#ifdef ED_DBGP
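The atp870u rework replaces its hand-rolled pci_map_sg()/pci_map_single() branches with the midlayer's scsi_dma_map()/scsi_dma_unmap(), which handle the scatterlist (and the no-data case) in one call; the separate single-buffer PRD chain becomes dead code and is deleted. A sketch of the idiom, under assumed names:

    int nseg = scsi_dma_map(cmd);  /* S/G count, 0 for no data, <0 on error */

    if (nseg < 0)
            return SCSI_MLQUEUE_HOST_BUSY;

    if (nseg) {
            struct scatterlist *sg;
            int i;

            scsi_for_each_sg(cmd, sg, nseg, i) {
                    dma_addr_t addr = sg_dma_address(sg);
                    unsigned int len = sg_dma_len(sg);
                    /* ... emit one PRD entry per segment ... */
            }
    }
    /* completion path: scsi_dma_unmap(cmd); */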
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 2311019304c0..7aad15436d24 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -21,6 +21,7 @@
21#include <linux/compat.h> 21#include <linux/compat.h>
22#include <linux/chio.h> /* here are all the ioctls */ 22#include <linux/chio.h> /* here are all the ioctls */
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/idr.h>
24 25
25#include <scsi/scsi.h> 26#include <scsi/scsi.h>
26#include <scsi/scsi_cmnd.h> 27#include <scsi/scsi_cmnd.h>
@@ -33,6 +34,7 @@
33 34
34#define CH_DT_MAX 16 35#define CH_DT_MAX 16
35#define CH_TYPES 8 36#define CH_TYPES 8
37#define CH_MAX_DEVS 128
36 38
37MODULE_DESCRIPTION("device driver for scsi media changer devices"); 39MODULE_DESCRIPTION("device driver for scsi media changer devices");
38MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org>"); 40MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org>");
@@ -88,17 +90,6 @@ static const char * vendor_labels[CH_TYPES-4] = {
88 90
89#define MAX_RETRIES 1 91#define MAX_RETRIES 1
90 92
91static int ch_probe(struct device *);
92static int ch_remove(struct device *);
93static int ch_open(struct inode * inode, struct file * filp);
94static int ch_release(struct inode * inode, struct file * filp);
95static int ch_ioctl(struct inode * inode, struct file * filp,
96 unsigned int cmd, unsigned long arg);
97#ifdef CONFIG_COMPAT
98static long ch_ioctl_compat(struct file * filp,
99 unsigned int cmd, unsigned long arg);
100#endif
101
102static struct class * ch_sysfs_class; 93static struct class * ch_sysfs_class;
103 94
104typedef struct { 95typedef struct {
@@ -114,30 +105,8 @@ typedef struct {
114 struct mutex lock; 105 struct mutex lock;
115} scsi_changer; 106} scsi_changer;
116 107
117static LIST_HEAD(ch_devlist); 108static DEFINE_IDR(ch_index_idr);
118static DEFINE_SPINLOCK(ch_devlist_lock); 109static DEFINE_SPINLOCK(ch_index_lock);
119static int ch_devcount;
120
121static struct scsi_driver ch_template =
122{
123 .owner = THIS_MODULE,
124 .gendrv = {
125 .name = "ch",
126 .probe = ch_probe,
127 .remove = ch_remove,
128 },
129};
130
131static const struct file_operations changer_fops =
132{
133 .owner = THIS_MODULE,
134 .open = ch_open,
135 .release = ch_release,
136 .ioctl = ch_ioctl,
137#ifdef CONFIG_COMPAT
138 .compat_ioctl = ch_ioctl_compat,
139#endif
140};
141 110
142static const struct { 111static const struct {
143 unsigned char sense; 112 unsigned char sense;
@@ -207,7 +176,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
207{ 176{
208 int errno, retries = 0, timeout, result; 177 int errno, retries = 0, timeout, result;
209 struct scsi_sense_hdr sshdr; 178 struct scsi_sense_hdr sshdr;
210 179
211 timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS) 180 timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
212 ? timeout_init : timeout_move; 181 ? timeout_init : timeout_move;
213 182
@@ -245,7 +214,7 @@ static int
245ch_elem_to_typecode(scsi_changer *ch, u_int elem) 214ch_elem_to_typecode(scsi_changer *ch, u_int elem)
246{ 215{
247 int i; 216 int i;
248 217
249 for (i = 0; i < CH_TYPES; i++) { 218 for (i = 0; i < CH_TYPES; i++) {
250 if (elem >= ch->firsts[i] && 219 if (elem >= ch->firsts[i] &&
251 elem < ch->firsts[i] + 220 elem < ch->firsts[i] +
@@ -261,15 +230,15 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
261 u_char cmd[12]; 230 u_char cmd[12];
262 u_char *buffer; 231 u_char *buffer;
263 int result; 232 int result;
264 233
265 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); 234 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
266 if(!buffer) 235 if(!buffer)
267 return -ENOMEM; 236 return -ENOMEM;
268 237
269 retry: 238 retry:
270 memset(cmd,0,sizeof(cmd)); 239 memset(cmd,0,sizeof(cmd));
271 cmd[0] = READ_ELEMENT_STATUS; 240 cmd[0] = READ_ELEMENT_STATUS;
272 cmd[1] = (ch->device->lun << 5) | 241 cmd[1] = (ch->device->lun << 5) |
273 (ch->voltags ? 0x10 : 0) | 242 (ch->voltags ? 0x10 : 0) |
274 ch_elem_to_typecode(ch,elem); 243 ch_elem_to_typecode(ch,elem);
275 cmd[2] = (elem >> 8) & 0xff; 244 cmd[2] = (elem >> 8) & 0xff;
@@ -296,7 +265,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
296 return result; 265 return result;
297} 266}
298 267
299static int 268static int
300ch_init_elem(scsi_changer *ch) 269ch_init_elem(scsi_changer *ch)
301{ 270{
302 int err; 271 int err;
@@ -322,7 +291,7 @@ ch_readconfig(scsi_changer *ch)
322 buffer = kzalloc(512, GFP_KERNEL | GFP_DMA); 291 buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
323 if (!buffer) 292 if (!buffer)
324 return -ENOMEM; 293 return -ENOMEM;
325 294
326 memset(cmd,0,sizeof(cmd)); 295 memset(cmd,0,sizeof(cmd));
327 cmd[0] = MODE_SENSE; 296 cmd[0] = MODE_SENSE;
328 cmd[1] = ch->device->lun << 5; 297 cmd[1] = ch->device->lun << 5;
@@ -365,7 +334,7 @@ ch_readconfig(scsi_changer *ch)
365 } else { 334 } else {
366 vprintk("reading element address assigment page failed!\n"); 335 vprintk("reading element address assigment page failed!\n");
367 } 336 }
368 337
369 /* vendor specific element types */ 338 /* vendor specific element types */
370 for (i = 0; i < 4; i++) { 339 for (i = 0; i < 4; i++) {
371 if (0 == vendor_counts[i]) 340 if (0 == vendor_counts[i])
@@ -443,7 +412,7 @@ static int
443ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate) 412ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate)
444{ 413{
445 u_char cmd[10]; 414 u_char cmd[10];
446 415
447 dprintk("position: 0x%x\n",elem); 416 dprintk("position: 0x%x\n",elem);
448 if (0 == trans) 417 if (0 == trans)
449 trans = ch->firsts[CHET_MT]; 418 trans = ch->firsts[CHET_MT];
@@ -462,7 +431,7 @@ static int
462ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate) 431ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate)
463{ 432{
464 u_char cmd[12]; 433 u_char cmd[12];
465 434
466 dprintk("move: 0x%x => 0x%x\n",src,dest); 435 dprintk("move: 0x%x => 0x%x\n",src,dest);
467 if (0 == trans) 436 if (0 == trans)
468 trans = ch->firsts[CHET_MT]; 437 trans = ch->firsts[CHET_MT];
@@ -484,7 +453,7 @@ ch_exchange(scsi_changer *ch, u_int trans, u_int src,
484 u_int dest1, u_int dest2, int rotate1, int rotate2) 453 u_int dest1, u_int dest2, int rotate1, int rotate2)
485{ 454{
486 u_char cmd[12]; 455 u_char cmd[12];
487 456
488 dprintk("exchange: 0x%x => 0x%x => 0x%x\n", 457 dprintk("exchange: 0x%x => 0x%x => 0x%x\n",
489 src,dest1,dest2); 458 src,dest1,dest2);
490 if (0 == trans) 459 if (0 == trans)
@@ -501,7 +470,7 @@ ch_exchange(scsi_changer *ch, u_int trans, u_int src,
501 cmd[8] = (dest2 >> 8) & 0xff; 470 cmd[8] = (dest2 >> 8) & 0xff;
502 cmd[9] = dest2 & 0xff; 471 cmd[9] = dest2 & 0xff;
503 cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0); 472 cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0);
504 473
505 return ch_do_scsi(ch, cmd, NULL,0, DMA_NONE); 474 return ch_do_scsi(ch, cmd, NULL,0, DMA_NONE);
506} 475}
507 476
@@ -539,14 +508,14 @@ ch_set_voltag(scsi_changer *ch, u_int elem,
539 elem, tag); 508 elem, tag);
540 memset(cmd,0,sizeof(cmd)); 509 memset(cmd,0,sizeof(cmd));
541 cmd[0] = SEND_VOLUME_TAG; 510 cmd[0] = SEND_VOLUME_TAG;
542 cmd[1] = (ch->device->lun << 5) | 511 cmd[1] = (ch->device->lun << 5) |
543 ch_elem_to_typecode(ch,elem); 512 ch_elem_to_typecode(ch,elem);
544 cmd[2] = (elem >> 8) & 0xff; 513 cmd[2] = (elem >> 8) & 0xff;
545 cmd[3] = elem & 0xff; 514 cmd[3] = elem & 0xff;
546 cmd[5] = clear 515 cmd[5] = clear
547 ? (alternate ? 0x0d : 0x0c) 516 ? (alternate ? 0x0d : 0x0c)
548 : (alternate ? 0x0b : 0x0a); 517 : (alternate ? 0x0b : 0x0a);
549 518
550 cmd[9] = 255; 519 cmd[9] = 255;
551 520
552 memcpy(buffer,tag,32); 521 memcpy(buffer,tag,32);
@@ -562,7 +531,7 @@ static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest)
562 int retval = 0; 531 int retval = 0;
563 u_char data[16]; 532 u_char data[16];
564 unsigned int i; 533 unsigned int i;
565 534
566 mutex_lock(&ch->lock); 535 mutex_lock(&ch->lock);
567 for (i = 0; i < ch->counts[type]; i++) { 536 for (i = 0; i < ch->counts[type]; i++) {
568 if (0 != ch_read_element_status 537 if (0 != ch_read_element_status
@@ -599,20 +568,17 @@ ch_release(struct inode *inode, struct file *file)
599static int 568static int
600ch_open(struct inode *inode, struct file *file) 569ch_open(struct inode *inode, struct file *file)
601{ 570{
602 scsi_changer *tmp, *ch; 571 scsi_changer *ch;
603 int minor = iminor(inode); 572 int minor = iminor(inode);
604 573
605 spin_lock(&ch_devlist_lock); 574 spin_lock(&ch_index_lock);
606 ch = NULL; 575 ch = idr_find(&ch_index_idr, minor);
607 list_for_each_entry(tmp,&ch_devlist,list) { 576
608 if (tmp->minor == minor)
609 ch = tmp;
610 }
611 if (NULL == ch || scsi_device_get(ch->device)) { 577 if (NULL == ch || scsi_device_get(ch->device)) {
612 spin_unlock(&ch_devlist_lock); 578 spin_unlock(&ch_index_lock);
613 return -ENXIO; 579 return -ENXIO;
614 } 580 }
615 spin_unlock(&ch_devlist_lock); 581 spin_unlock(&ch_index_lock);
616 582
617 file->private_data = ch; 583 file->private_data = ch;
618 return 0; 584 return 0;
@@ -626,24 +592,24 @@ ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit)
626 return 0; 592 return 0;
627} 593}
628 594
629static int ch_ioctl(struct inode * inode, struct file * file, 595static long ch_ioctl(struct file *file,
630 unsigned int cmd, unsigned long arg) 596 unsigned int cmd, unsigned long arg)
631{ 597{
632 scsi_changer *ch = file->private_data; 598 scsi_changer *ch = file->private_data;
633 int retval; 599 int retval;
634 void __user *argp = (void __user *)arg; 600 void __user *argp = (void __user *)arg;
635 601
636 switch (cmd) { 602 switch (cmd) {
637 case CHIOGPARAMS: 603 case CHIOGPARAMS:
638 { 604 {
639 struct changer_params params; 605 struct changer_params params;
640 606
641 params.cp_curpicker = 0; 607 params.cp_curpicker = 0;
642 params.cp_npickers = ch->counts[CHET_MT]; 608 params.cp_npickers = ch->counts[CHET_MT];
643 params.cp_nslots = ch->counts[CHET_ST]; 609 params.cp_nslots = ch->counts[CHET_ST];
644 params.cp_nportals = ch->counts[CHET_IE]; 610 params.cp_nportals = ch->counts[CHET_IE];
645 params.cp_ndrives = ch->counts[CHET_DT]; 611 params.cp_ndrives = ch->counts[CHET_DT];
646 612
647 if (copy_to_user(argp, &params, sizeof(params))) 613 if (copy_to_user(argp, &params, sizeof(params)))
648 return -EFAULT; 614 return -EFAULT;
649 return 0; 615 return 0;
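ch_ioctl also moves from the legacy ioctl file operation to unlocked_ioctl: the inode argument goes away and the handler runs without the Big Kernel Lock, which is safe here because the driver already serializes on ch->lock. The compat path later in this diff can then call ch_ioctl(file, cmd, arg) directly. The signature change, side by side:

    /* old: implicit BKL, unused inode argument */
    static int  ch_ioctl(struct inode *inode, struct file *file,
                         unsigned int cmd, unsigned long arg);

    /* new: no lock from the caller; the driver relies on ch->lock */
    static long ch_ioctl(struct file *file,
                         unsigned int cmd, unsigned long arg);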
@@ -673,11 +639,11 @@ static int ch_ioctl(struct inode * inode, struct file * file,
673 return -EFAULT; 639 return -EFAULT;
674 return 0; 640 return 0;
675 } 641 }
676 642
677 case CHIOPOSITION: 643 case CHIOPOSITION:
678 { 644 {
679 struct changer_position pos; 645 struct changer_position pos;
680 646
681 if (copy_from_user(&pos, argp, sizeof (pos))) 647 if (copy_from_user(&pos, argp, sizeof (pos)))
682 return -EFAULT; 648 return -EFAULT;
683 649
@@ -692,7 +658,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
692 mutex_unlock(&ch->lock); 658 mutex_unlock(&ch->lock);
693 return retval; 659 return retval;
694 } 660 }
695 661
696 case CHIOMOVE: 662 case CHIOMOVE:
697 { 663 {
698 struct changer_move mv; 664 struct changer_move mv;
@@ -705,7 +671,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
705 dprintk("CHIOMOVE: invalid parameter\n"); 671 dprintk("CHIOMOVE: invalid parameter\n");
706 return -EBADSLT; 672 return -EBADSLT;
707 } 673 }
708 674
709 mutex_lock(&ch->lock); 675 mutex_lock(&ch->lock);
710 retval = ch_move(ch,0, 676 retval = ch_move(ch,0,
711 ch->firsts[mv.cm_fromtype] + mv.cm_fromunit, 677 ch->firsts[mv.cm_fromtype] + mv.cm_fromunit,
@@ -718,7 +684,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
718 case CHIOEXCHANGE: 684 case CHIOEXCHANGE:
719 { 685 {
720 struct changer_exchange mv; 686 struct changer_exchange mv;
721 687
722 if (copy_from_user(&mv, argp, sizeof (mv))) 688 if (copy_from_user(&mv, argp, sizeof (mv)))
723 return -EFAULT; 689 return -EFAULT;
724 690
@@ -728,7 +694,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
728 dprintk("CHIOEXCHANGE: invalid parameter\n"); 694 dprintk("CHIOEXCHANGE: invalid parameter\n");
729 return -EBADSLT; 695 return -EBADSLT;
730 } 696 }
731 697
732 mutex_lock(&ch->lock); 698 mutex_lock(&ch->lock);
733 retval = ch_exchange 699 retval = ch_exchange
734 (ch,0, 700 (ch,0,
@@ -743,7 +709,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
743 case CHIOGSTATUS: 709 case CHIOGSTATUS:
744 { 710 {
745 struct changer_element_status ces; 711 struct changer_element_status ces;
746 712
747 if (copy_from_user(&ces, argp, sizeof (ces))) 713 if (copy_from_user(&ces, argp, sizeof (ces)))
748 return -EFAULT; 714 return -EFAULT;
749 if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES) 715 if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES)
@@ -759,19 +725,19 @@ static int ch_ioctl(struct inode * inode, struct file * file,
759 u_char *buffer; 725 u_char *buffer;
760 unsigned int elem; 726 unsigned int elem;
761 int result,i; 727 int result,i;
762 728
763 if (copy_from_user(&cge, argp, sizeof (cge))) 729 if (copy_from_user(&cge, argp, sizeof (cge)))
764 return -EFAULT; 730 return -EFAULT;
765 731
766 if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit)) 732 if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit))
767 return -EINVAL; 733 return -EINVAL;
768 elem = ch->firsts[cge.cge_type] + cge.cge_unit; 734 elem = ch->firsts[cge.cge_type] + cge.cge_unit;
769 735
770 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); 736 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
771 if (!buffer) 737 if (!buffer)
772 return -ENOMEM; 738 return -ENOMEM;
773 mutex_lock(&ch->lock); 739 mutex_lock(&ch->lock);
774 740
775 voltag_retry: 741 voltag_retry:
776 memset(cmd,0,sizeof(cmd)); 742 memset(cmd,0,sizeof(cmd));
777 cmd[0] = READ_ELEMENT_STATUS; 743 cmd[0] = READ_ELEMENT_STATUS;
@@ -782,7 +748,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
782 cmd[3] = elem & 0xff; 748 cmd[3] = elem & 0xff;
783 cmd[5] = 1; 749 cmd[5] = 1;
784 cmd[9] = 255; 750 cmd[9] = 255;
785 751
786 if (0 == (result = ch_do_scsi(ch, cmd, buffer, 256, DMA_FROM_DEVICE))) { 752 if (0 == (result = ch_do_scsi(ch, cmd, buffer, 256, DMA_FROM_DEVICE))) {
787 cge.cge_status = buffer[18]; 753 cge.cge_status = buffer[18];
788 cge.cge_flags = 0; 754 cge.cge_flags = 0;
@@ -822,7 +788,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
822 } 788 }
823 kfree(buffer); 789 kfree(buffer);
824 mutex_unlock(&ch->lock); 790 mutex_unlock(&ch->lock);
825 791
826 if (copy_to_user(argp, &cge, sizeof (cge))) 792 if (copy_to_user(argp, &cge, sizeof (cge)))
827 return -EFAULT; 793 return -EFAULT;
828 return result; 794 return result;
@@ -835,7 +801,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
835 mutex_unlock(&ch->lock); 801 mutex_unlock(&ch->lock);
836 return retval; 802 return retval;
837 } 803 }
838 804
839 case CHIOSVOLTAG: 805 case CHIOSVOLTAG:
840 { 806 {
841 struct changer_set_voltag csv; 807 struct changer_set_voltag csv;
@@ -876,7 +842,7 @@ static long ch_ioctl_compat(struct file * file,
876 unsigned int cmd, unsigned long arg) 842 unsigned int cmd, unsigned long arg)
877{ 843{
878 scsi_changer *ch = file->private_data; 844 scsi_changer *ch = file->private_data;
879 845
880 switch (cmd) { 846 switch (cmd) {
881 case CHIOGPARAMS: 847 case CHIOGPARAMS:
882 case CHIOGVPARAMS: 848 case CHIOGVPARAMS:
@@ -887,13 +853,12 @@ static long ch_ioctl_compat(struct file * file,
887 case CHIOINITELEM: 853 case CHIOINITELEM:
888 case CHIOSVOLTAG: 854 case CHIOSVOLTAG:
889 /* compatible */ 855 /* compatible */
890 return ch_ioctl(NULL /* inode, unused */, 856 return ch_ioctl(file, cmd, arg);
891 file, cmd, arg);
892 case CHIOGSTATUS32: 857 case CHIOGSTATUS32:
893 { 858 {
894 struct changer_element_status32 ces32; 859 struct changer_element_status32 ces32;
895 unsigned char __user *data; 860 unsigned char __user *data;
896 861
897 if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32))) 862 if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32)))
898 return -EFAULT; 863 return -EFAULT;
899 if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES) 864 if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
@@ -915,63 +880,100 @@ static long ch_ioctl_compat(struct file * file,
915static int ch_probe(struct device *dev) 880static int ch_probe(struct device *dev)
916{ 881{
917 struct scsi_device *sd = to_scsi_device(dev); 882 struct scsi_device *sd = to_scsi_device(dev);
883 struct class_device *class_dev;
884 int minor, ret = -ENOMEM;
918 scsi_changer *ch; 885 scsi_changer *ch;
919 886
920 if (sd->type != TYPE_MEDIUM_CHANGER) 887 if (sd->type != TYPE_MEDIUM_CHANGER)
921 return -ENODEV; 888 return -ENODEV;
922 889
923 ch = kzalloc(sizeof(*ch), GFP_KERNEL); 890 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
924 if (NULL == ch) 891 if (NULL == ch)
925 return -ENOMEM; 892 return -ENOMEM;
926 893
927 ch->minor = ch_devcount; 894 if (!idr_pre_get(&ch_index_idr, GFP_KERNEL))
895 goto free_ch;
896
897 spin_lock(&ch_index_lock);
898 ret = idr_get_new(&ch_index_idr, ch, &minor);
899 spin_unlock(&ch_index_lock);
900
901 if (ret)
902 goto free_ch;
903
904 if (minor > CH_MAX_DEVS) {
905 ret = -ENODEV;
906 goto remove_idr;
907 }
908
909 ch->minor = minor;
928 sprintf(ch->name,"ch%d",ch->minor); 910 sprintf(ch->name,"ch%d",ch->minor);
911
912 class_dev = class_device_create(ch_sysfs_class, NULL,
913 MKDEV(SCSI_CHANGER_MAJOR, ch->minor),
914 dev, "s%s", ch->name);
915 if (IS_ERR(class_dev)) {
916 printk(KERN_WARNING "ch%d: class_device_create failed\n",
917 ch->minor);
918 ret = PTR_ERR(class_dev);
919 goto remove_idr;
920 }
921
929 mutex_init(&ch->lock); 922 mutex_init(&ch->lock);
930 ch->device = sd; 923 ch->device = sd;
931 ch_readconfig(ch); 924 ch_readconfig(ch);
932 if (init) 925 if (init)
933 ch_init_elem(ch); 926 ch_init_elem(ch);
934 927
935 class_device_create(ch_sysfs_class, NULL, 928 dev_set_drvdata(dev, ch);
936 MKDEV(SCSI_CHANGER_MAJOR,ch->minor),
937 dev, "s%s", ch->name);
938
939 sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name); 929 sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
940 930
941 spin_lock(&ch_devlist_lock);
942 list_add_tail(&ch->list,&ch_devlist);
943 ch_devcount++;
944 spin_unlock(&ch_devlist_lock);
945 return 0; 931 return 0;
932remove_idr:
933 idr_remove(&ch_index_idr, minor);
934free_ch:
935 kfree(ch);
936 return ret;
946} 937}
947 938
948static int ch_remove(struct device *dev) 939static int ch_remove(struct device *dev)
949{ 940{
950 struct scsi_device *sd = to_scsi_device(dev); 941 scsi_changer *ch = dev_get_drvdata(dev);
951 scsi_changer *tmp, *ch;
952 942
953 spin_lock(&ch_devlist_lock); 943 spin_lock(&ch_index_lock);
954 ch = NULL; 944 idr_remove(&ch_index_idr, ch->minor);
955 list_for_each_entry(tmp,&ch_devlist,list) { 945 spin_unlock(&ch_index_lock);
956 if (tmp->device == sd)
957 ch = tmp;
958 }
959 BUG_ON(NULL == ch);
960 list_del(&ch->list);
961 spin_unlock(&ch_devlist_lock);
962 946
963 class_device_destroy(ch_sysfs_class, 947 class_device_destroy(ch_sysfs_class,
964 MKDEV(SCSI_CHANGER_MAJOR,ch->minor)); 948 MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
965 kfree(ch->dt); 949 kfree(ch->dt);
966 kfree(ch); 950 kfree(ch);
967 ch_devcount--;
968 return 0; 951 return 0;
969} 952}
970 953
954static struct scsi_driver ch_template = {
955 .owner = THIS_MODULE,
956 .gendrv = {
957 .name = "ch",
958 .probe = ch_probe,
959 .remove = ch_remove,
960 },
961};
962
963static const struct file_operations changer_fops = {
964 .owner = THIS_MODULE,
965 .open = ch_open,
966 .release = ch_release,
967 .unlocked_ioctl = ch_ioctl,
968#ifdef CONFIG_COMPAT
969 .compat_ioctl = ch_ioctl_compat,
970#endif
971};
972
971static int __init init_ch_module(void) 973static int __init init_ch_module(void)
972{ 974{
973 int rc; 975 int rc;
974 976
975 printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n"); 977 printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n");
976 ch_sysfs_class = class_create(THIS_MODULE, "scsi_changer"); 978 ch_sysfs_class = class_create(THIS_MODULE, "scsi_changer");
977 if (IS_ERR(ch_sysfs_class)) { 979 if (IS_ERR(ch_sysfs_class)) {
@@ -996,11 +998,12 @@ static int __init init_ch_module(void)
996 return rc; 998 return rc;
997} 999}
998 1000
999static void __exit exit_ch_module(void) 1001static void __exit exit_ch_module(void)
1000{ 1002{
1001 scsi_unregister_driver(&ch_template.gendrv); 1003 scsi_unregister_driver(&ch_template.gendrv);
1002 unregister_chrdev(SCSI_CHANGER_MAJOR, "ch"); 1004 unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
1003 class_destroy(ch_sysfs_class); 1005 class_destroy(ch_sysfs_class);
1006 idr_destroy(&ch_index_idr);
1004} 1007}
1005 1008
1006module_init(init_ch_module); 1009module_init(init_ch_module);
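Taken together, the ch.c hunks swap the global device list (and the ch_devcount minor counter) for an idr: probe allocates a minor with idr_pre_get()/idr_get_new(), open resolves it with idr_find(), and remove/module-exit tear it down with idr_remove()/idr_destroy(). A sketch of that two-step allocation API, condensed from the probe path above (this kernel predates the later idr_alloc() interface):

    /* preload must happen outside the spinlock; it may sleep */
    if (!idr_pre_get(&ch_index_idr, GFP_KERNEL))
            return -ENOMEM;

    spin_lock(&ch_index_lock);
    ret = idr_get_new(&ch_index_idr, ch, &minor);  /* the idr picks the id */
    spin_unlock(&ch_index_lock);
    if (ret)
            goto free_ch;
    if (minor > CH_MAX_DEVS)                       /* cap the minor range */
            goto remove_idr;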
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 024553f9c247..403a7f2d8f9b 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -362,7 +362,6 @@ void scsi_print_command(struct scsi_cmnd *cmd)
362EXPORT_SYMBOL(scsi_print_command); 362EXPORT_SYMBOL(scsi_print_command);
363 363
364/** 364/**
365 *
366 * scsi_print_status - print scsi status description 365 * scsi_print_status - print scsi status description
367 * @scsi_status: scsi status value 366 * @scsi_status: scsi status value
368 * 367 *
@@ -1369,7 +1368,7 @@ EXPORT_SYMBOL(scsi_print_sense);
1369static const char * const hostbyte_table[]={ 1368static const char * const hostbyte_table[]={
1370"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET", 1369"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
1371"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR", 1370"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
1372"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY"}; 1371"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE"};
1373#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table) 1372#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
1374 1373
1375static const char * const driverbyte_table[]={ 1374static const char * const driverbyte_table[]={
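constants.c gains a "DID_REQUEUE" entry so the decode table stays in step with the DID_* host byte codes in scsi.h; the NUM_HOSTBYTE_STRS bound then keeps out-of-range values from walking off the table. A sketch of how such a table is consulted (hypothetical helper):

    static const char *host_byte_name(int result)
    {
            unsigned int hb = (result >> 16) & 0xff;   /* host byte field */

            return hb < NUM_HOSTBYTE_STRS ? hostbyte_table[hb] : "UNKNOWN";
    }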
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index a9def6e1d30e..f93c73c0ba53 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -1629,8 +1629,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1629 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5)); 1629 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1630 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); 1630 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1631 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); 1631 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1632 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 1632 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1633 sizeof(srb->cmd->sense_buffer));
1634 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); 1633 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1635 } else { 1634 } else {
1636 ptr = (u8 *)srb->cmd->cmnd; 1635 ptr = (u8 *)srb->cmd->cmnd;
@@ -1915,8 +1914,7 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1915 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5)); 1914 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1916 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); 1915 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1917 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); 1916 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1918 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 1917 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1919 sizeof(srb->cmd->sense_buffer));
1920 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); 1918 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1921 } 1919 }
1922 srb->state |= SRB_COMMAND; 1920 srb->state |= SRB_COMMAND;
@@ -3685,7 +3683,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3685 srb->target_status = 0; 3683 srb->target_status = 0;
3686 3684
3687 /* KG: Can this prevent crap sense data ? */ 3685 /* KG: Can this prevent crap sense data ? */
3688 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); 3686 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3689 3687
3690 /* Save some data */ 3688 /* Save some data */
3691 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address = 3689 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
@@ -3694,15 +3692,15 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3694 srb->segment_x[0].length; 3692 srb->segment_x[0].length;
3695 srb->xferred = srb->total_xfer_length; 3693 srb->xferred = srb->total_xfer_length;
3696 /* srb->segment_x : a one entry of S/G list table */ 3694 /* srb->segment_x : a one entry of S/G list table */
3697 srb->total_xfer_length = sizeof(cmd->sense_buffer); 3695 srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
3698 srb->segment_x[0].length = sizeof(cmd->sense_buffer); 3696 srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
3699 /* Map sense buffer */ 3697 /* Map sense buffer */
3700 srb->segment_x[0].address = 3698 srb->segment_x[0].address =
3701 pci_map_single(acb->dev, cmd->sense_buffer, 3699 pci_map_single(acb->dev, cmd->sense_buffer,
3702 sizeof(cmd->sense_buffer), PCI_DMA_FROMDEVICE); 3700 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
3703 dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n", 3701 dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
3704 cmd->sense_buffer, srb->segment_x[0].address, 3702 cmd->sense_buffer, srb->segment_x[0].address,
3705 sizeof(cmd->sense_buffer)); 3703 SCSI_SENSE_BUFFERSIZE);
3706 srb->sg_count = 1; 3704 srb->sg_count = 1;
3707 srb->sg_index = 0; 3705 srb->sg_index = 0;
3708 3706
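dc395x applies the same sense-buffer rule to its auto-request-sense path, including the DMA mapping: the buffer is mapped with an explicit SCSI_SENSE_BUFFERSIZE length rather than sizeof on the now-pointer member. The mapping pair, sketched with illustrative names:

    dma_addr_t sense_dma;

    sense_dma = pci_map_single(pdev, cmd->sense_buffer,
                               SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
    /* ... hand sense_dma / SCSI_SENSE_BUFFERSIZE to the adapter ... */
    pci_unmap_single(pdev, sense_dma,
                     SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);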
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index b31d1c95c9fb..19cce125124c 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2296,9 +2296,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2296 2296
2297 // copy over the request sense data if it was a check 2297 // copy over the request sense data if it was a check
2298 // condition status 2298 // condition status
2299 if(dev_status == 0x02 /*CHECK_CONDITION*/) { 2299 if (dev_status == SAM_STAT_CHECK_CONDITION) {
2300 u32 len = sizeof(cmd->sense_buffer); 2300 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2301 len = (len > 40) ? 40 : len;
2302 // Copy over the sense data 2301 // Copy over the sense data
2303 memcpy_fromio(cmd->sense_buffer, (reply+28) , len); 2302 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2304 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && 2303 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
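The dpt_i2o hunk also folds the open-coded clamp into min(). The kernel's min() insists on identically typed operands; it compiles here because SCSI_SENSE_BUFFERSIZE and 40 are both plain int constants, otherwise min_t() would be required. For instance (reply_len is an assumed variable):

    u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);  /* both int: type-checks */
    u32 cap = min_t(u32, reply_len, 40);       /* mixed types: force u32 */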
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 7ead5210de96..05163cefec12 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1623,9 +1623,9 @@ static void map_dma(unsigned int i, struct hostdata *ha)
1623 if (SCpnt->sense_buffer) 1623 if (SCpnt->sense_buffer)
1624 cpp->sense_addr = 1624 cpp->sense_addr =
1625 H2DEV(pci_map_single(ha->pdev, SCpnt->sense_buffer, 1625 H2DEV(pci_map_single(ha->pdev, SCpnt->sense_buffer,
1626 sizeof SCpnt->sense_buffer, PCI_DMA_FROMDEVICE)); 1626 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));
1627 1627
1628 cpp->sense_len = sizeof SCpnt->sense_buffer; 1628 cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
1629 1629
1630 count = scsi_dma_map(SCpnt); 1630 count = scsi_dma_map(SCpnt);
1631 BUG_ON(count < 0); 1631 BUG_ON(count < 0);
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 982c5092be11..b5a60926e556 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -369,7 +369,6 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
369 cp = &hd->ccb[y]; 369 cp = &hd->ccb[y];
370 370
371 memset(cp, 0, sizeof(struct eata_ccb)); 371 memset(cp, 0, sizeof(struct eata_ccb));
372 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
373 372
374 cp->status = USED; /* claim free slot */ 373 cp->status = USED; /* claim free slot */
375 374
@@ -385,7 +384,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
385 cp->DataIn = 0; /* Input mode */ 384 cp->DataIn = 0; /* Input mode */
386 385
387 cp->Interpret = (cmd->device->id == hd->hostid); 386 cp->Interpret = (cmd->device->id == hd->hostid);
388 cp->cp_datalen = cpu_to_be32(cmd->request_bufflen); 387 cp->cp_datalen = cpu_to_be32(scsi_bufflen(cmd));
389 cp->Auto_Req_Sen = 0; 388 cp->Auto_Req_Sen = 0;
390 cp->cp_reqDMA = 0; 389 cp->cp_reqDMA = 0;
391 cp->reqlen = 0; 390 cp->reqlen = 0;
@@ -402,14 +401,14 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
402 cp->cmd = cmd; 401 cp->cmd = cmd;
403 cmd->host_scribble = (char *) &hd->ccb[y]; 402 cmd->host_scribble = (char *) &hd->ccb[y];
404 403
405 if (cmd->use_sg == 0) { 404 if (!scsi_bufflen(cmd)) {
406 cmd->SCp.buffers_residual = 1; 405 cmd->SCp.buffers_residual = 1;
407 cmd->SCp.ptr = cmd->request_buffer; 406 cmd->SCp.ptr = NULL;
408 cmd->SCp.this_residual = cmd->request_bufflen; 407 cmd->SCp.this_residual = 0;
409 cmd->SCp.buffer = NULL; 408 cmd->SCp.buffer = NULL;
410 } else { 409 } else {
411 cmd->SCp.buffer = cmd->request_buffer; 410 cmd->SCp.buffer = scsi_sglist(cmd);
412 cmd->SCp.buffers_residual = cmd->use_sg; 411 cmd->SCp.buffers_residual = scsi_sg_count(cmd);
413 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); 412 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
414 cmd->SCp.this_residual = cmd->SCp.buffer->length; 413 cmd->SCp.this_residual = cmd->SCp.buffer->length;
415 } 414 }
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index 8335b608e571..85bd54c77b50 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -1017,24 +1017,6 @@ static irqreturn_t fd_mcs_intr(int irq, void *dev_id)
1017 printk(" ** IN DONE %d ** ", current_SC->SCp.have_data_in); 1017 printk(" ** IN DONE %d ** ", current_SC->SCp.have_data_in);
1018#endif 1018#endif
1019 1019
1020#if ERRORS_ONLY
1021 if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
1022 if ((unsigned char) (*((char *) current_SC->request_buffer + 2)) & 0x0f) {
1023 unsigned char key;
1024 unsigned char code;
1025 unsigned char qualifier;
1026
1027 key = (unsigned char) (*((char *) current_SC->request_buffer + 2)) & 0x0f;
1028 code = (unsigned char) (*((char *) current_SC->request_buffer + 12));
1029 qualifier = (unsigned char) (*((char *) current_SC->request_buffer + 13));
1030
1031 if (key != UNIT_ATTENTION && !(key == NOT_READY && code == 0x04 && (!qualifier || qualifier == 0x02 || qualifier == 0x01))
1032 && !(key == ILLEGAL_REQUEST && (code == 0x25 || code == 0x24 || !code)))
1033
1034 printk("fd_mcs: REQUEST SENSE " "Key = %x, Code = %x, Qualifier = %x\n", key, code, qualifier);
1035 }
1036 }
1037#endif
1038#if EVERY_ACCESS 1020#if EVERY_ACCESS
1039 printk("BEFORE MY_DONE. . ."); 1021 printk("BEFORE MY_DONE. . .");
1040#endif 1022#endif
@@ -1097,7 +1079,9 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
1097 panic("fd_mcs: fd_mcs_queue() NOT REENTRANT!\n"); 1079 panic("fd_mcs: fd_mcs_queue() NOT REENTRANT!\n");
1098 } 1080 }
1099#if EVERY_ACCESS 1081#if EVERY_ACCESS
1100 printk("queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n", SCpnt->target, *(unsigned char *) SCpnt->cmnd, SCpnt->use_sg, SCpnt->request_bufflen); 1082 printk("queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
1083 SCpnt->target, *(unsigned char *) SCpnt->cmnd,
1084 scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
1101#endif 1085#endif
1102 1086
1103 fd_mcs_make_bus_idle(shpnt); 1087 fd_mcs_make_bus_idle(shpnt);
@@ -1107,14 +1091,14 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
1107 1091
1108 /* Initialize static data */ 1092 /* Initialize static data */
1109 1093
1110 if (current_SC->use_sg) { 1094 if (scsi_bufflen(current_SC)) {
1111 current_SC->SCp.buffer = (struct scatterlist *) current_SC->request_buffer; 1095 current_SC->SCp.buffer = scsi_sglist(current_SC);
1112 current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer); 1096 current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
1113 current_SC->SCp.this_residual = current_SC->SCp.buffer->length; 1097 current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
1114 current_SC->SCp.buffers_residual = current_SC->use_sg - 1; 1098 current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
1115 } else { 1099 } else {
1116 current_SC->SCp.ptr = (char *) current_SC->request_buffer; 1100 current_SC->SCp.ptr = NULL;
1117 current_SC->SCp.this_residual = current_SC->request_bufflen; 1101 current_SC->SCp.this_residual = 0;
1118 current_SC->SCp.buffer = NULL; 1102 current_SC->SCp.buffer = NULL;
1119 current_SC->SCp.buffers_residual = 0; 1103 current_SC->SCp.buffers_residual = 0;
1120 } 1104 }
@@ -1166,7 +1150,9 @@ static void fd_mcs_print_info(Scsi_Cmnd * SCpnt)
1166 break; 1150 break;
1167 } 1151 }
1168 1152
1169 printk("(%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n", SCpnt->SCp.phase, SCpnt->device->id, *(unsigned char *) SCpnt->cmnd, SCpnt->use_sg, SCpnt->request_bufflen); 1153 printk("(%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n",
1154 SCpnt->SCp.phase, SCpnt->device->id, *(unsigned char *) SCpnt->cmnd,
1155 scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
1170 printk("sent_command = %d, have_data_in = %d, timeout = %d\n", SCpnt->SCp.sent_command, SCpnt->SCp.have_data_in, SCpnt->timeout); 1156 printk("sent_command = %d, have_data_in = %d, timeout = %d\n", SCpnt->SCp.sent_command, SCpnt->SCp.have_data_in, SCpnt->timeout);
1171#if DEBUG_RACE 1157#if DEBUG_RACE
1172 printk("in_interrupt_flag = %d\n", in_interrupt_flag); 1158 printk("in_interrupt_flag = %d\n", in_interrupt_flag);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index b253b8c718d3..c82523908c2e 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -141,7 +141,7 @@
141static void gdth_delay(int milliseconds); 141static void gdth_delay(int milliseconds);
142static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs); 142static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs);
143static irqreturn_t gdth_interrupt(int irq, void *dev_id); 143static irqreturn_t gdth_interrupt(int irq, void *dev_id);
144static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq, 144static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
145 int gdth_from_wait, int* pIndex); 145 int gdth_from_wait, int* pIndex);
146static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, 146static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
147 Scsi_Cmnd *scp); 147 Scsi_Cmnd *scp);
@@ -165,7 +165,6 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
165static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive); 165static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive);
166 166
167static void gdth_enable_int(gdth_ha_str *ha); 167static void gdth_enable_int(gdth_ha_str *ha);
168static unchar gdth_get_status(gdth_ha_str *ha, int irq);
169static int gdth_test_busy(gdth_ha_str *ha); 168static int gdth_test_busy(gdth_ha_str *ha);
170static int gdth_get_cmd_index(gdth_ha_str *ha); 169static int gdth_get_cmd_index(gdth_ha_str *ha);
171static void gdth_release_event(gdth_ha_str *ha); 170static void gdth_release_event(gdth_ha_str *ha);
@@ -1334,14 +1333,12 @@ static void __init gdth_enable_int(gdth_ha_str *ha)
1334} 1333}
1335 1334
1336/* return IStatus if interrupt was from this card else 0 */ 1335/* return IStatus if interrupt was from this card else 0 */
1337static unchar gdth_get_status(gdth_ha_str *ha, int irq) 1336static unchar gdth_get_status(gdth_ha_str *ha)
1338{ 1337{
1339 unchar IStatus = 0; 1338 unchar IStatus = 0;
1340 1339
1341 TRACE(("gdth_get_status() irq %d ctr_count %d\n", irq, gdth_ctr_count)); 1340 TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
1342 1341
1343 if (ha->irq != (unchar)irq) /* check IRQ */
1344 return false;
1345 if (ha->type == GDT_EISA) 1342 if (ha->type == GDT_EISA)
1346 IStatus = inb((ushort)ha->bmic + EDOORREG); 1343 IStatus = inb((ushort)ha->bmic + EDOORREG);
1347 else if (ha->type == GDT_ISA) 1344 else if (ha->type == GDT_ISA)
@@ -1523,7 +1520,7 @@ static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time)
1523 return 1; /* no wait required */ 1520 return 1; /* no wait required */
1524 1521
1525 do { 1522 do {
1526 __gdth_interrupt(ha, (int)ha->irq, true, &wait_index); 1523 __gdth_interrupt(ha, true, &wait_index);
1527 if (wait_index == index) { 1524 if (wait_index == index) {
1528 answer_found = TRUE; 1525 answer_found = TRUE;
1529 break; 1526 break;
@@ -3036,7 +3033,7 @@ static void gdth_clear_events(void)
3036 3033
3037/* SCSI interface functions */ 3034/* SCSI interface functions */
3038 3035
3039static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq, 3036static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
3040 int gdth_from_wait, int* pIndex) 3037 int gdth_from_wait, int* pIndex)
3041{ 3038{
3042 gdt6m_dpram_str __iomem *dp6m_ptr = NULL; 3039 gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
@@ -3054,7 +3051,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
3054 int act_int_coal = 0; 3051 int act_int_coal = 0;
3055#endif 3052#endif
3056 3053
3057 TRACE(("gdth_interrupt() IRQ %d\n",irq)); 3054 TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));
3058 3055
3059 /* if polling and not from gdth_wait() -> return */ 3056 /* if polling and not from gdth_wait() -> return */
3060 if (gdth_polling) { 3057 if (gdth_polling) {
@@ -3067,7 +3064,8 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
3067 spin_lock_irqsave(&ha->smp_lock, flags); 3064 spin_lock_irqsave(&ha->smp_lock, flags);
3068 3065
3069 /* search controller */ 3066 /* search controller */
3070 if (0 == (IStatus = gdth_get_status(ha, irq))) { 3067 IStatus = gdth_get_status(ha);
3068 if (IStatus == 0) {
3071 /* spurious interrupt */ 3069 /* spurious interrupt */
3072 if (!gdth_polling) 3070 if (!gdth_polling)
3073 spin_unlock_irqrestore(&ha->smp_lock, flags); 3071 spin_unlock_irqrestore(&ha->smp_lock, flags);
@@ -3294,9 +3292,9 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
3294 3292
3295static irqreturn_t gdth_interrupt(int irq, void *dev_id) 3293static irqreturn_t gdth_interrupt(int irq, void *dev_id)
3296{ 3294{
3297 gdth_ha_str *ha = (gdth_ha_str *)dev_id; 3295 gdth_ha_str *ha = dev_id;
3298 3296
3299 return __gdth_interrupt(ha, irq, false, NULL); 3297 return __gdth_interrupt(ha, false, NULL);
3300} 3298}
3301 3299
3302static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, 3300static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
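The gdth change drops the irq argument that was threaded through __gdth_interrupt() and gdth_get_status(): the dev_id cookie handed to request_irq() already identifies the controller, so re-checking ha->irq against the vector was redundant. The usual shape of such a handler, with assumed names:

    static irqreturn_t my_intr(int irq, void *dev_id)
    {
            struct my_ha *ha = dev_id;  /* pointer registered with request_irq() */

            if (!hw_interrupt_pending(ha))  /* hypothetical "is it ours?" test */
                    return IRQ_NONE;
            /* ... acknowledge and service the controller ... */
            return IRQ_HANDLED;
    }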
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 6325115e5b3d..5ea1f986220c 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -54,8 +54,7 @@ static struct class shost_class = {
54}; 54};
55 55
56/** 56/**
57 * scsi_host_set_state - Take the given host through the host 57 * scsi_host_set_state - Take the given host through the host state model.
58 * state model.
59 * @shost: scsi host to change the state of. 58 * @shost: scsi host to change the state of.
60 * @state: state to change to. 59 * @state: state to change to.
61 * 60 *
@@ -440,7 +439,6 @@ static int __scsi_host_match(struct class_device *cdev, void *data)
440 439
441/** 440/**
442 * scsi_host_lookup - get a reference to a Scsi_Host by host no 441 * scsi_host_lookup - get a reference to a Scsi_Host by host no
443 *
444 * @hostnum: host number to locate 442 * @hostnum: host number to locate
445 * 443 *
446 * Return value: 444 * Return value:
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 0844331abb87..e7b2f3575ce9 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * HighPoint RR3xxx controller driver for Linux 2 * HighPoint RR3xxx/4xxx controller driver for Linux
3 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved. 3 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -38,80 +38,84 @@
38#include "hptiop.h" 38#include "hptiop.h"
39 39
40MODULE_AUTHOR("HighPoint Technologies, Inc."); 40MODULE_AUTHOR("HighPoint Technologies, Inc.");
41MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver"); 41MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
42 42
43static char driver_name[] = "hptiop"; 43static char driver_name[] = "hptiop";
44static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; 44static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
45static const char driver_ver[] = "v1.2 (070830)"; 45static const char driver_ver[] = "v1.3 (071203)";
46 46
47static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); 47static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
48static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); 48static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
49 struct hpt_iop_request_scsi_command *req);
50static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
51static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
49static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); 52static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
50 53
51static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop) 54static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
52{
53 readl(&iop->outbound_intstatus);
54}
55
56static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
57{ 55{
58 u32 req = 0; 56 u32 req = 0;
59 int i; 57 int i;
60 58
61 for (i = 0; i < millisec; i++) { 59 for (i = 0; i < millisec; i++) {
62 req = readl(&iop->inbound_queue); 60 req = readl(&hba->u.itl.iop->inbound_queue);
63 if (req != IOPMU_QUEUE_EMPTY) 61 if (req != IOPMU_QUEUE_EMPTY)
64 break; 62 break;
65 msleep(1); 63 msleep(1);
66 } 64 }
67 65
68 if (req != IOPMU_QUEUE_EMPTY) { 66 if (req != IOPMU_QUEUE_EMPTY) {
69 writel(req, &iop->outbound_queue); 67 writel(req, &hba->u.itl.iop->outbound_queue);
70 hptiop_pci_posting_flush(iop); 68 readl(&hba->u.itl.iop->outbound_intstatus);
71 return 0; 69 return 0;
72 } 70 }
73 71
74 return -1; 72 return -1;
75} 73}
76 74
77static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag) 75static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
76{
77 return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
78}
79
80static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
78{ 81{
79 if (tag & IOPMU_QUEUE_ADDR_HOST_BIT) 82 if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
80 return hptiop_host_request_callback(hba, 83 hptiop_host_request_callback_itl(hba,
81 tag & ~IOPMU_QUEUE_ADDR_HOST_BIT); 84 tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
82 else 85 else
83 return hptiop_iop_request_callback(hba, tag); 86 hptiop_iop_request_callback_itl(hba, tag);
84} 87}
85 88
86static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba) 89static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
87{ 90{
88 u32 req; 91 u32 req;
89 92
90 while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) { 93 while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
94 IOPMU_QUEUE_EMPTY) {
91 95
92 if (req & IOPMU_QUEUE_MASK_HOST_BITS) 96 if (req & IOPMU_QUEUE_MASK_HOST_BITS)
93 hptiop_request_callback(hba, req); 97 hptiop_request_callback_itl(hba, req);
94 else { 98 else {
95 struct hpt_iop_request_header __iomem * p; 99 struct hpt_iop_request_header __iomem * p;
96 100
97 p = (struct hpt_iop_request_header __iomem *) 101 p = (struct hpt_iop_request_header __iomem *)
98 ((char __iomem *)hba->iop + req); 102 ((char __iomem *)hba->u.itl.iop + req);
99 103
100 if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) { 104 if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
101 if (readl(&p->context)) 105 if (readl(&p->context))
102 hptiop_request_callback(hba, req); 106 hptiop_request_callback_itl(hba, req);
103 else 107 else
104 writel(1, &p->context); 108 writel(1, &p->context);
105 } 109 }
106 else 110 else
107 hptiop_request_callback(hba, req); 111 hptiop_request_callback_itl(hba, req);
108 } 112 }
109 } 113 }
110} 114}
111 115
112static int __iop_intr(struct hptiop_hba *hba) 116static int iop_intr_itl(struct hptiop_hba *hba)
113{ 117{
114 struct hpt_iopmu __iomem *iop = hba->iop; 118 struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
115 u32 status; 119 u32 status;
116 int ret = 0; 120 int ret = 0;
117 121
@@ -119,6 +123,7 @@ static int __iop_intr(struct hptiop_hba *hba)
119 123
120 if (status & IOPMU_OUTBOUND_INT_MSG0) { 124 if (status & IOPMU_OUTBOUND_INT_MSG0) {
121 u32 msg = readl(&iop->outbound_msgaddr0); 125 u32 msg = readl(&iop->outbound_msgaddr0);
126
122 dprintk("received outbound msg %x\n", msg); 127 dprintk("received outbound msg %x\n", msg);
123 writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus); 128 writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
124 hptiop_message_callback(hba, msg); 129 hptiop_message_callback(hba, msg);
@@ -126,31 +131,115 @@ static int __iop_intr(struct hptiop_hba *hba)
126 } 131 }
127 132
128 if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { 133 if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
129 hptiop_drain_outbound_queue(hba); 134 hptiop_drain_outbound_queue_itl(hba);
135 ret = 1;
136 }
137
138 return ret;
139}
140
141static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
142{
143 u32 outbound_tail = readl(&mu->outbound_tail);
144 u32 outbound_head = readl(&mu->outbound_head);
145
146 if (outbound_tail != outbound_head) {
147 u64 p;
148
149 memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
150 outbound_tail++;
151
152 if (outbound_tail == MVIOP_QUEUE_LEN)
153 outbound_tail = 0;
154 writel(outbound_tail, &mu->outbound_tail);
155 return p;
156 } else
157 return 0;
158}
159
160static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
161{
162 u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
163 u32 head = inbound_head + 1;
164
165 if (head == MVIOP_QUEUE_LEN)
166 head = 0;
167
168 memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
169 writel(head, &hba->u.mv.mu->inbound_head);
170 writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
171 &hba->u.mv.regs->inbound_doorbell);
172}
173
174static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
175{
176 u32 req_type = (tag >> 5) & 0x7;
177 struct hpt_iop_request_scsi_command *req;
178
179 dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);
180
181 BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);
182
183 switch (req_type) {
184 case IOP_REQUEST_TYPE_GET_CONFIG:
185 case IOP_REQUEST_TYPE_SET_CONFIG:
186 hba->msg_done = 1;
187 break;
188
189 case IOP_REQUEST_TYPE_SCSI_COMMAND:
190 req = hba->reqs[tag >> 8].req_virt;
191 if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
192 req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
193
194 hptiop_finish_scsi_req(hba, tag>>8, req);
195 break;
196
197 default:
198 break;
199 }
200}
201
202static int iop_intr_mv(struct hptiop_hba *hba)
203{
204 u32 status;
205 int ret = 0;
206
207 status = readl(&hba->u.mv.regs->outbound_doorbell);
208 writel(~status, &hba->u.mv.regs->outbound_doorbell);
209
210 if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
211 u32 msg;
212 msg = readl(&hba->u.mv.mu->outbound_msg);
213 dprintk("received outbound msg %x\n", msg);
214 hptiop_message_callback(hba, msg);
215 ret = 1;
216 }
217
218 if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
219 u64 tag;
220
221 while ((tag = mv_outbound_read(hba->u.mv.mu)))
222 hptiop_request_callback_mv(hba, tag);
130 ret = 1; 223 ret = 1;
131 } 224 }
132 225
133 return ret; 226 return ret;
134} 227}
135 228
136static int iop_send_sync_request(struct hptiop_hba *hba, 229static int iop_send_sync_request_itl(struct hptiop_hba *hba,
137 void __iomem *_req, u32 millisec) 230 void __iomem *_req, u32 millisec)
138{ 231{
139 struct hpt_iop_request_header __iomem *req = _req; 232 struct hpt_iop_request_header __iomem *req = _req;
140 u32 i; 233 u32 i;
141 234
142 writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, 235 writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
143 &req->flags);
144
145 writel(0, &req->context); 236 writel(0, &req->context);
146 237 writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
147 writel((unsigned long)req - (unsigned long)hba->iop, 238 &hba->u.itl.iop->inbound_queue);
148 &hba->iop->inbound_queue); 239 readl(&hba->u.itl.iop->outbound_intstatus);
149
150 hptiop_pci_posting_flush(hba->iop);
151 240
152 for (i = 0; i < millisec; i++) { 241 for (i = 0; i < millisec; i++) {
153 __iop_intr(hba); 242 iop_intr_itl(hba);
154 if (readl(&req->context)) 243 if (readl(&req->context))
155 return 0; 244 return 0;
156 msleep(1); 245 msleep(1);
@@ -159,19 +248,49 @@ static int iop_send_sync_request(struct hptiop_hba *hba,
159 return -1; 248 return -1;
160} 249}
161 250
162static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec) 251static int iop_send_sync_request_mv(struct hptiop_hba *hba,
252 u32 size_bits, u32 millisec)
163{ 253{
254 struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
164 u32 i; 255 u32 i;
165 256
166 hba->msg_done = 0; 257 hba->msg_done = 0;
258 reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
259 mv_inbound_write(hba->u.mv.internal_req_phy |
260 MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);
261
262 for (i = 0; i < millisec; i++) {
263 iop_intr_mv(hba);
264 if (hba->msg_done)
265 return 0;
266 msleep(1);
267 }
268 return -1;
269}
270
271static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
272{
273 writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
274 readl(&hba->u.itl.iop->outbound_intstatus);
275}
276
277static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
278{
279 writel(msg, &hba->u.mv.mu->inbound_msg);
280 writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
281 readl(&hba->u.mv.regs->inbound_doorbell);
282}
167 283
168 writel(msg, &hba->iop->inbound_msgaddr0); 284static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
285{
286 u32 i;
169 287
170 hptiop_pci_posting_flush(hba->iop); 288 hba->msg_done = 0;
289 hba->ops->post_msg(hba, msg);
171 290
172 for (i = 0; i < millisec; i++) { 291 for (i = 0; i < millisec; i++) {
173 spin_lock_irq(hba->host->host_lock); 292 spin_lock_irq(hba->host->host_lock);
174 __iop_intr(hba); 293 hba->ops->iop_intr(hba);
175 spin_unlock_irq(hba->host->host_lock); 294 spin_unlock_irq(hba->host->host_lock);
176 if (hba->msg_done) 295 if (hba->msg_done)
177 break; 296 break;
@@ -181,46 +300,67 @@ static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
181 return hba->msg_done? 0 : -1; 300 return hba->msg_done? 0 : -1;
182} 301}
183 302
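Both synchronous paths share one shape: post the request or message, then poll the adapter's own interrupt routine once per millisecond until a done flag flips or the millisecond budget is exhausted (iop_send_sync_msg takes host_lock around each poll because the real IRQ handler contends for the same lock). A generic userspace sketch of that loop, assuming a caller-supplied service hook:

#include <stdbool.h>
#include <unistd.h>

/*
 * Poll a completion-servicing hook once per millisecond until it reports
 * done or the budget runs out; 0 on success, -1 on timeout, matching the
 * driver's convention.
 */
static int poll_done(bool (*service)(void *ctx), void *ctx, unsigned int ms)
{
	unsigned int i;

	for (i = 0; i < ms; i++) {
		if (service(ctx))	/* e.g. iop_intr + msg_done test */
			return 0;
		usleep(1000);		/* stands in for msleep(1) */
	}
	return -1;
}

static bool third_time_lucky(void *ctx)
{
	int *calls = ctx;
	return ++*calls >= 3;
}

int main(void)
{
	int calls = 0;
	return poll_done(third_time_lucky, &calls, 10);	/* returns 0 */
}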
184static int iop_get_config(struct hptiop_hba *hba, 303static int iop_get_config_itl(struct hptiop_hba *hba,
185 struct hpt_iop_request_get_config *config) 304 struct hpt_iop_request_get_config *config)
186{ 305{
187 u32 req32; 306 u32 req32;
188 struct hpt_iop_request_get_config __iomem *req; 307 struct hpt_iop_request_get_config __iomem *req;
189 308
190 req32 = readl(&hba->iop->inbound_queue); 309 req32 = readl(&hba->u.itl.iop->inbound_queue);
191 if (req32 == IOPMU_QUEUE_EMPTY) 310 if (req32 == IOPMU_QUEUE_EMPTY)
192 return -1; 311 return -1;
193 312
194 req = (struct hpt_iop_request_get_config __iomem *) 313 req = (struct hpt_iop_request_get_config __iomem *)
195 ((unsigned long)hba->iop + req32); 314 ((unsigned long)hba->u.itl.iop + req32);
196 315
197 writel(0, &req->header.flags); 316 writel(0, &req->header.flags);
198 writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type); 317 writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
199 writel(sizeof(struct hpt_iop_request_get_config), &req->header.size); 318 writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
200 writel(IOP_RESULT_PENDING, &req->header.result); 319 writel(IOP_RESULT_PENDING, &req->header.result);
201 320
202 if (iop_send_sync_request(hba, req, 20000)) { 321 if (iop_send_sync_request_itl(hba, req, 20000)) {
203 dprintk("Get config send cmd failed\n"); 322 dprintk("Get config send cmd failed\n");
204 return -1; 323 return -1;
205 } 324 }
206 325
207 memcpy_fromio(config, req, sizeof(*config)); 326 memcpy_fromio(config, req, sizeof(*config));
208 writel(req32, &hba->iop->outbound_queue); 327 writel(req32, &hba->u.itl.iop->outbound_queue);
328 return 0;
329}
330
331static int iop_get_config_mv(struct hptiop_hba *hba,
332 struct hpt_iop_request_get_config *config)
333{
334 struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;
335
336 req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
337 req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
338 req->header.size =
339 cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
340 req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
341 req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_GET_CONFIG<<5);
342
343 if (iop_send_sync_request_mv(hba, 0, 20000)) {
344 dprintk("Get config send cmd failed\n");
345 return -1;
346 }
347
348 memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
209 return 0; 349 return 0;
210} 350}
211 351
212static int iop_set_config(struct hptiop_hba *hba, 352static int iop_set_config_itl(struct hptiop_hba *hba,
213 struct hpt_iop_request_set_config *config) 353 struct hpt_iop_request_set_config *config)
214{ 354{
215 u32 req32; 355 u32 req32;
216 struct hpt_iop_request_set_config __iomem *req; 356 struct hpt_iop_request_set_config __iomem *req;
217 357
218 req32 = readl(&hba->iop->inbound_queue); 358 req32 = readl(&hba->u.itl.iop->inbound_queue);
219 if (req32 == IOPMU_QUEUE_EMPTY) 359 if (req32 == IOPMU_QUEUE_EMPTY)
220 return -1; 360 return -1;
221 361
222 req = (struct hpt_iop_request_set_config __iomem *) 362 req = (struct hpt_iop_request_set_config __iomem *)
223 ((unsigned long)hba->iop + req32); 363 ((unsigned long)hba->u.itl.iop + req32);
224 364
225 memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header), 365 memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
226 (u8 *)config + sizeof(struct hpt_iop_request_header), 366 (u8 *)config + sizeof(struct hpt_iop_request_header),
@@ -232,22 +372,52 @@ static int iop_set_config(struct hptiop_hba *hba,
232 writel(sizeof(struct hpt_iop_request_set_config), &req->header.size); 372 writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
233 writel(IOP_RESULT_PENDING, &req->header.result); 373 writel(IOP_RESULT_PENDING, &req->header.result);
234 374
235 if (iop_send_sync_request(hba, req, 20000)) { 375 if (iop_send_sync_request_itl(hba, req, 20000)) {
236 dprintk("Set config send cmd failed\n"); 376 dprintk("Set config send cmd failed\n");
237 return -1; 377 return -1;
238 } 378 }
239 379
240 writel(req32, &hba->iop->outbound_queue); 380 writel(req32, &hba->u.itl.iop->outbound_queue);
241 return 0; 381 return 0;
242} 382}
243 383
244static int hptiop_initialize_iop(struct hptiop_hba *hba) 384static int iop_set_config_mv(struct hptiop_hba *hba,
385 struct hpt_iop_request_set_config *config)
245{ 386{
246 struct hpt_iopmu __iomem *iop = hba->iop; 387 struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;
247 388
248 /* enable interrupts */ 389 memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
390 req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
391 req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
392 req->header.size =
393 cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
394 req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
395 req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_SET_CONFIG<<5);
396
397 if (iop_send_sync_request_mv(hba, 0, 20000)) {
398 dprintk("Set config send cmd failed\n");
399 return -1;
400 }
401
402 return 0;
403}
404
405static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
406{
249 writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0), 407 writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
250 &iop->outbound_intmask); 408 &hba->u.itl.iop->outbound_intmask);
409}
410
411static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
412{
413 writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
414 &hba->u.mv.regs->outbound_intmask);
415}
416
417static int hptiop_initialize_iop(struct hptiop_hba *hba)
418{
419 /* enable interrupts */
420 hba->ops->enable_intr(hba);
251 421
252 hba->initialized = 1; 422 hba->initialized = 1;
253 423
@@ -261,37 +431,74 @@ static int hptiop_initialize_iop(struct hptiop_hba *hba)
261 return 0; 431 return 0;
262} 432}
263 433
264static int hptiop_map_pci_bar(struct hptiop_hba *hba) 434static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
265{ 435{
266 u32 mem_base_phy, length; 436 u32 mem_base_phy, length;
267 void __iomem *mem_base_virt; 437 void __iomem *mem_base_virt;
438
268 struct pci_dev *pcidev = hba->pcidev; 439 struct pci_dev *pcidev = hba->pcidev;
269 440
270 if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) { 441
442 if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
271 printk(KERN_ERR "scsi%d: pci resource invalid\n", 443 printk(KERN_ERR "scsi%d: pci resource invalid\n",
272 hba->host->host_no); 444 hba->host->host_no);
273 return -1; 445 return 0;
274 } 446 }
275 447
276 mem_base_phy = pci_resource_start(pcidev, 0); 448 mem_base_phy = pci_resource_start(pcidev, index);
277 length = pci_resource_len(pcidev, 0); 449 length = pci_resource_len(pcidev, index);
278 mem_base_virt = ioremap(mem_base_phy, length); 450 mem_base_virt = ioremap(mem_base_phy, length);
279 451
280 if (!mem_base_virt) { 452 if (!mem_base_virt) {
281 printk(KERN_ERR "scsi%d: Failed to ioremap memory space\n", 453 printk(KERN_ERR "scsi%d: Failed to ioremap memory space\n",
282 hba->host->host_no); 454 hba->host->host_no);
455 return 0;
456 }
457 return mem_base_virt;
458}
459
460static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
461{
462 hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
463 if (hba->u.itl.iop)
464 return 0;
465 else
466 return -1;
467}
468
469static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
470{
471 iounmap(hba->u.itl.iop);
472}
473
474static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
475{
476 hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
477 if (hba->u.mv.regs == 0)
478 return -1;
479
480 hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
481 if (hba->u.mv.mu == 0) {
482 iounmap(hba->u.mv.regs);
283 return -1; 483 return -1;
284 } 484 }
285 485
286 hba->iop = mem_base_virt;
287 dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
288 return 0; 486 return 0;
289} 487}
290 488
489static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
490{
491 iounmap(hba->u.mv.regs);
492 iounmap(hba->u.mv.mu);
493}
494
291static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg) 495static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
292{ 496{
293 dprintk("iop message 0x%x\n", msg); 497 dprintk("iop message 0x%x\n", msg);
294 498
499 if (msg == IOPMU_INBOUND_MSG0_NOP)
500 hba->msg_done = 1;
501
295 if (!hba->initialized) 502 if (!hba->initialized)
296 return; 503 return;
297 504
@@ -303,7 +510,7 @@ static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
303 hba->msg_done = 1; 510 hba->msg_done = 1;
304} 511}
305 512
306static inline struct hptiop_request *get_req(struct hptiop_hba *hba) 513static struct hptiop_request *get_req(struct hptiop_hba *hba)
307{ 514{
308 struct hptiop_request *ret; 515 struct hptiop_request *ret;
309 516
@@ -316,30 +523,19 @@ static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
316 return ret; 523 return ret;
317} 524}
318 525
319static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req) 526static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
320{ 527{
321 dprintk("free_req(%d, %p)\n", req->index, req); 528 dprintk("free_req(%d, %p)\n", req->index, req);
322 req->next = hba->req_list; 529 req->next = hba->req_list;
323 hba->req_list = req; 530 hba->req_list = req;
324} 531}
325 532
326static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag) 533static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
534 struct hpt_iop_request_scsi_command *req)
327{ 535{
328 struct hpt_iop_request_scsi_command *req;
329 struct scsi_cmnd *scp; 536 struct scsi_cmnd *scp;
330 u32 tag;
331
332 if (hba->iopintf_v2) {
333 tag = _tag & ~ IOPMU_QUEUE_REQUEST_RESULT_BIT;
334 req = hba->reqs[tag].req_virt;
335 if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
336 req->header.result = IOP_RESULT_SUCCESS;
337 } else {
338 tag = _tag;
339 req = hba->reqs[tag].req_virt;
340 }
341 537
342 dprintk("hptiop_host_request_callback: req=%p, type=%d, " 538 dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
343 "result=%d, context=0x%x tag=%d\n", 539 "result=%d, context=0x%x tag=%d\n",
344 req, req->header.type, req->header.result, 540 req, req->header.type, req->header.result,
345 req->header.context, tag); 541 req->header.context, tag);
@@ -354,6 +550,8 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
354 550
355 switch (le32_to_cpu(req->header.result)) { 551 switch (le32_to_cpu(req->header.result)) {
356 case IOP_RESULT_SUCCESS: 552 case IOP_RESULT_SUCCESS:
553 scsi_set_resid(scp,
554 scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
357 scp->result = (DID_OK<<16); 555 scp->result = (DID_OK<<16);
358 break; 556 break;
359 case IOP_RESULT_BAD_TARGET: 557 case IOP_RESULT_BAD_TARGET:
@@ -371,12 +569,12 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
371 case IOP_RESULT_INVALID_REQUEST: 569 case IOP_RESULT_INVALID_REQUEST:
372 scp->result = (DID_ABORT<<16); 570 scp->result = (DID_ABORT<<16);
373 break; 571 break;
374 case IOP_RESULT_MODE_SENSE_CHECK_CONDITION: 572 case IOP_RESULT_CHECK_CONDITION:
573 scsi_set_resid(scp,
574 scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
375 scp->result = SAM_STAT_CHECK_CONDITION; 575 scp->result = SAM_STAT_CHECK_CONDITION;
376 memset(&scp->sense_buffer,
377 0, sizeof(scp->sense_buffer));
378 memcpy(&scp->sense_buffer, &req->sg_list, 576 memcpy(&scp->sense_buffer, &req->sg_list,
379 min(sizeof(scp->sense_buffer), 577 min_t(size_t, SCSI_SENSE_BUFFERSIZE,
380 le32_to_cpu(req->dataxfer_length))); 578 le32_to_cpu(req->dataxfer_length)));
381 break; 579 break;
382 580
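Two behavioural fixes land in this hunk: the residual is now reported for both SUCCESS and CHECK_CONDITION completions (resid = bytes the midlayer asked for minus bytes the IOP actually moved), and the sense copy is clamped with min_t(size_t, SCSI_SENSE_BUFFERSIZE, ...) so an oversized firmware reply cannot overrun the midlayer's sense buffer. A sketch of the arithmetic with made-up sizes:

#include <stdio.h>
#include <string.h>

#define SENSE_BUFFERSIZE 96	/* stands in for SCSI_SENSE_BUFFERSIZE */

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	unsigned int bufflen = 4096;	/* scsi_bufflen(): bytes requested */
	unsigned int dataxfer = 4000;	/* dataxfer_length: bytes moved */
	unsigned char sense[SENSE_BUFFERSIZE];
	unsigned char fw_reply[260];	/* sense data in the sg_list area */

	memset(fw_reply, 0x70, sizeof(fw_reply));
	printf("resid = %u\n", bufflen - dataxfer);	/* 96 */

	/* clamped copy: at most SENSE_BUFFERSIZE bytes land in sense[] */
	memcpy(sense, fw_reply, min_sz(sizeof(sense), sizeof(fw_reply)));
	return 0;
}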
@@ -391,15 +589,33 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
391 free_req(hba, &hba->reqs[tag]); 589 free_req(hba, &hba->reqs[tag]);
392} 590}
393 591
394void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag) 592static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
593{
594 struct hpt_iop_request_scsi_command *req;
595 u32 tag;
596
597 if (hba->iopintf_v2) {
598 tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
599 req = hba->reqs[tag].req_virt;
600 if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
601 req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
602 } else {
603 tag = _tag;
604 req = hba->reqs[tag].req_virt;
605 }
606
607 hptiop_finish_scsi_req(hba, tag, req);
608}
609
610void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
395{ 611{
396 struct hpt_iop_request_header __iomem *req; 612 struct hpt_iop_request_header __iomem *req;
397 struct hpt_iop_request_ioctl_command __iomem *p; 613 struct hpt_iop_request_ioctl_command __iomem *p;
398 struct hpt_ioctl_k *arg; 614 struct hpt_ioctl_k *arg;
399 615
400 req = (struct hpt_iop_request_header __iomem *) 616 req = (struct hpt_iop_request_header __iomem *)
401 ((unsigned long)hba->iop + tag); 617 ((unsigned long)hba->u.itl.iop + tag);
402 dprintk("hptiop_iop_request_callback: req=%p, type=%d, " 618 dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
403 "result=%d, context=0x%x tag=%d\n", 619 "result=%d, context=0x%x tag=%d\n",
404 req, readl(&req->type), readl(&req->result), 620 req, readl(&req->type), readl(&req->result),
405 readl(&req->context), tag); 621 readl(&req->context), tag);
@@ -427,7 +643,7 @@ void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
427 arg->result = HPT_IOCTL_RESULT_FAILED; 643 arg->result = HPT_IOCTL_RESULT_FAILED;
428 644
429 arg->done(arg); 645 arg->done(arg);
430 writel(tag, &hba->iop->outbound_queue); 646 writel(tag, &hba->u.itl.iop->outbound_queue);
431} 647}
432 648
433static irqreturn_t hptiop_intr(int irq, void *dev_id) 649static irqreturn_t hptiop_intr(int irq, void *dev_id)
@@ -437,7 +653,7 @@ static irqreturn_t hptiop_intr(int irq, void *dev_id)
437 unsigned long flags; 653 unsigned long flags;
438 654
439 spin_lock_irqsave(hba->host->host_lock, flags); 655 spin_lock_irqsave(hba->host->host_lock, flags);
440 handled = __iop_intr(hba); 656 handled = hba->ops->iop_intr(hba);
441 spin_unlock_irqrestore(hba->host->host_lock, flags); 657 spin_unlock_irqrestore(hba->host->host_lock, flags);
442 658
443 return handled; 659 return handled;
@@ -469,6 +685,57 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
469 return HPT_SCP(scp)->sgcnt; 685 return HPT_SCP(scp)->sgcnt;
470} 686}
471 687
688static void hptiop_post_req_itl(struct hptiop_hba *hba,
689 struct hptiop_request *_req)
690{
691 struct hpt_iop_request_header *reqhdr = _req->req_virt;
692
693 reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
694 (u32)_req->index);
695 reqhdr->context_hi32 = 0;
696
697 if (hba->iopintf_v2) {
698 u32 size, size_bits;
699
700 size = le32_to_cpu(reqhdr->size);
701 if (size < 256)
702 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
703 else if (size < 512)
704 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
705 else
706 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
707 IOPMU_QUEUE_ADDR_HOST_BIT;
708 writel(_req->req_shifted_phy | size_bits,
709 &hba->u.itl.iop->inbound_queue);
710 } else
711 writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
712 &hba->u.itl.iop->inbound_queue);
713}
714
715static void hptiop_post_req_mv(struct hptiop_hba *hba,
716 struct hptiop_request *_req)
717{
718 struct hpt_iop_request_header *reqhdr = _req->req_virt;
719 u32 size, size_bit;
720
721 reqhdr->context = cpu_to_le32(_req->index<<8 |
722 IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
723 reqhdr->context_hi32 = 0;
724 size = le32_to_cpu(reqhdr->size);
725
726 if (size <= 256)
727 size_bit = 0;
728 else if (size <= 256*2)
729 size_bit = 1;
730 else if (size <= 256*3)
731 size_bit = 2;
732 else
733 size_bit = 3;
734
735 mv_inbound_write((_req->req_shifted_phy << 5) |
736 MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
737}
738
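The two post_req variants encode the request size differently. The ITL path keeps the old two flag bits (REQUEST_SIZE_BIT alone for under 256 bytes, ADDR_HOST_BIT alone for 256-511, both for larger requests, in which case the IOP reads the real size from the header), while the MV path packs a saturating 2-bit count of 256-byte chunks into the low bits of the queue entry. A self-checking sketch of the MV computation:

#include <assert.h>

/* 2-bit size code: 0 for <=256 bytes, 1 for <=512, 2 for <=768, else 3
 * (the shape of the if/else ladder in hptiop_post_req_mv). */
static unsigned int mv_size_bits(unsigned int size)
{
	unsigned int chunks = (size + 255) / 256;	/* round up */

	if (chunks == 0)
		chunks = 1;
	if (chunks > 4)
		chunks = 4;
	return chunks - 1;
}

int main(void)
{
	assert(mv_size_bits(128) == 0);
	assert(mv_size_bits(256) == 0);
	assert(mv_size_bits(257) == 1);
	assert(mv_size_bits(700) == 2);
	assert(mv_size_bits(2000) == 3);
	return 0;
}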
472static int hptiop_queuecommand(struct scsi_cmnd *scp, 739static int hptiop_queuecommand(struct scsi_cmnd *scp,
473 void (*done)(struct scsi_cmnd *)) 740 void (*done)(struct scsi_cmnd *))
474{ 741{
@@ -518,9 +785,6 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
518 req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); 785 req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
519 req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND); 786 req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
520 req->header.result = cpu_to_le32(IOP_RESULT_PENDING); 787 req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
521 req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
522 (u32)_req->index);
523 req->header.context_hi32 = 0;
524 req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp)); 788 req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
525 req->channel = scp->device->channel; 789 req->channel = scp->device->channel;
526 req->target = scp->device->id; 790 req->target = scp->device->id;
@@ -531,21 +795,7 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
531 + sg_count * sizeof(struct hpt_iopsg)); 795 + sg_count * sizeof(struct hpt_iopsg));
532 796
533 memcpy(req->cdb, scp->cmnd, sizeof(req->cdb)); 797 memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
534 798 hba->ops->post_req(hba, _req);
535 if (hba->iopintf_v2) {
536 u32 size_bits;
537 if (req->header.size < 256)
538 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
539 else if (req->header.size < 512)
540 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
541 else
542 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
543 IOPMU_QUEUE_ADDR_HOST_BIT;
544 writel(_req->req_shifted_phy | size_bits, &hba->iop->inbound_queue);
545 } else
546 writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
547 &hba->iop->inbound_queue);
548
549 return 0; 799 return 0;
550 800
551cmd_done: 801cmd_done:
@@ -563,9 +813,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
563{ 813{
564 if (atomic_xchg(&hba->resetting, 1) == 0) { 814 if (atomic_xchg(&hba->resetting, 1) == 0) {
565 atomic_inc(&hba->reset_count); 815 atomic_inc(&hba->reset_count);
566 writel(IOPMU_INBOUND_MSG0_RESET, 816 hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
567 &hba->iop->inbound_msgaddr0);
568 hptiop_pci_posting_flush(hba->iop);
569 } 817 }
570 818
571 wait_event_timeout(hba->reset_wq, 819 wait_event_timeout(hba->reset_wq,
@@ -601,8 +849,10 @@ static int hptiop_reset(struct scsi_cmnd *scp)
601static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, 849static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
602 int queue_depth) 850 int queue_depth)
603{ 851{
604 if(queue_depth > 256) 852 struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
605 queue_depth = 256; 853
854 if (queue_depth > hba->max_requests)
855 queue_depth = hba->max_requests;
606 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); 856 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
607 return queue_depth; 857 return queue_depth;
608} 858}
@@ -663,6 +913,26 @@ static struct scsi_host_template driver_template = {
663 .change_queue_depth = hptiop_adjust_disk_queue_depth, 913 .change_queue_depth = hptiop_adjust_disk_queue_depth,
664}; 914};
665 915
916static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
917{
918 hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
919 0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
920 if (hba->u.mv.internal_req)
921 return 0;
922 else
923 return -1;
924}
925
926static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
927{
928 if (hba->u.mv.internal_req) {
929 dma_free_coherent(&hba->pcidev->dev, 0x800,
930 hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
931 return 0;
932 } else
933 return -1;
934}
935
666static int __devinit hptiop_probe(struct pci_dev *pcidev, 936static int __devinit hptiop_probe(struct pci_dev *pcidev,
667 const struct pci_device_id *id) 937 const struct pci_device_id *id)
668{ 938{
@@ -708,6 +978,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
708 978
709 hba = (struct hptiop_hba *)host->hostdata; 979 hba = (struct hptiop_hba *)host->hostdata;
710 980
981 hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
711 hba->pcidev = pcidev; 982 hba->pcidev = pcidev;
712 hba->host = host; 983 hba->host = host;
713 hba->initialized = 0; 984 hba->initialized = 0;
@@ -725,16 +996,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
725 host->n_io_port = 0; 996 host->n_io_port = 0;
726 host->irq = pcidev->irq; 997 host->irq = pcidev->irq;
727 998
728 if (hptiop_map_pci_bar(hba)) 999 if (hba->ops->map_pci_bar(hba))
729 goto free_scsi_host; 1000 goto free_scsi_host;
730 1001
731 if (iop_wait_ready(hba->iop, 20000)) { 1002 if (hba->ops->iop_wait_ready(hba, 20000)) {
732 printk(KERN_ERR "scsi%d: firmware not ready\n", 1003 printk(KERN_ERR "scsi%d: firmware not ready\n",
733 hba->host->host_no); 1004 hba->host->host_no);
734 goto unmap_pci_bar; 1005 goto unmap_pci_bar;
735 } 1006 }
736 1007
737 if (iop_get_config(hba, &iop_config)) { 1008 if (hba->ops->internal_memalloc) {
1009 if (hba->ops->internal_memalloc(hba)) {
1010 printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
1011 hba->host->host_no);
1012 goto unmap_pci_bar;
1013 }
1014 }
1015
1016 if (hba->ops->get_config(hba, &iop_config)) {
738 printk(KERN_ERR "scsi%d: get config failed\n", 1017 printk(KERN_ERR "scsi%d: get config failed\n",
739 hba->host->host_no); 1018 hba->host->host_no);
740 goto unmap_pci_bar; 1019 goto unmap_pci_bar;
@@ -770,7 +1049,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
770 set_config.vbus_id = cpu_to_le16(host->host_no); 1049 set_config.vbus_id = cpu_to_le16(host->host_no);
771 set_config.max_host_request_size = cpu_to_le16(req_size); 1050 set_config.max_host_request_size = cpu_to_le16(req_size);
772 1051
773 if (iop_set_config(hba, &set_config)) { 1052 if (hba->ops->set_config(hba, &set_config)) {
774 printk(KERN_ERR "scsi%d: set config failed\n", 1053 printk(KERN_ERR "scsi%d: set config failed\n",
775 hba->host->host_no); 1054 hba->host->host_no);
776 goto unmap_pci_bar; 1055 goto unmap_pci_bar;
@@ -839,21 +1118,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
839 1118
840free_request_mem: 1119free_request_mem:
841 dma_free_coherent(&hba->pcidev->dev, 1120 dma_free_coherent(&hba->pcidev->dev,
842 hba->req_size*hba->max_requests + 0x20, 1121 hba->req_size * hba->max_requests + 0x20,
843 hba->dma_coherent, hba->dma_coherent_handle); 1122 hba->dma_coherent, hba->dma_coherent_handle);
844 1123
845free_request_irq: 1124free_request_irq:
846 free_irq(hba->pcidev->irq, hba); 1125 free_irq(hba->pcidev->irq, hba);
847 1126
848unmap_pci_bar: 1127unmap_pci_bar:
849 iounmap(hba->iop); 1128 if (hba->ops->internal_memfree)
1129 hba->ops->internal_memfree(hba);
850 1130
851free_pci_regions: 1131 hba->ops->unmap_pci_bar(hba);
852 pci_release_regions(pcidev) ;
853 1132
854free_scsi_host: 1133free_scsi_host:
855 scsi_host_put(host); 1134 scsi_host_put(host);
856 1135
1136free_pci_regions:
1137 pci_release_regions(pcidev);
1138
857disable_pci_device: 1139disable_pci_device:
858 pci_disable_device(pcidev); 1140 pci_disable_device(pcidev);
859 1141
@@ -865,8 +1147,6 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
865{ 1147{
866 struct Scsi_Host *host = pci_get_drvdata(pcidev); 1148 struct Scsi_Host *host = pci_get_drvdata(pcidev);
867 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; 1149 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
868 struct hpt_iopmu __iomem *iop = hba->iop;
869 u32 int_mask;
870 1150
871 dprintk("hptiop_shutdown(%p)\n", hba); 1151 dprintk("hptiop_shutdown(%p)\n", hba);
872 1152
@@ -876,11 +1156,24 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
876 hba->host->host_no); 1156 hba->host->host_no);
877 1157
878 /* disable all outbound interrupts */ 1158 /* disable all outbound interrupts */
879 int_mask = readl(&iop->outbound_intmask); 1159 hba->ops->disable_intr(hba);
1160}
1161
1162static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
1163{
1164 u32 int_mask;
1165
1166 int_mask = readl(&hba->u.itl.iop->outbound_intmask);
880 writel(int_mask | 1167 writel(int_mask |
881 IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE, 1168 IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
882 &iop->outbound_intmask); 1169 &hba->u.itl.iop->outbound_intmask);
883 hptiop_pci_posting_flush(iop); 1170 readl(&hba->u.itl.iop->outbound_intmask);
1171}
1172
1173static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
1174{
1175 writel(0, &hba->u.mv.regs->outbound_intmask);
1176 readl(&hba->u.mv.regs->outbound_intmask);
884} 1177}
885 1178
886static void hptiop_remove(struct pci_dev *pcidev) 1179static void hptiop_remove(struct pci_dev *pcidev)
@@ -901,7 +1194,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
901 hba->dma_coherent, 1194 hba->dma_coherent,
902 hba->dma_coherent_handle); 1195 hba->dma_coherent_handle);
903 1196
904 iounmap(hba->iop); 1197 if (hba->ops->internal_memfree)
1198 hba->ops->internal_memfree(hba);
1199
1200 hba->ops->unmap_pci_bar(hba);
905 1201
906 pci_release_regions(hba->pcidev); 1202 pci_release_regions(hba->pcidev);
907 pci_set_drvdata(hba->pcidev, NULL); 1203 pci_set_drvdata(hba->pcidev, NULL);
@@ -910,11 +1206,50 @@ static void hptiop_remove(struct pci_dev *pcidev)
910 scsi_host_put(host); 1206 scsi_host_put(host);
911} 1207}
912 1208
1209static struct hptiop_adapter_ops hptiop_itl_ops = {
1210 .iop_wait_ready = iop_wait_ready_itl,
1211 .internal_memalloc = 0,
1212 .internal_memfree = 0,
1213 .map_pci_bar = hptiop_map_pci_bar_itl,
1214 .unmap_pci_bar = hptiop_unmap_pci_bar_itl,
1215 .enable_intr = hptiop_enable_intr_itl,
1216 .disable_intr = hptiop_disable_intr_itl,
1217 .get_config = iop_get_config_itl,
1218 .set_config = iop_set_config_itl,
1219 .iop_intr = iop_intr_itl,
1220 .post_msg = hptiop_post_msg_itl,
1221 .post_req = hptiop_post_req_itl,
1222};
1223
1224static struct hptiop_adapter_ops hptiop_mv_ops = {
1225 .iop_wait_ready = iop_wait_ready_mv,
1226 .internal_memalloc = hptiop_internal_memalloc_mv,
1227 .internal_memfree = hptiop_internal_memfree_mv,
1228 .map_pci_bar = hptiop_map_pci_bar_mv,
1229 .unmap_pci_bar = hptiop_unmap_pci_bar_mv,
1230 .enable_intr = hptiop_enable_intr_mv,
1231 .disable_intr = hptiop_disable_intr_mv,
1232 .get_config = iop_get_config_mv,
1233 .set_config = iop_set_config_mv,
1234 .iop_intr = iop_intr_mv,
1235 .post_msg = hptiop_post_msg_mv,
1236 .post_req = hptiop_post_req_mv,
1237};
1238
913static struct pci_device_id hptiop_id_table[] = { 1239static struct pci_device_id hptiop_id_table[] = {
914 { PCI_VDEVICE(TTI, 0x3220) }, 1240 { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
915 { PCI_VDEVICE(TTI, 0x3320) }, 1241 { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
916 { PCI_VDEVICE(TTI, 0x3520) }, 1242 { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
917 { PCI_VDEVICE(TTI, 0x4320) }, 1243 { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
1244 { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
1245 { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
1246 { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
1247 { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
1248 { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
1249 { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
1250 { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
1251 { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
1252 { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
918 {}, 1253 {},
919}; 1254};
920 1255
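This is the core refactoring of the patch: every controller-touching operation moves behind struct hptiop_adapter_ops, and the PCI ID table selects the right ops table by stashing its address in driver_data (a kernel_ulong_t), which hptiop_probe() casts back. A minimal userspace model of that dispatch pattern, with illustrative names:

#include <stdio.h>

struct adapter_ops {
	const char *name;
	int (*init)(void);
};

struct device_id {
	unsigned int vendor, device;
	unsigned long driver_data;	/* plays the role of kernel_ulong_t */
};

static int itl_init(void) { return 0; }
static struct adapter_ops itl_ops = { "itl", itl_init };

/* table side: store the ops pointer as an integer-sized cookie */
static struct device_id id_table[] = {
	{ 0x1103, 0x3220, (unsigned long)&itl_ops },
	{ 0, 0, 0 }
};

int main(void)
{
	/* probe side: cast driver_data back to the ops table */
	struct adapter_ops *ops = (struct adapter_ops *)id_table[0].driver_data;

	printf("%s -> %d\n", ops->name, ops->init());
	return 0;
}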
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 2a5e46e001cb..a0289f219752 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * HighPoint RR3xxx controller driver for Linux 2 * HighPoint RR3xxx/4xxx controller driver for Linux
3 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved. 3 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -18,8 +18,7 @@
18#ifndef _HPTIOP_H_ 18#ifndef _HPTIOP_H_
19#define _HPTIOP_H_ 19#define _HPTIOP_H_
20 20
21struct hpt_iopmu 21struct hpt_iopmu_itl {
22{
23 __le32 resrved0[4]; 22 __le32 resrved0[4];
24 __le32 inbound_msgaddr0; 23 __le32 inbound_msgaddr0;
25 __le32 inbound_msgaddr1; 24 __le32 inbound_msgaddr1;
@@ -54,6 +53,40 @@ struct hpt_iopmu
54#define IOPMU_INBOUND_INT_ERROR 8 53#define IOPMU_INBOUND_INT_ERROR 8
55#define IOPMU_INBOUND_INT_POSTQUEUE 0x10 54#define IOPMU_INBOUND_INT_POSTQUEUE 0x10
56 55
56#define MVIOP_QUEUE_LEN 512
57
58struct hpt_iopmu_mv {
59 __le32 inbound_head;
60 __le32 inbound_tail;
61 __le32 outbound_head;
62 __le32 outbound_tail;
63 __le32 inbound_msg;
64 __le32 outbound_msg;
65 __le32 reserve[10];
66 __le64 inbound_q[MVIOP_QUEUE_LEN];
67 __le64 outbound_q[MVIOP_QUEUE_LEN];
68};
69
70struct hpt_iopmv_regs {
71 __le32 reserved[0x20400 / 4];
72 __le32 inbound_doorbell;
73 __le32 inbound_intmask;
74 __le32 outbound_doorbell;
75 __le32 outbound_intmask;
76};
77
78#define MVIOP_MU_QUEUE_ADDR_HOST_MASK (~(0x1full))
79#define MVIOP_MU_QUEUE_ADDR_HOST_BIT 4
80
81#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32 0xffffffff
82#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT 1
83#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2
84
85#define MVIOP_MU_INBOUND_INT_MSG 1
86#define MVIOP_MU_INBOUND_INT_POSTQUEUE 2
87#define MVIOP_MU_OUTBOUND_INT_MSG 1
88#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2
89
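The MVIOP_MU_QUEUE_* constants describe how a 64-bit queue entry is shared between an address and flags: the request's bus address is 32-byte aligned, so its low five bits are free (hence the ~0x1f mask), and they carry the host-ownership bit plus, direction-dependent, either the size code or the result/return-context bits. A self-checking sketch of the layout:

#include <assert.h>
#include <stdint.h>

#define ADDR_MASK	(~0x1full)	/* mirrors MVIOP_MU_QUEUE_ADDR_HOST_MASK */
#define HOST_BIT	4		/* mirrors MVIOP_MU_QUEUE_ADDR_HOST_BIT */

/* Inbound entry: shifted physical address back in bits 63:5, flags below. */
static uint64_t make_entry(uint64_t shifted_phy, uint32_t size_code)
{
	return (shifted_phy << 5) | HOST_BIT | size_code;
}

int main(void)
{
	uint64_t e = make_entry(0x12345, 3);

	assert((e & ADDR_MASK) == (0x12345ull << 5));	/* address intact */
	assert(e & HOST_BIT);				/* host-owned */
	assert((e & 0x3) == 3);				/* size code */
	return 0;
}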
57enum hpt_iopmu_message { 90enum hpt_iopmu_message {
58 /* host-to-iop messages */ 91 /* host-to-iop messages */
59 IOPMU_INBOUND_MSG0_NOP = 0, 92 IOPMU_INBOUND_MSG0_NOP = 0,
@@ -72,8 +105,7 @@ enum hpt_iopmu_message {
72 IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff, 105 IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
73}; 106};
74 107
75struct hpt_iop_request_header 108struct hpt_iop_request_header {
76{
77 __le32 size; 109 __le32 size;
78 __le32 type; 110 __le32 type;
79 __le32 flags; 111 __le32 flags;
@@ -104,11 +136,10 @@ enum hpt_iop_result_type {
104 IOP_RESULT_RESET, 136 IOP_RESULT_RESET,
105 IOP_RESULT_INVALID_REQUEST, 137 IOP_RESULT_INVALID_REQUEST,
106 IOP_RESULT_BAD_TARGET, 138 IOP_RESULT_BAD_TARGET,
107 IOP_RESULT_MODE_SENSE_CHECK_CONDITION, 139 IOP_RESULT_CHECK_CONDITION,
108}; 140};
109 141
110struct hpt_iop_request_get_config 142struct hpt_iop_request_get_config {
111{
112 struct hpt_iop_request_header header; 143 struct hpt_iop_request_header header;
113 __le32 interface_version; 144 __le32 interface_version;
114 __le32 firmware_version; 145 __le32 firmware_version;
@@ -121,8 +152,7 @@ struct hpt_iop_request_get_config
121 __le32 sdram_size; 152 __le32 sdram_size;
122}; 153};
123 154
124struct hpt_iop_request_set_config 155struct hpt_iop_request_set_config {
125{
126 struct hpt_iop_request_header header; 156 struct hpt_iop_request_header header;
127 __le32 iop_id; 157 __le32 iop_id;
128 __le16 vbus_id; 158 __le16 vbus_id;
@@ -130,15 +160,13 @@ struct hpt_iop_request_set_config
130 __le32 reserve[6]; 160 __le32 reserve[6];
131}; 161};
132 162
133struct hpt_iopsg 163struct hpt_iopsg {
134{
135 __le32 size; 164 __le32 size;
136 __le32 eot; /* non-zero: end of table */ 165 __le32 eot; /* non-zero: end of table */
137 __le64 pci_address; 166 __le64 pci_address;
138}; 167};
139 168
140struct hpt_iop_request_block_command 169struct hpt_iop_request_block_command {
141{
142 struct hpt_iop_request_header header; 170 struct hpt_iop_request_header header;
143 u8 channel; 171 u8 channel;
144 u8 target; 172 u8 target;
@@ -156,8 +184,7 @@ struct hpt_iop_request_block_command
156#define IOP_BLOCK_COMMAND_FLUSH 4 184#define IOP_BLOCK_COMMAND_FLUSH 4
157#define IOP_BLOCK_COMMAND_SHUTDOWN 5 185#define IOP_BLOCK_COMMAND_SHUTDOWN 5
158 186
159struct hpt_iop_request_scsi_command 187struct hpt_iop_request_scsi_command {
160{
161 struct hpt_iop_request_header header; 188 struct hpt_iop_request_header header;
162 u8 channel; 189 u8 channel;
163 u8 target; 190 u8 target;
@@ -168,8 +195,7 @@ struct hpt_iop_request_scsi_command
168 struct hpt_iopsg sg_list[1]; 195 struct hpt_iopsg sg_list[1];
169}; 196};
170 197
171struct hpt_iop_request_ioctl_command 198struct hpt_iop_request_ioctl_command {
172{
173 struct hpt_iop_request_header header; 199 struct hpt_iop_request_header header;
174 __le32 ioctl_code; 200 __le32 ioctl_code;
175 __le32 inbuf_size; 201 __le32 inbuf_size;
@@ -182,11 +208,11 @@ struct hpt_iop_request_ioctl_command
182#define HPTIOP_MAX_REQUESTS 256u 208#define HPTIOP_MAX_REQUESTS 256u
183 209
184struct hptiop_request { 210struct hptiop_request {
185 struct hptiop_request * next; 211 struct hptiop_request *next;
186 void * req_virt; 212 void *req_virt;
187 u32 req_shifted_phy; 213 u32 req_shifted_phy;
188 struct scsi_cmnd * scp; 214 struct scsi_cmnd *scp;
189 int index; 215 int index;
190}; 216};
191 217
192struct hpt_scsi_pointer { 218struct hpt_scsi_pointer {
@@ -198,9 +224,21 @@ struct hpt_scsi_pointer {
198#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp) 224#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
199 225
200struct hptiop_hba { 226struct hptiop_hba {
201 struct hpt_iopmu __iomem * iop; 227 struct hptiop_adapter_ops *ops;
202 struct Scsi_Host * host; 228 union {
203 struct pci_dev * pcidev; 229 struct {
230 struct hpt_iopmu_itl __iomem *iop;
231 } itl;
232 struct {
233 struct hpt_iopmv_regs *regs;
234 struct hpt_iopmu_mv __iomem *mu;
235 void *internal_req;
236 dma_addr_t internal_req_phy;
237 } mv;
238 } u;
239
240 struct Scsi_Host *host;
241 struct pci_dev *pcidev;
204 242
205 /* IOP config info */ 243 /* IOP config info */
206 u32 interface_version; 244 u32 interface_version;
@@ -213,15 +251,15 @@ struct hptiop_hba {
213 251
214 u32 req_size; /* host-allocated request buffer size */ 252 u32 req_size; /* host-allocated request buffer size */
215 253
216 int iopintf_v2: 1; 254 u32 iopintf_v2: 1;
217 int initialized: 1; 255 u32 initialized: 1;
218 int msg_done: 1; 256 u32 msg_done: 1;
219 257
220 struct hptiop_request * req_list; 258 struct hptiop_request * req_list;
221 struct hptiop_request reqs[HPTIOP_MAX_REQUESTS]; 259 struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];
222 260
223 /* used to free allocated dma area */ 261 /* used to free allocated dma area */
224 void * dma_coherent; 262 void *dma_coherent;
225 dma_addr_t dma_coherent_handle; 263 dma_addr_t dma_coherent_handle;
226 264
227 atomic_t reset_count; 265 atomic_t reset_count;
@@ -231,19 +269,35 @@ struct hptiop_hba {
231 wait_queue_head_t ioctl_wq; 269 wait_queue_head_t ioctl_wq;
232}; 270};
233 271
234struct hpt_ioctl_k 272struct hpt_ioctl_k {
235{
236 struct hptiop_hba * hba; 273 struct hptiop_hba * hba;
237 u32 ioctl_code; 274 u32 ioctl_code;
238 u32 inbuf_size; 275 u32 inbuf_size;
239 u32 outbuf_size; 276 u32 outbuf_size;
240 void * inbuf; 277 void *inbuf;
241 void * outbuf; 278 void *outbuf;
242 u32 * bytes_returned; 279 u32 *bytes_returned;
243 void (*done)(struct hpt_ioctl_k *); 280 void (*done)(struct hpt_ioctl_k *);
244 int result; /* HPT_IOCTL_RESULT_ */ 281 int result; /* HPT_IOCTL_RESULT_ */
245}; 282};
246 283
284struct hptiop_adapter_ops {
285 int (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
286 int (*internal_memalloc)(struct hptiop_hba *hba);
287 int (*internal_memfree)(struct hptiop_hba *hba);
288 int (*map_pci_bar)(struct hptiop_hba *hba);
289 void (*unmap_pci_bar)(struct hptiop_hba *hba);
290 void (*enable_intr)(struct hptiop_hba *hba);
291 void (*disable_intr)(struct hptiop_hba *hba);
292 int (*get_config)(struct hptiop_hba *hba,
293 struct hpt_iop_request_get_config *config);
294 int (*set_config)(struct hptiop_hba *hba,
295 struct hpt_iop_request_set_config *config);
296 int (*iop_intr)(struct hptiop_hba *hba);
297 void (*post_msg)(struct hptiop_hba *hba, u32 msg);
298 void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
299};
300
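Two of these hooks are optional by design: internal_memalloc/internal_memfree are NULL in the ITL ops table, so hptiop_probe() and hptiop_remove() test the pointer before calling. A tiny sketch of that optional-hook convention, with illustrative names:

#include <stdio.h>

struct hooks {
	int (*setup)(void);	/* NULL when a variant needs no extra setup */
};

static int mv_setup(void) { puts("mv: allocating internal request"); return 0; }

static struct hooks itl_hooks = { .setup = NULL };
static struct hooks mv_hooks = { .setup = mv_setup };

static int run(const struct hooks *h)
{
	if (h->setup)		/* guard, exactly as hptiop_probe() does */
		return h->setup();
	return 0;		/* nothing to do for this variant */
}

int main(void)
{
	return run(&itl_hooks) | run(&mv_hooks);
}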
247#define HPT_IOCTL_RESULT_OK 0 301#define HPT_IOCTL_RESULT_OK 0
248#define HPT_IOCTL_RESULT_FAILED (-1) 302#define HPT_IOCTL_RESULT_FAILED (-1)
249 303
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 5f2396c03958..30819012898f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -629,6 +629,16 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
629 list_del(&evt_struct->list); 629 list_del(&evt_struct->list);
630 del_timer(&evt_struct->timer); 630 del_timer(&evt_struct->timer);
631 631
632 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
633 * Firmware will send a CRQ with a transport event (0xFF) to
634 * tell this client what has happened to the transport. This
635 * will be handled in ibmvscsi_handle_crq()
636 */
637 if (rc == H_CLOSED) {
638 dev_warn(hostdata->dev, "send warning. "
639 "Receive queue closed, will retry.\n");
640 goto send_busy;
641 }
632 dev_err(hostdata->dev, "send error %d\n", rc); 642 dev_err(hostdata->dev, "send error %d\n", rc);
633 atomic_inc(&hostdata->request_limit); 643 atomic_inc(&hostdata->request_limit);
634 goto send_error; 644 goto send_error;
@@ -976,58 +986,74 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
976 int rsp_rc; 986 int rsp_rc;
977 unsigned long flags; 987 unsigned long flags;
978 u16 lun = lun_from_dev(cmd->device); 988 u16 lun = lun_from_dev(cmd->device);
989 unsigned long wait_switch = 0;
979 990
980 /* First, find this command in our sent list so we can figure 991 /* First, find this command in our sent list so we can figure
981 * out the correct tag 992 * out the correct tag
982 */ 993 */
983 spin_lock_irqsave(hostdata->host->host_lock, flags); 994 spin_lock_irqsave(hostdata->host->host_lock, flags);
984 found_evt = NULL; 995 wait_switch = jiffies + (init_timeout * HZ);
985 list_for_each_entry(tmp_evt, &hostdata->sent, list) { 996 do {
986 if (tmp_evt->cmnd == cmd) { 997 found_evt = NULL;
987 found_evt = tmp_evt; 998 list_for_each_entry(tmp_evt, &hostdata->sent, list) {
988 break; 999 if (tmp_evt->cmnd == cmd) {
1000 found_evt = tmp_evt;
1001 break;
1002 }
989 } 1003 }
990 }
991 1004
992 if (!found_evt) { 1005 if (!found_evt) {
993 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1006 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
994 return SUCCESS; 1007 return SUCCESS;
995 } 1008 }
996 1009
997 evt = get_event_struct(&hostdata->pool); 1010 evt = get_event_struct(&hostdata->pool);
998 if (evt == NULL) { 1011 if (evt == NULL) {
999 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1012 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1000 sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n"); 1013 sdev_printk(KERN_ERR, cmd->device,
1001 return FAILED; 1014 "failed to allocate abort event\n");
1002 } 1015 return FAILED;
1016 }
1003 1017
1004 init_event_struct(evt, 1018 init_event_struct(evt,
1005 sync_completion, 1019 sync_completion,
1006 VIOSRP_SRP_FORMAT, 1020 VIOSRP_SRP_FORMAT,
1007 init_timeout); 1021 init_timeout);
1008 1022
1009 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1023 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1010 1024
1011 /* Set up an abort SRP command */ 1025 /* Set up an abort SRP command */
1012 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); 1026 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1013 tsk_mgmt->opcode = SRP_TSK_MGMT; 1027 tsk_mgmt->opcode = SRP_TSK_MGMT;
1014 tsk_mgmt->lun = ((u64) lun) << 48; 1028 tsk_mgmt->lun = ((u64) lun) << 48;
1015 tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; 1029 tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
1016 tsk_mgmt->task_tag = (u64) found_evt; 1030 tsk_mgmt->task_tag = (u64) found_evt;
1017 1031
1018 sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n", 1032 evt->sync_srp = &srp_rsp;
1019 tsk_mgmt->lun, tsk_mgmt->task_tag); 1033
1020 1034 init_completion(&evt->comp);
1021 evt->sync_srp = &srp_rsp; 1035 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
1022 init_completion(&evt->comp); 1036
1023 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); 1037 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1038 break;
1039
1040 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1041 msleep(10);
1042 spin_lock_irqsave(hostdata->host->host_lock, flags);
1043 } while (time_before(jiffies, wait_switch));
1044
1024 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1045 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1046
1025 if (rsp_rc != 0) { 1047 if (rsp_rc != 0) {
1026 sdev_printk(KERN_ERR, cmd->device, 1048 sdev_printk(KERN_ERR, cmd->device,
1027 "failed to send abort() event. rc=%d\n", rsp_rc); 1049 "failed to send abort() event. rc=%d\n", rsp_rc);
1028 return FAILED; 1050 return FAILED;
1029 } 1051 }
1030 1052
1053 sdev_printk(KERN_INFO, cmd->device,
1054 "aborting command. lun 0x%lx, tag 0x%lx\n",
1055 (((u64) lun) << 48), (u64) found_evt);
1056
1031 wait_for_completion(&evt->comp); 1057 wait_for_completion(&evt->comp);
1032 1058
1033 /* make sure we got a good response */ 1059 /* make sure we got a good response */
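The abort handler (and the device-reset handler below) now retries as long as the send path reports SCSI_MLQUEUE_HOST_BUSY, dropping the host lock and sleeping 10 ms between attempts, bounded by a jiffies deadline of init_timeout seconds. A userspace sketch of the retry shape, with wall-clock time standing in for the jiffies arithmetic:

#include <time.h>
#include <unistd.h>

#define RC_BUSY 1	/* stands in for SCSI_MLQUEUE_HOST_BUSY */

/* Retry an operation that can report busy, up to a deadline. */
static int retry_until(int (*op)(void), unsigned int timeout_s)
{
	time_t deadline = time(NULL) + timeout_s;
	int rc;

	do {
		rc = op();
		if (rc != RC_BUSY)
			break;
		usleep(10 * 1000);	/* msleep(10) analogue */
	} while (time(NULL) < deadline);

	return rc;
}

static int tries;
static int flaky_op(void) { return ++tries < 3 ? RC_BUSY : 0; }

int main(void)
{
	return retry_until(flaky_op, 5);	/* 0: succeeds on try 3 */
}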
@@ -1099,41 +1125,56 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1099 int rsp_rc; 1125 int rsp_rc;
1100 unsigned long flags; 1126 unsigned long flags;
1101 u16 lun = lun_from_dev(cmd->device); 1127 u16 lun = lun_from_dev(cmd->device);
1128 unsigned long wait_switch = 0;
1102 1129
1103 spin_lock_irqsave(hostdata->host->host_lock, flags); 1130 spin_lock_irqsave(hostdata->host->host_lock, flags);
1104 evt = get_event_struct(&hostdata->pool); 1131 wait_switch = jiffies + (init_timeout * HZ);
1105 if (evt == NULL) { 1132 do {
1106 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1133 evt = get_event_struct(&hostdata->pool);
1107 sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n"); 1134 if (evt == NULL) {
1108 return FAILED; 1135 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1109 } 1136 sdev_printk(KERN_ERR, cmd->device,
1137 "failed to allocate reset event\n");
1138 return FAILED;
1139 }
1110 1140
1111 init_event_struct(evt, 1141 init_event_struct(evt,
1112 sync_completion, 1142 sync_completion,
1113 VIOSRP_SRP_FORMAT, 1143 VIOSRP_SRP_FORMAT,
1114 init_timeout); 1144 init_timeout);
1115 1145
1116 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1146 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1117 1147
1118 /* Set up a lun reset SRP command */ 1148 /* Set up a lun reset SRP command */
1119 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); 1149 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1120 tsk_mgmt->opcode = SRP_TSK_MGMT; 1150 tsk_mgmt->opcode = SRP_TSK_MGMT;
1121 tsk_mgmt->lun = ((u64) lun) << 48; 1151 tsk_mgmt->lun = ((u64) lun) << 48;
1122 tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; 1152 tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
1123 1153
1124 sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n", 1154 evt->sync_srp = &srp_rsp;
1125 tsk_mgmt->lun); 1155
1156 init_completion(&evt->comp);
1157 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
1158
1159 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1160 break;
1161
1162 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1163 msleep(10);
1164 spin_lock_irqsave(hostdata->host->host_lock, flags);
1165 } while (time_before(jiffies, wait_switch));
1126 1166
1127 evt->sync_srp = &srp_rsp;
1128 init_completion(&evt->comp);
1129 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
1130 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1167 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1168
1131 if (rsp_rc != 0) { 1169 if (rsp_rc != 0) {
1132 sdev_printk(KERN_ERR, cmd->device, 1170 sdev_printk(KERN_ERR, cmd->device,
1133 "failed to send reset event. rc=%d\n", rsp_rc); 1171 "failed to send reset event. rc=%d\n", rsp_rc);
1134 return FAILED; 1172 return FAILED;
1135 } 1173 }
1136 1174
1175 sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
1176 (((u64) lun) << 48));
1177
1137 wait_for_completion(&evt->comp); 1178 wait_for_completion(&evt->comp);
1138 1179
1139 /* make sure we got a good response */ 1180 /* make sure we got a good response */
@@ -1386,8 +1427,10 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1386 unsigned long lock_flags = 0; 1427 unsigned long lock_flags = 0;
1387 1428
1388 spin_lock_irqsave(shost->host_lock, lock_flags); 1429 spin_lock_irqsave(shost->host_lock, lock_flags);
1389 if (sdev->type == TYPE_DISK) 1430 if (sdev->type == TYPE_DISK) {
1390 sdev->allow_restart = 1; 1431 sdev->allow_restart = 1;
1432 sdev->timeout = 60 * HZ;
1433 }
1391 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); 1434 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1392 spin_unlock_irqrestore(shost->host_lock, lock_flags); 1435 spin_unlock_irqrestore(shost->host_lock, lock_flags);
1393 return 0; 1436 return 0;
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 82bcab688b44..d63f11e95abf 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -292,7 +292,7 @@ static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
292 dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0], 292 dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
293 cmd->usg_sg); 293 cmd->usg_sg);
294 294
295 if (sc->use_sg) 295 if (scsi_sg_count(sc))
296 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1); 296 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
297 297
298 spin_lock_irqsave(&target->lock, flags); 298 spin_lock_irqsave(&target->lock, flags);
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index a3d0c6b14958..f97d172844be 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -837,19 +837,16 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
837 837
838 /* Phase 4 - Setup scatter/gather buffers */ 838 /* Phase 4 - Setup scatter/gather buffers */
839 case 4: 839 case 4:
840 if (cmd->use_sg) { 840 if (scsi_bufflen(cmd)) {
841 /* if many buffers are available, start filling the first */ 841 cmd->SCp.buffer = scsi_sglist(cmd);
842 cmd->SCp.buffer =
843 (struct scatterlist *) cmd->request_buffer;
844 cmd->SCp.this_residual = cmd->SCp.buffer->length; 842 cmd->SCp.this_residual = cmd->SCp.buffer->length;
845 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); 843 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
846 } else { 844 } else {
847 /* else fill the only available buffer */
848 cmd->SCp.buffer = NULL; 845 cmd->SCp.buffer = NULL;
849 cmd->SCp.this_residual = cmd->request_bufflen; 846 cmd->SCp.this_residual = 0;
850 cmd->SCp.ptr = cmd->request_buffer; 847 cmd->SCp.ptr = NULL;
851 } 848 }
852 cmd->SCp.buffers_residual = cmd->use_sg - 1; 849 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
853 cmd->SCp.phase++; 850 cmd->SCp.phase++;
854 if (cmd->SCp.this_residual & 0x01) 851 if (cmd->SCp.this_residual & 0x01)
855 cmd->SCp.this_residual++; 852 cmd->SCp.this_residual++;
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index c8b452f2878c..8053b1e86ccb 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -369,16 +369,16 @@ static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
369 * - SCp.phase records this command's SRCID_ER bit setting 369 * - SCp.phase records this command's SRCID_ER bit setting
370 */ 370 */
371 371
372 if (cmd->use_sg) { 372 if (scsi_bufflen(cmd)) {
373 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; 373 cmd->SCp.buffer = scsi_sglist(cmd);
374 cmd->SCp.buffers_residual = cmd->use_sg - 1; 374 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
375 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); 375 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
376 cmd->SCp.this_residual = cmd->SCp.buffer->length; 376 cmd->SCp.this_residual = cmd->SCp.buffer->length;
377 } else { 377 } else {
378 cmd->SCp.buffer = NULL; 378 cmd->SCp.buffer = NULL;
379 cmd->SCp.buffers_residual = 0; 379 cmd->SCp.buffers_residual = 0;
380 cmd->SCp.ptr = (char *) cmd->request_buffer; 380 cmd->SCp.ptr = NULL;
381 cmd->SCp.this_residual = cmd->request_bufflen; 381 cmd->SCp.this_residual = 0;
382 } 382 }
383 cmd->SCp.have_data_in = 0; 383 cmd->SCp.have_data_in = 0;
384 384
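The imm and in2000 hunks above are part of the accessor conversion: the removed scsi_cmnd fields use_sg, request_buffer and request_bufflen are replaced by scsi_sg_count(), scsi_sglist() and scsi_bufflen(), and since the midlayer now always supplies a scatterlist for data-bearing commands, the old flat-buffer branch degenerates to NULL/0. A sketch of the converted setup path with stand-in types (not the real kernel structures):

#include <stddef.h>

struct scatterlist { unsigned int length; char *virt; };

struct scsi_pointer {
	struct scatterlist *buffer;
	int buffers_residual;
	char *ptr;
	int this_residual;
};

/* sg_count/sgl stand in for scsi_sg_count(cmd)/scsi_sglist(cmd). */
static void setup_transfer(struct scsi_pointer *scp,
			   struct scatterlist *sgl, int sg_count)
{
	if (sg_count) {
		scp->buffer = sgl;
		scp->buffers_residual = sg_count - 1;
		scp->ptr = sgl->virt;		/* sg_virt() analogue */
		scp->this_residual = sgl->length;
	} else {				/* no data phase */
		scp->buffer = NULL;
		scp->buffers_residual = 0;
		scp->ptr = NULL;
		scp->this_residual = 0;
	}
}

int main(void)
{
	char data[512];
	struct scatterlist sg = { sizeof(data), data };
	struct scsi_pointer scp;

	setup_transfer(&scp, &sg, 1);
	return scp.this_residual == 512 ? 0 : 1;
}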
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index aa0df0a4b22a..73270ff892d9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -84,7 +84,7 @@
84/* 84/*
85 * Global Data 85 * Global Data
86 */ 86 */
87static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head); 87static LIST_HEAD(ipr_ioa_head);
88static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL; 88static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
89static unsigned int ipr_max_speed = 1; 89static unsigned int ipr_max_speed = 1;
90static int ipr_testmode = 0; 90static int ipr_testmode = 0;
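The ipr change is purely cosmetic: LIST_HEAD(name) expands to exactly the open-coded definition it replaces. The two spellings, modeled outside the kernel with the macros reproduced for illustration:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static LIST_HEAD(ipr_ioa_head_model);	/* the one-liner form */

int main(void)
{
	/* an empty list head points at itself both ways */
	printf("%d\n", ipr_ioa_head_model.next == &ipr_ioa_head_model &&
		       ipr_ioa_head_model.prev == &ipr_ioa_head_model);
	return 0;
}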
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 5c5a9b2628fc..7505cca8e68e 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -389,17 +389,17 @@ static struct pci_device_id ips_pci_table[] = {
389MODULE_DEVICE_TABLE( pci, ips_pci_table ); 389MODULE_DEVICE_TABLE( pci, ips_pci_table );
390 390
391static char ips_hot_plug_name[] = "ips"; 391static char ips_hot_plug_name[] = "ips";
392 392
393static int __devinit ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent); 393static int __devinit ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
394static void __devexit ips_remove_device(struct pci_dev *pci_dev); 394static void __devexit ips_remove_device(struct pci_dev *pci_dev);
395 395
396static struct pci_driver ips_pci_driver = { 396static struct pci_driver ips_pci_driver = {
397 .name = ips_hot_plug_name, 397 .name = ips_hot_plug_name,
398 .id_table = ips_pci_table, 398 .id_table = ips_pci_table,
399 .probe = ips_insert_device, 399 .probe = ips_insert_device,
400 .remove = __devexit_p(ips_remove_device), 400 .remove = __devexit_p(ips_remove_device),
401}; 401};
402 402
403 403
404/* 404/*
405 * Necessary forward function prototypes 405 * Necessary forward function prototypes
@@ -587,7 +587,7 @@ static void
587ips_setup_funclist(ips_ha_t * ha) 587ips_setup_funclist(ips_ha_t * ha)
588{ 588{
589 589
590 /* 590 /*
591 * Setup Functions 591 * Setup Functions
592 */ 592 */
593 if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) { 593 if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) {
@@ -702,12 +702,8 @@ ips_release(struct Scsi_Host *sh)
702 /* free extra memory */ 702 /* free extra memory */
703 ips_free(ha); 703 ips_free(ha);
704 704
705 /* Free I/O Region */
706 if (ha->io_addr)
707 release_region(ha->io_addr, ha->io_len);
708
709 /* free IRQ */ 705 /* free IRQ */
710 free_irq(ha->irq, ha); 706 free_irq(ha->pcidev->irq, ha);
711 707
712 scsi_host_put(sh); 708 scsi_host_put(sh);
713 709
@@ -1637,7 +1633,7 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
1637 return (IPS_FAILURE); 1633 return (IPS_FAILURE);
1638 } 1634 }
1639 1635
1640 if (ha->device_id == IPS_DEVICEID_COPPERHEAD && 1636 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
1641 pt->CoppCP.cmd.flashfw.op_code == 1637 pt->CoppCP.cmd.flashfw.op_code ==
1642 IPS_CMD_RW_BIOSFW) { 1638 IPS_CMD_RW_BIOSFW) {
1643 ret = ips_flash_copperhead(ha, pt, scb); 1639 ret = ips_flash_copperhead(ha, pt, scb);
@@ -2021,7 +2017,7 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
2021 pt->ExtendedStatus = scb->extended_status; 2017 pt->ExtendedStatus = scb->extended_status;
2022 pt->AdapterType = ha->ad_type; 2018 pt->AdapterType = ha->ad_type;
2023 2019
2024 if (ha->device_id == IPS_DEVICEID_COPPERHEAD && 2020 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
2025 (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD || 2021 (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD ||
2026 scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW)) 2022 scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW))
2027 ips_free_flash_copperhead(ha); 2023 ips_free_flash_copperhead(ha);
@@ -2075,13 +2071,13 @@ ips_host_info(ips_ha_t * ha, char *ptr, off_t offset, int len)
2075 ha->mem_ptr); 2071 ha->mem_ptr);
2076 } 2072 }
2077 2073
2078 copy_info(&info, "\tIRQ number : %d\n", ha->irq); 2074 copy_info(&info, "\tIRQ number : %d\n", ha->pcidev->irq);
2079 2075
2080 /* For the next 3 lines, check for a binary 0 at the end and don't include it if it's there. */ 2076 /* For the next 3 lines, check for a binary 0 at the end and don't include it if it's there. */
2081 /* That keeps everything happy for "text" operations on the proc file. */ 2077 /* That keeps everything happy for "text" operations on the proc file. */
2082 2078
2083 if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) { 2079 if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) {
2084 if (ha->nvram->bios_low[3] == 0) { 2080 if (ha->nvram->bios_low[3] == 0) {
2085 copy_info(&info, 2081 copy_info(&info,
2086 "\tBIOS Version : %c%c%c%c%c%c%c\n", 2082 "\tBIOS Version : %c%c%c%c%c%c%c\n",
2087 ha->nvram->bios_high[0], ha->nvram->bios_high[1], 2083 ha->nvram->bios_high[0], ha->nvram->bios_high[1],
@@ -2232,31 +2228,31 @@ ips_identify_controller(ips_ha_t * ha)
 {
     METHOD_TRACE("ips_identify_controller", 1);
 
-    switch (ha->device_id) {
+    switch (ha->pcidev->device) {
     case IPS_DEVICEID_COPPERHEAD:
-        if (ha->revision_id <= IPS_REVID_SERVERAID) {
+        if (ha->pcidev->revision <= IPS_REVID_SERVERAID) {
             ha->ad_type = IPS_ADTYPE_SERVERAID;
-        } else if (ha->revision_id == IPS_REVID_SERVERAID2) {
+        } else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) {
             ha->ad_type = IPS_ADTYPE_SERVERAID2;
-        } else if (ha->revision_id == IPS_REVID_NAVAJO) {
+        } else if (ha->pcidev->revision == IPS_REVID_NAVAJO) {
             ha->ad_type = IPS_ADTYPE_NAVAJO;
-        } else if ((ha->revision_id == IPS_REVID_SERVERAID2)
+        } else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2)
                    && (ha->slot_num == 0)) {
             ha->ad_type = IPS_ADTYPE_KIOWA;
-        } else if ((ha->revision_id >= IPS_REVID_CLARINETP1) &&
-                   (ha->revision_id <= IPS_REVID_CLARINETP3)) {
+        } else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) &&
+                   (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) {
             if (ha->enq->ucMaxPhysicalDevices == 15)
                 ha->ad_type = IPS_ADTYPE_SERVERAID3L;
             else
                 ha->ad_type = IPS_ADTYPE_SERVERAID3;
-        } else if ((ha->revision_id >= IPS_REVID_TROMBONE32) &&
-                   (ha->revision_id <= IPS_REVID_TROMBONE64)) {
+        } else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) &&
+                   (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) {
             ha->ad_type = IPS_ADTYPE_SERVERAID4H;
         }
         break;
 
     case IPS_DEVICEID_MORPHEUS:
-        switch (ha->subdevice_id) {
+        switch (ha->pcidev->subsystem_device) {
         case IPS_SUBDEVICEID_4L:
             ha->ad_type = IPS_ADTYPE_SERVERAID4L;
             break;
@@ -2285,7 +2281,7 @@ ips_identify_controller(ips_ha_t * ha)
         break;
 
     case IPS_DEVICEID_MARCO:
-        switch (ha->subdevice_id) {
+        switch (ha->pcidev->subsystem_device) {
         case IPS_SUBDEVICEID_6M:
             ha->ad_type = IPS_ADTYPE_SERVERAID6M;
             break;
@@ -2332,20 +2328,20 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
 
     strncpy(ha->bios_version, " ?", 8);
 
-    if (ha->device_id == IPS_DEVICEID_COPPERHEAD) {
+    if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) {
         if (IPS_USE_MEMIO(ha)) {
             /* Memory Mapped I/O */
 
             /* test 1st byte */
             writel(0, ha->mem_ptr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
                 return;
 
             writel(1, ha->mem_ptr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
@@ -2353,20 +2349,20 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
 
             /* Get Major version */
             writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             major = readb(ha->mem_ptr + IPS_REG_FLDP);
 
             /* Get Minor version */
             writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
             minor = readb(ha->mem_ptr + IPS_REG_FLDP);
 
             /* Get SubMinor version */
             writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
             subminor = readb(ha->mem_ptr + IPS_REG_FLDP);
 
@@ -2375,14 +2371,14 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
 
             /* test 1st byte */
             outl(0, ha->io_addr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
                 return;
 
             outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
@@ -2390,21 +2386,21 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
 
             /* Get Major version */
             outl(cpu_to_le32(0x1FF), ha->io_addr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             major = inb(ha->io_addr + IPS_REG_FLDP);
 
             /* Get Minor version */
             outl(cpu_to_le32(0x1FE), ha->io_addr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             minor = inb(ha->io_addr + IPS_REG_FLDP);
 
             /* Get SubMinor version */
             outl(cpu_to_le32(0x1FD), ha->io_addr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             subminor = inb(ha->io_addr + IPS_REG_FLDP);
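
Both the memory-mapped and port I/O branches above read flash bytes through the same indexed register pair: the byte address goes into IPS_REG_FLAP, the data comes back from IPS_REG_FLDP, with a 25 us settle after each FLAP write on TROMBONE64-revision parts. A hedged sketch of that access pattern; the helper name is hypothetical, while the registers and the erratum delay come from the driver:

    /* Hypothetical helper capturing the indexed flash read used
     * above (memio flavour). */
    static uint8_t ips_read_flash_byte(ips_ha_t *ha, uint32_t addr)
    {
        writel(addr, ha->mem_ptr + IPS_REG_FLAP);   /* select byte address */
        if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
            udelay(25);                             /* 25 us erratum delay */
        return readb(ha->mem_ptr + IPS_REG_FLDP);   /* fetch the data byte */
    }
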
@@ -2740,8 +2736,6 @@ ips_next(ips_ha_t * ha, int intr)
         SC->result = DID_OK;
         SC->host_scribble = NULL;
 
-        memset(SC->sense_buffer, 0, sizeof (SC->sense_buffer));
-
         scb->target_id = SC->device->id;
         scb->lun = SC->device->lun;
         scb->bus = SC->device->channel;
@@ -2780,10 +2774,11 @@ ips_next(ips_ha_t * ha, int intr)
             scb->dcdb.cmd_attribute =
                 ips_command_direction[scb->scsi_cmd->cmnd[0]];
 
             /* Allow a WRITE BUFFER Command to Have no Data */
             /* This is Used by Tape Flash Utilites */
-            if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) && (scb->data_len == 0))
-                scb->dcdb.cmd_attribute = 0;
+            if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) &&
+                (scb->data_len == 0))
+                scb->dcdb.cmd_attribute = 0;
 
             if (!(scb->dcdb.cmd_attribute & 0x3))
                 scb->dcdb.transfer_length = 0;
@@ -3404,7 +3399,7 @@ ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
 
     /* Restrict access to physical DASD */
     if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
         ips_scmd_buf_read(scb->scsi_cmd,
                           &inquiryData, sizeof (inquiryData));
         if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) {
             errcode = DID_TIME_OUT;
@@ -3438,13 +3433,11 @@ ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
                     (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
                 memcpy(scb->scsi_cmd->sense_buffer,
                        tapeDCDB->sense_info,
-                       sizeof (scb->scsi_cmd->
-                               sense_buffer));
+                       SCSI_SENSE_BUFFERSIZE);
             } else {
                 memcpy(scb->scsi_cmd->sense_buffer,
                        scb->dcdb.sense_info,
-                       sizeof (scb->scsi_cmd->
-                               sense_buffer));
+                       SCSI_SENSE_BUFFERSIZE);
             }
             device_error = 2;    /* check condition */
         }
@@ -3824,7 +3817,6 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
             /* attempted, a Check Condition occurred, and Sense */
             /* Data indicating an Invalid CDB OpCode is returned. */
             sp = (char *) scb->scsi_cmd->sense_buffer;
-            memset(sp, 0, sizeof (scb->scsi_cmd->sense_buffer));
 
             sp[0] = 0x70;    /* Error Code */
             sp[2] = ILLEGAL_REQUEST;    /* Sense Key 5 Illegal Req. */
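
The deleted memsets and the switch to SCSI_SENSE_BUFFERSIZE above belong together: once the midlayer turns scsi_cmnd's sense_buffer from an embedded array into a pointer, sizeof (SC->sense_buffer) quietly becomes the size of a pointer, so the explicit constant must be used and the zeroing is left to the midlayer. A standalone illustration of the pitfall, not driver code:

    #include <stdio.h>

    struct cmd_with_array { unsigned char sense_buffer[96]; };
    struct cmd_with_ptr   { unsigned char *sense_buffer; };

    int main(void)
    {
        struct cmd_with_array a;
        struct cmd_with_ptr p;

        /* prints 96: the whole buffer */
        printf("%zu\n", sizeof(a.sense_buffer));
        /* prints 8 on a 64-bit build: just the pointer */
        printf("%zu\n", sizeof(p.sense_buffer));
        return 0;
    }
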
@@ -4090,10 +4082,10 @@ ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
             scb->scsi_cmd->result = errcode << 16;
         } else {    /* bus == 0 */
             /* restrict access to physical drives */
             if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
                 ips_scmd_buf_read(scb->scsi_cmd,
                                   &inquiryData, sizeof (inquiryData));
                 if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK)
                     scb->scsi_cmd->result = DID_TIME_OUT << 16;
             }
         }    /* else */
@@ -4393,8 +4385,6 @@ ips_free(ips_ha_t * ha)
         ha->mem_ptr = NULL;
     }
 
-    if (ha->mem_addr)
-        release_mem_region(ha->mem_addr, ha->mem_len);
     ha->mem_addr = 0;
 
 }
@@ -4661,8 +4651,8 @@ ips_isinit_morpheus(ips_ha_t * ha)
     uint32_t bits;
 
     METHOD_TRACE("ips_is_init_morpheus", 1);
 
     if (ips_isintr_morpheus(ha))
         ips_flush_and_reset(ha);
 
     post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
@@ -4686,7 +4676,7 @@ ips_isinit_morpheus(ips_ha_t * ha)
 /* state ( was trying to INIT and an interrupt was already pending ) ... */
 /* */
 /****************************************************************************/
 static void
 ips_flush_and_reset(ips_ha_t *ha)
 {
     ips_scb_t *scb;
@@ -4718,9 +4708,9 @@ ips_flush_and_reset(ips_ha_t *ha)
     if (ret == IPS_SUCCESS) {
         time = 60 * IPS_ONE_SEC;    /* Max Wait time is 60 seconds */
         done = 0;
 
         while ((time > 0) && (!done)) {
             done = ips_poll_for_flush_complete(ha);
             /* This may look evil, but it's only done during extremely rare start-up conditions ! */
             udelay(1000);
             time--;
@@ -4749,17 +4739,17 @@ static int
 ips_poll_for_flush_complete(ips_ha_t * ha)
 {
     IPS_STATUS cstatus;
 
     while (TRUE) {
         cstatus.value = (*ha->func.statupd) (ha);
 
         if (cstatus.value == 0xffffffff)    /* If No Interrupt to process */
             break;
 
         /* Success is when we see the Flush Command ID */
-        if (cstatus.fields.command_id == IPS_MAX_CMDS )
+        if (cstatus.fields.command_id == IPS_MAX_CMDS)
             return 1;
     }
 
     return 0;
 }
@@ -4903,7 +4893,7 @@ ips_init_copperhead(ips_ha_t * ha)
     /* Enable busmastering */
     outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR);
 
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         /* fix for anaconda64 */
         outl(0, ha->io_addr + IPS_REG_NDAE);
 
@@ -4997,7 +4987,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
     /* Enable busmastering */
     writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR);
 
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         /* fix for anaconda64 */
         writel(0, ha->mem_ptr + IPS_REG_NDAE);
 
@@ -5142,7 +5132,7 @@ ips_reset_copperhead(ips_ha_t * ha)
     METHOD_TRACE("ips_reset_copperhead", 1);
 
     DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d",
-              ips_name, ha->host_num, ha->io_addr, ha->irq);
+              ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq);
 
     reset_counter = 0;
 
@@ -5187,7 +5177,7 @@ ips_reset_copperhead_memio(ips_ha_t * ha)
     METHOD_TRACE("ips_reset_copperhead_memio", 1);
 
     DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d",
-              ips_name, ha->host_num, ha->mem_addr, ha->irq);
+              ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
 
     reset_counter = 0;
 
@@ -5233,7 +5223,7 @@ ips_reset_morpheus(ips_ha_t * ha)
     METHOD_TRACE("ips_reset_morpheus", 1);
 
     DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d",
-              ips_name, ha->host_num, ha->mem_addr, ha->irq);
+              ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
 
     reset_counter = 0;
 
@@ -5920,7 +5910,7 @@ ips_read_config(ips_ha_t * ha, int intr)
 
         return (0);
     }
 
     memcpy(ha->conf, ha->ioctl_data, sizeof(*ha->conf));
     return (1);
 }
@@ -5959,7 +5949,7 @@ ips_readwrite_page5(ips_ha_t * ha, int write, int intr)
     scb->cmd.nvram.buffer_addr = ha->ioctl_busaddr;
     if (write)
         memcpy(ha->ioctl_data, ha->nvram, sizeof(*ha->nvram));
 
     /* issue the command */
     if (((ret =
           ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
@@ -6196,32 +6186,32 @@ ips_erase_bios(ips_ha_t * ha)
 
     /* Clear the status register */
     outl(0, ha->io_addr + IPS_REG_FLAP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     outb(0x50, ha->io_addr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     /* Erase Setup */
     outb(0x20, ha->io_addr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     /* Erase Confirm */
     outb(0xD0, ha->io_addr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     /* Erase Status */
     outb(0x70, ha->io_addr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     timeout = 80000;    /* 80 seconds */
 
     while (timeout > 0) {
-        if (ha->revision_id == IPS_REVID_TROMBONE64) {
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
             outl(0, ha->io_addr + IPS_REG_FLAP);
             udelay(25);    /* 25 us */
         }
@@ -6241,13 +6231,13 @@ ips_erase_bios(ips_ha_t * ha)
 
     /* try to suspend the erase */
     outb(0xB0, ha->io_addr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     /* wait for 10 seconds */
     timeout = 10000;
     while (timeout > 0) {
-        if (ha->revision_id == IPS_REVID_TROMBONE64) {
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
             outl(0, ha->io_addr + IPS_REG_FLAP);
             udelay(25);    /* 25 us */
         }
@@ -6277,12 +6267,12 @@ ips_erase_bios(ips_ha_t * ha)
     /* Otherwise, we were successful */
     /* clear status */
     outb(0x50, ha->io_addr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     /* enable reads */
     outb(0xFF, ha->io_addr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     return (0);
@@ -6308,32 +6298,32 @@ ips_erase_bios_memio(ips_ha_t * ha)
 
     /* Clear the status register */
     writel(0, ha->mem_ptr + IPS_REG_FLAP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     /* Erase Setup */
     writeb(0x20, ha->mem_ptr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     /* Erase Confirm */
     writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     /* Erase Status */
     writeb(0x70, ha->mem_ptr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     timeout = 80000;    /* 80 seconds */
 
     while (timeout > 0) {
-        if (ha->revision_id == IPS_REVID_TROMBONE64) {
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
             writel(0, ha->mem_ptr + IPS_REG_FLAP);
             udelay(25);    /* 25 us */
         }
@@ -6353,13 +6343,13 @@ ips_erase_bios_memio(ips_ha_t * ha)
 
     /* try to suspend the erase */
     writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     /* wait for 10 seconds */
     timeout = 10000;
     while (timeout > 0) {
-        if (ha->revision_id == IPS_REVID_TROMBONE64) {
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
             writel(0, ha->mem_ptr + IPS_REG_FLAP);
             udelay(25);    /* 25 us */
         }
@@ -6389,12 +6379,12 @@ ips_erase_bios_memio(ips_ha_t * ha)
     /* Otherwise, we were successful */
     /* clear status */
     writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     /* enable reads */
     writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     return (0);
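
Every change in the erase hunks above is the same three-line motif: issue a flash command byte, then settle for 25 us if the part is a TROMBONE64 revision. The patch deliberately keeps the conversion mechanical, but the motif is small enough to factor; a hypothetical helper for the port I/O flavour, not part of the patch:

    /* Hypothetical refactor of the repeated command/settle motif. */
    static void ips_flash_cmd(ips_ha_t *ha, uint8_t cmd)
    {
        outb(cmd, ha->io_addr + IPS_REG_FLDP);      /* flash command byte */
        if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
            udelay(25);                             /* 25 us */
    }
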
@@ -6423,21 +6413,21 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
     for (i = 0; i < buffersize; i++) {
         /* write a byte */
         outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP);
-        if (ha->revision_id == IPS_REVID_TROMBONE64)
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
            udelay(25);    /* 25 us */
 
         outb(0x40, ha->io_addr + IPS_REG_FLDP);
-        if (ha->revision_id == IPS_REVID_TROMBONE64)
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
            udelay(25);    /* 25 us */
 
         outb(buffer[i], ha->io_addr + IPS_REG_FLDP);
-        if (ha->revision_id == IPS_REVID_TROMBONE64)
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
            udelay(25);    /* 25 us */
 
         /* wait up to one second */
         timeout = 1000;
         while (timeout > 0) {
-            if (ha->revision_id == IPS_REVID_TROMBONE64) {
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
                 outl(0, ha->io_addr + IPS_REG_FLAP);
                 udelay(25);    /* 25 us */
             }
@@ -6454,11 +6444,11 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
         if (timeout == 0) {
             /* timeout error */
             outl(0, ha->io_addr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             outb(0xFF, ha->io_addr + IPS_REG_FLDP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             return (1);
@@ -6468,11 +6458,11 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
         if (status & 0x18) {
             /* programming error */
             outl(0, ha->io_addr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             outb(0xFF, ha->io_addr + IPS_REG_FLDP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             return (1);
@@ -6481,11 +6471,11 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
 
     /* Enable reading */
     outl(0, ha->io_addr + IPS_REG_FLAP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     outb(0xFF, ha->io_addr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     return (0);
@@ -6514,21 +6504,21 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
     for (i = 0; i < buffersize; i++) {
         /* write a byte */
         writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
-        if (ha->revision_id == IPS_REVID_TROMBONE64)
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
            udelay(25);    /* 25 us */
 
         writeb(0x40, ha->mem_ptr + IPS_REG_FLDP);
-        if (ha->revision_id == IPS_REVID_TROMBONE64)
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
            udelay(25);    /* 25 us */
 
         writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP);
-        if (ha->revision_id == IPS_REVID_TROMBONE64)
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
            udelay(25);    /* 25 us */
 
         /* wait up to one second */
         timeout = 1000;
         while (timeout > 0) {
-            if (ha->revision_id == IPS_REVID_TROMBONE64) {
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
                 writel(0, ha->mem_ptr + IPS_REG_FLAP);
                 udelay(25);    /* 25 us */
             }
@@ -6545,11 +6535,11 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
         if (timeout == 0) {
             /* timeout error */
             writel(0, ha->mem_ptr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             return (1);
@@ -6559,11 +6549,11 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
         if (status & 0x18) {
             /* programming error */
             writel(0, ha->mem_ptr + IPS_REG_FLAP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
-            if (ha->revision_id == IPS_REVID_TROMBONE64)
+            if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
                 udelay(25);    /* 25 us */
 
             return (1);
@@ -6572,11 +6562,11 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
 
     /* Enable reading */
     writel(0, ha->mem_ptr + IPS_REG_FLAP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     return (0);
@@ -6601,14 +6591,14 @@ ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
 
     /* test 1st byte */
     outl(0, ha->io_addr + IPS_REG_FLAP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
         return (1);
 
     outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
     if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
         return (1);
@@ -6617,7 +6607,7 @@ ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
     for (i = 2; i < buffersize; i++) {
 
         outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP);
-        if (ha->revision_id == IPS_REVID_TROMBONE64)
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
             udelay(25);    /* 25 us */
 
         checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP);
@@ -6650,14 +6640,14 @@ ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
 
     /* test 1st byte */
     writel(0, ha->mem_ptr + IPS_REG_FLAP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
 
     if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
         return (1);
 
     writel(1, ha->mem_ptr + IPS_REG_FLAP);
-    if (ha->revision_id == IPS_REVID_TROMBONE64)
+    if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
         udelay(25);    /* 25 us */
     if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
         return (1);
@@ -6666,7 +6656,7 @@ ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
     for (i = 2; i < buffersize; i++) {
 
         writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
-        if (ha->revision_id == IPS_REVID_TROMBONE64)
+        if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
             udelay(25);    /* 25 us */
 
         checksum =
@@ -6837,24 +6827,18 @@ ips_register_scsi(int index)
     }
     ha = IPS_HA(sh);
     memcpy(ha, oldha, sizeof (ips_ha_t));
-    free_irq(oldha->irq, oldha);
+    free_irq(oldha->pcidev->irq, oldha);
     /* Install the interrupt handler with the new ha */
-    if (request_irq(ha->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
+    if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
         IPS_PRINTK(KERN_WARNING, ha->pcidev,
                    "Unable to install interrupt handler\n");
-        scsi_host_put(sh);
-        return -1;
+        goto err_out_sh;
     }
 
     kfree(oldha);
-    ips_sh[index] = sh;
-    ips_ha[index] = ha;
 
     /* Store away needed values for later use */
-    sh->io_port = ha->io_addr;
-    sh->n_io_port = ha->io_addr ? 255 : 0;
     sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr;
-    sh->irq = ha->irq;
     sh->sg_tablesize = sh->hostt->sg_tablesize;
     sh->can_queue = sh->hostt->can_queue;
     sh->cmd_per_lun = sh->hostt->cmd_per_lun;
@@ -6867,10 +6851,21 @@ ips_register_scsi(int index)
     sh->max_channel = ha->nbus - 1;
     sh->can_queue = ha->max_cmds - 1;
 
-    scsi_add_host(sh, NULL);
+    if (scsi_add_host(sh, &ha->pcidev->dev))
+        goto err_out;
+
+    ips_sh[index] = sh;
+    ips_ha[index] = ha;
+
     scsi_scan_host(sh);
 
     return 0;
+
+err_out:
+    free_irq(ha->pcidev->irq, ha);
+err_out_sh:
+    scsi_host_put(sh);
+    return -1;
 }
 
 /*---------------------------------------------------------------------------*/
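
The rework above replaces two ad hoc early returns with the usual kernel unwind ladder: each goto target releases exactly what was acquired before the failing step, in reverse order. Reduced to its shape (a sketch, not the full function):

    if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha))
        goto err_out_sh;        /* only the host exists so far  */

    if (scsi_add_host(sh, &ha->pcidev->dev))
        goto err_out;           /* the IRQ is live, drop it too */

    scsi_scan_host(sh);
    return 0;

    err_out:
        free_irq(ha->pcidev->irq, ha);
    err_out_sh:
        scsi_host_put(sh);
        return -1;

Note also that ips_sh[index] and ips_ha[index] are now published only after scsi_add_host() succeeds, so a failed registration never leaves a half-valid entry in the global tables.
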
@@ -6882,20 +6877,14 @@ ips_register_scsi(int index)
 static void __devexit
 ips_remove_device(struct pci_dev *pci_dev)
 {
-    int i;
-    struct Scsi_Host *sh;
-    ips_ha_t *ha;
+    struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
 
-    for (i = 0; i < IPS_MAX_ADAPTERS; i++) {
-        ha = ips_ha[i];
-        if (ha) {
-            if ((pci_dev->bus->number == ha->pcidev->bus->number) &&
-                (pci_dev->devfn == ha->pcidev->devfn)) {
-                sh = ips_sh[i];
-                ips_release(sh);
-            }
-        }
-    }
+    pci_set_drvdata(pci_dev, NULL);
+
+    ips_release(sh);
+
+    pci_release_regions(pci_dev);
+    pci_disable_device(pci_dev);
 }
 
 /****************************************************************************/
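
ips_remove_device() no longer walks the global adapter table comparing bus numbers and devfns: probe stores the Scsi_Host with pci_set_drvdata(), so removal becomes a constant-time pci_get_drvdata() lookup followed by teardown in reverse order of setup. The pairing, sketched; build_host() is a hypothetical stand-in for the probe-side setup:

    static int probe_sketch(struct pci_dev *pci_dev)
    {
        struct Scsi_Host *sh = build_host(pci_dev);  /* hypothetical */

        pci_set_drvdata(pci_dev, sh);    /* stashed for remove()      */
        return 0;
    }

    static void remove_sketch(struct pci_dev *pci_dev)
    {
        struct Scsi_Host *sh = pci_get_drvdata(pci_dev);

        pci_set_drvdata(pci_dev, NULL);  /* break the association     */
        ips_release(sh);                 /* undo probe, reverse order */
    }
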
@@ -6949,12 +6938,17 @@ module_exit(ips_module_exit);
 static int __devinit
 ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
 {
-    int uninitialized_var(index);
+    int index = -1;
     int rc;
 
     METHOD_TRACE("ips_insert_device", 1);
-    if (pci_enable_device(pci_dev))
-        return -1;
+    rc = pci_enable_device(pci_dev);
+    if (rc)
+        return rc;
+
+    rc = pci_request_regions(pci_dev, "ips");
+    if (rc)
+        goto err_out;
 
     rc = ips_init_phase1(pci_dev, &index);
     if (rc == SUCCESS)
@@ -6970,6 +6964,19 @@ ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
         ips_num_controllers++;
 
     ips_next_controller = ips_num_controllers;
+
+    if (rc < 0) {
+        rc = -ENODEV;
+        goto err_out_regions;
+    }
+
+    pci_set_drvdata(pci_dev, ips_sh[index]);
+    return 0;
+
+err_out_regions:
+    pci_release_regions(pci_dev);
+err_out:
+    pci_disable_device(pci_dev);
     return rc;
 }
 
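ips_insert_device() now follows the canonical PCI probe shape: enable the device, claim all of its regions with a single pci_request_regions() call, and unwind through labels on failure. This one-shot claim is also why the per-BAR request_mem_region()/request_region() calls disappear from ips_init_phase1() below, and why release_mem_region() was dropped from ips_free() earlier. The skeleton, as a sketch:

    static int probe_shape(struct pci_dev *pci_dev)
    {
        int rc;

        rc = pci_enable_device(pci_dev);
        if (rc)
            return rc;

        rc = pci_request_regions(pci_dev, "ips");
        if (rc)
            goto err_out;

        /* device-specific setup would go here */
        return 0;

    err_out:
        pci_disable_device(pci_dev);
        return rc;
    }
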
@@ -6992,8 +6999,6 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
     uint32_t mem_len;
     uint8_t bus;
     uint8_t func;
-    uint8_t irq;
-    uint16_t subdevice_id;
     int j;
     int index;
     dma_addr_t dma_address;
@@ -7004,7 +7009,7 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
     METHOD_TRACE("ips_init_phase1", 1);
     index = IPS_MAX_ADAPTERS;
     for (j = 0; j < IPS_MAX_ADAPTERS; j++) {
-        if (ips_ha[j] == 0) {
+        if (ips_ha[j] == NULL) {
             index = j;
             break;
         }
@@ -7014,7 +7019,6 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
         return -1;
 
     /* stuff that we get in dev */
-    irq = pci_dev->irq;
     bus = pci_dev->bus->number;
     func = pci_dev->devfn;
 
@@ -7042,34 +7046,17 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
         uint32_t base;
         uint32_t offs;
 
-        if (!request_mem_region(mem_addr, mem_len, "ips")) {
-            IPS_PRINTK(KERN_WARNING, pci_dev,
-                       "Couldn't allocate IO Memory space %x len %d.\n",
-                       mem_addr, mem_len);
-            return -1;
-        }
-
         base = mem_addr & PAGE_MASK;
         offs = mem_addr - base;
         ioremap_ptr = ioremap(base, PAGE_SIZE);
+        if (!ioremap_ptr)
+            return -1;
         mem_ptr = ioremap_ptr + offs;
     } else {
         ioremap_ptr = NULL;
         mem_ptr = NULL;
     }
 
-    /* setup I/O mapped area (if applicable) */
-    if (io_addr) {
-        if (!request_region(io_addr, io_len, "ips")) {
-            IPS_PRINTK(KERN_WARNING, pci_dev,
-                       "Couldn't allocate IO space %x len %d.\n",
-                       io_addr, io_len);
-            return -1;
-        }
-    }
-
-    subdevice_id = pci_dev->subsystem_device;
-
     /* found a controller */
     ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
     if (ha == NULL) {
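
The mapping block above rounds the BAR address down to a page boundary, maps one page, and re-adds the sub-page offset; the newly added NULL check matters because ioremap() can fail, and the old code would have dereferenced the result regardless. In sketch form, with the matching unmap that ips_free() performs via ha->ioremap_ptr:

    base = mem_addr & PAGE_MASK;     /* page-align the BAR address */
    offs = mem_addr - base;          /* keep the sub-page residue  */
    ioremap_ptr = ioremap(base, PAGE_SIZE);
    if (!ioremap_ptr)                /* mapping can fail           */
        return -1;
    mem_ptr = ioremap_ptr + offs;    /* usable register window     */
    /* teardown elsewhere: iounmap(ha->ioremap_ptr); */
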
@@ -7078,13 +7065,11 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
         return -1;
     }
 
-
     ips_sh[index] = NULL;
     ips_ha[index] = ha;
     ha->active = 1;
 
     /* Store info in HA structure */
-    ha->irq = irq;
     ha->io_addr = io_addr;
     ha->io_len = io_len;
     ha->mem_addr = mem_addr;
@@ -7092,10 +7077,7 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
     ha->mem_ptr = mem_ptr;
     ha->ioremap_ptr = ioremap_ptr;
     ha->host_num = (uint32_t) index;
-    ha->revision_id = pci_dev->revision;
     ha->slot_num = PCI_SLOT(pci_dev->devfn);
-    ha->device_id = pci_dev->device;
-    ha->subdevice_id = subdevice_id;
     ha->pcidev = pci_dev;
 
     /*
@@ -7240,7 +7222,7 @@ ips_init_phase2(int index)
     }
 
     /* Install the interrupt handler */
-    if (request_irq(ha->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
+    if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
         IPS_PRINTK(KERN_WARNING, ha->pcidev,
                    "Unable to install interrupt handler\n");
         return ips_abort_init(ha, index);
@@ -7253,14 +7235,14 @@ ips_init_phase2(int index)
     if (!ips_allocatescbs(ha)) {
         IPS_PRINTK(KERN_WARNING, ha->pcidev,
                    "Unable to allocate a CCB\n");
-        free_irq(ha->irq, ha);
+        free_irq(ha->pcidev->irq, ha);
         return ips_abort_init(ha, index);
     }
 
     if (!ips_hainit(ha)) {
         IPS_PRINTK(KERN_WARNING, ha->pcidev,
                    "Unable to initialize controller\n");
-        free_irq(ha->irq, ha);
+        free_irq(ha->pcidev->irq, ha);
         return ips_abort_init(ha, index);
     }
     /* Free the temporary SCB */
@@ -7270,7 +7252,7 @@ ips_init_phase2(int index)
     if (!ips_allocatescbs(ha)) {
         IPS_PRINTK(KERN_WARNING, ha->pcidev,
                    "Unable to allocate CCBs\n");
-        free_irq(ha->irq, ha);
+        free_irq(ha->pcidev->irq, ha);
         return ips_abort_init(ha, index);
     }
 
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 3bcbd9ff056b..e0657b6f009c 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -60,14 +60,14 @@
  */
 #define IPS_HA(x) ((ips_ha_t *) x->hostdata)
 #define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs)
-#define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \
-                              (ha->revision_id >= IPS_REVID_TROMBONE32) && \
-                              (ha->revision_id <= IPS_REVID_TROMBONE64)) ? 1 : 0)
-#define IPS_IS_CLARINET(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \
-                              (ha->revision_id >= IPS_REVID_CLARINETP1) && \
-                              (ha->revision_id <= IPS_REVID_CLARINETP3)) ? 1 : 0)
-#define IPS_IS_MORPHEUS(ha) (ha->device_id == IPS_DEVICEID_MORPHEUS)
-#define IPS_IS_MARCO(ha) (ha->device_id == IPS_DEVICEID_MARCO)
+#define IPS_IS_TROMBONE(ha) (((ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) && \
+                              (ha->pcidev->revision >= IPS_REVID_TROMBONE32) && \
+                              (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) ? 1 : 0)
+#define IPS_IS_CLARINET(ha) (((ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) && \
+                              (ha->pcidev->revision >= IPS_REVID_CLARINETP1) && \
+                              (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) ? 1 : 0)
+#define IPS_IS_MORPHEUS(ha) (ha->pcidev->device == IPS_DEVICEID_MORPHEUS)
+#define IPS_IS_MARCO(ha) (ha->pcidev->device == IPS_DEVICEID_MARCO)
 #define IPS_USE_I2O_DELIVER(ha) ((IPS_IS_MORPHEUS(ha) || \
                                   (IPS_IS_TROMBONE(ha) && \
                                    (ips_force_i2o))) ? 1 : 0)
@@ -92,7 +92,7 @@
 #ifndef min
 #define min(x,y) ((x) < (y) ? x : y)
 #endif
 
 #ifndef __iomem    /* For clean compiles in earlier kernels without __iomem annotations */
 #define __iomem
 #endif
@@ -171,7 +171,7 @@
 #define IPS_CMD_DOWNLOAD 0x20
 #define IPS_CMD_RW_BIOSFW 0x22
 #define IPS_CMD_GET_VERSION_INFO 0xC6
 #define IPS_CMD_RESET_CHANNEL 0x1A
 
 /*
  * Adapter Equates
@@ -458,7 +458,7 @@ typedef struct {
     uint32_t reserved3;
     uint32_t buffer_addr;
     uint32_t reserved4;
 } IPS_IOCTL_CMD, *PIPS_IOCTL_CMD;
 
 typedef struct {
     uint8_t op_code;
@@ -552,7 +552,7 @@ typedef struct {
     uint32_t cccr;
 } IPS_NVRAM_CMD, *PIPS_NVRAM_CMD;
 
 typedef struct
 {
     uint8_t op_code;
     uint8_t command_id;
@@ -650,7 +650,7 @@ typedef struct {
     uint8_t device_address;
     uint8_t cmd_attribute;
     uint8_t cdb_length;
     uint8_t reserved_for_LUN;
     uint32_t transfer_length;
     uint32_t buffer_pointer;
     uint16_t sg_count;
@@ -790,7 +790,7 @@ typedef struct {
 /* SubSystem Parameter[4] */
 #define IPS_GET_VERSION_SUPPORT 0x00018000    /* Mask for Versioning Support */
 
 typedef struct
 {
     uint32_t revision;
     uint8_t bootBlkVersion[32];
@@ -1034,7 +1034,6 @@ typedef struct ips_ha {
     uint8_t ha_id[IPS_MAX_CHANNELS+1];
     uint32_t dcdb_active[IPS_MAX_CHANNELS];
     uint32_t io_addr;    /* Base I/O address */
-    uint8_t irq;    /* IRQ for adapter */
     uint8_t ntargets;    /* Number of targets */
     uint8_t nbus;    /* Number of buses */
     uint8_t nlun;    /* Number of Luns */
@@ -1066,10 +1065,7 @@ typedef struct ips_ha {
     int ioctl_reset;    /* IOCTL Requested Reset Flag */
     uint16_t reset_count;    /* number of resets */
     time_t last_ffdc;    /* last time we sent ffdc info*/
-    uint8_t revision_id;    /* Revision level */
-    uint16_t device_id;    /* PCI device ID */
     uint8_t slot_num;    /* PCI Slot Number */
-    uint16_t subdevice_id;    /* Subsystem device ID */
     int ioctl_len;    /* size of ioctl buffer */
     dma_addr_t ioctl_busaddr;    /* dma address of ioctl buffer*/
     uint8_t bios_version[8];    /* BIOS Revision */
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 57ce2251abc8..e5be5fd4ef58 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -48,7 +48,7 @@ MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
               "Alex Aizman <itn780@yahoo.com>");
 MODULE_DESCRIPTION("iSCSI/TCP data-path");
 MODULE_LICENSE("GPL");
-/* #define DEBUG_TCP */
+#undef DEBUG_TCP
 #define DEBUG_ASSERT
 
 #ifdef DEBUG_TCP
@@ -67,115 +67,429 @@ MODULE_LICENSE("GPL");
 static unsigned int iscsi_max_lun = 512;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
+static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+                                   struct iscsi_segment *segment);
+
+/*
+ * Scatterlist handling: inside the iscsi_segment, we
+ * remember an index into the scatterlist, and set data/size
+ * to the current scatterlist entry. For highmem pages, we
+ * kmap as needed.
+ *
+ * Note that the page is unmapped when we return from
+ * TCP's data_ready handler, so we may end up mapping and
+ * unmapping the same page repeatedly. The whole reason
+ * for this is that we shouldn't keep the page mapped
+ * outside the softirq.
+ */
+
+/**
+ * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
+ * @segment: the buffer object
+ * @sg: scatterlist
+ * @offset: byte offset into that sg entry
+ *
+ * This function sets up the segment so that subsequent
+ * data is copied to the indicated sg entry, at the given
+ * offset.
+ */
 static inline void
-iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
+iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
+                          struct scatterlist *sg, unsigned int offset)
 {
-    sg_init_one(&ibuf->sg, vbuf, size);
-    ibuf->sent = 0;
-    ibuf->use_sendmsg = 1;
+    segment->sg = sg;
+    segment->sg_offset = offset;
+    segment->size = min(sg->length - offset,
+                        segment->total_size - segment->total_copied);
+    segment->data = NULL;
 }
 
+/**
+ * iscsi_tcp_segment_map - map the current S/G page
+ * @segment: iscsi_segment
+ * @recv: 1 if called from recv path
+ *
+ * We only need to possibly kmap data if scatter lists are being used,
+ * because the iscsi passthrough and internal IO paths will never use high
+ * mem pages.
+ */
 static inline void
-iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
+iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
 {
-    sg_init_table(&ibuf->sg, 1);
-    sg_set_page(&ibuf->sg, sg_page(sg), sg->length, sg->offset);
+    struct scatterlist *sg;
+
+    if (segment->data != NULL || !segment->sg)
+        return;
+
+    sg = segment->sg;
+    BUG_ON(segment->sg_mapped);
+    BUG_ON(sg->length == 0);
+
     /*
-     * Fastpath: sg element fits into single page
+     * If the page count is greater than one it is ok to send
+     * to the network layer's zero copy send path. If not we
+     * have to go the slow sendmsg path. We always map for the
+     * recv path.
      */
-    if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg_page(sg)))
-        ibuf->use_sendmsg = 0;
-    else
-        ibuf->use_sendmsg = 1;
-    ibuf->sent = 0;
+    if (page_count(sg_page(sg)) >= 1 && !recv)
+        return;
+
+    debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
+              segment);
+    segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+    segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
 }
 
-static inline int
-iscsi_buf_left(struct iscsi_buf *ibuf)
+static inline void
+iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
 {
-    int rc;
+    debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
 
-    rc = ibuf->sg.length - ibuf->sent;
-    BUG_ON(rc < 0);
-    return rc;
+    if (segment->sg_mapped) {
+        debug_tcp("iscsi_tcp_segment_unmap valid\n");
+        kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+        segment->sg_mapped = NULL;
+        segment->data = NULL;
+    }
 }
 
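iscsi_tcp_segment_map() only takes a kernel mapping when it has to: always on the receive path, and on transmit only when the page cannot go through the zero-copy sendpage path. Because it uses kmap_atomic(), the mapping must not be held across anything that can sleep or re-map the page, which is why iscsi_tcp_segment_done() below unmaps before calling into the crypto layer. The intended usage, as a sketch rather than driver code:

    /* Invariant sketch: the atomic kmap is held only across the
     * copy, in softirq context. */
    iscsi_tcp_segment_map(segment, 1);     /* recv: may kmap_atomic  */
    /* copy into segment->data happens here */
    iscsi_tcp_segment_unmap(segment);      /* before crypto or sleep */
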
+/*
+ * Splice the digest buffer into the buffer
+ */
 static inline void
-iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
-                 u8* crc)
+iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
 {
-    struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-
-    crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
-    buf->sg.length += sizeof(u32);
+    segment->data = digest;
+    segment->digest_len = ISCSI_DIGEST_SIZE;
+    segment->total_size += ISCSI_DIGEST_SIZE;
+    segment->size = ISCSI_DIGEST_SIZE;
+    segment->copied = 0;
+    segment->sg = NULL;
+    segment->hash = NULL;
 }
 
171/**
172 * iscsi_tcp_segment_done - check whether the segment is complete
173 * @segment: iscsi segment to check
174 * @recv: set to one of this is called from the recv path
175 * @copied: number of bytes copied
176 *
177 * Check if we're done receiving this segment. If the receive
178 * buffer is full but we expect more data, move on to the
179 * next entry in the scatterlist.
180 *
181 * If the amount of data we received isn't a multiple of 4,
182 * we will transparently receive the pad bytes, too.
183 *
184 * This function must be re-entrant.
185 */
-static inline int
-iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
-{
-	struct sk_buff *skb = tcp_conn->in.skb;
-
-	tcp_conn->in.zero_copy_hdr = 0;
-
-	if (tcp_conn->in.copy >= tcp_conn->hdr_size &&
-	    tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
-		/*
-		 * Zero-copy PDU Header: using connection context
-		 * to store header pointer.
-		 */
-		if (skb_shinfo(skb)->frag_list == NULL &&
-		    !skb_shinfo(skb)->nr_frags) {
-			tcp_conn->in.hdr = (struct iscsi_hdr *)
-				((char*)skb->data + tcp_conn->in.offset);
-			tcp_conn->in.zero_copy_hdr = 1;
+static inline int
+iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
+{
+	static unsigned char padbuf[ISCSI_PAD_LEN];
+	struct scatterlist sg;
+	unsigned int pad;
+
+	debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
+		  segment->size, recv ? "recv" : "xmit");
+	if (segment->hash && copied) {
+		/*
+		 * If a segment is kmap'd we must unmap it before sending
+		 * to the crypto layer since that will try to kmap it again.
+		 */
+		iscsi_tcp_segment_unmap(segment);
+
+		if (!segment->data) {
+			sg_init_table(&sg, 1);
+			sg_set_page(&sg, sg_page(segment->sg), copied,
+				    segment->copied + segment->sg_offset +
+				    segment->sg->offset);
+		} else
+			sg_init_one(&sg, segment->data + segment->copied,
+				    copied);
+		crypto_hash_update(segment->hash, &sg, copied);
+	}
+
+	segment->copied += copied;
+	if (segment->copied < segment->size) {
+		iscsi_tcp_segment_map(segment, recv);
+		return 0;
+	}
+
+	segment->total_copied += segment->copied;
+	segment->copied = 0;
+	segment->size = 0;
+
+	/* Unmap the current scatterlist page, if there is one. */
+	iscsi_tcp_segment_unmap(segment);
+
+	/* Do we have more scatterlist entries? */
+	debug_tcp("total copied %u total size %u\n", segment->total_copied,
+		  segment->total_size);
+	if (segment->total_copied < segment->total_size) {
+		/* Proceed to the next entry in the scatterlist. */
+		iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
+					  0);
+		iscsi_tcp_segment_map(segment, recv);
+		BUG_ON(segment->size == 0);
+		return 0;
+	}
+
+	/* Do we need to handle padding? */
+	pad = iscsi_padding(segment->total_copied);
+	if (pad != 0) {
+		debug_tcp("consume %d pad bytes\n", pad);
+		segment->total_size += pad;
+		segment->size = pad;
+		segment->data = padbuf;
+		return 0;
+	}
+
+	/*
+	 * Set us up for transferring the data digest. hdr digest
+	 * is completely handled in hdr done function.
+	 */
+	if (segment->hash) {
+		crypto_hash_final(segment->hash, segment->digest);
+		iscsi_tcp_segment_splice_digest(segment,
+				recv ? segment->recv_digest : segment->digest);
+		return 0;
+	}
+
+	return 1;
+}
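
iscsi_tcp_segment_done() leans on iSCSI's rule (RFC 3720) that every data segment is padded out to a 4-byte boundary; the pad bytes are received transparently into a throwaway buffer. A minimal user-space sketch of the arithmetic, assuming the patch's iscsi_padding() helper computes the pad count the obvious way:

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's iscsi_padding():
     * number of pad bytes needed to reach a 4-byte boundary. */
    static unsigned int iscsi_padding(unsigned int len)
    {
    	return (4 - (len & 3)) & 3;
    }

    int main(void)
    {
    	assert(iscsi_padding(0) == 0);
    	assert(iscsi_padding(1) == 3);
    	assert(iscsi_padding(4) == 0);
    	printf("513-byte data segment needs %u pad bytes\n",
    	       iscsi_padding(513));	/* prints 3 */
    	return 0;
    }
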
+
+/**
+ * iscsi_tcp_xmit_segment - transmit segment
+ * @tcp_conn: the iSCSI TCP connection
+ * @segment: the buffer to transmit
+ *
+ * This function transmits as much of the buffer as
+ * the network layer will accept, and returns the number of
+ * bytes transmitted.
+ *
+ * If CRC hashing is enabled, the function will compute the
+ * hash as it goes. When the entire segment has been transmitted,
+ * it will retrieve the hash value and send it as well.
+ */
-		} else {
-			/* ignoring return code since we checked
-			 * in.copy before */
-			skb_copy_bits(skb, tcp_conn->in.offset,
-				&tcp_conn->hdr, tcp_conn->hdr_size);
-			tcp_conn->in.hdr = &tcp_conn->hdr;
-		}
-		tcp_conn->in.offset += tcp_conn->hdr_size;
-		tcp_conn->in.copy -= tcp_conn->hdr_size;
-	} else {
-		int hdr_remains;
-		int copylen;
-
-		/*
-		 * PDU header scattered across SKB's,
-		 * copying it... This'll happen quite rarely.
-		 */
+static int
+iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
+		       struct iscsi_segment *segment)
+{
+	struct socket *sk = tcp_conn->sock;
+	unsigned int copied = 0;
+	int r = 0;
+
+	while (!iscsi_tcp_segment_done(segment, 0, r)) {
+		struct scatterlist *sg;
+		unsigned int offset, copy;
+		int flags = 0;
+
+		r = 0;
+		offset = segment->copied;
+		copy = segment->size - offset;
+
+		if (segment->total_copied + segment->size < segment->total_size)
+			flags |= MSG_MORE;
+
+		/* Use sendpage if we can; else fall back to sendmsg */
+		if (!segment->data) {
+			sg = segment->sg;
+			offset += segment->sg_offset + sg->offset;
+			r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
+					       flags);
+		} else {
+			struct msghdr msg = { .msg_flags = flags };
+			struct kvec iov = {
+				.iov_base = segment->data + offset,
+				.iov_len = copy
+			};
+
+			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
+		}
+
+		if (r < 0) {
+			iscsi_tcp_segment_unmap(segment);
+			if (copied || r == -EAGAIN)
+				break;
+			return r;
+		}
+		copied += r;
+	}
+	return copied;
+}
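
iscsi_tcp_xmit_segment() follows the classic non-blocking transmit pattern: send what the stack accepts, treat a short return as progress, and stop on -EAGAIN once something has already gone out. A rough user-space analogue using plain send(2) in place of the kernel's sendpage/kernel_sendmsg (the helper name is hypothetical):

    #include <errno.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Sketch: send as much of buf as the socket will take.
     * Returns bytes queued, or -errno if nothing could be sent. */
    static ssize_t xmit_all(int fd, const char *buf, size_t len)
    {
    	size_t copied = 0;

    	while (copied < len) {
    		ssize_t r = send(fd, buf + copied, len - copied, MSG_DONTWAIT);
    		if (r < 0) {
    			if (copied || errno == EAGAIN)
    				break;	/* partial progress; retry later */
    			return -errno;	/* hard error with nothing sent */
    		}
    		copied += (size_t)r;
    	}
    	return (ssize_t)copied;
    }
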
+
+/**
+ * iscsi_tcp_segment_recv - copy data to segment
+ * @tcp_conn: the iSCSI TCP connection
+ * @segment: the buffer to copy to
+ * @ptr: data pointer
+ * @len: amount of data available
+ *
+ * This function copies up to @len bytes to the
+ * given buffer, and returns the number of bytes
+ * consumed, which can actually be less than @len.
+ *
+ * If hash digest is enabled, the function will update the
+ * hash while copying.
+ * Combining these two operations doesn't buy us a lot (yet),
+ * but in the future we could implement combined copy+crc,
+ * just the way we do for network layer checksums.
+ */
+static int
+iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
+		       struct iscsi_segment *segment, const void *ptr,
+		       unsigned int len)
+{
+	unsigned int copy = 0, copied = 0;
+
+	while (!iscsi_tcp_segment_done(segment, 1, copy)) {
+		if (copied == len) {
+			debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
+				  len);
+			break;
+		}
+
+		copy = min(len - copied, segment->size - segment->copied);
+		debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
+		memcpy(segment->data + segment->copied, ptr + copied, copy);
+		copied += copy;
+	}
+	return copied;
+}
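
The receive side mirrors the transmit loop: each pass copies min(bytes available, bytes the segment still wants) and reports how much of the caller's data it actually consumed. A self-contained sketch with a hypothetical fixed-size segment:

    #include <stdio.h>
    #include <string.h>

    struct seg { char buf[16]; unsigned int size, copied; };

    /* Consume up to len bytes from ptr; return how many were taken. */
    static unsigned int seg_recv(struct seg *s, const char *ptr,
    			     unsigned int len)
    {
    	unsigned int copied = 0;

    	while (s->copied < s->size && copied < len) {
    		unsigned int copy = len - copied;

    		if (copy > s->size - s->copied)
    			copy = s->size - s->copied;	/* min() */
    		memcpy(s->buf + s->copied, ptr + copied, copy);
    		s->copied += copy;
    		copied += copy;
    	}
    	return copied;
    }

    int main(void)
    {
    	struct seg s = { .size = 10, .copied = 0 };

    	/* Only 10 of the 12 offered bytes fit; 2 are left for later. */
    	printf("%u\n", seg_recv(&s, "hello world!", 12));	/* prints 10 */
    	return 0;
    }
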
 
-	if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER)
-		tcp_conn->in.hdr_offset = 0;
-
-	hdr_remains = tcp_conn->hdr_size - tcp_conn->in.hdr_offset;
-	BUG_ON(hdr_remains <= 0);
-
-	copylen = min(tcp_conn->in.copy, hdr_remains);
-	skb_copy_bits(skb, tcp_conn->in.offset,
-		      (char*)&tcp_conn->hdr + tcp_conn->in.hdr_offset,
-		      copylen);
-
-	debug_tcp("PDU gather offset %d bytes %d in.offset %d "
-		  "in.copy %d\n", tcp_conn->in.hdr_offset, copylen,
-		  tcp_conn->in.offset, tcp_conn->in.copy);
-
-	tcp_conn->in.offset += copylen;
-	tcp_conn->in.copy -= copylen;
-	if (copylen < hdr_remains) {
-		tcp_conn->in_progress = IN_PROGRESS_HEADER_GATHER;
-		tcp_conn->in.hdr_offset += copylen;
-		return -EAGAIN;
-	}
-	tcp_conn->in.hdr = &tcp_conn->hdr;
-	tcp_conn->discontiguous_hdr_cnt++;
-	tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
-	}
-
+static inline void
+iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
+		      unsigned char digest[ISCSI_DIGEST_SIZE])
+{
+	struct scatterlist sg;
+
+	sg_init_one(&sg, hdr, hdrlen);
+	crypto_hash_digest(hash, &sg, hdrlen, digest);
+}
+
+static inline int
+iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
+		      struct iscsi_segment *segment)
+{
+	if (!segment->digest_len)
+		return 1;
+
+	if (memcmp(segment->recv_digest, segment->digest,
+		   segment->digest_len)) {
+		debug_scsi("digest mismatch\n");
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Helper function to set up segment buffer
+ */
+static inline void
+__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
+		     iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+{
+	memset(segment, 0, sizeof(*segment));
+	segment->total_size = size;
+	segment->done = done;
+
+	if (hash) {
+		segment->hash = hash;
+		crypto_hash_init(hash);
+	}
+}
+
+static inline void
+iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
+			  size_t size, iscsi_segment_done_fn_t *done,
+			  struct hash_desc *hash)
+{
+	__iscsi_segment_init(segment, size, done, hash);
+	segment->data = data;
+	segment->size = size;
+}
+
+static inline int
+iscsi_segment_seek_sg(struct iscsi_segment *segment,
+		      struct scatterlist *sg_list, unsigned int sg_count,
+		      unsigned int offset, size_t size,
+		      iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+{
+	struct scatterlist *sg;
+	unsigned int i;
+
+	debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
+		   offset, size);
+	__iscsi_segment_init(segment, size, done, hash);
+	for_each_sg(sg_list, sg, sg_count, i) {
+		debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
+			   sg->offset);
+		if (offset < sg->length) {
+			iscsi_tcp_segment_init_sg(segment, sg, offset);
+			return 0;
+		}
+		offset -= sg->length;
+	}
+
+	return ISCSI_ERR_DATA_OFFSET;
+}
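
iscsi_segment_seek_sg() locates the scatterlist entry containing a byte offset by subtracting entry lengths until the offset fits inside one entry. The same walk over a plain buffer list (hypothetical types, user-space C):

    #include <stdio.h>

    struct buf { const char *data; unsigned int length; };

    /* Find which buffer holds byte 'offset'; store the offset within it. */
    static int seek_buf(const struct buf *list, unsigned int count,
    		    unsigned int offset, unsigned int *idx,
    		    unsigned int *off)
    {
    	unsigned int i;

    	for (i = 0; i < count; i++) {
    		if (offset < list[i].length) {
    			*idx = i;
    			*off = offset;
    			return 0;
    		}
    		offset -= list[i].length;
    	}
    	return -1;	/* past the end: the ISCSI_ERR_DATA_OFFSET case */
    }

    int main(void)
    {
    	struct buf list[] = { { "aaaa", 4 }, { "bbbbbb", 6 }, { "cc", 2 } };
    	unsigned int idx, off;

    	if (seek_buf(list, 3, 7, &idx, &off) == 0)
    		printf("offset 7 -> entry %u, offset %u\n", idx, off);
    	/* prints: offset 7 -> entry 1, offset 3 */
    	return 0;
    }
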
-	return 0;
-}
-
+
+/**
+ * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
+ * @tcp_conn: iscsi connection to prep for
+ *
+ * This function always passes NULL for the hash argument, because when this
+ * function is called we do not yet know the final size of the header and want
+ * to delay the digest processing until we know that.
+ */
+static void
+iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+{
+	debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
+		  tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
+	iscsi_segment_init_linear(&tcp_conn->in.segment,
+				tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
+				iscsi_tcp_hdr_recv_done, NULL);
+}
+
+/*
+ * Handle incoming reply to any other type of command
+ */
+static int
+iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
+			 struct iscsi_segment *segment)
+{
+	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+	int rc = 0;
+
+	if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+		return ISCSI_ERR_DATA_DGST;
+
+	rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
+				conn->data, tcp_conn->in.datalen);
+	if (rc)
+		return rc;
+
+	iscsi_tcp_hdr_recv_prep(tcp_conn);
+	return 0;
+}
+
+static void
+iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+{
+	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+	struct hash_desc *rx_hash = NULL;
+
+	if (conn->datadgst_en)
+		rx_hash = &tcp_conn->rx_hash;
+
+	iscsi_segment_init_linear(&tcp_conn->in.segment,
+				  conn->data, tcp_conn->in.datalen,
+				  iscsi_tcp_data_recv_done, rx_hash);
+}
+
 /*
  * must be called with session lock
  */
@@ -184,7 +498,6 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 {
 	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
 	struct iscsi_r2t_info *r2t;
-	struct scsi_cmnd *sc;
 
 	/* flush ctask's r2t queues */
 	while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
@@ -193,12 +506,12 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 		debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
 	}
 
-	sc = ctask->sc;
-	if (unlikely(!sc))
-		return;
-
-	tcp_ctask->xmstate = XMSTATE_VALUE_IDLE;
-	tcp_ctask->r2t = NULL;
+	r2t = tcp_ctask->r2t;
+	if (r2t != NULL) {
+		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+			    sizeof(void*));
+		tcp_ctask->r2t = NULL;
+	}
 }
 
 /**
@@ -217,11 +530,6 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 	int datasn = be32_to_cpu(rhdr->datasn);
 
 	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
-	/*
-	 * setup Data-In byte counter (gets decremented..)
-	 */
-	ctask->data_count = tcp_conn->in.datalen;
-
 	if (tcp_conn->in.datalen == 0)
 		return 0;
 
@@ -242,22 +550,20 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 	}
 
 	if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
+		sc->result = (DID_OK << 16) | rhdr->cmd_status;
 		conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
-		if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
+		if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
+				   ISCSI_FLAG_DATA_OVERFLOW)) {
 			int res_count = be32_to_cpu(rhdr->residual_count);
 
 			if (res_count > 0 &&
-			    res_count <= scsi_bufflen(sc)) {
+			    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+			     res_count <= scsi_bufflen(sc)))
 				scsi_set_resid(sc, res_count);
-				sc->result = (DID_OK << 16) | rhdr->cmd_status;
-			} else
+			else
 				sc->result = (DID_BAD_TARGET << 16) |
 					     rhdr->cmd_status;
-		} else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
-			scsi_set_resid(sc, be32_to_cpu(rhdr->residual_count));
-			sc->result = (DID_OK << 16) | rhdr->cmd_status;
-		} else
-			sc->result = (DID_OK << 16) | rhdr->cmd_status;
+		}
 	}
 
 	conn->datain_pdus_cnt++;
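
The underflow branch above reports short transfers through scsi_set_resid(): the residual is the number of requested bytes the target did not move, and it is trusted only when it fits within the buffer. A toy version of that bookkeeping (hypothetical struct, plain C):

    #include <stdio.h>

    /* Sketch of SCSI residual bookkeeping. */
    struct cmd { unsigned int bufflen, resid; };

    static void set_resid_on_underflow(struct cmd *sc, unsigned int res_count)
    {
    	if (res_count > 0 && res_count <= sc->bufflen)
    		sc->resid = res_count;	/* valid residual */
    	/* else: bogus residual from the target -> error path */
    }

    int main(void)
    {
    	struct cmd sc = { .bufflen = 4096, .resid = 0 };

    	set_resid_on_underflow(&sc, 512);
    	printf("transferred %u of %u bytes\n", sc.bufflen - sc.resid,
    	       sc.bufflen);	/* 3584 of 4096 */
    	return 0;
    }
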
@@ -281,9 +587,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 			struct iscsi_r2t_info *r2t)
 {
 	struct iscsi_data *hdr;
-	struct scsi_cmnd *sc = ctask->sc;
-	int i, sg_count = 0;
-	struct scatterlist *sg;
 
 	hdr = &r2t->dtask.hdr;
 	memset(hdr, 0, sizeof(struct iscsi_data));
@@ -307,34 +610,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 	conn->dataout_pdus_cnt++;
 
 	r2t->sent = 0;
-
-	iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
-			   sizeof(struct iscsi_hdr));
-
-	sg = scsi_sglist(sc);
-	r2t->sg = NULL;
-	for (i = 0; i < scsi_sg_count(sc); i++, sg += 1) {
-		/* FIXME: prefetch ? */
-		if (sg_count + sg->length > r2t->data_offset) {
-			int page_offset;
-
-			/* sg page found! */
-
-			/* offset within this page */
-			page_offset = r2t->data_offset - sg_count;
-
-			/* fill in this buffer */
-			iscsi_buf_init_sg(&r2t->sendbuf, sg);
-			r2t->sendbuf.sg.offset += page_offset;
-			r2t->sendbuf.sg.length -= page_offset;
-
-			/* xmit logic will continue with next one */
-			r2t->sg = sg + 1;
-			break;
-		}
-		sg_count += sg->length;
-	}
-	BUG_ON(r2t->sg == NULL);
 }
 
 /**
@@ -366,14 +641,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 	}
 
 	/* fill-in new R2T associated with the task */
-	spin_lock(&session->lock);
 	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
 
-	if (!ctask->sc || ctask->mtask ||
-	    session->state != ISCSI_STATE_LOGGED_IN) {
+	if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
 		printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
 		       "recovery...\n", ctask->itt);
-		spin_unlock(&session->lock);
 		return 0;
 	}
 
@@ -384,7 +656,8 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 	r2t->data_length = be32_to_cpu(rhdr->data_length);
 	if (r2t->data_length == 0) {
 		printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
-		spin_unlock(&session->lock);
+		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+			    sizeof(void*));
 		return ISCSI_ERR_DATALEN;
 	}
 
@@ -395,10 +668,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 
 	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
 	if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
-		spin_unlock(&session->lock);
 		printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
 		       "offset %u and total length %d\n", r2t->data_length,
 		       r2t->data_offset, scsi_bufflen(ctask->sc));
+		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+			    sizeof(void*));
 		return ISCSI_ERR_DATALEN;
 	}
 
@@ -409,26 +683,55 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 
 	tcp_ctask->exp_datasn = r2tsn + 1;
 	__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
-	set_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate);
-	list_move_tail(&ctask->running, &conn->xmitqueue);
-
-	scsi_queue_work(session->host, &conn->xmitwork);
 	conn->r2t_pdus_cnt++;
-	spin_unlock(&session->lock);
 
+	iscsi_requeue_ctask(ctask);
 	return 0;
 }
 
+/*
+ * Handle incoming reply to DataIn command
+ */
 static int
-iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
+iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
+			  struct iscsi_segment *segment)
+{
+	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+	struct iscsi_hdr *hdr = tcp_conn->in.hdr;
+	int rc;
+
+	if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+		return ISCSI_ERR_DATA_DGST;
+
+	/* check for non-exceptional status */
+	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+		rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
+		if (rc)
+			return rc;
+	}
+
+	iscsi_tcp_hdr_recv_prep(tcp_conn);
+	return 0;
+}
+
+/**
+ * iscsi_tcp_hdr_dissect - process PDU header
+ * @conn: iSCSI connection
+ * @hdr: PDU header
+ *
+ * This function analyzes the header of the PDU received,
+ * and performs several sanity checks. If the PDU is accompanied
+ * by data, the receive buffer is set up to copy the incoming data
+ * to the correct location.
+ */
+static int
+iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 {
 	int rc = 0, opcode, ahslen;
-	struct iscsi_hdr *hdr;
 	struct iscsi_session *session = conn->session;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	uint32_t cdgst, rdgst = 0, itt;
-
-	hdr = tcp_conn->in.hdr;
+	struct iscsi_cmd_task *ctask;
+	uint32_t itt;
 
 	/* verify PDU length */
 	tcp_conn->in.datalen = ntoh24(hdr->dlength);
@@ -437,78 +740,73 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
 		       tcp_conn->in.datalen, conn->max_recv_dlength);
 		return ISCSI_ERR_DATALEN;
 	}
-	tcp_conn->data_copied = 0;
 
-	/* read AHS */
+	/* Additional header segments. So far, we don't
+	 * process additional headers.
+	 */
 	ahslen = hdr->hlength << 2;
-	tcp_conn->in.offset += ahslen;
-	tcp_conn->in.copy -= ahslen;
-	if (tcp_conn->in.copy < 0) {
-		printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
-		       "%d bytes\n", ahslen);
-		return ISCSI_ERR_AHSLEN;
-	}
-
-	/* calculate read padding */
-	tcp_conn->in.padding = tcp_conn->in.datalen & (ISCSI_PAD_LEN-1);
-	if (tcp_conn->in.padding) {
-		tcp_conn->in.padding = ISCSI_PAD_LEN - tcp_conn->in.padding;
-		debug_scsi("read padding %d bytes\n", tcp_conn->in.padding);
-	}
-
-	if (conn->hdrdgst_en) {
-		struct scatterlist sg;
-
-		sg_init_one(&sg, (u8 *)hdr,
-			    sizeof(struct iscsi_hdr) + ahslen);
-		crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length,
-				   (u8 *)&cdgst);
-		rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
-				     ahslen);
-		if (cdgst != rdgst) {
-			printk(KERN_ERR "iscsi_tcp: hdrdgst error "
-			       "recv 0x%x calc 0x%x\n", rdgst, cdgst);
-			return ISCSI_ERR_HDR_DGST;
-		}
-	}
 
 	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
 	/* verify itt (itt encoding: age+cid+itt) */
 	rc = iscsi_verify_itt(conn, hdr, &itt);
-	if (rc == ISCSI_ERR_NO_SCSI_CMD) {
-		tcp_conn->in.datalen = 0; /* force drop */
-		return 0;
-	} else if (rc)
+	if (rc)
 		return rc;
 
-	debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
-		  opcode, tcp_conn->in.offset, tcp_conn->in.copy,
-		  ahslen, tcp_conn->in.datalen);
+	debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
+		  opcode, ahslen, tcp_conn->in.datalen);
 
 	switch(opcode) {
 	case ISCSI_OP_SCSI_DATA_IN:
-		tcp_conn->in.ctask = session->cmds[itt];
-		rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
+		ctask = session->cmds[itt];
+		spin_lock(&conn->session->lock);
+		rc = iscsi_data_rsp(conn, ctask);
+		spin_unlock(&conn->session->lock);
 		if (rc)
 			return rc;
+		if (tcp_conn->in.datalen) {
+			struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+			struct hash_desc *rx_hash = NULL;
+
+			/*
+			 * Setup copy of Data-In into the Scsi_Cmnd
+			 * Scatterlist case:
+			 * We set up the iscsi_segment to point to the next
+			 * scatterlist entry to copy to. As we go along,
+			 * we move on to the next scatterlist entry and
+			 * update the digest per-entry.
+			 */
+			if (conn->datadgst_en)
+				rx_hash = &tcp_conn->rx_hash;
+
+			debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+				  "datalen=%d)\n", tcp_conn,
+				  tcp_ctask->data_offset,
+				  tcp_conn->in.datalen);
+			return iscsi_segment_seek_sg(&tcp_conn->in.segment,
+						     scsi_sglist(ctask->sc),
+						     scsi_sg_count(ctask->sc),
+						     tcp_ctask->data_offset,
+						     tcp_conn->in.datalen,
+						     iscsi_tcp_process_data_in,
+						     rx_hash);
+		}
 		/* fall through */
 	case ISCSI_OP_SCSI_CMD_RSP:
-		tcp_conn->in.ctask = session->cmds[itt];
-		if (tcp_conn->in.datalen)
-			goto copy_hdr;
-
-		spin_lock(&session->lock);
-		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
-		spin_unlock(&session->lock);
+		if (tcp_conn->in.datalen) {
+			iscsi_tcp_data_recv_prep(tcp_conn);
+			return 0;
+		}
+		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
 		break;
 	case ISCSI_OP_R2T:
-		tcp_conn->in.ctask = session->cmds[itt];
+		ctask = session->cmds[itt];
 		if (ahslen)
 			rc = ISCSI_ERR_AHSLEN;
-		else if (tcp_conn->in.ctask->sc->sc_data_direction ==
-								DMA_TO_DEVICE)
-			rc = iscsi_r2t_rsp(conn, tcp_conn->in.ctask);
-		else
+		else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+			spin_lock(&session->lock);
+			rc = iscsi_r2t_rsp(conn, ctask);
+			spin_unlock(&session->lock);
+		} else
 			rc = ISCSI_ERR_PROTO;
 		break;
 	case ISCSI_OP_LOGIN_RSP:
@@ -520,8 +818,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
 		 * than 8K, but there are no targets that currently do this.
 		 * For now we fail until we find a vendor that needs it
 		 */
-		if (ISCSI_DEF_MAX_RECV_SEG_LEN <
-		    tcp_conn->in.datalen) {
+		if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
 			printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
 			       "but conn buffer is only %u (opcode %0x)\n",
 			       tcp_conn->in.datalen,
@@ -530,8 +827,13 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
 			break;
 		}
 
-		if (tcp_conn->in.datalen)
-			goto copy_hdr;
+		/* If there's data coming in with the response,
+		 * receive it to the connection's buffer.
+		 */
+		if (tcp_conn->in.datalen) {
+			iscsi_tcp_data_recv_prep(tcp_conn);
+			return 0;
+		}
 		/* fall through */
 	case ISCSI_OP_LOGOUT_RSP:
 	case ISCSI_OP_NOOP_IN:
@@ -543,461 +845,161 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
 		break;
 	}
 
-	return rc;
-
-copy_hdr:
-	/*
-	 * if we did zero copy for the header but we will need multiple
-	 * skbs to complete the command then we have to copy the header
-	 * for later use
-	 */
-	if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
-	   (tcp_conn->in.datalen + tcp_conn->in.padding +
-	    (conn->datadgst_en ? 4 : 0))) {
-		debug_tcp("Copying header for later use. in.copy %d in.datalen"
-			  " %d\n", tcp_conn->in.copy, tcp_conn->in.datalen);
-		memcpy(&tcp_conn->hdr, tcp_conn->in.hdr,
-		       sizeof(struct iscsi_hdr));
-		tcp_conn->in.hdr = &tcp_conn->hdr;
-		tcp_conn->in.zero_copy_hdr = 0;
-	}
-	return 0;
-}
-
-/**
- * iscsi_ctask_copy - copy skb bits to the destanation cmd task
- * @conn: iscsi tcp connection
- * @ctask: scsi command task
- * @buf: buffer to copy to
- * @buf_size: size of buffer
- * @offset: offset within the buffer
- *
- * Notes:
- *	The function calls skb_copy_bits() and updates per-connection and
- *	per-cmd byte counters.
- *
- *	Read counters (in bytes):
- *
- *	conn->in.offset		offset within in progress SKB
- *	conn->in.copy		left to copy from in progress SKB
- *				including padding
- *	conn->in.copied		copied already from in progress SKB
- *	conn->data_copied	copied already from in progress buffer
- *	ctask->sent		total bytes sent up to the MidLayer
- *	ctask->data_count	left to copy from in progress Data-In
- *	buf_left		left to copy from in progress buffer
- **/
-static inline int
-iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
-		 void *buf, int buf_size, int offset)
-{
-	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-	int buf_left = buf_size - (tcp_conn->data_copied + offset);
-	unsigned size = min(tcp_conn->in.copy, buf_left);
-	int rc;
-
-	size = min(size, ctask->data_count);
-
-	debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
-		  size, tcp_conn->in.offset, tcp_conn->in.copied);
-
-	BUG_ON(size <= 0);
-	BUG_ON(tcp_ctask->sent + size > scsi_bufflen(ctask->sc));
-
-	rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
-			   (char*)buf + (offset + tcp_conn->data_copied), size);
-	/* must fit into skb->len */
-	BUG_ON(rc);
-
-	tcp_conn->in.offset += size;
-	tcp_conn->in.copy -= size;
-	tcp_conn->in.copied += size;
-	tcp_conn->data_copied += size;
-	tcp_ctask->sent += size;
-	ctask->data_count -= size;
-
-	BUG_ON(tcp_conn->in.copy < 0);
-	BUG_ON(ctask->data_count < 0);
-
-	if (buf_size != (tcp_conn->data_copied + offset)) {
-		if (!ctask->data_count) {
-			BUG_ON(buf_size - tcp_conn->data_copied < 0);
-			/* done with this PDU */
-			return buf_size - tcp_conn->data_copied;
-		}
-		return -EAGAIN;
-	}
-
-	/* done with this buffer or with both - PDU and buffer */
-	tcp_conn->data_copied = 0;
-	return 0;
-}
+	if (rc == 0) {
+		/* Anything that comes with data should have
+		 * been handled above. */
+		if (tcp_conn->in.datalen)
+			return ISCSI_ERR_PROTO;
+		iscsi_tcp_hdr_recv_prep(tcp_conn);
+	}
+
+	return rc;
+}
 
-/**
- * iscsi_tcp_copy - copy skb bits to the destanation buffer
- * @conn: iscsi tcp connection
- *
- * Notes:
- *	The function calls skb_copy_bits() and updates per-connection
- *	byte counters.
- **/
-static inline int
-iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size)
-{
-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	int buf_left = buf_size - tcp_conn->data_copied;
-	int size = min(tcp_conn->in.copy, buf_left);
-	int rc;
-
-	debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
-		  size, tcp_conn->in.offset, tcp_conn->data_copied);
-	BUG_ON(size <= 0);
-
-	rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
-			   (char*)conn->data + tcp_conn->data_copied, size);
-	BUG_ON(rc);
-
-	tcp_conn->in.offset += size;
-	tcp_conn->in.copy -= size;
-	tcp_conn->in.copied += size;
-	tcp_conn->data_copied += size;
-
-	if (buf_size != tcp_conn->data_copied)
-		return -EAGAIN;
-
-	return 0;
-}
-
-static inline void
-partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
-			 int offset, int length)
-{
-	struct scatterlist temp;
-
-	sg_init_table(&temp, 1);
-	sg_set_page(&temp, sg_page(sg), length, offset);
-	crypto_hash_update(desc, &temp, length);
-}
-
-static void
-iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
-{
-	struct scatterlist tmp;
-
-	sg_init_one(&tmp, buf, len);
-	crypto_hash_update(&tcp_conn->rx_hash, &tmp, len);
-}
-
-static int iscsi_scsi_data_in(struct iscsi_conn *conn)
-{
-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct iscsi_cmd_task *ctask = tcp_conn->in.ctask;
-	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-	struct scsi_cmnd *sc = ctask->sc;
-	struct scatterlist *sg;
-	int i, offset, rc = 0;
-
-	BUG_ON((void*)ctask != sc->SCp.ptr);
-
-	offset = tcp_ctask->data_offset;
-	sg = scsi_sglist(sc);
-
-	if (tcp_ctask->data_offset)
-		for (i = 0; i < tcp_ctask->sg_count; i++)
-			offset -= sg[i].length;
-	/* we've passed through partial sg*/
-	if (offset < 0)
-		offset = 0;
-
-	for (i = tcp_ctask->sg_count; i < scsi_sg_count(sc); i++) {
-		char *dest;
-
-		dest = kmap_atomic(sg_page(&sg[i]), KM_SOFTIRQ0);
-		rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
-				      sg[i].length, offset);
-		kunmap_atomic(dest, KM_SOFTIRQ0);
-		if (rc == -EAGAIN)
-			/* continue with the next SKB/PDU */
-			return rc;
-		if (!rc) {
-			if (conn->datadgst_en) {
-				if (!offset)
-					crypto_hash_update(
-							&tcp_conn->rx_hash,
-							&sg[i], sg[i].length);
-				else
-					partial_sg_digest_update(
-							&tcp_conn->rx_hash,
-							&sg[i],
-							sg[i].offset + offset,
-							sg[i].length - offset);
-			}
-			offset = 0;
-			tcp_ctask->sg_count++;
-		}
-
-		if (!ctask->data_count) {
-			if (rc && conn->datadgst_en)
-				/*
-				 * data-in is complete, but buffer not...
-				 */
-				partial_sg_digest_update(&tcp_conn->rx_hash,
-							 &sg[i],
-							 sg[i].offset,
-							 sg[i].length-rc);
-			rc = 0;
-			break;
-		}
-
-		if (!tcp_conn->in.copy)
-			return -EAGAIN;
-	}
-	BUG_ON(ctask->data_count);
-
-	/* check for non-exceptional status */
-	if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
-		debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
-			   (long)sc, sc->result, ctask->itt,
-			   tcp_conn->in.hdr->flags);
-		spin_lock(&conn->session->lock);
-		__iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
-		spin_unlock(&conn->session->lock);
-	}
-
-	return rc;
-}
-
-static int
-iscsi_data_recv(struct iscsi_conn *conn)
-{
-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	int rc = 0, opcode;
-
-	opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
-	switch (opcode) {
-	case ISCSI_OP_SCSI_DATA_IN:
-		rc = iscsi_scsi_data_in(conn);
-		break;
-	case ISCSI_OP_SCSI_CMD_RSP:
-	case ISCSI_OP_TEXT_RSP:
-	case ISCSI_OP_LOGIN_RSP:
-	case ISCSI_OP_ASYNC_EVENT:
-	case ISCSI_OP_REJECT:
-		/*
-		 * Collect data segment to the connection's data
-		 * placeholder
-		 */
-		if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) {
-			rc = -EAGAIN;
-			goto exit;
-		}
-
-		rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
-					tcp_conn->in.datalen);
-		if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
-			iscsi_recv_digest_update(tcp_conn, conn->data,
-						 tcp_conn->in.datalen);
-		break;
-	default:
-		BUG_ON(1);
-	}
-exit:
-	return rc;
-}
+/**
+ * iscsi_tcp_hdr_recv_done - process PDU header
+ *
+ * This is the callback invoked when the PDU header has
+ * been received. If the header is followed by additional
+ * header segments, we go back for more data.
+ */
+static int
+iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+			struct iscsi_segment *segment)
+{
+	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+	struct iscsi_hdr *hdr;
+
+	/* Check if there are additional header segments
+	 * *prior* to computing the digest, because we
+	 * may need to go back to the caller for more.
+	 */
+	hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
+	if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
+		/* Bump the header length - the caller will
+		 * just loop around and get the AHS for us, and
+		 * call again. */
+		unsigned int ahslen = hdr->hlength << 2;
+
+		/* Make sure we don't overflow */
+		if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
+			return ISCSI_ERR_AHSLEN;
+
+		segment->total_size += ahslen;
+		segment->size += ahslen;
+		return 0;
+	}
+
+	/* We're done processing the header. See if we're doing
+	 * header digests; if so, set up the recv_digest buffer
+	 * and go back for more. */
+	if (conn->hdrdgst_en) {
+		if (segment->digest_len == 0) {
+			iscsi_tcp_segment_splice_digest(segment,
+							segment->recv_digest);
+			return 0;
+		}
+		iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
+				      segment->total_copied - ISCSI_DIGEST_SIZE,
+				      segment->digest);
+
+		if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+			return ISCSI_ERR_HDR_DGST;
+	}
+
+	tcp_conn->in.hdr = hdr;
+	return iscsi_tcp_hdr_dissect(conn, hdr);
+}
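
iscsi_tcp_hdr_recv_done() grows the receive segment in place: the basic header segment (BHS) is always 48 bytes, and hdr->hlength counts additional header segments in 4-byte words, with an optional 4-byte digest behind everything. A small sketch of that size bookkeeping, assuming the RFC 3720 layout:

    #include <stdint.h>
    #include <stdio.h>

    #define BHS_SIZE	48	/* basic header segment, fixed by RFC 3720 */
    #define DIGEST_SIZE	4	/* CRC32C header digest */

    /* Total bytes to receive for one PDU header. */
    static unsigned int pdu_header_size(uint8_t hlength, int hdr_digest)
    {
    	unsigned int size = BHS_SIZE + ((unsigned int)hlength << 2);

    	if (hdr_digest)
    		size += DIGEST_SIZE;
    	return size;
    }

    int main(void)
    {
    	/* hlength = 2 -> 8 bytes of AHS; digest enabled -> +4 */
    	printf("%u\n", pdu_header_size(2, 1));	/* prints 60 */
    	return 0;
    }
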
 
-/**
- * iscsi_tcp_data_recv - TCP receive in sendfile fashion
- * @rd_desc: read descriptor
- * @skb: socket buffer
- * @offset: offset in skb
- * @len: skb->len - offset
- **/
-static int
-iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
-		    unsigned int offset, size_t len)
-{
-	int rc;
-	struct iscsi_conn *conn = rd_desc->arg.data;
-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	int processed;
-	char pad[ISCSI_PAD_LEN];
-	struct scatterlist sg;
-
-	/*
-	 * Save current SKB and its offset in the corresponding
-	 * connection context.
-	 */
-	tcp_conn->in.copy = skb->len - offset;
-	tcp_conn->in.offset = offset;
-	tcp_conn->in.skb = skb;
-	tcp_conn->in.len = tcp_conn->in.copy;
-	BUG_ON(tcp_conn->in.copy <= 0);
-	debug_tcp("in %d bytes\n", tcp_conn->in.copy);
-
-more:
-	tcp_conn->in.copied = 0;
-	rc = 0;
-
-	if (unlikely(conn->suspend_rx)) {
-		debug_tcp("conn %d Rx suspended!\n", conn->id);
-		return 0;
-	}
-
-	if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
-	    tcp_conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
-		rc = iscsi_hdr_extract(tcp_conn);
-		if (rc) {
-			if (rc == -EAGAIN)
-				goto nomore;
-			else {
-				iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-				return 0;
-			}
-		}
-
-		/*
-		 * Verify and process incoming PDU header.
-		 */
-		rc = iscsi_tcp_hdr_recv(conn);
-		if (!rc && tcp_conn->in.datalen) {
-			if (conn->datadgst_en)
-				crypto_hash_init(&tcp_conn->rx_hash);
-			tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
-		} else if (rc) {
-			iscsi_conn_failure(conn, rc);
-			return 0;
-		}
-	}
-
-	if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV &&
-	    tcp_conn->in.copy) {
-		uint32_t recv_digest;
-
-		debug_tcp("extra data_recv offset %d copy %d\n",
-			  tcp_conn->in.offset, tcp_conn->in.copy);
-
-		if (!tcp_conn->data_copied) {
-			if (tcp_conn->in.padding) {
-				debug_tcp("padding -> %d\n",
-					  tcp_conn->in.padding);
-				memset(pad, 0, tcp_conn->in.padding);
-				sg_init_one(&sg, pad, tcp_conn->in.padding);
-				crypto_hash_update(&tcp_conn->rx_hash,
-						   &sg, sg.length);
-			}
-			crypto_hash_final(&tcp_conn->rx_hash,
-					  (u8 *) &tcp_conn->in.datadgst);
-			debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
-		}
-
-		rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
-		if (rc) {
-			if (rc == -EAGAIN)
-				goto again;
-			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-			return 0;
-		}
-
-		memcpy(&recv_digest, conn->data, sizeof(uint32_t));
-		if (recv_digest != tcp_conn->in.datadgst) {
-			debug_tcp("iscsi_tcp: data digest error!"
-				  "0x%x != 0x%x\n", recv_digest,
-				  tcp_conn->in.datadgst);
-			iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
-			return 0;
-		} else {
-			debug_tcp("iscsi_tcp: data digest match!"
-				  "0x%x == 0x%x\n", recv_digest,
-				  tcp_conn->in.datadgst);
-			tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
-		}
-	}
-
-	if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
-	    tcp_conn->in.copy) {
-		debug_tcp("data_recv offset %d copy %d\n",
-			  tcp_conn->in.offset, tcp_conn->in.copy);
-
-		rc = iscsi_data_recv(conn);
-		if (rc) {
-			if (rc == -EAGAIN)
-				goto again;
-			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-			return 0;
-		}
-
-		if (tcp_conn->in.padding)
-			tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
-		else if (conn->datadgst_en)
-			tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
-		else
-			tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
-		tcp_conn->data_copied = 0;
-	}
-
-	if (tcp_conn->in_progress == IN_PROGRESS_PAD_RECV &&
-	    tcp_conn->in.copy) {
-		int copylen = min(tcp_conn->in.padding - tcp_conn->data_copied,
-				  tcp_conn->in.copy);
-
-		tcp_conn->in.copy -= copylen;
-		tcp_conn->in.offset += copylen;
-		tcp_conn->data_copied += copylen;
-
-		if (tcp_conn->data_copied != tcp_conn->in.padding)
-			tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
-		else if (conn->datadgst_en)
-			tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
-		else
-			tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
-		tcp_conn->data_copied = 0;
-	}
-
-	debug_tcp("f, processed %d from out of %d padding %d\n",
-		  tcp_conn->in.offset - offset, (int)len, tcp_conn->in.padding);
-	BUG_ON(tcp_conn->in.offset - offset > len);
-
-	if (tcp_conn->in.offset - offset != len) {
-		debug_tcp("continue to process %d bytes\n",
-			  (int)len - (tcp_conn->in.offset - offset));
-		goto more;
-	}
-
-nomore:
-	processed = tcp_conn->in.offset - offset;
-	BUG_ON(processed == 0);
-	return processed;
-
-again:
-	processed = tcp_conn->in.offset - offset;
-	debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
-		  processed, (int)len, (int)rd_desc->count);
-	BUG_ON(processed == 0);
-	BUG_ON(processed > len);
-
-	conn->rxdata_octets += processed;
-	return processed;
-}
+/**
+ * iscsi_tcp_recv - TCP receive in sendfile fashion
+ * @rd_desc: read descriptor
+ * @skb: socket buffer
+ * @offset: offset in skb
+ * @len: skb->len - offset
+ **/
+static int
+iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
+	       unsigned int offset, size_t len)
+{
+	struct iscsi_conn *conn = rd_desc->arg.data;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_segment *segment = &tcp_conn->in.segment;
+	struct skb_seq_state seq;
+	unsigned int consumed = 0;
+	int rc = 0;
+
+	debug_tcp("in %d bytes\n", skb->len - offset);
+
+	if (unlikely(conn->suspend_rx)) {
+		debug_tcp("conn %d Rx suspended!\n", conn->id);
+		return 0;
+	}
+
+	skb_prepare_seq_read(skb, offset, skb->len, &seq);
+	while (1) {
+		unsigned int avail;
+		const u8 *ptr;
+
+		avail = skb_seq_read(consumed, &ptr, &seq);
+		if (avail == 0) {
+			debug_tcp("no more data avail. Consumed %d\n",
+				  consumed);
+			break;
+		}
+		BUG_ON(segment->copied >= segment->size);
+
+		debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
+		rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
+		BUG_ON(rc == 0);
+		consumed += rc;
+
+		if (segment->total_copied >= segment->total_size) {
+			debug_tcp("segment done\n");
+			rc = segment->done(tcp_conn, segment);
+			if (rc != 0) {
+				skb_abort_seq_read(&seq);
+				goto error;
+			}
+
+			/* The done() function sets up the
+			 * next segment. */
+		}
+	}
+	skb_abort_seq_read(&seq);
+	conn->rxdata_octets += consumed;
+	return consumed;
+
+error:
+	debug_tcp("Error receiving PDU, errno=%d\n", rc);
+	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	return 0;
+}
 
 static void
 iscsi_tcp_data_ready(struct sock *sk, int flag)
 {
 	struct iscsi_conn *conn = sk->sk_user_data;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	read_descriptor_t rd_desc;
 
 	read_lock(&sk->sk_callback_lock);
 
 	/*
-	 * Use rd_desc to pass 'conn' to iscsi_tcp_data_recv.
+	 * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
 	 * We set count to 1 because we want the network layer to
-	 * hand us all the skbs that are available. iscsi_tcp_data_recv
+	 * hand us all the skbs that are available. iscsi_tcp_recv
 	 * handled pdus that cross buffers or pdus that still need data.
 	 */
 	rd_desc.arg.data = conn;
 	rd_desc.count = 1;
-	tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);
+	tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
 
 	read_unlock(&sk->sk_callback_lock);
+
+	/* If we had to (atomically) map a highmem page,
+	 * unmap it now. */
+	iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
 }
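
iscsi_tcp_recv() uses skb_seq_read() to walk a possibly non-linear socket buffer chunk by chunk, feeding each chunk to the current segment until the segment's done() callback swaps in the next one. The control flow can be mimicked in user space with a reader that hands back arbitrary-sized chunks (all names hypothetical):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for skb_seq_read(): hands out the input in small chunks. */
    static unsigned int chunk_read(const char *src, unsigned int srclen,
    			       unsigned int consumed, const char **ptr)
    {
    	unsigned int avail = srclen - consumed;

    	if (avail > 5)
    		avail = 5;	/* pretend pages are 5 bytes long */
    	*ptr = src + consumed;
    	return avail;
    }

    int main(void)
    {
    	const char *input = "0123456789abcdef";
    	char out[17] = { 0 };
    	unsigned int consumed = 0, n;
    	const char *ptr;

    	while ((n = chunk_read(input, 16, consumed, &ptr)) != 0) {
    		memcpy(out + consumed, ptr, n);	/* "segment recv" step */
    		consumed += n;
    	}
    	printf("consumed %u bytes: %s\n", consumed, out);
    	return 0;
    }
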
 
 static void
@@ -1077,121 +1079,173 @@ iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
 }
 
 /**
- * iscsi_send - generic send routine
- * @sk: kernel's socket
- * @buf: buffer to write from
- * @size: actual size to write
- * @flags: socket's flags
- */
-static inline int
-iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
+ * iscsi_xmit - TCP transmit
+ **/
+static int
+iscsi_xmit(struct iscsi_conn *conn)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct socket *sk = tcp_conn->sock;
-	int offset = buf->sg.offset + buf->sent, res;
+	struct iscsi_segment *segment = &tcp_conn->out.segment;
+	unsigned int consumed = 0;
+	int rc = 0;
 
-	/*
-	 * if we got use_sg=0 or are sending something we kmallocd
-	 * then we did not have to do kmap (kmap returns page_address)
-	 *
-	 * if we got use_sg > 0, but had to drop down, we do not
-	 * set clustering so this should only happen for that
-	 * slab case.
-	 */
-	if (buf->use_sendmsg)
-		res = sock_no_sendpage(sk, sg_page(&buf->sg), offset, size, flags);
-	else
-		res = tcp_conn->sendpage(sk, sg_page(&buf->sg), offset, size, flags);
-
-	if (res >= 0) {
-		conn->txdata_octets += res;
-		buf->sent += res;
-		return res;
+	while (1) {
+		rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
+		if (rc < 0)
+			goto error;
+		if (rc == 0)
+			break;
+
+		consumed += rc;
+
+		if (segment->total_copied >= segment->total_size) {
+			if (segment->done != NULL) {
+				rc = segment->done(tcp_conn, segment);
+				if (rc < 0)
+					goto error;
+			}
+		}
 	}
 
-	tcp_conn->sendpage_failures_cnt++;
-	if (res == -EAGAIN)
-		res = -ENOBUFS;
-	else
-		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-	return res;
+	debug_tcp("xmit %d bytes\n", consumed);
+
+	conn->txdata_octets += consumed;
+	return consumed;
+
+error:
+	/* Transmit error. We could initiate error recovery
+	 * here. */
+	debug_tcp("Error sending PDU, errno=%d\n", rc);
+	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	return rc;
 }
 
 /**
- * iscsi_sendhdr - send PDU Header via tcp_sendpage()
- * @conn: iscsi connection
- * @buf: buffer to write from
- * @datalen: length of data to be sent after the header
- *
- * Notes:
- *	(Tx, Fast Path)
- **/
+ * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+ */
 static inline int
-iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
+iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
 {
-	int flags = 0; /* MSG_DONTWAIT; */
-	int res, size;
-
-	size = buf->sg.length - buf->sent;
-	BUG_ON(buf->sent + size > buf->sg.length);
-	if (buf->sent + size != buf->sg.length || datalen)
-		flags |= MSG_MORE;
-
-	res = iscsi_send(conn, buf, size, flags);
-	debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
-	if (res >= 0) {
-		if (size != res)
-			return -EAGAIN;
-		return 0;
-	}
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_segment *segment = &tcp_conn->out.segment;
 
-	return res;
+	return segment->total_copied - segment->total_size;
 }
 
-/**
- * iscsi_sendpage - send one page of iSCSI Data-Out.
- * @conn: iscsi connection
- * @buf: buffer to write from
- * @count: remaining data
- * @sent: number of bytes sent
- *
- * Notes:
- *	(Tx, Fast Path)
- **/
 static inline int
-iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
-	       int *count, int *sent)
+iscsi_tcp_flush(struct iscsi_conn *conn)
 {
-	int flags = 0; /* MSG_DONTWAIT; */
-	int res, size;
-
-	size = buf->sg.length - buf->sent;
-	BUG_ON(buf->sent + size > buf->sg.length);
-	if (size > *count)
-		size = *count;
-	if (buf->sent + size != buf->sg.length || *count != size)
-		flags |= MSG_MORE;
-
-	res = iscsi_send(conn, buf, size, flags);
-	debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
-		  size, buf->sent, *count, *sent, res);
-	if (res >= 0) {
-		*count -= res;
-		*sent += res;
-		if (size != res)
-			return -EAGAIN;
-		return 0;
-	}
+	int rc;
+
+	while (iscsi_tcp_xmit_qlen(conn)) {
+		rc = iscsi_xmit(conn);
+		if (rc == 0)
+			return -EAGAIN;
+		if (rc < 0)
+			return rc;
+	}
 
-	return res;
+	return 0;
 }
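
iscsi_tcp_flush() converts the "socket would block" case into -EAGAIN so callers can back off and retry later. A runnable toy of that caller contract (all types and helpers hypothetical):

    #include <errno.h>
    #include <stdio.h>

    struct conn { int queued; int socket_full; };

    /* Toy flush: drains the queue unless the "socket" is full. */
    static int conn_flush(struct conn *c)
    {
    	if (c->socket_full)
    		return -EAGAIN;
    	c->queued = 0;
    	return 0;
    }

    static int send_task(struct conn *c)
    {
    	int rc = conn_flush(c);

    	if (rc == -EAGAIN)
    		return rc;	/* requeue; retry when writable */
    	if (rc < 0)
    		return rc;	/* hard error: start recovery */
    	return 0;		/* all queued data went out */
    }

    int main(void)
    {
    	struct conn c = { .queued = 512, .socket_full = 1 };

    	printf("%d\n", send_task(&c));	/* -EAGAIN (-11 on Linux) */
    	c.socket_full = 0;
    	printf("%d\n", send_task(&c));	/* 0 */
    	return 0;
    }
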
 
-static inline void
-iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
-		       struct iscsi_tcp_cmd_task *tcp_ctask)
+/*
+ * This is called when we're done sending the header.
+ * Simply copy the data_segment to the send segment, and return.
+ */
+static int
+iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
+			struct iscsi_segment *segment)
+{
+	tcp_conn->out.segment = tcp_conn->out.data_segment;
+	debug_tcp("Header done. Next segment size %u total_size %u\n",
+		  tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
+	return 0;
+}
+
+static void
+iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
 {
-	crypto_hash_init(&tcp_conn->tx_hash);
-	tcp_ctask->digest_count = 4;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+	debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+		  conn->hdrdgst_en? ", digest enabled" : "");
+
+	/* Clear the data segment - needs to be filled in by the
+	 * caller using iscsi_tcp_send_data_prep() */
+	memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
+
+	/* If header digest is enabled, compute the CRC and
+	 * place the digest into the same buffer. We make
+	 * sure that both iscsi_tcp_ctask and mtask have
+	 * sufficient room.
+	 */
+	if (conn->hdrdgst_en) {
+		iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
+				      hdr + hdrlen);
+		hdrlen += ISCSI_DIGEST_SIZE;
+	}
+
+	/* Remember header pointer for later, when we need
+	 * to decide whether there's a payload to go along
+	 * with the header. */
+	tcp_conn->out.hdr = hdr;
+
+	iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
+				  iscsi_tcp_send_hdr_done, NULL);
+}
+
+/*
+ * Prepare the send buffer for the payload data.
+ * Padding and checksumming will all be taken care
+ * of by the iscsi_segment routines.
+ */
+static int
+iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+			 unsigned int count, unsigned int offset,
+			 unsigned int len)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct hash_desc *tx_hash = NULL;
+	unsigned int hdr_spec_len;
+
+	debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+		  tcp_conn, offset, len,
+		  conn->datadgst_en? ", digest enabled" : "");
+
+	/* Make sure the datalen matches what the caller
+	   said he would send. */
+	hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+
+	if (conn->datadgst_en)
+		tx_hash = &tcp_conn->tx_hash;
+
+	return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
+				     sg, count, offset, len,
+				     NULL, tx_hash);
+}
+
+static void
+iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+				   size_t len)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct hash_desc *tx_hash = NULL;
+	unsigned int hdr_spec_len;
+
+	debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+		  conn->datadgst_en? ", digest enabled" : "");
+
+	/* Make sure the datalen matches what the caller
+	   said he would send. */
+	hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+
+	if (conn->datadgst_en)
+		tx_hash = &tcp_conn->tx_hash;
+
+	iscsi_segment_init_linear(&tcp_conn->out.data_segment,
+				  data, len, NULL, tx_hash);
 }
 
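Header-digest preparation appends the CRC directly behind the header bytes (hdr + hdrlen) and then transmits header and digest as one linear segment. iSCSI digests are CRC32C per RFC 3720; a bitwise user-space rendition of computing and appending one (the buffer layout here is an illustration, not the driver's literal code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. */
    static uint32_t crc32c(const uint8_t *data, size_t len)
    {
    	uint32_t crc = 0xFFFFFFFF;

    	while (len--) {
    		crc ^= *data++;
    		for (int i = 0; i < 8; i++)
    			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
    	}
    	return ~crc;
    }

    int main(void)
    {
    	uint8_t pdu[48 + 4] = { 0 };	/* BHS followed by digest slot */
    	uint32_t dgst = crc32c(pdu, 48);

    	memcpy(pdu + 48, &dgst, 4);	/* splice digest behind header */
    	printf("header digest: 0x%08x\n", dgst);
    	return 0;
    }
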
 /**
@@ -1207,12 +1261,17 @@ iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
  *
  *	Called under connection lock.
  **/
-static void
+static int
 iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
-			struct iscsi_r2t_info *r2t, int left)
+			struct iscsi_r2t_info *r2t)
 {
 	struct iscsi_data *hdr;
-	int new_offset;
+	int new_offset, left;
+
+	BUG_ON(r2t->data_length - r2t->sent < 0);
+	left = r2t->data_length - r2t->sent;
+	if (left == 0)
+		return 0;
 
 	hdr = &r2t->dtask.hdr;
 	memset(hdr, 0, sizeof(struct iscsi_data));
@@ -1233,43 +1292,46 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 		r2t->data_count = left;
 		hdr->flags = ISCSI_FLAG_CMD_FINAL;
 	}
-	conn->dataout_pdus_cnt++;
-
-	iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
-			   sizeof(struct iscsi_hdr));
-
-	if (iscsi_buf_left(&r2t->sendbuf))
-		return;
-
-	iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
-	r2t->sg += 1;
-}
 
-static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
-			      unsigned long len)
-{
-	tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
-	if (!tcp_ctask->pad_count)
-		return;
-
-	tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
-	debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
-	set_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate);
+	conn->dataout_pdus_cnt++;
+	return 1;
 }
 
 /**
- * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * iscsi_tcp_ctask_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
  * @conn: iscsi connection
  * @ctask: scsi command task
  * @sc: scsi command
  **/
-static void
-iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
+static int
+iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
 {
 	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+	struct iscsi_conn *conn = ctask->conn;
+	struct scsi_cmnd *sc = ctask->sc;
+	int err;
 
 	BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
-	tcp_ctask->xmstate = 1 << XMSTATE_BIT_CMD_HDR_INIT;
+	tcp_ctask->sent = 0;
+	tcp_ctask->exp_datasn = 0;
+
+	/* Prepare PDU, optionally w/ immediate data */
+	debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
+		   conn->id, ctask->itt, ctask->imm_count,
+		   ctask->unsol_count);
+	iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+
+	if (!ctask->imm_count)
+		return 0;
+
+	/* If we have immediate data, attach a payload */
+	err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
+				       0, ctask->imm_count);
+	if (err)
+		return err;
+	tcp_ctask->sent += ctask->imm_count;
+	ctask->imm_count = 0;
+	return 0;
 }
 
 /**
@@ -1281,484 +1343,130 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
  * The function can return -EAGAIN in which case caller must
  * call it again later, or recover. '0' return code means successful
  * xmit.
- *
- *	Management xmit state machine consists of these states:
- *		XMSTATE_BIT_IMM_HDR_INIT - calculate digest of PDU Header
- *		XMSTATE_BIT_IMM_HDR      - PDU Header xmit in progress
- *		XMSTATE_BIT_IMM_DATA     - PDU Data xmit in progress
- *		XMSTATE_VALUE_IDLE       - management PDU is done
 **/
 static int
 iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
 {
-	struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
 	int rc;
 
-	debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
-		   conn->id, tcp_mtask->xmstate, mtask->itt);
-
-	if (test_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate)) {
-		iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
-				   sizeof(struct iscsi_hdr));
-
-		if (mtask->data_count) {
-			set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate);
-			iscsi_buf_init_iov(&tcp_mtask->sendbuf,
-					   (char*)mtask->data,
-					   mtask->data_count);
-		}
-
-		if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
-		    conn->stop_stage != STOP_CONN_RECOVER &&
-		    conn->hdrdgst_en)
-			iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
-					 (u8*)tcp_mtask->hdrext);
-
-		tcp_mtask->sent = 0;
-		clear_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate);
-		set_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate);
-	}
-
-	if (test_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate)) {
-		rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
-				   mtask->data_count);
-		if (rc)
-			return rc;
-		clear_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate);
-	}
-
-	if (test_and_clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate)) {
-		BUG_ON(!mtask->data_count);
-		/* FIXME: implement.
-		 * Virtual buffer could be spreaded across multiple pages...
-		 */
-		do {
-			int rc;
-
-			rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
-					&mtask->data_count, &tcp_mtask->sent);
-			if (rc) {
-				set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate);
-				return rc;
-			}
-		} while (mtask->data_count);
-	}
+	/* Flush any pending data first. */
+	rc = iscsi_tcp_flush(conn);
+	if (rc < 0)
+		return rc;
 
-	BUG_ON(tcp_mtask->xmstate != XMSTATE_VALUE_IDLE);
 	if (mtask->hdr->itt == RESERVED_ITT) {
 		struct iscsi_session *session = conn->session;
 
 		spin_lock_bh(&session->lock);
-		list_del(&conn->mtask->running);
-		__kfifo_put(session->mgmtpool.queue, (void*)&conn->mtask,
-			    sizeof(void*));
+		iscsi_free_mgmt_task(conn, mtask);
 		spin_unlock_bh(&session->lock);
 	}
+
 	return 0;
 }
 
+/*
+ * iscsi_tcp_ctask_xmit - xmit normal PDU task
+ * @conn: iscsi connection
+ * @ctask: iscsi command task
+ *
+ * We're expected to return 0 when everything was transmitted successfully,
+ * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+ * of error.
+ */
 static int
-iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 {
-	struct scsi_cmnd *sc = ctask->sc;
 	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+	struct scsi_cmnd *sc = ctask->sc;
 	int rc = 0;
 
-	if (test_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate)) {
-		tcp_ctask->sent = 0;
-		tcp_ctask->sg_count = 0;
-		tcp_ctask->exp_datasn = 0;
-
-		if (sc->sc_data_direction == DMA_TO_DEVICE) {
-			struct scatterlist *sg = scsi_sglist(sc);
-
-			iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
-			tcp_ctask->sg = sg + 1;
-			tcp_ctask->bad_sg = sg + scsi_sg_count(sc);
-
-			debug_scsi("cmd [itt 0x%x total %d imm_data %d "
-				   "unsol count %d, unsol offset %d]\n",
-				   ctask->itt, scsi_bufflen(sc),
-				   ctask->imm_count, ctask->unsol_count,
-				   ctask->unsol_offset);
-		}
-
-		iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
-				   sizeof(struct iscsi_hdr));
-
-		if (conn->hdrdgst_en)
-			iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
-					 (u8*)tcp_ctask->hdrext);
-		clear_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate);
-		set_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate);
-	}
-
-	if (test_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate)) {
-		rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
-		if (rc)
-			return rc;
-		clear_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate);
-
-		if (sc->sc_data_direction != DMA_TO_DEVICE)
-			return 0;
-
-		if (ctask->imm_count) {
-			set_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate);
-			iscsi_set_padding(tcp_ctask, ctask->imm_count);
-
-			if (ctask->conn->datadgst_en) {
-				iscsi_data_digest_init(ctask->conn->dd_data,
-						       tcp_ctask);
-				tcp_ctask->immdigest = 0;
-			}
-		}
-
-		if (ctask->unsol_count) {
-			set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate);
-			set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate);
-		}
-	}
-	return rc;
-}
-
-static int
-iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
-{
-	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	int sent = 0, rc;
-
-	if (test_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate)) {
-		iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
-				   tcp_ctask->pad_count);
-		if (conn->datadgst_en)
-			crypto_hash_update(&tcp_conn->tx_hash,
-					   &tcp_ctask->sendbuf.sg,
-					   tcp_ctask->sendbuf.sg.length);
-	} else if (!test_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate))
-		return 0;
-
-	clear_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate);
-	clear_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate);
-	debug_scsi("sending %d pad bytes for itt 0x%x\n",
-		   tcp_ctask->pad_count, ctask->itt);
-	rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
-			    &sent);
-	if (rc) {
-		debug_scsi("padding send failed %d\n", rc);
-		set_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate);
-	}
-	return rc;
-}
-
-static int
-iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
-		  struct iscsi_buf *buf, uint32_t *digest)
-{
-	struct iscsi_tcp_cmd_task *tcp_ctask;
+flush:
+	/* Flush any pending data first. */
+	rc = iscsi_tcp_flush(conn);
+	if (rc < 0)
1459 struct iscsi_tcp_conn *tcp_conn;
1460 int rc, sent = 0;
1461
1462 if (!conn->datadgst_en)
1463 return 0;
1464
1465 tcp_ctask = ctask->dd_data;
1466 tcp_conn = conn->dd_data;
1467
1468 if (!test_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate)) {
1469 crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
1470 iscsi_buf_init_iov(buf, (char*)digest, 4);
1471 }
1472 clear_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate);
1473
1474 rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
1475 if (!rc)
1476 debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
1477 ctask->itt);
1478 else {
1479 debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
1480 *digest, ctask->itt);
1481 set_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate);
1482 }
1483 return rc;
1484}
1485
1486static int
1487iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
1488 struct scatterlist **sg, int *sent, int *count,
1489 struct iscsi_buf *digestbuf, uint32_t *digest)
1490{
1491 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1492 struct iscsi_conn *conn = ctask->conn;
1493 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1494 int rc, buf_sent, offset;
1495
1496 while (*count) {
1497 buf_sent = 0;
1498 offset = sendbuf->sent;
1499
1500 rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
1501 *sent = *sent + buf_sent;
1502 if (buf_sent && conn->datadgst_en)
1503 partial_sg_digest_update(&tcp_conn->tx_hash,
1504 &sendbuf->sg, sendbuf->sg.offset + offset,
1505 buf_sent);
1506 if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
1507 iscsi_buf_init_sg(sendbuf, *sg);
1508 *sg = *sg + 1;
1509 }
1510
1511 if (rc)
1512 return rc;
1513 }
1514
1515 rc = iscsi_send_padding(conn, ctask);
1516 if (rc)
1517 return rc; 1388 return rc;
1518 1389
1519 return iscsi_send_digest(conn, ctask, digestbuf, digest); 1390 /* Are we done already? */
1520} 1391 if (sc->sc_data_direction != DMA_TO_DEVICE)
1521 1392 return 0;
1522static int
1523iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1524{
1525 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1526 struct iscsi_data_task *dtask;
1527 int rc;
1528
1529 set_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
1530 if (test_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate)) {
1531 dtask = &tcp_ctask->unsol_dtask;
1532
1533 iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
1534 iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
1535 sizeof(struct iscsi_hdr));
1536 if (conn->hdrdgst_en)
1537 iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
1538 (u8*)dtask->hdrext);
1539
1540 clear_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate);
1541 iscsi_set_padding(tcp_ctask, ctask->data_count);
1542 }
1543
1544 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
1545 if (rc) {
1546 clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
1547 set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate);
1548 return rc;
1549 }
1550 1393
1551 if (conn->datadgst_en) { 1394 if (ctask->unsol_count != 0) {
1552 dtask = &tcp_ctask->unsol_dtask; 1395 struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
1553 iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
1554 dtask->digest = 0;
1555 }
1556 1396
1557 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n", 1397 /* Prepare a header for the unsolicited PDU.
1558 ctask->itt, ctask->unsol_count, tcp_ctask->sent); 1398 * The amount of data we want to send will be
1559 return 0; 1399 * in ctask->data_count.
1560} 1400 * FIXME: return the data count instead.
1401 */
1402 iscsi_prep_unsolicit_data_pdu(ctask, hdr);
1561 1403
1562static int 1404 debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
1563iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1405 ctask->itt, tcp_ctask->sent, ctask->data_count);
1564{
1565 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1566 int rc;
1567 1406
1568 if (test_and_clear_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate)) { 1407 iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
1569 BUG_ON(!ctask->unsol_count); 1408 rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
1570send_hdr: 1409 scsi_sg_count(sc),
1571 rc = iscsi_send_unsol_hdr(conn, ctask); 1410 tcp_ctask->sent,
1411 ctask->data_count);
1572 if (rc) 1412 if (rc)
1573 return rc; 1413 goto fail;
1574 } 1414 tcp_ctask->sent += ctask->data_count;
1575 1415 ctask->unsol_count -= ctask->data_count;
1576 if (test_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate)) { 1416 goto flush;
1577 struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask; 1417 } else {
1578 int start = tcp_ctask->sent; 1418 struct iscsi_session *session = conn->session;
1419 struct iscsi_r2t_info *r2t;
1579 1420
1580 rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg, 1421 /* All unsolicited PDUs sent. Check for solicited PDUs.
1581 &tcp_ctask->sent, &ctask->data_count,
1582 &dtask->digestbuf, &dtask->digest);
1583 ctask->unsol_count -= tcp_ctask->sent - start;
1584 if (rc)
1585 return rc;
1586 clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
1587 /*
1588 * Done with the Data-Out. Next, check if we need
1589 * to send another unsolicited Data-Out.
1590 */ 1422 */
1591 if (ctask->unsol_count) { 1423 spin_lock_bh(&session->lock);
1592 debug_scsi("sending more uns\n"); 1424 r2t = tcp_ctask->r2t;
1593 set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate); 1425 if (r2t != NULL) {
1594 goto send_hdr; 1426 /* Continue with this R2T? */
1427 if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
1428 debug_scsi(" done with r2t %p\n", r2t);
1429
1430 __kfifo_put(tcp_ctask->r2tpool.queue,
1431 (void*)&r2t, sizeof(void*));
1432 tcp_ctask->r2t = r2t = NULL;
1433 }
1595 } 1434 }
1596 }
1597 return 0;
1598}
1599
1600static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
1601 struct iscsi_cmd_task *ctask)
1602{
1603 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1604 struct iscsi_session *session = conn->session;
1605 struct iscsi_r2t_info *r2t;
1606 struct iscsi_data_task *dtask;
1607 int left, rc;
1608 1435
1609 if (test_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate)) { 1436 if (r2t == NULL) {
1610 if (!tcp_ctask->r2t) {
1611 spin_lock_bh(&session->lock);
1612 __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, 1437 __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
1613 sizeof(void*)); 1438 sizeof(void*));
1614 spin_unlock_bh(&session->lock); 1439 r2t = tcp_ctask->r2t;
1615 } 1440 }
1616send_hdr: 1441 spin_unlock_bh(&session->lock);
1617 r2t = tcp_ctask->r2t;
1618 dtask = &r2t->dtask;
1619
1620 if (conn->hdrdgst_en)
1621 iscsi_hdr_digest(conn, &r2t->headbuf,
1622 (u8*)dtask->hdrext);
1623 clear_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate);
1624 set_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate);
1625 }
1626
1627 if (test_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate)) {
1628 r2t = tcp_ctask->r2t;
1629 dtask = &r2t->dtask;
1630
1631 rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
1632 if (rc)
1633 return rc;
1634 clear_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate);
1635 set_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate);
1636 1442
1637 if (conn->datadgst_en) { 1443 /* Waiting for more R2Ts to arrive. */
1638 iscsi_data_digest_init(conn->dd_data, tcp_ctask); 1444 if (r2t == NULL) {
1639 dtask->digest = 0; 1445 debug_tcp("no R2Ts yet\n");
1446 return 0;
1640 } 1447 }
1641 1448
1642 iscsi_set_padding(tcp_ctask, r2t->data_count); 1449 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
1643 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n", 1450 r2t, r2t->solicit_datasn - 1, ctask->itt,
1644 r2t->solicit_datasn - 1, ctask->itt, r2t->data_count, 1451 r2t->data_offset + r2t->sent, r2t->data_count);
1645 r2t->sent);
1646 }
1647 1452
1648 if (test_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate)) { 1453 iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
1649 r2t = tcp_ctask->r2t; 1454 sizeof(struct iscsi_hdr));
1650 dtask = &r2t->dtask;
1651 1455
1652 rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg, 1456 rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
1653 &r2t->sent, &r2t->data_count, 1457 scsi_sg_count(sc),
1654 &dtask->digestbuf, &dtask->digest); 1458 r2t->data_offset + r2t->sent,
1459 r2t->data_count);
1655 if (rc) 1460 if (rc)
1656 return rc; 1461 goto fail;
1657 clear_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate); 1462 tcp_ctask->sent += r2t->data_count;
1658 1463 r2t->sent += r2t->data_count;
1659 /* 1464 goto flush;
1660 * Done with this Data-Out. Next, check if we have
1661 * to send another Data-Out for this R2T.
1662 */
1663 BUG_ON(r2t->data_length - r2t->sent < 0);
1664 left = r2t->data_length - r2t->sent;
1665 if (left) {
1666 iscsi_solicit_data_cont(conn, ctask, r2t, left);
1667 goto send_hdr;
1668 }
1669
1670 /*
1671 * Done with this R2T. Check if there are more
1672 * outstanding R2Ts ready to be processed.
1673 */
1674 spin_lock_bh(&session->lock);
1675 tcp_ctask->r2t = NULL;
1676 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
1677 sizeof(void*));
1678 if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
1679 sizeof(void*))) {
1680 tcp_ctask->r2t = r2t;
1681 spin_unlock_bh(&session->lock);
1682 goto send_hdr;
1683 }
1684 spin_unlock_bh(&session->lock);
1685 } 1465 }
1686 return 0; 1466 return 0;
1687} 1467fail:
1688 1468 iscsi_conn_failure(conn, rc);
1689/** 1469 return -EIO;
1690 * iscsi_tcp_ctask_xmit - xmit normal PDU task
1691 * @conn: iscsi connection
1692 * @ctask: iscsi command task
1693 *
1694 * Notes:
1695 * The function can return -EAGAIN in which case caller must
1696 * call it again later, or recover. '0' return code means successful
1697 * xmit.
1698 * The function is divided into logical helpers (above) for the different
1699 * xmit stages.
1700 *
1701 *iscsi_send_cmd_hdr()
1702 * XMSTATE_BIT_CMD_HDR_INIT - prepare Header and Data buffers Calculate
1703 * Header Digest
1704 * XMSTATE_BIT_CMD_HDR_XMIT - Transmit header in progress
1705 *
1706 *iscsi_send_padding
1707 * XMSTATE_BIT_W_PAD - Prepare and send padding
1708 * XMSTATE_BIT_W_RESEND_PAD - retry sending padding
1709 *
1710 *iscsi_send_digest
1711 * XMSTATE_BIT_W_RESEND_DATA_DIGEST - Finalize and send Data Digest
1712 * XMSTATE_BIT_W_RESEND_DATA_DIGEST - retry sending digest
1713 *
1714 *iscsi_send_unsol_hdr
1715 * XMSTATE_BIT_UNS_INIT - prepare unsolicited data header and digest
1716 * XMSTATE_BIT_UNS_HDR - send unsolicited header
1717 *
1718 *iscsi_send_unsol_pdu
1719 * XMSTATE_BIT_UNS_DATA - send unsolicited data in progress
1720 *
1721 *iscsi_send_sol_pdu
1722 * XMSTATE_BIT_SOL_HDR_INIT - initialize solicited data header and digest
1723 * XMSTATE_BIT_SOL_HDR - send solicited header
1724 * XMSTATE_BIT_SOL_DATA - send solicited data
1725 *
1726 *iscsi_tcp_ctask_xmit
1727 * XMSTATE_BIT_IMM_DATA - xmit management data (??)
1728 **/
1729static int
1730iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1731{
1732 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1733 int rc = 0;
1734
1735 debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
1736 conn->id, tcp_ctask->xmstate, ctask->itt);
1737
1738 rc = iscsi_send_cmd_hdr(conn, ctask);
1739 if (rc)
1740 return rc;
1741 if (ctask->sc->sc_data_direction != DMA_TO_DEVICE)
1742 return 0;
1743
1744 if (test_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate)) {
1745 rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
1746 &tcp_ctask->sent, &ctask->imm_count,
1747 &tcp_ctask->immbuf, &tcp_ctask->immdigest);
1748 if (rc)
1749 return rc;
1750 clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate);
1751 }
1752
1753 rc = iscsi_send_unsol_pdu(conn, ctask);
1754 if (rc)
1755 return rc;
1756
1757 rc = iscsi_send_sol_pdu(conn, ctask);
1758 if (rc)
1759 return rc;
1760
1761 return rc;
1762} 1470}
1763 1471
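Each pass through the unsolicited branch above sends ctask->data_count bytes, advances tcp_ctask->sent, and shrinks ctask->unsol_count before jumping back to flush. A standalone sketch of that first-burst accounting (the burst and PDU sizes are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int unsol_count = 24576;	/* first burst remaining */
	unsigned int max_pdu = 8192;		/* per-PDU data limit */
	unsigned int sent = 0;

	while (unsol_count) {
		unsigned int data_count =
			unsol_count < max_pdu ? unsol_count : max_pdu;

		sent += data_count;		/* tcp_ctask->sent */
		unsol_count -= data_count;	/* ctask->unsol_count */
		printf("Data-Out doff=%u dlen=%u\n",
		       sent - data_count, data_count);
	}
	return 0;
}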
1764static struct iscsi_cls_conn * 1472static struct iscsi_cls_conn *
@@ -1784,9 +1492,6 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1784 1492
1785 conn->dd_data = tcp_conn; 1493 conn->dd_data = tcp_conn;
1786 tcp_conn->iscsi_conn = conn; 1494 tcp_conn->iscsi_conn = conn;
1787 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1788 /* initial operational parameters */
1789 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
1790 1495
1791 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1496 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1792 CRYPTO_ALG_ASYNC); 1497 CRYPTO_ALG_ASYNC);
@@ -1863,11 +1568,9 @@ static void
1863iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 1568iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1864{ 1569{
1865 struct iscsi_conn *conn = cls_conn->dd_data; 1570 struct iscsi_conn *conn = cls_conn->dd_data;
1866 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1867 1571
1868 iscsi_conn_stop(cls_conn, flag); 1572 iscsi_conn_stop(cls_conn, flag);
1869 iscsi_tcp_release_conn(conn); 1573 iscsi_tcp_release_conn(conn);
1870 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
1871} 1574}
1872 1575
1873static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock, 1576static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
@@ -1967,7 +1670,7 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1967 /* 1670 /*
1968 * set receive state machine into initial state 1671 * set receive state machine into initial state
1969 */ 1672 */
1970 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; 1673 iscsi_tcp_hdr_recv_prep(tcp_conn);
1971 return 0; 1674 return 0;
1972 1675
1973free_socket: 1676free_socket:
@@ -1977,10 +1680,17 @@ free_socket:
1977 1680
1978/* called with host lock */ 1681/* called with host lock */
1979static void 1682static void
1980iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) 1683iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1981{ 1684{
1982 struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; 1685 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
1983 tcp_mtask->xmstate = 1 << XMSTATE_BIT_IMM_HDR_INIT; 1686
1687 /* Prepare PDU, optionally w/ immediate data */
1688 iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
1689
1690 /* If we have immediate data, attach a payload */
1691 if (mtask->data_count)
1692 iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
1693 mtask->data_count);
1984} 1694}
1985 1695
1986static int 1696static int
@@ -2003,8 +1713,7 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
2003 */ 1713 */
2004 1714
2005 /* R2T pool */ 1715 /* R2T pool */
2006 if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, 1716 if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
2007 (void***)&tcp_ctask->r2ts,
2008 sizeof(struct iscsi_r2t_info))) { 1717 sizeof(struct iscsi_r2t_info))) {
2009 goto r2t_alloc_fail; 1718 goto r2t_alloc_fail;
2010 } 1719 }
@@ -2013,8 +1722,7 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
2013 tcp_ctask->r2tqueue = kfifo_alloc( 1722 tcp_ctask->r2tqueue = kfifo_alloc(
2014 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL); 1723 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
2015 if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) { 1724 if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
2016 iscsi_pool_free(&tcp_ctask->r2tpool, 1725 iscsi_pool_free(&tcp_ctask->r2tpool);
2017 (void**)tcp_ctask->r2ts);
2018 goto r2t_alloc_fail; 1726 goto r2t_alloc_fail;
2019 } 1727 }
2020 } 1728 }
@@ -2027,8 +1735,7 @@ r2t_alloc_fail:
2027 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1735 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
2028 1736
2029 kfifo_free(tcp_ctask->r2tqueue); 1737 kfifo_free(tcp_ctask->r2tqueue);
2030 iscsi_pool_free(&tcp_ctask->r2tpool, 1738 iscsi_pool_free(&tcp_ctask->r2tpool);
2031 (void**)tcp_ctask->r2ts);
2032 } 1739 }
2033 return -ENOMEM; 1740 return -ENOMEM;
2034} 1741}
@@ -2043,8 +1750,7 @@ iscsi_r2tpool_free(struct iscsi_session *session)
2043 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1750 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
2044 1751
2045 kfifo_free(tcp_ctask->r2tqueue); 1752 kfifo_free(tcp_ctask->r2tqueue);
2046 iscsi_pool_free(&tcp_ctask->r2tpool, 1753 iscsi_pool_free(&tcp_ctask->r2tpool);
2047 (void**)tcp_ctask->r2ts);
2048 } 1754 }
2049} 1755}
2050 1756
@@ -2060,9 +1766,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
2060 switch(param) { 1766 switch(param) {
2061 case ISCSI_PARAM_HDRDGST_EN: 1767 case ISCSI_PARAM_HDRDGST_EN:
2062 iscsi_set_param(cls_conn, param, buf, buflen); 1768 iscsi_set_param(cls_conn, param, buf, buflen);
2063 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
2064 if (conn->hdrdgst_en)
2065 tcp_conn->hdr_size += sizeof(__u32);
2066 break; 1769 break;
2067 case ISCSI_PARAM_DATADGST_EN: 1770 case ISCSI_PARAM_DATADGST_EN:
2068 iscsi_set_param(cls_conn, param, buf, buflen); 1771 iscsi_set_param(cls_conn, param, buf, buflen);
@@ -2071,12 +1774,12 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
2071 break; 1774 break;
2072 case ISCSI_PARAM_MAX_R2T: 1775 case ISCSI_PARAM_MAX_R2T:
2073 sscanf(buf, "%d", &value); 1776 sscanf(buf, "%d", &value);
2074 if (session->max_r2t == roundup_pow_of_two(value)) 1777 if (value <= 0 || !is_power_of_2(value))
1778 return -EINVAL;
1779 if (session->max_r2t == value)
2075 break; 1780 break;
2076 iscsi_r2tpool_free(session); 1781 iscsi_r2tpool_free(session);
2077 iscsi_set_param(cls_conn, param, buf, buflen); 1782 iscsi_set_param(cls_conn, param, buf, buflen);
2078 if (session->max_r2t & (session->max_r2t - 1))
2079 session->max_r2t = roundup_pow_of_two(session->max_r2t);
2080 if (iscsi_r2tpool_alloc(session)) 1783 if (iscsi_r2tpool_alloc(session))
2081 return -ENOMEM; 1784 return -ENOMEM;
2082 break; 1785 break;
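The new MAX_R2T handling rejects anything that is not a positive power of two instead of silently rounding the value up. The same test in standalone C (is_pow2 mirrors what the kernel's is_power_of_2 computes):

#include <stdio.h>

static int is_pow2(int v)
{
	return v > 0 && (v & (v - 1)) == 0;
}

int main(void)
{
	int tests[] = { -4, 0, 1, 3, 4, 8, 12, 16 };
	unsigned int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("max_r2t=%d -> %s\n", tests[i],
		       is_pow2(tests[i]) ? "accepted" : "-EINVAL");
	return 0;
}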
@@ -2183,14 +1886,15 @@ iscsi_tcp_session_create(struct iscsi_transport *iscsit,
2183 struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; 1886 struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
2184 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1887 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
2185 1888
2186 ctask->hdr = &tcp_ctask->hdr; 1889 ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
1890 ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
2187 } 1891 }
2188 1892
2189 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) { 1893 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
2190 struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i]; 1894 struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
2191 struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; 1895 struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
2192 1896
2193 mtask->hdr = &tcp_mtask->hdr; 1897 mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
2194 } 1898 }
2195 1899
2196 if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session))) 1900 if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
@@ -2222,12 +1926,14 @@ static struct scsi_host_template iscsi_sht = {
2222 .queuecommand = iscsi_queuecommand, 1926 .queuecommand = iscsi_queuecommand,
2223 .change_queue_depth = iscsi_change_queue_depth, 1927 .change_queue_depth = iscsi_change_queue_depth,
2224 .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1, 1928 .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
2225 .sg_tablesize = ISCSI_SG_TABLESIZE, 1929 .sg_tablesize = 4096,
2226 .max_sectors = 0xFFFF, 1930 .max_sectors = 0xFFFF,
2227 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 1931 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
2228 .eh_abort_handler = iscsi_eh_abort, 1932 .eh_abort_handler = iscsi_eh_abort,
1933 .eh_device_reset_handler= iscsi_eh_device_reset,
2229 .eh_host_reset_handler = iscsi_eh_host_reset, 1934 .eh_host_reset_handler = iscsi_eh_host_reset,
2230 .use_clustering = DISABLE_CLUSTERING, 1935 .use_clustering = DISABLE_CLUSTERING,
1936 .use_sg_chaining = ENABLE_SG_CHAINING,
2231 .slave_configure = iscsi_tcp_slave_configure, 1937 .slave_configure = iscsi_tcp_slave_configure,
2232 .proc_name = "iscsi_tcp", 1938 .proc_name = "iscsi_tcp",
2233 .this_id = -1, 1939 .this_id = -1,
@@ -2257,14 +1963,17 @@ static struct iscsi_transport iscsi_tcp_transport = {
2257 ISCSI_PERSISTENT_ADDRESS | 1963 ISCSI_PERSISTENT_ADDRESS |
2258 ISCSI_TARGET_NAME | ISCSI_TPGT | 1964 ISCSI_TARGET_NAME | ISCSI_TPGT |
2259 ISCSI_USERNAME | ISCSI_PASSWORD | 1965 ISCSI_USERNAME | ISCSI_PASSWORD |
2260 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN, 1966 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
1967 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
1968 ISCSI_LU_RESET_TMO |
1969 ISCSI_PING_TMO | ISCSI_RECV_TMO,
2261 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | 1970 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
2262 ISCSI_HOST_INITIATOR_NAME | 1971 ISCSI_HOST_INITIATOR_NAME |
2263 ISCSI_HOST_NETDEV_NAME, 1972 ISCSI_HOST_NETDEV_NAME,
2264 .host_template = &iscsi_sht, 1973 .host_template = &iscsi_sht,
2265 .conndata_size = sizeof(struct iscsi_conn), 1974 .conndata_size = sizeof(struct iscsi_conn),
2266 .max_conn = 1, 1975 .max_conn = 1,
2267 .max_cmd_len = ISCSI_TCP_MAX_CMD_LEN, 1976 .max_cmd_len = 16,
2268 /* session management */ 1977 /* session management */
2269 .create_session = iscsi_tcp_session_create, 1978 .create_session = iscsi_tcp_session_create,
2270 .destroy_session = iscsi_tcp_session_destroy, 1979 .destroy_session = iscsi_tcp_session_destroy,
@@ -2283,8 +1992,8 @@ static struct iscsi_transport iscsi_tcp_transport = {
2283 /* IO */ 1992 /* IO */
2284 .send_pdu = iscsi_conn_send_pdu, 1993 .send_pdu = iscsi_conn_send_pdu,
2285 .get_stats = iscsi_conn_get_stats, 1994 .get_stats = iscsi_conn_get_stats,
2286 .init_cmd_task = iscsi_tcp_cmd_init, 1995 .init_cmd_task = iscsi_tcp_ctask_init,
2287 .init_mgmt_task = iscsi_tcp_mgmt_init, 1996 .init_mgmt_task = iscsi_tcp_mtask_init,
2288 .xmit_cmd_task = iscsi_tcp_ctask_xmit, 1997 .xmit_cmd_task = iscsi_tcp_ctask_xmit,
2289 .xmit_mgmt_task = iscsi_tcp_mtask_xmit, 1998 .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
2290 .cleanup_cmd_task = iscsi_tcp_cleanup_ctask, 1999 .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 68c36cc8997e..ed0b991d1e72 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -24,71 +24,61 @@
24 24
25#include <scsi/libiscsi.h> 25#include <scsi/libiscsi.h>
26 26
27/* Socket's Receive state machine */
28#define IN_PROGRESS_WAIT_HEADER 0x0
29#define IN_PROGRESS_HEADER_GATHER 0x1
30#define IN_PROGRESS_DATA_RECV 0x2
31#define IN_PROGRESS_DDIGEST_RECV 0x3
32#define IN_PROGRESS_PAD_RECV 0x4
33
34/* xmit state machine */
35#define XMSTATE_VALUE_IDLE 0
36#define XMSTATE_BIT_CMD_HDR_INIT 0
37#define XMSTATE_BIT_CMD_HDR_XMIT 1
38#define XMSTATE_BIT_IMM_HDR 2
39#define XMSTATE_BIT_IMM_DATA 3
40#define XMSTATE_BIT_UNS_INIT 4
41#define XMSTATE_BIT_UNS_HDR 5
42#define XMSTATE_BIT_UNS_DATA 6
43#define XMSTATE_BIT_SOL_HDR 7
44#define XMSTATE_BIT_SOL_DATA 8
45#define XMSTATE_BIT_W_PAD 9
46#define XMSTATE_BIT_W_RESEND_PAD 10
47#define XMSTATE_BIT_W_RESEND_DATA_DIGEST 11
48#define XMSTATE_BIT_IMM_HDR_INIT 12
49#define XMSTATE_BIT_SOL_HDR_INIT 13
50
51#define ISCSI_PAD_LEN 4
52#define ISCSI_SG_TABLESIZE SG_ALL
53#define ISCSI_TCP_MAX_CMD_LEN 16
54
55struct crypto_hash; 27struct crypto_hash;
56struct socket; 28struct socket;
29struct iscsi_tcp_conn;
30struct iscsi_segment;
31
32typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
33 struct iscsi_segment *);
34
35struct iscsi_segment {
36 unsigned char *data;
37 unsigned int size;
38 unsigned int copied;
39 unsigned int total_size;
40 unsigned int total_copied;
41
42 struct hash_desc *hash;
43 unsigned char recv_digest[ISCSI_DIGEST_SIZE];
44 unsigned char digest[ISCSI_DIGEST_SIZE];
45 unsigned int digest_len;
46
47 struct scatterlist *sg;
48 void *sg_mapped;
49 unsigned int sg_offset;
50
51 iscsi_segment_done_fn_t *done;
52};
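The iscsi_segment introduced above is the unit of the new receive path: bytes accumulate until size is reached, then the done callback decides what to read next (header, data, digest). A much-reduced userspace analog, without scatterlists or digest handling; segment_recv and on_done are illustrative names only:

#include <stdio.h>
#include <string.h>

struct segment;
typedef int (*segment_done_fn)(struct segment *);

struct segment {
	unsigned char *data;
	unsigned int size;
	unsigned int copied;
	segment_done_fn done;
};

/* Copy up to 'len' bytes into the segment; fire 'done' when full. */
static unsigned int segment_recv(struct segment *seg,
				 const unsigned char *buf, unsigned int len)
{
	unsigned int copy = seg->size - seg->copied;

	if (copy > len)
		copy = len;
	memcpy(seg->data + seg->copied, buf, copy);
	seg->copied += copy;
	if (seg->copied == seg->size)
		seg->done(seg);	/* e.g. switch from header to data */
	return copy;
}

static int on_done(struct segment *seg)
{
	printf("segment complete (%u bytes)\n", seg->copied);
	return 0;
}

int main(void)
{
	unsigned char hdr[48];
	struct segment seg = { hdr, sizeof(hdr), 0, on_done };
	unsigned char wire[64] = { 0 };

	segment_recv(&seg, wire, 20);		/* partial chunk */
	segment_recv(&seg, wire + 20, 44);	/* completes the header */
	return 0;
}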
57 53
58/* Socket connection receive helper */ 54/* Socket connection receive helper */
59struct iscsi_tcp_recv { 55struct iscsi_tcp_recv {
60 struct iscsi_hdr *hdr; 56 struct iscsi_hdr *hdr;
61 struct sk_buff *skb; 57 struct iscsi_segment segment;
62 int offset; 58
63 int len; 59 /* Allocate buffer for BHS + AHS */
64 int hdr_offset; 60 uint32_t hdr_buf[64];
65 int copy;
66 int copied;
67 int padding;
68 struct iscsi_cmd_task *ctask; /* current cmd in progress */
69 61
70 /* copied and flipped values */ 62 /* copied and flipped values */
71 int datalen; 63 int datalen;
72 int datadgst; 64};
73 char zero_copy_hdr; 65
66/* Socket connection send helper */
67struct iscsi_tcp_send {
68 struct iscsi_hdr *hdr;
69 struct iscsi_segment segment;
70 struct iscsi_segment data_segment;
74}; 71};
75 72
76struct iscsi_tcp_conn { 73struct iscsi_tcp_conn {
77 struct iscsi_conn *iscsi_conn; 74 struct iscsi_conn *iscsi_conn;
78 struct socket *sock; 75 struct socket *sock;
79 struct iscsi_hdr hdr; /* header placeholder */
80 char hdrext[4*sizeof(__u16) +
81 sizeof(__u32)];
82 int data_copied;
83 int stop_stage; /* conn_stop() flag: * 76 int stop_stage; /* conn_stop() flag: *
84 * stop to recover, * 77 * stop to recover, *
85 * stop to terminate */ 78 * stop to terminate */
86 /* iSCSI connection-wide sequencing */
87 int hdr_size; /* PDU header size */
88
89 /* control data */ 79 /* control data */
90 struct iscsi_tcp_recv in; /* TCP receive context */ 80 struct iscsi_tcp_recv in; /* TCP receive context */
91 int in_progress; /* connection state machine */ 81 struct iscsi_tcp_send out; /* TCP send context */
92 82
93 /* old values for socket callbacks */ 83 /* old values for socket callbacks */
94 void (*old_data_ready)(struct sock *, int); 84 void (*old_data_ready)(struct sock *, int);
@@ -103,29 +93,19 @@ struct iscsi_tcp_conn {
103 uint32_t sendpage_failures_cnt; 93 uint32_t sendpage_failures_cnt;
104 uint32_t discontiguous_hdr_cnt; 94 uint32_t discontiguous_hdr_cnt;
105 95
106 ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); 96 int error;
107};
108 97
109struct iscsi_buf { 98 ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
110 struct scatterlist sg;
111 unsigned int sent;
112 char use_sendmsg;
113}; 99};
114 100
115struct iscsi_data_task { 101struct iscsi_data_task {
116 struct iscsi_data hdr; /* PDU */ 102 struct iscsi_data hdr; /* PDU */
117 char hdrext[sizeof(__u32)]; /* Header-Digest */ 103 char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
118 struct iscsi_buf digestbuf; /* digest buffer */
119 uint32_t digest; /* data digest */
120}; 104};
121 105
122struct iscsi_tcp_mgmt_task { 106struct iscsi_tcp_mgmt_task {
123 struct iscsi_hdr hdr; 107 struct iscsi_hdr hdr;
124 char hdrext[sizeof(__u32)]; /* Header-Digest */ 108 char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
125 unsigned long xmstate; /* mgmt xmit progress */
126 struct iscsi_buf headbuf; /* header buffer */
127 struct iscsi_buf sendbuf; /* in progress buffer */
128 int sent;
129}; 109};
130 110
131struct iscsi_r2t_info { 111struct iscsi_r2t_info {
@@ -133,38 +113,26 @@ struct iscsi_r2t_info {
133 __be32 exp_statsn; /* copied from R2T */ 113 __be32 exp_statsn; /* copied from R2T */
134 uint32_t data_length; /* copied from R2T */ 114 uint32_t data_length; /* copied from R2T */
135 uint32_t data_offset; /* copied from R2T */ 115 uint32_t data_offset; /* copied from R2T */
136 struct iscsi_buf headbuf; /* Data-Out Header Buffer */
137 struct iscsi_buf sendbuf; /* Data-Out in progress buffer*/
138 int sent; /* R2T sequence progress */ 116 int sent; /* R2T sequence progress */
139 int data_count; /* DATA-Out payload progress */ 117 int data_count; /* DATA-Out payload progress */
140 struct scatterlist *sg; /* per-R2T SG list */
141 int solicit_datasn; 118 int solicit_datasn;
142 struct iscsi_data_task dtask; /* which data task */ 119 struct iscsi_data_task dtask; /* Data-Out header buf */
143}; 120};
144 121
145struct iscsi_tcp_cmd_task { 122struct iscsi_tcp_cmd_task {
146 struct iscsi_cmd hdr; 123 struct iscsi_hdr_buff {
147 char hdrext[4*sizeof(__u16)+ /* AHS */ 124 struct iscsi_cmd cmd_hdr;
148 sizeof(__u32)]; /* HeaderDigest */ 125 char hdrextbuf[ISCSI_MAX_AHS_SIZE +
149 char pad[ISCSI_PAD_LEN]; 126 ISCSI_DIGEST_SIZE];
150 int pad_count; /* padded bytes */ 127 } hdr;
151 struct iscsi_buf headbuf; /* header buf (xmit) */ 128
152 struct iscsi_buf sendbuf; /* in progress buffer*/
153 unsigned long xmstate; /* xmit xtate machine */
154 int sent; 129 int sent;
155 struct scatterlist *sg; /* per-cmd SG list */ 130 uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
156 struct scatterlist *bad_sg; /* assert statement */
157 int sg_count; /* SG's to process */
158 uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
159 int data_offset; 131 int data_offset;
160 struct iscsi_r2t_info *r2t; /* in progress R2T */ 132 struct iscsi_r2t_info *r2t; /* in progress R2T */
161 struct iscsi_queue r2tpool; 133 struct iscsi_pool r2tpool;
162 struct kfifo *r2tqueue; 134 struct kfifo *r2tqueue;
163 struct iscsi_r2t_info **r2ts; 135 struct iscsi_data_task unsol_dtask; /* Data-Out header buf */
164 int digest_count;
165 uint32_t immdigest; /* for imm data */
166 struct iscsi_buf immbuf; /* for imm data digest */
167 struct iscsi_data_task unsol_dtask; /* unsol data task */
168}; 136};
169 137
170#endif /* ISCSI_H */ 138#endif /* ISCSI_H */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8b57af5baaec..553168ae44f1 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -24,6 +24,7 @@
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/kfifo.h> 25#include <linux/kfifo.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/log2.h>
27#include <asm/unaligned.h> 28#include <asm/unaligned.h>
28#include <net/tcp.h> 29#include <net/tcp.h>
29#include <scsi/scsi_cmnd.h> 30#include <scsi/scsi_cmnd.h>
@@ -86,7 +87,7 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
86 * xmit thread 87 * xmit thread
87 */ 88 */
88 if (!list_empty(&session->leadconn->xmitqueue) || 89 if (!list_empty(&session->leadconn->xmitqueue) ||
89 __kfifo_len(session->leadconn->mgmtqueue)) 90 !list_empty(&session->leadconn->mgmtqueue))
90 scsi_queue_work(session->host, 91 scsi_queue_work(session->host,
91 &session->leadconn->xmitwork); 92 &session->leadconn->xmitwork);
92 } 93 }
@@ -122,6 +123,20 @@ void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
122} 123}
123EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu); 124EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
124 125
126static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
127{
128 unsigned exp_len = ctask->hdr_len + len;
129
130 if (exp_len > ctask->hdr_max) {
131 WARN_ON(1);
132 return -EINVAL;
133 }
134
135 WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
136 ctask->hdr_len = exp_len;
137 return 0;
138}
139
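iscsi_add_hdr only does the length bookkeeping; iscsi_prep_scsi_cmd_pdu below turns it into the on-wire hlength: AHS bytes past the 48-byte BHS, divided by the 4-byte pad unit. A worked example (the single 8-byte AHS is hypothetical):

#include <assert.h>
#include <stdio.h>

#define ISCSI_PAD_LEN	4
#define BHS_SIZE	48	/* basic header segment size */

int main(void)
{
	unsigned int hdr_len = BHS_SIZE + 8;	/* BHS + one 8-byte AHS */
	unsigned int ahs_len = hdr_len - BHS_SIZE;

	assert((ahs_len & (ISCSI_PAD_LEN - 1)) == 0);	/* caller padded */
	printf("hlength = %u\n", ahs_len / ISCSI_PAD_LEN);	/* -> 2 */
	return 0;
}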
125/** 140/**
126 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu 141 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
127 * @ctask: iscsi cmd task 142 * @ctask: iscsi cmd task
@@ -129,27 +144,32 @@ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
129 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set 144 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
130 * fields like dlength or final based on how much data it sends 145 * fields like dlength or final based on how much data it sends
131 */ 146 */
132static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) 147static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
133{ 148{
134 struct iscsi_conn *conn = ctask->conn; 149 struct iscsi_conn *conn = ctask->conn;
135 struct iscsi_session *session = conn->session; 150 struct iscsi_session *session = conn->session;
136 struct iscsi_cmd *hdr = ctask->hdr; 151 struct iscsi_cmd *hdr = ctask->hdr;
137 struct scsi_cmnd *sc = ctask->sc; 152 struct scsi_cmnd *sc = ctask->sc;
153 unsigned hdrlength;
154 int rc;
138 155
139 hdr->opcode = ISCSI_OP_SCSI_CMD; 156 ctask->hdr_len = 0;
140 hdr->flags = ISCSI_ATTR_SIMPLE; 157 rc = iscsi_add_hdr(ctask, sizeof(*hdr));
141 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 158 if (rc)
142 hdr->itt = build_itt(ctask->itt, conn->id, session->age); 159 return rc;
143 hdr->data_length = cpu_to_be32(scsi_bufflen(sc)); 160 hdr->opcode = ISCSI_OP_SCSI_CMD;
144 hdr->cmdsn = cpu_to_be32(session->cmdsn); 161 hdr->flags = ISCSI_ATTR_SIMPLE;
145 session->cmdsn++; 162 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
146 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 163 hdr->itt = build_itt(ctask->itt, conn->id, session->age);
147 memcpy(hdr->cdb, sc->cmnd, sc->cmd_len); 164 hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
165 hdr->cmdsn = cpu_to_be32(session->cmdsn);
166 session->cmdsn++;
167 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
168 memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
148 if (sc->cmd_len < MAX_COMMAND_SIZE) 169 if (sc->cmd_len < MAX_COMMAND_SIZE)
149 memset(&hdr->cdb[sc->cmd_len], 0, 170 memset(&hdr->cdb[sc->cmd_len], 0,
150 MAX_COMMAND_SIZE - sc->cmd_len); 171 MAX_COMMAND_SIZE - sc->cmd_len);
151 172
152 ctask->data_count = 0;
153 ctask->imm_count = 0; 173 ctask->imm_count = 0;
154 if (sc->sc_data_direction == DMA_TO_DEVICE) { 174 if (sc->sc_data_direction == DMA_TO_DEVICE) {
155 hdr->flags |= ISCSI_FLAG_CMD_WRITE; 175 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
@@ -178,9 +198,9 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
178 else 198 else
179 ctask->imm_count = min(scsi_bufflen(sc), 199 ctask->imm_count = min(scsi_bufflen(sc),
180 conn->max_xmit_dlength); 200 conn->max_xmit_dlength);
181 hton24(ctask->hdr->dlength, ctask->imm_count); 201 hton24(hdr->dlength, ctask->imm_count);
182 } else 202 } else
183 zero_data(ctask->hdr->dlength); 203 zero_data(hdr->dlength);
184 204
185 if (!session->initial_r2t_en) { 205 if (!session->initial_r2t_en) {
186 ctask->unsol_count = min((session->first_burst), 206 ctask->unsol_count = min((session->first_burst),
@@ -190,7 +210,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
190 210
191 if (!ctask->unsol_count) 211 if (!ctask->unsol_count)
192 /* No unsolicited Data-Outs */ 212
193 ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL; 213 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
194 } else { 214 } else {
195 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 215 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
196 zero_data(hdr->dlength); 216 zero_data(hdr->dlength);
@@ -199,13 +219,25 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
199 hdr->flags |= ISCSI_FLAG_CMD_READ; 219 hdr->flags |= ISCSI_FLAG_CMD_READ;
200 } 220 }
201 221
202 conn->scsicmd_pdus_cnt++; 222 /* calculate size of additional header segments (AHSs) */
223 hdrlength = ctask->hdr_len - sizeof(*hdr);
224
225 WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
226 hdrlength /= ISCSI_PAD_LEN;
227
228 WARN_ON(hdrlength >= 256);
229 hdr->hlength = hdrlength & 0xFF;
230
231 if (conn->session->tt->init_cmd_task(conn->ctask))
232 return -EIO;
203 233
204 debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d " 234 conn->scsicmd_pdus_cnt++;
235 debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
205 "cmdsn %d win %d]\n", 236 "cmdsn %d win %d]\n",
206 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", 237 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
207 conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc), 238 conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
208 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); 239 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
240 return 0;
209} 241}
210 242
211/** 243/**
@@ -218,13 +250,16 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
218 */ 250 */
219static void iscsi_complete_command(struct iscsi_cmd_task *ctask) 251static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
220{ 252{
221 struct iscsi_session *session = ctask->conn->session; 253 struct iscsi_conn *conn = ctask->conn;
254 struct iscsi_session *session = conn->session;
222 struct scsi_cmnd *sc = ctask->sc; 255 struct scsi_cmnd *sc = ctask->sc;
223 256
224 ctask->state = ISCSI_TASK_COMPLETED; 257 ctask->state = ISCSI_TASK_COMPLETED;
225 ctask->sc = NULL; 258 ctask->sc = NULL;
226 /* SCSI eh reuses commands to verify us */ 259 /* SCSI eh reuses commands to verify us */
227 sc->SCp.ptr = NULL; 260 sc->SCp.ptr = NULL;
261 if (conn->ctask == ctask)
262 conn->ctask = NULL;
228 list_del_init(&ctask->running); 263 list_del_init(&ctask->running);
229 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); 264 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
230 sc->scsi_done(sc); 265 sc->scsi_done(sc);
@@ -241,6 +276,112 @@ static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
241 iscsi_complete_command(ctask); 276 iscsi_complete_command(ctask);
242} 277}
243 278
279/*
280 * session lock must be held
281 */
282static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
283 int err)
284{
285 struct scsi_cmnd *sc;
286
287 sc = ctask->sc;
288 if (!sc)
289 return;
290
291 if (ctask->state == ISCSI_TASK_PENDING)
292 /*
293 * cmd never made it to the xmit thread, so we should not count
294 * the cmd in the sequencing
295 */
296 conn->session->queued_cmdsn--;
297 else
298 conn->session->tt->cleanup_cmd_task(conn, ctask);
299
300 sc->result = err;
301 scsi_set_resid(sc, scsi_bufflen(sc));
302 if (conn->ctask == ctask)
303 conn->ctask = NULL;
304 /* release ref from queuecommand */
305 __iscsi_put_ctask(ctask);
306}
307
308/**
309 * iscsi_free_mgmt_task - return mgmt task back to pool
310 * @conn: iscsi connection
311 * @mtask: mtask
312 *
313 * Must be called with session lock.
314 */
315void iscsi_free_mgmt_task(struct iscsi_conn *conn,
316 struct iscsi_mgmt_task *mtask)
317{
318 list_del_init(&mtask->running);
319 if (conn->login_mtask == mtask)
320 return;
321
322 if (conn->ping_mtask == mtask)
323 conn->ping_mtask = NULL;
324 __kfifo_put(conn->session->mgmtpool.queue,
325 (void*)&mtask, sizeof(void*));
326}
327EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
328
329static struct iscsi_mgmt_task *
330__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
331 char *data, uint32_t data_size)
332{
333 struct iscsi_session *session = conn->session;
334 struct iscsi_mgmt_task *mtask;
335
336 if (session->state == ISCSI_STATE_TERMINATE)
337 return NULL;
338
339 if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
340 hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
341 /*
342 * Login and Text are sent serially, in
343 * request-followed-by-response sequence.
344 * Same mtask can be used. Same ITT must be used.
345 * Note that login_mtask is preallocated at conn_create().
346 */
347 mtask = conn->login_mtask;
348 else {
349 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
350 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
351
352 if (!__kfifo_get(session->mgmtpool.queue,
353 (void*)&mtask, sizeof(void*)))
354 return NULL;
355 }
356
357 if (data_size) {
358 memcpy(mtask->data, data, data_size);
359 mtask->data_count = data_size;
360 } else
361 mtask->data_count = 0;
362
363 memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
364 INIT_LIST_HEAD(&mtask->running);
365 list_add_tail(&mtask->running, &conn->mgmtqueue);
366 return mtask;
367}
368
369int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
370 char *data, uint32_t data_size)
371{
372 struct iscsi_conn *conn = cls_conn->dd_data;
373 struct iscsi_session *session = conn->session;
374 int err = 0;
375
376 spin_lock_bh(&session->lock);
377 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
378 err = -EPERM;
379 spin_unlock_bh(&session->lock);
380 scsi_queue_work(session->host, &conn->xmitwork);
381 return err;
382}
383EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
384
244/** 385/**
245 * iscsi_cmd_rsp - SCSI Command Response processing 386 * iscsi_cmd_rsp - SCSI Command Response processing
246 * @conn: iscsi connection 387 * @conn: iscsi connection
@@ -291,17 +432,19 @@ invalid_datalen:
291 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); 432 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
292 } 433 }
293 434
294 if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) { 435 if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
436 ISCSI_FLAG_CMD_OVERFLOW)) {
295 int res_count = be32_to_cpu(rhdr->residual_count); 437 int res_count = be32_to_cpu(rhdr->residual_count);
296 438
297 if (res_count > 0 && res_count <= scsi_bufflen(sc)) 439 if (res_count > 0 &&
440 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
441 res_count <= scsi_bufflen(sc)))
298 scsi_set_resid(sc, res_count); 442 scsi_set_resid(sc, res_count);
299 else 443 else
300 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 444 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
301 } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW) 445 } else if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
446 ISCSI_FLAG_CMD_BIDI_OVERFLOW))
302 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 447 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
303 else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
304 scsi_set_resid(sc, be32_to_cpu(rhdr->residual_count));
305 448
306out: 449out:
307 debug_scsi("done [sc %lx res %d itt 0x%x]\n", 450 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
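The rework folds CMD_OVERFLOW into the same branch as CMD_UNDERFLOW and, for overflow, accepts a residual larger than the buffer. A standalone sketch of the resulting decision (the flag values here are illustrative, not the real ISCSI_FLAG_* bits):

#include <stdio.h>

#define F_UNDERFLOW	0x02	/* illustrative values */
#define F_OVERFLOW	0x04

/* >0: residual to report, 0: none, -1: DID_BAD_TARGET */
static int resid(unsigned int flags, int res_count, int bufflen)
{
	if (flags & (F_UNDERFLOW | F_OVERFLOW)) {
		if (res_count > 0 &&
		    ((flags & F_OVERFLOW) || res_count <= bufflen))
			return res_count;	/* scsi_set_resid() */
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("underflow 512/4096 -> %d\n", resid(F_UNDERFLOW, 512, 4096));
	printf("overflow 8192/4096 -> %d\n", resid(F_OVERFLOW, 8192, 4096));
	printf("bogus underflow 8192/4096 -> %d\n",
	       resid(F_UNDERFLOW, 8192, 4096));
	return 0;
}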
@@ -318,18 +461,51 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
318 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 461 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
319 conn->tmfrsp_pdus_cnt++; 462 conn->tmfrsp_pdus_cnt++;
320 463
321 if (conn->tmabort_state != TMABORT_INITIAL) 464 if (conn->tmf_state != TMF_QUEUED)
322 return; 465 return;
323 466
324 if (tmf->response == ISCSI_TMF_RSP_COMPLETE) 467 if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
325 conn->tmabort_state = TMABORT_SUCCESS; 468 conn->tmf_state = TMF_SUCCESS;
326 else if (tmf->response == ISCSI_TMF_RSP_NO_TASK) 469 else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
327 conn->tmabort_state = TMABORT_NOT_FOUND; 470 conn->tmf_state = TMF_NOT_FOUND;
328 else 471 else
329 conn->tmabort_state = TMABORT_FAILED; 472 conn->tmf_state = TMF_FAILED;
330 wake_up(&conn->ehwait); 473 wake_up(&conn->ehwait);
331} 474}
332 475
476static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
477{
478 struct iscsi_nopout hdr;
479 struct iscsi_mgmt_task *mtask;
480
481 if (!rhdr && conn->ping_mtask)
482 return;
483
484 memset(&hdr, 0, sizeof(struct iscsi_nopout));
485 hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
486 hdr.flags = ISCSI_FLAG_CMD_FINAL;
487
488 if (rhdr) {
489 memcpy(hdr.lun, rhdr->lun, 8);
490 hdr.ttt = rhdr->ttt;
491 hdr.itt = RESERVED_ITT;
492 } else
493 hdr.ttt = RESERVED_ITT;
494
495 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
496 if (!mtask) {
497 printk(KERN_ERR "Could not send nopout\n");
498 return;
499 }
500
501 /* only track our nops */
502 if (!rhdr) {
503 conn->ping_mtask = mtask;
504 conn->last_ping = jiffies;
505 }
506 scsi_queue_work(conn->session->host, &conn->xmitwork);
507}
508
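iscsi_send_nopout records conn->ping_mtask and conn->last_ping so a later check can fail the connection when a NOP-In never arrives; that is what the new ISCSI_PING_TMO/ISCSI_RECV_TMO parameters feed. A simplified sketch of the bookkeeping (seconds instead of jiffies, names abbreviated):

#include <stdio.h>

struct conn {
	long last_ping;		/* when our NOP-Out went out */
	int ping_outstanding;	/* conn->ping_mtask != NULL */
};

static int ping_timed_out(const struct conn *c, long now, long ping_tmo)
{
	return c->ping_outstanding && now - c->last_ping > ping_tmo;
}

int main(void)
{
	struct conn c = { .last_ping = 100, .ping_outstanding = 1 };

	printf("t=105: %s\n", ping_timed_out(&c, 105, 10) ? "fail" : "ok");
	printf("t=120: %s\n", ping_timed_out(&c, 120, 10) ? "fail" : "ok");
	return 0;
}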
333static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 509static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
334 char *data, int datalen) 510 char *data, int datalen)
335{ 511{
@@ -374,6 +550,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
374 struct iscsi_mgmt_task *mtask; 550 struct iscsi_mgmt_task *mtask;
375 uint32_t itt; 551 uint32_t itt;
376 552
553 conn->last_recv = jiffies;
377 if (hdr->itt != RESERVED_ITT) 554 if (hdr->itt != RESERVED_ITT)
378 itt = get_itt(hdr->itt); 555 itt = get_itt(hdr->itt);
379 else 556 else
@@ -429,10 +606,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
429 */ 606 */
430 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) 607 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
431 rc = ISCSI_ERR_CONN_FAILED; 608 rc = ISCSI_ERR_CONN_FAILED;
432 list_del(&mtask->running); 609 iscsi_free_mgmt_task(conn, mtask);
433 if (conn->login_mtask != mtask)
434 __kfifo_put(session->mgmtpool.queue,
435 (void*)&mtask, sizeof(void*));
436 break; 610 break;
437 case ISCSI_OP_SCSI_TMFUNC_RSP: 611 case ISCSI_OP_SCSI_TMFUNC_RSP:
438 if (datalen) { 612 if (datalen) {
@@ -441,20 +615,26 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
441 } 615 }
442 616
443 iscsi_tmf_rsp(conn, hdr); 617 iscsi_tmf_rsp(conn, hdr);
618 iscsi_free_mgmt_task(conn, mtask);
444 break; 619 break;
445 case ISCSI_OP_NOOP_IN: 620 case ISCSI_OP_NOOP_IN:
446 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) { 621 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
622 datalen) {
447 rc = ISCSI_ERR_PROTO; 623 rc = ISCSI_ERR_PROTO;
448 break; 624 break;
449 } 625 }
450 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 626 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
451 627
452 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) 628 if (conn->ping_mtask != mtask) {
453 rc = ISCSI_ERR_CONN_FAILED; 629 /*
454 list_del(&mtask->running); 630 * If this is not in response to one of our
455 if (conn->login_mtask != mtask) 631 * nops then it must be from userspace.
456 __kfifo_put(session->mgmtpool.queue, 632 */
457 (void*)&mtask, sizeof(void*)); 633 if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
634 datalen))
635 rc = ISCSI_ERR_CONN_FAILED;
636 }
637 iscsi_free_mgmt_task(conn, mtask);
458 break; 638 break;
459 default: 639 default:
460 rc = ISCSI_ERR_BAD_OPCODE; 640 rc = ISCSI_ERR_BAD_OPCODE;
@@ -473,8 +653,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
473 if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) 653 if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
474 break; 654 break;
475 655
476 if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0)) 656 iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
477 rc = ISCSI_ERR_CONN_FAILED;
478 break; 657 break;
479 case ISCSI_OP_REJECT: 658 case ISCSI_OP_REJECT:
480 rc = iscsi_handle_reject(conn, hdr, data, datalen); 659 rc = iscsi_handle_reject(conn, hdr, data, datalen);
@@ -609,20 +788,19 @@ static void iscsi_prep_mtask(struct iscsi_conn *conn,
609 session->tt->init_mgmt_task(conn, mtask); 788 session->tt->init_mgmt_task(conn, mtask);
610 789
611 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n", 790 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
612 hdr->opcode, hdr->itt, mtask->data_count); 791 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
792 mtask->data_count);
613} 793}
614 794
615static int iscsi_xmit_mtask(struct iscsi_conn *conn) 795static int iscsi_xmit_mtask(struct iscsi_conn *conn)
616{ 796{
617 struct iscsi_hdr *hdr = conn->mtask->hdr; 797 struct iscsi_hdr *hdr = conn->mtask->hdr;
618 int rc, was_logout = 0; 798 int rc;
619 799
800 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
801 conn->session->state = ISCSI_STATE_LOGGING_OUT;
620 spin_unlock_bh(&conn->session->lock); 802 spin_unlock_bh(&conn->session->lock);
621 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) { 803
622 conn->session->state = ISCSI_STATE_IN_RECOVERY;
623 iscsi_block_session(session_to_cls(conn->session));
624 was_logout = 1;
625 }
626 rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask); 804 rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
627 spin_lock_bh(&conn->session->lock); 805 spin_lock_bh(&conn->session->lock);
628 if (rc) 806 if (rc)
@@ -630,11 +808,6 @@ static int iscsi_xmit_mtask(struct iscsi_conn *conn)
630 808
631 /* done with this in-progress mtask */ 809 /* done with this in-progress mtask */
632 conn->mtask = NULL; 810 conn->mtask = NULL;
633
634 if (was_logout) {
635 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
636 return -ENODATA;
637 }
638 return 0; 811 return 0;
639} 812}
640 813
@@ -658,21 +831,13 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
658static int iscsi_xmit_ctask(struct iscsi_conn *conn) 831static int iscsi_xmit_ctask(struct iscsi_conn *conn)
659{ 832{
660 struct iscsi_cmd_task *ctask = conn->ctask; 833 struct iscsi_cmd_task *ctask = conn->ctask;
661 int rc = 0; 834 int rc;
662
663 /*
664 * serialize with TMF AbortTask
665 */
666 if (ctask->state == ISCSI_TASK_ABORTING)
667 goto done;
668 835
669 __iscsi_get_ctask(ctask); 836 __iscsi_get_ctask(ctask);
670 spin_unlock_bh(&conn->session->lock); 837 spin_unlock_bh(&conn->session->lock);
671 rc = conn->session->tt->xmit_cmd_task(conn, ctask); 838 rc = conn->session->tt->xmit_cmd_task(conn, ctask);
672 spin_lock_bh(&conn->session->lock); 839 spin_lock_bh(&conn->session->lock);
673 __iscsi_put_ctask(ctask); 840 __iscsi_put_ctask(ctask);
674
675done:
676 if (!rc) 841 if (!rc)
677 /* done with this ctask */ 842 /* done with this ctask */
678 conn->ctask = NULL; 843 conn->ctask = NULL;
@@ -680,6 +845,22 @@ done:
680} 845}
681 846
682/** 847/**
848 * iscsi_requeue_ctask - requeue ctask to run from session workqueue
849 * @ctask: ctask to requeue
850 *
851 * LLDs that need to run a ctask from the session workqueue should call
852 * this. The session lock must be held.
853 */
854void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
855{
856 struct iscsi_conn *conn = ctask->conn;
857
858 list_move_tail(&ctask->running, &conn->requeue);
859 scsi_queue_work(conn->session->host, &conn->xmitwork);
860}
861EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
862
863/**
683 * iscsi_data_xmit - xmit any command into the scheduled connection 864 * iscsi_data_xmit - xmit any command into the scheduled connection
684 * @conn: iscsi connection 865 * @conn: iscsi connection
685 * 866 *
@@ -717,36 +898,40 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
717 * overflow us with nop-ins 898 * overflow us with nop-ins
718 */ 899 */
719check_mgmt: 900check_mgmt:
720 while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask, 901 while (!list_empty(&conn->mgmtqueue)) {
721 sizeof(void*))) { 902 conn->mtask = list_entry(conn->mgmtqueue.next,
903 struct iscsi_mgmt_task, running);
904 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
905 iscsi_free_mgmt_task(conn, conn->mtask);
906 conn->mtask = NULL;
907 continue;
908 }
909
722 iscsi_prep_mtask(conn, conn->mtask); 910 iscsi_prep_mtask(conn, conn->mtask);
723 list_add_tail(&conn->mtask->running, &conn->mgmt_run_list); 911 list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
724 rc = iscsi_xmit_mtask(conn); 912 rc = iscsi_xmit_mtask(conn);
725 if (rc) 913 if (rc)
726 goto again; 914 goto again;
727 } 915 }
728 916
729 /* process command queue */ 917 /* process pending command queue */
730 while (!list_empty(&conn->xmitqueue)) { 918 while (!list_empty(&conn->xmitqueue)) {
731 /* 919 if (conn->tmf_state == TMF_QUEUED)
732 * iscsi tcp may readd the task to the xmitqueue to send 920 break;
733 * write data 921
734 */
735 conn->ctask = list_entry(conn->xmitqueue.next, 922 conn->ctask = list_entry(conn->xmitqueue.next,
736 struct iscsi_cmd_task, running); 923 struct iscsi_cmd_task, running);
737 switch (conn->ctask->state) { 924 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
738 case ISCSI_TASK_ABORTING: 925 fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
739 break; 926 continue;
740 case ISCSI_TASK_PENDING: 927 }
741 iscsi_prep_scsi_cmd_pdu(conn->ctask); 928 if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
742 conn->session->tt->init_cmd_task(conn->ctask); 929 fail_command(conn, conn->ctask, DID_ABORT << 16);
743 /* fall through */ 930 continue;
744 default:
745 conn->ctask->state = ISCSI_TASK_RUNNING;
746 break;
747 } 931 }
748 list_move_tail(conn->xmitqueue.next, &conn->run_list);
749 932
933 conn->ctask->state = ISCSI_TASK_RUNNING;
934 list_move_tail(conn->xmitqueue.next, &conn->run_list);
750 rc = iscsi_xmit_ctask(conn); 935 rc = iscsi_xmit_ctask(conn);
751 if (rc) 936 if (rc)
752 goto again; 937 goto again;
@@ -755,7 +940,28 @@ check_mgmt:
755 * we need to check the mgmt queue for nops that need to 940 * we need to check the mgmt queue for nops that need to
756 * be sent to avoid starvation 941
757 */ 942 */
758 if (__kfifo_len(conn->mgmtqueue)) 943 if (!list_empty(&conn->mgmtqueue))
944 goto check_mgmt;
945 }
946
947 while (!list_empty(&conn->requeue)) {
948 if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
949 break;
950
951 /*
952 * we always do fastlogout - conn stop code will clean up.
953 */
954 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
955 break;
956
957 conn->ctask = list_entry(conn->requeue.next,
958 struct iscsi_cmd_task, running);
959 conn->ctask->state = ISCSI_TASK_RUNNING;
960 list_move_tail(conn->requeue.next, &conn->run_list);
961 rc = iscsi_xmit_ctask(conn);
962 if (rc)
963 goto again;
964 if (!list_empty(&conn->mgmtqueue))
759 goto check_mgmt; 965 goto check_mgmt;
760 } 966 }
761 spin_unlock_bh(&conn->session->lock); 967 spin_unlock_bh(&conn->session->lock);
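The loop structure above gives the mgmt queue strict priority: it is drained first and re-checked after every command or requeued task so nops cannot be starved behind SCSI I/O. The scheduling order in toy standalone form (plain counters stand in for the kernel lists):

#include <stdio.h>

int main(void)
{
	int mgmt = 2, cmds = 3, requeued = 1;

	while (mgmt + cmds + requeued) {
		if (mgmt) {		/* check_mgmt: nops go first */
			mgmt--;
			printf("xmit mgmt pdu\n");
			continue;
		}
		if (cmds) {		/* pending SCSI commands */
			cmds--;
			printf("xmit scsi cmd\n");
			continue;	/* then re-check mgmt queue */
		}
		requeued--;		/* R2T data-out requeues go last */
		printf("xmit requeued cmd\n");
	}
	return 0;
}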
@@ -790,6 +996,7 @@ enum {
790 FAILURE_SESSION_TERMINATE, 996 FAILURE_SESSION_TERMINATE,
791 FAILURE_SESSION_IN_RECOVERY, 997 FAILURE_SESSION_IN_RECOVERY,
792 FAILURE_SESSION_RECOVERY_TIMEOUT, 998 FAILURE_SESSION_RECOVERY_TIMEOUT,
999 FAILURE_SESSION_LOGGING_OUT,
793}; 1000};
794 1001
795int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) 1002int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
@@ -805,8 +1012,9 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
805 sc->SCp.ptr = NULL; 1012 sc->SCp.ptr = NULL;
806 1013
807 host = sc->device->host; 1014 host = sc->device->host;
808 session = iscsi_hostdata(host->hostdata); 1015 spin_unlock(host->host_lock);
809 1016
1017 session = iscsi_hostdata(host->hostdata);
810 spin_lock(&session->lock); 1018 spin_lock(&session->lock);
811 1019
812 /* 1020 /*
@@ -822,17 +1030,22 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
822 * be entering our queuecommand while a block is starting 1030 * be entering our queuecommand while a block is starting
823 * up because the block code is not locked) 1031 * up because the block code is not locked)
824 */ 1032 */
825 if (session->state == ISCSI_STATE_IN_RECOVERY) { 1033 switch (session->state) {
1034 case ISCSI_STATE_IN_RECOVERY:
826 reason = FAILURE_SESSION_IN_RECOVERY; 1035 reason = FAILURE_SESSION_IN_RECOVERY;
827 goto reject; 1036 goto reject;
828 } 1037 case ISCSI_STATE_LOGGING_OUT:
829 1038 reason = FAILURE_SESSION_LOGGING_OUT;
830 if (session->state == ISCSI_STATE_RECOVERY_FAILED) 1039 goto reject;
1040 case ISCSI_STATE_RECOVERY_FAILED:
831 reason = FAILURE_SESSION_RECOVERY_TIMEOUT; 1041 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
832 else if (session->state == ISCSI_STATE_TERMINATE) 1042 break;
1043 case ISCSI_STATE_TERMINATE:
833 reason = FAILURE_SESSION_TERMINATE; 1044 reason = FAILURE_SESSION_TERMINATE;
834 else 1045 break;
1046 default:
835 reason = FAILURE_SESSION_FREED; 1047 reason = FAILURE_SESSION_FREED;
1048 }
836 goto fault; 1049 goto fault;
837 } 1050 }
838 1051
@@ -859,7 +1072,6 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
859 1072
860 atomic_set(&ctask->refcount, 1); 1073 atomic_set(&ctask->refcount, 1);
861 ctask->state = ISCSI_TASK_PENDING; 1074 ctask->state = ISCSI_TASK_PENDING;
862 ctask->mtask = NULL;
863 ctask->conn = conn; 1075 ctask->conn = conn;
864 ctask->sc = sc; 1076 ctask->sc = sc;
865 INIT_LIST_HEAD(&ctask->running); 1077 INIT_LIST_HEAD(&ctask->running);
@@ -868,11 +1080,13 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
868 spin_unlock(&session->lock); 1080 spin_unlock(&session->lock);
869 1081
870 scsi_queue_work(host, &conn->xmitwork); 1082 scsi_queue_work(host, &conn->xmitwork);
1083 spin_lock(host->host_lock);
871 return 0; 1084 return 0;
872 1085
873reject: 1086reject:
874 spin_unlock(&session->lock); 1087 spin_unlock(&session->lock);
875 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason); 1088 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
1089 spin_lock(host->host_lock);
876 return SCSI_MLQUEUE_HOST_BUSY; 1090 return SCSI_MLQUEUE_HOST_BUSY;
877 1091
878fault: 1092fault:
@@ -882,6 +1096,7 @@ fault:
882 sc->result = (DID_NO_CONNECT << 16); 1096 sc->result = (DID_NO_CONNECT << 16);
883 scsi_set_resid(sc, scsi_bufflen(sc)); 1097 scsi_set_resid(sc, scsi_bufflen(sc));
884 sc->scsi_done(sc); 1098 sc->scsi_done(sc);
1099 spin_lock(host->host_lock);
885 return 0; 1100 return 0;
886} 1101}
887EXPORT_SYMBOL_GPL(iscsi_queuecommand); 1102EXPORT_SYMBOL_GPL(iscsi_queuecommand);
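The queuecommand change above drops the midlayer-held host_lock for the duration of the call and re-takes it on all three exit paths (success, reject, fault), so session->lock is never nested inside host_lock. A minimal pthread sketch of the same discipline, with made-up names:

#include <pthread.h>
#include <stdio.h>

/* Model of the locking discipline: enter with host_lock held (as the
 * SCSI midlayer does), drop it, take the session lock on its own,
 * and re-acquire host_lock on every return path. Names are made up. */
static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t session_lock = PTHREAD_MUTEX_INITIALIZER;

static int queuecommand(int session_ok)
{
	pthread_mutex_unlock(&host_lock);

	pthread_mutex_lock(&session_lock);
	if (!session_ok) {
		pthread_mutex_unlock(&session_lock);
		pthread_mutex_lock(&host_lock);	/* reject path */
		return 1;			/* ~SCSI_MLQUEUE_HOST_BUSY */
	}
	pthread_mutex_unlock(&session_lock);

	pthread_mutex_lock(&host_lock);		/* success path */
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&host_lock);
	printf("rc=%d\n", queuecommand(1));
	pthread_mutex_unlock(&host_lock);
	return 0;
}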
@@ -895,72 +1110,15 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
895} 1110}
896EXPORT_SYMBOL_GPL(iscsi_change_queue_depth); 1111EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
897 1112
898static struct iscsi_mgmt_task *
899__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
900 char *data, uint32_t data_size)
901{
902 struct iscsi_session *session = conn->session;
903 struct iscsi_mgmt_task *mtask;
904
905 if (session->state == ISCSI_STATE_TERMINATE)
906 return NULL;
907
908 if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
909 hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
910 /*
911 * Login and Text are sent serially, in
912 * request-followed-by-response sequence.
913 * Same mtask can be used. Same ITT must be used.
914 * Note that login_mtask is preallocated at conn_create().
915 */
916 mtask = conn->login_mtask;
917 else {
918 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
919 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
920
921 if (!__kfifo_get(session->mgmtpool.queue,
922 (void*)&mtask, sizeof(void*)))
923 return NULL;
924 }
925
926 if (data_size) {
927 memcpy(mtask->data, data, data_size);
928 mtask->data_count = data_size;
929 } else
930 mtask->data_count = 0;
931
932 INIT_LIST_HEAD(&mtask->running);
933 memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
934 __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
935 return mtask;
936}
937
938int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
939 char *data, uint32_t data_size)
940{
941 struct iscsi_conn *conn = cls_conn->dd_data;
942 struct iscsi_session *session = conn->session;
943 int err = 0;
944
945 spin_lock_bh(&session->lock);
946 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
947 err = -EPERM;
948 spin_unlock_bh(&session->lock);
949 scsi_queue_work(session->host, &conn->xmitwork);
950 return err;
951}
952EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
953
954void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) 1113void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
955{ 1114{
956 struct iscsi_session *session = class_to_transport_session(cls_session); 1115 struct iscsi_session *session = class_to_transport_session(cls_session);
957 struct iscsi_conn *conn = session->leadconn;
958 1116
959 spin_lock_bh(&session->lock); 1117 spin_lock_bh(&session->lock);
960 if (session->state != ISCSI_STATE_LOGGED_IN) { 1118 if (session->state != ISCSI_STATE_LOGGED_IN) {
961 session->state = ISCSI_STATE_RECOVERY_FAILED; 1119 session->state = ISCSI_STATE_RECOVERY_FAILED;
962 if (conn) 1120 if (session->leadconn)
963 wake_up(&conn->ehwait); 1121 wake_up(&session->leadconn->ehwait);
964 } 1122 }
965 spin_unlock_bh(&session->lock); 1123 spin_unlock_bh(&session->lock);
966} 1124}
@@ -971,30 +1129,25 @@ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
971 struct Scsi_Host *host = sc->device->host; 1129 struct Scsi_Host *host = sc->device->host;
972 struct iscsi_session *session = iscsi_hostdata(host->hostdata); 1130 struct iscsi_session *session = iscsi_hostdata(host->hostdata);
973 struct iscsi_conn *conn = session->leadconn; 1131 struct iscsi_conn *conn = session->leadconn;
974 int fail_session = 0;
975 1132
1133 mutex_lock(&session->eh_mutex);
976 spin_lock_bh(&session->lock); 1134 spin_lock_bh(&session->lock);
977 if (session->state == ISCSI_STATE_TERMINATE) { 1135 if (session->state == ISCSI_STATE_TERMINATE) {
978failed: 1136failed:
979 debug_scsi("failing host reset: session terminated " 1137 debug_scsi("failing host reset: session terminated "
980 "[CID %d age %d]\n", conn->id, session->age); 1138 "[CID %d age %d]\n", conn->id, session->age);
981 spin_unlock_bh(&session->lock); 1139 spin_unlock_bh(&session->lock);
1140 mutex_unlock(&session->eh_mutex);
982 return FAILED; 1141 return FAILED;
983 } 1142 }
984 1143
985 if (sc->SCp.phase == session->age) {
986 debug_scsi("failing connection CID %d due to SCSI host reset\n",
987 conn->id);
988 fail_session = 1;
989 }
990 spin_unlock_bh(&session->lock); 1144 spin_unlock_bh(&session->lock);
991 1145 mutex_unlock(&session->eh_mutex);
992 /* 1146 /*
993 * we drop the lock here but the leadconn cannot be destroyed while 1147 * we drop the lock here but the leadconn cannot be destroyed while
994 * we are in the scsi eh 1148 * we are in the scsi eh
995 */ 1149 */
996 if (fail_session) 1150 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
997 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
998 1151
999 debug_scsi("iscsi_eh_host_reset wait for relogin\n"); 1152 debug_scsi("iscsi_eh_host_reset wait for relogin\n");
1000 wait_event_interruptible(conn->ehwait, 1153 wait_event_interruptible(conn->ehwait,
@@ -1004,73 +1157,56 @@ failed:
1004 if (signal_pending(current)) 1157 if (signal_pending(current))
1005 flush_signals(current); 1158 flush_signals(current);
1006 1159
1160 mutex_lock(&session->eh_mutex);
1007 spin_lock_bh(&session->lock); 1161 spin_lock_bh(&session->lock);
1008 if (session->state == ISCSI_STATE_LOGGED_IN) 1162 if (session->state == ISCSI_STATE_LOGGED_IN)
1009 printk(KERN_INFO "iscsi: host reset succeeded\n"); 1163 printk(KERN_INFO "iscsi: host reset succeeded\n");
1010 else 1164 else
1011 goto failed; 1165 goto failed;
1012 spin_unlock_bh(&session->lock); 1166 spin_unlock_bh(&session->lock);
1013 1167 mutex_unlock(&session->eh_mutex);
1014 return SUCCESS; 1168 return SUCCESS;
1015} 1169}
1016EXPORT_SYMBOL_GPL(iscsi_eh_host_reset); 1170EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
1017 1171
1018static void iscsi_tmabort_timedout(unsigned long data) 1172static void iscsi_tmf_timedout(unsigned long data)
1019{ 1173{
1020 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data; 1174 struct iscsi_conn *conn = (struct iscsi_conn *)data;
1021 struct iscsi_conn *conn = ctask->conn;
1022 struct iscsi_session *session = conn->session; 1175 struct iscsi_session *session = conn->session;
1023 1176
1024 spin_lock(&session->lock); 1177 spin_lock(&session->lock);
1025 if (conn->tmabort_state == TMABORT_INITIAL) { 1178 if (conn->tmf_state == TMF_QUEUED) {
1026 conn->tmabort_state = TMABORT_TIMEDOUT; 1179 conn->tmf_state = TMF_TIMEDOUT;
1027 debug_scsi("tmabort timedout [sc %p itt 0x%x]\n", 1180 debug_scsi("tmf timedout\n");
1028 ctask->sc, ctask->itt);
1029 /* unblock eh_abort() */ 1181 /* unblock eh_abort() */
1030 wake_up(&conn->ehwait); 1182 wake_up(&conn->ehwait);
1031 } 1183 }
1032 spin_unlock(&session->lock); 1184 spin_unlock(&session->lock);
1033} 1185}
1034 1186
1035static int iscsi_exec_abort_task(struct scsi_cmnd *sc, 1187static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1036 struct iscsi_cmd_task *ctask) 1188 struct iscsi_tm *hdr, int age,
1189 int timeout)
1037{ 1190{
1038 struct iscsi_conn *conn = ctask->conn;
1039 struct iscsi_session *session = conn->session; 1191 struct iscsi_session *session = conn->session;
1040 struct iscsi_tm *hdr = &conn->tmhdr; 1192 struct iscsi_mgmt_task *mtask;
1041
1042 /*
1043 * ctask timed out but session is OK requests must be serialized.
1044 */
1045 memset(hdr, 0, sizeof(struct iscsi_tm));
1046 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1047 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
1048 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1049 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
1050 hdr->rtt = ctask->hdr->itt;
1051 hdr->refcmdsn = ctask->hdr->cmdsn;
1052 1193
1053 ctask->mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, 1194 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
1054 NULL, 0); 1195 NULL, 0);
1055 if (!ctask->mtask) { 1196 if (!mtask) {
1056 spin_unlock_bh(&session->lock); 1197 spin_unlock_bh(&session->lock);
1057 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1198 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1058 spin_lock_bh(&session->lock); 1199 spin_lock_bh(&session->lock);
1059 debug_scsi("abort sent failure [itt 0x%x]\n", ctask->itt); 1200 debug_scsi("tmf exec failure\n");
1060 return -EPERM; 1201 return -EPERM;
1061 } 1202 }
1062 ctask->state = ISCSI_TASK_ABORTING; 1203 conn->tmfcmd_pdus_cnt++;
1204 conn->tmf_timer.expires = timeout * HZ + jiffies;
1205 conn->tmf_timer.function = iscsi_tmf_timedout;
1206 conn->tmf_timer.data = (unsigned long)conn;
1207 add_timer(&conn->tmf_timer);
1208 debug_scsi("tmf set timeout\n");
1063 1209
1064 debug_scsi("abort sent [itt 0x%x]\n", ctask->itt);
1065
1066 if (conn->tmabort_state == TMABORT_INITIAL) {
1067 conn->tmfcmd_pdus_cnt++;
1068 conn->tmabort_timer.expires = 20*HZ + jiffies;
1069 conn->tmabort_timer.function = iscsi_tmabort_timedout;
1070 conn->tmabort_timer.data = (unsigned long)ctask;
1071 add_timer(&conn->tmabort_timer);
1072 debug_scsi("abort set timeout [itt 0x%x]\n", ctask->itt);
1073 }
1074 spin_unlock_bh(&session->lock); 1210 spin_unlock_bh(&session->lock);
1075 mutex_unlock(&session->eh_mutex); 1211 mutex_unlock(&session->eh_mutex);
1076 scsi_queue_work(session->host, &conn->xmitwork); 1212 scsi_queue_work(session->host, &conn->xmitwork);
@@ -1078,113 +1214,197 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
1078 /* 1214 /*
1079 * block eh thread until: 1215 * block eh thread until:
1080 * 1216 *
1081 * 1) abort response 1217 * 1) tmf response
1082 * 2) abort timeout 1218 * 2) tmf timeout
1083 * 3) session is terminated or restarted or userspace has 1219 * 3) session is terminated or restarted or userspace has
1084 * given up on recovery 1220 * given up on recovery
1085 */ 1221 */
1086 wait_event_interruptible(conn->ehwait, 1222 wait_event_interruptible(conn->ehwait, age != session->age ||
1087 sc->SCp.phase != session->age ||
1088 session->state != ISCSI_STATE_LOGGED_IN || 1223 session->state != ISCSI_STATE_LOGGED_IN ||
1089 conn->tmabort_state != TMABORT_INITIAL); 1224 conn->tmf_state != TMF_QUEUED);
1090 if (signal_pending(current)) 1225 if (signal_pending(current))
1091 flush_signals(current); 1226 flush_signals(current);
1092 del_timer_sync(&conn->tmabort_timer); 1227 del_timer_sync(&conn->tmf_timer);
1228
1093 mutex_lock(&session->eh_mutex); 1229 mutex_lock(&session->eh_mutex);
1094 spin_lock_bh(&session->lock); 1230 spin_lock_bh(&session->lock);
1231 /* if the session drops it will clean up the mtask */
1232 if (age != session->age ||
1233 session->state != ISCSI_STATE_LOGGED_IN)
1234 return -ENOTCONN;
1095 return 0; 1235 return 0;
1096} 1236}
1097 1237
1098/* 1238/*
1099 * session lock must be held 1239 * Fail commands. Session lock must be held, recv side suspended
1240 * and xmit thread flushed.
1100 */ 1241 */
1101static struct iscsi_mgmt_task * 1242static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
1102iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
1103{ 1243{
1104 int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); 1244 struct iscsi_cmd_task *ctask, *tmp;
1105 struct iscsi_mgmt_task *task;
1106 1245
1107 debug_scsi("searching %d tasks\n", nr_tasks); 1246 if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
1247 conn->ctask = NULL;
1108 1248
1109 for (i = 0; i < nr_tasks; i++) { 1249 /* flush pending */
1110 __kfifo_get(fifo, (void*)&task, sizeof(void*)); 1250 list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
1111 debug_scsi("check task %u\n", task->itt); 1251 if (lun == ctask->sc->device->lun || lun == -1) {
1252 debug_scsi("failing pending sc %p itt 0x%x\n",
1253 ctask->sc, ctask->itt);
1254 fail_command(conn, ctask, DID_BUS_BUSY << 16);
1255 }
1256 }
1112 1257
1113 if (task->itt == itt) { 1258 list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
1114 debug_scsi("matched task\n"); 1259 if (lun == ctask->sc->device->lun || lun == -1) {
1115 return task; 1260 debug_scsi("failing requeued sc %p itt 0x%x\n",
1261 ctask->sc, ctask->itt);
1262 fail_command(conn, ctask, DID_BUS_BUSY << 16);
1116 } 1263 }
1264 }
1117 1265
1118 __kfifo_put(fifo, (void*)&task, sizeof(void*)); 1266 /* fail all other running */
1267 list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
1268 if (lun == ctask->sc->device->lun || lun == -1) {
1269 debug_scsi("failing in progress sc %p itt 0x%x\n",
1270 ctask->sc, ctask->itt);
1271 fail_command(conn, ctask, DID_BUS_BUSY << 16);
1272 }
1119 } 1273 }
1120 return NULL;
1121} 1274}
1122 1275
1123static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask) 1276static void iscsi_suspend_tx(struct iscsi_conn *conn)
1124{ 1277{
1125 struct iscsi_conn *conn = ctask->conn; 1278 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1126 struct iscsi_session *session = conn->session; 1279 scsi_flush_work(conn->session->host);
1127 1280}
1128 if (!ctask->mtask)
1129 return -EINVAL;
1130 1281
1131 if (!iscsi_remove_mgmt_task(conn->mgmtqueue, ctask->mtask->itt)) 1282static void iscsi_start_tx(struct iscsi_conn *conn)
1132 list_del(&ctask->mtask->running); 1283{
1133 __kfifo_put(session->mgmtpool.queue, (void*)&ctask->mtask, 1284 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1134 sizeof(void*)); 1285 scsi_queue_work(conn->session->host, &conn->xmitwork);
1135 ctask->mtask = NULL;
1136 return 0;
1137} 1286}
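iscsi_suspend_tx() and iscsi_start_tx() now bracket every place the error handlers fail commands: the suspend bit gates the xmit worker, and the work is flushed before anything is torn down. A toy model of the pairing (plain C, illustrative names only):

#include <stdio.h>

/* Standalone model of the suspend/start pairing used by the eh
 * paths: a suspend flag gates the xmit worker; failing commands is
 * only safe once the flag is set and the worker has been flushed.
 * Everything here is illustrative, not the kernel API. */
static int suspend_bit;

static void xmit_worker(void)
{
	if (suspend_bit)
		return;			/* mirrors the ISCSI_SUSPEND_BIT test */
	printf("xmit worker runs\n");
}

int main(void)
{
	suspend_bit = 1;		/* iscsi_suspend_tx(): set bit ... */
	xmit_worker();			/* ... then flush pending work */
	printf("fail commands under recv + session locks\n");
	suspend_bit = 0;		/* iscsi_start_tx(): clear bit ... */
	xmit_worker();			/* ... and requeue the work */
	return 0;
}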
1138 1287
1139/* 1288static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1140 * session lock must be held
1141 */
1142static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1143 int err)
1144{ 1289{
1145 struct scsi_cmnd *sc; 1290 struct iscsi_cls_session *cls_session;
1291 struct iscsi_session *session;
1292 struct iscsi_conn *conn;
1293 enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
1146 1294
1147 sc = ctask->sc; 1295 cls_session = starget_to_session(scsi_target(scmd->device));
1148 if (!sc) 1296 session = class_to_transport_session(cls_session);
1149 return;
1150 1297
1151 if (ctask->state == ISCSI_TASK_PENDING) 1298 debug_scsi("scsi cmd %p timedout\n", scmd);
1299
1300 spin_lock(&session->lock);
1301 if (session->state != ISCSI_STATE_LOGGED_IN) {
1152 /* 1302 /*
1153 * cmd never made it to the xmit thread, so we should not count 1303 * We are probably in the middle of iscsi recovery so let
1154 * the cmd in the sequencing 1304 * that complete and handle the error.
1155 */ 1305 */
1156 conn->session->queued_cmdsn--; 1306 rc = EH_RESET_TIMER;
1157 else 1307 goto done;
1158 conn->session->tt->cleanup_cmd_task(conn, ctask); 1308 }
1159 iscsi_ctask_mtask_cleanup(ctask);
1160 1309
1161 sc->result = err; 1310 conn = session->leadconn;
1162 scsi_set_resid(sc, scsi_bufflen(sc)); 1311 if (!conn) {
1163 if (conn->ctask == ctask) 1312 /* In the middle of shuting down */
1164 conn->ctask = NULL; 1312 /* In the middle of shutting down */
1165 /* release ref from queuecommand */ 1314 goto done;
1166 __iscsi_put_ctask(ctask); 1315 }
1316
1317 if (!conn->recv_timeout && !conn->ping_timeout)
1318 goto done;
1319 /*
1320 * if the ping timed out then we are in the middle of cleaning up
1321 * and can let the iscsi eh handle it
1322 */
1323 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1324 (conn->ping_timeout * HZ), jiffies))
1325 rc = EH_RESET_TIMER;
1326 /*
1327 * if we are about to check the transport then give the command
1328 * more time
1329 */
1330 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
1331 jiffies))
1332 rc = EH_RESET_TIMER;
1333 /* if in the middle of checking the transport then give us more time */
1334 if (conn->ping_mtask)
1335 rc = EH_RESET_TIMER;
1336done:
1337 spin_unlock(&session->lock);
1338 debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
1339 return rc;
1167} 1340}
1168 1341
1169static void iscsi_suspend_tx(struct iscsi_conn *conn) 1342static void iscsi_check_transport_timeouts(unsigned long data)
1170{ 1343{
1171 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1344 struct iscsi_conn *conn = (struct iscsi_conn *)data;
1172 scsi_flush_work(conn->session->host); 1345 struct iscsi_session *session = conn->session;
1346 unsigned long timeout, next_timeout = 0, last_recv;
1347
1348 spin_lock(&session->lock);
1349 if (session->state != ISCSI_STATE_LOGGED_IN)
1350 goto done;
1351
1352 timeout = conn->recv_timeout;
1353 if (!timeout)
1354 goto done;
1355
1356 timeout *= HZ;
1357 last_recv = conn->last_recv;
1358 if (time_before_eq(last_recv + timeout + (conn->ping_timeout * HZ),
1359 jiffies)) {
1360 printk(KERN_ERR "ping timeout of %d secs expired, "
1361 "last rx %lu, last ping %lu, now %lu\n",
1362 conn->ping_timeout, last_recv,
1363 conn->last_ping, jiffies);
1364 spin_unlock(&session->lock);
1365 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1366 return;
1367 }
1368
1369 if (time_before_eq(last_recv + timeout, jiffies)) {
1370 if (time_before_eq(conn->last_ping, last_recv)) {
1371 /* send a ping to try to provoke some traffic */
1372 debug_scsi("Sending nopout as ping on conn %p\n", conn);
1373 iscsi_send_nopout(conn, NULL);
1374 }
1375 next_timeout = last_recv + timeout + (conn->ping_timeout * HZ);
1376 } else {
1377 next_timeout = last_recv + timeout;
1378 }
1379
1380 if (next_timeout) {
1381 debug_scsi("Setting next tmo %lu\n", next_timeout);
1382 mod_timer(&conn->transport_timer, next_timeout);
1383 }
1384done:
1385 spin_unlock(&session->lock);
1173} 1386}
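The new transport timer implements a two-stage deadline: after recv_timeout of silence a nop-out is sent, and only after a further ping_timeout without traffic is the connection failed. A standalone model of the arithmetic (plain integers instead of jiffies, so no wraparound handling):

#include <stdio.h>

/* Two-stage deadline model: recv_tmo of silence triggers a nop-out,
 * a further ping_tmo of silence fails the connection. Plain integers
 * stand in for jiffies (the kernel uses time_before_eq() to cope
 * with wraparound). */
int main(void)
{
	unsigned long now = 100, last_recv = 40, last_ping = 30;
	unsigned long recv_tmo = 50, ping_tmo = 20;	/* in ticks */

	if (last_recv + recv_tmo + ping_tmo <= now) {
		printf("ping timeout expired -> fail connection\n");
	} else if (last_recv + recv_tmo <= now) {
		if (last_ping <= last_recv)
			printf("send nop-out ping\n");
		printf("next timer at %lu\n",
		       last_recv + recv_tmo + ping_tmo);
	} else {
		printf("next timer at %lu\n", last_recv + recv_tmo);
	}
	return 0;
}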
1174 1387
1175static void iscsi_start_tx(struct iscsi_conn *conn) 1388static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
1389 struct iscsi_tm *hdr)
1176{ 1390{
1177 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1391 memset(hdr, 0, sizeof(*hdr));
1178 scsi_queue_work(conn->session->host, &conn->xmitwork); 1392 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1393 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
1394 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1395 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
1396 hdr->rtt = ctask->hdr->itt;
1397 hdr->refcmdsn = ctask->hdr->cmdsn;
1179} 1398}
1180 1399
1181int iscsi_eh_abort(struct scsi_cmnd *sc) 1400int iscsi_eh_abort(struct scsi_cmnd *sc)
1182{ 1401{
1183 struct Scsi_Host *host = sc->device->host; 1402 struct Scsi_Host *host = sc->device->host;
1184 struct iscsi_session *session = iscsi_hostdata(host->hostdata); 1403 struct iscsi_session *session = iscsi_hostdata(host->hostdata);
1185 struct iscsi_cmd_task *ctask;
1186 struct iscsi_conn *conn; 1404 struct iscsi_conn *conn;
1187 int rc; 1405 struct iscsi_cmd_task *ctask;
1406 struct iscsi_tm *hdr;
1407 int rc, age;
1188 1408
1189 mutex_lock(&session->eh_mutex); 1409 mutex_lock(&session->eh_mutex);
1190 spin_lock_bh(&session->lock); 1410 spin_lock_bh(&session->lock);
@@ -1199,19 +1419,23 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1199 return SUCCESS; 1419 return SUCCESS;
1200 } 1420 }
1201 1421
1202 ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
1203 conn = ctask->conn;
1204
1205 conn->eh_abort_cnt++;
1206 debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
1207
1208 /* 1422 /*
1209 * If we are not logged in or we have started a new session 1423 * If we are not logged in or we have started a new session
1210 * then let the host reset code handle this 1424 * then let the host reset code handle this
1211 */ 1425 */
1212 if (session->state != ISCSI_STATE_LOGGED_IN || 1426 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
1213 sc->SCp.phase != session->age) 1427 sc->SCp.phase != session->age) {
1214 goto failed; 1428 spin_unlock_bh(&session->lock);
1429 mutex_unlock(&session->eh_mutex);
1430 return FAILED;
1431 }
1432
1433 conn = session->leadconn;
1434 conn->eh_abort_cnt++;
1435 age = session->age;
1436
1437 ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
1438 debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
1215 1439
1216 /* ctask completed before time out */ 1440 /* ctask completed before time out */
1217 if (!ctask->sc) { 1441 if (!ctask->sc) {
@@ -1219,27 +1443,26 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1219 goto success; 1443 goto success;
1220 } 1444 }
1221 1445
1222 /* what should we do here ? */
1223 if (conn->ctask == ctask) {
1224 printk(KERN_INFO "iscsi: sc %p itt 0x%x partially sent. "
1225 "Failing abort\n", sc, ctask->itt);
1226 goto failed;
1227 }
1228
1229 if (ctask->state == ISCSI_TASK_PENDING) { 1446 if (ctask->state == ISCSI_TASK_PENDING) {
1230 fail_command(conn, ctask, DID_ABORT << 16); 1447 fail_command(conn, ctask, DID_ABORT << 16);
1231 goto success; 1448 goto success;
1232 } 1449 }
1233 1450
1234 conn->tmabort_state = TMABORT_INITIAL; 1451 /* only have one tmf outstanding at a time */
1235 rc = iscsi_exec_abort_task(sc, ctask); 1452 if (conn->tmf_state != TMF_INITIAL)
1236 if (rc || sc->SCp.phase != session->age || 1453 goto failed;
1237 session->state != ISCSI_STATE_LOGGED_IN) 1454 conn->tmf_state = TMF_QUEUED;
1455
1456 hdr = &conn->tmhdr;
1457 iscsi_prep_abort_task_pdu(ctask, hdr);
1458
1459 if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
1460 rc = FAILED;
1238 goto failed; 1461 goto failed;
1239 iscsi_ctask_mtask_cleanup(ctask); 1462 }
1240 1463
1241 switch (conn->tmabort_state) { 1464 switch (conn->tmf_state) {
1242 case TMABORT_SUCCESS: 1465 case TMF_SUCCESS:
1243 spin_unlock_bh(&session->lock); 1466 spin_unlock_bh(&session->lock);
1244 iscsi_suspend_tx(conn); 1467 iscsi_suspend_tx(conn);
1245 /* 1468 /*
@@ -1248,22 +1471,26 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1248 write_lock_bh(conn->recv_lock); 1471 write_lock_bh(conn->recv_lock);
1249 spin_lock(&session->lock); 1472 spin_lock(&session->lock);
1250 fail_command(conn, ctask, DID_ABORT << 16); 1473 fail_command(conn, ctask, DID_ABORT << 16);
1474 conn->tmf_state = TMF_INITIAL;
1251 spin_unlock(&session->lock); 1475 spin_unlock(&session->lock);
1252 write_unlock_bh(conn->recv_lock); 1476 write_unlock_bh(conn->recv_lock);
1253 iscsi_start_tx(conn); 1477 iscsi_start_tx(conn);
1254 goto success_unlocked; 1478 goto success_unlocked;
1255 case TMABORT_NOT_FOUND: 1479 case TMF_TIMEDOUT:
1256 if (!ctask->sc) { 1480 spin_unlock_bh(&session->lock);
1481 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1482 goto failed_unlocked;
1483 case TMF_NOT_FOUND:
1484 if (!sc->SCp.ptr) {
1485 conn->tmf_state = TMF_INITIAL;
1257 /* ctask completed before tmf abort response */ 1486 /* ctask completed before tmf abort response */
1258 debug_scsi("sc completed while abort in progress\n"); 1487 debug_scsi("sc completed while abort in progress\n");
1259 goto success; 1488 goto success;
1260 } 1489 }
1261 /* fall through */ 1490 /* fall through */
1262 default: 1491 default:
1263 /* timedout or failed */ 1492 conn->tmf_state = TMF_INITIAL;
1264 spin_unlock_bh(&session->lock); 1493 goto failed;
1265 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1266 goto failed_unlocked;
1267 } 1494 }
1268 1495
1269success: 1496success:
@@ -1276,65 +1503,152 @@ success_unlocked:
1276failed: 1503failed:
1277 spin_unlock_bh(&session->lock); 1504 spin_unlock_bh(&session->lock);
1278failed_unlocked: 1505failed_unlocked:
1279 debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); 1506 debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
1507 ctask ? ctask->itt : 0);
1280 mutex_unlock(&session->eh_mutex); 1508 mutex_unlock(&session->eh_mutex);
1281 return FAILED; 1509 return FAILED;
1282} 1510}
1283EXPORT_SYMBOL_GPL(iscsi_eh_abort); 1511EXPORT_SYMBOL_GPL(iscsi_eh_abort);
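The abort path now tracks a single per-connection TMF state instead of per-task TMABORT flags, and allows only one outstanding TMF at a time. A compressed model of how eh_abort maps the terminal states to outcomes (the enum mirrors the patch; the mapping function is illustrative):

#include <stdio.h>

/* Terminal TMF states and the eh_abort outcome each one maps to. */
enum tmf_state { TMF_INITIAL, TMF_QUEUED, TMF_SUCCESS,
		 TMF_NOT_FOUND, TMF_TIMEDOUT };

static const char *eh_abort_outcome(enum tmf_state s)
{
	switch (s) {
	case TMF_SUCCESS:
		return "fail command with DID_ABORT, return SUCCESS";
	case TMF_NOT_FOUND:
		return "command completed meanwhile, return SUCCESS";
	case TMF_TIMEDOUT:
		return "drop the connection, return FAILED";
	default:
		return "reset to TMF_INITIAL, return FAILED";
	}
}

int main(void)
{
	/* only one TMF outstanding: INITIAL -> QUEUED -> one of these */
	printf("%s\n", eh_abort_outcome(TMF_TIMEDOUT));
	return 0;
}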
1284 1512
1513static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
1514{
1515 memset(hdr, 0, sizeof(*hdr));
1516 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1517 hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
1518 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1519 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
1520 hdr->rtt = RESERVED_ITT;
1521}
1522
1523int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1524{
1525 struct Scsi_Host *host = sc->device->host;
1526 struct iscsi_session *session = iscsi_hostdata(host->hostdata);
1527 struct iscsi_conn *conn;
1528 struct iscsi_tm *hdr;
1529 int rc = FAILED;
1530
1531 debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
1532
1533 mutex_lock(&session->eh_mutex);
1534 spin_lock_bh(&session->lock);
1535 /*
1536 * Just check if we are not logged in. We cannot check for
1537 * the phase because the reset could come from an ioctl.
1538 */
1539 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
1540 goto unlock;
1541 conn = session->leadconn;
1542
1543 /* only have one tmf outstanding at a time */
1544 if (conn->tmf_state != TMF_INITIAL)
1545 goto unlock;
1546 conn->tmf_state = TMF_QUEUED;
1547
1548 hdr = &conn->tmhdr;
1549 iscsi_prep_lun_reset_pdu(sc, hdr);
1550
1551 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
1552 session->lu_reset_timeout)) {
1553 rc = FAILED;
1554 goto unlock;
1555 }
1556
1557 switch (conn->tmf_state) {
1558 case TMF_SUCCESS:
1559 break;
1560 case TMF_TIMEDOUT:
1561 spin_unlock_bh(&session->lock);
1562 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1563 goto done;
1564 default:
1565 conn->tmf_state = TMF_INITIAL;
1566 goto unlock;
1567 }
1568
1569 rc = SUCCESS;
1570 spin_unlock_bh(&session->lock);
1571
1572 iscsi_suspend_tx(conn);
1573 /* need to grab the recv lock then session lock */
1574 write_lock_bh(conn->recv_lock);
1575 spin_lock(&session->lock);
1576 fail_all_commands(conn, sc->device->lun);
1577 conn->tmf_state = TMF_INITIAL;
1578 spin_unlock(&session->lock);
1579 write_unlock_bh(conn->recv_lock);
1580
1581 iscsi_start_tx(conn);
1582 goto done;
1583
1584unlock:
1585 spin_unlock_bh(&session->lock);
1586done:
1587 debug_scsi("iscsi_eh_device_reset %s\n",
1588 rc == SUCCESS ? "SUCCESS" : "FAILED");
1589 mutex_unlock(&session->eh_mutex);
1590 return rc;
1591}
1592EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
1593
1594/*
1595 * Pre-allocate a pool of @max items of @item_size. By default, the pool
1596 * should be accessed via kfifo_{get,put} on q->queue.
1597 * Optionally, the caller can obtain the array of object pointers
1598 * by passing in a non-NULL @items pointer
1599 */
1285int 1600int
1286iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size) 1601iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
1287{ 1602{
1288 int i; 1603 int i, num_arrays = 1;
1289 1604
1290 *items = kmalloc(max * sizeof(void*), GFP_KERNEL); 1605 memset(q, 0, sizeof(*q));
1291 if (*items == NULL)
1292 return -ENOMEM;
1293 1606
1294 q->max = max; 1607 q->max = max;
1295 q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL); 1608
1296 if (q->pool == NULL) { 1609 /* If the user passed an items pointer, he wants a copy of
1297 kfree(*items); 1610 * the array. */
1298 return -ENOMEM; 1611 if (items)
1299 } 1612 num_arrays++;
1613 q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
1614 if (q->pool == NULL)
1615 goto enomem;
1300 1616
1301 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*), 1617 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
1302 GFP_KERNEL, NULL); 1618 GFP_KERNEL, NULL);
1303 if (q->queue == ERR_PTR(-ENOMEM)) { 1619 if (q->queue == ERR_PTR(-ENOMEM))
1304 kfree(q->pool); 1620 goto enomem;
1305 kfree(*items);
1306 return -ENOMEM;
1307 }
1308 1621
1309 for (i = 0; i < max; i++) { 1622 for (i = 0; i < max; i++) {
1310 q->pool[i] = kmalloc(item_size, GFP_KERNEL); 1623 q->pool[i] = kzalloc(item_size, GFP_KERNEL);
1311 if (q->pool[i] == NULL) { 1624 if (q->pool[i] == NULL) {
1312 int j; 1625 q->max = i;
1313 1626 goto enomem;
1314 for (j = 0; j < i; j++)
1315 kfree(q->pool[j]);
1316
1317 kfifo_free(q->queue);
1318 kfree(q->pool);
1319 kfree(*items);
1320 return -ENOMEM;
1321 } 1627 }
1322 memset(q->pool[i], 0, item_size);
1323 (*items)[i] = q->pool[i];
1324 __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*)); 1628 __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
1325 } 1629 }
1630
1631 if (items) {
1632 *items = q->pool + max;
1633 memcpy(*items, q->pool, max * sizeof(void *));
1634 }
1635
1326 return 0; 1636 return 0;
1637
1638enomem:
1639 iscsi_pool_free(q);
1640 return -ENOMEM;
1327} 1641}
1328EXPORT_SYMBOL_GPL(iscsi_pool_init); 1642EXPORT_SYMBOL_GPL(iscsi_pool_init);
1329 1643
1330void iscsi_pool_free(struct iscsi_queue *q, void **items) 1644void iscsi_pool_free(struct iscsi_pool *q)
1331{ 1645{
1332 int i; 1646 int i;
1333 1647
1334 for (i = 0; i < q->max; i++) 1648 for (i = 0; i < q->max; i++)
1335 kfree(items[i]); 1649 kfree(q->pool[i]);
1336 kfree(q->pool); 1650 if (q->pool)
1337 kfree(items); 1651 kfree(q->pool);
1338} 1652}
1339EXPORT_SYMBOL_GPL(iscsi_pool_free); 1653EXPORT_SYMBOL_GPL(iscsi_pool_free);
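iscsi_pool_init() above now makes one allocation of num_arrays * max pointers and, when the caller wants an items array, hands back the second half of that same block, which is why iscsi_pool_free() no longer takes an items argument. A standalone model of that single-allocation layout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* One block of 2 * max pointers: the first half is the pool, the
 * second half the caller-visible items array, freed together. */
int main(void)
{
	int max = 4, item_size = 32, i;
	void **pool = calloc(2 * max, sizeof(void *));
	void **items;

	if (!pool)
		return 1;
	items = pool + max;		/* second array, same block */

	for (i = 0; i < max; i++)
		pool[i] = calloc(1, item_size);
	memcpy(items, pool, max * sizeof(void *));

	printf("items[2] == pool[2]? %s\n",
	       items[2] == pool[2] ? "yes" : "no");

	for (i = 0; i < max; i++)
		free(pool[i]);
	free(pool);			/* frees the items array too */
	return 0;
}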
1340 1654
@@ -1387,7 +1701,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1387 qdepth = ISCSI_DEF_CMD_PER_LUN; 1701 qdepth = ISCSI_DEF_CMD_PER_LUN;
1388 } 1702 }
1389 1703
1390 if (cmds_max < 2 || (cmds_max & (cmds_max - 1)) || 1704 if (!is_power_of_2(cmds_max) ||
1391 cmds_max >= ISCSI_MGMT_ITT_OFFSET) { 1705 cmds_max >= ISCSI_MGMT_ITT_OFFSET) {
1392 if (cmds_max != 0) 1706 if (cmds_max != 0)
1393 printk(KERN_ERR "iscsi: invalid can_queue of %d. " 1707 printk(KERN_ERR "iscsi: invalid can_queue of %d. "
@@ -1411,12 +1725,16 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1411 shost->max_cmd_len = iscsit->max_cmd_len; 1725 shost->max_cmd_len = iscsit->max_cmd_len;
1412 shost->transportt = scsit; 1726 shost->transportt = scsit;
1413 shost->transportt->create_work_queue = 1; 1727 shost->transportt->create_work_queue = 1;
1728 shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
1414 *hostno = shost->host_no; 1729 *hostno = shost->host_no;
1415 1730
1416 session = iscsi_hostdata(shost->hostdata); 1731 session = iscsi_hostdata(shost->hostdata);
1417 memset(session, 0, sizeof(struct iscsi_session)); 1732 memset(session, 0, sizeof(struct iscsi_session));
1418 session->host = shost; 1733 session->host = shost;
1419 session->state = ISCSI_STATE_FREE; 1734 session->state = ISCSI_STATE_FREE;
1735 session->fast_abort = 1;
1736 session->lu_reset_timeout = 15;
1737 session->abort_timeout = 10;
1420 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; 1738 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
1421 session->cmds_max = cmds_max; 1739 session->cmds_max = cmds_max;
1422 session->queued_cmdsn = session->cmdsn = initial_cmdsn; 1740 session->queued_cmdsn = session->cmdsn = initial_cmdsn;
@@ -1479,9 +1797,9 @@ module_put:
1479cls_session_fail: 1797cls_session_fail:
1480 scsi_remove_host(shost); 1798 scsi_remove_host(shost);
1481add_host_fail: 1799add_host_fail:
1482 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); 1800 iscsi_pool_free(&session->mgmtpool);
1483mgmtpool_alloc_fail: 1801mgmtpool_alloc_fail:
1484 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 1802 iscsi_pool_free(&session->cmdpool);
1485cmdpool_alloc_fail: 1803cmdpool_alloc_fail:
1486 scsi_host_put(shost); 1804 scsi_host_put(shost);
1487 return NULL; 1805 return NULL;
@@ -1501,11 +1819,11 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
1501 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 1819 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
1502 struct module *owner = cls_session->transport->owner; 1820 struct module *owner = cls_session->transport->owner;
1503 1821
1504 iscsi_unblock_session(cls_session); 1822 iscsi_remove_session(cls_session);
1505 scsi_remove_host(shost); 1823 scsi_remove_host(shost);
1506 1824
1507 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); 1825 iscsi_pool_free(&session->mgmtpool);
1508 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 1826 iscsi_pool_free(&session->cmdpool);
1509 1827
1510 kfree(session->password); 1828 kfree(session->password);
1511 kfree(session->password_in); 1829 kfree(session->password_in);
@@ -1516,7 +1834,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
1516 kfree(session->hwaddress); 1834 kfree(session->hwaddress);
1517 kfree(session->initiatorname); 1835 kfree(session->initiatorname);
1518 1836
1519 iscsi_destroy_session(cls_session); 1837 iscsi_free_session(cls_session);
1520 scsi_host_put(shost); 1838 scsi_host_put(shost);
1521 module_put(owner); 1839 module_put(owner);
1522} 1840}
@@ -1546,17 +1864,17 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1546 conn->c_stage = ISCSI_CONN_INITIAL_STAGE; 1864 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
1547 conn->id = conn_idx; 1865 conn->id = conn_idx;
1548 conn->exp_statsn = 0; 1866 conn->exp_statsn = 0;
1549 conn->tmabort_state = TMABORT_INITIAL; 1867 conn->tmf_state = TMF_INITIAL;
1868
1869 init_timer(&conn->transport_timer);
1870 conn->transport_timer.data = (unsigned long)conn;
1871 conn->transport_timer.function = iscsi_check_transport_timeouts;
1872
1550 INIT_LIST_HEAD(&conn->run_list); 1873 INIT_LIST_HEAD(&conn->run_list);
1551 INIT_LIST_HEAD(&conn->mgmt_run_list); 1874 INIT_LIST_HEAD(&conn->mgmt_run_list);
1875 INIT_LIST_HEAD(&conn->mgmtqueue);
1552 INIT_LIST_HEAD(&conn->xmitqueue); 1876 INIT_LIST_HEAD(&conn->xmitqueue);
1553 1877 INIT_LIST_HEAD(&conn->requeue);
1554 /* initialize general immediate & non-immediate PDU commands queue */
1555 conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
1556 GFP_KERNEL, NULL);
1557 if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
1558 goto mgmtqueue_alloc_fail;
1559
1560 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 1878 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
1561 1879
1562 /* allocate login_mtask used for the login/text sequences */ 1880 /* allocate login_mtask used for the login/text sequences */
@@ -1574,7 +1892,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1574 goto login_mtask_data_alloc_fail; 1892 goto login_mtask_data_alloc_fail;
1575 conn->login_mtask->data = conn->data = data; 1893 conn->login_mtask->data = conn->data = data;
1576 1894
1577 init_timer(&conn->tmabort_timer); 1895 init_timer(&conn->tmf_timer);
1578 init_waitqueue_head(&conn->ehwait); 1896 init_waitqueue_head(&conn->ehwait);
1579 1897
1580 return cls_conn; 1898 return cls_conn;
@@ -1583,8 +1901,6 @@ login_mtask_data_alloc_fail:
1583 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, 1901 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
1584 sizeof(void*)); 1902 sizeof(void*));
1585login_mtask_alloc_fail: 1903login_mtask_alloc_fail:
1586 kfifo_free(conn->mgmtqueue);
1587mgmtqueue_alloc_fail:
1588 iscsi_destroy_conn(cls_conn); 1904 iscsi_destroy_conn(cls_conn);
1589 return NULL; 1905 return NULL;
1590} 1906}
@@ -1603,8 +1919,9 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1603 struct iscsi_session *session = conn->session; 1919 struct iscsi_session *session = conn->session;
1604 unsigned long flags; 1920 unsigned long flags;
1605 1921
1922 del_timer_sync(&conn->transport_timer);
1923
1606 spin_lock_bh(&session->lock); 1924 spin_lock_bh(&session->lock);
1607 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1608 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 1925 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
1609 if (session->leadconn == conn) { 1926 if (session->leadconn == conn) {
1610 /* 1927 /*
@@ -1637,7 +1954,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1637 } 1954 }
1638 1955
1639 /* flush queued up work because we free the connection below */ 1956 /* flush queued up work because we free the connection below */
1640 scsi_flush_work(session->host); 1957 iscsi_suspend_tx(conn);
1641 1958
1642 spin_lock_bh(&session->lock); 1959 spin_lock_bh(&session->lock);
1643 kfree(conn->data); 1960 kfree(conn->data);
@@ -1648,8 +1965,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1648 session->leadconn = NULL; 1965 session->leadconn = NULL;
1649 spin_unlock_bh(&session->lock); 1966 spin_unlock_bh(&session->lock);
1650 1967
1651 kfifo_free(conn->mgmtqueue);
1652
1653 iscsi_destroy_conn(cls_conn); 1968 iscsi_destroy_conn(cls_conn);
1654} 1969}
1655EXPORT_SYMBOL_GPL(iscsi_conn_teardown); 1970EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
@@ -1672,11 +1987,29 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
1672 return -EINVAL; 1987 return -EINVAL;
1673 } 1988 }
1674 1989
1990 if (conn->ping_timeout && !conn->recv_timeout) {
1991 printk(KERN_ERR "iscsi: invalid recv timeout of zero. "
1992 "Using 5 seconds.\n");
1993 conn->recv_timeout = 5;
1994 }
1995
1996 if (conn->recv_timeout && !conn->ping_timeout) {
1997 printk(KERN_ERR "iscsi: invalid ping timeout of zero. "
1998 "Using 5 seconds.\n");
1999 conn->ping_timeout = 5;
2000 }
2001
1675 spin_lock_bh(&session->lock); 2002 spin_lock_bh(&session->lock);
1676 conn->c_stage = ISCSI_CONN_STARTED; 2003 conn->c_stage = ISCSI_CONN_STARTED;
1677 session->state = ISCSI_STATE_LOGGED_IN; 2004 session->state = ISCSI_STATE_LOGGED_IN;
1678 session->queued_cmdsn = session->cmdsn; 2005 session->queued_cmdsn = session->cmdsn;
1679 2006
2007 conn->last_recv = jiffies;
2008 conn->last_ping = jiffies;
2009 if (conn->recv_timeout && conn->ping_timeout)
2010 mod_timer(&conn->transport_timer,
2011 jiffies + (conn->recv_timeout * HZ));
2012
1680 switch(conn->stop_stage) { 2013 switch(conn->stop_stage) {
1681 case STOP_CONN_RECOVER: 2014 case STOP_CONN_RECOVER:
1682 /* 2015 /*
@@ -1684,7 +2017,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
1684 * commands after successful recovery 2017 * commands after successful recovery
1685 */ 2018 */
1686 conn->stop_stage = 0; 2019 conn->stop_stage = 0;
1687 conn->tmabort_state = TMABORT_INITIAL; 2020 conn->tmf_state = TMF_INITIAL;
1688 session->age++; 2021 session->age++;
1689 spin_unlock_bh(&session->lock); 2022 spin_unlock_bh(&session->lock);
1690 2023
@@ -1709,55 +2042,27 @@ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
1709 struct iscsi_mgmt_task *mtask, *tmp; 2042 struct iscsi_mgmt_task *mtask, *tmp;
1710 2043
1711 /* handle pending */ 2044 /* handle pending */
1712 while (__kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) { 2045 list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
1713 if (mtask == conn->login_mtask)
1714 continue;
1715 debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt); 2046 debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
1716 __kfifo_put(session->mgmtpool.queue, (void*)&mtask, 2047 iscsi_free_mgmt_task(conn, mtask);
1717 sizeof(void*));
1718 } 2048 }
1719 2049
1720 /* handle running */ 2050 /* handle running */
1721 list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) { 2051 list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
1722 debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt); 2052 debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
1723 list_del(&mtask->running); 2053 iscsi_free_mgmt_task(conn, mtask);
1724
1725 if (mtask == conn->login_mtask)
1726 continue;
1727 __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
1728 sizeof(void*));
1729 } 2054 }
1730 2055
1731 conn->mtask = NULL; 2056 conn->mtask = NULL;
1732} 2057}
1733 2058
1734/* Fail commands. Mutex and session lock held and recv side suspended */
1735static void fail_all_commands(struct iscsi_conn *conn)
1736{
1737 struct iscsi_cmd_task *ctask, *tmp;
1738
1739 /* flush pending */
1740 list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
1741 debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
1742 ctask->itt);
1743 fail_command(conn, ctask, DID_BUS_BUSY << 16);
1744 }
1745
1746 /* fail all other running */
1747 list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
1748 debug_scsi("failing in progress sc %p itt 0x%x\n",
1749 ctask->sc, ctask->itt);
1750 fail_command(conn, ctask, DID_BUS_BUSY << 16);
1751 }
1752
1753 conn->ctask = NULL;
1754}
1755
1756static void iscsi_start_session_recovery(struct iscsi_session *session, 2059static void iscsi_start_session_recovery(struct iscsi_session *session,
1757 struct iscsi_conn *conn, int flag) 2060 struct iscsi_conn *conn, int flag)
1758{ 2061{
1759 int old_stop_stage; 2062 int old_stop_stage;
1760 2063
2064 del_timer_sync(&conn->transport_timer);
2065
1761 mutex_lock(&session->eh_mutex); 2066 mutex_lock(&session->eh_mutex);
1762 spin_lock_bh(&session->lock); 2067 spin_lock_bh(&session->lock);
1763 if (conn->stop_stage == STOP_CONN_TERM) { 2068 if (conn->stop_stage == STOP_CONN_TERM) {
@@ -1818,7 +2123,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
1818 * flush queues. 2123 * flush queues.
1819 */ 2124 */
1820 spin_lock_bh(&session->lock); 2125 spin_lock_bh(&session->lock);
1821 fail_all_commands(conn); 2126 fail_all_commands(conn, -1);
1822 flush_control_queues(session, conn); 2127 flush_control_queues(session, conn);
1823 spin_unlock_bh(&session->lock); 2128 spin_unlock_bh(&session->lock);
1824 mutex_unlock(&session->eh_mutex); 2129 mutex_unlock(&session->eh_mutex);
@@ -1869,6 +2174,21 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
1869 uint32_t value; 2174 uint32_t value;
1870 2175
1871 switch(param) { 2176 switch(param) {
2177 case ISCSI_PARAM_FAST_ABORT:
2178 sscanf(buf, "%d", &session->fast_abort);
2179 break;
2180 case ISCSI_PARAM_ABORT_TMO:
2181 sscanf(buf, "%d", &session->abort_timeout);
2182 break;
2183 case ISCSI_PARAM_LU_RESET_TMO:
2184 sscanf(buf, "%d", &session->lu_reset_timeout);
2185 break;
2186 case ISCSI_PARAM_PING_TMO:
2187 sscanf(buf, "%d", &conn->ping_timeout);
2188 break;
2189 case ISCSI_PARAM_RECV_TMO:
2190 sscanf(buf, "%d", &conn->recv_timeout);
2191 break;
1872 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2192 case ISCSI_PARAM_MAX_RECV_DLENGTH:
1873 sscanf(buf, "%d", &conn->max_recv_dlength); 2193 sscanf(buf, "%d", &conn->max_recv_dlength);
1874 break; 2194 break;
@@ -1983,6 +2303,15 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
1983 int len; 2303 int len;
1984 2304
1985 switch(param) { 2305 switch(param) {
2306 case ISCSI_PARAM_FAST_ABORT:
2307 len = sprintf(buf, "%d\n", session->fast_abort);
2308 break;
2309 case ISCSI_PARAM_ABORT_TMO:
2310 len = sprintf(buf, "%d\n", session->abort_timeout);
2311 break;
2312 case ISCSI_PARAM_LU_RESET_TMO:
2313 len = sprintf(buf, "%d\n", session->lu_reset_timeout);
2314 break;
1986 case ISCSI_PARAM_INITIAL_R2T_EN: 2315 case ISCSI_PARAM_INITIAL_R2T_EN:
1987 len = sprintf(buf, "%d\n", session->initial_r2t_en); 2316 len = sprintf(buf, "%d\n", session->initial_r2t_en);
1988 break; 2317 break;
@@ -2040,6 +2369,12 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
2040 int len; 2369 int len;
2041 2370
2042 switch(param) { 2371 switch(param) {
2372 case ISCSI_PARAM_PING_TMO:
2373 len = sprintf(buf, "%u\n", conn->ping_timeout);
2374 break;
2375 case ISCSI_PARAM_RECV_TMO:
2376 len = sprintf(buf, "%u\n", conn->recv_timeout);
2377 break;
2043 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2378 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2044 len = sprintf(buf, "%u\n", conn->max_recv_dlength); 2379 len = sprintf(buf, "%u\n", conn->max_recv_dlength);
2045 break; 2380 break;
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index c01a40d321d4..18f33cd54411 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -38,6 +38,15 @@ config SCSI_SAS_ATA
38 Builds in ATA support into libsas. Will necessitate 38 Builds in ATA support into libsas. Will necessitate
39 the loading of libata along with libsas. 39 the loading of libata along with libsas.
40 40
41config SCSI_SAS_HOST_SMP
42 bool "Support for SMP interpretation for SAS hosts"
43 default y
44 depends on SCSI_SAS_LIBSAS
45 help
46 Allows sas hosts to receive SMP frames. Selecting this
47 option builds an SMP interpreter into libsas. Say
48 N here if you want to save the few kb this consumes.
49
41config SCSI_SAS_LIBSAS_DEBUG 50config SCSI_SAS_LIBSAS_DEBUG
42 bool "Compile the SAS Domain Transport Attributes in debug mode" 51 bool "Compile the SAS Domain Transport Attributes in debug mode"
43 default y 52 default y
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index fd387b91856e..1ad1323c60fa 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -33,5 +33,7 @@ libsas-y += sas_init.o \
33 sas_dump.o \ 33 sas_dump.o \
34 sas_discover.o \ 34 sas_discover.o \
35 sas_expander.o \ 35 sas_expander.o \
36 sas_scsi_host.o 36 sas_scsi_host.o \
37 sas_task.o
37libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o 38libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
39libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o
\ No newline at end of file
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 827cfb132f21..0996f866f14c 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -498,7 +498,7 @@ static int sas_execute_task(struct sas_task *task, void *buffer, int size,
498 goto ex_err; 498 goto ex_err;
499 } 499 }
500 wait_for_completion(&task->completion); 500 wait_for_completion(&task->completion);
501 res = -ETASK; 501 res = -ECOMM;
502 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { 502 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
503 int res2; 503 int res2;
504 SAS_DPRINTK("task aborted, flags:0x%x\n", 504 SAS_DPRINTK("task aborted, flags:0x%x\n",
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 5f3a0d7b18de..31b9af224243 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -98,7 +98,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
98 dev->dev_type = SATA_PM; 98 dev->dev_type = SATA_PM;
99 else 99 else
100 dev->dev_type = SATA_DEV; 100 dev->dev_type = SATA_DEV;
101 dev->tproto = SATA_PROTO; 101 dev->tproto = SAS_PROTOCOL_SATA;
102 } else { 102 } else {
103 struct sas_identify_frame *id = 103 struct sas_identify_frame *id =
104 (struct sas_identify_frame *) dev->frame_rcvd; 104 (struct sas_identify_frame *) dev->frame_rcvd;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 8727436b222d..aefd865a5788 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -96,7 +96,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
96 } 96 }
97 97
98 wait_for_completion(&task->completion); 98 wait_for_completion(&task->completion);
99 res = -ETASK; 99 res = -ECOMM;
100 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 100 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
101 SAS_DPRINTK("smp task timed out or aborted\n"); 101 SAS_DPRINTK("smp task timed out or aborted\n");
102 i->dft->lldd_abort_task(task); 102 i->dft->lldd_abort_task(task);
@@ -109,6 +109,16 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
109 task->task_status.stat == SAM_GOOD) { 109 task->task_status.stat == SAM_GOOD) {
110 res = 0; 110 res = 0;
111 break; 111 break;
112 } else if (task->task_status.resp == SAS_TASK_COMPLETE &&
113 task->task_status.stat == SAS_DATA_UNDERRUN) {
114 /* no error, but return the number of bytes of
115 * underrun */
116 res = task->task_status.residual;
117 break;
118 } else if (task->task_status.resp == SAS_TASK_COMPLETE &&
119 task->task_status.stat == SAS_DATA_OVERRUN) {
120 res = -EMSGSIZE;
121 break;
112 } else { 122 } else {
113 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " 123 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
114 "status 0x%x\n", __FUNCTION__, 124 "status 0x%x\n", __FUNCTION__,
@@ -656,9 +666,9 @@ static struct domain_device *sas_ex_discover_end_dev(
656 sas_ex_get_linkrate(parent, child, phy); 666 sas_ex_get_linkrate(parent, child, phy);
657 667
658#ifdef CONFIG_SCSI_SAS_ATA 668#ifdef CONFIG_SCSI_SAS_ATA
659 if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) { 669 if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
660 child->dev_type = SATA_DEV; 670 child->dev_type = SATA_DEV;
661 if (phy->attached_tproto & SAS_PROTO_STP) 671 if (phy->attached_tproto & SAS_PROTOCOL_STP)
662 child->tproto = phy->attached_tproto; 672 child->tproto = phy->attached_tproto;
663 if (phy->attached_sata_dev) 673 if (phy->attached_sata_dev)
664 child->tproto |= SATA_DEV; 674 child->tproto |= SATA_DEV;
@@ -695,7 +705,7 @@ static struct domain_device *sas_ex_discover_end_dev(
695 } 705 }
696 } else 706 } else
697#endif 707#endif
698 if (phy->attached_tproto & SAS_PROTO_SSP) { 708 if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
699 child->dev_type = SAS_END_DEV; 709 child->dev_type = SAS_END_DEV;
700 rphy = sas_end_device_alloc(phy->port); 710 rphy = sas_end_device_alloc(phy->port);
701 /* FIXME: error handling */ 711 /* FIXME: error handling */
@@ -1896,11 +1906,9 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1896 } 1906 }
1897 1907
1898 /* no rphy means no smp target support (ie aic94xx host) */ 1908 /* no rphy means no smp target support (ie aic94xx host) */
1899 if (!rphy) { 1909 if (!rphy)
1900 printk("%s: can we send a smp request to a host?\n", 1910 return sas_smp_host_handler(shost, req, rsp);
1901 __FUNCTION__); 1911
1902 return -EINVAL;
1903 }
1904 type = rphy->identify.device_type; 1912 type = rphy->identify.device_type;
1905 1913
1906 if (type != SAS_EDGE_EXPANDER_DEVICE && 1914 if (type != SAS_EDGE_EXPANDER_DEVICE &&
@@ -1926,6 +1934,15 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1926 1934
1927 ret = smp_execute_task(dev, bio_data(req->bio), req->data_len, 1935 ret = smp_execute_task(dev, bio_data(req->bio), req->data_len,
1928 bio_data(rsp->bio), rsp->data_len); 1936 bio_data(rsp->bio), rsp->data_len);
1937 if (ret > 0) {
1938 /* positive number is the untransferred residual */
1939 rsp->data_len = ret;
1940 req->data_len = 0;
1941 ret = 0;
1942 } else if (ret == 0) {
1943 rsp->data_len = 0;
1944 req->data_len = 0;
1945 }
1929 1946
1930 return ret; 1947 return ret;
1931} 1948}
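sas_smp_handler() above now interprets smp_execute_task()'s return value as a small contract: negative is an error, zero a full transfer, and a positive value the untransferred underrun residual. A standalone sketch of that mapping (plain unsigned lengths stand in for the request fields):

#include <stdio.h>

/* ret < 0: error; ret == 0: full transfer; ret > 0: underrun
 * residual, reported back through the response length. */
static int handle(int ret, unsigned int *req_len, unsigned int *rsp_len)
{
	if (ret > 0) {
		*rsp_len = ret;		/* untransferred residual */
		*req_len = 0;
		ret = 0;
	} else if (ret == 0) {
		*rsp_len = 0;
		*req_len = 0;
	}
	return ret;
}

int main(void)
{
	unsigned int req_len = 64, rsp_len = 128;
	int rc = handle(16, &req_len, &rsp_len);	/* 16-byte underrun */

	printf("rc=%d req_len=%u rsp_len=%u\n", rc, req_len, rsp_len);
	return 0;
}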
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
new file mode 100644
index 000000000000..16f93123271e
--- /dev/null
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -0,0 +1,274 @@
1/*
2 * Serial Attached SCSI (SAS) Expander discovery and configuration
3 *
4 * Copyright (C) 2007 James E.J. Bottomley
5 * <James.Bottomley@HansenPartnership.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; version 2 only.
10 */
11#include <linux/scatterlist.h>
12#include <linux/blkdev.h>
13
14#include "sas_internal.h"
15
16#include <scsi/scsi_transport.h>
17#include <scsi/scsi_transport_sas.h>
18#include "../scsi_sas_internal.h"
19
20static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
21 u8 phy_id)
22{
23 struct sas_phy *phy;
24 struct sas_rphy *rphy;
25
26 if (phy_id >= sas_ha->num_phys) {
27 resp_data[2] = SMP_RESP_NO_PHY;
28 return;
29 }
30 resp_data[2] = SMP_RESP_FUNC_ACC;
31
32 phy = sas_ha->sas_phy[phy_id]->phy;
33 resp_data[9] = phy_id;
34 resp_data[13] = phy->negotiated_linkrate;
35 memcpy(resp_data + 16, sas_ha->sas_addr, SAS_ADDR_SIZE);
36 memcpy(resp_data + 24, sas_ha->sas_phy[phy_id]->attached_sas_addr,
37 SAS_ADDR_SIZE);
38 resp_data[40] = (phy->minimum_linkrate << 4) |
39 phy->minimum_linkrate_hw;
40 resp_data[41] = (phy->maximum_linkrate << 4) |
41 phy->maximum_linkrate_hw;
42
43 if (!sas_ha->sas_phy[phy_id]->port ||
44 !sas_ha->sas_phy[phy_id]->port->port_dev)
45 return;
46
47 rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
48 resp_data[12] = rphy->identify.device_type << 4;
49 resp_data[14] = rphy->identify.initiator_port_protocols;
50 resp_data[15] = rphy->identify.target_port_protocols;
51}
52
53static void sas_report_phy_sata(struct sas_ha_struct *sas_ha, u8 *resp_data,
54 u8 phy_id)
55{
56 struct sas_rphy *rphy;
57 struct dev_to_host_fis *fis;
58 int i;
59
60 if (phy_id >= sas_ha->num_phys) {
61 resp_data[2] = SMP_RESP_NO_PHY;
62 return;
63 }
64
65 resp_data[2] = SMP_RESP_PHY_NO_SATA;
66
67 if (!sas_ha->sas_phy[phy_id]->port)
68 return;
69
70 rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
71 fis = (struct dev_to_host_fis *)
72 sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd;
73 if (rphy->identify.target_port_protocols != SAS_PROTOCOL_SATA)
74 return;
75
76 resp_data[2] = SMP_RESP_FUNC_ACC;
77 resp_data[9] = phy_id;
78 memcpy(resp_data + 16, sas_ha->sas_phy[phy_id]->attached_sas_addr,
79 SAS_ADDR_SIZE);
80
81 /* check to see if we have a valid d2h fis */
82 if (fis->fis_type != 0x34)
83 return;
84
85 /* the d2h fis is required by the standard to be in LE format */
86 for (i = 0; i < 20; i += 4) {
87 u8 *dst = resp_data + 24 + i, *src =
88 &sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd[i];
89 dst[0] = src[3];
90 dst[1] = src[2];
91 dst[2] = src[1];
92 dst[3] = src[0];
93 }
94}
95
96static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
97 u8 phy_op, enum sas_linkrate min,
98 enum sas_linkrate max, u8 *resp_data)
99{
100 struct sas_internal *i =
101 to_sas_internal(sas_ha->core.shost->transportt);
102 struct sas_phy_linkrates rates;
103
104 if (phy_id >= sas_ha->num_phys) {
105 resp_data[2] = SMP_RESP_NO_PHY;
106 return;
107 }
108 switch (phy_op) {
109 case PHY_FUNC_NOP:
110 case PHY_FUNC_LINK_RESET:
111 case PHY_FUNC_HARD_RESET:
112 case PHY_FUNC_DISABLE:
113 case PHY_FUNC_CLEAR_ERROR_LOG:
114 case PHY_FUNC_CLEAR_AFFIL:
115 case PHY_FUNC_TX_SATA_PS_SIGNAL:
116 break;
117
118 default:
119 resp_data[2] = SMP_RESP_PHY_UNK_OP;
120 return;
121 }
122
123 rates.minimum_linkrate = min;
124 rates.maximum_linkrate = max;
125
126 if (i->dft->lldd_control_phy(sas_ha->sas_phy[phy_id], phy_op, &rates))
127 resp_data[2] = SMP_RESP_FUNC_FAILED;
128 else
129 resp_data[2] = SMP_RESP_FUNC_ACC;
130}
131
132int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
133 struct request *rsp)
134{
135 u8 *req_data = NULL, *resp_data = NULL, *buf;
136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
137 int error = -EINVAL, resp_data_len = rsp->data_len;
138
139 /* eight is the minimum size for request and response frames */
140 if (req->data_len < 8 || rsp->data_len < 8)
141 goto out;
142
143 if (bio_offset(req->bio) + req->data_len > PAGE_SIZE ||
144 bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) {
145 shost_printk(KERN_ERR, shost,
146 "SMP request/response frame crosses page boundary");
147 goto out;
148 }
149
150 req_data = kzalloc(req->data_len, GFP_KERNEL);
151
152	/* make sure the response frame can always be built ... we copy
153	 * back only the requested length */
154 resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL);
155
156 if (!req_data || !resp_data) {
157 error = -ENOMEM;
158 goto out;
159 }
160
161 local_irq_disable();
162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
163 memcpy(req_data, buf, req->data_len);
164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
165 local_irq_enable();
166
167 if (req_data[0] != SMP_REQUEST)
168 goto out;
169
170	/* always succeeds ... even if we can't process the request,
171	 * the result is reported in the response frame */
172 error = 0;
173
174	/* set up the default "function unknown" response */
175 resp_data[0] = SMP_RESPONSE;
176 resp_data[1] = req_data[1];
177 resp_data[2] = SMP_RESP_FUNC_UNK;
178
179 switch (req_data[1]) {
180 case SMP_REPORT_GENERAL:
181 req->data_len -= 8;
182 resp_data_len -= 32;
183 resp_data[2] = SMP_RESP_FUNC_ACC;
184 resp_data[9] = sas_ha->num_phys;
185 break;
186
187 case SMP_REPORT_MANUF_INFO:
188 req->data_len -= 8;
189 resp_data_len -= 64;
190 resp_data[2] = SMP_RESP_FUNC_ACC;
191 memcpy(resp_data + 12, shost->hostt->name,
192 SAS_EXPANDER_VENDOR_ID_LEN);
193 memcpy(resp_data + 20, "libsas virt phy",
194 SAS_EXPANDER_PRODUCT_ID_LEN);
195 break;
196
197 case SMP_READ_GPIO_REG:
198 /* FIXME: need GPIO support in the transport class */
199 break;
200
201 case SMP_DISCOVER:
202		req->data_len -= 16;
203 if (req->data_len < 0) {
204 req->data_len = 0;
205 error = -EINVAL;
206 goto out;
207 }
208 resp_data_len -= 56;
209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
210 break;
211
212 case SMP_REPORT_PHY_ERR_LOG:
213 /* FIXME: could implement this with additional
214		 * libsas callbacks provided the HW supports it */
215 break;
216
217 case SMP_REPORT_PHY_SATA:
218		req->data_len -= 16;
219 if (req->data_len < 0) {
220 req->data_len = 0;
221 error = -EINVAL;
222 goto out;
223 }
224 resp_data_len -= 60;
225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
226 break;
227
228 case SMP_REPORT_ROUTE_INFO:
229 /* Can't implement; hosts have no routes */
230 break;
231
232 case SMP_WRITE_GPIO_REG:
233 /* FIXME: need GPIO support in the transport class */
234 break;
235
236 case SMP_CONF_ROUTE_INFO:
237 /* Can't implement; hosts have no routes */
238 break;
239
240 case SMP_PHY_CONTROL:
241		req->data_len -= 44;
242 if (req->data_len < 0) {
243 req->data_len = 0;
244 error = -EINVAL;
245 goto out;
246 }
247 resp_data_len -= 8;
248 sas_phy_control(sas_ha, req_data[9], req_data[10],
249 req_data[32] >> 4, req_data[33] >> 4,
250 resp_data);
251 break;
252
253 case SMP_PHY_TEST_FUNCTION:
254 /* FIXME: should this be implemented? */
255 break;
256
257 default:
258		/* probably a SAS 2.0 function */
259 break;
260 }
261
262 local_irq_disable();
263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
264 memcpy(buf, resp_data, rsp->data_len);
265 flush_kernel_dcache_page(bio_page(rsp->bio));
266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
267 local_irq_enable();
268 rsp->data_len = resp_data_len;
269
270 out:
271 kfree(req_data);
272 kfree(resp_data);
273 return error;
274}
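
As an illustration (editor's sketch, not from the patch), a typical exchange the switch above handles is DISCOVER: a 16-byte request carrying the function code in byte 1 and the phy of interest in byte 9, answered by a 56-byte response whose identify fields sas_host_smp_discover() fills in:

	static const u8 example_discover_req[16] = {
		[0] = 0x40,	/* SMP_REQUEST frame type */
		[1] = 0x10,	/* SMP_DISCOVER function */
		[9] = 0,	/* phy identifier to discover */
	};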
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 2b8213b1832d..b4f9368f116a 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -45,7 +45,7 @@
45void sas_scsi_recover_host(struct Scsi_Host *shost); 45void sas_scsi_recover_host(struct Scsi_Host *shost);
46 46
47int sas_show_class(enum sas_class class, char *buf); 47int sas_show_class(enum sas_class class, char *buf);
48int sas_show_proto(enum sas_proto proto, char *buf); 48int sas_show_proto(enum sas_protocol proto, char *buf);
49int sas_show_linkrate(enum sas_linkrate linkrate, char *buf); 49int sas_show_linkrate(enum sas_linkrate linkrate, char *buf);
50int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf); 50int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
51 51
@@ -80,6 +80,20 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
80 80
81void sas_hae_reset(struct work_struct *work); 81void sas_hae_reset(struct work_struct *work);
82 82
83#ifdef CONFIG_SCSI_SAS_HOST_SMP
84extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
85 struct request *rsp);
86#else
87static inline int sas_smp_host_handler(struct Scsi_Host *shost,
88 struct request *req,
89 struct request *rsp)
90{
91 shost_printk(KERN_ERR, shost,
92 "Cannot send SMP to a sas host (not enabled in CONFIG)\n");
93 return -EINVAL;
94}
95#endif
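
Editor's note: with this stub, callers may invoke sas_smp_host_handler() unconditionally. A hedged sketch of the expected dispatch site (assumed, not shown in this hunk): SMP requests addressed to the HA itself carry no rphy, so

	if (!rphy)	/* request targets the host adapter, not an expander */
		return sas_smp_host_handler(shost, req, rsp);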
96
83static inline void sas_queue_event(int event, spinlock_t *lock, 97static inline void sas_queue_event(int event, spinlock_t *lock,
84 unsigned long *pending, 98 unsigned long *pending,
85 struct work_struct *work, 99 struct work_struct *work,
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a3fdc57e2673..f869fba86807 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -108,7 +108,7 @@ static void sas_scsi_task_done(struct sas_task *task)
108 break; 108 break;
109 case SAM_CHECK_COND: 109 case SAM_CHECK_COND:
110 memcpy(sc->sense_buffer, ts->buf, 110 memcpy(sc->sense_buffer, ts->buf,
111 max(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size)); 111 min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
112 stat = SAM_CHECK_COND; 112 stat = SAM_CHECK_COND;
113 break; 113 break;
114 default: 114 default:
@@ -148,7 +148,6 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
148 if (!task) 148 if (!task)
149 return NULL; 149 return NULL;
150 150
151 *(u32 *)cmd->sense_buffer = 0;
152 task->uldd_task = cmd; 151 task->uldd_task = cmd;
153 ASSIGN_SAS_TASK(cmd, task); 152 ASSIGN_SAS_TASK(cmd, task);
154 153
@@ -200,6 +199,10 @@ int sas_queue_up(struct sas_task *task)
200 */ 199 */
201int sas_queuecommand(struct scsi_cmnd *cmd, 200int sas_queuecommand(struct scsi_cmnd *cmd,
202 void (*scsi_done)(struct scsi_cmnd *)) 201 void (*scsi_done)(struct scsi_cmnd *))
202 __releases(host->host_lock)
203 __acquires(dev->sata_dev.ap->lock)
204 __releases(dev->sata_dev.ap->lock)
205 __acquires(host->host_lock)
203{ 206{
204 int res = 0; 207 int res = 0;
205 struct domain_device *dev = cmd_to_domain_dev(cmd); 208 struct domain_device *dev = cmd_to_domain_dev(cmd);
@@ -410,7 +413,7 @@ static int sas_recover_I_T(struct domain_device *dev)
410} 413}
411 414
412/* Find the sas_phy that's attached to this device */ 415/* Find the sas_phy that's attached to this device */
413struct sas_phy *find_local_sas_phy(struct domain_device *dev) 416static struct sas_phy *find_local_sas_phy(struct domain_device *dev)
414{ 417{
415 struct domain_device *pdev = dev->parent; 418 struct domain_device *pdev = dev->parent;
416 struct ex_phy *exphy = NULL; 419 struct ex_phy *exphy = NULL;
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
new file mode 100644
index 000000000000..594524d5bfa1
--- /dev/null
+++ b/drivers/scsi/libsas/sas_task.c
@@ -0,0 +1,36 @@
1#include <linux/kernel.h>
2#include <scsi/sas.h>
3#include <scsi/libsas.h>
4
5/* fill task_status_struct based on SSP response frame */
6void sas_ssp_task_response(struct device *dev, struct sas_task *task,
7 struct ssp_response_iu *iu)
8{
9 struct task_status_struct *tstat = &task->task_status;
10
11 tstat->resp = SAS_TASK_COMPLETE;
12
13 if (iu->datapres == 0)
14 tstat->stat = iu->status;
15 else if (iu->datapres == 1)
16 tstat->stat = iu->resp_data[3];
17 else if (iu->datapres == 2) {
18 tstat->stat = SAM_CHECK_COND;
19 tstat->buf_valid_size =
20 min_t(int, SAS_STATUS_BUF_SIZE,
21 be32_to_cpu(iu->sense_data_len));
22 memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
23
24 if (iu->status != SAM_CHECK_COND)
25 dev_printk(KERN_WARNING, dev,
26 "dev %llx sent sense data, but "
27 "stat(%x) is not CHECK CONDITION\n",
28 SAS_ADDR(task->dev->sas_addr),
29 iu->status);
30 }
31 else
32 /* when datapres contains corrupt/unknown value... */
33 tstat->stat = SAM_CHECK_COND;
34}
35EXPORT_SYMBOL_GPL(sas_ssp_task_response);
36
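A hedged usage sketch (editor's addition; resp_frame and ha_dev are hypothetical names): per SSP, datapres values 0/1/2 mean no data, response data, and sense data respectively, and an LLDD's completion path can hand the raw response IU to the new helper instead of open-coding that decode:

	struct ssp_response_iu *iu = (struct ssp_response_iu *) resp_frame;

	sas_ssp_task_response(ha_dev, task, iu);	/* fills task->task_status */
	task->task_done(task);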
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 2ad0a27dbaab..5cff0204227d 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -192,18 +192,18 @@ static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
192 192
193 if (dma_map) { 193 if (dma_map) {
194 iue = (struct iu_entry *) sc->SCp.ptr; 194 iue = (struct iu_entry *) sc->SCp.ptr;
195 sg = sc->request_buffer; 195 sg = scsi_sglist(sc);
196 196
197 dprintk("%p %u %u %d\n", iue, sc->request_bufflen, 197 dprintk("%p %u %u %d\n", iue, scsi_bufflen(sc),
198 md->len, sc->use_sg); 198 md->len, scsi_sg_count(sc));
199 199
200 nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, 200 nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
201 DMA_BIDIRECTIONAL); 201 DMA_BIDIRECTIONAL);
202 if (!nsg) { 202 if (!nsg) {
203 printk("fail to map %p %d\n", iue, sc->use_sg); 203 printk("fail to map %p %d\n", iue, scsi_sg_count(sc));
204 return 0; 204 return 0;
205 } 205 }
206 len = min(sc->request_bufflen, md->len); 206 len = min(scsi_bufflen(sc), md->len);
207 } else 207 } else
208 len = md->len; 208 len = md->len;
209 209
@@ -229,10 +229,10 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
229 229
230 if (dma_map || ext_desc) { 230 if (dma_map || ext_desc) {
231 iue = (struct iu_entry *) sc->SCp.ptr; 231 iue = (struct iu_entry *) sc->SCp.ptr;
232 sg = sc->request_buffer; 232 sg = scsi_sglist(sc);
233 233
234 dprintk("%p %u %u %d %d\n", 234 dprintk("%p %u %u %d %d\n",
235 iue, sc->request_bufflen, id->len, 235 iue, scsi_bufflen(sc), id->len,
236 cmd->data_in_desc_cnt, cmd->data_out_desc_cnt); 236 cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
237 } 237 }
238 238
@@ -268,13 +268,14 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
268 268
269rdma: 269rdma:
270 if (dma_map) { 270 if (dma_map) {
271 nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL); 271 nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
272 DMA_BIDIRECTIONAL);
272 if (!nsg) { 273 if (!nsg) {
273 eprintk("fail to map %p %d\n", iue, sc->use_sg); 274 eprintk("fail to map %p %d\n", iue, scsi_sg_count(sc));
274 err = -EIO; 275 err = -EIO;
275 goto free_mem; 276 goto free_mem;
276 } 277 }
277 len = min(sc->request_bufflen, id->len); 278 len = min(scsi_bufflen(sc), id->len);
278 } else 279 } else
279 len = id->len; 280 len = id->len;
280 281
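
The libsrp changes above are part of the SCSI data accessor conversion; the idiom, summarized (editor's note):

	/*	old-style field access          accessor equivalent
	 *	sg  = sc->request_buffer;   ->  sg  = scsi_sglist(sc);
	 *	n   = sc->use_sg;           ->  n   = scsi_sg_count(sc);
	 *	len = sc->request_bufflen;  ->  len = scsi_bufflen(sc);
	 */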
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index ba3ecab9baf3..f26b9538affe 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -29,7 +29,8 @@ struct lpfc_sli2_slim;
29#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact 29#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
30 the NameServer before giving up. */ 30 the NameServer before giving up. */
31#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ 31#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
32#define LPFC_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ 32#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
33#define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */
33#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 34#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
34#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 35#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
35 36
@@ -68,6 +69,7 @@ struct lpfc_dmabuf {
68 struct list_head list; 69 struct list_head list;
69 void *virt; /* virtual address ptr */ 70 void *virt; /* virtual address ptr */
70 dma_addr_t phys; /* mapped address */ 71 dma_addr_t phys; /* mapped address */
72 uint32_t buffer_tag; /* used for tagged queue ring */
71}; 73};
72 74
73struct lpfc_dma_pool { 75struct lpfc_dma_pool {
@@ -272,10 +274,16 @@ struct lpfc_vport {
272#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ 274#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
273#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ 275#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
274#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */ 276#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
275#define FC_RFF_NOT_SUPPORTED 0x40000 /* RFF_ID was rejected by switch */
276#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ 277#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
277#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ 278#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
278 279
280 uint32_t ct_flags;
281#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
282#define FC_CT_RNN_ID 0x2 /* RNN_ID accepted by switch */
283#define FC_CT_RSNN_NN 0x4 /* RSNN_NN accepted by switch */
284#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */
285#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */
286
279 struct list_head fc_nodes; 287 struct list_head fc_nodes;
280 288
281 /* Keep counters for the number of entries in each list. */ 289 /* Keep counters for the number of entries in each list. */
@@ -344,6 +352,7 @@ struct lpfc_vport {
344 uint32_t cfg_discovery_threads; 352 uint32_t cfg_discovery_threads;
345 uint32_t cfg_log_verbose; 353 uint32_t cfg_log_verbose;
346 uint32_t cfg_max_luns; 354 uint32_t cfg_max_luns;
355 uint32_t cfg_enable_da_id;
347 356
348 uint32_t dev_loss_tmo_changed; 357 uint32_t dev_loss_tmo_changed;
349 358
@@ -360,6 +369,7 @@ struct lpfc_vport {
360 369
361struct hbq_s { 370struct hbq_s {
362 uint16_t entry_count; /* Current number of HBQ slots */ 371 uint16_t entry_count; /* Current number of HBQ slots */
372 uint16_t buffer_count; /* Current number of buffers posted */
363 uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */ 373 uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */
364 uint32_t hbqPutIdx; /* HBQ slot to use */ 374 uint32_t hbqPutIdx; /* HBQ slot to use */
365 uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */ 375 uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
@@ -377,6 +387,11 @@ struct hbq_s {
377#define LPFC_ELS_HBQ 0 387#define LPFC_ELS_HBQ 0
378#define LPFC_EXTRA_HBQ 1 388#define LPFC_EXTRA_HBQ 1
379 389
390enum hba_temp_state {
391 HBA_NORMAL_TEMP,
392 HBA_OVER_TEMP
393};
394
380struct lpfc_hba { 395struct lpfc_hba {
381 struct lpfc_sli sli; 396 struct lpfc_sli sli;
382 uint32_t sli_rev; /* SLI2 or SLI3 */ 397 uint32_t sli_rev; /* SLI2 or SLI3 */
@@ -457,7 +472,8 @@ struct lpfc_hba {
457 uint64_t cfg_soft_wwnn; 472 uint64_t cfg_soft_wwnn;
458 uint64_t cfg_soft_wwpn; 473 uint64_t cfg_soft_wwpn;
459 uint32_t cfg_hba_queue_depth; 474 uint32_t cfg_hba_queue_depth;
460 475 uint32_t cfg_enable_hba_reset;
476 uint32_t cfg_enable_hba_heartbeat;
461 477
462 lpfc_vpd_t vpd; /* vital product data */ 478 lpfc_vpd_t vpd; /* vital product data */
463 479
@@ -544,8 +560,7 @@ struct lpfc_hba {
544 struct list_head port_list; 560 struct list_head port_list;
545 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ 561 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
546 uint16_t max_vpi; /* Maximum virtual nports */ 562 uint16_t max_vpi; /* Maximum virtual nports */
547#define LPFC_MAX_VPI 100 /* Max number of VPI supported */ 563#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
548#define LPFC_MAX_VPORTS (LPFC_MAX_VPI+1)/* Max number of VPorts supported */
549 unsigned long *vpi_bmask; /* vpi allocation table */ 564 unsigned long *vpi_bmask; /* vpi allocation table */
550 565
551 /* Data structure used by fabric iocb scheduler */ 566 /* Data structure used by fabric iocb scheduler */
@@ -563,16 +578,30 @@ struct lpfc_hba {
563 struct dentry *hba_debugfs_root; 578 struct dentry *hba_debugfs_root;
564 atomic_t debugfs_vport_count; 579 atomic_t debugfs_vport_count;
565 struct dentry *debug_hbqinfo; 580 struct dentry *debug_hbqinfo;
566 struct dentry *debug_dumpslim; 581 struct dentry *debug_dumpHostSlim;
582 struct dentry *debug_dumpHBASlim;
567 struct dentry *debug_slow_ring_trc; 583 struct dentry *debug_slow_ring_trc;
568 struct lpfc_debugfs_trc *slow_ring_trc; 584 struct lpfc_debugfs_trc *slow_ring_trc;
569 atomic_t slow_ring_trc_cnt; 585 atomic_t slow_ring_trc_cnt;
570#endif 586#endif
571 587
588 /* Used for deferred freeing of ELS data buffers */
589 struct list_head elsbuf;
590 int elsbuf_cnt;
591 int elsbuf_prev_cnt;
592
593 uint8_t temp_sensor_support;
572 /* Fields used for heart beat. */ 594 /* Fields used for heart beat. */
573 unsigned long last_completion_time; 595 unsigned long last_completion_time;
574 struct timer_list hb_tmofunc; 596 struct timer_list hb_tmofunc;
575 uint8_t hb_outstanding; 597 uint8_t hb_outstanding;
598 /*
599 * Following bit will be set for all buffer tags which are not
600 * associated with any HBQ.
601 */
602#define QUE_BUFTAG_BIT (1<<31)
603 uint32_t buffer_tag_count;
604 enum hba_temp_state over_temp_state;
576}; 605};
577 606
578static inline struct Scsi_Host * 607static inline struct Scsi_Host *
@@ -598,5 +627,15 @@ lpfc_is_link_up(struct lpfc_hba *phba)
598 phba->link_state == LPFC_HBA_READY; 627 phba->link_state == LPFC_HBA_READY;
599} 628}
600 629
601#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ 630#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
631#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature
632 event */
602 633
634struct temp_event {
635 uint32_t event_type;
636 uint32_t event_code;
637 uint32_t data;
638};
639#define LPFC_CRIT_TEMP 0x1
640#define LPFC_THRESHOLD_TEMP 0x2
641#define LPFC_NORMAL_TEMP 0x3
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 80a11218b9bb..4bae4a2ed2f1 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -45,6 +45,10 @@
45#define LPFC_MIN_DEVLOSS_TMO 1 45#define LPFC_MIN_DEVLOSS_TMO 1
46#define LPFC_MAX_DEVLOSS_TMO 255 46#define LPFC_MAX_DEVLOSS_TMO 255
47 47
48#define LPFC_MAX_LINK_SPEED 8
49#define LPFC_LINK_SPEED_BITMAP 0x00000117
50#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
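
Editor's check of the bitmap above: bit n set means a link speed setting of n is selectable, so bits 0, 1, 2, 4 and 8 give

	/* (1 << 0) | (1 << 1) | (1 << 2) | (1 << 4) | (1 << 8)
	 *   = 0x001 + 0x002 + 0x004 + 0x010 + 0x100 = 0x117 */

matching LPFC_LINK_SPEED_STRING.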
51
48static void 52static void
49lpfc_jedec_to_ascii(int incr, char hdw[]) 53lpfc_jedec_to_ascii(int incr, char hdw[])
50{ 54{
@@ -86,6 +90,15 @@ lpfc_serialnum_show(struct class_device *cdev, char *buf)
86} 90}
87 91
88static ssize_t 92static ssize_t
93lpfc_temp_sensor_show(struct class_device *cdev, char *buf)
94{
95 struct Scsi_Host *shost = class_to_shost(cdev);
96 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
97 struct lpfc_hba *phba = vport->phba;
 98	return snprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
99}
100
101static ssize_t
89lpfc_modeldesc_show(struct class_device *cdev, char *buf) 102lpfc_modeldesc_show(struct class_device *cdev, char *buf)
90{ 103{
91 struct Scsi_Host *shost = class_to_shost(cdev); 104 struct Scsi_Host *shost = class_to_shost(cdev);
@@ -178,12 +191,9 @@ lpfc_state_show(struct class_device *cdev, char *buf)
178 case LPFC_LINK_UP: 191 case LPFC_LINK_UP:
179 case LPFC_CLEAR_LA: 192 case LPFC_CLEAR_LA:
180 case LPFC_HBA_READY: 193 case LPFC_HBA_READY:
181 len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n"); 194 len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
182 195
183 switch (vport->port_state) { 196 switch (vport->port_state) {
184 len += snprintf(buf + len, PAGE_SIZE-len,
185 "initializing\n");
186 break;
187 case LPFC_LOCAL_CFG_LINK: 197 case LPFC_LOCAL_CFG_LINK:
188 len += snprintf(buf + len, PAGE_SIZE-len, 198 len += snprintf(buf + len, PAGE_SIZE-len,
189 "Configuring Link\n"); 199 "Configuring Link\n");
@@ -252,8 +262,7 @@ lpfc_issue_lip(struct Scsi_Host *shost)
252 int mbxstatus = MBXERR_ERROR; 262 int mbxstatus = MBXERR_ERROR;
253 263
254 if ((vport->fc_flag & FC_OFFLINE_MODE) || 264 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
255 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) || 265 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
256 (vport->port_state != LPFC_VPORT_READY))
257 return -EPERM; 266 return -EPERM;
258 267
259 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); 268 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
@@ -305,12 +314,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
305 314
306 psli = &phba->sli; 315 psli = &phba->sli;
307 316
317 /* Wait a little for things to settle down, but not
318 * long enough for dev loss timeout to expire.
319 */
308 for (i = 0; i < psli->num_rings; i++) { 320 for (i = 0; i < psli->num_rings; i++) {
309 pring = &psli->ring[i]; 321 pring = &psli->ring[i];
310 /* The linkdown event takes 30 seconds to timeout. */
311 while (pring->txcmplq_cnt) { 322 while (pring->txcmplq_cnt) {
312 msleep(10); 323 msleep(10);
313 if (cnt++ > 3000) { 324 if (cnt++ > 500) { /* 5 secs */
314 lpfc_printf_log(phba, 325 lpfc_printf_log(phba,
315 KERN_WARNING, LOG_INIT, 326 KERN_WARNING, LOG_INIT,
316 "0466 Outstanding IO when " 327 "0466 Outstanding IO when "
@@ -336,6 +347,9 @@ lpfc_selective_reset(struct lpfc_hba *phba)
336 struct completion online_compl; 347 struct completion online_compl;
337 int status = 0; 348 int status = 0;
338 349
350 if (!phba->cfg_enable_hba_reset)
351 return -EIO;
352
339 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 353 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
340 354
341 if (status != 0) 355 if (status != 0)
@@ -409,6 +423,8 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
409 struct completion online_compl; 423 struct completion online_compl;
410 int status=0; 424 int status=0;
411 425
426 if (!phba->cfg_enable_hba_reset)
427 return -EACCES;
412 init_completion(&online_compl); 428 init_completion(&online_compl);
413 429
414 if(strncmp(buf, "online", sizeof("online") - 1) == 0) { 430 if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
@@ -908,6 +924,8 @@ static CLASS_DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
908static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); 924static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
909static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); 925static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
910static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); 926static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
927static CLASS_DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show,
928 NULL);
911 929
912 930
913static char *lpfc_soft_wwn_key = "C99G71SL8032A"; 931static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -971,6 +989,14 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
971 unsigned int i, j, cnt=count; 989 unsigned int i, j, cnt=count;
972 u8 wwpn[8]; 990 u8 wwpn[8];
973 991
992 if (!phba->cfg_enable_hba_reset)
993 return -EACCES;
994 spin_lock_irq(&phba->hbalock);
995 if (phba->over_temp_state == HBA_OVER_TEMP) {
996 spin_unlock_irq(&phba->hbalock);
997 return -EACCES;
998 }
999 spin_unlock_irq(&phba->hbalock);
974 /* count may include a LF at end of string */ 1000 /* count may include a LF at end of string */
975 if (buf[cnt-1] == '\n') 1001 if (buf[cnt-1] == '\n')
976 cnt--; 1002 cnt--;
@@ -1102,7 +1128,13 @@ MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
1102 " 2 - select SLI-2 even on SLI-3 capable HBAs," 1128 " 2 - select SLI-2 even on SLI-3 capable HBAs,"
1103 " 3 - select SLI-3"); 1129 " 3 - select SLI-3");
1104 1130
1105LPFC_ATTR_R(enable_npiv, 0, 0, 1, "Enable NPIV functionality"); 1131int lpfc_enable_npiv = 0;
1132module_param(lpfc_enable_npiv, int, 0);
1133MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
1134lpfc_param_show(enable_npiv);
1135lpfc_param_init(enable_npiv, 0, 0, 1);
1136static CLASS_DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
1137 lpfc_enable_npiv_show, NULL);
1106 1138
1107/* 1139/*
1108# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 1140# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -1248,6 +1280,13 @@ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff,
1248 "Verbose logging bit-mask"); 1280 "Verbose logging bit-mask");
1249 1281
1250/* 1282/*
1283# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
1284# objects that have been registered with the nameserver after login.
1285*/
1286LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1,
1287 "Deregister nameserver objects before LOGO");
1288
1289/*
1251# lun_queue_depth: This parameter is used to limit the number of outstanding 1290# lun_queue_depth: This parameter is used to limit the number of outstanding
1252# commands per FCP LUN. Value range is [1,128]. Default value is 30. 1291# commands per FCP LUN. Value range is [1,128]. Default value is 30.
1253*/ 1292*/
@@ -1369,7 +1408,33 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
1369# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. 1408# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
1370# Default value is 0. 1409# Default value is 0.
1371*/ 1410*/
1372LPFC_ATTR_RW(topology, 0, 0, 6, "Select Fibre Channel topology"); 1411static int
1412lpfc_topology_set(struct lpfc_hba *phba, int val)
1413{
1414 int err;
1415 uint32_t prev_val;
1416 if (val >= 0 && val <= 6) {
1417 prev_val = phba->cfg_topology;
1418 phba->cfg_topology = val;
1419 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
1420 if (err)
1421 phba->cfg_topology = prev_val;
1422 return err;
1423 }
1424 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1425 "%d:0467 lpfc_topology attribute cannot be set to %d, "
1426 "allowed range is [0, 6]\n",
1427 phba->brd_no, val);
1428 return -EINVAL;
1429}
1430static int lpfc_topology = 0;
1431module_param(lpfc_topology, int, 0);
1432MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
1433lpfc_param_show(topology)
1434lpfc_param_init(topology, 0, 0, 6)
1435lpfc_param_store(topology)
1436static CLASS_DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
1437 lpfc_topology_show, lpfc_topology_store);
1373 1438
1374/* 1439/*
1375# lpfc_link_speed: Link speed selection for initializing the Fibre Channel 1440# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
@@ -1381,7 +1446,59 @@ LPFC_ATTR_RW(topology, 0, 0, 6, "Select Fibre Channel topology");
1381# 8 = 8 Gigabaud 1446# 8 = 8 Gigabaud
1382# Value range is [0,8]. Default value is 0. 1447# Value range is [0,8]. Default value is 0.
1383*/ 1448*/
1384LPFC_ATTR_R(link_speed, 0, 0, 8, "Select link speed"); 1449static int
1450lpfc_link_speed_set(struct lpfc_hba *phba, int val)
1451{
1452 int err;
1453 uint32_t prev_val;
1454
1455 if (((val == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
1456 ((val == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
1457 ((val == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
1458 ((val == LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
1459 ((val == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)))
1460 return -EINVAL;
1461
1462 if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED)
1463 && (LPFC_LINK_SPEED_BITMAP & (1 << val))) {
1464 prev_val = phba->cfg_link_speed;
1465 phba->cfg_link_speed = val;
1466 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
1467 if (err)
1468 phba->cfg_link_speed = prev_val;
1469 return err;
1470 }
1471
1472 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1473 "%d:0469 lpfc_link_speed attribute cannot be set to %d, "
1474 "allowed range is [0, 8]\n",
1475 phba->brd_no, val);
1476 return -EINVAL;
1477}
1478
1479static int lpfc_link_speed = 0;
1480module_param(lpfc_link_speed, int, 0);
1481MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
1482lpfc_param_show(link_speed)
1483static int
1484lpfc_link_speed_init(struct lpfc_hba *phba, int val)
1485{
1486 if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED)
1487 && (LPFC_LINK_SPEED_BITMAP & (1 << val))) {
1488 phba->cfg_link_speed = val;
1489 return 0;
1490 }
1491 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1492 "0454 lpfc_link_speed attribute cannot "
1493 "be set to %d, allowed values are "
1494 "["LPFC_LINK_SPEED_STRING"]\n", val);
1495 phba->cfg_link_speed = 0;
1496 return -EINVAL;
1497}
1498
1499lpfc_param_store(link_speed)
1500static CLASS_DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
1501 lpfc_link_speed_show, lpfc_link_speed_store);
1385 1502
1386/* 1503/*
1387# lpfc_fcp_class: Determines FC class to use for the FCP protocol. 1504# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
@@ -1479,7 +1596,30 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
1479*/ 1596*/
1480LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); 1597LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
1481 1598
1599/*
1600# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
1601# 0 = HBA resets disabled
1602# 1 = HBA resets enabled (default)
1603# Value range is [0,1]. Default value is 1.
1604*/
1605LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
1606
1607/*
1608# lpfc_enable_hba_heartbeat: Enable HBA heartbeat timer.
1609# 0 = HBA Heartbeat disabled
1610# 1 = HBA Heartbeat enabled (default)
1611# Value range is [0,1]. Default value is 1.
1612*/
1613LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
1482 1614
1615/*
1616 * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count
1617 * This value can be set to values between 64 and 256. The default value is
1618 * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
1619 * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
1620 */
1621LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
1622 LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
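
A worked example of the sizing rule described above (editor's note; assumes the common case of one 4 KiB page per scatter-gather segment):

	/* max I/O per command = sg_seg_cnt * segment size:
	 *	default  64 segments * 4 KiB = 256 KiB
	 *	maximum 256 segments * 4 KiB =   1 MiB
	 */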
1483 1623
1484struct class_device_attribute *lpfc_hba_attrs[] = { 1624struct class_device_attribute *lpfc_hba_attrs[] = {
1485 &class_device_attr_info, 1625 &class_device_attr_info,
@@ -1494,6 +1634,7 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
1494 &class_device_attr_state, 1634 &class_device_attr_state,
1495 &class_device_attr_num_discovered_ports, 1635 &class_device_attr_num_discovered_ports,
1496 &class_device_attr_lpfc_drvr_version, 1636 &class_device_attr_lpfc_drvr_version,
1637 &class_device_attr_lpfc_temp_sensor,
1497 &class_device_attr_lpfc_log_verbose, 1638 &class_device_attr_lpfc_log_verbose,
1498 &class_device_attr_lpfc_lun_queue_depth, 1639 &class_device_attr_lpfc_lun_queue_depth,
1499 &class_device_attr_lpfc_hba_queue_depth, 1640 &class_device_attr_lpfc_hba_queue_depth,
@@ -1530,6 +1671,9 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
1530 &class_device_attr_lpfc_soft_wwnn, 1671 &class_device_attr_lpfc_soft_wwnn,
1531 &class_device_attr_lpfc_soft_wwpn, 1672 &class_device_attr_lpfc_soft_wwpn,
1532 &class_device_attr_lpfc_soft_wwn_enable, 1673 &class_device_attr_lpfc_soft_wwn_enable,
1674 &class_device_attr_lpfc_enable_hba_reset,
1675 &class_device_attr_lpfc_enable_hba_heartbeat,
1676 &class_device_attr_lpfc_sg_seg_cnt,
1533 NULL, 1677 NULL,
1534}; 1678};
1535 1679
@@ -1552,6 +1696,7 @@ struct class_device_attribute *lpfc_vport_attrs[] = {
1552 &class_device_attr_lpfc_max_luns, 1696 &class_device_attr_lpfc_max_luns,
1553 &class_device_attr_nport_evt_cnt, 1697 &class_device_attr_nport_evt_cnt,
1554 &class_device_attr_npiv_info, 1698 &class_device_attr_npiv_info,
1699 &class_device_attr_lpfc_enable_da_id,
1555 NULL, 1700 NULL,
1556}; 1701};
1557 1702
@@ -1727,13 +1872,18 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1727 1872
1728 spin_lock_irq(&phba->hbalock); 1873 spin_lock_irq(&phba->hbalock);
1729 1874
1875 if (phba->over_temp_state == HBA_OVER_TEMP) {
1876 sysfs_mbox_idle(phba);
1877 spin_unlock_irq(&phba->hbalock);
1878 return -EACCES;
1879 }
1880
1730 if (off == 0 && 1881 if (off == 0 &&
1731 phba->sysfs_mbox.state == SMBOX_WRITING && 1882 phba->sysfs_mbox.state == SMBOX_WRITING &&
1732 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { 1883 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
1733 1884
1734 switch (phba->sysfs_mbox.mbox->mb.mbxCommand) { 1885 switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
1735 /* Offline only */ 1886 /* Offline only */
1736 case MBX_WRITE_NV:
1737 case MBX_INIT_LINK: 1887 case MBX_INIT_LINK:
1738 case MBX_DOWN_LINK: 1888 case MBX_DOWN_LINK:
1739 case MBX_CONFIG_LINK: 1889 case MBX_CONFIG_LINK:
@@ -1744,9 +1894,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1744 case MBX_DUMP_CONTEXT: 1894 case MBX_DUMP_CONTEXT:
1745 case MBX_RUN_DIAGS: 1895 case MBX_RUN_DIAGS:
1746 case MBX_RESTART: 1896 case MBX_RESTART:
1747 case MBX_FLASH_WR_ULA:
1748 case MBX_SET_MASK: 1897 case MBX_SET_MASK:
1749 case MBX_SET_SLIM:
1750 case MBX_SET_DEBUG: 1898 case MBX_SET_DEBUG:
1751 if (!(vport->fc_flag & FC_OFFLINE_MODE)) { 1899 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
1752 printk(KERN_WARNING "mbox_read:Command 0x%x " 1900 printk(KERN_WARNING "mbox_read:Command 0x%x "
@@ -1756,6 +1904,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1756 spin_unlock_irq(&phba->hbalock); 1904 spin_unlock_irq(&phba->hbalock);
1757 return -EPERM; 1905 return -EPERM;
1758 } 1906 }
1907 case MBX_WRITE_NV:
1908 case MBX_WRITE_VPARMS:
1759 case MBX_LOAD_SM: 1909 case MBX_LOAD_SM:
1760 case MBX_READ_NV: 1910 case MBX_READ_NV:
1761 case MBX_READ_CONFIG: 1911 case MBX_READ_CONFIG:
@@ -1772,6 +1922,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1772 case MBX_LOAD_EXP_ROM: 1922 case MBX_LOAD_EXP_ROM:
1773 case MBX_BEACON: 1923 case MBX_BEACON:
1774 case MBX_DEL_LD_ENTRY: 1924 case MBX_DEL_LD_ENTRY:
1925 case MBX_SET_VARIABLE:
1926 case MBX_WRITE_WWN:
1775 break; 1927 break;
1776 case MBX_READ_SPARM64: 1928 case MBX_READ_SPARM64:
1777 case MBX_READ_LA: 1929 case MBX_READ_LA:
@@ -1793,6 +1945,17 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1793 return -EPERM; 1945 return -EPERM;
1794 } 1946 }
1795 1947
1948 /* If HBA encountered an error attention, allow only DUMP
1949 * mailbox command until the HBA is restarted.
1950 */
1951 if ((phba->pport->stopped) &&
1952 (phba->sysfs_mbox.mbox->mb.mbxCommand
1953 != MBX_DUMP_MEMORY)) {
1954 sysfs_mbox_idle(phba);
1955 spin_unlock_irq(&phba->hbalock);
1956 return -EPERM;
1957 }
1958
1796 phba->sysfs_mbox.mbox->vport = vport; 1959 phba->sysfs_mbox.mbox->vport = vport;
1797 1960
1798 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { 1961 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
@@ -1993,7 +2156,8 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
1993 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 2156 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1994 break; 2157 break;
1995 } 2158 }
1996 } 2159 } else
2160 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1997 2161
1998 spin_unlock_irq(shost->host_lock); 2162 spin_unlock_irq(shost->host_lock);
1999} 2163}
@@ -2013,7 +2177,7 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
2013 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); 2177 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
2014 else 2178 else
2015 /* fabric is local port if there is no F/FL_Port */ 2179 /* fabric is local port if there is no F/FL_Port */
2016 node_name = wwn_to_u64(vport->fc_nodename.u.wwn); 2180 node_name = 0;
2017 2181
2018 spin_unlock_irq(shost->host_lock); 2182 spin_unlock_irq(shost->host_lock);
2019 2183
@@ -2337,8 +2501,6 @@ struct fc_function_template lpfc_transport_functions = {
2337 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, 2501 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
2338 .terminate_rport_io = lpfc_terminate_rport_io, 2502 .terminate_rport_io = lpfc_terminate_rport_io,
2339 2503
2340 .vport_create = lpfc_vport_create,
2341 .vport_delete = lpfc_vport_delete,
2342 .dd_fcvport_size = sizeof(struct lpfc_vport *), 2504 .dd_fcvport_size = sizeof(struct lpfc_vport *),
2343}; 2505};
2344 2506
@@ -2414,21 +2576,23 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
2414 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 2576 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
2415 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 2577 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
2416 lpfc_use_msi_init(phba, lpfc_use_msi); 2578 lpfc_use_msi_init(phba, lpfc_use_msi);
2579 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
2580 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
2417 phba->cfg_poll = lpfc_poll; 2581 phba->cfg_poll = lpfc_poll;
2418 phba->cfg_soft_wwnn = 0L; 2582 phba->cfg_soft_wwnn = 0L;
2419 phba->cfg_soft_wwpn = 0L; 2583 phba->cfg_soft_wwpn = 0L;
2420 /* 2584 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
2421 * The total number of segments is the configuration value plus 2 2585 /* Also reinitialize the host templates with new values. */
2422 * since the IOCB need a command and response bde. 2586 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
2423 */ 2587 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
2424 phba->cfg_sg_seg_cnt = LPFC_SG_SEG_CNT + 2;
2425 /* 2588 /*
2426 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 2589 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
2427 * used to create the sg_dma_buf_pool must be dynamically calculated 2590 * used to create the sg_dma_buf_pool must be dynamically calculated.
2591 * 2 segments are added since the IOCB needs a command and response bde.
2428 */ 2592 */
2429 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 2593 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
2430 sizeof(struct fcp_rsp) + 2594 sizeof(struct fcp_rsp) +
2431 (phba->cfg_sg_seg_cnt * sizeof(struct ulp_bde64)); 2595 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
2432 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 2596 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
2433 return; 2597 return;
2434} 2598}
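
Editor's sketch of the pool sizing above (assuming the usual 12-byte struct ulp_bde64; sizes illustrative): each DMA buffer holds the FCP command, the FCP response, and one BDE per data segment plus the two extra BDEs the IOCB itself needs:

	/* with the default cfg_sg_seg_cnt of 64:
	 *	sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)
	 *			  + (64 + 2) * sizeof(struct ulp_bde64)
	 * so raising lpfc_sg_seg_cnt grows every sg_dma_buf_pool allocation. */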
@@ -2448,5 +2612,6 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
2448 lpfc_discovery_threads_init(vport, lpfc_discovery_threads); 2612 lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
2449 lpfc_max_luns_init(vport, lpfc_max_luns); 2613 lpfc_max_luns_init(vport, lpfc_max_luns);
2450 lpfc_scan_down_init(vport, lpfc_scan_down); 2614 lpfc_scan_down_init(vport, lpfc_scan_down);
2615 lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
2451 return; 2616 return;
2452} 2617}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index a599e1510710..50fcb7c930bc 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
25void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 25void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
27
26void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *); 28void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
27int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, 29int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
28 struct lpfc_dmabuf *mp); 30 struct lpfc_dmabuf *mp);
@@ -43,9 +45,9 @@ void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
43struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 45struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
44void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove); 46void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
45int lpfc_linkdown(struct lpfc_hba *); 47int lpfc_linkdown(struct lpfc_hba *);
48void lpfc_port_link_failure(struct lpfc_vport *);
46void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 49void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
47 50
48void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
49void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 51void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
50void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); 52void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
51void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 53void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -66,15 +68,15 @@ int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
66void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t); 68void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
67struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *); 69struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
68int lpfc_nlp_put(struct lpfc_nodelist *); 70int lpfc_nlp_put(struct lpfc_nodelist *);
71int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
69struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t); 72struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
70void lpfc_disc_list_loopmap(struct lpfc_vport *); 73void lpfc_disc_list_loopmap(struct lpfc_vport *);
71void lpfc_disc_start(struct lpfc_vport *); 74void lpfc_disc_start(struct lpfc_vport *);
72void lpfc_disc_flush_list(struct lpfc_vport *);
73void lpfc_cleanup_discovery_resources(struct lpfc_vport *); 75void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
76void lpfc_cleanup(struct lpfc_vport *);
74void lpfc_disc_timeout(unsigned long); 77void lpfc_disc_timeout(unsigned long);
75 78
76struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); 79struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
77struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
78 80
79void lpfc_worker_wake_up(struct lpfc_hba *); 81void lpfc_worker_wake_up(struct lpfc_hba *);
80int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t); 82int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
@@ -82,17 +84,17 @@ int lpfc_do_work(void *);
82int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *, 84int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
83 uint32_t); 85 uint32_t);
84 86
85void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
86 struct lpfc_nodelist *);
87void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); 87void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
88int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, 88int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
89 struct serv_parm *, uint32_t); 89 struct serv_parm *, uint32_t);
90int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); 90int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
91void lpfc_more_plogi(struct lpfc_vport *);
92void lpfc_more_adisc(struct lpfc_vport *);
93void lpfc_end_rscn(struct lpfc_vport *);
91int lpfc_els_chk_latt(struct lpfc_vport *); 94int lpfc_els_chk_latt(struct lpfc_vport *);
92int lpfc_els_abort_flogi(struct lpfc_hba *); 95int lpfc_els_abort_flogi(struct lpfc_hba *);
93int lpfc_initial_flogi(struct lpfc_vport *); 96int lpfc_initial_flogi(struct lpfc_vport *);
94int lpfc_initial_fdisc(struct lpfc_vport *); 97int lpfc_initial_fdisc(struct lpfc_vport *);
95int lpfc_issue_els_fdisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
96int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t); 98int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
97int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); 99int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
98int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); 100int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
@@ -112,7 +114,6 @@ int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *,
112void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *); 114void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
113void lpfc_els_retry_delay(unsigned long); 115void lpfc_els_retry_delay(unsigned long);
114void lpfc_els_retry_delay_handler(struct lpfc_nodelist *); 116void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
115void lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *);
116void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 117void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
117 struct lpfc_iocbq *); 118 struct lpfc_iocbq *);
118int lpfc_els_handle_rscn(struct lpfc_vport *); 119int lpfc_els_handle_rscn(struct lpfc_vport *);
@@ -124,7 +125,6 @@ int lpfc_els_disc_adisc(struct lpfc_vport *);
124int lpfc_els_disc_plogi(struct lpfc_vport *); 125int lpfc_els_disc_plogi(struct lpfc_vport *);
125void lpfc_els_timeout(unsigned long); 126void lpfc_els_timeout(unsigned long);
126void lpfc_els_timeout_handler(struct lpfc_vport *); 127void lpfc_els_timeout_handler(struct lpfc_vport *);
127void lpfc_hb_timeout(unsigned long);
128void lpfc_hb_timeout_handler(struct lpfc_hba *); 128void lpfc_hb_timeout_handler(struct lpfc_hba *);
129 129
130void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 130void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -142,7 +142,6 @@ void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
142int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int); 142int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
143void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int); 143void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
144int lpfc_online(struct lpfc_hba *); 144int lpfc_online(struct lpfc_hba *);
145void lpfc_block_mgmt_io(struct lpfc_hba *);
146void lpfc_unblock_mgmt_io(struct lpfc_hba *); 145void lpfc_unblock_mgmt_io(struct lpfc_hba *);
147void lpfc_offline_prep(struct lpfc_hba *); 146void lpfc_offline_prep(struct lpfc_hba *);
148void lpfc_offline(struct lpfc_hba *); 147void lpfc_offline(struct lpfc_hba *);
@@ -165,7 +164,6 @@ int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
165 164
166void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, 165void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
167 uint32_t , LPFC_MBOXQ_t *); 166 uint32_t , LPFC_MBOXQ_t *);
168struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *, uint32_t);
169struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); 167struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
170void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); 168void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
171 169
@@ -178,7 +176,6 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba);
178void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba); 176void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
179struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); 177struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
180void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); 178void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
181void __lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
182uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); 179uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
183 180
184void lpfc_reset_barrier(struct lpfc_hba * phba); 181void lpfc_reset_barrier(struct lpfc_hba * phba);
@@ -204,11 +201,14 @@ int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
204struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, 201struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
205 struct lpfc_sli_ring *, 202 struct lpfc_sli_ring *,
206 dma_addr_t); 203 dma_addr_t);
204
205uint32_t lpfc_sli_get_buffer_tag(struct lpfc_hba *);
206struct lpfc_dmabuf * lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *,
207 struct lpfc_sli_ring *, uint32_t );
208
207int lpfc_sli_hbq_count(void); 209int lpfc_sli_hbq_count(void);
208int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t);
209int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t); 210int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
210void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *); 211void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
211struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t);
212int lpfc_sli_hbq_size(void); 212int lpfc_sli_hbq_size(void);
213int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *, 213int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
214 struct lpfc_iocbq *); 214 struct lpfc_iocbq *);
@@ -219,9 +219,6 @@ int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
219void lpfc_mbox_timeout(unsigned long); 219void lpfc_mbox_timeout(unsigned long);
220void lpfc_mbox_timeout_handler(struct lpfc_hba *); 220void lpfc_mbox_timeout_handler(struct lpfc_hba *);
221 221
222struct lpfc_nodelist *__lpfc_find_node(struct lpfc_vport *, node_filter,
223 void *);
224struct lpfc_nodelist *lpfc_find_node(struct lpfc_vport *, node_filter, void *);
225struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t); 222struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
226struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, 223struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
227 struct lpfc_name *); 224 struct lpfc_name *);
@@ -260,6 +257,7 @@ extern struct scsi_host_template lpfc_vport_template;
260extern struct fc_function_template lpfc_transport_functions; 257extern struct fc_function_template lpfc_transport_functions;
261extern struct fc_function_template lpfc_vport_transport_functions; 258extern struct fc_function_template lpfc_vport_transport_functions;
262extern int lpfc_sli_mode; 259extern int lpfc_sli_mode;
260extern int lpfc_enable_npiv;
263 261
264int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); 262int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
265void lpfc_terminate_rport_io(struct fc_rport *); 263void lpfc_terminate_rport_io(struct fc_rport *);
@@ -281,11 +279,8 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
281extern struct lpfc_hbq_init *lpfc_hbq_defs[]; 279extern struct lpfc_hbq_init *lpfc_hbq_defs[];
282 280
283/* Interface exported by fabric iocb scheduler */ 281/* Interface exported by fabric iocb scheduler */
284int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
285void lpfc_fabric_abort_vport(struct lpfc_vport *);
286void lpfc_fabric_abort_nport(struct lpfc_nodelist *); 282void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
287void lpfc_fabric_abort_hba(struct lpfc_hba *); 283void lpfc_fabric_abort_hba(struct lpfc_hba *);
288void lpfc_fabric_abort_flogi(struct lpfc_hba *);
289void lpfc_fabric_block_timeout(unsigned long); 284void lpfc_fabric_block_timeout(unsigned long);
290void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); 285void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
291void lpfc_adjust_queue_depth(struct lpfc_hba *); 286void lpfc_adjust_queue_depth(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c701e4d611a9..92441ce610ed 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -19,7 +19,7 @@
19 *******************************************************************/ 19 *******************************************************************/
20 20
21/* 21/*
22 * Fibre Channel SCSI LAN Device Driver CT support 22 * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS
23 */ 23 */
24 24
25#include <linux/blkdev.h> 25#include <linux/blkdev.h>
@@ -57,45 +57,27 @@
57 57
58static char *lpfc_release_version = LPFC_DRIVER_VERSION; 58static char *lpfc_release_version = LPFC_DRIVER_VERSION;
59 59
60/*
61 * lpfc_ct_unsol_event
62 */
63static void 60static void
64lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 61lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
65 struct lpfc_dmabuf *mp, uint32_t size) 62 struct lpfc_dmabuf *mp, uint32_t size)
66{ 63{
67 if (!mp) { 64 if (!mp) {
68 printk(KERN_ERR "%s (%d): Unsolited CT, no buffer, " 65 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
69 "piocbq = %p, status = x%x, mp = %p, size = %d\n", 66 "0146 Ignoring unsolicted CT No HBQ "
70 __FUNCTION__, __LINE__, 67 "status = x%x\n",
71 piocbq, piocbq->iocb.ulpStatus, mp, size); 68 piocbq->iocb.ulpStatus);
72 } 69 }
73 70 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
 74 printk(KERN_ERR "%s (%d): Ignoring unsolicited CT piocbq = %p, " 71 "0145 Ignoring unsolicited CT HBQ Size:%d "
75 "buffer = %p, size = %d, status = x%x\n", 72 "status = x%x\n",
76 __FUNCTION__, __LINE__, 73 size, piocbq->iocb.ulpStatus);
77 piocbq, mp, size,
78 piocbq->iocb.ulpStatus);
79
80} 74}
81 75
82static void 76static void
83lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 77lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
84 struct lpfc_dmabuf *mp, uint32_t size) 78 struct lpfc_dmabuf *mp, uint32_t size)
85{ 79{
86 if (!mp) { 80 lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
87 printk(KERN_ERR "%s (%d): Unsolited CT, no "
88 "HBQ buffer, piocbq = %p, status = x%x\n",
89 __FUNCTION__, __LINE__,
90 piocbq, piocbq->iocb.ulpStatus);
91 } else {
92 lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
93 printk(KERN_ERR "%s (%d): Ignoring unsolicted CT "
94 "piocbq = %p, buffer = %p, size = %d, "
95 "status = x%x\n",
96 __FUNCTION__, __LINE__,
97 piocbq, mp, size, piocbq->iocb.ulpStatus);
98 }
99} 81}
100 82
101void 83void
@@ -109,11 +91,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
109 struct lpfc_iocbq *iocbq; 91 struct lpfc_iocbq *iocbq;
110 dma_addr_t paddr; 92 dma_addr_t paddr;
111 uint32_t size; 93 uint32_t size;
112 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2; 94 struct list_head head;
113 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; 95 struct lpfc_dmabuf *bdeBuf;
114
115 piocbq->context2 = NULL;
116 piocbq->context3 = NULL;
117 96
118 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { 97 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
119 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 98 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
@@ -122,7 +101,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
122 /* Not enough posted buffers; Try posting more buffers */ 101 /* Not enough posted buffers; Try posting more buffers */
123 phba->fc_stat.NoRcvBuf++; 102 phba->fc_stat.NoRcvBuf++;
124 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 103 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
125 lpfc_post_buffer(phba, pring, 0, 1); 104 lpfc_post_buffer(phba, pring, 2, 1);
126 return; 105 return;
127 } 106 }
128 107
@@ -133,38 +112,34 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
133 return; 112 return;
134 113
135 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 114 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
136 list_for_each_entry(iocbq, &piocbq->list, list) { 115 INIT_LIST_HEAD(&head);
116 list_add_tail(&head, &piocbq->list);
117 list_for_each_entry(iocbq, &head, list) {
137 icmd = &iocbq->iocb; 118 icmd = &iocbq->iocb;
138 if (icmd->ulpBdeCount == 0) { 119 if (icmd->ulpBdeCount == 0)
139 printk(KERN_ERR "%s (%d): Unsolited CT, no "
140 "BDE, iocbq = %p, status = x%x\n",
141 __FUNCTION__, __LINE__,
142 iocbq, iocbq->iocb.ulpStatus);
143 continue; 120 continue;
144 } 121 bdeBuf = iocbq->context2;
145 122 iocbq->context2 = NULL;
146 size = icmd->un.cont64[0].tus.f.bdeSize; 123 size = icmd->un.cont64[0].tus.f.bdeSize;
147 lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf1, size); 124 lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
148 lpfc_in_buf_free(phba, bdeBuf1); 125 lpfc_in_buf_free(phba, bdeBuf);
149 if (icmd->ulpBdeCount == 2) { 126 if (icmd->ulpBdeCount == 2) {
150 lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf2, 127 bdeBuf = iocbq->context3;
151 size); 128 iocbq->context3 = NULL;
152 lpfc_in_buf_free(phba, bdeBuf2); 129 size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize;
130 lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf,
131 size);
132 lpfc_in_buf_free(phba, bdeBuf);
153 } 133 }
154 } 134 }
135 list_del(&head);
155 } else { 136 } else {
156 struct lpfc_iocbq *next; 137 struct lpfc_iocbq *next;
157 138
158 list_for_each_entry_safe(iocbq, next, &piocbq->list, list) { 139 list_for_each_entry_safe(iocbq, next, &piocbq->list, list) {
159 icmd = &iocbq->iocb; 140 icmd = &iocbq->iocb;
160 if (icmd->ulpBdeCount == 0) { 141 if (icmd->ulpBdeCount == 0)
161 printk(KERN_ERR "%s (%d): Unsolicited CT, no " 142 lpfc_ct_unsol_buffer(phba, piocbq, NULL, 0);
162 "BDE, iocbq = %p, status = x%x\n",
163 __FUNCTION__, __LINE__,
164 iocbq, iocbq->iocb.ulpStatus);
165 continue;
166 }
167
168 for (i = 0; i < icmd->ulpBdeCount; i++) { 143 for (i = 0; i < icmd->ulpBdeCount; i++) {
169 paddr = getPaddr(icmd->un.cont64[i].addrHigh, 144 paddr = getPaddr(icmd->un.cont64[i].addrHigh,
170 icmd->un.cont64[i].addrLow); 145 icmd->un.cont64[i].addrLow);
@@ -176,6 +151,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
176 } 151 }
177 list_del(&iocbq->list); 152 list_del(&iocbq->list);
178 lpfc_sli_release_iocbq(phba, iocbq); 153 lpfc_sli_release_iocbq(phba, iocbq);
154 lpfc_post_buffer(phba, pring, i, 1);
179 } 155 }
180 } 156 }
181} 157}
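
In the HBQ branch above, the fix splices a temporary list_head onto piocbq->list before walking the chain: list_for_each_entry() treats its second argument as a bare head node and never visits it, so iterating directly from &piocbq->list would skip piocbq itself and leak its buffers. A minimal, self-contained sketch of the idiom (a userspace reimplementation of the relevant list primitives, with simplified names):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = entry;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, __typeof__(*pos), member); \
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, __typeof__(*pos), member))

struct iocb { int id; struct list_head list; };

int main(void)
{
	struct iocb a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct list_head head;
	struct iocb *pos;

	/* a ring of iocbs with no dedicated head node, like piocbq->list */
	INIT_LIST_HEAD(&a.list);
	list_add_tail(&b.list, &a.list);
	list_add_tail(&c.list, &a.list);

	/* naive walk: &a.list acts as the head, so 'a' is never visited */
	list_for_each_entry(pos, &a.list, list)
		printf("%d ", pos->id);
	printf("\n");				/* prints: 2 3 */

	/* splice a temporary head in, as the hunk does, and every
	 * element, including 'a', is visited */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &a.list);
	list_for_each_entry(pos, &head, list)
		printf("%d ", pos->id);
	printf("\n");				/* prints: 1 2 3 */

	list_del(&head);	/* unsplice; the ring is intact again */
	return 0;
}

Run as-is this prints "2 3" for the naive traversal and "1 2 3" once the temporary head is spliced in, which is exactly what the INIT_LIST_HEAD(&head) / list_add_tail(&head, &piocbq->list) / list_del(&head) sequence in the hunk buys.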
@@ -203,7 +179,7 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
203 struct lpfc_dmabuf *mp; 179 struct lpfc_dmabuf *mp;
204 int cnt, i = 0; 180 int cnt, i = 0;
205 181
206 /* We get chucks of FCELSSIZE */ 182 /* We get chunks of FCELSSIZE */
207 cnt = size > FCELSSIZE ? FCELSSIZE: size; 183 cnt = size > FCELSSIZE ? FCELSSIZE: size;
208 184
209 while (size) { 185 while (size) {
@@ -426,6 +402,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
426 402
427 lpfc_set_disctmo(vport); 403 lpfc_set_disctmo(vport);
428 vport->num_disc_nodes = 0; 404 vport->num_disc_nodes = 0;
405 vport->fc_ns_retry = 0;
429 406
430 407
431 list_add_tail(&head, &mp->list); 408 list_add_tail(&head, &mp->list);
@@ -458,7 +435,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
458 ((lpfc_find_vport_by_did(phba, Did) == NULL) || 435 ((lpfc_find_vport_by_did(phba, Did) == NULL) ||
459 vport->cfg_peer_port_login)) { 436 vport->cfg_peer_port_login)) {
460 if ((vport->port_type != LPFC_NPIV_PORT) || 437 if ((vport->port_type != LPFC_NPIV_PORT) ||
461 (vport->fc_flag & FC_RFF_NOT_SUPPORTED) || 438 (!(vport->ct_flags & FC_CT_RFF_ID)) ||
462 (!vport->cfg_restrict_login)) { 439 (!vport->cfg_restrict_login)) {
463 ndlp = lpfc_setup_disc_node(vport, Did); 440 ndlp = lpfc_setup_disc_node(vport, Did);
464 if (ndlp) { 441 if (ndlp) {
@@ -506,7 +483,17 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
506 Did, vport->fc_flag, 483 Did, vport->fc_flag,
507 vport->fc_rscn_id_cnt); 484 vport->fc_rscn_id_cnt);
508 485
509 if (lpfc_ns_cmd(vport, 486 /* This NPortID was previously
487 * an FCP target. Don't even
488 * bother to send GFF_ID.
489 */
490 ndlp = lpfc_findnode_did(vport,
491 Did);
492 if (ndlp && (ndlp->nlp_type &
493 NLP_FCP_TARGET))
494 lpfc_setup_disc_node
495 (vport, Did);
496 else if (lpfc_ns_cmd(vport,
510 SLI_CTNS_GFF_ID, 497 SLI_CTNS_GFF_ID,
511 0, Did) == 0) 498 0, Did) == 0)
512 vport->num_disc_nodes++; 499 vport->num_disc_nodes++;
@@ -554,7 +541,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
554 struct lpfc_dmabuf *outp; 541 struct lpfc_dmabuf *outp;
555 struct lpfc_sli_ct_request *CTrsp; 542 struct lpfc_sli_ct_request *CTrsp;
556 struct lpfc_nodelist *ndlp; 543 struct lpfc_nodelist *ndlp;
557 int rc; 544 int rc, retry;
558 545
559 /* First save ndlp, before we overwrite it */ 546 /* First save ndlp, before we overwrite it */
560 ndlp = cmdiocb->context_un.ndlp; 547 ndlp = cmdiocb->context_un.ndlp;
@@ -574,7 +561,6 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
574 if (vport->load_flag & FC_UNLOADING) 561 if (vport->load_flag & FC_UNLOADING)
575 goto out; 562 goto out;
576 563
577
578 if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) { 564 if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) {
579 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 565 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
580 "0216 Link event during NS query\n"); 566 "0216 Link event during NS query\n");
@@ -585,14 +571,35 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
585 if (irsp->ulpStatus) { 571 if (irsp->ulpStatus) {
586 /* Check for retry */ 572 /* Check for retry */
587 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { 573 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
588 if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 574 retry = 1;
589 (irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)) 575 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
576 switch (irsp->un.ulpWord[4]) {
577 case IOERR_NO_RESOURCES:
578 /* We don't increment the retry
579 * count for this case.
580 */
581 break;
582 case IOERR_LINK_DOWN:
583 case IOERR_SLI_ABORTED:
584 case IOERR_SLI_DOWN:
585 retry = 0;
586 break;
587 default:
588 vport->fc_ns_retry++;
589 }
590 }
591 else
590 vport->fc_ns_retry++; 592 vport->fc_ns_retry++;
591 /* CT command is being retried */ 593
592 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 594 if (retry) {
595 /* CT command is being retried */
596 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
593 vport->fc_ns_retry, 0); 597 vport->fc_ns_retry, 0);
594 if (rc == 0) 598 if (rc == 0) {
595 goto out; 599 /* success */
600 goto out;
601 }
602 }
596 } 603 }
597 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 604 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
598 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 605 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
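
The retry logic above stops treating every GID_FT failure alike: IOERR_NO_RESOURCES is retried without consuming a retry slot, the link-down family (IOERR_LINK_DOWN, IOERR_SLI_ABORTED, IOERR_SLI_DOWN) gives up immediately, and everything else counts against LPFC_MAX_NS_RETRY. A standalone sketch of that classification; the enum values here stand in for the real IOERR_* codes:

#include <stdio.h>

/* illustrative stand-ins for the IOERR_* reason codes used above */
enum reason { NO_RESOURCES, LINK_DOWN, SLI_ABORTED, SLI_DOWN, OTHER };

#define MAX_NS_RETRY	3

/* returns 1 if the command should be reissued, mirroring the hunk:
 * NO_RESOURCES retries for free, the link-down family never retries,
 * anything else consumes one of the MAX_NS_RETRY attempts */
static int should_retry(int local_reject, enum reason why, int *retries)
{
	if (*retries >= MAX_NS_RETRY)
		return 0;
	if (!local_reject) {
		(*retries)++;
		return 1;
	}
	switch (why) {
	case NO_RESOURCES:
		return 1;		/* transient; don't burn a retry */
	case LINK_DOWN:
	case SLI_ABORTED:
	case SLI_DOWN:
		return 0;		/* the link is gone; retrying is futile */
	default:
		(*retries)++;
		return 1;
	}
}

int main(void)
{
	int retries = 0;

	printf("%d\n", should_retry(1, NO_RESOURCES, &retries)); /* 1, retries stays 0 */
	printf("%d\n", should_retry(1, OTHER, &retries));	  /* 1, retries now 1 */
	printf("%d\n", should_retry(1, LINK_DOWN, &retries));	  /* 0 */
	return 0;
}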
@@ -698,7 +705,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
698 struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1; 705 struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
699 struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2; 706 struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
700 struct lpfc_sli_ct_request *CTrsp; 707 struct lpfc_sli_ct_request *CTrsp;
701 int did; 708 int did, rc, retry;
702 uint8_t fbits; 709 uint8_t fbits;
703 struct lpfc_nodelist *ndlp; 710 struct lpfc_nodelist *ndlp;
704 711
@@ -729,6 +736,39 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
729 } 736 }
730 } 737 }
731 else { 738 else {
739 /* Check for retry */
740 if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
741 retry = 1;
742 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
743 switch (irsp->un.ulpWord[4]) {
744 case IOERR_NO_RESOURCES:
745 /* We don't increment the retry
746 * count for this case.
747 */
748 break;
749 case IOERR_LINK_DOWN:
750 case IOERR_SLI_ABORTED:
751 case IOERR_SLI_DOWN:
752 retry = 0;
753 break;
754 default:
755 cmdiocb->retry++;
756 }
757 }
758 else
759 cmdiocb->retry++;
760
761 if (retry) {
762 /* CT command is being retried */
763 rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
764 cmdiocb->retry, did);
765 if (rc == 0) {
766 /* success */
767 lpfc_ct_free_iocb(phba, cmdiocb);
768 return;
769 }
770 }
771 }
732 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 772 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
733 "0267 NameServer GFF Rsp " 773 "0267 NameServer GFF Rsp "
734 "x%x Error (%d %d) Data: x%x x%x\n", 774 "x%x Error (%d %d) Data: x%x x%x\n",
@@ -778,8 +818,8 @@ out:
778 818
779 819
780static void 820static void
781lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 821lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
782 struct lpfc_iocbq *rspiocb) 822 struct lpfc_iocbq *rspiocb)
783{ 823{
784 struct lpfc_vport *vport = cmdiocb->vport; 824 struct lpfc_vport *vport = cmdiocb->vport;
785 struct lpfc_dmabuf *inp; 825 struct lpfc_dmabuf *inp;
@@ -809,7 +849,7 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
809 849
810 /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */ 850 /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
811 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 851 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
812 "0209 RFT request completes, latt %d, " 852 "0209 CT Request completes, latt %d, "
813 "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n", 853 "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
814 latt, irsp->ulpStatus, 854 latt, irsp->ulpStatus,
815 CTrsp->CommandResponse.bits.CmdRsp, 855 CTrsp->CommandResponse.bits.CmdRsp,
@@ -848,10 +888,44 @@ out:
848} 888}
849 889
850static void 890static void
891lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
892 struct lpfc_iocbq *rspiocb)
893{
894 IOCB_t *irsp = &rspiocb->iocb;
895 struct lpfc_vport *vport = cmdiocb->vport;
896
897 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
898 struct lpfc_dmabuf *outp;
899 struct lpfc_sli_ct_request *CTrsp;
900
901 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
902 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
903 if (CTrsp->CommandResponse.bits.CmdRsp ==
904 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
905 vport->ct_flags |= FC_CT_RFT_ID;
906 }
907 lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
908 return;
909}
910
911static void
851lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 912lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
852 struct lpfc_iocbq *rspiocb) 913 struct lpfc_iocbq *rspiocb)
853{ 914{
854 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); 915 IOCB_t *irsp = &rspiocb->iocb;
916 struct lpfc_vport *vport = cmdiocb->vport;
917
918 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
919 struct lpfc_dmabuf *outp;
920 struct lpfc_sli_ct_request *CTrsp;
921
922 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
923 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
924 if (CTrsp->CommandResponse.bits.CmdRsp ==
925 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
926 vport->ct_flags |= FC_CT_RNN_ID;
927 }
928 lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
855 return; 929 return;
856} 930}
857 931
@@ -859,7 +933,20 @@ static void
859lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 933lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
860 struct lpfc_iocbq *rspiocb) 934 struct lpfc_iocbq *rspiocb)
861{ 935{
862 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); 936 IOCB_t *irsp = &rspiocb->iocb;
937 struct lpfc_vport *vport = cmdiocb->vport;
938
939 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
940 struct lpfc_dmabuf *outp;
941 struct lpfc_sli_ct_request *CTrsp;
942
943 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
944 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
945 if (CTrsp->CommandResponse.bits.CmdRsp ==
946 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
947 vport->ct_flags |= FC_CT_RSPN_ID;
948 }
949 lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
863 return; 950 return;
864} 951}
865 952
@@ -867,7 +954,32 @@ static void
867lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 954lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
868 struct lpfc_iocbq *rspiocb) 955 struct lpfc_iocbq *rspiocb)
869{ 956{
870 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); 957 IOCB_t *irsp = &rspiocb->iocb;
958 struct lpfc_vport *vport = cmdiocb->vport;
959
960 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
961 struct lpfc_dmabuf *outp;
962 struct lpfc_sli_ct_request *CTrsp;
963
964 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
965 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
966 if (CTrsp->CommandResponse.bits.CmdRsp ==
967 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
968 vport->ct_flags |= FC_CT_RSNN_NN;
969 }
970 lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
971 return;
972}
973
974static void
975lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
976 struct lpfc_iocbq *rspiocb)
977{
978 struct lpfc_vport *vport = cmdiocb->vport;
979
980 /* even if it fails we will act as though it succeeded. */
981 vport->ct_flags = 0;
982 lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
871 return; 983 return;
872} 984}
873 985
@@ -878,10 +990,17 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
878 IOCB_t *irsp = &rspiocb->iocb; 990 IOCB_t *irsp = &rspiocb->iocb;
879 struct lpfc_vport *vport = cmdiocb->vport; 991 struct lpfc_vport *vport = cmdiocb->vport;
880 992
881 if (irsp->ulpStatus != IOSTAT_SUCCESS) 993 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
882 vport->fc_flag |= FC_RFF_NOT_SUPPORTED; 994 struct lpfc_dmabuf *outp;
995 struct lpfc_sli_ct_request *CTrsp;
883 996
884 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); 997 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
998 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
999 if (CTrsp->CommandResponse.bits.CmdRsp ==
1000 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
1001 vport->ct_flags |= FC_CT_RFF_ID;
1002 }
1003 lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
885 return; 1004 return;
886} 1005}
887 1006
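
Each completion handler above records a successful registration by setting a per-command bit in vport->ct_flags, and the issue paths in lpfc_ns_cmd() further down clear the matching bit before sending, so a stale accept can never survive a re-registration. The protocol reduced to a runnable toy; the flag value is illustrative, not the driver's FC_CT_RFF_ID definition:

#include <stdio.h>

/* one bit per nameserver registration; value made up for the sketch */
#define CT_RFF_ID	0x01

static unsigned int ct_flags;

/* issue path: clear the bit so a stale accept cannot survive a
 * re-registration (the ~FC_CT_RFF_ID clear in lpfc_ns_cmd) */
static void issue_rff_id(void)
{
	ct_flags &= ~CT_RFF_ID;
	/* ... send the CT request ... */
}

/* completion path: set the bit only on an explicit FS_ACC */
static void cmpl_rff_id(int fs_acc)
{
	if (fs_acc)
		ct_flags |= CT_RFF_ID;
}

int main(void)
{
	issue_rff_id();
	cmpl_rff_id(1);
	/* discovery later gates GFF_ID on this bit, as in the
	 * lpfc_ns_rsp() check earlier in the diff */
	printf("RFF_ID registered: %s\n",
	       (ct_flags & CT_RFF_ID) ? "yes" : "no");
	return 0;
}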
@@ -1001,6 +1120,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1001 bpl->tus.f.bdeSize = RSPN_REQUEST_SZ; 1120 bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
1002 else if (cmdcode == SLI_CTNS_RSNN_NN) 1121 else if (cmdcode == SLI_CTNS_RSNN_NN)
1003 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; 1122 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
1123 else if (cmdcode == SLI_CTNS_DA_ID)
1124 bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ;
1004 else if (cmdcode == SLI_CTNS_RFF_ID) 1125 else if (cmdcode == SLI_CTNS_RFF_ID)
1005 bpl->tus.f.bdeSize = RFF_REQUEST_SZ; 1126 bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
1006 else 1127 else
@@ -1029,31 +1150,34 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1029 case SLI_CTNS_GFF_ID: 1150 case SLI_CTNS_GFF_ID:
1030 CtReq->CommandResponse.bits.CmdRsp = 1151 CtReq->CommandResponse.bits.CmdRsp =
1031 be16_to_cpu(SLI_CTNS_GFF_ID); 1152 be16_to_cpu(SLI_CTNS_GFF_ID);
1032 CtReq->un.gff.PortId = be32_to_cpu(context); 1153 CtReq->un.gff.PortId = cpu_to_be32(context);
1033 cmpl = lpfc_cmpl_ct_cmd_gff_id; 1154 cmpl = lpfc_cmpl_ct_cmd_gff_id;
1034 break; 1155 break;
1035 1156
1036 case SLI_CTNS_RFT_ID: 1157 case SLI_CTNS_RFT_ID:
1158 vport->ct_flags &= ~FC_CT_RFT_ID;
1037 CtReq->CommandResponse.bits.CmdRsp = 1159 CtReq->CommandResponse.bits.CmdRsp =
1038 be16_to_cpu(SLI_CTNS_RFT_ID); 1160 be16_to_cpu(SLI_CTNS_RFT_ID);
1039 CtReq->un.rft.PortId = be32_to_cpu(vport->fc_myDID); 1161 CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
1040 CtReq->un.rft.fcpReg = 1; 1162 CtReq->un.rft.fcpReg = 1;
1041 cmpl = lpfc_cmpl_ct_cmd_rft_id; 1163 cmpl = lpfc_cmpl_ct_cmd_rft_id;
1042 break; 1164 break;
1043 1165
1044 case SLI_CTNS_RNN_ID: 1166 case SLI_CTNS_RNN_ID:
1167 vport->ct_flags &= ~FC_CT_RNN_ID;
1045 CtReq->CommandResponse.bits.CmdRsp = 1168 CtReq->CommandResponse.bits.CmdRsp =
1046 be16_to_cpu(SLI_CTNS_RNN_ID); 1169 be16_to_cpu(SLI_CTNS_RNN_ID);
1047 CtReq->un.rnn.PortId = be32_to_cpu(vport->fc_myDID); 1170 CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID);
1048 memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename, 1171 memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename,
1049 sizeof (struct lpfc_name)); 1172 sizeof (struct lpfc_name));
1050 cmpl = lpfc_cmpl_ct_cmd_rnn_id; 1173 cmpl = lpfc_cmpl_ct_cmd_rnn_id;
1051 break; 1174 break;
1052 1175
1053 case SLI_CTNS_RSPN_ID: 1176 case SLI_CTNS_RSPN_ID:
1177 vport->ct_flags &= ~FC_CT_RSPN_ID;
1054 CtReq->CommandResponse.bits.CmdRsp = 1178 CtReq->CommandResponse.bits.CmdRsp =
1055 be16_to_cpu(SLI_CTNS_RSPN_ID); 1179 be16_to_cpu(SLI_CTNS_RSPN_ID);
1056 CtReq->un.rspn.PortId = be32_to_cpu(vport->fc_myDID); 1180 CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID);
1057 size = sizeof(CtReq->un.rspn.symbname); 1181 size = sizeof(CtReq->un.rspn.symbname);
1058 CtReq->un.rspn.len = 1182 CtReq->un.rspn.len =
1059 lpfc_vport_symbolic_port_name(vport, 1183 lpfc_vport_symbolic_port_name(vport,
@@ -1061,6 +1185,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1061 cmpl = lpfc_cmpl_ct_cmd_rspn_id; 1185 cmpl = lpfc_cmpl_ct_cmd_rspn_id;
1062 break; 1186 break;
1063 case SLI_CTNS_RSNN_NN: 1187 case SLI_CTNS_RSNN_NN:
1188 vport->ct_flags &= ~FC_CT_RSNN_NN;
1064 CtReq->CommandResponse.bits.CmdRsp = 1189 CtReq->CommandResponse.bits.CmdRsp =
1065 be16_to_cpu(SLI_CTNS_RSNN_NN); 1190 be16_to_cpu(SLI_CTNS_RSNN_NN);
1066 memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename, 1191 memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
@@ -1071,11 +1196,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1071 CtReq->un.rsnn.symbname, size); 1196 CtReq->un.rsnn.symbname, size);
1072 cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; 1197 cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
1073 break; 1198 break;
1199 case SLI_CTNS_DA_ID:
1200 /* Implement DA_ID Nameserver request */
1201 CtReq->CommandResponse.bits.CmdRsp =
1202 be16_to_cpu(SLI_CTNS_DA_ID);
1203 CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID);
1204 cmpl = lpfc_cmpl_ct_cmd_da_id;
1205 break;
1074 case SLI_CTNS_RFF_ID: 1206 case SLI_CTNS_RFF_ID:
1075 vport->fc_flag &= ~FC_RFF_NOT_SUPPORTED; 1207 vport->ct_flags &= ~FC_CT_RFF_ID;
1076 CtReq->CommandResponse.bits.CmdRsp = 1208 CtReq->CommandResponse.bits.CmdRsp =
1077 be16_to_cpu(SLI_CTNS_RFF_ID); 1209 be16_to_cpu(SLI_CTNS_RFF_ID);
1078 CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID); 1210 CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
1079 CtReq->un.rff.fbits = FC4_FEATURE_INIT; 1211 CtReq->un.rff.fbits = FC4_FEATURE_INIT;
1080 CtReq->un.rff.type_code = FC_FCP_DATA; 1212 CtReq->un.rff.type_code = FC_FCP_DATA;
1081 cmpl = lpfc_cmpl_ct_cmd_rff_id; 1213 cmpl = lpfc_cmpl_ct_cmd_rff_id;
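
Several hunks above also flip be32_to_cpu() into cpu_to_be32() when storing the port ID into wire-format CT fields. On any given host the two helpers perform the same byte swap, so the generated code is identical; the rewrite documents the conversion direction (CPU value into a big-endian field) and keeps sparse's __be32 type checking honest. A portable illustration using htonl(), which performs the same CPU-to-big-endian conversion:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl(): CPU order -> big-endian, like cpu_to_be32() */

int main(void)
{
	uint32_t did = 0xfffffc;	/* the FC name server well-known DID */
	uint32_t wire = htonl(did);	/* what cpu_to_be32(did) would store */
	const uint8_t *b = (const uint8_t *)&wire;

	/* already in wire order: most significant byte first on any host */
	printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;
}

This prints "00 ff ff fc" regardless of host byte order, since the bytes are already in wire order after the swap.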
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index d6a98bc970ff..783d1eea13ef 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -43,6 +43,7 @@
43#include "lpfc_crtn.h" 43#include "lpfc_crtn.h"
44#include "lpfc_vport.h" 44#include "lpfc_vport.h"
45#include "lpfc_version.h" 45#include "lpfc_version.h"
46#include "lpfc_compat.h"
46#include "lpfc_debugfs.h" 47#include "lpfc_debugfs.h"
47 48
48#ifdef CONFIG_LPFC_DEBUG_FS 49#ifdef CONFIG_LPFC_DEBUG_FS
@@ -75,18 +76,18 @@ module_param(lpfc_debugfs_enable, int, 0);
75MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services"); 76MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
76 77
77/* This MUST be a power of 2 */ 78/* This MUST be a power of 2 */
78static int lpfc_debugfs_max_disc_trc = 0; 79static int lpfc_debugfs_max_disc_trc;
79module_param(lpfc_debugfs_max_disc_trc, int, 0); 80module_param(lpfc_debugfs_max_disc_trc, int, 0);
80MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc, 81MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
81 "Set debugfs discovery trace depth"); 82 "Set debugfs discovery trace depth");
82 83
83/* This MUST be a power of 2 */ 84/* This MUST be a power of 2 */
84static int lpfc_debugfs_max_slow_ring_trc = 0; 85static int lpfc_debugfs_max_slow_ring_trc;
85module_param(lpfc_debugfs_max_slow_ring_trc, int, 0); 86module_param(lpfc_debugfs_max_slow_ring_trc, int, 0);
86MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, 87MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
87 "Set debugfs slow ring trace depth"); 88 "Set debugfs slow ring trace depth");
88 89
89static int lpfc_debugfs_mask_disc_trc = 0; 90int lpfc_debugfs_mask_disc_trc;
90module_param(lpfc_debugfs_mask_disc_trc, int, 0); 91module_param(lpfc_debugfs_mask_disc_trc, int, 0);
91MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, 92MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
92 "Set debugfs discovery trace mask"); 93 "Set debugfs discovery trace mask");
@@ -100,8 +101,11 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
100#define LPFC_NODELIST_SIZE 8192 101#define LPFC_NODELIST_SIZE 8192
101#define LPFC_NODELIST_ENTRY_SIZE 120 102#define LPFC_NODELIST_ENTRY_SIZE 120
102 103
103/* dumpslim output buffer size */ 104/* dumpHBASlim output buffer size */
104#define LPFC_DUMPSLIM_SIZE 4096 105#define LPFC_DUMPHBASLIM_SIZE 4096
106
107/* dumpHostSlim output buffer size */
108#define LPFC_DUMPHOSTSLIM_SIZE 4096
105 109
106/* hbqinfo output buffer size */ 110/* hbqinfo output buffer size */
107#define LPFC_HBQINFO_SIZE 8192 111#define LPFC_HBQINFO_SIZE 8192
@@ -243,16 +247,17 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
243 raw_index = phba->hbq_get[i]; 247 raw_index = phba->hbq_get[i];
244 getidx = le32_to_cpu(raw_index); 248 getidx = le32_to_cpu(raw_index);
245 len += snprintf(buf+len, size-len, 249 len += snprintf(buf+len, size-len,
246 "entrys:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n", 250 "entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
247 hbqs->entry_count, hbqs->hbqPutIdx, hbqs->next_hbqPutIdx, 251 hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
248 hbqs->local_hbqGetIdx, getidx); 252 hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
249 253
250 hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt; 254 hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
251 for (j=0; j<hbqs->entry_count; j++) { 255 for (j=0; j<hbqs->entry_count; j++) {
252 len += snprintf(buf+len, size-len, 256 len += snprintf(buf+len, size-len,
253 "%03d: %08x %04x %05x ", j, 257 "%03d: %08x %04x %05x ", j,
254 hbqe->bde.addrLow, hbqe->bde.tus.w, hbqe->buffer_tag); 258 le32_to_cpu(hbqe->bde.addrLow),
255 259 le32_to_cpu(hbqe->bde.tus.w),
260 le32_to_cpu(hbqe->buffer_tag));
256 i = 0; 261 i = 0;
257 found = 0; 262 found = 0;
258 263
@@ -276,7 +281,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
276 list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) { 281 list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) {
277 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 282 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
278 phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff); 283 phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
279 if (phys == hbqe->bde.addrLow) { 284 if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
280 len += snprintf(buf+len, size-len, 285 len += snprintf(buf+len, size-len,
281 "Buf%d: %p %06x\n", i, 286 "Buf%d: %p %06x\n", i,
282 hbq_buf->dbuf.virt, hbq_buf->tag); 287 hbq_buf->dbuf.virt, hbq_buf->tag);
@@ -297,18 +302,58 @@ skipit:
297 return len; 302 return len;
298} 303}
299 304
305static int lpfc_debugfs_last_hba_slim_off;
306
307static int
308lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
309{
310 int len = 0;
311 int i, off;
312 uint32_t *ptr;
313 char buffer[1024];
314
315 off = 0;
316 spin_lock_irq(&phba->hbalock);
317
318 len += snprintf(buf+len, size-len, "HBA SLIM\n");
319 lpfc_memcpy_from_slim(buffer,
320 ((uint8_t *)phba->MBslimaddr) + lpfc_debugfs_last_hba_slim_off,
321 1024);
322
323 ptr = (uint32_t *)&buffer[0];
324 off = lpfc_debugfs_last_hba_slim_off;
325
326 /* Set it up for the next time */
327 lpfc_debugfs_last_hba_slim_off += 1024;
328 if (lpfc_debugfs_last_hba_slim_off >= 4096)
329 lpfc_debugfs_last_hba_slim_off = 0;
330
331 i = 1024;
332 while (i > 0) {
333 len += snprintf(buf+len, size-len,
334 "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
335 off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
336 *(ptr+5), *(ptr+6), *(ptr+7));
337 ptr += 8;
338 i -= (8 * sizeof(uint32_t));
339 off += (8 * sizeof(uint32_t));
340 }
341
342 spin_unlock_irq(&phba->hbalock);
343 return len;
344}
345
300static int 346static int
301lpfc_debugfs_dumpslim_data(struct lpfc_hba *phba, char *buf, int size) 347lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
302{ 348{
303 int len = 0; 349 int len = 0;
304 int cnt, i, off; 350 int i, off;
305 uint32_t word0, word1, word2, word3; 351 uint32_t word0, word1, word2, word3;
306 uint32_t *ptr; 352 uint32_t *ptr;
307 struct lpfc_pgp *pgpp; 353 struct lpfc_pgp *pgpp;
308 struct lpfc_sli *psli = &phba->sli; 354 struct lpfc_sli *psli = &phba->sli;
309 struct lpfc_sli_ring *pring; 355 struct lpfc_sli_ring *pring;
310 356
311 cnt = LPFC_DUMPSLIM_SIZE;
312 off = 0; 357 off = 0;
313 spin_lock_irq(&phba->hbalock); 358 spin_lock_irq(&phba->hbalock);
314 359
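
The new dumpHBASlim reader above keeps a file-scope offset that advances 1024 bytes per open and wraps at 4096, so four consecutive reads of the debugfs file walk the entire HBA SLIM window. A sketch of the rotation in isolation:

#include <stdio.h>

#define SLIM_SIZE	4096	/* whole area, as in the hunk */
#define SLIM_WINDOW	1024	/* bytes dumped per open */

static int last_off;		/* persists across opens, like the static above */

static int next_window(void)
{
	int off = last_off;

	last_off += SLIM_WINDOW;
	if (last_off >= SLIM_SIZE)
		last_off = 0;	/* wrap back to the start */
	return off;
}

int main(void)
{
	int i, off;

	/* four opens of the debugfs file cover the full 4 KB area */
	for (i = 0; i < 4; i++) {
		off = next_window();
		printf("open %d dumps [%4d, %4d)\n", i, off, off + SLIM_WINDOW);
	}
	return 0;
}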
@@ -620,7 +665,34 @@ out:
620} 665}
621 666
622static int 667static int
623lpfc_debugfs_dumpslim_open(struct inode *inode, struct file *file) 668lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
669{
670 struct lpfc_hba *phba = inode->i_private;
671 struct lpfc_debug *debug;
672 int rc = -ENOMEM;
673
674 debug = kmalloc(sizeof(*debug), GFP_KERNEL);
675 if (!debug)
676 goto out;
677
678 /* Round to page boundary */
679 debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
680 if (!debug->buffer) {
681 kfree(debug);
682 goto out;
683 }
684
685 debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer,
686 LPFC_DUMPHBASLIM_SIZE);
687 file->private_data = debug;
688
689 rc = 0;
690out:
691 return rc;
692}
693
694static int
695lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
624{ 696{
625 struct lpfc_hba *phba = inode->i_private; 697 struct lpfc_hba *phba = inode->i_private;
626 struct lpfc_debug *debug; 698 struct lpfc_debug *debug;
@@ -631,14 +703,14 @@ lpfc_debugfs_dumpslim_open(struct inode *inode, struct file *file)
631 goto out; 703 goto out;
632 704
633 /* Round to page boundary */ 705 /* Round to page boundary */
634 debug->buffer = kmalloc(LPFC_DUMPSLIM_SIZE, GFP_KERNEL); 706 debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
635 if (!debug->buffer) { 707 if (!debug->buffer) {
636 kfree(debug); 708 kfree(debug);
637 goto out; 709 goto out;
638 } 710 }
639 711
640 debug->len = lpfc_debugfs_dumpslim_data(phba, debug->buffer, 712 debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer,
641 LPFC_DUMPSLIM_SIZE); 713 LPFC_DUMPHOSTSLIM_SIZE);
642 file->private_data = debug; 714 file->private_data = debug;
643 715
644 rc = 0; 716 rc = 0;
@@ -741,10 +813,19 @@ static struct file_operations lpfc_debugfs_op_hbqinfo = {
741 .release = lpfc_debugfs_release, 813 .release = lpfc_debugfs_release,
742}; 814};
743 815
744#undef lpfc_debugfs_op_dumpslim 816#undef lpfc_debugfs_op_dumpHBASlim
745static struct file_operations lpfc_debugfs_op_dumpslim = { 817static struct file_operations lpfc_debugfs_op_dumpHBASlim = {
818 .owner = THIS_MODULE,
819 .open = lpfc_debugfs_dumpHBASlim_open,
820 .llseek = lpfc_debugfs_lseek,
821 .read = lpfc_debugfs_read,
822 .release = lpfc_debugfs_release,
823};
824
825#undef lpfc_debugfs_op_dumpHostSlim
826static struct file_operations lpfc_debugfs_op_dumpHostSlim = {
746 .owner = THIS_MODULE, 827 .owner = THIS_MODULE,
747 .open = lpfc_debugfs_dumpslim_open, 828 .open = lpfc_debugfs_dumpHostSlim_open,
748 .llseek = lpfc_debugfs_lseek, 829 .llseek = lpfc_debugfs_lseek,
749 .read = lpfc_debugfs_read, 830 .read = lpfc_debugfs_read,
750 .release = lpfc_debugfs_release, 831 .release = lpfc_debugfs_release,
@@ -812,15 +893,27 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
812 goto debug_failed; 893 goto debug_failed;
813 } 894 }
814 895
815 /* Setup dumpslim */ 896 /* Setup dumpHBASlim */
816 snprintf(name, sizeof(name), "dumpslim"); 897 snprintf(name, sizeof(name), "dumpHBASlim");
817 phba->debug_dumpslim = 898 phba->debug_dumpHBASlim =
899 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
900 phba->hba_debugfs_root,
901 phba, &lpfc_debugfs_op_dumpHBASlim);
902 if (!phba->debug_dumpHBASlim) {
903 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
904 "0409 Cannot create debugfs dumpHBASlim\n");
905 goto debug_failed;
906 }
907
908 /* Setup dumpHostSlim */
909 snprintf(name, sizeof(name), "dumpHostSlim");
910 phba->debug_dumpHostSlim =
818 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, 911 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
819 phba->hba_debugfs_root, 912 phba->hba_debugfs_root,
820 phba, &lpfc_debugfs_op_dumpslim); 913 phba, &lpfc_debugfs_op_dumpHostSlim);
821 if (!phba->debug_dumpslim) { 914 if (!phba->debug_dumpHostSlim) {
822 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 915 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
823 "0409 Cannot create debugfs dumpslim\n"); 916 "0409 Cannot create debugfs dumpHostSlim\n");
824 goto debug_failed; 917 goto debug_failed;
825 } 918 }
826 919
@@ -970,9 +1063,13 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
970 debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ 1063 debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
971 phba->debug_hbqinfo = NULL; 1064 phba->debug_hbqinfo = NULL;
972 } 1065 }
973 if (phba->debug_dumpslim) { 1066 if (phba->debug_dumpHBASlim) {
974 debugfs_remove(phba->debug_dumpslim); /* dumpslim */ 1067 debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
975 phba->debug_dumpslim = NULL; 1068 phba->debug_dumpHBASlim = NULL;
1069 }
1070 if (phba->debug_dumpHostSlim) {
1071 debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
1072 phba->debug_dumpHostSlim = NULL;
976 } 1073 }
977 if (phba->slow_ring_trc) { 1074 if (phba->slow_ring_trc) {
978 kfree(phba->slow_ring_trc); 1075 kfree(phba->slow_ring_trc);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index aacac9ac5381..cfe81c50529a 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -36,7 +36,6 @@ enum lpfc_work_type {
36 LPFC_EVT_WARM_START, 36 LPFC_EVT_WARM_START,
37 LPFC_EVT_KILL, 37 LPFC_EVT_KILL,
38 LPFC_EVT_ELS_RETRY, 38 LPFC_EVT_ELS_RETRY,
39 LPFC_EVT_DEV_LOSS_DELAY,
40 LPFC_EVT_DEV_LOSS, 39 LPFC_EVT_DEV_LOSS,
41}; 40};
42 41
@@ -92,6 +91,7 @@ struct lpfc_nodelist {
92#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */ 91#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */
93#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */ 92#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */
94#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */ 93#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */
94#define NLP_DEFER_RM 0x10000 /* Remove this ndlp if no longer used */
95#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */ 95#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */
96#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */ 96#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */
97#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */ 97#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8085900635d4..c6b739dc6bc3 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -18,7 +18,7 @@
18 * more details, a copy of which can be found in the file COPYING * 18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. * 19 * included with this package. *
20 *******************************************************************/ 20 *******************************************************************/
21 21/* See Fibre Channel protocol T11 FC-LS for details */
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
@@ -42,6 +42,14 @@ static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
42 struct lpfc_iocbq *); 42 struct lpfc_iocbq *);
43static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, 43static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
44 struct lpfc_iocbq *); 44 struct lpfc_iocbq *);
45static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
46static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
47 struct lpfc_nodelist *ndlp, uint8_t retry);
48static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
49 struct lpfc_iocbq *iocb);
50static void lpfc_register_new_vport(struct lpfc_hba *phba,
51 struct lpfc_vport *vport,
52 struct lpfc_nodelist *ndlp);
45 53
46static int lpfc_max_els_tries = 3; 54static int lpfc_max_els_tries = 3;
47 55
@@ -109,14 +117,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
109 117
110 /* fill in BDEs for command */ 118 /* fill in BDEs for command */
111 /* Allocate buffer for command payload */ 119 /* Allocate buffer for command payload */
112 if (((pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL)) == 0) || 120 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
113 ((pcmd->virt = lpfc_mbuf_alloc(phba, 121 if (pcmd)
114 MEM_PRI, &(pcmd->phys))) == 0)) { 122 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
115 kfree(pcmd); 123 if (!pcmd || !pcmd->virt)
116 124 goto els_iocb_free_pcmb_exit;
117 lpfc_sli_release_iocbq(phba, elsiocb);
118 return NULL;
119 }
120 125
121 INIT_LIST_HEAD(&pcmd->list); 126 INIT_LIST_HEAD(&pcmd->list);
122 127
@@ -126,13 +131,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
126 if (prsp) 131 if (prsp)
127 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 132 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
128 &prsp->phys); 133 &prsp->phys);
129 if (prsp == 0 || prsp->virt == 0) { 134 if (!prsp || !prsp->virt)
130 kfree(prsp); 135 goto els_iocb_free_prsp_exit;
131 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
132 kfree(pcmd);
133 lpfc_sli_release_iocbq(phba, elsiocb);
134 return NULL;
135 }
136 INIT_LIST_HEAD(&prsp->list); 136 INIT_LIST_HEAD(&prsp->list);
137 } else { 137 } else {
138 prsp = NULL; 138 prsp = NULL;
@@ -143,15 +143,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
143 if (pbuflist) 143 if (pbuflist)
144 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 144 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
145 &pbuflist->phys); 145 &pbuflist->phys);
146 if (pbuflist == 0 || pbuflist->virt == 0) { 146 if (!pbuflist || !pbuflist->virt)
147 lpfc_sli_release_iocbq(phba, elsiocb); 147 goto els_iocb_free_pbuf_exit;
148 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
149 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
150 kfree(pcmd);
151 kfree(prsp);
152 kfree(pbuflist);
153 return NULL;
154 }
155 148
156 INIT_LIST_HEAD(&pbuflist->list); 149 INIT_LIST_HEAD(&pbuflist->list);
157 150
@@ -196,7 +189,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
196 bpl->tus.w = le32_to_cpu(bpl->tus.w); 189 bpl->tus.w = le32_to_cpu(bpl->tus.w);
197 } 190 }
198 191
192 /* prevent preparing iocb with NULL ndlp reference */
199 elsiocb->context1 = lpfc_nlp_get(ndlp); 193 elsiocb->context1 = lpfc_nlp_get(ndlp);
194 if (!elsiocb->context1)
195 goto els_iocb_free_pbuf_exit;
200 elsiocb->context2 = pcmd; 196 elsiocb->context2 = pcmd;
201 elsiocb->context3 = pbuflist; 197 elsiocb->context3 = pbuflist;
202 elsiocb->retry = retry; 198 elsiocb->retry = retry;
@@ -222,8 +218,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
222 cmdSize); 218 cmdSize);
223 } 219 }
224 return elsiocb; 220 return elsiocb;
225}
226 221
222els_iocb_free_pbuf_exit:
223 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
224 kfree(pbuflist);
225
226els_iocb_free_prsp_exit:
227 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
228 kfree(prsp);
229
230els_iocb_free_pcmb_exit:
231 kfree(pcmd);
232 lpfc_sli_release_iocbq(phba, elsiocb);
233 return NULL;
234}
227 235
228static int 236static int
229lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 237lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
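
The reworked allocation path in lpfc_prep_els_iocb() above replaces several copies of hand-rolled cleanup with a single ladder of goto labels, each label freeing exactly what was acquired before the failing step and falling through into the labels below it, in reverse order of acquisition. The idiom in miniature:

#include <stdio.h>
#include <stdlib.h>

/* same shape as the ladder above: acquire in order, and on failure
 * jump to a label that releases only what was already acquired,
 * falling through the remaining labels in reverse order */
static int setup(char **a, char **b, char **c)
{
	*a = malloc(16);
	if (!*a)
		goto fail_a;
	*b = malloc(16);
	if (!*b)
		goto fail_b;
	*c = malloc(16);
	if (!*c)
		goto fail_c;
	return 0;

fail_c:
	free(*b);
fail_b:
	free(*a);
fail_a:
	return -1;
}

int main(void)
{
	char *a, *b, *c;

	if (setup(&a, &b, &c) == 0) {
		puts("all three allocations succeeded");
		free(c);
		free(b);
		free(a);
	}
	return 0;
}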
@@ -234,40 +242,53 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
234 struct lpfc_nodelist *ndlp; 242 struct lpfc_nodelist *ndlp;
235 struct serv_parm *sp; 243 struct serv_parm *sp;
236 int rc; 244 int rc;
245 int err = 0;
237 246
238 sp = &phba->fc_fabparam; 247 sp = &phba->fc_fabparam;
239 ndlp = lpfc_findnode_did(vport, Fabric_DID); 248 ndlp = lpfc_findnode_did(vport, Fabric_DID);
240 if (!ndlp) 249 if (!ndlp) {
250 err = 1;
241 goto fail; 251 goto fail;
252 }
242 253
243 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 254 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
244 if (!mbox) 255 if (!mbox) {
256 err = 2;
245 goto fail; 257 goto fail;
258 }
246 259
247 vport->port_state = LPFC_FABRIC_CFG_LINK; 260 vport->port_state = LPFC_FABRIC_CFG_LINK;
248 lpfc_config_link(phba, mbox); 261 lpfc_config_link(phba, mbox);
249 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 262 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
250 mbox->vport = vport; 263 mbox->vport = vport;
251 264
252 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); 265 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
253 if (rc == MBX_NOT_FINISHED) 266 if (rc == MBX_NOT_FINISHED) {
267 err = 3;
254 goto fail_free_mbox; 268 goto fail_free_mbox;
269 }
255 270
256 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 271 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
257 if (!mbox) 272 if (!mbox) {
273 err = 4;
258 goto fail; 274 goto fail;
275 }
259 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 276 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
260 0); 277 0);
261 if (rc) 278 if (rc) {
279 err = 5;
262 goto fail_free_mbox; 280 goto fail_free_mbox;
281 }
263 282
264 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 283 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
265 mbox->vport = vport; 284 mbox->vport = vport;
266 mbox->context2 = lpfc_nlp_get(ndlp); 285 mbox->context2 = lpfc_nlp_get(ndlp);
267 286
268 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); 287 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
269 if (rc == MBX_NOT_FINISHED) 288 if (rc == MBX_NOT_FINISHED) {
289 err = 6;
270 goto fail_issue_reg_login; 290 goto fail_issue_reg_login;
291 }
271 292
272 return 0; 293 return 0;
273 294
@@ -282,7 +303,7 @@ fail_free_mbox:
282fail: 303fail:
283 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 304 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
284 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 305 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
285 "0249 Cannot issue Register Fabric login\n"); 306 "0249 Cannot issue Register Fabric login: Err %d\n", err);
286 return -ENXIO; 307 return -ENXIO;
287} 308}
288 309
@@ -370,11 +391,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
370 } 391 }
371 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 392 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
372 lpfc_mbx_unreg_vpi(vport); 393 lpfc_mbx_unreg_vpi(vport);
394 spin_lock_irq(shost->host_lock);
373 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 395 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
396 spin_unlock_irq(shost->host_lock);
374 } 397 }
375 } 398 }
376 399
377 ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID;
378 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 400 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
379 401
380 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 402 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
@@ -429,8 +451,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
429 451
430 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 452 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
431 mbox->vport = vport; 453 mbox->vport = vport;
432 rc = lpfc_sli_issue_mbox(phba, mbox, 454 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
433 MBX_NOWAIT | MBX_STOP_IOCB);
434 if (rc == MBX_NOT_FINISHED) { 455 if (rc == MBX_NOT_FINISHED) {
435 mempool_free(mbox, phba->mbox_mem_pool); 456 mempool_free(mbox, phba->mbox_mem_pool);
436 goto fail; 457 goto fail;
@@ -463,6 +484,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
463 lpfc_nlp_put(ndlp); 484 lpfc_nlp_put(ndlp);
464 } 485 }
465 486
487 /* If we are pt2pt with another NPort, force NPIV off! */
488 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
489
466 spin_lock_irq(shost->host_lock); 490 spin_lock_irq(shost->host_lock);
467 vport->fc_flag |= FC_PT2PT; 491 vport->fc_flag |= FC_PT2PT;
468 spin_unlock_irq(shost->host_lock); 492 spin_unlock_irq(shost->host_lock);
@@ -488,6 +512,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
488 512
489 /* Check to see if link went down during discovery */ 513 /* Check to see if link went down during discovery */
490 if (lpfc_els_chk_latt(vport)) { 514 if (lpfc_els_chk_latt(vport)) {
515 /* One additional decrement on node reference count to
516 * trigger the release of the node
517 */
491 lpfc_nlp_put(ndlp); 518 lpfc_nlp_put(ndlp);
492 goto out; 519 goto out;
493 } 520 }
@@ -562,8 +589,13 @@ flogifail:
562 589
563 /* Start discovery */ 590 /* Start discovery */
564 lpfc_disc_start(vport); 591 lpfc_disc_start(vport);
592 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
593 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
594 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
595 (phba->link_state != LPFC_CLEAR_LA)) {
596 /* If FLOGI failed enable link interrupt. */
597 lpfc_issue_clear_la(phba, vport);
565 } 598 }
566
567out: 599out:
568 lpfc_els_free_iocb(phba, cmdiocb); 600 lpfc_els_free_iocb(phba, cmdiocb);
569} 601}
@@ -685,6 +717,9 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
685 struct lpfc_hba *phba = vport->phba; 717 struct lpfc_hba *phba = vport->phba;
686 struct lpfc_nodelist *ndlp; 718 struct lpfc_nodelist *ndlp;
687 719
720 vport->port_state = LPFC_FLOGI;
721 lpfc_set_disctmo(vport);
722
688 /* First look for the Fabric ndlp */ 723 /* First look for the Fabric ndlp */
689 ndlp = lpfc_findnode_did(vport, Fabric_DID); 724 ndlp = lpfc_findnode_did(vport, Fabric_DID);
690 if (!ndlp) { 725 if (!ndlp) {
@@ -696,7 +731,11 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
696 } else { 731 } else {
697 lpfc_dequeue_node(vport, ndlp); 732 lpfc_dequeue_node(vport, ndlp);
698 } 733 }
734
699 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 735 if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
736 /* This decrement of the node reference count will kick off
737 * the release of the node.
738 */
700 lpfc_nlp_put(ndlp); 739 lpfc_nlp_put(ndlp);
701 } 740 }
702 return 1; 741 return 1;
@@ -720,11 +759,16 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
720 lpfc_dequeue_node(vport, ndlp); 759 lpfc_dequeue_node(vport, ndlp);
721 } 760 }
722 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 761 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
762 /* decrement node reference count to trigger the release of
763 * the node.
764 */
723 lpfc_nlp_put(ndlp); 765 lpfc_nlp_put(ndlp);
766 return 0;
724 } 767 }
725 return 1; 768 return 1;
726} 769}
727static void 770
771void
728lpfc_more_plogi(struct lpfc_vport *vport) 772lpfc_more_plogi(struct lpfc_vport *vport)
729{ 773{
730 int sentplogi; 774 int sentplogi;
@@ -752,6 +796,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
752{ 796{
753 struct lpfc_vport *vport = ndlp->vport; 797 struct lpfc_vport *vport = ndlp->vport;
754 struct lpfc_nodelist *new_ndlp; 798 struct lpfc_nodelist *new_ndlp;
799 struct lpfc_rport_data *rdata;
800 struct fc_rport *rport;
755 struct serv_parm *sp; 801 struct serv_parm *sp;
756 uint8_t name[sizeof(struct lpfc_name)]; 802 uint8_t name[sizeof(struct lpfc_name)];
757 uint32_t rc; 803 uint32_t rc;
@@ -788,11 +834,34 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
788 lpfc_unreg_rpi(vport, new_ndlp); 834 lpfc_unreg_rpi(vport, new_ndlp);
789 new_ndlp->nlp_DID = ndlp->nlp_DID; 835 new_ndlp->nlp_DID = ndlp->nlp_DID;
790 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 836 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
837
838 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
839 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
840 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
841
791 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 842 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
792 843
793 /* Move this back to NPR state */ 844 /* Move this back to NPR state */
794 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) 845 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
846 /* The new_ndlp is replacing ndlp totally, so we need
847 * to put ndlp on UNUSED list and try to free it.
848 */
849
850 /* Fix up the rport accordingly */
851 rport = ndlp->rport;
852 if (rport) {
853 rdata = rport->dd_data;
854 if (rdata->pnode == ndlp) {
855 lpfc_nlp_put(ndlp);
856 ndlp->rport = NULL;
857 rdata->pnode = lpfc_nlp_get(new_ndlp);
858 new_ndlp->rport = rport;
859 }
860 new_ndlp->nlp_type = ndlp->nlp_type;
861 }
862
795 lpfc_drop_node(vport, ndlp); 863 lpfc_drop_node(vport, ndlp);
864 }
796 else { 865 else {
797 lpfc_unreg_rpi(vport, ndlp); 866 lpfc_unreg_rpi(vport, ndlp);
798 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ 867 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
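
When new_ndlp wholly replaces ndlp above, the rport's dd_data back-pointer has to move with it, and the hunk pairs every dropped reference with a taken one: lpfc_nlp_put() on the node losing the back-pointer, lpfc_nlp_get() on the node gaining it. The same handoff with a toy reference counter:

#include <stdio.h>

struct node {
	int ref;
	const char *name;
};

static struct node *node_get(struct node *n) { n->ref++; return n; }

static void node_put(struct node *n)
{
	if (--n->ref == 0)
		printf("%s freed\n", n->name);
}

int main(void)
{
	struct node old = { 1, "old" }, new = { 1, "new" };
	struct node *pnode = node_get(&old);	/* back-pointer holds a ref */

	/* hand the back-pointer over: put the node losing it, get the
	 * node gaining it, exactly the pairing in the hunk above */
	node_put(pnode);
	pnode = node_get(&new);

	printf("pnode -> %s (refs: old=%d new=%d)\n",
	       pnode->name, old.ref, new.ref);

	node_put(pnode);	/* back-pointer released */
	node_put(&old);		/* original holders drop out last */
	node_put(&new);
	return 0;
}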
@@ -801,6 +870,27 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
801 return new_ndlp; 870 return new_ndlp;
802} 871}
803 872
873void
874lpfc_end_rscn(struct lpfc_vport *vport)
875{
876 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
877
878 if (vport->fc_flag & FC_RSCN_MODE) {
879 /*
880 * Check to see if more RSCNs came in while we were
881 * processing this one.
882 */
883 if (vport->fc_rscn_id_cnt ||
884 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
885 lpfc_els_handle_rscn(vport);
886 else {
887 spin_lock_irq(shost->host_lock);
888 vport->fc_flag &= ~FC_RSCN_MODE;
889 spin_unlock_irq(shost->host_lock);
890 }
891 }
892}
893
804static void 894static void
805lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 895lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
806 struct lpfc_iocbq *rspiocb) 896 struct lpfc_iocbq *rspiocb)
@@ -871,13 +961,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
871 goto out; 961 goto out;
872 } 962 }
873 /* PLOGI failed */ 963 /* PLOGI failed */
874 if (ndlp->nlp_DID == NameServer_DID) {
875 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
876 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
877 "0250 Nameserver login error: "
878 "0x%x / 0x%x\n",
879 irsp->ulpStatus, irsp->un.ulpWord[4]);
880 }
881 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 964 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
882 if (lpfc_error_lost_link(irsp)) { 965 if (lpfc_error_lost_link(irsp)) {
883 rc = NLP_STE_FREED_NODE; 966 rc = NLP_STE_FREED_NODE;
@@ -905,20 +988,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
905 spin_unlock_irq(shost->host_lock); 988 spin_unlock_irq(shost->host_lock);
906 989
907 lpfc_can_disctmo(vport); 990 lpfc_can_disctmo(vport);
908 if (vport->fc_flag & FC_RSCN_MODE) { 991 lpfc_end_rscn(vport);
909 /*
910 * Check to see if more RSCNs came in while
911 * we were processing this one.
912 */
913 if ((vport->fc_rscn_id_cnt == 0) &&
914 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
915 spin_lock_irq(shost->host_lock);
916 vport->fc_flag &= ~FC_RSCN_MODE;
917 spin_unlock_irq(shost->host_lock);
918 } else {
919 lpfc_els_handle_rscn(vport);
920 }
921 }
922 } 992 }
923 } 993 }
924 994
@@ -933,6 +1003,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
933 struct lpfc_hba *phba = vport->phba; 1003 struct lpfc_hba *phba = vport->phba;
934 struct serv_parm *sp; 1004 struct serv_parm *sp;
935 IOCB_t *icmd; 1005 IOCB_t *icmd;
1006 struct lpfc_nodelist *ndlp;
936 struct lpfc_iocbq *elsiocb; 1007 struct lpfc_iocbq *elsiocb;
937 struct lpfc_sli_ring *pring; 1008 struct lpfc_sli_ring *pring;
938 struct lpfc_sli *psli; 1009 struct lpfc_sli *psli;
@@ -943,8 +1014,11 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
943 psli = &phba->sli; 1014 psli = &phba->sli;
944 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1015 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
945 1016
1017 ndlp = lpfc_findnode_did(vport, did);
1018 /* If ndlp is not NULL, we will bump the reference count on it */
1019
946 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1020 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
947 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did, 1021 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
948 ELS_CMD_PLOGI); 1022 ELS_CMD_PLOGI);
949 if (!elsiocb) 1023 if (!elsiocb)
950 return 1; 1024 return 1;
@@ -1109,7 +1183,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1109 return 0; 1183 return 0;
1110} 1184}
1111 1185
1112static void 1186void
1113lpfc_more_adisc(struct lpfc_vport *vport) 1187lpfc_more_adisc(struct lpfc_vport *vport)
1114{ 1188{
1115 int sentadisc; 1189 int sentadisc;
@@ -1134,8 +1208,6 @@ lpfc_more_adisc(struct lpfc_vport *vport)
1134static void 1208static void
1135lpfc_rscn_disc(struct lpfc_vport *vport) 1209lpfc_rscn_disc(struct lpfc_vport *vport)
1136{ 1210{
1137 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1138
1139 lpfc_can_disctmo(vport); 1211 lpfc_can_disctmo(vport);
1140 1212
1141 /* RSCN discovery */ 1213 /* RSCN discovery */
@@ -1144,19 +1216,7 @@ lpfc_rscn_disc(struct lpfc_vport *vport)
1144 if (lpfc_els_disc_plogi(vport)) 1216 if (lpfc_els_disc_plogi(vport))
1145 return; 1217 return;
1146 1218
1147 if (vport->fc_flag & FC_RSCN_MODE) { 1219 lpfc_end_rscn(vport);
1148 /* Check to see if more RSCNs came in while we were
1149 * processing this one.
1150 */
1151 if ((vport->fc_rscn_id_cnt == 0) &&
1152 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
1153 spin_lock_irq(shost->host_lock);
1154 vport->fc_flag &= ~FC_RSCN_MODE;
1155 spin_unlock_irq(shost->host_lock);
1156 } else {
1157 lpfc_els_handle_rscn(vport);
1158 }
1159 }
1160} 1220}
1161 1221
1162static void 1222static void
@@ -1413,6 +1473,13 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1413 psli = &phba->sli; 1473 psli = &phba->sli;
1414 pring = &psli->ring[LPFC_ELS_RING]; 1474 pring = &psli->ring[LPFC_ELS_RING];
1415 1475
1476 spin_lock_irq(shost->host_lock);
1477 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1478 spin_unlock_irq(shost->host_lock);
1479 return 0;
1480 }
1481 spin_unlock_irq(shost->host_lock);
1482
1416 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 1483 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
1417 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1484 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1418 ndlp->nlp_DID, ELS_CMD_LOGO); 1485 ndlp->nlp_DID, ELS_CMD_LOGO);
@@ -1499,6 +1566,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1499 ndlp->nlp_DID, ELS_CMD_SCR); 1566 ndlp->nlp_DID, ELS_CMD_SCR);
1500 1567
1501 if (!elsiocb) { 1568 if (!elsiocb) {
1569 /* This will trigger the release of the node just
1570 * allocated
1571 */
1502 lpfc_nlp_put(ndlp); 1572 lpfc_nlp_put(ndlp);
1503 return 1; 1573 return 1;
1504 } 1574 }
@@ -1520,10 +1590,17 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1520 phba->fc_stat.elsXmitSCR++; 1590 phba->fc_stat.elsXmitSCR++;
1521 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 1591 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1522 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1592 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1593 /* The additional lpfc_nlp_put will cause the following
1594 * lpfc_els_free_iocb routine to trigger the release of
1595 * the node.
1596 */
1523 lpfc_nlp_put(ndlp); 1597 lpfc_nlp_put(ndlp);
1524 lpfc_els_free_iocb(phba, elsiocb); 1598 lpfc_els_free_iocb(phba, elsiocb);
1525 return 1; 1599 return 1;
1526 } 1600 }
1601 /* This will cause the callback function lpfc_cmpl_els_cmd to
1602 * trigger the release of the node.
1603 */
1527 lpfc_nlp_put(ndlp); 1604 lpfc_nlp_put(ndlp);
1528 return 0; 1605 return 0;
1529} 1606}
@@ -1555,6 +1632,9 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1555 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1632 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1556 ndlp->nlp_DID, ELS_CMD_RNID); 1633 ndlp->nlp_DID, ELS_CMD_RNID);
1557 if (!elsiocb) { 1634 if (!elsiocb) {
1635 /* This will trigger the release of the node just
1636 * allocated
1637 */
1558 lpfc_nlp_put(ndlp); 1638 lpfc_nlp_put(ndlp);
1559 return 1; 1639 return 1;
1560 } 1640 }
@@ -1591,35 +1671,21 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1591 phba->fc_stat.elsXmitFARPR++; 1671 phba->fc_stat.elsXmitFARPR++;
1592 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 1672 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1593 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1673 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1674 /* The additional lpfc_nlp_put will cause the following
1675 * lpfc_els_free_iocb routine to trigger the release of
1676 * the node.
1677 */
1594 lpfc_nlp_put(ndlp); 1678 lpfc_nlp_put(ndlp);
1595 lpfc_els_free_iocb(phba, elsiocb); 1679 lpfc_els_free_iocb(phba, elsiocb);
1596 return 1; 1680 return 1;
1597 } 1681 }
1682 /* This will cause the callback-function lpfc_cmpl_els_cmd to
1683 * trigger the release of the node.
1684 */
1598 lpfc_nlp_put(ndlp); 1685 lpfc_nlp_put(ndlp);
1599 return 0; 1686 return 0;
1600} 1687}
1601 1688
1602static void
1603lpfc_end_rscn(struct lpfc_vport *vport)
1604{
1605 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1606
1607 if (vport->fc_flag & FC_RSCN_MODE) {
1608 /*
1609 * Check to see if more RSCNs came in while we were
1610 * processing this one.
1611 */
1612 if (vport->fc_rscn_id_cnt ||
1613 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1614 lpfc_els_handle_rscn(vport);
1615 else {
1616 spin_lock_irq(shost->host_lock);
1617 vport->fc_flag &= ~FC_RSCN_MODE;
1618 spin_unlock_irq(shost->host_lock);
1619 }
1620 }
1621}
1622
1623void 1689void
1624lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 1690lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
1625{ 1691{
@@ -1675,7 +1741,10 @@ lpfc_els_retry_delay(unsigned long ptr)
1675 return; 1741 return;
1676 } 1742 }
1677 1743
1678 evtp->evt_arg1 = ndlp; 1744 /* We need to hold the node by incrementing the reference
1745 * count until the queued work is done
1746 */
1747 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
1679 evtp->evt = LPFC_EVT_ELS_RETRY; 1748 evtp->evt = LPFC_EVT_ELS_RETRY;
1680 list_add_tail(&evtp->evt_listp, &phba->work_list); 1749 list_add_tail(&evtp->evt_listp, &phba->work_list);
1681 if (phba->work_wait) 1750 if (phba->work_wait)
@@ -1759,6 +1828,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1759 uint32_t *elscmd; 1828 uint32_t *elscmd;
1760 struct ls_rjt stat; 1829 struct ls_rjt stat;
1761 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 1830 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
1831 int logerr = 0;
1762 uint32_t cmd = 0; 1832 uint32_t cmd = 0;
1763 uint32_t did; 1833 uint32_t did;
1764 1834
@@ -1815,6 +1885,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1815 break; 1885 break;
1816 1886
1817 case IOERR_NO_RESOURCES: 1887 case IOERR_NO_RESOURCES:
1888 logerr = 1; /* HBA out of resources */
1818 retry = 1; 1889 retry = 1;
1819 if (cmdiocb->retry > 100) 1890 if (cmdiocb->retry > 100)
1820 delay = 100; 1891 delay = 100;
@@ -1843,6 +1914,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1843 1914
1844 case IOSTAT_NPORT_BSY: 1915 case IOSTAT_NPORT_BSY:
1845 case IOSTAT_FABRIC_BSY: 1916 case IOSTAT_FABRIC_BSY:
1917 logerr = 1; /* Fabric / Remote NPort out of resources */
1846 retry = 1; 1918 retry = 1;
1847 break; 1919 break;
1848 1920
@@ -1923,6 +1995,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1923 if (did == FDMI_DID) 1995 if (did == FDMI_DID)
1924 retry = 1; 1996 retry = 1;
1925 1997
1998 if ((cmd == ELS_CMD_FLOGI) &&
1999 (phba->fc_topology != TOPOLOGY_LOOP)) {
2000 /* FLOGI retry policy */
2001 retry = 1;
2002 maxretry = 48;
2003 if (cmdiocb->retry >= 32)
2004 delay = 1000;
2005 }
2006
1926 if ((++cmdiocb->retry) >= maxretry) { 2007 if ((++cmdiocb->retry) >= maxretry) {
1927 phba->fc_stat.elsRetryExceeded++; 2008 phba->fc_stat.elsRetryExceeded++;
1928 retry = 0; 2009 retry = 0;
@@ -2006,11 +2087,46 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2006 } 2087 }
2007 } 2088 }
2008 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 2089 /* No retry ELS command <elsCmd> to remote NPORT <did> */
2009 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2090 if (logerr) {
2091 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2092 "0137 No retry ELS command x%x to remote "
2093 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
2094 cmd, did, irsp->ulpStatus,
2095 irsp->un.ulpWord[4]);
2096 }
2097 else {
2098 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2010 "0108 No retry ELS command x%x to remote " 2099 "0108 No retry ELS command x%x to remote "
2011 "NPORT x%x Retried:%d Error:x%x/%x\n", 2100 "NPORT x%x Retried:%d Error:x%x/%x\n",
2012 cmd, did, cmdiocb->retry, irsp->ulpStatus, 2101 cmd, did, cmdiocb->retry, irsp->ulpStatus,
2013 irsp->un.ulpWord[4]); 2102 irsp->un.ulpWord[4]);
2103 }
2104 return 0;
2105}
2106
2107static int
2108lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
2109{
2110 struct lpfc_dmabuf *buf_ptr;
2111
2112 /* Free the response before processing the command. */
2113 if (!list_empty(&buf_ptr1->list)) {
2114 list_remove_head(&buf_ptr1->list, buf_ptr,
2115 struct lpfc_dmabuf,
2116 list);
2117 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2118 kfree(buf_ptr);
2119 }
2120 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2121 kfree(buf_ptr1);
2122 return 0;
2123}
2124
2125static int
2126lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
2127{
2128 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2129 kfree(buf_ptr);
2014 return 0; 2130 return 0;
2015} 2131}
2016 2132
@@ -2018,30 +2134,63 @@ int
2018lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 2134lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
2019{ 2135{
2020 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 2136 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
2137 struct lpfc_nodelist *ndlp;
2021 2138
2022 if (elsiocb->context1) { 2139 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
2023 lpfc_nlp_put(elsiocb->context1); 2140 if (ndlp) {
2141 if (ndlp->nlp_flag & NLP_DEFER_RM) {
2142 lpfc_nlp_put(ndlp);
2143
2144 /* If the ndlp is not being used by another discovery
2145 * thread, free it.
2146 */
2147 if (!lpfc_nlp_not_used(ndlp)) {
2148 /* If ndlp is being used by another discovery
2149 * thread, just clear NLP_DEFER_RM
2150 */
2151 ndlp->nlp_flag &= ~NLP_DEFER_RM;
2152 }
2153 }
2154 else
2155 lpfc_nlp_put(ndlp);
2024 elsiocb->context1 = NULL; 2156 elsiocb->context1 = NULL;
2025 } 2157 }
2026 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 2158 /* context2 = cmd, context2->next = rsp, context3 = bpl */
2027 if (elsiocb->context2) { 2159 if (elsiocb->context2) {
2028 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 2160 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
2029 /* Free the response before processing the command. */ 2161 /* Firmware could still be in the process of DMAing
2030 if (!list_empty(&buf_ptr1->list)) { 2162 * the payload, so don't free the data buffer until after
2031 list_remove_head(&buf_ptr1->list, buf_ptr, 2163 * a heartbeat.
2032 struct lpfc_dmabuf, 2164 */
2033 list); 2165 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
2034 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 2166 buf_ptr = elsiocb->context2;
2035 kfree(buf_ptr); 2167 elsiocb->context2 = NULL;
2168 if (buf_ptr) {
2169 buf_ptr1 = NULL;
2170 spin_lock_irq(&phba->hbalock);
2171 if (!list_empty(&buf_ptr->list)) {
2172 list_remove_head(&buf_ptr->list,
2173 buf_ptr1, struct lpfc_dmabuf,
2174 list);
2175 INIT_LIST_HEAD(&buf_ptr1->list);
2176 list_add_tail(&buf_ptr1->list,
2177 &phba->elsbuf);
2178 phba->elsbuf_cnt++;
2179 }
2180 INIT_LIST_HEAD(&buf_ptr->list);
2181 list_add_tail(&buf_ptr->list, &phba->elsbuf);
2182 phba->elsbuf_cnt++;
2183 spin_unlock_irq(&phba->hbalock);
2184 }
2185 } else {
2186 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
2187 lpfc_els_free_data(phba, buf_ptr1);
2036 } 2188 }
2037 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2038 kfree(buf_ptr1);
2039 } 2189 }
2040 2190
2041 if (elsiocb->context3) { 2191 if (elsiocb->context3) {
2042 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 2192 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
2043 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 2193 lpfc_els_free_bpl(phba, buf_ptr);
2044 kfree(buf_ptr);
2045 } 2194 }
2046 lpfc_sli_release_iocbq(phba, elsiocb); 2195 lpfc_sli_release_iocbq(phba, elsiocb);
2047 return 0; 2196 return 0;
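Rather than returning the command/response DMA buffers to the pool immediately, the LPFC_DELAY_MEM_FREE path parks them on phba->elsbuf under the hba lock, on the theory that the firmware may still be DMAing into them; a later pass after a heartbeat can then reap the list safely. A user-space sketch of that quarantine-then-reap idea, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical buffer descriptor; lpfc uses struct lpfc_dmabuf. */
struct buf {
    struct buf *next;
    void       *mem;
};

static struct buf *quarantine;       /* stands in for phba->elsbuf     */
static int         quarantine_cnt;   /* stands in for phba->elsbuf_cnt */

/* Completion path: don't free, park the buffer for later. */
static void defer_free(struct buf *b)
{
    b->next = quarantine;
    quarantine = b;
    quarantine_cnt++;
}

/* Heartbeat path: the hardware is quiesced, reap everything. */
static void reap_quarantine(void)
{
    while (quarantine) {
        struct buf *b = quarantine;

        quarantine = b->next;
        quarantine_cnt--;
        free(b->mem);
        free(b);
    }
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));

    b->mem = malloc(64);
    defer_free(b);               /* completion runs, memory survives */
    printf("parked buffers: %d\n", quarantine_cnt);
    reap_quarantine();           /* later, after the "heartbeat"     */
    return 0;
}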
@@ -2065,15 +2214,20 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2065 "Data: x%x x%x x%x\n", 2214 "Data: x%x x%x x%x\n",
2066 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 2215 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2067 ndlp->nlp_rpi); 2216 ndlp->nlp_rpi);
2068 switch (ndlp->nlp_state) { 2217
2069 case NLP_STE_UNUSED_NODE: /* node is just allocated */ 2218 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
2070 lpfc_drop_node(vport, ndlp); 2219 /* NPort Recovery mode or node is just allocated */
2071 break; 2220 if (!lpfc_nlp_not_used(ndlp)) {
2072 case NLP_STE_NPR_NODE: /* NPort Recovery mode */ 2221 /* If the ndlp is being used by another discovery
2073 lpfc_unreg_rpi(vport, ndlp); 2222 * thread, just unregister the RPI.
2074 break; 2223 */
2075 default: 2224 lpfc_unreg_rpi(vport, ndlp);
2076 break; 2225 } else {
2226 /* Indicate the node has already been released; do
2227 * not reference it from within lpfc_els_free_iocb.
2228 */
2229 cmdiocb->context1 = NULL;
2230 }
2077 } 2231 }
2078 lpfc_els_free_iocb(phba, cmdiocb); 2232 lpfc_els_free_iocb(phba, cmdiocb);
2079 return; 2233 return;
@@ -2089,7 +2243,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2089 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2243 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2090 kfree(mp); 2244 kfree(mp);
2091 mempool_free(pmb, phba->mbox_mem_pool); 2245 mempool_free(pmb, phba->mbox_mem_pool);
2092 lpfc_nlp_put(ndlp); 2246 if (ndlp) {
2247 lpfc_nlp_put(ndlp);
2248 /* This is the end of the default RPI cleanup logic for this
2249 * ndlp. If no other discovery threads are using this ndlp,
2250 * we should free all resources associated with it.
2251 */
2252 lpfc_nlp_not_used(ndlp);
2253 }
2093 return; 2254 return;
2094} 2255}
2095 2256
@@ -2100,15 +2261,29 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2100 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2261 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2101 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 2262 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
2102 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL; 2263 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
2103 IOCB_t *irsp; 2264 IOCB_t *irsp;
2265 uint8_t *pcmd;
2104 LPFC_MBOXQ_t *mbox = NULL; 2266 LPFC_MBOXQ_t *mbox = NULL;
2105 struct lpfc_dmabuf *mp = NULL; 2267 struct lpfc_dmabuf *mp = NULL;
2268 uint32_t ls_rjt = 0;
2106 2269
2107 irsp = &rspiocb->iocb; 2270 irsp = &rspiocb->iocb;
2108 2271
2109 if (cmdiocb->context_un.mbox) 2272 if (cmdiocb->context_un.mbox)
2110 mbox = cmdiocb->context_un.mbox; 2273 mbox = cmdiocb->context_un.mbox;
2111 2274
2275 /* First determine if this is a LS_RJT cmpl. Note, this callback
2276 * function can have the cmdiocb->context1 (ndlp) field set to NULL.
2277 */
2278 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
2279 if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
2280 /* A LS_RJT associated with Default RPI cleanup has its own
2281 * separate code path.
2282 */
2283 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
2284 ls_rjt = 1;
2285 }
2286
2112 /* Check to see if link went down during discovery */ 2287 /* Check to see if link went down during discovery */
2113 if (!ndlp || lpfc_els_chk_latt(vport)) { 2288 if (!ndlp || lpfc_els_chk_latt(vport)) {
2114 if (mbox) { 2289 if (mbox) {
@@ -2119,6 +2294,15 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2119 } 2294 }
2120 mempool_free(mbox, phba->mbox_mem_pool); 2295 mempool_free(mbox, phba->mbox_mem_pool);
2121 } 2296 }
2297 if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
2298 if (lpfc_nlp_not_used(ndlp)) {
2299 ndlp = NULL;
2300 /* Indicate the node has already been released;
2301 * do not reference it from within
2302 * the routine lpfc_els_free_iocb.
2303 */
2304 cmdiocb->context1 = NULL;
2305 }
2122 goto out; 2306 goto out;
2123 } 2307 }
2124 2308
@@ -2150,20 +2334,39 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2150 lpfc_nlp_set_state(vport, ndlp, 2334 lpfc_nlp_set_state(vport, ndlp,
2151 NLP_STE_REG_LOGIN_ISSUE); 2335 NLP_STE_REG_LOGIN_ISSUE);
2152 } 2336 }
2153 if (lpfc_sli_issue_mbox(phba, mbox, 2337 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
2154 (MBX_NOWAIT | MBX_STOP_IOCB))
2155 != MBX_NOT_FINISHED) { 2338 != MBX_NOT_FINISHED) {
2156 goto out; 2339 goto out;
2157 } 2340 }
2158 lpfc_nlp_put(ndlp); 2341
2159 /* NOTE: we should have messages for unsuccessful 2342 /* ELS rsp: Cannot issue reg_login for <NPortid> */
2160 reglogin */ 2343 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2344 "0138 ELS rsp: Cannot issue reg_login for x%x "
2345 "Data: x%x x%x x%x\n",
2346 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2347 ndlp->nlp_rpi);
2348
2349 if (lpfc_nlp_not_used(ndlp)) {
2350 ndlp = NULL;
2351 /* Indicate node has already been released;
2352 * do not reference it from within
2353 * the routine lpfc_els_free_iocb.
2354 */
2355 cmdiocb->context1 = NULL;
2356 }
2161 } else { 2357 } else {
2162 /* Do not drop node for lpfc_els_abort'ed ELS cmds */ 2358 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
2163 if (!lpfc_error_lost_link(irsp) && 2359 if (!lpfc_error_lost_link(irsp) &&
2164 ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 2360 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
2165 lpfc_drop_node(vport, ndlp); 2361 if (lpfc_nlp_not_used(ndlp)) {
2166 ndlp = NULL; 2362 ndlp = NULL;
2363 /* Indicate node has already been
2364 * released; do not reference
2365 * it from within the routine
2366 * lpfc_els_free_iocb.
2367 */
2368 cmdiocb->context1 = NULL;
2369 }
2167 } 2370 }
2168 } 2371 }
2169 mp = (struct lpfc_dmabuf *) mbox->context1; 2372 mp = (struct lpfc_dmabuf *) mbox->context1;
@@ -2178,7 +2381,21 @@ out:
2178 spin_lock_irq(shost->host_lock); 2381 spin_lock_irq(shost->host_lock);
2179 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); 2382 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
2180 spin_unlock_irq(shost->host_lock); 2383 spin_unlock_irq(shost->host_lock);
2384
2385 /* If the node is not being used by another discovery thread,
2386 * and we are sending a reject, we are done with it.
2387 * Release driver reference count here and free associated
2388 * resources.
2389 */
2390 if (ls_rjt)
2391 if (lpfc_nlp_not_used(ndlp))
2392 /* Indicate node has already been released;
2393 * do not reference it from within
2394 * the routine lpfc_els_free_iocb.
2395 */
2396 cmdiocb->context1 = NULL;
2181 } 2397 }
2398
2182 lpfc_els_free_iocb(phba, cmdiocb); 2399 lpfc_els_free_iocb(phba, cmdiocb);
2183 return; 2400 return;
2184} 2401}
@@ -2349,14 +2566,6 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
2349 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 2566 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2350 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2567 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2351 2568
2352 /* If the node is in the UNUSED state, and we are sending
2353 * a reject, we are done with it. Release driver reference
2354 * count here. The outstanding els will release its reference on
2355 * completion and the node can be freed then.
2356 */
2357 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2358 lpfc_nlp_put(ndlp);
2359
2360 if (rc == IOCB_ERROR) { 2569 if (rc == IOCB_ERROR) {
2361 lpfc_els_free_iocb(phba, elsiocb); 2570 lpfc_els_free_iocb(phba, elsiocb);
2362 return 1; 2571 return 1;
@@ -2642,7 +2851,10 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
2642 } 2851 }
2643 } 2852 }
2644 } 2853 }
2645 if (sentplogi == 0) { 2854 if (sentplogi) {
2855 lpfc_set_disctmo(vport);
2856 }
2857 else {
2646 spin_lock_irq(shost->host_lock); 2858 spin_lock_irq(shost->host_lock);
2647 vport->fc_flag &= ~FC_NLP_MORE; 2859 vport->fc_flag &= ~FC_NLP_MORE;
2648 spin_unlock_irq(shost->host_lock); 2860 spin_unlock_irq(shost->host_lock);
@@ -2830,10 +3042,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2830 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 3042 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
2831 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 3043 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
2832 3044
3045 spin_lock_irq(shost->host_lock);
2833 vport->fc_flag |= FC_RSCN_DEFERRED; 3046 vport->fc_flag |= FC_RSCN_DEFERRED;
2834 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 3047 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
2835 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 3048 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
2836 spin_lock_irq(shost->host_lock);
2837 vport->fc_flag |= FC_RSCN_MODE; 3049 vport->fc_flag |= FC_RSCN_MODE;
2838 spin_unlock_irq(shost->host_lock); 3050 spin_unlock_irq(shost->host_lock);
2839 if (rscn_cnt) { 3051 if (rscn_cnt) {
@@ -2862,7 +3074,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2862 vport->fc_rscn_id_cnt, vport->fc_flag, 3074 vport->fc_rscn_id_cnt, vport->fc_flag,
2863 vport->port_state); 3075 vport->port_state);
2864 } else { 3076 } else {
2865 spin_lock_irq(shost->host_lock);
2866 vport->fc_flag |= FC_RSCN_DISCOVERY; 3077 vport->fc_flag |= FC_RSCN_DISCOVERY;
2867 spin_unlock_irq(shost->host_lock); 3078 spin_unlock_irq(shost->host_lock);
2868 /* ReDiscovery RSCN */ 3079 /* ReDiscovery RSCN */
@@ -2877,7 +3088,9 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2877 3088
2878 /* send RECOVERY event for ALL nodes that match RSCN payload */ 3089 /* send RECOVERY event for ALL nodes that match RSCN payload */
2879 lpfc_rscn_recovery_check(vport); 3090 lpfc_rscn_recovery_check(vport);
3091 spin_lock_irq(shost->host_lock);
2880 vport->fc_flag &= ~FC_RSCN_DEFERRED; 3092 vport->fc_flag &= ~FC_RSCN_DEFERRED;
3093 spin_unlock_irq(shost->host_lock);
2881 return 0; 3094 return 0;
2882 } 3095 }
2883 3096
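The locking change is uniform: every update of vport->fc_flag in this path, including setting and clearing FC_RSCN_DEFERRED, now happens inside spin_lock_irq(shost->host_lock), so no reader can observe a torn read-modify-write of the flag word. The shape of the pattern, sketched with a POSIX mutex standing in for the host lock:

#include <stdio.h>
#include <pthread.h>

#define FC_RSCN_DEFERRED  0x1
#define FC_RSCN_MODE      0x2

static unsigned int fc_flag;                 /* shared state */
static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every read-modify-write of the flag word goes through the lock,
 * mirroring spin_lock_irq(shost->host_lock) around vport->fc_flag. */
static void set_flag(unsigned int bits)
{
    pthread_mutex_lock(&host_lock);
    fc_flag |= bits;
    pthread_mutex_unlock(&host_lock);
}

static void clear_flag(unsigned int bits)
{
    pthread_mutex_lock(&host_lock);
    fc_flag &= ~bits;
    pthread_mutex_unlock(&host_lock);
}

int main(void)
{
    set_flag(FC_RSCN_DEFERRED);
    set_flag(FC_RSCN_MODE);
    clear_flag(FC_RSCN_DEFERRED);
    printf("fc_flag = %#x\n", fc_flag);
    return 0;
}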
@@ -2929,6 +3142,8 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
2929 3142
2930 /* To process RSCN, first compare RSCN data with NameServer */ 3143 /* To process RSCN, first compare RSCN data with NameServer */
2931 vport->fc_ns_retry = 0; 3144 vport->fc_ns_retry = 0;
3145 vport->num_disc_nodes = 0;
3146
2932 ndlp = lpfc_findnode_did(vport, NameServer_DID); 3147 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2933 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 3148 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
2934 /* Good ndlp, issue CT Request to NameServer */ 3149 /* Good ndlp, issue CT Request to NameServer */
@@ -3022,8 +3237,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3022 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 3237 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
3023 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3238 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3024 mbox->vport = vport; 3239 mbox->vport = vport;
3025 rc = lpfc_sli_issue_mbox 3240 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3026 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
3027 lpfc_set_loopback_flag(phba); 3241 lpfc_set_loopback_flag(phba);
3028 if (rc == MBX_NOT_FINISHED) { 3242 if (rc == MBX_NOT_FINISHED) {
3029 mempool_free(mbox, phba->mbox_mem_pool); 3243 mempool_free(mbox, phba->mbox_mem_pool);
@@ -3140,7 +3354,10 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3140 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 3354 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
3141 lpfc_max_els_tries, ndlp, 3355 lpfc_max_els_tries, ndlp,
3142 ndlp->nlp_DID, ELS_CMD_ACC); 3356 ndlp->nlp_DID, ELS_CMD_ACC);
3357
3358 /* Decrement the ndlp reference count from previous mbox command */
3143 lpfc_nlp_put(ndlp); 3359 lpfc_nlp_put(ndlp);
3360
3144 if (!elsiocb) 3361 if (!elsiocb)
3145 return; 3362 return;
3146 3363
@@ -3160,13 +3377,13 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3160 status |= 0x4; 3377 status |= 0x4;
3161 3378
3162 rps_rsp->rsvd1 = 0; 3379 rps_rsp->rsvd1 = 0;
3163 rps_rsp->portStatus = be16_to_cpu(status); 3380 rps_rsp->portStatus = cpu_to_be16(status);
3164 rps_rsp->linkFailureCnt = be32_to_cpu(mb->un.varRdLnk.linkFailureCnt); 3381 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
3165 rps_rsp->lossSyncCnt = be32_to_cpu(mb->un.varRdLnk.lossSyncCnt); 3382 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
3166 rps_rsp->lossSignalCnt = be32_to_cpu(mb->un.varRdLnk.lossSignalCnt); 3383 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
3167 rps_rsp->primSeqErrCnt = be32_to_cpu(mb->un.varRdLnk.primSeqErrCnt); 3384 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
3168 rps_rsp->invalidXmitWord = be32_to_cpu(mb->un.varRdLnk.invalidXmitWord); 3385 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
3169 rps_rsp->crcCnt = be32_to_cpu(mb->un.varRdLnk.crcCnt); 3386 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
3170 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 3387 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
3171 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 3388 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
3172 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, " 3389 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
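The helper swap above deserves a note: cpu_to_be32() and be32_to_cpu() perform the identical byte swap on little-endian hosts (and are both no-ops on big-endian ones), so the old code already emitted correct bytes; the new spelling states the real direction, filling a big-endian wire structure from CPU-order values, and type-checks under sparse __be32 annotations. A tiny illustration using the portable htonl() equivalent:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* htonl(): host (cpu) order to big-endian */

struct rps_counters {            /* simplified wire structure        */
    uint32_t link_failure_cnt;   /* big-endian on the wire           */
};

int main(void)
{
    uint32_t host_cnt = 0x12345678;   /* counter as read from the HW */
    struct rps_counters rsp;

    /* Filling an outbound structure converts *to* wire order ...    */
    rsp.link_failure_cnt = htonl(host_cnt);

    /* ... and parsing an inbound one converts back with ntohl().    */
    printf("wire value: 0x%08x, parsed back: 0x%08x\n",
           (unsigned)rsp.link_failure_cnt,
           (unsigned)ntohl(rsp.link_failure_cnt));
    return 0;
}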
@@ -3223,11 +3440,13 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3223 mbox->context2 = lpfc_nlp_get(ndlp); 3440 mbox->context2 = lpfc_nlp_get(ndlp);
3224 mbox->vport = vport; 3441 mbox->vport = vport;
3225 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 3442 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
3226 if (lpfc_sli_issue_mbox (phba, mbox, 3443 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
3227 (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) 3444 != MBX_NOT_FINISHED)
3228 /* Mbox completion will send ELS Response */ 3445 /* Mbox completion will send ELS Response */
3229 return 0; 3446 return 0;
3230 3447 /* Decrement the reference count taken for the failed mbox
3448 * command.
3449 */
3231 lpfc_nlp_put(ndlp); 3450 lpfc_nlp_put(ndlp);
3232 mempool_free(mbox, phba->mbox_mem_pool); 3451 mempool_free(mbox, phba->mbox_mem_pool);
3233 } 3452 }
@@ -3461,6 +3680,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3461 * other NLP_FABRIC logins 3680 * other NLP_FABRIC logins
3462 */ 3681 */
3463 lpfc_drop_node(vport, ndlp); 3682 lpfc_drop_node(vport, ndlp);
3683
3464 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 3684 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
3465 /* Fail outstanding I/O now since this 3685 /* Fail outstanding I/O now since this
3466 * device is marked for PLOGI 3686 * device is marked for PLOGI
@@ -3469,8 +3689,6 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3469 } 3689 }
3470 } 3690 }
3471 3691
3472 vport->port_state = LPFC_FLOGI;
3473 lpfc_set_disctmo(vport);
3474 lpfc_initial_flogi(vport); 3692 lpfc_initial_flogi(vport);
3475 return 0; 3693 return 0;
3476 } 3694 }
@@ -3711,6 +3929,7 @@ static void
3711lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3929lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3712 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 3930 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
3713{ 3931{
3932 struct Scsi_Host *shost;
3714 struct lpfc_nodelist *ndlp; 3933 struct lpfc_nodelist *ndlp;
3715 struct ls_rjt stat; 3934 struct ls_rjt stat;
3716 uint32_t *payload; 3935 uint32_t *payload;
@@ -3750,11 +3969,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3750 goto dropit; 3969 goto dropit;
3751 3970
3752 lpfc_nlp_init(vport, ndlp, did); 3971 lpfc_nlp_init(vport, ndlp, did);
3972 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3753 newnode = 1; 3973 newnode = 1;
3754 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { 3974 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
3755 ndlp->nlp_type |= NLP_FABRIC; 3975 ndlp->nlp_type |= NLP_FABRIC;
3756 } 3976 }
3757 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); 3977 }
3978 else {
3979 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3980 /* This is similar to the new node path */
3981 lpfc_nlp_get(ndlp);
3982 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3983 newnode = 1;
3984 }
3758 } 3985 }
3759 3986
3760 phba->fc_stat.elsRcvFrame++; 3987 phba->fc_stat.elsRcvFrame++;
@@ -3783,6 +4010,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3783 rjt_err = LSRJT_UNABLE_TPC; 4010 rjt_err = LSRJT_UNABLE_TPC;
3784 break; 4011 break;
3785 } 4012 }
4013
4014 shost = lpfc_shost_from_vport(vport);
4015 spin_lock_irq(shost->host_lock);
4016 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
4017 spin_unlock_irq(shost->host_lock);
4018
3786 lpfc_disc_state_machine(vport, ndlp, elsiocb, 4019 lpfc_disc_state_machine(vport, ndlp, elsiocb,
3787 NLP_EVT_RCV_PLOGI); 4020 NLP_EVT_RCV_PLOGI);
3788 4021
@@ -3795,7 +4028,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3795 phba->fc_stat.elsRcvFLOGI++; 4028 phba->fc_stat.elsRcvFLOGI++;
3796 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 4029 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
3797 if (newnode) 4030 if (newnode)
3798 lpfc_drop_node(vport, ndlp); 4031 lpfc_nlp_put(ndlp);
3799 break; 4032 break;
3800 case ELS_CMD_LOGO: 4033 case ELS_CMD_LOGO:
3801 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4034 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3825,7 +4058,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3825 phba->fc_stat.elsRcvRSCN++; 4058 phba->fc_stat.elsRcvRSCN++;
3826 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 4059 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
3827 if (newnode) 4060 if (newnode)
3828 lpfc_drop_node(vport, ndlp); 4061 lpfc_nlp_put(ndlp);
3829 break; 4062 break;
3830 case ELS_CMD_ADISC: 4063 case ELS_CMD_ADISC:
3831 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4064 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3897,7 +4130,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3897 phba->fc_stat.elsRcvLIRR++; 4130 phba->fc_stat.elsRcvLIRR++;
3898 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 4131 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
3899 if (newnode) 4132 if (newnode)
3900 lpfc_drop_node(vport, ndlp); 4133 lpfc_nlp_put(ndlp);
3901 break; 4134 break;
3902 case ELS_CMD_RPS: 4135 case ELS_CMD_RPS:
3903 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4136 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3907,7 +4140,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3907 phba->fc_stat.elsRcvRPS++; 4140 phba->fc_stat.elsRcvRPS++;
3908 lpfc_els_rcv_rps(vport, elsiocb, ndlp); 4141 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
3909 if (newnode) 4142 if (newnode)
3910 lpfc_drop_node(vport, ndlp); 4143 lpfc_nlp_put(ndlp);
3911 break; 4144 break;
3912 case ELS_CMD_RPL: 4145 case ELS_CMD_RPL:
3913 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4146 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3917,7 +4150,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3917 phba->fc_stat.elsRcvRPL++; 4150 phba->fc_stat.elsRcvRPL++;
3918 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 4151 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
3919 if (newnode) 4152 if (newnode)
3920 lpfc_drop_node(vport, ndlp); 4153 lpfc_nlp_put(ndlp);
3921 break; 4154 break;
3922 case ELS_CMD_RNID: 4155 case ELS_CMD_RNID:
3923 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4156 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3927,7 +4160,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3927 phba->fc_stat.elsRcvRNID++; 4160 phba->fc_stat.elsRcvRNID++;
3928 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 4161 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
3929 if (newnode) 4162 if (newnode)
3930 lpfc_drop_node(vport, ndlp); 4163 lpfc_nlp_put(ndlp);
3931 break; 4164 break;
3932 default: 4165 default:
3933 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4166 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3942,7 +4175,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3942 "0115 Unknown ELS command x%x " 4175 "0115 Unknown ELS command x%x "
3943 "received from NPORT x%x\n", cmd, did); 4176 "received from NPORT x%x\n", cmd, did);
3944 if (newnode) 4177 if (newnode)
3945 lpfc_drop_node(vport, ndlp); 4178 lpfc_nlp_put(ndlp);
3946 break; 4179 break;
3947 } 4180 }
3948 4181
@@ -3958,10 +4191,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3958 return; 4191 return;
3959 4192
3960dropit: 4193dropit:
3961 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 4194 if (vport && !(vport->load_flag & FC_UNLOADING))
4195 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3962 "(%d):0111 Dropping received ELS cmd " 4196 "(%d):0111 Dropping received ELS cmd "
3963 "Data: x%x x%x x%x\n", 4197 "Data: x%x x%x x%x\n",
3964 vport ? vport->vpi : 0xffff, icmd->ulpStatus, 4198 vport->vpi, icmd->ulpStatus,
3965 icmd->un.ulpWord[4], icmd->ulpTimeout); 4199 icmd->un.ulpWord[4], icmd->ulpTimeout);
3966 phba->fc_stat.elsRcvDrop++; 4200 phba->fc_stat.elsRcvDrop++;
3967} 4201}
@@ -4114,8 +4348,9 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4114 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 4348 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
4115 MAILBOX_t *mb = &pmb->mb; 4349 MAILBOX_t *mb = &pmb->mb;
4116 4350
4351 spin_lock_irq(shost->host_lock);
4117 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 4352 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4118 lpfc_nlp_put(ndlp); 4353 spin_unlock_irq(shost->host_lock);
4119 4354
4120 if (mb->mbxStatus) { 4355 if (mb->mbxStatus) {
4121 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 4356 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -4135,7 +4370,9 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4135 default: 4370 default:
4136 /* Try to recover from this error */ 4371 /* Try to recover from this error */
4137 lpfc_mbx_unreg_vpi(vport); 4372 lpfc_mbx_unreg_vpi(vport);
4373 spin_lock_irq(shost->host_lock);
4138 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4374 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4375 spin_unlock_irq(shost->host_lock);
4139 lpfc_initial_fdisc(vport); 4376 lpfc_initial_fdisc(vport);
4140 break; 4377 break;
4141 } 4378 }
@@ -4146,14 +4383,21 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4146 else 4383 else
4147 lpfc_do_scr_ns_plogi(phba, vport); 4384 lpfc_do_scr_ns_plogi(phba, vport);
4148 } 4385 }
4386
4387 /* Now, we decrement the ndlp reference count held for this
4388 * callback function
4389 */
4390 lpfc_nlp_put(ndlp);
4391
4149 mempool_free(pmb, phba->mbox_mem_pool); 4392 mempool_free(pmb, phba->mbox_mem_pool);
4150 return; 4393 return;
4151} 4394}
4152 4395
4153void 4396static void
4154lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 4397lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
4155 struct lpfc_nodelist *ndlp) 4398 struct lpfc_nodelist *ndlp)
4156{ 4399{
4400 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4157 LPFC_MBOXQ_t *mbox; 4401 LPFC_MBOXQ_t *mbox;
4158 4402
4159 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4403 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -4162,25 +4406,31 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
4162 mbox->vport = vport; 4406 mbox->vport = vport;
4163 mbox->context2 = lpfc_nlp_get(ndlp); 4407 mbox->context2 = lpfc_nlp_get(ndlp);
4164 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 4408 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
4165 if (lpfc_sli_issue_mbox(phba, mbox, 4409 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
4166 MBX_NOWAIT | MBX_STOP_IOCB)
4167 == MBX_NOT_FINISHED) { 4410 == MBX_NOT_FINISHED) {
4411 /* mailbox command was not successful; decrement the
4412 * ndlp reference count taken for this command
4413 */
4414 lpfc_nlp_put(ndlp);
4168 mempool_free(mbox, phba->mbox_mem_pool); 4415 mempool_free(mbox, phba->mbox_mem_pool);
4169 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4170 4416
4171 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4172 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 4417 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
4173 "0253 Register VPI: Can't send mbox\n"); 4418 "0253 Register VPI: Can't send mbox\n");
4419 goto mbox_err_exit;
4174 } 4420 }
4175 } else { 4421 } else {
4176 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4177
4178 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 4422 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
4179 "0254 Register VPI: no memory\n"); 4423 "0254 Register VPI: no memory\n");
4180 4424 goto mbox_err_exit;
4181 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4182 lpfc_nlp_put(ndlp);
4183 } 4425 }
4426 return;
4427
4428mbox_err_exit:
4429 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4430 spin_lock_irq(shost->host_lock);
4431 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4432 spin_unlock_irq(shost->host_lock);
4433 return;
4184} 4434}
4185 4435
4186static void 4436static void
@@ -4251,7 +4501,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4251 lpfc_unreg_rpi(vport, np); 4501 lpfc_unreg_rpi(vport, np);
4252 } 4502 }
4253 lpfc_mbx_unreg_vpi(vport); 4503 lpfc_mbx_unreg_vpi(vport);
4504 spin_lock_irq(shost->host_lock);
4254 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4505 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4506 spin_unlock_irq(shost->host_lock);
4255 } 4507 }
4256 4508
4257 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 4509 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
@@ -4259,14 +4511,15 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4259 else 4511 else
4260 lpfc_do_scr_ns_plogi(phba, vport); 4512 lpfc_do_scr_ns_plogi(phba, vport);
4261 4513
4262 lpfc_nlp_put(ndlp); /* Free Fabric ndlp for vports */ 4514 /* Unconditionally kick off releasing fabric node for vports */
4515 lpfc_nlp_put(ndlp);
4263 } 4516 }
4264 4517
4265out: 4518out:
4266 lpfc_els_free_iocb(phba, cmdiocb); 4519 lpfc_els_free_iocb(phba, cmdiocb);
4267} 4520}
4268 4521
4269int 4522static int
4270lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4523lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4271 uint8_t retry) 4524 uint8_t retry)
4272{ 4525{
@@ -4539,7 +4792,7 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4539 } 4792 }
4540} 4793}
4541 4794
4542int 4795static int
4543lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 4796lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4544{ 4797{
4545 unsigned long iflags; 4798 unsigned long iflags;
@@ -4583,7 +4836,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4583} 4836}
4584 4837
4585 4838
4586void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 4839static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
4587{ 4840{
4588 LIST_HEAD(completions); 4841 LIST_HEAD(completions);
4589 struct lpfc_hba *phba = vport->phba; 4842 struct lpfc_hba *phba = vport->phba;
@@ -4663,6 +4916,7 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
4663} 4916}
4664 4917
4665 4918
4919#if 0
4666void lpfc_fabric_abort_flogi(struct lpfc_hba *phba) 4920void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
4667{ 4921{
4668 LIST_HEAD(completions); 4922 LIST_HEAD(completions);
@@ -4693,5 +4947,6 @@ void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
4693 (piocb->iocb_cmpl) (phba, piocb, piocb); 4947 (piocb->iocb_cmpl) (phba, piocb, piocb);
4694 } 4948 }
4695} 4949}
4950#endif /* 0 */
4696 4951
4697 4952
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index c81c2b3228d6..dc042bd97baa 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -57,6 +57,7 @@ static uint8_t lpfcAlpaArray[] = {
57}; 57};
58 58
59static void lpfc_disc_timeout_handler(struct lpfc_vport *); 59static void lpfc_disc_timeout_handler(struct lpfc_vport *);
60static void lpfc_disc_flush_list(struct lpfc_vport *vport);
60 61
61void 62void
62lpfc_terminate_rport_io(struct fc_rport *rport) 63lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -107,20 +108,14 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
107 struct lpfc_nodelist * ndlp; 108 struct lpfc_nodelist * ndlp;
108 struct lpfc_vport *vport; 109 struct lpfc_vport *vport;
109 struct lpfc_hba *phba; 110 struct lpfc_hba *phba;
110 struct completion devloss_compl;
111 struct lpfc_work_evt *evtp; 111 struct lpfc_work_evt *evtp;
112 int put_node;
113 int put_rport;
112 114
113 rdata = rport->dd_data; 115 rdata = rport->dd_data;
114 ndlp = rdata->pnode; 116 ndlp = rdata->pnode;
115 117 if (!ndlp)
116 if (!ndlp) {
117 if (rport->scsi_target_id != -1) {
118 printk(KERN_ERR "Cannot find remote node"
119 " for rport in dev_loss_tmo_callbk x%x\n",
120 rport->port_id);
121 }
122 return; 118 return;
123 }
124 119
125 vport = ndlp->vport; 120 vport = ndlp->vport;
126 phba = vport->phba; 121 phba = vport->phba;
@@ -129,15 +124,35 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
129 "rport devlosscb: sid:x%x did:x%x flg:x%x", 124 "rport devlosscb: sid:x%x did:x%x flg:x%x",
130 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); 125 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
131 126
132 init_completion(&devloss_compl); 127 /* Don't defer this if we are in the process of deleting the vport
128 * or unloading the driver. The unload will clean up the node
129 * appropriately; we just need to clean up the ndlp rport info here.
130 */
131 if (vport->load_flag & FC_UNLOADING) {
132 put_node = rdata->pnode != NULL;
133 put_rport = ndlp->rport != NULL;
134 rdata->pnode = NULL;
135 ndlp->rport = NULL;
136 if (put_node)
137 lpfc_nlp_put(ndlp);
138 if (put_rport)
139 put_device(&rport->dev);
140 return;
141 }
142
143 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
144 return;
145
133 evtp = &ndlp->dev_loss_evt; 146 evtp = &ndlp->dev_loss_evt;
134 147
135 if (!list_empty(&evtp->evt_listp)) 148 if (!list_empty(&evtp->evt_listp))
136 return; 149 return;
137 150
138 spin_lock_irq(&phba->hbalock); 151 spin_lock_irq(&phba->hbalock);
139 evtp->evt_arg1 = ndlp; 152 /* We need to hold the node by incrementing the reference
140 evtp->evt_arg2 = &devloss_compl; 153 * count until this queued work is done
154 */
155 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
141 evtp->evt = LPFC_EVT_DEV_LOSS; 156 evtp->evt = LPFC_EVT_DEV_LOSS;
142 list_add_tail(&evtp->evt_listp, &phba->work_list); 157 list_add_tail(&evtp->evt_listp, &phba->work_list);
143 if (phba->work_wait) 158 if (phba->work_wait)
@@ -145,8 +160,6 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
145 160
146 spin_unlock_irq(&phba->hbalock); 161 spin_unlock_irq(&phba->hbalock);
147 162
148 wait_for_completion(&devloss_compl);
149
150 return; 163 return;
151} 164}
152 165
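When the driver is unloading, the callback now tears down the rport/ndlp cross-links inline instead of queueing work: both pointers are sampled and cleared first, then each side's reference is dropped exactly once, guarded by the put_node/put_rport flags. A sketch of that mutual-unlink pattern with hypothetical types:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical pair of objects that reference each other, the way an
 * lpfc ndlp and its fc_rport do via rdata->pnode and ndlp->rport. */
struct node;
struct rport { struct node *pnode; int refcnt; };
struct node  { struct rport *rport; int refcnt; };

static void node_put(struct node *n)
{
    if (--n->refcnt == 0) { printf("node freed\n"); free(n); }
}

static void rport_put(struct rport *r)
{
    if (--r->refcnt == 0) { printf("rport freed\n"); free(r); }
}

/* Snapshot which links exist, clear both pointers, then drop each
 * reference exactly once; clearing first keeps anyone who races on
 * the pointers from seeing a half-torn-down pair. */
static void unlink_pair(struct rport *rdata, struct node *ndlp)
{
    int put_node  = rdata->pnode != NULL;
    int put_rport = ndlp->rport  != NULL;

    rdata->pnode = NULL;
    ndlp->rport  = NULL;
    if (put_node)
        node_put(ndlp);
    if (put_rport)
        rport_put(rdata);
}

int main(void)
{
    struct node  *n = malloc(sizeof(*n));
    struct rport *r = malloc(sizeof(*r));

    n->refcnt = 2;   /* one ref for the owner, one for the cross-link */
    r->refcnt = 2;
    n->rport = r;
    r->pnode = n;
    unlink_pair(r, n);
    node_put(n);     /* owners drop their own references */
    rport_put(r);
    return 0;
}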
@@ -154,7 +167,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
154 * This function is called from the worker thread when dev_loss_tmo 167 * This function is called from the worker thread when dev_loss_tmo
155 * expire. 168 * expire.
156 */ 169 */
157void 170static void
158lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) 171lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
159{ 172{
160 struct lpfc_rport_data *rdata; 173 struct lpfc_rport_data *rdata;
@@ -162,6 +175,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
162 struct lpfc_vport *vport; 175 struct lpfc_vport *vport;
163 struct lpfc_hba *phba; 176 struct lpfc_hba *phba;
164 uint8_t *name; 177 uint8_t *name;
178 int put_node;
179 int put_rport;
165 int warn_on = 0; 180 int warn_on = 0;
166 181
167 rport = ndlp->rport; 182 rport = ndlp->rport;
@@ -178,14 +193,32 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
178 "rport devlosstmo:did:x%x type:x%x id:x%x", 193 "rport devlosstmo:did:x%x type:x%x id:x%x",
179 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); 194 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
180 195
181 if (!(vport->load_flag & FC_UNLOADING) && 196 /* Don't defer this if we are in the process of deleting the vport
182 ndlp->nlp_state == NLP_STE_MAPPED_NODE) 197 * or unloading the driver. The unload will clean up the node
198 * appropriately; we just need to clean up the ndlp rport info here.
199 */
200 if (vport->load_flag & FC_UNLOADING) {
201 if (ndlp->nlp_sid != NLP_NO_SID) {
202 /* flush the target */
203 lpfc_sli_abort_iocb(vport,
204 &phba->sli.ring[phba->sli.fcp_ring],
205 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
206 }
207 put_node = rdata->pnode != NULL;
208 put_rport = ndlp->rport != NULL;
209 rdata->pnode = NULL;
210 ndlp->rport = NULL;
211 if (put_node)
212 lpfc_nlp_put(ndlp);
213 if (put_rport)
214 put_device(&rport->dev);
183 return; 215 return;
216 }
184 217
185 if (ndlp->nlp_type & NLP_FABRIC) { 218 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
186 int put_node; 219 return;
187 int put_rport;
188 220
221 if (ndlp->nlp_type & NLP_FABRIC) {
189 /* We will clean up these Nodes in linkup */ 222 /* We will clean up these Nodes in linkup */
190 put_node = rdata->pnode != NULL; 223 put_node = rdata->pnode != NULL;
191 put_rport = ndlp->rport != NULL; 224 put_rport = ndlp->rport != NULL;
@@ -227,23 +260,20 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
227 ndlp->nlp_state, ndlp->nlp_rpi); 260 ndlp->nlp_state, ndlp->nlp_rpi);
228 } 261 }
229 262
263 put_node = rdata->pnode != NULL;
264 put_rport = ndlp->rport != NULL;
265 rdata->pnode = NULL;
266 ndlp->rport = NULL;
267 if (put_node)
268 lpfc_nlp_put(ndlp);
269 if (put_rport)
270 put_device(&rport->dev);
271
230 if (!(vport->load_flag & FC_UNLOADING) && 272 if (!(vport->load_flag & FC_UNLOADING) &&
231 !(ndlp->nlp_flag & NLP_DELAY_TMO) && 273 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
232 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 274 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
233 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 275 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) {
234 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 276 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
235 else {
236 int put_node;
237 int put_rport;
238
239 put_node = rdata->pnode != NULL;
240 put_rport = ndlp->rport != NULL;
241 rdata->pnode = NULL;
242 ndlp->rport = NULL;
243 if (put_node)
244 lpfc_nlp_put(ndlp);
245 if (put_rport)
246 put_device(&rport->dev);
247 } 277 }
248} 278}
249 279
@@ -260,7 +290,6 @@ lpfc_work_list_done(struct lpfc_hba *phba)
260{ 290{
261 struct lpfc_work_evt *evtp = NULL; 291 struct lpfc_work_evt *evtp = NULL;
262 struct lpfc_nodelist *ndlp; 292 struct lpfc_nodelist *ndlp;
263 struct lpfc_vport *vport;
264 int free_evt; 293 int free_evt;
265 294
266 spin_lock_irq(&phba->hbalock); 295 spin_lock_irq(&phba->hbalock);
@@ -270,35 +299,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
270 spin_unlock_irq(&phba->hbalock); 299 spin_unlock_irq(&phba->hbalock);
271 free_evt = 1; 300 free_evt = 1;
272 switch (evtp->evt) { 301 switch (evtp->evt) {
273 case LPFC_EVT_DEV_LOSS_DELAY:
274 free_evt = 0; /* evt is part of ndlp */
275 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
276 vport = ndlp->vport;
277 if (!vport)
278 break;
279
280 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
281 "rport devlossdly:did:x%x flg:x%x",
282 ndlp->nlp_DID, ndlp->nlp_flag, 0);
283
284 if (!(vport->load_flag & FC_UNLOADING) &&
285 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
286 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
287 lpfc_disc_state_machine(vport, ndlp, NULL,
288 NLP_EVT_DEVICE_RM);
289 }
290 break;
291 case LPFC_EVT_ELS_RETRY: 302 case LPFC_EVT_ELS_RETRY:
292 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); 303 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
293 lpfc_els_retry_delay_handler(ndlp); 304 lpfc_els_retry_delay_handler(ndlp);
294 free_evt = 0; /* evt is part of ndlp */ 305 free_evt = 0; /* evt is part of ndlp */
306 /* decrement the node reference count held
307 * for this queued work
308 */
309 lpfc_nlp_put(ndlp);
295 break; 310 break;
296 case LPFC_EVT_DEV_LOSS: 311 case LPFC_EVT_DEV_LOSS:
297 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 312 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
298 lpfc_nlp_get(ndlp);
299 lpfc_dev_loss_tmo_handler(ndlp); 313 lpfc_dev_loss_tmo_handler(ndlp);
300 free_evt = 0; 314 free_evt = 0;
301 complete((struct completion *)(evtp->evt_arg2)); 315 /* decrement the node reference count held for
316 * this queued work
317 */
302 lpfc_nlp_put(ndlp); 318 lpfc_nlp_put(ndlp);
303 break; 319 break;
304 case LPFC_EVT_ONLINE: 320 case LPFC_EVT_ONLINE:
@@ -373,7 +389,7 @@ lpfc_work_done(struct lpfc_hba *phba)
373 lpfc_handle_latt(phba); 389 lpfc_handle_latt(phba);
374 vports = lpfc_create_vport_work_array(phba); 390 vports = lpfc_create_vport_work_array(phba);
375 if (vports != NULL) 391 if (vports != NULL)
376 for(i = 0; i < LPFC_MAX_VPORTS; i++) { 392 for(i = 0; i <= phba->max_vpi; i++) {
377 /* 393 /*
378 * We could have no vports in array if unloading, so if 394 * We could have no vports in array if unloading, so if
379 * this happens then just use the pport 395 * this happens then just use the pport
@@ -405,14 +421,14 @@ lpfc_work_done(struct lpfc_hba *phba)
405 vport->work_port_events &= ~work_port_events; 421 vport->work_port_events &= ~work_port_events;
406 spin_unlock_irq(&vport->work_port_lock); 422 spin_unlock_irq(&vport->work_port_lock);
407 } 423 }
408 lpfc_destroy_vport_work_array(vports); 424 lpfc_destroy_vport_work_array(phba, vports);
409 425
410 pring = &phba->sli.ring[LPFC_ELS_RING]; 426 pring = &phba->sli.ring[LPFC_ELS_RING];
411 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 427 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
412 status >>= (4*LPFC_ELS_RING); 428 status >>= (4*LPFC_ELS_RING);
413 if ((status & HA_RXMASK) 429 if ((status & HA_RXMASK)
414 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { 430 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
415 if (pring->flag & LPFC_STOP_IOCB_MASK) { 431 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
416 pring->flag |= LPFC_DEFERRED_RING_EVENT; 432 pring->flag |= LPFC_DEFERRED_RING_EVENT;
417 } else { 433 } else {
418 lpfc_sli_handle_slow_ring_event(phba, pring, 434 lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -544,6 +560,7 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
544void 560void
545lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) 561lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
546{ 562{
563 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
547 struct lpfc_hba *phba = vport->phba; 564 struct lpfc_hba *phba = vport->phba;
548 struct lpfc_nodelist *ndlp, *next_ndlp; 565 struct lpfc_nodelist *ndlp, *next_ndlp;
549 int rc; 566 int rc;
@@ -552,7 +569,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
552 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 569 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
553 continue; 570 continue;
554 571
555 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) 572 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
573 ((vport->port_type == LPFC_NPIV_PORT) &&
574 (ndlp->nlp_DID == NameServer_DID)))
556 lpfc_unreg_rpi(vport, ndlp); 575 lpfc_unreg_rpi(vport, ndlp);
557 576
558 /* Leave Fabric nodes alone on link down */ 577 /* Leave Fabric nodes alone on link down */
@@ -565,14 +584,30 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
565 } 584 }
566 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { 585 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
567 lpfc_mbx_unreg_vpi(vport); 586 lpfc_mbx_unreg_vpi(vport);
587 spin_lock_irq(shost->host_lock);
568 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 588 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
589 spin_unlock_irq(shost->host_lock);
569 } 590 }
570} 591}
571 592
593void
594lpfc_port_link_failure(struct lpfc_vport *vport)
595{
596 /* Cleanup any outstanding RSCN activity */
597 lpfc_els_flush_rscn(vport);
598
599 /* Cleanup any outstanding ELS commands */
600 lpfc_els_flush_cmd(vport);
601
602 lpfc_cleanup_rpis(vport, 0);
603
604 /* Turn off discovery timer if it's running */
605 lpfc_can_disctmo(vport);
606}
607
572static void 608static void
573lpfc_linkdown_port(struct lpfc_vport *vport) 609lpfc_linkdown_port(struct lpfc_vport *vport)
574{ 610{
575 struct lpfc_nodelist *ndlp, *next_ndlp;
576 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 611 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
577 612
578 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); 613 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
@@ -581,21 +616,8 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
581 "Link Down: state:x%x rtry:x%x flg:x%x", 616 "Link Down: state:x%x rtry:x%x flg:x%x",
582 vport->port_state, vport->fc_ns_retry, vport->fc_flag); 617 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
583 618
584 /* Cleanup any outstanding RSCN activity */ 619 lpfc_port_link_failure(vport);
585 lpfc_els_flush_rscn(vport);
586
587 /* Cleanup any outstanding ELS commands */
588 lpfc_els_flush_cmd(vport);
589 620
590 lpfc_cleanup_rpis(vport, 0);
591
592 /* free any ndlp's on unused list */
593 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
594 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
595 lpfc_drop_node(vport, ndlp);
596
597 /* Turn off discovery timer if its running */
598 lpfc_can_disctmo(vport);
599} 621}
600 622
601int 623int
@@ -618,18 +640,18 @@ lpfc_linkdown(struct lpfc_hba *phba)
618 spin_unlock_irq(&phba->hbalock); 640 spin_unlock_irq(&phba->hbalock);
619 vports = lpfc_create_vport_work_array(phba); 641 vports = lpfc_create_vport_work_array(phba);
620 if (vports != NULL) 642 if (vports != NULL)
621 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { 643 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
622 /* Issue a LINK DOWN event to all nodes */ 644 /* Issue a LINK DOWN event to all nodes */
623 lpfc_linkdown_port(vports[i]); 645 lpfc_linkdown_port(vports[i]);
624 } 646 }
625 lpfc_destroy_vport_work_array(vports); 647 lpfc_destroy_vport_work_array(phba, vports);
626 /* Clean up any firmware default rpi's */ 648 /* Clean up any firmware default rpi's */
627 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 649 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
628 if (mb) { 650 if (mb) {
629 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb); 651 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
630 mb->vport = vport; 652 mb->vport = vport;
631 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 653 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
632 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) 654 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
633 == MBX_NOT_FINISHED) { 655 == MBX_NOT_FINISHED) {
634 mempool_free(mb, phba->mbox_mem_pool); 656 mempool_free(mb, phba->mbox_mem_pool);
635 } 657 }
@@ -643,8 +665,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
643 lpfc_config_link(phba, mb); 665 lpfc_config_link(phba, mb);
644 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 666 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
645 mb->vport = vport; 667 mb->vport = vport;
646 if (lpfc_sli_issue_mbox(phba, mb, 668 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
647 (MBX_NOWAIT | MBX_STOP_IOCB))
648 == MBX_NOT_FINISHED) { 669 == MBX_NOT_FINISHED) {
649 mempool_free(mb, phba->mbox_mem_pool); 670 mempool_free(mb, phba->mbox_mem_pool);
650 } 671 }
@@ -686,7 +707,6 @@ static void
686lpfc_linkup_port(struct lpfc_vport *vport) 707lpfc_linkup_port(struct lpfc_vport *vport)
687{ 708{
688 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 709 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
689 struct lpfc_nodelist *ndlp, *next_ndlp;
690 struct lpfc_hba *phba = vport->phba; 710 struct lpfc_hba *phba = vport->phba;
691 711
692 if ((vport->load_flag & FC_UNLOADING) != 0) 712 if ((vport->load_flag & FC_UNLOADING) != 0)
@@ -713,11 +733,6 @@ lpfc_linkup_port(struct lpfc_vport *vport)
713 if (vport->fc_flag & FC_LBIT) 733 if (vport->fc_flag & FC_LBIT)
714 lpfc_linkup_cleanup_nodes(vport); 734 lpfc_linkup_cleanup_nodes(vport);
715 735
716 /* free any ndlp's in unused state */
717 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
718 nlp_listp)
719 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
720 lpfc_drop_node(vport, ndlp);
721} 736}
722 737
723static int 738static int
@@ -734,9 +749,9 @@ lpfc_linkup(struct lpfc_hba *phba)
734 749
735 vports = lpfc_create_vport_work_array(phba); 750 vports = lpfc_create_vport_work_array(phba);
736 if (vports != NULL) 751 if (vports != NULL)
737 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) 752 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
738 lpfc_linkup_port(vports[i]); 753 lpfc_linkup_port(vports[i]);
739 lpfc_destroy_vport_work_array(vports); 754 lpfc_destroy_vport_work_array(phba, vports);
740 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 755 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
741 lpfc_issue_clear_la(phba, phba->pport); 756 lpfc_issue_clear_la(phba, phba->pport);
742 757
@@ -749,7 +764,7 @@ lpfc_linkup(struct lpfc_hba *phba)
749 * as the completion routine when the command is 764 * as the completion routine when the command is
750 * handed off to the SLI layer. 765 * handed off to the SLI layer.
751 */ 766 */
752void 767static void
753lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 768lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
754{ 769{
755 struct lpfc_vport *vport = pmb->vport; 770 struct lpfc_vport *vport = pmb->vport;
@@ -852,8 +867,6 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
852 * LPFC_FLOGI while waiting for FLOGI cmpl 867 * LPFC_FLOGI while waiting for FLOGI cmpl
853 */ 868 */
854 if (vport->port_state != LPFC_FLOGI) { 869 if (vport->port_state != LPFC_FLOGI) {
855 vport->port_state = LPFC_FLOGI;
856 lpfc_set_disctmo(vport);
857 lpfc_initial_flogi(vport); 870 lpfc_initial_flogi(vport);
858 } 871 }
859 return; 872 return;
@@ -1022,8 +1035,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1022 lpfc_read_sparam(phba, sparam_mbox, 0); 1035 lpfc_read_sparam(phba, sparam_mbox, 0);
1023 sparam_mbox->vport = vport; 1036 sparam_mbox->vport = vport;
1024 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 1037 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1025 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, 1038 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
1026 (MBX_NOWAIT | MBX_STOP_IOCB));
1027 if (rc == MBX_NOT_FINISHED) { 1039 if (rc == MBX_NOT_FINISHED) {
1028 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 1040 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
1029 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1041 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1040,8 +1052,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1040 lpfc_config_link(phba, cfglink_mbox); 1052 lpfc_config_link(phba, cfglink_mbox);
1041 cfglink_mbox->vport = vport; 1053 cfglink_mbox->vport = vport;
1042 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 1054 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1043 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, 1055 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1044 (MBX_NOWAIT | MBX_STOP_IOCB));
1045 if (rc != MBX_NOT_FINISHED) 1056 if (rc != MBX_NOT_FINISHED)
1046 return; 1057 return;
1047 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1058 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
@@ -1174,6 +1185,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1174 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1185 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1175 kfree(mp); 1186 kfree(mp);
1176 mempool_free(pmb, phba->mbox_mem_pool); 1187 mempool_free(pmb, phba->mbox_mem_pool);
1188 /* decrement the node reference count held for this callback
1189 * function.
1190 */
1177 lpfc_nlp_put(ndlp); 1191 lpfc_nlp_put(ndlp);
1178 1192
1179 return; 1193 return;
@@ -1219,7 +1233,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1219 lpfc_unreg_vpi(phba, vport->vpi, mbox); 1233 lpfc_unreg_vpi(phba, vport->vpi, mbox);
1220 mbox->vport = vport; 1234 mbox->vport = vport;
1221 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; 1235 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
1222 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); 1236 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1223 if (rc == MBX_NOT_FINISHED) { 1237 if (rc == MBX_NOT_FINISHED) {
1224 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 1238 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1225 "1800 Could not issue unreg_vpi\n"); 1239 "1800 Could not issue unreg_vpi\n");
@@ -1319,7 +1333,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1319 vports = lpfc_create_vport_work_array(phba); 1333 vports = lpfc_create_vport_work_array(phba);
1320 if (vports != NULL) 1334 if (vports != NULL)
1321 for(i = 0; 1335 for(i = 0;
1322 i < LPFC_MAX_VPORTS && vports[i] != NULL; 1336 i <= phba->max_vpi && vports[i] != NULL;
1323 i++) { 1337 i++) {
1324 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 1338 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1325 continue; 1339 continue;
@@ -1335,7 +1349,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1335 "Fabric support\n"); 1349 "Fabric support\n");
1336 } 1350 }
1337 } 1351 }
1338 lpfc_destroy_vport_work_array(vports); 1352 lpfc_destroy_vport_work_array(phba, vports);
1339 lpfc_do_scr_ns_plogi(phba, vport); 1353 lpfc_do_scr_ns_plogi(phba, vport);
1340 } 1354 }
1341 1355
@@ -1361,11 +1375,16 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1361 1375
1362 if (mb->mbxStatus) { 1376 if (mb->mbxStatus) {
1363out: 1377out:
1378 /* decrement the node reference count held for this
1379 * callback function.
1380 */
1364 lpfc_nlp_put(ndlp); 1381 lpfc_nlp_put(ndlp);
1365 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1382 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1366 kfree(mp); 1383 kfree(mp);
1367 mempool_free(pmb, phba->mbox_mem_pool); 1384 mempool_free(pmb, phba->mbox_mem_pool);
1368 lpfc_drop_node(vport, ndlp); 1385
1386 /* If no other thread is using the ndlp, free it */
1387 lpfc_nlp_not_used(ndlp);
1369 1388
1370 if (phba->fc_topology == TOPOLOGY_LOOP) { 1389 if (phba->fc_topology == TOPOLOGY_LOOP) {
1371 /* 1390 /*
@@ -1410,6 +1429,9 @@ out:
1410 goto out; 1429 goto out;
1411 } 1430 }
1412 1431
1432 /* decrement the node reference count held for this
1433 * callback function.
1434 */
1413 lpfc_nlp_put(ndlp); 1435 lpfc_nlp_put(ndlp);
1414 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1436 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1415 kfree(mp); 1437 kfree(mp);
@@ -1656,8 +1678,18 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1656void 1678void
1657lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 1679lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1658{ 1680{
1681 /*
1682 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
1683 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
1684 * the ndlp from the vport. The ndlp stays marked as UNUSED on the
1685 * list until ALL other outstanding threads have completed. We check
1686 * that the ndlp is not already in the UNUSED state before we proceed.
1687 */
1688 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1689 return;
1659 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); 1690 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
1660 lpfc_nlp_put(ndlp); 1691 lpfc_nlp_put(ndlp);
1692 return;
1661} 1693}
1662 1694
1663/* 1695/*
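The early return makes lpfc_drop_node() idempotent: only the first caller moves the node to UNUSED and issues the "last" lpfc_nlp_put(); a repeated call backs off instead of dropping the list's reference twice. A compact sketch of that drop-exactly-once guard:

#include <stdio.h>
#include <stdlib.h>

enum state { STE_ACTIVE, STE_UNUSED };

struct node { enum state st; int refcnt; };

static void node_put(struct node *n)
{
    if (--n->refcnt == 0) { printf("freed\n"); free(n); }
}

/* Only the first caller performs the state change and the final put;
 * later callers see STE_UNUSED and back off, so the reference the
 * list holds is surrendered exactly once. */
static void drop_node(struct node *n)
{
    if (n->st == STE_UNUSED)
        return;
    n->st = STE_UNUSED;
    node_put(n);
}

int main(void)
{
    struct node *n = malloc(sizeof(*n));

    n->st = STE_ACTIVE;
    n->refcnt = 2;        /* list ref plus one outstanding user       */
    drop_node(n);
    drop_node(n);         /* second drop is a no-op, no double put    */
    node_put(n);          /* the outstanding user finishes            */
    return 0;
}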
@@ -1868,8 +1900,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1868 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 1900 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
1869 mbox->vport = vport; 1901 mbox->vport = vport;
1870 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1902 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1871 rc = lpfc_sli_issue_mbox(phba, mbox, 1903 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1872 (MBX_NOWAIT | MBX_STOP_IOCB));
1873 if (rc == MBX_NOT_FINISHED) 1904 if (rc == MBX_NOT_FINISHED)
1874 mempool_free(mbox, phba->mbox_mem_pool); 1905 mempool_free(mbox, phba->mbox_mem_pool);
1875 } 1906 }
@@ -1892,8 +1923,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1892 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); 1923 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1893 mbox->vport = vport; 1924 mbox->vport = vport;
1894 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1925 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1895 rc = lpfc_sli_issue_mbox(phba, mbox, 1926 mbox->context1 = NULL;
1896 (MBX_NOWAIT | MBX_STOP_IOCB)); 1927 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1897 if (rc == MBX_NOT_FINISHED) { 1928 if (rc == MBX_NOT_FINISHED) {
1898 mempool_free(mbox, phba->mbox_mem_pool); 1929 mempool_free(mbox, phba->mbox_mem_pool);
1899 } 1930 }
@@ -1912,8 +1943,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1912 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox); 1943 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1913 mbox->vport = vport; 1944 mbox->vport = vport;
1914 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1945 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1915 rc = lpfc_sli_issue_mbox(phba, mbox, 1946 mbox->context1 = NULL;
1916 (MBX_NOWAIT | MBX_STOP_IOCB)); 1947 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1917 if (rc == MBX_NOT_FINISHED) { 1948 if (rc == MBX_NOT_FINISHED) {
1918 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 1949 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1919 "1815 Could not issue " 1950 "1815 Could not issue "
@@ -1981,11 +2012,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1981 if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) 2012 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
1982 list_del_init(&ndlp->dev_loss_evt.evt_listp); 2013 list_del_init(&ndlp->dev_loss_evt.evt_listp);
1983 2014
1984 if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) {
1985 list_del_init(&ndlp->dev_loss_evt.evt_listp);
1986 complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2));
1987 }
1988
1989 lpfc_unreg_rpi(vport, ndlp); 2015 lpfc_unreg_rpi(vport, ndlp);
1990 2016
1991 return 0; 2017 return 0;
@@ -1999,12 +2025,39 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1999static void 2025static void
2000lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2026lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2001{ 2027{
2028 struct lpfc_hba *phba = vport->phba;
2002 struct lpfc_rport_data *rdata; 2029 struct lpfc_rport_data *rdata;
2030 LPFC_MBOXQ_t *mbox;
2031 int rc;
2003 2032
2004 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 2033 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
2005 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2034 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2006 } 2035 }
2007 2036
 2037 if ((ndlp->nlp_flag & NLP_DEFER_RM) && !ndlp->nlp_rpi) {
2038 /* For this case we need to cleanup the default rpi
2039 * allocated by the firmware.
2040 */
2041 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
2042 != NULL) {
2043 rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
2044 (uint8_t *) &vport->fc_sparam, mbox, 0);
2045 if (rc) {
2046 mempool_free(mbox, phba->mbox_mem_pool);
2047 }
2048 else {
2049 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
2050 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
2051 mbox->vport = vport;
2052 mbox->context2 = NULL;
 2053 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2054 if (rc == MBX_NOT_FINISHED) {
2055 mempool_free(mbox, phba->mbox_mem_pool);
2056 }
2057 }
2058 }
2059 }
2060
2008 lpfc_cleanup_node(vport, ndlp); 2061 lpfc_cleanup_node(vport, ndlp);
2009 2062
2010 /* 2063 /*
@@ -2132,6 +2185,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2132 } 2185 }
2133 if (vport->fc_flag & FC_RSCN_MODE) { 2186 if (vport->fc_flag & FC_RSCN_MODE) {
2134 if (lpfc_rscn_payload_check(vport, did)) { 2187 if (lpfc_rscn_payload_check(vport, did)) {
 2188 /* If we've already received a PLOGI from this NPort,
 2189 * we don't need to try to discover it again.
2190 */
2191 if (ndlp->nlp_flag & NLP_RCV_PLOGI)
2192 return NULL;
2193
2135 spin_lock_irq(shost->host_lock); 2194 spin_lock_irq(shost->host_lock);
2136 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2195 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2137 spin_unlock_irq(shost->host_lock); 2196 spin_unlock_irq(shost->host_lock);
@@ -2144,8 +2203,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2144 } else 2203 } else
2145 ndlp = NULL; 2204 ndlp = NULL;
2146 } else { 2205 } else {
 2206 /* If we've already received a PLOGI from this NPort,
2207 * or we are already in the process of discovery on it,
2208 * we don't need to try to discover it again.
2209 */
2147 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || 2210 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
2148 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) 2211 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2212 ndlp->nlp_flag & NLP_RCV_PLOGI)
2149 return NULL; 2213 return NULL;
2150 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2214 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2151 spin_lock_irq(shost->host_lock); 2215 spin_lock_irq(shost->host_lock);
@@ -2220,8 +2284,7 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2220 lpfc_clear_la(phba, mbox); 2284 lpfc_clear_la(phba, mbox);
2221 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; 2285 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2222 mbox->vport = vport; 2286 mbox->vport = vport;
2223 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | 2287 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2224 MBX_STOP_IOCB));
2225 if (rc == MBX_NOT_FINISHED) { 2288 if (rc == MBX_NOT_FINISHED) {
2226 mempool_free(mbox, phba->mbox_mem_pool); 2289 mempool_free(mbox, phba->mbox_mem_pool);
2227 lpfc_disc_flush_list(vport); 2290 lpfc_disc_flush_list(vport);
@@ -2244,8 +2307,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2244 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); 2307 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2245 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 2308 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2246 regvpimbox->vport = vport; 2309 regvpimbox->vport = vport;
2247 if (lpfc_sli_issue_mbox(phba, regvpimbox, 2310 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
2248 (MBX_NOWAIT | MBX_STOP_IOCB))
2249 == MBX_NOT_FINISHED) { 2311 == MBX_NOT_FINISHED) {
2250 mempool_free(regvpimbox, phba->mbox_mem_pool); 2312 mempool_free(regvpimbox, phba->mbox_mem_pool);
2251 } 2313 }
@@ -2414,7 +2476,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2414 } 2476 }
2415} 2477}
2416 2478
2417void 2479static void
2418lpfc_disc_flush_list(struct lpfc_vport *vport) 2480lpfc_disc_flush_list(struct lpfc_vport *vport)
2419{ 2481{
2420 struct lpfc_nodelist *ndlp, *next_ndlp; 2482 struct lpfc_nodelist *ndlp, *next_ndlp;
@@ -2426,7 +2488,6 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
2426 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 2488 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2427 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { 2489 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
2428 lpfc_free_tx(phba, ndlp); 2490 lpfc_free_tx(phba, ndlp);
2429 lpfc_nlp_put(ndlp);
2430 } 2491 }
2431 } 2492 }
2432 } 2493 }
@@ -2516,6 +2577,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2516 if (ndlp->nlp_type & NLP_FABRIC) { 2577 if (ndlp->nlp_type & NLP_FABRIC) {
2517 /* Clean up the ndlp on Fabric connections */ 2578 /* Clean up the ndlp on Fabric connections */
2518 lpfc_drop_node(vport, ndlp); 2579 lpfc_drop_node(vport, ndlp);
2580
2519 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 2581 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2520 /* Fail outstanding IO now since device 2582 /* Fail outstanding IO now since device
2521 * is marked for PLOGI. 2583 * is marked for PLOGI.
@@ -2524,9 +2586,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2524 } 2586 }
2525 } 2587 }
2526 if (vport->port_state != LPFC_FLOGI) { 2588 if (vport->port_state != LPFC_FLOGI) {
2527 vport->port_state = LPFC_FLOGI;
2528 lpfc_set_disctmo(vport);
2529 lpfc_initial_flogi(vport); 2589 lpfc_initial_flogi(vport);
2590 return;
2530 } 2591 }
2531 break; 2592 break;
2532 2593
@@ -2536,7 +2597,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2536 /* Initial FLOGI timeout */ 2597 /* Initial FLOGI timeout */
2537 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2598 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2538 "0222 Initial %s timeout\n", 2599 "0222 Initial %s timeout\n",
2539 vport->vpi ? "FLOGI" : "FDISC"); 2600 vport->vpi ? "FDISC" : "FLOGI");
2540 2601
2541 /* Assume no Fabric and go on with discovery. 2602 /* Assume no Fabric and go on with discovery.
2542 * Check for outstanding ELS FLOGI to abort. 2603 * Check for outstanding ELS FLOGI to abort.
@@ -2558,10 +2619,10 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2558 /* Next look for NameServer ndlp */ 2619 /* Next look for NameServer ndlp */
2559 ndlp = lpfc_findnode_did(vport, NameServer_DID); 2620 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2560 if (ndlp) 2621 if (ndlp)
2561 lpfc_nlp_put(ndlp); 2622 lpfc_els_abort(phba, ndlp);
2562 /* Start discovery */ 2623
 2563 lpfc_disc_start(vport); 2624 /* Restart discovery */
2564 break; 2625 goto restart_disc;
2565 2626
2566 case LPFC_NS_QRY: 2627 case LPFC_NS_QRY:
2567 /* Check for wait for NameServer Rsp timeout */ 2628 /* Check for wait for NameServer Rsp timeout */
@@ -2580,6 +2641,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2580 } 2641 }
2581 vport->fc_ns_retry = 0; 2642 vport->fc_ns_retry = 0;
2582 2643
2644restart_disc:
2583 /* 2645 /*
2584 * Discovery is over. 2646 * Discovery is over.
2585 * set port_state to PORT_READY if SLI2. 2647 * set port_state to PORT_READY if SLI2.
@@ -2608,8 +2670,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2608 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 2670 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2609 initlinkmbox->vport = vport; 2671 initlinkmbox->vport = vport;
2610 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2672 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2611 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, 2673 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
2612 (MBX_NOWAIT | MBX_STOP_IOCB));
2613 lpfc_set_loopback_flag(phba); 2674 lpfc_set_loopback_flag(phba);
2614 if (rc == MBX_NOT_FINISHED) 2675 if (rc == MBX_NOT_FINISHED)
2615 mempool_free(initlinkmbox, phba->mbox_mem_pool); 2676 mempool_free(initlinkmbox, phba->mbox_mem_pool);
@@ -2664,12 +2725,14 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2664 clrlaerr = 1; 2725 clrlaerr = 1;
2665 break; 2726 break;
2666 2727
2728 case LPFC_LINK_UP:
2729 lpfc_issue_clear_la(phba, vport);
2730 /* Drop thru */
2667 case LPFC_LINK_UNKNOWN: 2731 case LPFC_LINK_UNKNOWN:
2668 case LPFC_WARM_START: 2732 case LPFC_WARM_START:
2669 case LPFC_INIT_START: 2733 case LPFC_INIT_START:
2670 case LPFC_INIT_MBX_CMDS: 2734 case LPFC_INIT_MBX_CMDS:
2671 case LPFC_LINK_DOWN: 2735 case LPFC_LINK_DOWN:
2672 case LPFC_LINK_UP:
2673 case LPFC_HBA_ERROR: 2736 case LPFC_HBA_ERROR:
2674 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2737 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2675 "0230 Unexpected timeout, hba link " 2738 "0230 Unexpected timeout, hba link "
@@ -2723,7 +2786,9 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2723 else 2786 else
2724 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); 2787 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
2725 2788
2726 /* Mailbox took a reference to the node */ 2789 /* decrement the node reference count held for this callback
2790 * function.
2791 */
2727 lpfc_nlp_put(ndlp); 2792 lpfc_nlp_put(ndlp);
2728 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2793 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2729 kfree(mp); 2794 kfree(mp);
@@ -2747,19 +2812,19 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2747 sizeof(ndlp->nlp_portname)) == 0; 2812 sizeof(ndlp->nlp_portname)) == 0;
2748} 2813}
2749 2814
2750struct lpfc_nodelist * 2815static struct lpfc_nodelist *
2751__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) 2816__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2752{ 2817{
2753 struct lpfc_nodelist *ndlp; 2818 struct lpfc_nodelist *ndlp;
2754 2819
2755 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 2820 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2756 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE && 2821 if (filter(ndlp, param))
2757 filter(ndlp, param))
2758 return ndlp; 2822 return ndlp;
2759 } 2823 }
2760 return NULL; 2824 return NULL;
2761} 2825}
2762 2826
2827#if 0
2763/* 2828/*
2764 * Search node lists for a remote port matching filter criteria 2829 * Search node lists for a remote port matching filter criteria
2765 * Caller needs to hold host_lock before calling this routine. 2830 * Caller needs to hold host_lock before calling this routine.
@@ -2775,6 +2840,7 @@ lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2775 spin_unlock_irq(shost->host_lock); 2840 spin_unlock_irq(shost->host_lock);
2776 return ndlp; 2841 return ndlp;
2777} 2842}
2843#endif /* 0 */
2778 2844
2779/* 2845/*
2780 * This routine looks up the ndlp lists for the given RPI. If rpi found it 2846 * This routine looks up the ndlp lists for the given RPI. If rpi found it
@@ -2786,6 +2852,7 @@ __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2786 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); 2852 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
2787} 2853}
2788 2854
2855#if 0
2789struct lpfc_nodelist * 2856struct lpfc_nodelist *
2790lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) 2857lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2791{ 2858{
@@ -2797,6 +2864,7 @@ lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2797 spin_unlock_irq(shost->host_lock); 2864 spin_unlock_irq(shost->host_lock);
2798 return ndlp; 2865 return ndlp;
2799} 2866}
2867#endif /* 0 */
2800 2868
2801/* 2869/*
2802 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it 2870 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
@@ -2837,6 +2905,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2837 return; 2905 return;
2838} 2906}
2839 2907
 2908/* This routine releases all resources associated with a specific NPort's ndlp
 2909 * and returns the nodelist to the mempool.
2910 */
2840static void 2911static void
2841lpfc_nlp_release(struct kref *kref) 2912lpfc_nlp_release(struct kref *kref)
2842{ 2913{
@@ -2851,16 +2922,57 @@ lpfc_nlp_release(struct kref *kref)
2851 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); 2922 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
2852} 2923}
2853 2924
 2925/* This routine bumps the reference count for an ndlp structure to ensure
 2926 * that one discovery thread won't free an ndlp while another discovery thread
2927 * is using it.
2928 */
2854struct lpfc_nodelist * 2929struct lpfc_nodelist *
2855lpfc_nlp_get(struct lpfc_nodelist *ndlp) 2930lpfc_nlp_get(struct lpfc_nodelist *ndlp)
2856{ 2931{
2857 if (ndlp) 2932 if (ndlp) {
2933 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2934 "node get: did:x%x flg:x%x refcnt:x%x",
2935 ndlp->nlp_DID, ndlp->nlp_flag,
2936 atomic_read(&ndlp->kref.refcount));
2858 kref_get(&ndlp->kref); 2937 kref_get(&ndlp->kref);
2938 }
2859 return ndlp; 2939 return ndlp;
2860} 2940}
2861 2941
2942
 2943/* This routine decrements the reference count for an ndlp structure. If the
 2944 * count goes to 0, this indicates that the associated nodelist should be freed.
2945 */
2862int 2946int
2863lpfc_nlp_put(struct lpfc_nodelist *ndlp) 2947lpfc_nlp_put(struct lpfc_nodelist *ndlp)
2864{ 2948{
2949 if (ndlp) {
2950 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2951 "node put: did:x%x flg:x%x refcnt:x%x",
2952 ndlp->nlp_DID, ndlp->nlp_flag,
2953 atomic_read(&ndlp->kref.refcount));
2954 }
2865 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; 2955 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
2866} 2956}
2957
 2958/* This routine frees the specified nodelist if it is not in use
2959 * by any other discovery thread. This routine returns 1 if the ndlp
2960 * is not being used by anyone and has been freed. A return value of
2961 * 0 indicates it is being used by another discovery thread and the
2962 * refcount is left unchanged.
2963 */
2964int
2965lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
2966{
2967 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2968 "node not used: did:x%x flg:x%x refcnt:x%x",
2969 ndlp->nlp_DID, ndlp->nlp_flag,
2970 atomic_read(&ndlp->kref.refcount));
2971
2972 if (atomic_read(&ndlp->kref.refcount) == 1) {
2973 lpfc_nlp_put(ndlp);
2974 return 1;
2975 }
2976 return 0;
2977}
2978
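lpfc_nlp_not_used() above is a "free only if sole owner" idiom built on kref: when the observed count is exactly 1 the caller holds the last reference, so the put frees the node; otherwise the count is left untouched. Note that the read-then-put sequence is only safe because the driver's lifetime rules prevent a new reference appearing between the atomic_read() and the kref_put(); as a general-purpose primitive it would be racy. The idiom, reduced to a sketch with illustrative names:

#include <linux/kref.h>

struct obj {
	struct kref kref;
};

static void obj_release(struct kref *kref);	/* frees the containing obj */

/* Returns 1 if the caller held the only reference and the object was
 * freed, 0 if another holder remains and the count is unchanged.
 */
static int obj_free_if_unused(struct obj *o)
{
	if (atomic_read(&o->kref.refcount) == 1) {
		kref_put(&o->kref, obj_release);
		return 1;
	}
	return 0;
}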
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 451accd5564b..041f83e7634a 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -139,6 +139,9 @@ struct lpfc_sli_ct_request {
139 uint8_t len; 139 uint8_t len;
140 uint8_t symbname[255]; 140 uint8_t symbname[255];
141 } rsnn; 141 } rsnn;
142 struct da_id { /* For DA_ID requests */
143 uint32_t port_id;
144 } da_id;
142 struct rspn { /* For RSPN_ID requests */ 145 struct rspn { /* For RSPN_ID requests */
143 uint32_t PortId; 146 uint32_t PortId;
144 uint8_t len; 147 uint8_t len;
@@ -150,11 +153,7 @@ struct lpfc_sli_ct_request {
150 struct gff_acc { 153 struct gff_acc {
151 uint8_t fbits[128]; 154 uint8_t fbits[128];
152 } gff_acc; 155 } gff_acc;
153#ifdef __BIG_ENDIAN_BITFIELD
154#define FCP_TYPE_FEATURE_OFFSET 7 156#define FCP_TYPE_FEATURE_OFFSET 7
155#else /* __LITTLE_ENDIAN_BITFIELD */
156#define FCP_TYPE_FEATURE_OFFSET 4
157#endif
158 struct rff { 157 struct rff {
159 uint32_t PortId; 158 uint32_t PortId;
160 uint8_t reserved[2]; 159 uint8_t reserved[2];
@@ -177,6 +176,8 @@ struct lpfc_sli_ct_request {
177 sizeof(struct rnn)) 176 sizeof(struct rnn))
178#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ 177#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
179 sizeof(struct rsnn)) 178 sizeof(struct rsnn))
179#define DA_ID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
180 sizeof(struct da_id))
180#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ 181#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
181 sizeof(struct rspn)) 182 sizeof(struct rspn))
182 183
@@ -1228,7 +1229,8 @@ typedef struct { /* FireFly BIU registers */
1228#define HS_FFER3 0x20000000 /* Bit 29 */ 1229#define HS_FFER3 0x20000000 /* Bit 29 */
1229#define HS_FFER2 0x40000000 /* Bit 30 */ 1230#define HS_FFER2 0x40000000 /* Bit 30 */
1230#define HS_FFER1 0x80000000 /* Bit 31 */ 1231#define HS_FFER1 0x80000000 /* Bit 31 */
1231#define HS_FFERM 0xFF000000 /* Mask for error bits 31:24 */ 1232#define HS_CRIT_TEMP 0x00000100 /* Bit 8 */
1233#define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */
1232 1234
1233/* Host Control Register */ 1235/* Host Control Register */
1234 1236
@@ -1277,12 +1279,14 @@ typedef struct { /* FireFly BIU registers */
1277#define MBX_DEL_LD_ENTRY 0x1D 1279#define MBX_DEL_LD_ENTRY 0x1D
1278#define MBX_RUN_PROGRAM 0x1E 1280#define MBX_RUN_PROGRAM 0x1E
1279#define MBX_SET_MASK 0x20 1281#define MBX_SET_MASK 0x20
1280#define MBX_SET_SLIM 0x21 1282#define MBX_SET_VARIABLE 0x21
1281#define MBX_UNREG_D_ID 0x23 1283#define MBX_UNREG_D_ID 0x23
1282#define MBX_KILL_BOARD 0x24 1284#define MBX_KILL_BOARD 0x24
1283#define MBX_CONFIG_FARP 0x25 1285#define MBX_CONFIG_FARP 0x25
1284#define MBX_BEACON 0x2A 1286#define MBX_BEACON 0x2A
1285#define MBX_HEARTBEAT 0x31 1287#define MBX_HEARTBEAT 0x31
1288#define MBX_WRITE_VPARMS 0x32
1289#define MBX_ASYNCEVT_ENABLE 0x33
1286 1290
1287#define MBX_CONFIG_HBQ 0x7C 1291#define MBX_CONFIG_HBQ 0x7C
1288#define MBX_LOAD_AREA 0x81 1292#define MBX_LOAD_AREA 0x81
@@ -1297,7 +1301,7 @@ typedef struct { /* FireFly BIU registers */
1297#define MBX_REG_VNPID 0x96 1301#define MBX_REG_VNPID 0x96
1298#define MBX_UNREG_VNPID 0x97 1302#define MBX_UNREG_VNPID 0x97
1299 1303
1300#define MBX_FLASH_WR_ULA 0x98 1304#define MBX_WRITE_WWN 0x98
1301#define MBX_SET_DEBUG 0x99 1305#define MBX_SET_DEBUG 0x99
1302#define MBX_LOAD_EXP_ROM 0x9C 1306#define MBX_LOAD_EXP_ROM 0x9C
1303 1307
@@ -1344,6 +1348,7 @@ typedef struct { /* FireFly BIU registers */
1344 1348
1345/* SLI_2 IOCB Command Set */ 1349/* SLI_2 IOCB Command Set */
1346 1350
1351#define CMD_ASYNC_STATUS 0x7C
1347#define CMD_RCV_SEQUENCE64_CX 0x81 1352#define CMD_RCV_SEQUENCE64_CX 0x81
1348#define CMD_XMIT_SEQUENCE64_CR 0x82 1353#define CMD_XMIT_SEQUENCE64_CR 0x82
1349#define CMD_XMIT_SEQUENCE64_CX 0x83 1354#define CMD_XMIT_SEQUENCE64_CX 0x83
@@ -1368,6 +1373,7 @@ typedef struct { /* FireFly BIU registers */
1368#define CMD_FCP_TRECEIVE64_CX 0xA1 1373#define CMD_FCP_TRECEIVE64_CX 0xA1
1369#define CMD_FCP_TRSP64_CX 0xA3 1374#define CMD_FCP_TRSP64_CX 0xA3
1370 1375
1376#define CMD_QUE_XRI64_CX 0xB3
1371#define CMD_IOCB_RCV_SEQ64_CX 0xB5 1377#define CMD_IOCB_RCV_SEQ64_CX 0xB5
1372#define CMD_IOCB_RCV_ELS64_CX 0xB7 1378#define CMD_IOCB_RCV_ELS64_CX 0xB7
1373#define CMD_IOCB_RCV_CONT64_CX 0xBB 1379#define CMD_IOCB_RCV_CONT64_CX 0xBB
@@ -1406,6 +1412,8 @@ typedef struct { /* FireFly BIU registers */
1406#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ 1412#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
1407#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */ 1413#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */
1408 1414
1415#define TEMPERATURE_OFFSET 0xB0 /* Slim offset for critical temperature event */
1416
1409/* 1417/*
1410 * Begin Structure Definitions for Mailbox Commands 1418 * Begin Structure Definitions for Mailbox Commands
1411 */ 1419 */
@@ -2606,6 +2614,18 @@ typedef struct {
2606 uint32_t IPAddress; 2614 uint32_t IPAddress;
2607} CONFIG_FARP_VAR; 2615} CONFIG_FARP_VAR;
2608 2616
2617/* Structure for MB Command MBX_ASYNCEVT_ENABLE (0x33) */
2618
2619typedef struct {
2620#ifdef __BIG_ENDIAN_BITFIELD
2621 uint32_t rsvd:30;
 2622 uint32_t ring:2; /* Ring for ASYNC_EVENT iocb, Bits 0-1 */
 2623#else /* __LITTLE_ENDIAN_BITFIELD */
 2624 uint32_t ring:2; /* Ring for ASYNC_EVENT iocb, Bits 0-1 */
2625 uint32_t rsvd:30;
2626#endif
2627} ASYNCEVT_ENABLE_VAR;
2628
2609/* Union of all Mailbox Command types */ 2629/* Union of all Mailbox Command types */
2610#define MAILBOX_CMD_WSIZE 32 2630#define MAILBOX_CMD_WSIZE 32
2611#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t)) 2631#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
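The #ifdef'd member order in ASYNCEVT_ENABLE_VAR follows the kernel's bitfield convention: the compilers the kernel supports allocate bitfields from the most significant bit on big-endian targets and from the least significant bit on little-endian ones, so declaring the fields in opposite order keeps ring in bits 1:0 of the mailbox word either way. A small sketch of the equivalence against the raw word (the __BIG_ENDIAN_BITFIELD macro comes from asm/byteorder.h):

#include <linux/types.h>

union asyncevt_word {
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint32_t rsvd:30;	/* bits 31:2 */
		uint32_t ring:2;	/* bits 1:0 */
#else
		uint32_t ring:2;	/* bits 1:0 on little-endian too */
		uint32_t rsvd:30;
#endif
	} f;
	uint32_t word;	/* the raw value exchanged with the HBA */
};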
@@ -2645,6 +2665,7 @@ typedef union {
2645 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ 2665 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
2646 REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ 2666 REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
2647 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ 2667 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
 2668 ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /* cmd = 0x33 (CONFIG_ASYNC) */
2648} MAILVARIANTS; 2669} MAILVARIANTS;
2649 2670
2650/* 2671/*
@@ -2973,6 +2994,34 @@ typedef struct {
2973#endif 2994#endif
2974} RCV_ELS_REQ64; 2995} RCV_ELS_REQ64;
2975 2996
2997/* IOCB Command template for RCV_SEQ64 */
2998struct rcv_seq64 {
2999 struct ulp_bde64 elsReq;
3000 uint32_t hbq_1;
3001 uint32_t parmRo;
3002#ifdef __BIG_ENDIAN_BITFIELD
3003 uint32_t rctl:8;
3004 uint32_t type:8;
3005 uint32_t dfctl:8;
3006 uint32_t ls:1;
3007 uint32_t fs:1;
3008 uint32_t rsvd2:3;
3009 uint32_t si:1;
3010 uint32_t bc:1;
3011 uint32_t rsvd3:1;
3012#else /* __LITTLE_ENDIAN_BITFIELD */
3013 uint32_t rsvd3:1;
3014 uint32_t bc:1;
3015 uint32_t si:1;
3016 uint32_t rsvd2:3;
3017 uint32_t fs:1;
3018 uint32_t ls:1;
3019 uint32_t dfctl:8;
3020 uint32_t type:8;
3021 uint32_t rctl:8;
3022#endif
3023};
3024
2976/* IOCB Command template for all 64 bit FCP Initiator commands */ 3025/* IOCB Command template for all 64 bit FCP Initiator commands */
2977typedef struct { 3026typedef struct {
2978 ULP_BDL bdl; 3027 ULP_BDL bdl;
@@ -2987,6 +3036,21 @@ typedef struct {
2987 uint32_t fcpt_Length; /* transfer ready for IWRITE */ 3036 uint32_t fcpt_Length; /* transfer ready for IWRITE */
2988} FCPT_FIELDS64; 3037} FCPT_FIELDS64;
2989 3038
3039/* IOCB Command template for Async Status iocb commands */
3040typedef struct {
3041 uint32_t rsvd[4];
3042 uint32_t param;
3043#ifdef __BIG_ENDIAN_BITFIELD
3044 uint16_t evt_code; /* High order bits word 5 */
3045 uint16_t sub_ctxt_tag; /* Low order bits word 5 */
3046#else /* __LITTLE_ENDIAN_BITFIELD */
3047 uint16_t sub_ctxt_tag; /* High order bits word 5 */
3048 uint16_t evt_code; /* Low order bits word 5 */
3049#endif
3050} ASYNCSTAT_FIELDS;
3051#define ASYNC_TEMP_WARN 0x100
3052#define ASYNC_TEMP_SAFE 0x101
3053
2990/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7) 3054/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
2991 or CMD_IOCB_RCV_SEQ64_CX (0xB5) */ 3055 or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
2992 3056
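ASYNCSTAT_FIELDS gives the completion path a typed view of the async status IOCB: evt_code selects the event and param carries the payload, which for the two temperature codes is the sensor reading. A sketch of the kind of dispatch a ring handler might perform on these fields (the messages are illustrative, not the driver's):

#include <linux/kernel.h>

static void handle_async_status(uint16_t evt_code, uint32_t param)
{
	switch (evt_code) {
	case ASYNC_TEMP_WARN:	/* reading crossed the warning threshold */
		printk(KERN_WARNING "temperature warning, reading %u\n", param);
		break;
	case ASYNC_TEMP_SAFE:	/* reading returned to the normal range */
		printk(KERN_INFO "temperature back to normal, reading %u\n", param);
		break;
	default:		/* unknown event code: log and ignore */
		printk(KERN_INFO "unhandled async event x%x\n", evt_code);
		break;
	}
}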
@@ -3004,7 +3068,26 @@ struct rcv_sli3 {
3004 struct ulp_bde64 bde2; 3068 struct ulp_bde64 bde2;
3005}; 3069};
3006 3070
3071/* Structure used for a single HBQ entry */
3072struct lpfc_hbq_entry {
3073 struct ulp_bde64 bde;
3074 uint32_t buffer_tag;
3075};
3007 3076
3077/* IOCB Command template for QUE_XRI64_CX (0xB3) command */
3078typedef struct {
3079 struct lpfc_hbq_entry buff;
3080 uint32_t rsvd;
3081 uint32_t rsvd1;
3082} QUE_XRI64_CX_FIELDS;
3083
3084struct que_xri64cx_ext_fields {
3085 uint32_t iotag64_low;
3086 uint32_t iotag64_high;
3087 uint32_t ebde_count;
3088 uint32_t rsvd;
3089 struct lpfc_hbq_entry buff[5];
3090};
3008 3091
3009typedef struct _IOCB { /* IOCB structure */ 3092typedef struct _IOCB { /* IOCB structure */
3010 union { 3093 union {
@@ -3028,6 +3111,9 @@ typedef struct _IOCB { /* IOCB structure */
3028 XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */ 3111 XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */
3029 FCPI_FIELDS64 fcpi64; /* FCP 64 bit Initiator template */ 3112 FCPI_FIELDS64 fcpi64; /* FCP 64 bit Initiator template */
3030 FCPT_FIELDS64 fcpt64; /* FCP 64 bit target template */ 3113 FCPT_FIELDS64 fcpt64; /* FCP 64 bit target template */
3114 ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
3115 QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
3116 struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */
3031 3117
3032 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ 3118 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
3033 } un; 3119 } un;
@@ -3085,6 +3171,10 @@ typedef struct _IOCB { /* IOCB structure */
3085 3171
3086 union { 3172 union {
3087 struct rcv_sli3 rcvsli3; /* words 8 - 15 */ 3173 struct rcv_sli3 rcvsli3; /* words 8 - 15 */
3174
3175 /* words 8-31 used for que_xri_cx iocb */
3176 struct que_xri64cx_ext_fields que_xri64cx_ext_words;
3177
3088 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ 3178 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
3089 } unsli3; 3179 } unsli3;
3090 3180
@@ -3124,12 +3214,6 @@ typedef struct _IOCB { /* IOCB structure */
3124 3214
3125} IOCB_t; 3215} IOCB_t;
3126 3216
3127/* Structure used for a single HBQ entry */
3128struct lpfc_hbq_entry {
3129 struct ulp_bde64 bde;
3130 uint32_t buffer_tag;
3131};
3132
3133 3217
3134#define SLI1_SLIM_SIZE (4 * 1024) 3218#define SLI1_SLIM_SIZE (4 * 1024)
3135 3219
@@ -3172,6 +3256,8 @@ lpfc_is_LC_HBA(unsigned short device)
3172 (device == PCI_DEVICE_ID_BSMB) || 3256 (device == PCI_DEVICE_ID_BSMB) ||
3173 (device == PCI_DEVICE_ID_ZMID) || 3257 (device == PCI_DEVICE_ID_ZMID) ||
3174 (device == PCI_DEVICE_ID_ZSMB) || 3258 (device == PCI_DEVICE_ID_ZSMB) ||
3259 (device == PCI_DEVICE_ID_SAT_MID) ||
3260 (device == PCI_DEVICE_ID_SAT_SMB) ||
3175 (device == PCI_DEVICE_ID_RFLY)) 3261 (device == PCI_DEVICE_ID_RFLY))
3176 return 1; 3262 return 1;
3177 else 3263 else
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ecebdfa00470..3205f7488d1c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -212,6 +212,18 @@ out_free_mbox:
212 return 0; 212 return 0;
213} 213}
214 214
215/* Completion handler for config async event mailbox command. */
216static void
217lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
218{
219 if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
220 phba->temp_sensor_support = 1;
221 else
222 phba->temp_sensor_support = 0;
223 mempool_free(pmboxq, phba->mbox_mem_pool);
224 return;
225}
226
215/************************************************************************/ 227/************************************************************************/
216/* */ 228/* */
217/* lpfc_config_port_post */ 229/* lpfc_config_port_post */
@@ -234,6 +246,15 @@ lpfc_config_port_post(struct lpfc_hba *phba)
234 int i, j; 246 int i, j;
235 int rc; 247 int rc;
236 248
249 spin_lock_irq(&phba->hbalock);
250 /*
 251 * If the Config port completed correctly, the HBA is no
 252 * longer overheated.
253 */
254 if (phba->over_temp_state == HBA_OVER_TEMP)
255 phba->over_temp_state = HBA_NORMAL_TEMP;
256 spin_unlock_irq(&phba->hbalock);
257
237 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 258 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
238 if (!pmb) { 259 if (!pmb) {
239 phba->link_state = LPFC_HBA_ERROR; 260 phba->link_state = LPFC_HBA_ERROR;
@@ -343,7 +364,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
343 364
344 phba->link_state = LPFC_LINK_DOWN; 365 phba->link_state = LPFC_LINK_DOWN;
345 366
346 /* Only process IOCBs on ring 0 till hba_state is READY */ 367 /* Only process IOCBs on ELS ring till hba_state is READY */
347 if (psli->ring[psli->extra_ring].cmdringaddr) 368 if (psli->ring[psli->extra_ring].cmdringaddr)
348 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 369 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
349 if (psli->ring[psli->fcp_ring].cmdringaddr) 370 if (psli->ring[psli->fcp_ring].cmdringaddr)
@@ -409,7 +430,21 @@ lpfc_config_port_post(struct lpfc_hba *phba)
409 return -EIO; 430 return -EIO;
410 } 431 }
411 /* MBOX buffer will be freed in mbox compl */ 432 /* MBOX buffer will be freed in mbox compl */
433 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
434 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
435 pmb->mbox_cmpl = lpfc_config_async_cmpl;
436 pmb->vport = phba->pport;
437 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
412 438
439 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
440 lpfc_printf_log(phba,
441 KERN_ERR,
442 LOG_INIT,
443 "0456 Adapter failed to issue "
444 "ASYNCEVT_ENABLE mbox status x%x \n.",
445 rc);
446 mempool_free(pmb, phba->mbox_mem_pool);
447 }
413 return (0); 448 return (0);
414} 449}
415 450
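One detail worth flagging in the hunk above: the second mempool_alloc() result is passed straight to lpfc_config_async() without a NULL check. mempool_alloc() with GFP_KERNEL is documented not to fail in process context, so this is defensible, but it is inconsistent with the check on the first allocation earlier in this function. A variant matching that earlier style would be (a sketch, not the committed code):

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {	/* mirror the earlier allocation's handling */
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);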
@@ -449,6 +484,9 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
449 struct lpfc_sli *psli = &phba->sli; 484 struct lpfc_sli *psli = &phba->sli;
450 struct lpfc_sli_ring *pring; 485 struct lpfc_sli_ring *pring;
451 struct lpfc_dmabuf *mp, *next_mp; 486 struct lpfc_dmabuf *mp, *next_mp;
487 struct lpfc_iocbq *iocb;
488 IOCB_t *cmd = NULL;
489 LIST_HEAD(completions);
452 int i; 490 int i;
453 491
454 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 492 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
@@ -464,16 +502,42 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
464 } 502 }
465 } 503 }
466 504
505 spin_lock_irq(&phba->hbalock);
467 for (i = 0; i < psli->num_rings; i++) { 506 for (i = 0; i < psli->num_rings; i++) {
468 pring = &psli->ring[i]; 507 pring = &psli->ring[i];
508
509 /* At this point in time the HBA is either reset or DOA. Either
510 * way, nothing should be on txcmplq as it will NEVER complete.
511 */
512 list_splice_init(&pring->txcmplq, &completions);
513 pring->txcmplq_cnt = 0;
514 spin_unlock_irq(&phba->hbalock);
515
516 while (!list_empty(&completions)) {
517 iocb = list_get_first(&completions, struct lpfc_iocbq,
518 list);
519 cmd = &iocb->iocb;
520 list_del_init(&iocb->list);
521
522 if (!iocb->iocb_cmpl)
523 lpfc_sli_release_iocbq(phba, iocb);
524 else {
525 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
526 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
527 (iocb->iocb_cmpl) (phba, iocb, iocb);
528 }
529 }
530
469 lpfc_sli_abort_iocb_ring(phba, pring); 531 lpfc_sli_abort_iocb_ring(phba, pring);
532 spin_lock_irq(&phba->hbalock);
470 } 533 }
534 spin_unlock_irq(&phba->hbalock);
471 535
472 return 0; 536 return 0;
473} 537}
474 538
475/* HBA heart beat timeout handler */ 539/* HBA heart beat timeout handler */
476void 540static void
477lpfc_hb_timeout(unsigned long ptr) 541lpfc_hb_timeout(unsigned long ptr)
478{ 542{
479 struct lpfc_hba *phba; 543 struct lpfc_hba *phba;
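The txcmplq flush added to lpfc_hba_down_post() uses a standard kernel idiom: splice the whole list onto a private head while holding the lock, then walk the private copy with the lock dropped so each completion callback runs without the ring lock held. A generic sketch of the idiom, with illustrative types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
};

static void item_complete(struct item *it);	/* may sleep or take locks */

static void flush_pending(spinlock_t *lock, struct list_head *pending)
{
	LIST_HEAD(completions);
	struct item *it;

	spin_lock_irq(lock);
	list_splice_init(pending, &completions);	/* pending is now empty */
	spin_unlock_irq(lock);

	while (!list_empty(&completions)) {
		it = list_entry(completions.next, struct item, list);
		list_del_init(&it->list);
		item_complete(it);	/* safe: the lock is no longer held */
	}
}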
@@ -512,8 +576,10 @@ void
512lpfc_hb_timeout_handler(struct lpfc_hba *phba) 576lpfc_hb_timeout_handler(struct lpfc_hba *phba)
513{ 577{
514 LPFC_MBOXQ_t *pmboxq; 578 LPFC_MBOXQ_t *pmboxq;
579 struct lpfc_dmabuf *buf_ptr;
515 int retval; 580 int retval;
516 struct lpfc_sli *psli = &phba->sli; 581 struct lpfc_sli *psli = &phba->sli;
582 LIST_HEAD(completions);
517 583
518 if ((phba->link_state == LPFC_HBA_ERROR) || 584 if ((phba->link_state == LPFC_HBA_ERROR) ||
519 (phba->pport->load_flag & FC_UNLOADING) || 585 (phba->pport->load_flag & FC_UNLOADING) ||
@@ -540,49 +606,88 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
540 } 606 }
541 spin_unlock_irq(&phba->pport->work_port_lock); 607 spin_unlock_irq(&phba->pport->work_port_lock);
542 608
543 /* If there is no heart beat outstanding, issue a heartbeat command */ 609 if (phba->elsbuf_cnt &&
544 if (!phba->hb_outstanding) { 610 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
545 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); 611 spin_lock_irq(&phba->hbalock);
546 if (!pmboxq) { 612 list_splice_init(&phba->elsbuf, &completions);
547 mod_timer(&phba->hb_tmofunc, 613 phba->elsbuf_cnt = 0;
548 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 614 phba->elsbuf_prev_cnt = 0;
549 return; 615 spin_unlock_irq(&phba->hbalock);
616
617 while (!list_empty(&completions)) {
618 list_remove_head(&completions, buf_ptr,
619 struct lpfc_dmabuf, list);
620 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
621 kfree(buf_ptr);
550 } 622 }
623 }
624 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
551 625
552 lpfc_heart_beat(phba, pmboxq); 626 /* If there is no heart beat outstanding, issue a heartbeat command */
553 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 627 if (phba->cfg_enable_hba_heartbeat) {
554 pmboxq->vport = phba->pport; 628 if (!phba->hb_outstanding) {
555 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 629 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
630 if (!pmboxq) {
631 mod_timer(&phba->hb_tmofunc,
632 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
633 return;
634 }
556 635
557 if (retval != MBX_BUSY && retval != MBX_SUCCESS) { 636 lpfc_heart_beat(phba, pmboxq);
558 mempool_free(pmboxq, phba->mbox_mem_pool); 637 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
638 pmboxq->vport = phba->pport;
639 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
640
641 if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
642 mempool_free(pmboxq, phba->mbox_mem_pool);
643 mod_timer(&phba->hb_tmofunc,
644 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
645 return;
646 }
559 mod_timer(&phba->hb_tmofunc, 647 mod_timer(&phba->hb_tmofunc,
560 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 648 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
649 phba->hb_outstanding = 1;
561 return; 650 return;
651 } else {
652 /*
 653 * If the heart beat timeout fires with hb_outstanding set,
 654 * we need to take the HBA offline.
655 */
656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
657 "0459 Adapter heartbeat failure, "
658 "taking this port offline.\n");
659
660 spin_lock_irq(&phba->hbalock);
661 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
662 spin_unlock_irq(&phba->hbalock);
663
664 lpfc_offline_prep(phba);
665 lpfc_offline(phba);
666 lpfc_unblock_mgmt_io(phba);
667 phba->link_state = LPFC_HBA_ERROR;
668 lpfc_hba_down_post(phba);
562 } 669 }
563 mod_timer(&phba->hb_tmofunc, 670 }
564 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 671}
565 phba->hb_outstanding = 1;
566 return;
567 } else {
568 /*
569 * If heart beat timeout called with hb_outstanding set we
570 * need to take the HBA offline.
571 */
572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
573 "0459 Adapter heartbeat failure, taking "
574 "this port offline.\n");
575 672
576 spin_lock_irq(&phba->hbalock); 673static void
577 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 674lpfc_offline_eratt(struct lpfc_hba *phba)
578 spin_unlock_irq(&phba->hbalock); 675{
676 struct lpfc_sli *psli = &phba->sli;
579 677
580 lpfc_offline_prep(phba); 678 spin_lock_irq(&phba->hbalock);
581 lpfc_offline(phba); 679 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
582 lpfc_unblock_mgmt_io(phba); 680 spin_unlock_irq(&phba->hbalock);
583 phba->link_state = LPFC_HBA_ERROR; 681 lpfc_offline_prep(phba);
584 lpfc_hba_down_post(phba); 682
585 } 683 lpfc_offline(phba);
684 lpfc_reset_barrier(phba);
685 lpfc_sli_brdreset(phba);
686 lpfc_hba_down_post(phba);
687 lpfc_sli_brdready(phba, HS_MBRDY);
688 lpfc_unblock_mgmt_io(phba);
689 phba->link_state = LPFC_HBA_ERROR;
690 return;
586} 691}
587 692
588/************************************************************************/ 693/************************************************************************/
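Two behavioral changes in the hunk above: the heartbeat mailbox is now issued only when cfg_enable_hba_heartbeat is set, and a reclaim pass frees the saved ELS buffers when elsbuf_cnt is nonzero and unchanged since the previous timer interval, i.e. when two consecutive samples are identical and the buffers are presumed stale. The sampling logic reduces to the following sketch:

/* Reclaim only when the queue was non-empty and did not move for a
 * whole interval; otherwise just remember this interval's sample.
 */
static void maybe_reclaim(unsigned int cnt, unsigned int *prev_cnt,
			  void (*reclaim_all)(void))
{
	if (cnt && cnt == *prev_cnt)
		reclaim_all();
	*prev_cnt = cnt;
}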
@@ -601,6 +706,8 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
601 struct lpfc_sli_ring *pring; 706 struct lpfc_sli_ring *pring;
602 struct lpfc_vport **vports; 707 struct lpfc_vport **vports;
603 uint32_t event_data; 708 uint32_t event_data;
709 unsigned long temperature;
710 struct temp_event temp_event_data;
604 struct Scsi_Host *shost; 711 struct Scsi_Host *shost;
605 int i; 712 int i;
606 713
@@ -608,6 +715,9 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
608 * since we cannot communicate with the pci card anyway. */ 715 * since we cannot communicate with the pci card anyway. */
609 if (pci_channel_offline(phba->pcidev)) 716 if (pci_channel_offline(phba->pcidev))
610 return; 717 return;
718 /* If resets are disabled then leave the HBA alone and return */
719 if (!phba->cfg_enable_hba_reset)
720 return;
611 721
612 if (phba->work_hs & HS_FFER6 || 722 if (phba->work_hs & HS_FFER6 ||
613 phba->work_hs & HS_FFER5) { 723 phba->work_hs & HS_FFER5) {
@@ -620,14 +730,14 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
620 vports = lpfc_create_vport_work_array(phba); 730 vports = lpfc_create_vport_work_array(phba);
621 if (vports != NULL) 731 if (vports != NULL)
622 for(i = 0; 732 for(i = 0;
623 i < LPFC_MAX_VPORTS && vports[i] != NULL; 733 i <= phba->max_vpi && vports[i] != NULL;
624 i++){ 734 i++){
625 shost = lpfc_shost_from_vport(vports[i]); 735 shost = lpfc_shost_from_vport(vports[i]);
626 spin_lock_irq(shost->host_lock); 736 spin_lock_irq(shost->host_lock);
627 vports[i]->fc_flag |= FC_ESTABLISH_LINK; 737 vports[i]->fc_flag |= FC_ESTABLISH_LINK;
628 spin_unlock_irq(shost->host_lock); 738 spin_unlock_irq(shost->host_lock);
629 } 739 }
630 lpfc_destroy_vport_work_array(vports); 740 lpfc_destroy_vport_work_array(phba, vports);
631 spin_lock_irq(&phba->hbalock); 741 spin_lock_irq(&phba->hbalock);
632 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 742 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
633 spin_unlock_irq(&phba->hbalock); 743 spin_unlock_irq(&phba->hbalock);
@@ -655,6 +765,31 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
655 return; 765 return;
656 } 766 }
657 lpfc_unblock_mgmt_io(phba); 767 lpfc_unblock_mgmt_io(phba);
768 } else if (phba->work_hs & HS_CRIT_TEMP) {
769 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
770 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
771 temp_event_data.event_code = LPFC_CRIT_TEMP;
772 temp_event_data.data = (uint32_t)temperature;
773
774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
775 "0459 Adapter maximum temperature exceeded "
776 "(%ld), taking this port offline "
777 "Data: x%x x%x x%x\n",
778 temperature, phba->work_hs,
779 phba->work_status[0], phba->work_status[1]);
780
781 shost = lpfc_shost_from_vport(phba->pport);
782 fc_host_post_vendor_event(shost, fc_get_event_number(),
783 sizeof(temp_event_data),
784 (char *) &temp_event_data,
785 SCSI_NL_VID_TYPE_PCI
786 | PCI_VENDOR_ID_EMULEX);
787
788 spin_lock_irq(&phba->hbalock);
789 phba->over_temp_state = HBA_OVER_TEMP;
790 spin_unlock_irq(&phba->hbalock);
791 lpfc_offline_eratt(phba);
792
658 } else { 793 } else {
659 /* The if clause above forces this code path when the status 794 /* The if clause above forces this code path when the status
660 * failure is a value other than FFER6. Do not call the offline 795 * failure is a value other than FFER6. Do not call the offline
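The over-temperature path reuses the FC transport's vendor-event channel: the payload is the driver's struct temp_event and the vendor id tags it as an Emulex PCI event so userspace listeners on the FC netlink socket can decode it. (Note in passing that message number 0459 here collides with the heartbeat-failure message above; lpfc log numbers are normally unique, so one of the two presumably wants renumbering.) The posting step, factored into a sketch:

static void post_temp_event(struct Scsi_Host *shost, uint32_t reading,
			    uint32_t code)
{
	struct temp_event ev;

	ev.event_type = FC_REG_TEMPERATURE_EVENT;
	ev.event_code = code;		/* e.g. LPFC_CRIT_TEMP */
	ev.data = reading;		/* raw sensor value read from SLIM */

	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(ev), (char *) &ev,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}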
@@ -672,14 +807,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
672 sizeof(event_data), (char *) &event_data, 807 sizeof(event_data), (char *) &event_data,
673 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 808 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
674 809
675 spin_lock_irq(&phba->hbalock); 810 lpfc_offline_eratt(phba);
676 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
677 spin_unlock_irq(&phba->hbalock);
678 lpfc_offline_prep(phba);
679 lpfc_offline(phba);
680 lpfc_unblock_mgmt_io(phba);
681 phba->link_state = LPFC_HBA_ERROR;
682 lpfc_hba_down_post(phba);
683 } 811 }
684} 812}
685 813
@@ -699,21 +827,25 @@ lpfc_handle_latt(struct lpfc_hba *phba)
699 LPFC_MBOXQ_t *pmb; 827 LPFC_MBOXQ_t *pmb;
700 volatile uint32_t control; 828 volatile uint32_t control;
701 struct lpfc_dmabuf *mp; 829 struct lpfc_dmabuf *mp;
702 int rc = -ENOMEM; 830 int rc = 0;
703 831
704 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 832 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
705 if (!pmb) 833 if (!pmb) {
834 rc = 1;
706 goto lpfc_handle_latt_err_exit; 835 goto lpfc_handle_latt_err_exit;
836 }
707 837
708 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 838 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
709 if (!mp) 839 if (!mp) {
840 rc = 2;
710 goto lpfc_handle_latt_free_pmb; 841 goto lpfc_handle_latt_free_pmb;
842 }
711 843
712 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 844 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
713 if (!mp->virt) 845 if (!mp->virt) {
846 rc = 3;
714 goto lpfc_handle_latt_free_mp; 847 goto lpfc_handle_latt_free_mp;
715 848 }
716 rc = -EIO;
717 849
718 /* Cleanup any outstanding ELS commands */ 850 /* Cleanup any outstanding ELS commands */
719 lpfc_els_flush_all_cmd(phba); 851 lpfc_els_flush_all_cmd(phba);
@@ -722,9 +854,11 @@ lpfc_handle_latt(struct lpfc_hba *phba)
722 lpfc_read_la(phba, pmb, mp); 854 lpfc_read_la(phba, pmb, mp);
723 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 855 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
724 pmb->vport = vport; 856 pmb->vport = vport;
725 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); 857 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
726 if (rc == MBX_NOT_FINISHED) 858 if (rc == MBX_NOT_FINISHED) {
859 rc = 4;
727 goto lpfc_handle_latt_free_mbuf; 860 goto lpfc_handle_latt_free_mbuf;
861 }
728 862
729 /* Clear Link Attention in HA REG */ 863 /* Clear Link Attention in HA REG */
730 spin_lock_irq(&phba->hbalock); 864 spin_lock_irq(&phba->hbalock);
@@ -756,10 +890,8 @@ lpfc_handle_latt_err_exit:
756 lpfc_linkdown(phba); 890 lpfc_linkdown(phba);
757 phba->link_state = LPFC_HBA_ERROR; 891 phba->link_state = LPFC_HBA_ERROR;
758 892
759 /* The other case is an error from issue_mbox */ 893 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
760 if (rc == -ENOMEM) 894 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
761 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
762 "0300 READ_LA: no buffers\n");
763 895
764 return; 896 return;
765} 897}
@@ -1088,9 +1220,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
1088 /* Allocate buffer to post */ 1220 /* Allocate buffer to post */
1089 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1221 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1090 if (mp1) 1222 if (mp1)
1091 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1223 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1092 &mp1->phys); 1224 if (!mp1 || !mp1->virt) {
1093 if (mp1 == 0 || mp1->virt == 0) {
1094 kfree(mp1); 1225 kfree(mp1);
1095 lpfc_sli_release_iocbq(phba, iocb); 1226 lpfc_sli_release_iocbq(phba, iocb);
1096 pring->missbufcnt = cnt; 1227 pring->missbufcnt = cnt;
@@ -1104,7 +1235,7 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
1104 if (mp2) 1235 if (mp2)
1105 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1236 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1106 &mp2->phys); 1237 &mp2->phys);
1107 if (mp2 == 0 || mp2->virt == 0) { 1238 if (!mp2 || !mp2->virt) {
1108 kfree(mp2); 1239 kfree(mp2);
1109 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1240 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1110 kfree(mp1); 1241 kfree(mp1);
@@ -1280,15 +1411,39 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1280 kfree(HashWorking); 1411 kfree(HashWorking);
1281} 1412}
1282 1413
1283static void 1414void
1284lpfc_cleanup(struct lpfc_vport *vport) 1415lpfc_cleanup(struct lpfc_vport *vport)
1285{ 1416{
1417 struct lpfc_hba *phba = vport->phba;
1286 struct lpfc_nodelist *ndlp, *next_ndlp; 1418 struct lpfc_nodelist *ndlp, *next_ndlp;
1419 int i = 0;
1287 1420
1288 /* clean up phba - lpfc specific */ 1421 if (phba->link_state > LPFC_LINK_DOWN)
1289 lpfc_can_disctmo(vport); 1422 lpfc_port_link_failure(vport);
1290 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) 1423
1291 lpfc_nlp_put(ndlp); 1424 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1425 if (ndlp->nlp_type & NLP_FABRIC)
1426 lpfc_disc_state_machine(vport, ndlp, NULL,
1427 NLP_EVT_DEVICE_RECOVERY);
1428 lpfc_disc_state_machine(vport, ndlp, NULL,
1429 NLP_EVT_DEVICE_RM);
1430 }
1431
 1432 /* At this point, ALL ndlps should be gone
 1433 * because of the previous NLP_EVT_DEVICE_RM.
 1434 * Let's wait for this to happen, if needed.
1435 */
1436 while (!list_empty(&vport->fc_nodes)) {
1437
1438 if (i++ > 3000) {
1439 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1440 "0233 Nodelist not empty\n");
1441 break;
1442 }
1443
1444 /* Wait for any activity on ndlps to settle */
1445 msleep(10);
1446 }
1292 return; 1447 return;
1293} 1448}
1294 1449
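The rewritten lpfc_cleanup() no longer drops node references directly; it pushes every node through the discovery state machine with DEVICE_RM and then polls for the node list to drain, giving up after roughly 30 seconds (3000 iterations of msleep(10)) so that a leaked reference stalls unload with a log message instead of hanging it forever. The drain loop as a standalone sketch:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>

/* Returns 0 once the list empties, -ETIMEDOUT after ~30 seconds. */
static int wait_for_list_drain(struct list_head *head)
{
	int i = 0;

	while (!list_empty(head)) {
		if (i++ > 3000)
			return -ETIMEDOUT;	/* a reference was leaked */
		msleep(10);	/* let other threads finish with the nodes */
	}
	return 0;
}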
@@ -1307,14 +1462,14 @@ lpfc_establish_link_tmo(unsigned long ptr)
1307 phba->pport->fc_flag, phba->pport->port_state); 1462 phba->pport->fc_flag, phba->pport->port_state);
1308 vports = lpfc_create_vport_work_array(phba); 1463 vports = lpfc_create_vport_work_array(phba);
1309 if (vports != NULL) 1464 if (vports != NULL)
1310 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { 1465 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1311 struct Scsi_Host *shost; 1466 struct Scsi_Host *shost;
1312 shost = lpfc_shost_from_vport(vports[i]); 1467 shost = lpfc_shost_from_vport(vports[i]);
1313 spin_lock_irqsave(shost->host_lock, iflag); 1468 spin_lock_irqsave(shost->host_lock, iflag);
1314 vports[i]->fc_flag &= ~FC_ESTABLISH_LINK; 1469 vports[i]->fc_flag &= ~FC_ESTABLISH_LINK;
1315 spin_unlock_irqrestore(shost->host_lock, iflag); 1470 spin_unlock_irqrestore(shost->host_lock, iflag);
1316 } 1471 }
1317 lpfc_destroy_vport_work_array(vports); 1472 lpfc_destroy_vport_work_array(phba, vports);
1318} 1473}
1319 1474
1320void 1475void
@@ -1339,6 +1494,16 @@ lpfc_stop_phba_timers(struct lpfc_hba *phba)
1339 return; 1494 return;
1340} 1495}
1341 1496
1497static void
1498lpfc_block_mgmt_io(struct lpfc_hba * phba)
1499{
1500 unsigned long iflag;
1501
1502 spin_lock_irqsave(&phba->hbalock, iflag);
1503 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
1504 spin_unlock_irqrestore(&phba->hbalock, iflag);
1505}
1506
1342int 1507int
1343lpfc_online(struct lpfc_hba *phba) 1508lpfc_online(struct lpfc_hba *phba)
1344{ 1509{
@@ -1369,7 +1534,7 @@ lpfc_online(struct lpfc_hba *phba)
1369 1534
1370 vports = lpfc_create_vport_work_array(phba); 1535 vports = lpfc_create_vport_work_array(phba);
1371 if (vports != NULL) 1536 if (vports != NULL)
1372 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { 1537 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1373 struct Scsi_Host *shost; 1538 struct Scsi_Host *shost;
1374 shost = lpfc_shost_from_vport(vports[i]); 1539 shost = lpfc_shost_from_vport(vports[i]);
1375 spin_lock_irq(shost->host_lock); 1540 spin_lock_irq(shost->host_lock);
@@ -1378,23 +1543,13 @@ lpfc_online(struct lpfc_hba *phba)
1378 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1543 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1379 spin_unlock_irq(shost->host_lock); 1544 spin_unlock_irq(shost->host_lock);
1380 } 1545 }
1381 lpfc_destroy_vport_work_array(vports); 1546 lpfc_destroy_vport_work_array(phba, vports);
1382 1547
1383 lpfc_unblock_mgmt_io(phba); 1548 lpfc_unblock_mgmt_io(phba);
1384 return 0; 1549 return 0;
1385} 1550}
1386 1551
1387void 1552void
1388lpfc_block_mgmt_io(struct lpfc_hba * phba)
1389{
1390 unsigned long iflag;
1391
1392 spin_lock_irqsave(&phba->hbalock, iflag);
1393 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
1394 spin_unlock_irqrestore(&phba->hbalock, iflag);
1395}
1396
1397void
1398lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 1553lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
1399{ 1554{
1400 unsigned long iflag; 1555 unsigned long iflag;
@@ -1409,6 +1564,8 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1409{ 1564{
1410 struct lpfc_vport *vport = phba->pport; 1565 struct lpfc_vport *vport = phba->pport;
1411 struct lpfc_nodelist *ndlp, *next_ndlp; 1566 struct lpfc_nodelist *ndlp, *next_ndlp;
1567 struct lpfc_vport **vports;
1568 int i;
1412 1569
1413 if (vport->fc_flag & FC_OFFLINE_MODE) 1570 if (vport->fc_flag & FC_OFFLINE_MODE)
1414 return; 1571 return;
@@ -1417,10 +1574,34 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1417 1574
1418 lpfc_linkdown(phba); 1575 lpfc_linkdown(phba);
1419 1576
1420 /* Issue an unreg_login to all nodes */ 1577 /* Issue an unreg_login to all nodes on all vports */
1421 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) 1578 vports = lpfc_create_vport_work_array(phba);
1422 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) 1579 if (vports != NULL) {
1423 lpfc_unreg_rpi(vport, ndlp); 1580 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1581 struct Scsi_Host *shost;
1582
1583 if (vports[i]->load_flag & FC_UNLOADING)
1584 continue;
1585 shost = lpfc_shost_from_vport(vports[i]);
1586 list_for_each_entry_safe(ndlp, next_ndlp,
1587 &vports[i]->fc_nodes,
1588 nlp_listp) {
1589 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1590 continue;
1591 if (ndlp->nlp_type & NLP_FABRIC) {
1592 lpfc_disc_state_machine(vports[i], ndlp,
1593 NULL, NLP_EVT_DEVICE_RECOVERY);
1594 lpfc_disc_state_machine(vports[i], ndlp,
1595 NULL, NLP_EVT_DEVICE_RM);
1596 }
1597 spin_lock_irq(shost->host_lock);
1598 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1599 spin_unlock_irq(shost->host_lock);
1600 lpfc_unreg_rpi(vports[i], ndlp);
1601 }
1602 }
1603 }
1604 lpfc_destroy_vport_work_array(phba, vports);
1424 1605
1425 lpfc_sli_flush_mbox_queue(phba); 1606 lpfc_sli_flush_mbox_queue(phba);
1426} 1607}
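lpfc_offline_prep() now walks every vport rather than only the physical port, using the driver's snapshot idiom: lpfc_create_vport_work_array() returns a NULL-terminated array of at most max_vpi + 1 vports with a reference held on each, and lpfc_destroy_vport_work_array() (which now takes phba, per the signature change threaded through this patch) hands those references back. The idiom, factored into a sketch:

static void for_each_active_vport(struct lpfc_hba *phba,
				  void (*fn)(struct lpfc_vport *))
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
			fn(vports[i]);	/* per-vport work goes here */
	/* hands the snapshot back, releasing the per-vport references */
	lpfc_destroy_vport_work_array(phba, vports);
}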
@@ -1439,9 +1620,9 @@ lpfc_offline(struct lpfc_hba *phba)
1439 lpfc_stop_phba_timers(phba); 1620 lpfc_stop_phba_timers(phba);
1440 vports = lpfc_create_vport_work_array(phba); 1621 vports = lpfc_create_vport_work_array(phba);
1441 if (vports != NULL) 1622 if (vports != NULL)
1442 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) 1623 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
1443 lpfc_stop_vport_timers(vports[i]); 1624 lpfc_stop_vport_timers(vports[i]);
1444 lpfc_destroy_vport_work_array(vports); 1625 lpfc_destroy_vport_work_array(phba, vports);
1445 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1626 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1446 "0460 Bring Adapter offline\n"); 1627 "0460 Bring Adapter offline\n");
1447 /* Bring down the SLI Layer and cleanup. The HBA is offline 1628 /* Bring down the SLI Layer and cleanup. The HBA is offline
@@ -1452,15 +1633,14 @@ lpfc_offline(struct lpfc_hba *phba)
1452 spin_unlock_irq(&phba->hbalock); 1633 spin_unlock_irq(&phba->hbalock);
1453 vports = lpfc_create_vport_work_array(phba); 1634 vports = lpfc_create_vport_work_array(phba);
1454 if (vports != NULL) 1635 if (vports != NULL)
1455 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { 1636 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1456 shost = lpfc_shost_from_vport(vports[i]); 1637 shost = lpfc_shost_from_vport(vports[i]);
1457 lpfc_cleanup(vports[i]);
1458 spin_lock_irq(shost->host_lock); 1638 spin_lock_irq(shost->host_lock);
1459 vports[i]->work_port_events = 0; 1639 vports[i]->work_port_events = 0;
1460 vports[i]->fc_flag |= FC_OFFLINE_MODE; 1640 vports[i]->fc_flag |= FC_OFFLINE_MODE;
1461 spin_unlock_irq(shost->host_lock); 1641 spin_unlock_irq(shost->host_lock);
1462 } 1642 }
1463 lpfc_destroy_vport_work_array(vports); 1643 lpfc_destroy_vport_work_array(phba, vports);
1464} 1644}
1465 1645
1466/****************************************************************************** 1646/******************************************************************************
@@ -1674,6 +1854,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
1674 fc_host_supported_speeds(shost) = 0; 1854 fc_host_supported_speeds(shost) = 0;
1675 if (phba->lmt & LMT_10Gb) 1855 if (phba->lmt & LMT_10Gb)
1676 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 1856 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
1857 if (phba->lmt & LMT_8Gb)
1858 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
1677 if (phba->lmt & LMT_4Gb) 1859 if (phba->lmt & LMT_4Gb)
1678 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 1860 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
1679 if (phba->lmt & LMT_2Gb) 1861 if (phba->lmt & LMT_2Gb)
@@ -1707,13 +1889,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1707 struct Scsi_Host *shost = NULL; 1889 struct Scsi_Host *shost = NULL;
1708 void *ptr; 1890 void *ptr;
1709 unsigned long bar0map_len, bar2map_len; 1891 unsigned long bar0map_len, bar2map_len;
1710 int error = -ENODEV; 1892 int error = -ENODEV, retval;
1711 int i, hbq_count; 1893 int i, hbq_count;
1712 uint16_t iotag; 1894 uint16_t iotag;
1895 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
1713 1896
1714 if (pci_enable_device(pdev)) 1897 if (pci_enable_device_bars(pdev, bars))
1715 goto out; 1898 goto out;
1716 if (pci_request_regions(pdev, LPFC_DRIVER_NAME)) 1899 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
1717 goto out_disable_device; 1900 goto out_disable_device;
1718 1901
1719 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); 1902 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
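The probe path now enables and reserves only the memory BARs the driver actually maps: pci_select_bars(pdev, IORESOURCE_MEM) computes the BAR mask once, pci_enable_device_bars() and pci_request_selected_regions() act on that mask, and the error and remove paths release the same selection. (pci_enable_device_bars() is the API of this kernel generation; later kernels replaced it with pci_enable_device_mem().) The pairing, reduced to a sketch with a placeholder driver name:

static int mydrv_pci_setup(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_enable_device_bars(pdev, bars))
		return -ENODEV;
	if (pci_request_selected_regions(pdev, bars, "mydrv")) {
		pci_disable_device(pdev);
		return -EBUSY;
	}
	return 0;
}

static void mydrv_pci_teardown(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	pci_release_selected_regions(pdev, bars);	/* mirrors setup */
	pci_disable_device(pdev);
}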
@@ -1823,9 +2006,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1823 lpfc_sli_setup(phba); 2006 lpfc_sli_setup(phba);
1824 lpfc_sli_queue_setup(phba); 2007 lpfc_sli_queue_setup(phba);
1825 2008
1826 error = lpfc_mem_alloc(phba); 2009 retval = lpfc_mem_alloc(phba);
1827 if (error) 2010 if (retval) {
2011 error = retval;
1828 goto out_free_hbqslimp; 2012 goto out_free_hbqslimp;
2013 }
1829 2014
1830 /* Initialize and populate the iocb list per host. */ 2015 /* Initialize and populate the iocb list per host. */
1831 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 2016 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
@@ -1880,6 +2065,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1880 /* Initialize list of fabric iocbs */ 2065 /* Initialize list of fabric iocbs */
1881 INIT_LIST_HEAD(&phba->fabric_iocb_list); 2066 INIT_LIST_HEAD(&phba->fabric_iocb_list);
1882 2067
2068 /* Initialize list to save ELS buffers */
2069 INIT_LIST_HEAD(&phba->elsbuf);
2070
1883 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 2071 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
1884 if (!vport) 2072 if (!vport)
1885 goto out_kthread_stop; 2073 goto out_kthread_stop;
@@ -1891,8 +2079,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1891 pci_set_drvdata(pdev, shost); 2079 pci_set_drvdata(pdev, shost);
1892 2080
1893 if (phba->cfg_use_msi) { 2081 if (phba->cfg_use_msi) {
1894 error = pci_enable_msi(phba->pcidev); 2082 retval = pci_enable_msi(phba->pcidev);
1895 if (!error) 2083 if (!retval)
1896 phba->using_msi = 1; 2084 phba->using_msi = 1;
1897 else 2085 else
1898 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2086 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -1900,11 +2088,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1900 "with IRQ\n"); 2088 "with IRQ\n");
1901 } 2089 }
1902 2090
1903 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, 2091 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
1904 LPFC_DRIVER_NAME, phba); 2092 LPFC_DRIVER_NAME, phba);
1905 if (error) { 2093 if (retval) {
1906 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2094 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1907 "0451 Enable interrupt handler failed\n"); 2095 "0451 Enable interrupt handler failed\n");
2096 error = retval;
1908 goto out_disable_msi; 2097 goto out_disable_msi;
1909 } 2098 }
1910 2099
@@ -1914,11 +2103,15 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1914 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 2103 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1915 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 2104 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1916 2105
1917 if (lpfc_alloc_sysfs_attr(vport)) 2106 if (lpfc_alloc_sysfs_attr(vport)) {
2107 error = -ENOMEM;
1918 goto out_free_irq; 2108 goto out_free_irq;
2109 }
1919 2110
1920 if (lpfc_sli_hba_setup(phba)) 2111 if (lpfc_sli_hba_setup(phba)) {
2112 error = -ENODEV;
1921 goto out_remove_device; 2113 goto out_remove_device;
2114 }
1922 2115
1923 /* 2116 /*
1924 * hba setup may have changed the hba_queue_depth so we need to adjust 2117 * hba setup may have changed the hba_queue_depth so we need to adjust
@@ -1975,7 +2168,7 @@ out_idr_remove:
1975out_free_phba: 2168out_free_phba:
1976 kfree(phba); 2169 kfree(phba);
1977out_release_regions: 2170out_release_regions:
1978 pci_release_regions(pdev); 2171 pci_release_selected_regions(pdev, bars);
1979out_disable_device: 2172out_disable_device:
1980 pci_disable_device(pdev); 2173 pci_disable_device(pdev);
1981out: 2174out:
@@ -1991,6 +2184,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
1991 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2184 struct Scsi_Host *shost = pci_get_drvdata(pdev);
1992 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2185 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1993 struct lpfc_hba *phba = vport->phba; 2186 struct lpfc_hba *phba = vport->phba;
2187 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2188
1994 spin_lock_irq(&phba->hbalock); 2189 spin_lock_irq(&phba->hbalock);
1995 vport->load_flag |= FC_UNLOADING; 2190 vport->load_flag |= FC_UNLOADING;
1996 spin_unlock_irq(&phba->hbalock); 2191 spin_unlock_irq(&phba->hbalock);
@@ -1998,8 +2193,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
1998 kfree(vport->vname); 2193 kfree(vport->vname);
1999 lpfc_free_sysfs_attr(vport); 2194 lpfc_free_sysfs_attr(vport);
2000 2195
2196 kthread_stop(phba->worker_thread);
2197
2001 fc_remove_host(shost); 2198 fc_remove_host(shost);
2002 scsi_remove_host(shost); 2199 scsi_remove_host(shost);
2200 lpfc_cleanup(vport);
2201
2003 /* 2202 /*
2004 * Bring down the SLI Layer. This step disables all interrupts, 2203 * Bring down the SLI Layer. This step disables all interrupts,
2005 * clears the rings, discards all mailbox commands, and resets 2204 * clears the rings, discards all mailbox commands, and resets
@@ -2014,9 +2213,6 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
2014 spin_unlock_irq(&phba->hbalock); 2213 spin_unlock_irq(&phba->hbalock);
2015 2214
2016 lpfc_debugfs_terminate(vport); 2215 lpfc_debugfs_terminate(vport);
2017 lpfc_cleanup(vport);
2018
2019 kthread_stop(phba->worker_thread);
2020 2216
2021 /* Release the irq reservation */ 2217 /* Release the irq reservation */
2022 free_irq(phba->pcidev->irq, phba); 2218 free_irq(phba->pcidev->irq, phba);
@@ -2048,7 +2244,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
2048 2244
2049 kfree(phba); 2245 kfree(phba);
2050 2246
2051 pci_release_regions(pdev); 2247 pci_release_selected_regions(pdev, bars);
2052 pci_disable_device(pdev); 2248 pci_disable_device(pdev);
2053} 2249}
2054 2250
@@ -2239,12 +2435,22 @@ lpfc_init(void)
2239 printk(LPFC_MODULE_DESC "\n"); 2435 printk(LPFC_MODULE_DESC "\n");
2240 printk(LPFC_COPYRIGHT "\n"); 2436 printk(LPFC_COPYRIGHT "\n");
2241 2437
2438 if (lpfc_enable_npiv) {
2439 lpfc_transport_functions.vport_create = lpfc_vport_create;
2440 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
2441 }
2242 lpfc_transport_template = 2442 lpfc_transport_template =
2243 fc_attach_transport(&lpfc_transport_functions); 2443 fc_attach_transport(&lpfc_transport_functions);
2244 lpfc_vport_transport_template = 2444 if (lpfc_transport_template == NULL)
2245 fc_attach_transport(&lpfc_vport_transport_functions);
2246 if (!lpfc_transport_template || !lpfc_vport_transport_template)
2247 return -ENOMEM; 2445 return -ENOMEM;
2446 if (lpfc_enable_npiv) {
2447 lpfc_vport_transport_template =
2448 fc_attach_transport(&lpfc_vport_transport_functions);
2449 if (lpfc_vport_transport_template == NULL) {
2450 fc_release_transport(lpfc_transport_template);
2451 return -ENOMEM;
2452 }
2453 }
2248 error = pci_register_driver(&lpfc_driver); 2454 error = pci_register_driver(&lpfc_driver);
2249 if (error) { 2455 if (error) {
2250 fc_release_transport(lpfc_transport_template); 2456 fc_release_transport(lpfc_transport_template);
@@ -2259,7 +2465,8 @@ lpfc_exit(void)
2259{ 2465{
2260 pci_unregister_driver(&lpfc_driver); 2466 pci_unregister_driver(&lpfc_driver);
2261 fc_release_transport(lpfc_transport_template); 2467 fc_release_transport(lpfc_transport_template);
2262 fc_release_transport(lpfc_vport_transport_template); 2468 if (lpfc_enable_npiv)
2469 fc_release_transport(lpfc_vport_transport_template);
2263} 2470}
2264 2471
2265module_init(lpfc_init); 2472module_init(lpfc_init);
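
The lpfc_init.c changes above move the driver from whole-device PCI region ownership to selected-BAR ownership: only the memory BARs are enabled and reserved, so an I/O-port BAR that another driver (e.g. a VGA console) may own stays untouched. A minimal sketch of the same pattern for a hypothetical driver named "foo", using the pci_enable_device_bars() helper of this kernel generation; error handling trimmed:

	#include <linux/pci.h>

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		/* bitmask of the BARs that are memory resources */
		int bars = pci_select_bars(pdev, IORESOURCE_MEM);

		if (pci_enable_device_bars(pdev, bars))
			return -ENODEV;
		/* reserve only those BARs; I/O-port BARs stay free */
		if (pci_request_selected_regions(pdev, bars, "foo")) {
			pci_disable_device(pdev);
			return -EBUSY;
		}
		/* ... ioremap BARs, initialize the device ... */
		return 0;
	}

	static void foo_remove(struct pci_dev *pdev)
	{
		int bars = pci_select_bars(pdev, IORESOURCE_MEM);

		/* ... tear down the device ... */
		pci_release_selected_regions(pdev, bars);
		pci_disable_device(pdev);
	}

The same bars mask must be recomputed (or stored) for the remove path, as the lpfc_pci_remove_one() hunk above does.
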
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 626e4d878725..c5841d7565f7 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -26,6 +26,7 @@
26#define LOG_IP 0x20 /* IP traffic history */ 26#define LOG_IP 0x20 /* IP traffic history */
27#define LOG_FCP 0x40 /* FCP traffic history */ 27#define LOG_FCP 0x40 /* FCP traffic history */
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x80 /* Node table events */
29#define LOG_TEMP 0x100 /* Temperature sensor events */
29#define LOG_MISC 0x400 /* Miscellaneous events */ 30#define LOG_MISC 0x400 /* Miscellaneous events */
30#define LOG_SLI 0x800 /* SLI events */ 31#define LOG_SLI 0x800 /* SLI events */
31#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 32#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index a592733664e9..dfc63f6ccd7b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -82,6 +82,24 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
82} 82}
83 83
84/**********************************************/ 84/**********************************************/
85/* lpfc_config_async Issue a */
86/* MBX_ASYNC_EVT_ENABLE mailbox command */
87/**********************************************/
88void
89lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
90 uint32_t ring)
91{
92 MAILBOX_t *mb;
93
94 mb = &pmb->mb;
95 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
96 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
97 mb->un.varCfgAsyncEvent.ring = ring;
98 mb->mbxOwner = OWN_HOST;
99 return;
100}
101
102/**********************************************/
85/* lpfc_heart_beat Issue a HEART_BEAT */ 103/* lpfc_heart_beat Issue a HEART_BEAT */
86/* mailbox command */ 104/* mailbox command */
87/**********************************************/ 105/**********************************************/
@@ -270,8 +288,10 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
270 288
271 /* Get a buffer to hold the HBAs Service Parameters */ 289 /* Get a buffer to hold the HBAs Service Parameters */
272 290
273 if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) || 291 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
274 ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) { 292 if (mp)
293 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
294 if (!mp || !mp->virt) {
275 kfree(mp); 295 kfree(mp);
276 mb->mbxCommand = MBX_READ_SPARM64; 296 mb->mbxCommand = MBX_READ_SPARM64;
277 /* READ_SPARAM: no buffers */ 297 /* READ_SPARAM: no buffers */
@@ -369,8 +389,10 @@ lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
369 mb->mbxOwner = OWN_HOST; 389 mb->mbxOwner = OWN_HOST;
370 390
371 /* Get a buffer to hold NPorts Service Parameters */ 391 /* Get a buffer to hold NPorts Service Parameters */
372 if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == NULL) || 392 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
373 ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) { 393 if (mp)
394 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
395 if (!mp || !mp->virt) {
374 kfree(mp); 396 kfree(mp);
375 mb->mbxCommand = MBX_REG_LOGIN64; 397 mb->mbxCommand = MBX_REG_LOGIN64;
376 /* REG_LOGIN: no buffers */ 398 /* REG_LOGIN: no buffers */
@@ -874,7 +896,7 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
874 case MBX_DOWN_LOAD: /* 0x1C */ 896 case MBX_DOWN_LOAD: /* 0x1C */
875 case MBX_DEL_LD_ENTRY: /* 0x1D */ 897 case MBX_DEL_LD_ENTRY: /* 0x1D */
876 case MBX_LOAD_AREA: /* 0x81 */ 898 case MBX_LOAD_AREA: /* 0x81 */
877 case MBX_FLASH_WR_ULA: /* 0x98 */ 899 case MBX_WRITE_WWN: /* 0x98 */
878 case MBX_LOAD_EXP_ROM: /* 0x9C */ 900 case MBX_LOAD_EXP_ROM: /* 0x9C */
879 return LPFC_MBOX_TMO_FLASH_CMD; 901 return LPFC_MBOX_TMO_FLASH_CMD;
880 } 902 }
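
The lpfc_read_sparam() and lpfc_reg_login() hunks above untangle allocations that were fused into a single if() condition. The rewritten shape, sketched with hypothetical foo_* stand-ins for the lpfc helpers:

	#include <linux/slab.h>
	#include <linux/dma-mapping.h>

	struct foo_hba;				/* assumed driver context */

	struct foo_dmabuf {
		void *virt;
		dma_addr_t phys;
	};

	/* assumed DMA-pool-backed allocator, like lpfc_mbuf_alloc() */
	void *foo_mbuf_alloc(struct foo_hba *hba, int flags, dma_addr_t *phys);

	static struct foo_dmabuf *foo_get_sparam_buf(struct foo_hba *hba)
	{
		struct foo_dmabuf *mp;

		mp = kmalloc(sizeof(*mp), GFP_KERNEL);
		if (mp)
			mp->virt = foo_mbuf_alloc(hba, 0, &mp->phys);
		if (!mp || !mp->virt) {
			kfree(mp);		/* no-op when mp is NULL */
			return NULL;
		}
		return mp;
	}

Short-circuit evaluation made the original one-liner safe, but the sequential form reads unambiguously and funnels both failure points through a single cleanup.
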
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 43c3b8a0d76a..6dc5ab8d6716 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -98,6 +98,7 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
98 98
99 fail_free_hbq_pool: 99 fail_free_hbq_pool:
100 lpfc_sli_hbqbuf_free_all(phba); 100 lpfc_sli_hbqbuf_free_all(phba);
101 pci_pool_destroy(phba->lpfc_hbq_pool);
101 fail_free_nlp_mem_pool: 102 fail_free_nlp_mem_pool:
102 mempool_destroy(phba->nlp_mem_pool); 103 mempool_destroy(phba->nlp_mem_pool);
103 phba->nlp_mem_pool = NULL; 104 phba->nlp_mem_pool = NULL;
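
The one-line lpfc_mem.c change above restores the pci_pool_destroy() step that the error ladder was missing, so a later allocation failure no longer leaks the HBQ pool. The unwind idiom, reduced to a hypothetical three-pool setup (all foo_* names assumed):

	#include <linux/pci.h>
	#include <linux/mempool.h>

	struct foo_node { int id; };

	struct foo_hba {
		struct pci_dev *pcidev;
		struct pci_pool *buf_pool;
		mempool_t *node_pool;
		mempool_t *mbox_pool;
	};

	static int foo_mem_alloc(struct foo_hba *hba)
	{
		hba->buf_pool = pci_pool_create("foo_buf", hba->pcidev,
						1024, 8, 0);
		if (!hba->buf_pool)
			goto fail;

		hba->node_pool = mempool_create_kmalloc_pool(64,
						sizeof(struct foo_node));
		if (!hba->node_pool)
			goto fail_free_buf_pool;

		hba->mbox_pool = mempool_create_kmalloc_pool(8, 256);
		if (!hba->mbox_pool)
			goto fail_free_node_pool;

		return 0;

	fail_free_node_pool:
		mempool_destroy(hba->node_pool);
	fail_free_buf_pool:
		pci_pool_destroy(hba->buf_pool);  /* the step the fix adds */
	fail:
		return -ENOMEM;
	}

Each successful allocation adds one label, and every later failure unwinds through all earlier labels in reverse order; a missing destroy call leaks exactly one resource, which is what the hunk above corrects.
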
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 880af0cd463d..4a0e3406e37a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -287,6 +287,24 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
287 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 287 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
288 lp = (uint32_t *) pcmd->virt; 288 lp = (uint32_t *) pcmd->virt;
289 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 289 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
290 if (wwn_to_u64(sp->portName.u.wwn) == 0) {
291 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
292 "0140 PLOGI Reject: invalid nname\n");
293 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
294 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
295 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
296 NULL);
297 return 0;
298 }
299 if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
300 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
301 "0141 PLOGI Reject: invalid pname\n");
302 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
303 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
304 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
305 NULL);
306 return 0;
307 }
290 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) { 308 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
291 /* Reject this request because invalid parameters */ 309 /* Reject this request because invalid parameters */
292 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 310 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
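
wwn_to_u64(), used in the new checks above to reject all-zero (invalid) world wide names with a single compare, folds the eight WWN bytes MSB-first into one integer. A sketch equivalent to the helper in include/scsi/scsi_transport_fc.h:

	#include <linux/types.h>

	static inline u64 example_wwn_to_u64(const u8 wwn[8])
	{
		return (u64)wwn[0] << 56 | (u64)wwn[1] << 48 |
		       (u64)wwn[2] << 40 | (u64)wwn[3] << 32 |
		       (u64)wwn[4] << 24 | (u64)wwn[5] << 16 |
		       (u64)wwn[6] <<  8 | (u64)wwn[7];
	}
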
@@ -343,8 +361,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
343 lpfc_config_link(phba, mbox); 361 lpfc_config_link(phba, mbox);
344 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 362 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
345 mbox->vport = vport; 363 mbox->vport = vport;
346 rc = lpfc_sli_issue_mbox 364 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
347 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
348 if (rc == MBX_NOT_FINISHED) { 365 if (rc == MBX_NOT_FINISHED) {
349 mempool_free(mbox, phba->mbox_mem_pool); 366 mempool_free(mbox, phba->mbox_mem_pool);
350 goto out; 367 goto out;
@@ -407,6 +424,61 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
407 ndlp, mbox); 424 ndlp, mbox);
408 return 1; 425 return 1;
409 } 426 }
427
428 /* If the remote NPort logs into us before we can initiate
429 * discovery to it, clean up the NPort from discovery accordingly.
430 */
431 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
432 spin_lock_irq(shost->host_lock);
433 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
434 spin_unlock_irq(shost->host_lock);
435 del_timer_sync(&ndlp->nlp_delayfunc);
436 ndlp->nlp_last_elscmd = 0;
437
438 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
439 list_del_init(&ndlp->els_retry_evt.evt_listp);
440
441 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
442 spin_lock_irq(shost->host_lock);
443 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
444 spin_unlock_irq(shost->host_lock);
445
446 if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
447 (vport->num_disc_nodes)) {
448 /* Check to see if there are more
449 * ADISCs to be sent
450 */
451 lpfc_more_adisc(vport);
452
453 if ((vport->num_disc_nodes == 0) &&
454 (vport->fc_npr_cnt))
455 lpfc_els_disc_plogi(vport);
456
457 if (vport->num_disc_nodes == 0) {
458 spin_lock_irq(shost->host_lock);
459 vport->fc_flag &= ~FC_NDISC_ACTIVE;
460 spin_unlock_irq(shost->host_lock);
461 lpfc_can_disctmo(vport);
462 lpfc_end_rscn(vport);
463 }
464 }
465 else if (vport->num_disc_nodes) {
466 /* Check to see if there are more
467 * PLOGIs to be sent
468 */
469 lpfc_more_plogi(vport);
470
471 if (vport->num_disc_nodes == 0) {
472 spin_lock_irq(shost->host_lock);
473 vport->fc_flag &= ~FC_NDISC_ACTIVE;
474 spin_unlock_irq(shost->host_lock);
475 lpfc_can_disctmo(vport);
476 lpfc_end_rscn(vport);
477 }
478 }
479 }
480 }
481
410 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); 482 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
411 return 1; 483 return 1;
412 484
@@ -501,12 +573,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
501 spin_unlock_irq(shost->host_lock); 573 spin_unlock_irq(shost->host_lock);
502 574
503 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 575 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
504 ndlp->nlp_prev_state = ndlp->nlp_state;
505 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
506 } else {
507 ndlp->nlp_prev_state = ndlp->nlp_state;
508 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
509 } 576 }
577 ndlp->nlp_prev_state = ndlp->nlp_state;
578 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
510 579
511 spin_lock_irq(shost->host_lock); 580 spin_lock_irq(shost->host_lock);
512 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 581 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
@@ -594,6 +663,25 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
594 return ndlp->nlp_state; 663 return ndlp->nlp_state;
595} 664}
596 665
666static uint32_t
667lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
668 void *arg, uint32_t evt)
669{
670 /* This transition is only legal if we previously
671 * rcv'ed a PLOGI. Since we don't want two discovery threads
672 * working on the same NPortID, do nothing here so that this
673 * thread stops.
674 */
675 if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
676 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
677 "0253 Illegal State Transition: node x%x "
678 "event x%x, state x%x Data: x%x x%x\n",
679 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
680 ndlp->nlp_flag);
681 }
682 return ndlp->nlp_state;
683}
684
597/* Start of Discovery State Machine routines */ 685/* Start of Discovery State Machine routines */
598 686
599static uint32_t 687static uint32_t
@@ -605,11 +693,8 @@ lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
605 cmdiocb = (struct lpfc_iocbq *) arg; 693 cmdiocb = (struct lpfc_iocbq *) arg;
606 694
607 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { 695 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
608 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
609 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
610 return ndlp->nlp_state; 696 return ndlp->nlp_state;
611 } 697 }
612 lpfc_drop_node(vport, ndlp);
613 return NLP_STE_FREED_NODE; 698 return NLP_STE_FREED_NODE;
614} 699}
615 700
@@ -618,7 +703,6 @@ lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
618 void *arg, uint32_t evt) 703 void *arg, uint32_t evt)
619{ 704{
620 lpfc_issue_els_logo(vport, ndlp, 0); 705 lpfc_issue_els_logo(vport, ndlp, 0);
621 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
622 return ndlp->nlp_state; 706 return ndlp->nlp_state;
623} 707}
624 708
@@ -633,7 +717,6 @@ lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
633 ndlp->nlp_flag |= NLP_LOGO_ACC; 717 ndlp->nlp_flag |= NLP_LOGO_ACC;
634 spin_unlock_irq(shost->host_lock); 718 spin_unlock_irq(shost->host_lock);
635 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 719 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
636 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
637 720
638 return ndlp->nlp_state; 721 return ndlp->nlp_state;
639} 722}
@@ -642,7 +725,6 @@ static uint32_t
642lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 725lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
643 void *arg, uint32_t evt) 726 void *arg, uint32_t evt)
644{ 727{
645 lpfc_drop_node(vport, ndlp);
646 return NLP_STE_FREED_NODE; 728 return NLP_STE_FREED_NODE;
647} 729}
648 730
@@ -650,7 +732,6 @@ static uint32_t
650lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 732lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
651 void *arg, uint32_t evt) 733 void *arg, uint32_t evt)
652{ 734{
653 lpfc_drop_node(vport, ndlp);
654 return NLP_STE_FREED_NODE; 735 return NLP_STE_FREED_NODE;
655} 736}
656 737
@@ -752,6 +833,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
752 uint32_t evt) 833 uint32_t evt)
753{ 834{
754 struct lpfc_hba *phba = vport->phba; 835 struct lpfc_hba *phba = vport->phba;
836 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
755 struct lpfc_iocbq *cmdiocb, *rspiocb; 837 struct lpfc_iocbq *cmdiocb, *rspiocb;
756 struct lpfc_dmabuf *pcmd, *prsp, *mp; 838 struct lpfc_dmabuf *pcmd, *prsp, *mp;
757 uint32_t *lp; 839 uint32_t *lp;
@@ -778,6 +860,12 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
778 860
779 lp = (uint32_t *) prsp->virt; 861 lp = (uint32_t *) prsp->virt;
780 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 862 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
863 if (wwn_to_u64(sp->portName.u.wwn) == 0 ||
864 wwn_to_u64(sp->nodeName.u.wwn) == 0) {
865 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
866 "0142 PLOGI RSP: Invalid WWN.\n");
867 goto out;
868 }
781 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3)) 869 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
782 goto out; 870 goto out;
783 /* PLOGI chkparm OK */ 871 /* PLOGI chkparm OK */
@@ -828,13 +916,15 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
828 } 916 }
829 mbox->context2 = lpfc_nlp_get(ndlp); 917 mbox->context2 = lpfc_nlp_get(ndlp);
830 mbox->vport = vport; 918 mbox->vport = vport;
831 if (lpfc_sli_issue_mbox(phba, mbox, 919 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
832 (MBX_NOWAIT | MBX_STOP_IOCB))
833 != MBX_NOT_FINISHED) { 920 != MBX_NOT_FINISHED) {
834 lpfc_nlp_set_state(vport, ndlp, 921 lpfc_nlp_set_state(vport, ndlp,
835 NLP_STE_REG_LOGIN_ISSUE); 922 NLP_STE_REG_LOGIN_ISSUE);
836 return ndlp->nlp_state; 923 return ndlp->nlp_state;
837 } 924 }
925 /* decrement the node reference count held by the failed
926 * mbox command
927 */
838 lpfc_nlp_put(ndlp); 928 lpfc_nlp_put(ndlp);
839 mp = (struct lpfc_dmabuf *) mbox->context1; 929 mp = (struct lpfc_dmabuf *) mbox->context1;
840 lpfc_mbuf_free(phba, mp->virt, mp->phys); 930 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -864,13 +954,27 @@ out:
864 "0261 Cannot Register NameServer login\n"); 954 "0261 Cannot Register NameServer login\n");
865 } 955 }
866 956
867 /* Free this node since the driver cannot login or has the wrong 957 spin_lock_irq(shost->host_lock);
868 sparm */ 958 ndlp->nlp_flag |= NLP_DEFER_RM;
869 lpfc_drop_node(vport, ndlp); 959 spin_unlock_irq(shost->host_lock);
870 return NLP_STE_FREED_NODE; 960 return NLP_STE_FREED_NODE;
871} 961}
872 962
873static uint32_t 963static uint32_t
964lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
965 void *arg, uint32_t evt)
966{
967 return ndlp->nlp_state;
968}
969
970static uint32_t
971lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
972 struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
973{
974 return ndlp->nlp_state;
975}
976
977static uint32_t
874lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 978lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
875 void *arg, uint32_t evt) 979 void *arg, uint32_t evt)
876{ 980{
@@ -1137,7 +1241,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1137 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1241 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1138 mp = (struct lpfc_dmabuf *) (mb->context1); 1242 mp = (struct lpfc_dmabuf *) (mb->context1);
1139 if (mp) { 1243 if (mp) {
1140 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1244 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
1141 kfree(mp); 1245 kfree(mp);
1142 } 1246 }
1143 lpfc_nlp_put(ndlp); 1247 lpfc_nlp_put(ndlp);
@@ -1197,8 +1301,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1197 * retry discovery. 1301 * retry discovery.
1198 */ 1302 */
1199 if (mb->mbxStatus == MBXERR_RPI_FULL) { 1303 if (mb->mbxStatus == MBXERR_RPI_FULL) {
1200 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 1304 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1201 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); 1305 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1202 return ndlp->nlp_state; 1306 return ndlp->nlp_state;
1203 } 1307 }
1204 1308
@@ -1378,7 +1482,7 @@ out:
1378 lpfc_issue_els_logo(vport, ndlp, 0); 1482 lpfc_issue_els_logo(vport, ndlp, 0);
1379 1483
1380 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1484 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1381 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); 1485 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1382 return ndlp->nlp_state; 1486 return ndlp->nlp_state;
1383 } 1487 }
1384 1488
@@ -1753,7 +1857,7 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1753 1857
1754 irsp = &rspiocb->iocb; 1858 irsp = &rspiocb->iocb;
1755 if (irsp->ulpStatus) { 1859 if (irsp->ulpStatus) {
1756 lpfc_drop_node(vport, ndlp); 1860 ndlp->nlp_flag |= NLP_DEFER_RM;
1757 return NLP_STE_FREED_NODE; 1861 return NLP_STE_FREED_NODE;
1758 } 1862 }
1759 return ndlp->nlp_state; 1863 return ndlp->nlp_state;
@@ -1942,9 +2046,9 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1942 lpfc_rcv_els_plogi_issue, /* RCV_PRLO */ 2046 lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
1943 lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */ 2047 lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
1944 lpfc_disc_illegal, /* CMPL_PRLI */ 2048 lpfc_disc_illegal, /* CMPL_PRLI */
1945 lpfc_disc_illegal, /* CMPL_LOGO */ 2049 lpfc_cmpl_logo_plogi_issue, /* CMPL_LOGO */
1946 lpfc_disc_illegal, /* CMPL_ADISC */ 2050 lpfc_disc_illegal, /* CMPL_ADISC */
1947 lpfc_disc_illegal, /* CMPL_REG_LOGIN */ 2051 lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN */
1948 lpfc_device_rm_plogi_issue, /* DEVICE_RM */ 2052 lpfc_device_rm_plogi_issue, /* DEVICE_RM */
1949 lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */ 2053 lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
1950 2054
@@ -1968,7 +2072,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1968 lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */ 2072 lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
1969 lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */ 2073 lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
1970 lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */ 2074 lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
1971 lpfc_disc_illegal, /* CMPL_PLOGI */ 2075 lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
1972 lpfc_disc_illegal, /* CMPL_PRLI */ 2076 lpfc_disc_illegal, /* CMPL_PRLI */
1973 lpfc_disc_illegal, /* CMPL_LOGO */ 2077 lpfc_disc_illegal, /* CMPL_LOGO */
1974 lpfc_disc_illegal, /* CMPL_ADISC */ 2078 lpfc_disc_illegal, /* CMPL_ADISC */
@@ -1982,7 +2086,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1982 lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */ 2086 lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
1983 lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */ 2087 lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
1984 lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */ 2088 lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
1985 lpfc_disc_illegal, /* CMPL_PLOGI */ 2089 lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
1986 lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */ 2090 lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
1987 lpfc_disc_illegal, /* CMPL_LOGO */ 2091 lpfc_disc_illegal, /* CMPL_LOGO */
1988 lpfc_disc_illegal, /* CMPL_ADISC */ 2092 lpfc_disc_illegal, /* CMPL_ADISC */
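
The lpfc_disc_action[] edits above retarget slots in a flat state x event dispatch table. The lookup idiom behind it, reduced to a hypothetical two-state, two-event machine (foo_* names assumed):

	#include <linux/types.h>

	#define FOO_STATE_MAX	2
	#define FOO_EVT_MAX	2

	struct foo_node { u32 state; };

	typedef u32 (*foo_action_t)(struct foo_node *node, void *arg, u32 evt);

	static u32 foo_ignore(struct foo_node *node, void *arg, u32 evt)
	{
		return node->state;		/* illegal event: stay put */
	}

	static u32 foo_advance(struct foo_node *node, void *arg, u32 evt)
	{
		/* handler returns the next state */
		return (node->state + 1) % FOO_STATE_MAX;
	}

	/* one slot per (state, event) pair, state-major order */
	static foo_action_t foo_action[FOO_STATE_MAX * FOO_EVT_MAX] = {
		foo_advance, foo_ignore,	/* state 0: evt 0, evt 1 */
		foo_ignore,  foo_advance,	/* state 1: evt 0, evt 1 */
	};

	static u32 foo_state_machine(struct foo_node *node, void *arg, u32 evt)
	{
		return foo_action[node->state * FOO_EVT_MAX + evt](node, arg, evt);
	}

Because the dispatcher never changes, rerouting a transition (as the hunks above do for CMPL_LOGO and CMPL_REG_LOGIN in the PLOGI_ISSUE state) is a one-slot edit.
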
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 4e46045dea6d..6483c62730b3 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -130,7 +130,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
130 130
131 vports = lpfc_create_vport_work_array(phba); 131 vports = lpfc_create_vport_work_array(phba);
132 if (vports != NULL) 132 if (vports != NULL)
133 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { 133 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
134 shost = lpfc_shost_from_vport(vports[i]); 134 shost = lpfc_shost_from_vport(vports[i]);
135 shost_for_each_device(sdev, shost) { 135 shost_for_each_device(sdev, shost) {
136 new_queue_depth = 136 new_queue_depth =
@@ -151,7 +151,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
151 new_queue_depth); 151 new_queue_depth);
152 } 152 }
153 } 153 }
154 lpfc_destroy_vport_work_array(vports); 154 lpfc_destroy_vport_work_array(phba, vports);
155 atomic_set(&phba->num_rsrc_err, 0); 155 atomic_set(&phba->num_rsrc_err, 0);
156 atomic_set(&phba->num_cmd_success, 0); 156 atomic_set(&phba->num_cmd_success, 0);
157} 157}
@@ -166,7 +166,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
166 166
167 vports = lpfc_create_vport_work_array(phba); 167 vports = lpfc_create_vport_work_array(phba);
168 if (vports != NULL) 168 if (vports != NULL)
169 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) { 169 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
170 shost = lpfc_shost_from_vport(vports[i]); 170 shost = lpfc_shost_from_vport(vports[i]);
171 shost_for_each_device(sdev, shost) { 171 shost_for_each_device(sdev, shost) {
172 if (sdev->ordered_tags) 172 if (sdev->ordered_tags)
@@ -179,7 +179,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
179 sdev->queue_depth+1); 179 sdev->queue_depth+1);
180 } 180 }
181 } 181 }
182 lpfc_destroy_vport_work_array(vports); 182 lpfc_destroy_vport_work_array(phba, vports);
183 atomic_set(&phba->num_rsrc_err, 0); 183 atomic_set(&phba->num_rsrc_err, 0);
184 atomic_set(&phba->num_cmd_success, 0); 184 atomic_set(&phba->num_cmd_success, 0);
185} 185}
@@ -380,7 +380,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
380 (num_bde * sizeof (struct ulp_bde64)); 380 (num_bde * sizeof (struct ulp_bde64));
381 iocb_cmd->ulpBdeCount = 1; 381 iocb_cmd->ulpBdeCount = 1;
382 iocb_cmd->ulpLe = 1; 382 iocb_cmd->ulpLe = 1;
383 fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd)); 383 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
384 return 0; 384 return 0;
385} 385}
386 386
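
The fcpDl change above corrects the direction of the endianness conversion: fcpDl is big-endian on the wire and scsi_bufflen() is CPU-native, so the right helper is cpu_to_be32(). Both helpers perform the same byte swap, so behavior is unchanged; the fix is about stating intent and letting sparse check __be32 annotations. A reduced illustration with an assumed wire structure:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct foo_fcp_cmnd {
		__be32 fcp_dl;		/* data length, big-endian on the wire */
	};

	static void foo_fill_cmnd(struct foo_fcp_cmnd *c, u32 len)
	{
		c->fcp_dl = cpu_to_be32(len);	/* CPU-native to wire order */
	}
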
@@ -542,6 +542,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
542 int result; 542 int result;
543 struct scsi_device *sdev, *tmp_sdev; 543 struct scsi_device *sdev, *tmp_sdev;
544 int depth = 0; 544 int depth = 0;
545 unsigned long flags;
545 546
546 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 547 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
547 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 548 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
@@ -608,6 +609,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
608 cmd->scsi_done(cmd); 609 cmd->scsi_done(cmd);
609 610
610 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 611 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
612 /*
613 * If there is a thread waiting for command completion,
614 * wake up the thread.
615 */
616 spin_lock_irqsave(sdev->host->host_lock, flags);
617 lpfc_cmd->pCmd = NULL;
618 if (lpfc_cmd->waitq)
619 wake_up(lpfc_cmd->waitq);
620 spin_unlock_irqrestore(sdev->host->host_lock, flags);
611 lpfc_release_scsi_buf(phba, lpfc_cmd); 621 lpfc_release_scsi_buf(phba, lpfc_cmd);
612 return; 622 return;
613 } 623 }
@@ -669,6 +679,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
669 } 679 }
670 } 680 }
671 681
682 /*
683 * If there is a thread waiting for command completion,
684 * wake up the thread.
685 */
686 spin_lock_irqsave(sdev->host->host_lock, flags);
687 lpfc_cmd->pCmd = NULL;
688 if (lpfc_cmd->waitq)
689 wake_up(lpfc_cmd->waitq);
690 spin_unlock_irqrestore(sdev->host->host_lock, flags);
691
672 lpfc_release_scsi_buf(phba, lpfc_cmd); 692 lpfc_release_scsi_buf(phba, lpfc_cmd);
673} 693}
674 694
@@ -743,6 +763,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
743 piocbq->iocb.ulpContext = pnode->nlp_rpi; 763 piocbq->iocb.ulpContext = pnode->nlp_rpi;
744 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 764 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
745 piocbq->iocb.ulpFCP2Rcvy = 1; 765 piocbq->iocb.ulpFCP2Rcvy = 1;
766 else
767 piocbq->iocb.ulpFCP2Rcvy = 0;
746 768
747 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 769 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
748 piocbq->context1 = lpfc_cmd; 770 piocbq->context1 = lpfc_cmd;
@@ -1018,8 +1040,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
1018 struct lpfc_iocbq *abtsiocb; 1040 struct lpfc_iocbq *abtsiocb;
1019 struct lpfc_scsi_buf *lpfc_cmd; 1041 struct lpfc_scsi_buf *lpfc_cmd;
1020 IOCB_t *cmd, *icmd; 1042 IOCB_t *cmd, *icmd;
1021 unsigned int loop_count = 0;
1022 int ret = SUCCESS; 1043 int ret = SUCCESS;
1044 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
1023 1045
1024 lpfc_block_error_handler(cmnd); 1046 lpfc_block_error_handler(cmnd);
1025 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 1047 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
@@ -1074,17 +1096,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
1074 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 1096 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1075 lpfc_sli_poll_fcp_ring (phba); 1097 lpfc_sli_poll_fcp_ring (phba);
1076 1098
1099 lpfc_cmd->waitq = &waitq;
1077 /* Wait for abort to complete */ 1100 /* Wait for abort to complete */
1078 while (lpfc_cmd->pCmd == cmnd) 1101 wait_event_timeout(waitq,
1079 { 1102 (lpfc_cmd->pCmd != cmnd),
1080 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 1103 (2*vport->cfg_devloss_tmo*HZ));
1081 lpfc_sli_poll_fcp_ring (phba);
1082 1104
1083 schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ); 1105 spin_lock_irq(shost->host_lock);
1084 if (++loop_count 1106 lpfc_cmd->waitq = NULL;
1085 > (2 * vport->cfg_devloss_tmo)/LPFC_ABORT_WAIT) 1107 spin_unlock_irq(shost->host_lock);
1086 break;
1087 }
1088 1108
1089 if (lpfc_cmd->pCmd == cmnd) { 1109 if (lpfc_cmd->pCmd == cmnd) {
1090 ret = FAILED; 1110 ret = FAILED;
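
The abort-handler rewrite above replaces fixed-interval polling with a waitqueue handshake: the aborting thread sleeps on an on-stack waitqueue, and the completion path wakes it as soon as pCmd is cleared (the lpfc_scsi.h hunk below adds the waitq pointer this relies on). A sketch of both sides with assumed foo_* types:

	#include <linux/wait.h>
	#include <linux/spinlock.h>

	struct foo_buf {
		void *cmd;			/* owning command, NULL when done */
		wait_queue_head_t *waitq;	/* set only while an abort waits */
	};

	static int foo_abort_wait(struct foo_buf *buf, void *cmnd,
				  spinlock_t *host_lock, unsigned long tmo_secs)
	{
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

		buf->waitq = &waitq;
		/* sleep until the completion side clears buf->cmd, or time out */
		wait_event_timeout(waitq, buf->cmd != cmnd, tmo_secs * HZ);

		spin_lock_irq(host_lock);
		buf->waitq = NULL;	/* completion must stop waking us now */
		spin_unlock_irq(host_lock);

		return buf->cmd == cmnd ? -ETIMEDOUT : 0;
	}

	/* completion side; the real driver takes the host lock here too */
	static void foo_complete(struct foo_buf *buf)
	{
		buf->cmd = NULL;
		if (buf->waitq)
			wake_up(buf->waitq);
	}

The waiter wakes immediately on completion instead of at the next poll tick, and the timeout still bounds the wait when the abort itself is lost.
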
@@ -1438,7 +1458,7 @@ struct scsi_host_template lpfc_template = {
1438 .slave_destroy = lpfc_slave_destroy, 1458 .slave_destroy = lpfc_slave_destroy,
1439 .scan_finished = lpfc_scan_finished, 1459 .scan_finished = lpfc_scan_finished,
1440 .this_id = -1, 1460 .this_id = -1,
1441 .sg_tablesize = LPFC_SG_SEG_CNT, 1461 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
1442 .use_sg_chaining = ENABLE_SG_CHAINING, 1462 .use_sg_chaining = ENABLE_SG_CHAINING,
1443 .cmd_per_lun = LPFC_CMD_PER_LUN, 1463 .cmd_per_lun = LPFC_CMD_PER_LUN,
1444 .use_clustering = ENABLE_CLUSTERING, 1464 .use_clustering = ENABLE_CLUSTERING,
@@ -1459,7 +1479,7 @@ struct scsi_host_template lpfc_vport_template = {
1459 .slave_destroy = lpfc_slave_destroy, 1479 .slave_destroy = lpfc_slave_destroy,
1460 .scan_finished = lpfc_scan_finished, 1480 .scan_finished = lpfc_scan_finished,
1461 .this_id = -1, 1481 .this_id = -1,
1462 .sg_tablesize = LPFC_SG_SEG_CNT, 1482 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
1463 .cmd_per_lun = LPFC_CMD_PER_LUN, 1483 .cmd_per_lun = LPFC_CMD_PER_LUN,
1464 .use_clustering = ENABLE_CLUSTERING, 1484 .use_clustering = ENABLE_CLUSTERING,
1465 .use_sg_chaining = ENABLE_SG_CHAINING, 1485 .use_sg_chaining = ENABLE_SG_CHAINING,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 31787bb6d53e..daba92374985 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -138,6 +138,7 @@ struct lpfc_scsi_buf {
138 * Iotag is in here 138 * Iotag is in here
139 */ 139 */
140 struct lpfc_iocbq cur_iocbq; 140 struct lpfc_iocbq cur_iocbq;
141 wait_queue_head_t *waitq;
141}; 142};
142 143
143#define LPFC_SCSI_DMA_EXT_SIZE 264 144#define LPFC_SCSI_DMA_EXT_SIZE 264
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index ce348c5c706c..fdd01e384e36 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -106,7 +106,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
106 return iocbq; 106 return iocbq;
107} 107}
108 108
109void 109static void
110__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 110__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
111{ 111{
112 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 112 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
@@ -199,6 +199,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
199 case CMD_RCV_ELS_REQ_CX: 199 case CMD_RCV_ELS_REQ_CX:
200 case CMD_RCV_SEQUENCE64_CX: 200 case CMD_RCV_SEQUENCE64_CX:
201 case CMD_RCV_ELS_REQ64_CX: 201 case CMD_RCV_ELS_REQ64_CX:
202 case CMD_ASYNC_STATUS:
202 case CMD_IOCB_RCV_SEQ64_CX: 203 case CMD_IOCB_RCV_SEQ64_CX:
203 case CMD_IOCB_RCV_ELS64_CX: 204 case CMD_IOCB_RCV_ELS64_CX:
204 case CMD_IOCB_RCV_CONT64_CX: 205 case CMD_IOCB_RCV_CONT64_CX:
@@ -473,8 +474,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
473 if (pring->txq_cnt && 474 if (pring->txq_cnt &&
474 lpfc_is_link_up(phba) && 475 lpfc_is_link_up(phba) &&
475 (pring->ringno != phba->sli.fcp_ring || 476 (pring->ringno != phba->sli.fcp_ring ||
476 phba->sli.sli_flag & LPFC_PROCESS_LA) && 477 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
477 !(pring->flag & LPFC_STOP_IOCB_MBX)) {
478 478
479 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 479 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
480 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 480 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
@@ -489,32 +489,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
489 return; 489 return;
490} 490}
491 491
492/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */ 492static struct lpfc_hbq_entry *
493static void
494lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
495{
496 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
497 &phba->slim2p->mbx.us.s3_pgp.port[ringno] :
498 &phba->slim2p->mbx.us.s2.port[ringno];
499 unsigned long iflags;
500
501 /* If the ring is active, flag it */
502 spin_lock_irqsave(&phba->hbalock, iflags);
503 if (phba->sli.ring[ringno].cmdringaddr) {
504 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
505 phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
506 /*
507 * Force update of the local copy of cmdGetInx
508 */
509 phba->sli.ring[ringno].local_getidx
510 = le32_to_cpu(pgp->cmdGetInx);
511 lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
512 }
513 }
514 spin_unlock_irqrestore(&phba->hbalock, iflags);
515}
516
517struct lpfc_hbq_entry *
518lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 493lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
519{ 494{
520 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 495 struct hbq_s *hbqp = &phba->hbqs[hbqno];
@@ -565,6 +540,7 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
565 list_del(&hbq_buf->dbuf.list); 540 list_del(&hbq_buf->dbuf.list);
566 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 541 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
567 } 542 }
543 phba->hbqs[i].buffer_count = 0;
568 } 544 }
569} 545}
570 546
@@ -633,8 +609,8 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
633 return 0; 609 return 0;
634 } 610 }
635 611
636 start = lpfc_hbq_defs[hbqno]->buffer_count; 612 start = phba->hbqs[hbqno].buffer_count;
637 end = count + lpfc_hbq_defs[hbqno]->buffer_count; 613 end = count + start;
638 if (end > lpfc_hbq_defs[hbqno]->entry_count) { 614 if (end > lpfc_hbq_defs[hbqno]->entry_count) {
639 end = lpfc_hbq_defs[hbqno]->entry_count; 615 end = lpfc_hbq_defs[hbqno]->entry_count;
640 } 616 }
@@ -646,7 +622,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
646 return 1; 622 return 1;
647 hbq_buffer->tag = (i | (hbqno << 16)); 623 hbq_buffer->tag = (i | (hbqno << 16));
648 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 624 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
649 lpfc_hbq_defs[hbqno]->buffer_count++; 625 phba->hbqs[hbqno].buffer_count++;
650 else 626 else
651 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 627 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
652 } 628 }
@@ -660,14 +636,14 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
660 lpfc_hbq_defs[qno]->add_count)); 636 lpfc_hbq_defs[qno]->add_count));
661} 637}
662 638
663int 639static int
664lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 640lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
665{ 641{
666 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 642 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
667 lpfc_hbq_defs[qno]->init_count)); 643 lpfc_hbq_defs[qno]->init_count));
668} 644}
669 645
670struct hbq_dmabuf * 646static struct hbq_dmabuf *
671lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 647lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
672{ 648{
673 struct lpfc_dmabuf *d_buf; 649 struct lpfc_dmabuf *d_buf;
@@ -686,7 +662,7 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
686 } 662 }
687 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
688 "1803 Bad hbq tag. Data: x%x x%x\n", 664 "1803 Bad hbq tag. Data: x%x x%x\n",
689 tag, lpfc_hbq_defs[tag >> 16]->buffer_count); 665 tag, phba->hbqs[tag >> 16].buffer_count);
690 return NULL; 666 return NULL;
691} 667}
692 668
@@ -712,6 +688,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
712 case MBX_LOAD_SM: 688 case MBX_LOAD_SM:
713 case MBX_READ_NV: 689 case MBX_READ_NV:
714 case MBX_WRITE_NV: 690 case MBX_WRITE_NV:
691 case MBX_WRITE_VPARMS:
715 case MBX_RUN_BIU_DIAG: 692 case MBX_RUN_BIU_DIAG:
716 case MBX_INIT_LINK: 693 case MBX_INIT_LINK:
717 case MBX_DOWN_LINK: 694 case MBX_DOWN_LINK:
@@ -739,7 +716,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
739 case MBX_DEL_LD_ENTRY: 716 case MBX_DEL_LD_ENTRY:
740 case MBX_RUN_PROGRAM: 717 case MBX_RUN_PROGRAM:
741 case MBX_SET_MASK: 718 case MBX_SET_MASK:
742 case MBX_SET_SLIM: 719 case MBX_SET_VARIABLE:
743 case MBX_UNREG_D_ID: 720 case MBX_UNREG_D_ID:
744 case MBX_KILL_BOARD: 721 case MBX_KILL_BOARD:
745 case MBX_CONFIG_FARP: 722 case MBX_CONFIG_FARP:
@@ -751,9 +728,10 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
751 case MBX_READ_RPI64: 728 case MBX_READ_RPI64:
752 case MBX_REG_LOGIN64: 729 case MBX_REG_LOGIN64:
753 case MBX_READ_LA64: 730 case MBX_READ_LA64:
754 case MBX_FLASH_WR_ULA: 731 case MBX_WRITE_WWN:
755 case MBX_SET_DEBUG: 732 case MBX_SET_DEBUG:
756 case MBX_LOAD_EXP_ROM: 733 case MBX_LOAD_EXP_ROM:
734 case MBX_ASYNCEVT_ENABLE:
757 case MBX_REG_VPI: 735 case MBX_REG_VPI:
758 case MBX_UNREG_VPI: 736 case MBX_UNREG_VPI:
759 case MBX_HEARTBEAT: 737 case MBX_HEARTBEAT:
@@ -953,6 +931,17 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
953 return &new_hbq_entry->dbuf; 931 return &new_hbq_entry->dbuf;
954} 932}
955 933
934static struct lpfc_dmabuf *
935lpfc_sli_get_buff(struct lpfc_hba *phba,
936 struct lpfc_sli_ring *pring,
937 uint32_t tag)
938{
939 if (tag & QUE_BUFTAG_BIT)
940 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
941 else
942 return lpfc_sli_replace_hbqbuff(phba, tag);
943}
944
956static int 945static int
957lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 946lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
958 struct lpfc_iocbq *saveq) 947 struct lpfc_iocbq *saveq)
@@ -961,19 +950,112 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
961 WORD5 * w5p; 950 WORD5 * w5p;
962 uint32_t Rctl, Type; 951 uint32_t Rctl, Type;
963 uint32_t match, i; 952 uint32_t match, i;
953 struct lpfc_iocbq *iocbq;
964 954
965 match = 0; 955 match = 0;
966 irsp = &(saveq->iocb); 956 irsp = &(saveq->iocb);
967 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) 957
968 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) 958 if (irsp->ulpStatus == IOSTAT_NEED_BUFFER)
969 || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX) 959 return 1;
970 || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) { 960 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
961 if (pring->lpfc_sli_rcv_async_status)
962 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
963 else
964 lpfc_printf_log(phba,
965 KERN_WARNING,
966 LOG_SLI,
967 "0316 Ring %d handler: unexpected "
968 "ASYNC_STATUS iocb received evt_code "
969 "0x%x\n",
970 pring->ringno,
971 irsp->un.asyncstat.evt_code);
972 return 1;
973 }
974
975 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
976 if (irsp->ulpBdeCount != 0) {
977 saveq->context2 = lpfc_sli_get_buff(phba, pring,
978 irsp->un.ulpWord[3]);
979 if (!saveq->context2)
980 lpfc_printf_log(phba,
981 KERN_ERR,
982 LOG_SLI,
983 "0341 Ring %d Cannot find buffer for "
984 "an unsolicited iocb. tag 0x%x\n",
985 pring->ringno,
986 irsp->un.ulpWord[3]);
987 }
988 if (irsp->ulpBdeCount == 2) {
989 saveq->context3 = lpfc_sli_get_buff(phba, pring,
990 irsp->unsli3.sli3Words[7]);
991 if (!saveq->context3)
992 lpfc_printf_log(phba,
993 KERN_ERR,
994 LOG_SLI,
995 "0342 Ring %d Cannot find buffer for an"
996 " unsolicited iocb. tag 0x%x\n",
997 pring->ringno,
998 irsp->unsli3.sli3Words[7]);
999 }
1000 list_for_each_entry(iocbq, &saveq->list, list) {
1001 irsp = &(iocbq->iocb);
1002 if (irsp->ulpBdeCount != 0) {
1003 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
1004 irsp->un.ulpWord[3]);
1005 if (!iocbq->context2)
1006 lpfc_printf_log(phba,
1007 KERN_ERR,
1008 LOG_SLI,
1009 "0343 Ring %d Cannot find "
1010 "buffer for an unsolicited iocb"
1011 ". tag 0x%x\n", pring->ringno,
1012 irsp->un.ulpWord[3]);
1013 }
1014 if (irsp->ulpBdeCount == 2) {
1015 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
1016 irsp->unsli3.sli3Words[7]);
1017 if (!iocbq->context3)
1018 lpfc_printf_log(phba,
1019 KERN_ERR,
1020 LOG_SLI,
1021 "0344 Ring %d Cannot find "
1022 "buffer for an unsolicited "
1023 "iocb. tag 0x%x\n",
1024 pring->ringno,
1025 irsp->unsli3.sli3Words[7]);
1026 }
1027 }
1028 }
1029 if (irsp->ulpBdeCount != 0 &&
1030 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
1031 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
1032 int found = 0;
1033
1034 /* search continue save q for same XRI */
1035 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
1036 if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
1037 list_add_tail(&saveq->list, &iocbq->list);
1038 found = 1;
1039 break;
1040 }
1041 }
1042 if (!found)
1043 list_add_tail(&saveq->clist,
1044 &pring->iocb_continue_saveq);
1045 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
1046 list_del_init(&iocbq->clist);
1047 saveq = iocbq;
1048 irsp = &(saveq->iocb);
1049 } else
1050 return 0;
1051 }
1052 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
1053 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
1054 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
971 Rctl = FC_ELS_REQ; 1055 Rctl = FC_ELS_REQ;
972 Type = FC_ELS_DATA; 1056 Type = FC_ELS_DATA;
973 } else { 1057 } else {
974 w5p = 1058 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
975 (WORD5 *) & (saveq->iocb.un.
976 ulpWord[5]);
977 Rctl = w5p->hcsw.Rctl; 1059 Rctl = w5p->hcsw.Rctl;
978 Type = w5p->hcsw.Type; 1060 Type = w5p->hcsw.Type;
979 1061
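
The continuation-queue logic added above chains partial responses that share an exchange ID (XRI) onto the first piece until the final response arrives. The shape of that accumulation, sketched with assumed foo_* types:

	#include <linux/list.h>
	#include <linux/types.h>

	struct foo_iocb {
		u32 xri;			/* exchange this piece belongs to */
		bool final;			/* last piece of the sequence? */
		struct list_head list;		/* chain of pieces, head first */
		struct list_head clist;		/* slot on the continue queue */
	};

	/* returns the completed chain head, or NULL if more pieces are due */
	static struct foo_iocb *foo_queue_piece(struct list_head *continueq,
						struct foo_iocb *piece)
	{
		struct foo_iocb *head;

		list_for_each_entry(head, continueq, clist) {
			if (head->xri == piece->xri) {
				/* same exchange: append to its chain */
				list_add_tail(&piece->list, &head->list);
				if (!piece->final)
					return NULL;
				list_del_init(&head->clist);
				return head;	/* sequence complete */
			}
		}
		if (piece->final)
			return piece;		/* single-piece sequence */
		list_add_tail(&piece->clist, continueq);
		return NULL;
	}

Both list heads must be initialized with INIT_LIST_HEAD() before first use.
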
@@ -988,15 +1070,6 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
988 } 1070 }
989 } 1071 }
990 1072
991 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
992 if (irsp->ulpBdeCount != 0)
993 saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
994 irsp->un.ulpWord[3]);
995 if (irsp->ulpBdeCount == 2)
996 saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
997 irsp->unsli3.sli3Words[7]);
998 }
999
1000 /* unSolicited Responses */ 1073 /* unSolicited Responses */
1001 if (pring->prt[0].profile) { 1074 if (pring->prt[0].profile) {
1002 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 1075 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
@@ -1006,12 +1079,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1006 } else { 1079 } else {
1007 /* We must search, based on rctl / type 1080 /* We must search, based on rctl / type
1008 for the right routine */ 1081 for the right routine */
1009 for (i = 0; i < pring->num_mask; 1082 for (i = 0; i < pring->num_mask; i++) {
1010 i++) { 1083 if ((pring->prt[i].rctl == Rctl)
1011 if ((pring->prt[i].rctl == 1084 && (pring->prt[i].type == Type)) {
1012 Rctl)
1013 && (pring->prt[i].
1014 type == Type)) {
1015 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 1085 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1016 (pring->prt[i].lpfc_sli_rcv_unsol_event) 1086 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1017 (phba, pring, saveq); 1087 (phba, pring, saveq);
@@ -1084,6 +1154,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1084 IOSTAT_LOCAL_REJECT; 1154 IOSTAT_LOCAL_REJECT;
1085 saveq->iocb.un.ulpWord[4] = 1155 saveq->iocb.un.ulpWord[4] =
1086 IOERR_SLI_ABORTED; 1156 IOERR_SLI_ABORTED;
1157
1158 /* Firmware could still be DMAing the
1159 * payload, so don't free the data
1160 * buffer until after a heartbeat.
1161 */
1162 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
1087 } 1163 }
1088 } 1164 }
1089 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 1165 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -1572,12 +1648,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1572 1648
1573 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 1649 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1574 1650
1575 if (list_empty(&(pring->iocb_continueq))) { 1651 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
1576 list_add(&rspiocbp->list, &(pring->iocb_continueq));
1577 } else {
1578 list_add_tail(&rspiocbp->list,
1579 &(pring->iocb_continueq));
1580 }
1581 1652
1582 pring->iocb_continueq_cnt++; 1653 pring->iocb_continueq_cnt++;
1583 if (irsp->ulpLe) { 1654 if (irsp->ulpLe) {
@@ -1642,17 +1713,17 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1642 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 1713 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1643 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 1714 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1644 if (type == LPFC_SOL_IOCB) { 1715 if (type == LPFC_SOL_IOCB) {
1645 spin_unlock_irqrestore(&phba->hbalock, 1716 spin_unlock_irqrestore(&phba->hbalock, iflag);
1646 iflag);
1647 rc = lpfc_sli_process_sol_iocb(phba, pring, 1717 rc = lpfc_sli_process_sol_iocb(phba, pring,
1648 saveq); 1718 saveq);
1649 spin_lock_irqsave(&phba->hbalock, iflag); 1719 spin_lock_irqsave(&phba->hbalock, iflag);
1650 } else if (type == LPFC_UNSOL_IOCB) { 1720 } else if (type == LPFC_UNSOL_IOCB) {
1651 spin_unlock_irqrestore(&phba->hbalock, 1721 spin_unlock_irqrestore(&phba->hbalock, iflag);
1652 iflag);
1653 rc = lpfc_sli_process_unsol_iocb(phba, pring, 1722 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1654 saveq); 1723 saveq);
1655 spin_lock_irqsave(&phba->hbalock, iflag); 1724 spin_lock_irqsave(&phba->hbalock, iflag);
1725 if (!rc)
1726 free_saveq = 0;
1656 } else if (type == LPFC_ABORT_IOCB) { 1727 } else if (type == LPFC_ABORT_IOCB) {
1657 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && 1728 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1658 ((cmdiocbp = 1729 ((cmdiocbp =
@@ -1921,8 +1992,8 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
1921 "0329 Kill HBA Data: x%x x%x\n", 1992 "0329 Kill HBA Data: x%x x%x\n",
1922 phba->pport->port_state, psli->sli_flag); 1993 phba->pport->port_state, psli->sli_flag);
1923 1994
1924 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 1995 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1925 GFP_KERNEL)) == 0) 1996 if (!pmb)
1926 return 1; 1997 return 1;
1927 1998
1928 /* Disable the error attention */ 1999 /* Disable the error attention */
@@ -2113,7 +2184,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
2113 <status> */ 2184 <status> */
2114 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2185 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2115 "0436 Adapter failed to init, " 2186 "0436 Adapter failed to init, "
2116 "timeout, status reg x%x\n", status); 2187 "timeout, status reg x%x, "
2188 "FW Data: A8 x%x AC x%x\n", status,
2189 readl(phba->MBslimaddr + 0xa8),
2190 readl(phba->MBslimaddr + 0xac));
2117 phba->link_state = LPFC_HBA_ERROR; 2191 phba->link_state = LPFC_HBA_ERROR;
2118 return -ETIMEDOUT; 2192 return -ETIMEDOUT;
2119 } 2193 }
@@ -2125,7 +2199,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
2125 <status> */ 2199 <status> */
2126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2127 "0437 Adapter failed to init, " 2201 "0437 Adapter failed to init, "
2128 "chipset, status reg x%x\n", status); 2202 "chipset, status reg x%x, "
2203 "FW Data: A8 x%x AC x%x\n", status,
2204 readl(phba->MBslimaddr + 0xa8),
2205 readl(phba->MBslimaddr + 0xac));
2129 phba->link_state = LPFC_HBA_ERROR; 2206 phba->link_state = LPFC_HBA_ERROR;
2130 return -EIO; 2207 return -EIO;
2131 } 2208 }
@@ -2153,7 +2230,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
2153 /* Adapter failed to init, chipset, status reg <status> */ 2230 /* Adapter failed to init, chipset, status reg <status> */
2154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2155 "0438 Adapter failed to init, chipset, " 2232 "0438 Adapter failed to init, chipset, "
2156 "status reg x%x\n", status); 2233 "status reg x%x, "
2234 "FW Data: A8 x%x AC x%x\n", status,
2235 readl(phba->MBslimaddr + 0xa8),
2236 readl(phba->MBslimaddr + 0xac));
2157 phba->link_state = LPFC_HBA_ERROR; 2237 phba->link_state = LPFC_HBA_ERROR;
2158 return -EIO; 2238 return -EIO;
2159 } 2239 }
@@ -2485,11 +2565,16 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2485 lpfc_sli_abort_iocb_ring(phba, pring); 2565 lpfc_sli_abort_iocb_ring(phba, pring);
2486 2566
2487 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2567 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2488 "0316 Resetting board due to mailbox timeout\n"); 2568 "0345 Resetting board due to mailbox timeout\n");
2489 /* 2569 /*
2490 * lpfc_offline calls lpfc_sli_hba_down which will clean up 2570 * lpfc_offline calls lpfc_sli_hba_down which will clean up
2491 * any outstanding mailbox commands. 2571 * any outstanding mailbox commands.
2492 */ 2572 */
2573 /* If resets are disabled then set error state and return. */
2574 if (!phba->cfg_enable_hba_reset) {
2575 phba->link_state = LPFC_HBA_ERROR;
2576 return;
2577 }
2493 lpfc_offline_prep(phba); 2578 lpfc_offline_prep(phba);
2494 lpfc_offline(phba); 2579 lpfc_offline(phba);
2495 lpfc_sli_brdrestart(phba); 2580 lpfc_sli_brdrestart(phba);
@@ -2507,6 +2592,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2507 uint32_t status, evtctr; 2592 uint32_t status, evtctr;
2508 uint32_t ha_copy; 2593 uint32_t ha_copy;
2509 int i; 2594 int i;
2595 unsigned long timeout;
2510 unsigned long drvr_flag = 0; 2596 unsigned long drvr_flag = 0;
2511 volatile uint32_t word0, ldata; 2597 volatile uint32_t word0, ldata;
2512 void __iomem *to_slim; 2598 void __iomem *to_slim;
@@ -2519,7 +2605,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2519 "1806 Mbox x%x failed. No vport\n", 2605 "1806 Mbox x%x failed. No vport\n",
2520 pmbox->mb.mbxCommand); 2606 pmbox->mb.mbxCommand);
2521 dump_stack(); 2607 dump_stack();
2522 return MBXERR_ERROR; 2608 return MBX_NOT_FINISHED;
2523 } 2609 }
2524 } 2610 }
2525 2611
@@ -2571,21 +2657,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2571 return MBX_NOT_FINISHED; 2657 return MBX_NOT_FINISHED;
2572 } 2658 }
2573 2659
2574 /* Handle STOP IOCB processing flag. This is only meaningful
2575 * if we are not polling for mbox completion.
2576 */
2577 if (flag & MBX_STOP_IOCB) {
2578 flag &= ~MBX_STOP_IOCB;
2579 /* Now flag each ring */
2580 for (i = 0; i < psli->num_rings; i++) {
2581 /* If the ring is active, flag it */
2582 if (psli->ring[i].cmdringaddr) {
2583 psli->ring[i].flag |=
2584 LPFC_STOP_IOCB_MBX;
2585 }
2586 }
2587 }
2588
2589 /* Another mailbox command is still being processed, queue this 2660 /* Another mailbox command is still being processed, queue this
2590 * command to be processed later. 2661 * command to be processed later.
2591 */ 2662 */
@@ -2620,23 +2691,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2620 return MBX_BUSY; 2691 return MBX_BUSY;
2621 } 2692 }
2622 2693
2623 /* Handle STOP IOCB processing flag. This is only meaningful
2624 * if we are not polling for mbox completion.
2625 */
2626 if (flag & MBX_STOP_IOCB) {
2627 flag &= ~MBX_STOP_IOCB;
2628 if (flag == MBX_NOWAIT) {
2629 /* Now flag each ring */
2630 for (i = 0; i < psli->num_rings; i++) {
2631 /* If the ring is active, flag it */
2632 if (psli->ring[i].cmdringaddr) {
2633 psli->ring[i].flag |=
2634 LPFC_STOP_IOCB_MBX;
2635 }
2636 }
2637 }
2638 }
2639
2640 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2694 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2641 2695
2642 /* If we are not polling, we MUST be in SLI2 mode */ 2696 /* If we are not polling, we MUST be in SLI2 mode */
@@ -2714,18 +2768,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2714 } 2768 }
2715 2769
2716 wmb(); 2770 wmb();
2717 /* interrupt board to doit right away */
2718 writel(CA_MBATT, phba->CAregaddr);
2719 readl(phba->CAregaddr); /* flush */
2720 2771
2721 switch (flag) { 2772 switch (flag) {
2722 case MBX_NOWAIT: 2773 case MBX_NOWAIT:
2723 /* Don't wait for it to finish, just return */ 2774 /* Set up reference to mailbox command */
2724 psli->mbox_active = pmbox; 2775 psli->mbox_active = pmbox;
2776 /* Interrupt board to do it */
2777 writel(CA_MBATT, phba->CAregaddr);
2778 readl(phba->CAregaddr); /* flush */
2779 /* Don't wait for it to finish, just return */
2725 break; 2780 break;
2726 2781
2727 case MBX_POLL: 2782 case MBX_POLL:
2783 /* Set up null reference to mailbox command */
2728 psli->mbox_active = NULL; 2784 psli->mbox_active = NULL;
2785 /* Interrupt board to do it */
2786 writel(CA_MBATT, phba->CAregaddr);
2787 readl(phba->CAregaddr); /* flush */
2788
2729 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2789 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2730 /* First read mbox status word */ 2790 /* First read mbox status word */
2731 word0 = *((volatile uint32_t *)&phba->slim2p->mbx); 2791 word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
@@ -2737,15 +2797,15 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2737 2797
2738 /* Read the HBA Host Attention Register */ 2798 /* Read the HBA Host Attention Register */
2739 ha_copy = readl(phba->HAregaddr); 2799 ha_copy = readl(phba->HAregaddr);
2740 2800 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2741 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand); 2801 mb->mbxCommand) *
2742 i *= 1000; /* Convert to ms */ 2802 1000) + jiffies;
2743 2803 i = 0;
2744 /* Wait for command to complete */ 2804 /* Wait for command to complete */
2745 while (((word0 & OWN_CHIP) == OWN_CHIP) || 2805 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2746 (!(ha_copy & HA_MBATT) && 2806 (!(ha_copy & HA_MBATT) &&
2747 (phba->link_state > LPFC_WARM_START))) { 2807 (phba->link_state > LPFC_WARM_START))) {
2748 if (i-- <= 0) { 2808 if (time_after(jiffies, timeout)) {
2749 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2809 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2750 spin_unlock_irqrestore(&phba->hbalock, 2810 spin_unlock_irqrestore(&phba->hbalock,
2751 drvr_flag); 2811 drvr_flag);
@@ -2758,12 +2818,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2758 && (evtctr != psli->slistat.mbox_event)) 2818 && (evtctr != psli->slistat.mbox_event))
2759 break; 2819 break;
2760 2820
2761 spin_unlock_irqrestore(&phba->hbalock, 2821 if (i++ > 10) {
2762 drvr_flag); 2822 spin_unlock_irqrestore(&phba->hbalock,
2763 2823 drvr_flag);
2764 msleep(1); 2824 msleep(1);
2765 2825 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2766 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2826 }
2767 2827
2768 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2828 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2769 /* First copy command data */ 2829 /* First copy command data */
@@ -2848,7 +2908,7 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2848/* 2908/*
2849 * Lockless version of lpfc_sli_issue_iocb. 2909 * Lockless version of lpfc_sli_issue_iocb.
2850 */ 2910 */
2851int 2911static int
2852__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2912__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2853 struct lpfc_iocbq *piocb, uint32_t flag) 2913 struct lpfc_iocbq *piocb, uint32_t flag)
2854{ 2914{
@@ -2879,9 +2939,9 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2879 2939
2880 /* 2940 /*
2881 * Check to see if we are blocking IOCB processing because of a 2941 * Check to see if we are blocking IOCB processing because of a
2882 * outstanding mbox command. 2942 * outstanding event.
2883 */ 2943 */
2884 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX)) 2944 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
2885 goto iocb_busy; 2945 goto iocb_busy;
2886 2946
2887 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 2947 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
@@ -2993,6 +3053,61 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
2993 return 0; 3053 return 0;
2994} 3054}
2995 3055
3056static void
3057lpfc_sli_async_event_handler(struct lpfc_hba * phba,
3058 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
3059{
3060 IOCB_t *icmd;
3061 uint16_t evt_code;
3062 uint16_t temp;
3063 struct temp_event temp_event_data;
3064 struct Scsi_Host *shost;
3065
3066 icmd = &iocbq->iocb;
3067 evt_code = icmd->un.asyncstat.evt_code;
3068 temp = icmd->ulpContext;
3069
3070 if ((evt_code != ASYNC_TEMP_WARN) &&
3071 (evt_code != ASYNC_TEMP_SAFE)) {
3072 lpfc_printf_log(phba,
3073 KERN_ERR,
3074 LOG_SLI,
3075 "0346 Ring %d handler: unexpected ASYNC_STATUS"
3076 " evt_code 0x%x\n",
3077 pring->ringno,
3078 icmd->un.asyncstat.evt_code);
3079 return;
3080 }
3081 temp_event_data.data = (uint32_t)temp;
3082 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
3083 if (evt_code == ASYNC_TEMP_WARN) {
3084 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
3085 lpfc_printf_log(phba,
3086 KERN_ERR,
3087 LOG_TEMP,
3088 "0347 Adapter is very hot, please take "
3089 "corrective action. temperature : %d Celsius\n",
3090 temp);
3091 }
3092 if (evt_code == ASYNC_TEMP_SAFE) {
3093 temp_event_data.event_code = LPFC_NORMAL_TEMP;
3094 lpfc_printf_log(phba,
3095 KERN_ERR,
3096 LOG_TEMP,
3097 "0340 Adapter temperature is OK now. "
3098 "temperature : %d Celsius\n",
3099 temp);
3100 }
3101
3102 /* Send temperature change event to applications */
3103 shost = lpfc_shost_from_vport(phba->pport);
3104 fc_host_post_vendor_event(shost, fc_get_event_number(),
3105 sizeof(temp_event_data), (char *) &temp_event_data,
3106 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
3107
3108}
3109
3110
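[lpfc_sli_async_event_handler above is installed as a per-ring callback in lpfc_sli_setup below (pring->lpfc_sli_rcv_async_status). The dispatch site is not part of this hunk; presumably the unsolicited-IOCB path invokes the hook roughly as in this sketch, where irsp, saveq, and the CMD_ASYNC_STATUS opcode check are assumptions:

    /* Hypothetical dispatch in the ring's unsolicited IOCB handler: */
    if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
            if (pring->lpfc_sli_rcv_async_status)
                    pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
            return 1;       /* consumed */
    }]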
2996int 3111int
2997lpfc_sli_setup(struct lpfc_hba *phba) 3112lpfc_sli_setup(struct lpfc_hba *phba)
2998{ 3113{
@@ -3059,6 +3174,8 @@ lpfc_sli_setup(struct lpfc_hba *phba)
3059 pring->fast_iotag = 0; 3174 pring->fast_iotag = 0;
3060 pring->iotag_ctr = 0; 3175 pring->iotag_ctr = 0;
3061 pring->iotag_max = 4096; 3176 pring->iotag_max = 4096;
3177 pring->lpfc_sli_rcv_async_status =
3178 lpfc_sli_async_event_handler;
3062 pring->num_mask = 4; 3179 pring->num_mask = 4;
3063 pring->prt[0].profile = 0; /* Mask 0 */ 3180 pring->prt[0].profile = 0; /* Mask 0 */
3064 pring->prt[0].rctl = FC_ELS_REQ; 3181 pring->prt[0].rctl = FC_ELS_REQ;
@@ -3123,6 +3240,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
3123 INIT_LIST_HEAD(&pring->txq); 3240 INIT_LIST_HEAD(&pring->txq);
3124 INIT_LIST_HEAD(&pring->txcmplq); 3241 INIT_LIST_HEAD(&pring->txcmplq);
3125 INIT_LIST_HEAD(&pring->iocb_continueq); 3242 INIT_LIST_HEAD(&pring->iocb_continueq);
3243 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
3126 INIT_LIST_HEAD(&pring->postbufq); 3244 INIT_LIST_HEAD(&pring->postbufq);
3127 } 3245 }
3128 spin_unlock_irq(&phba->hbalock); 3246 spin_unlock_irq(&phba->hbalock);
@@ -3193,6 +3311,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
3193 LIST_HEAD(completions); 3311 LIST_HEAD(completions);
3194 struct lpfc_sli *psli = &phba->sli; 3312 struct lpfc_sli *psli = &phba->sli;
3195 struct lpfc_sli_ring *pring; 3313 struct lpfc_sli_ring *pring;
3314 struct lpfc_dmabuf *buf_ptr;
3196 LPFC_MBOXQ_t *pmb; 3315 LPFC_MBOXQ_t *pmb;
3197 struct lpfc_iocbq *iocb; 3316 struct lpfc_iocbq *iocb;
3198 IOCB_t *cmd = NULL; 3317 IOCB_t *cmd = NULL;
@@ -3232,6 +3351,19 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
3232 } 3351 }
3233 } 3352 }
3234 3353
3354 spin_lock_irqsave(&phba->hbalock, flags);
3355 list_splice_init(&phba->elsbuf, &completions);
3356 phba->elsbuf_cnt = 0;
3357 phba->elsbuf_prev_cnt = 0;
3358 spin_unlock_irqrestore(&phba->hbalock, flags);
3359
3360 while (!list_empty(&completions)) {
3361 list_remove_head(&completions, buf_ptr,
3362 struct lpfc_dmabuf, list);
3363 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3364 kfree(buf_ptr);
3365 }
3366
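[The elsbuf teardown just above follows the usual splice-then-free idiom: detach the entire list while holding the lock, then free each entry with the lock dropped, so lpfc_mbuf_free never runs under hbalock. A generic, self-contained sketch of the idiom; struct my_entry and drain_and_free are illustrative names:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_entry {                           /* illustrative entry type */
            struct list_head list;
    };

    /* Detach every entry under the lock, then free them with it dropped. */
    static void drain_and_free(struct list_head *shared, spinlock_t *lock)
    {
            struct my_entry *entry;
            unsigned long flags;
            LIST_HEAD(tmp);

            spin_lock_irqsave(lock, flags);
            list_splice_init(shared, &tmp);     /* detach under the lock */
            spin_unlock_irqrestore(lock, flags);

            while (!list_empty(&tmp)) {         /* free with the lock dropped */
                    entry = list_first_entry(&tmp, struct my_entry, list);
                    list_del(&entry->list);
                    kfree(entry);
            }
    }]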
3235 /* Return any active mbox cmds */ 3367 /* Return any active mbox cmds */
3236 del_timer_sync(&psli->mbox_tmo); 3368 del_timer_sync(&psli->mbox_tmo);
3237 spin_lock_irqsave(&phba->hbalock, flags); 3369 spin_lock_irqsave(&phba->hbalock, flags);
@@ -3294,6 +3426,47 @@ lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3294 return 0; 3426 return 0;
3295} 3427}
3296 3428
3429uint32_t
3430lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
3431{
3432 spin_lock_irq(&phba->hbalock);
3433 phba->buffer_tag_count++;
3434 /*
3435 * Always set the QUE_BUFTAG_BIT to distinguish these
3436 * from tags assigned by HBQ.
3437 */
3438 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
3439 spin_unlock_irq(&phba->hbalock);
3440 return phba->buffer_tag_count;
3441}
3442
3443struct lpfc_dmabuf *
3444lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3445 uint32_t tag)
3446{
3447 struct lpfc_dmabuf *mp, *next_mp;
3448 struct list_head *slp = &pring->postbufq;
3449
3450 /* Search postbufq, from the beginning, looking for a match on tag */
3451 spin_lock_irq(&phba->hbalock);
3452 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
3453 if (mp->buffer_tag == tag) {
3454 list_del_init(&mp->list);
3455 pring->postbufq_cnt--;
3456 spin_unlock_irq(&phba->hbalock);
3457 return mp;
3458 }
3459 }
3460
3461 spin_unlock_irq(&phba->hbalock);
3462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3463 "0410 Cannot find virtual addr for buffer tag on "
3464 "ring %d Data x%lx x%p x%p x%x\n",
3465 pring->ringno, (unsigned long) tag,
3466 slp->next, slp->prev, pring->postbufq_cnt);
3467
3468 return NULL;
3469}
3297 3470
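[Taken together, the two helpers above give the driver its own buffer-tag space: QUE_BUFTAG_BIT keeps driver-assigned tags disjoint from HBQ-assigned ones, and the tag rides in the IOCB so the completion path can recover the posted buffer. A hedged usage sketch; the posting side and the tag_from_iocb extraction are not shown in this patch:

    /* Post: tag the buffer, then queue it on the ring's postbufq. */
    mp->buffer_tag = lpfc_sli_get_buffer_tag(phba); /* QUE_BUFTAG_BIT set */
    lpfc_sli_ringpostbuf_put(phba, pring, mp);

    /* Complete: the tag comes back in the response IOCB. */
    mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_iocb);
    if (mp) {
            /* consume mp->virt, then release the buffer */
            lpfc_mbuf_free(phba, mp->virt, mp->phys);
            kfree(mp);
    }]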
3298struct lpfc_dmabuf * 3471struct lpfc_dmabuf *
3299lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3472lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
@@ -3361,6 +3534,12 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3361 pring->txcmplq_cnt--; 3534 pring->txcmplq_cnt--;
3362 spin_unlock_irq(&phba->hbalock); 3535 spin_unlock_irq(&phba->hbalock);
3363 3536
3537 /* Firmware could still be DMAing the payload,
3538 * so don't free the data buffer until after a
3539 * heartbeat.
3540 */
3541 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
3542
3364 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3543 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3365 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 3544 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3366 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; 3545 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
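[LPFC_DELAY_MEM_FREE set here pairs with the phba->elsbuf list drained in lpfc_sli_hba_down earlier in this patch: rather than freeing a buffer the firmware may still be DMAing, the release path presumably parks it on elsbuf until a heartbeat has gone by. A sketch of that assumed release-side check (not shown in this hunk; elsiocb and buf_ptr are illustrative):

    /* Assumed: in the ELS iocb free path. */
    if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
            spin_lock_irq(&phba->hbalock);
            list_add_tail(&buf_ptr->list, &phba->elsbuf);
            phba->elsbuf_cnt++;             /* freed after a heartbeat */
            spin_unlock_irq(&phba->hbalock);
    } else {
            lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
            kfree(buf_ptr);
    }]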
@@ -3699,7 +3878,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3699 unsigned long flag; 3878 unsigned long flag;
3700 3879
3701 /* The caller must leave context1 empty. */ 3880 /* The caller must leave context1 empty. */
3702 if (pmboxq->context1 != 0) 3881 if (pmboxq->context1)
3703 return MBX_NOT_FINISHED; 3882 return MBX_NOT_FINISHED;
3704 3883
3705 /* setup wake call as IOCB callback */ 3884 /* setup wake call as IOCB callback */
@@ -3771,7 +3950,6 @@ lpfc_intr_handler(int irq, void *dev_id)
3771 uint32_t ha_copy; 3950 uint32_t ha_copy;
3772 uint32_t work_ha_copy; 3951 uint32_t work_ha_copy;
3773 unsigned long status; 3952 unsigned long status;
3774 int i;
3775 uint32_t control; 3953 uint32_t control;
3776 3954
3777 MAILBOX_t *mbox, *pmbox; 3955 MAILBOX_t *mbox, *pmbox;
@@ -3888,7 +4066,6 @@ lpfc_intr_handler(int irq, void *dev_id)
3888 } 4066 }
3889 4067
3890 if (work_ha_copy & HA_ERATT) { 4068 if (work_ha_copy & HA_ERATT) {
3891 phba->link_state = LPFC_HBA_ERROR;
3892 /* 4069 /*
3893 * There was a link/board error. Read the 4070 * There was a link/board error. Read the
3894 * status register to retrieve the error event 4071 * status register to retrieve the error event
@@ -3920,7 +4097,7 @@ lpfc_intr_handler(int irq, void *dev_id)
3920 * Stray Mailbox Interrupt, mbxCommand <cmd> 4097 * Stray Mailbox Interrupt, mbxCommand <cmd>
3921 * mbxStatus <status> 4098 * mbxStatus <status>
3922 */ 4099 */
3923 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | 4100 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
3924 LOG_SLI, 4101 LOG_SLI,
3925 "(%d):0304 Stray Mailbox " 4102 "(%d):0304 Stray Mailbox "
3926 "Interrupt mbxCommand x%x " 4103 "Interrupt mbxCommand x%x "
@@ -3928,51 +4105,60 @@ lpfc_intr_handler(int irq, void *dev_id)
3928 (vport ? vport->vpi : 0), 4105 (vport ? vport->vpi : 0),
3929 pmbox->mbxCommand, 4106 pmbox->mbxCommand,
3930 pmbox->mbxStatus); 4107 pmbox->mbxStatus);
3931 } 4108 /* clear mailbox attention bit */
3932 phba->last_completion_time = jiffies; 4109 work_ha_copy &= ~HA_MBATT;
3933 del_timer_sync(&phba->sli.mbox_tmo); 4110 } else {
3934 4111 phba->last_completion_time = jiffies;
3935 phba->sli.mbox_active = NULL; 4112 del_timer(&phba->sli.mbox_tmo);
3936 if (pmb->mbox_cmpl) {
3937 lpfc_sli_pcimem_bcopy(mbox, pmbox,
3938 MAILBOX_CMD_SIZE);
3939 }
3940 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
3941 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
3942 4113
3943 lpfc_debugfs_disc_trc(vport, 4114 phba->sli.mbox_active = NULL;
3944 LPFC_DISC_TRC_MBOX_VPORT, 4115 if (pmb->mbox_cmpl) {
3945 "MBOX dflt rpi: : status:x%x rpi:x%x", 4116 lpfc_sli_pcimem_bcopy(mbox, pmbox,
3946 (uint32_t)pmbox->mbxStatus, 4117 MAILBOX_CMD_SIZE);
3947 pmbox->un.varWords[0], 0); 4118 }
3948 4119 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
3949 if ( !pmbox->mbxStatus) { 4120 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
3950 mp = (struct lpfc_dmabuf *) 4121
3951 (pmb->context1); 4122 lpfc_debugfs_disc_trc(vport,
3952 ndlp = (struct lpfc_nodelist *) 4123 LPFC_DISC_TRC_MBOX_VPORT,
3953 pmb->context2; 4124 "MBOX dflt rpi: "
3954 4125 "status:x%x rpi:x%x",
3955 /* Reg_LOGIN of dflt RPI was successful. 4126 (uint32_t)pmbox->mbxStatus,
3956 * new lets get rid of the RPI using the 4127 pmbox->un.varWords[0], 0);
3957 * same mbox buffer. 4128
3958 */ 4129 if (!pmbox->mbxStatus) {
3959 lpfc_unreg_login(phba, vport->vpi, 4130 mp = (struct lpfc_dmabuf *)
3960 pmbox->un.varWords[0], pmb); 4131 (pmb->context1);
3961 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 4132 ndlp = (struct lpfc_nodelist *)
3962 pmb->context1 = mp; 4133 pmb->context2;
3963 pmb->context2 = ndlp; 4134
3964 pmb->vport = vport; 4135 /* Reg_LOGIN of dflt RPI was
3965 spin_lock(&phba->hbalock); 4136 * successful. Now let's get
3966 phba->sli.sli_flag &= 4137 * rid of the RPI using the
3967 ~LPFC_SLI_MBOX_ACTIVE; 4138 * same mbox buffer.
3968 spin_unlock(&phba->hbalock); 4139 */
3969 goto send_current_mbox; 4140 lpfc_unreg_login(phba,
4141 vport->vpi,
4142 pmbox->un.varWords[0],
4143 pmb);
4144 pmb->mbox_cmpl =
4145 lpfc_mbx_cmpl_dflt_rpi;
4146 pmb->context1 = mp;
4147 pmb->context2 = ndlp;
4148 pmb->vport = vport;
4149 spin_lock(&phba->hbalock);
4150 phba->sli.sli_flag &=
4151 ~LPFC_SLI_MBOX_ACTIVE;
4152 spin_unlock(&phba->hbalock);
4153 goto send_current_mbox;
4154 }
3970 } 4155 }
4156 spin_lock(&phba->pport->work_port_lock);
4157 phba->pport->work_port_events &=
4158 ~WORKER_MBOX_TMO;
4159 spin_unlock(&phba->pport->work_port_lock);
4160 lpfc_mbox_cmpl_put(phba, pmb);
3971 } 4161 }
3972 spin_lock(&phba->pport->work_port_lock);
3973 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3974 spin_unlock(&phba->pport->work_port_lock);
3975 lpfc_mbox_cmpl_put(phba, pmb);
3976 } 4162 }
3977 if ((work_ha_copy & HA_MBATT) && 4163 if ((work_ha_copy & HA_MBATT) &&
3978 (phba->sli.mbox_active == NULL)) { 4164 (phba->sli.mbox_active == NULL)) {
@@ -3990,10 +4176,6 @@ send_current_mbox:
3990 lpfc_mbox_cmpl_put(phba, pmb); 4176 lpfc_mbox_cmpl_put(phba, pmb);
3991 goto send_next_mbox; 4177 goto send_next_mbox;
3992 } 4178 }
3993 } else {
3994 /* Turn on IOCB processing */
3995 for (i = 0; i < phba->sli.num_rings; i++)
3996 lpfc_sli_turn_on_ring(phba, i);
3997 } 4179 }
3998 4180
3999 } 4181 }
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 51b2b6b949be..7249fd252cbb 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -33,6 +33,7 @@ typedef enum _lpfc_ctx_cmd {
33struct lpfc_iocbq { 33struct lpfc_iocbq {
34 /* lpfc_iocbqs are used in double linked lists */ 34 /* lpfc_iocbqs are used in double linked lists */
35 struct list_head list; 35 struct list_head list;
36 struct list_head clist;
36 uint16_t iotag; /* pre-assigned IO tag */ 37 uint16_t iotag; /* pre-assigned IO tag */
37 uint16_t rsvd1; 38 uint16_t rsvd1;
38 39
@@ -44,6 +45,7 @@ struct lpfc_iocbq {
44#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ 45#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
45#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 46#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
46#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ 47#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
48#define LPFC_DELAY_MEM_FREE 0x20 /* Defer freeing of FC data */
47 49
48 uint8_t abort_count; 50 uint8_t abort_count;
49 uint8_t rsvd2; 51 uint8_t rsvd2;
@@ -92,8 +94,6 @@ typedef struct lpfcMboxq {
92#define MBX_POLL 1 /* poll mailbox till command done, then 94#define MBX_POLL 1 /* poll mailbox till command done, then
93 return */ 95 return */
94#define MBX_NOWAIT 2 /* issue command then return immediately */ 96#define MBX_NOWAIT 2 /* issue command then return immediately */
95#define MBX_STOP_IOCB 4 /* Stop iocb processing till mbox cmds
96 complete */
97 97
98#define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per 98#define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per
99 ring */ 99 ring */
@@ -129,9 +129,7 @@ struct lpfc_sli_ring {
129 uint16_t flag; /* ring flags */ 129 uint16_t flag; /* ring flags */
130#define LPFC_DEFERRED_RING_EVENT 0x001 /* Deferred processing a ring event */ 130#define LPFC_DEFERRED_RING_EVENT 0x001 /* Deferred processing a ring event */
131#define LPFC_CALL_RING_AVAILABLE 0x002 /* indicates cmd was full */ 131#define LPFC_CALL_RING_AVAILABLE 0x002 /* indicates cmd was full */
132#define LPFC_STOP_IOCB_MBX 0x010 /* Stop processing IOCB cmds mbox */
133#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */ 132#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */
134#define LPFC_STOP_IOCB_MASK 0x030 /* Stop processing IOCB cmds mask */
135 uint16_t abtsiotag; /* tracks next iotag to use for ABTS */ 133 uint16_t abtsiotag; /* tracks next iotag to use for ABTS */
136 134
137 uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */ 135 uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
@@ -163,9 +161,12 @@ struct lpfc_sli_ring {
163 struct list_head iocb_continueq; 161 struct list_head iocb_continueq;
164 uint16_t iocb_continueq_cnt; /* current length of queue */ 162 uint16_t iocb_continueq_cnt; /* current length of queue */
165 uint16_t iocb_continueq_max; /* max length */ 163 uint16_t iocb_continueq_max; /* max length */
164 struct list_head iocb_continue_saveq;
166 165
167 struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK]; 166 struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK];
168 uint32_t num_mask; /* number of mask entries in prt array */ 167 uint32_t num_mask; /* number of mask entries in prt array */
168 void (*lpfc_sli_rcv_async_status) (struct lpfc_hba *,
169 struct lpfc_sli_ring *, struct lpfc_iocbq *);
169 170
170 struct lpfc_sli_ring_stat stats; /* SLI statistical info */ 171 struct lpfc_sli_ring_stat stats; /* SLI statistical info */
171 172
@@ -199,9 +200,6 @@ struct lpfc_hbq_init {
199 uint32_t add_count; /* number to allocate when starved */ 200 uint32_t add_count; /* number to allocate when starved */
200} ; 201} ;
201 202
202#define LPFC_MAX_HBQ 16
203
204
205/* Structure used to hold SLI statistical counters and info */ 203/* Structure used to hold SLI statistical counters and info */
206struct lpfc_sli_stat { 204struct lpfc_sli_stat {
207 uint64_t mbox_stat_err; /* Mbox cmds completed status error */ 205 uint64_t mbox_stat_err; /* Mbox cmds completed status error */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 0081f49286bc..4b633d39a82a 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,10 +18,10 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.2.2" 21#define LPFC_DRIVER_VERSION "8.2.4"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
25#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ 25#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
26 LPFC_DRIVER_VERSION 26 LPFC_DRIVER_VERSION
27#define LPFC_COPYRIGHT "Copyright(c) 2004-2007 Emulex. All rights reserved." 27#define LPFC_COPYRIGHT "Copyright(c) 2004-2008 Emulex. All rights reserved."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index dcb415e717c3..9fad7663c117 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -125,15 +125,26 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
125 pmb->vport = vport; 125 pmb->vport = vport;
126 rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2); 126 rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
127 if (rc != MBX_SUCCESS) { 127 if (rc != MBX_SUCCESS) {
128 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, 128 if (signal_pending(current)) {
129 "1818 VPort failed init, mbxCmd x%x " 129 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
130 "READ_SPARM mbxStatus x%x, rc = x%x\n", 130 "1830 Signal aborted mbxCmd x%x\n",
131 mb->mbxCommand, mb->mbxStatus, rc); 131 mb->mbxCommand);
132 lpfc_mbuf_free(phba, mp->virt, mp->phys); 132 lpfc_mbuf_free(phba, mp->virt, mp->phys);
133 kfree(mp); 133 kfree(mp);
134 if (rc != MBX_TIMEOUT) 134 if (rc != MBX_TIMEOUT)
135 mempool_free(pmb, phba->mbox_mem_pool); 135 mempool_free(pmb, phba->mbox_mem_pool);
136 return -EIO; 136 return -EINTR;
137 } else {
138 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
139 "1818 VPort failed init, mbxCmd x%x "
140 "READ_SPARM mbxStatus x%x, rc = x%x\n",
141 mb->mbxCommand, mb->mbxStatus, rc);
142 lpfc_mbuf_free(phba, mp->virt, mp->phys);
143 kfree(mp);
144 if (rc != MBX_TIMEOUT)
145 mempool_free(pmb, phba->mbox_mem_pool);
146 return -EIO;
147 }
137 } 148 }
138 149
139 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 150 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
@@ -204,6 +215,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
204 int instance; 215 int instance;
205 int vpi; 216 int vpi;
206 int rc = VPORT_ERROR; 217 int rc = VPORT_ERROR;
218 int status;
207 219
208 if ((phba->sli_rev < 3) || 220 if ((phba->sli_rev < 3) ||
209 !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { 221 !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
@@ -248,13 +260,19 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
248 vport->vpi = vpi; 260 vport->vpi = vpi;
249 lpfc_debugfs_initialize(vport); 261 lpfc_debugfs_initialize(vport);
250 262
251 if (lpfc_vport_sparm(phba, vport)) { 263 if ((status = lpfc_vport_sparm(phba, vport))) {
252 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 264 if (status == -EINTR) {
253 "1813 Create VPORT failed. " 265 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
254 "Cannot get sparam\n"); 266 "1831 Create VPORT Interrupted.\n");
267 rc = VPORT_ERROR;
268 } else {
269 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
270 "1813 Create VPORT failed. "
271 "Cannot get sparam\n");
272 rc = VPORT_NORESOURCES;
273 }
255 lpfc_free_vpi(phba, vpi); 274 lpfc_free_vpi(phba, vpi);
256 destroy_port(vport); 275 destroy_port(vport);
257 rc = VPORT_NORESOURCES;
258 goto error_out; 276 goto error_out;
259 } 277 }
260 278
@@ -427,7 +445,6 @@ int
427lpfc_vport_delete(struct fc_vport *fc_vport) 445lpfc_vport_delete(struct fc_vport *fc_vport)
428{ 446{
429 struct lpfc_nodelist *ndlp = NULL; 447 struct lpfc_nodelist *ndlp = NULL;
430 struct lpfc_nodelist *next_ndlp;
431 struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost; 448 struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
432 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; 449 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
433 struct lpfc_hba *phba = vport->phba; 450 struct lpfc_hba *phba = vport->phba;
@@ -482,8 +499,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
482 499
483 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 500 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
484 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && 501 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
485 phba->link_state >= LPFC_LINK_UP) { 502 phba->link_state >= LPFC_LINK_UP) {
486 503 if (vport->cfg_enable_da_id) {
504 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
505 if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
506 while (vport->ct_flags && timeout)
507 timeout = schedule_timeout(timeout);
508 else
509 lpfc_printf_log(vport->phba, KERN_WARNING,
510 LOG_VPORT,
511 "1829 CT command failed to "
512 "delete objects on fabric. \n");
513 }
487 /* First look for the Fabric ndlp */ 514 /* First look for the Fabric ndlp */
488 ndlp = lpfc_findnode_did(vport, Fabric_DID); 515 ndlp = lpfc_findnode_did(vport, Fabric_DID);
489 if (!ndlp) { 516 if (!ndlp) {
@@ -503,23 +530,20 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
503 } 530 }
504 531
505skip_logo: 532skip_logo:
533 lpfc_cleanup(vport);
506 lpfc_sli_host_down(vport); 534 lpfc_sli_host_down(vport);
507 535
508 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
509 lpfc_disc_state_machine(vport, ndlp, NULL,
510 NLP_EVT_DEVICE_RECOVERY);
511 lpfc_disc_state_machine(vport, ndlp, NULL,
512 NLP_EVT_DEVICE_RM);
513 }
514
515 lpfc_stop_vport_timers(vport); 536 lpfc_stop_vport_timers(vport);
516 lpfc_unreg_all_rpis(vport); 537 lpfc_unreg_all_rpis(vport);
517 lpfc_unreg_default_rpis(vport); 538
518 /* 539 if (!(phba->pport->load_flag & FC_UNLOADING)) {
519 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the 540 lpfc_unreg_default_rpis(vport);
520 * scsi_host_put() to release the vport. 541 /*
521 */ 542 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
522 lpfc_mbx_unreg_vpi(vport); 543 * does the scsi_host_put() to release the vport.
544 */
545 lpfc_mbx_unreg_vpi(vport);
546 }
523 547
524 lpfc_free_vpi(phba, vport->vpi); 548 lpfc_free_vpi(phba, vport->vpi);
525 vport->work_port_events = 0; 549 vport->work_port_events = 0;
@@ -532,16 +556,13 @@ skip_logo:
532 return VPORT_OK; 556 return VPORT_OK;
533} 557}
534 558
535EXPORT_SYMBOL(lpfc_vport_create);
536EXPORT_SYMBOL(lpfc_vport_delete);
537
538struct lpfc_vport ** 559struct lpfc_vport **
539lpfc_create_vport_work_array(struct lpfc_hba *phba) 560lpfc_create_vport_work_array(struct lpfc_hba *phba)
540{ 561{
541 struct lpfc_vport *port_iterator; 562 struct lpfc_vport *port_iterator;
542 struct lpfc_vport **vports; 563 struct lpfc_vport **vports;
543 int index = 0; 564 int index = 0;
544 vports = kzalloc(LPFC_MAX_VPORTS * sizeof(struct lpfc_vport *), 565 vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *),
545 GFP_KERNEL); 566 GFP_KERNEL);
546 if (vports == NULL) 567 if (vports == NULL)
547 return NULL; 568 return NULL;
@@ -560,12 +581,12 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
560} 581}
561 582
562void 583void
563lpfc_destroy_vport_work_array(struct lpfc_vport **vports) 584lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
564{ 585{
565 int i; 586 int i;
566 if (vports == NULL) 587 if (vports == NULL)
567 return; 588 return;
568 for (i=0; vports[i] != NULL && i < LPFC_MAX_VPORTS; i++) 589 for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++)
569 scsi_host_put(lpfc_shost_from_vport(vports[i])); 590 scsi_host_put(lpfc_shost_from_vport(vports[i]));
570 kfree(vports); 591 kfree(vports);
571} 592}
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 91da17751a37..96c445333b69 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -89,7 +89,7 @@ int lpfc_vport_delete(struct fc_vport *);
89int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *); 89int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
90int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); 90int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
91struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); 91struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
92void lpfc_destroy_vport_work_array(struct lpfc_vport **); 92void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
93 93
94/* 94/*
95 * queuecommand VPORT-specific return codes. Specified in the host byte code. 95 * queuecommand VPORT-specific return codes. Specified in the host byte code.
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 66c652035730..765c24d2bc38 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4889,7 +4889,7 @@ __megaraid_shutdown(adapter_t *adapter)
4889 mdelay(1000); 4889 mdelay(1000);
4890} 4890}
4891 4891
4892static void 4892static void __devexit
4893megaraid_remove_one(struct pci_dev *pdev) 4893megaraid_remove_one(struct pci_dev *pdev)
4894{ 4894{
4895 struct Scsi_Host *host = pci_get_drvdata(pdev); 4895 struct Scsi_Host *host = pci_get_drvdata(pdev);
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index c8923108183a..24e32e446e76 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -300,7 +300,7 @@ static struct pci_device_id pci_id_table_g[] = {
300MODULE_DEVICE_TABLE(pci, pci_id_table_g); 300MODULE_DEVICE_TABLE(pci, pci_id_table_g);
301 301
302 302
303static struct pci_driver megaraid_pci_driver_g = { 303static struct pci_driver megaraid_pci_driver = {
304 .name = "megaraid", 304 .name = "megaraid",
305 .id_table = pci_id_table_g, 305 .id_table = pci_id_table_g,
306 .probe = megaraid_probe_one, 306 .probe = megaraid_probe_one,
@@ -394,7 +394,7 @@ megaraid_init(void)
394 394
395 395
396 // register as a PCI hot-plug driver module 396 // register as a PCI hot-plug driver module
397 rval = pci_register_driver(&megaraid_pci_driver_g); 397 rval = pci_register_driver(&megaraid_pci_driver);
398 if (rval < 0) { 398 if (rval < 0) {
399 con_log(CL_ANN, (KERN_WARNING 399 con_log(CL_ANN, (KERN_WARNING
400 "megaraid: could not register hotplug support.\n")); 400 "megaraid: could not register hotplug support.\n"));
@@ -415,7 +415,7 @@ megaraid_exit(void)
415 con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n")); 415 con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));
416 416
417 // unregister as PCI hotplug driver 417 // unregister as PCI hotplug driver
418 pci_unregister_driver(&megaraid_pci_driver_g); 418 pci_unregister_driver(&megaraid_pci_driver);
419 419
420 return; 420 return;
421} 421}
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index e3c5c5282203..d7ec921865c4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * Linux MegaRAID driver for SAS based RAID controllers 3 * Linux MegaRAID driver for SAS based RAID controllers
4 * 4 *
5 * Copyright (c) 2003-2005 LSI Logic Corporation. 5 * Copyright (c) 2003-2005 LSI Corporation.
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.03.10-rc5 13 * Version : v00.00.03.16-rc1
14 * 14 *
15 * Authors: 15 * Authors:
16 * (email-id : megaraidlinux@lsi.com) 16 * (email-id : megaraidlinux@lsi.com)
@@ -31,6 +31,7 @@
31#include <linux/moduleparam.h> 31#include <linux/moduleparam.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/spinlock.h> 33#include <linux/spinlock.h>
34#include <linux/mutex.h>
34#include <linux/interrupt.h> 35#include <linux/interrupt.h>
35#include <linux/delay.h> 36#include <linux/delay.h>
36#include <linux/uio.h> 37#include <linux/uio.h>
@@ -46,10 +47,18 @@
46#include <scsi/scsi_host.h> 47#include <scsi/scsi_host.h>
47#include "megaraid_sas.h" 48#include "megaraid_sas.h"
48 49
50/*
51 * poll_mode_io: 1 = complete commands from the queuecommand (I/O) path
52 */
53static unsigned int poll_mode_io;
54module_param_named(poll_mode_io, poll_mode_io, int, 0);
55MODULE_PARM_DESC(poll_mode_io,
56 "Complete cmds from IO path, (default=0)");
57
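[Usage note: the mode can be selected at load time, e.g. "modprobe megaraid_sas poll_mode_io=1", or toggled at runtime through the poll_mode_io driver attribute added later in this patch, which lands under the driver's directory in /sys/bus/pci/drivers/ (the exact path depends on the registered driver name).]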
49MODULE_LICENSE("GPL"); 58MODULE_LICENSE("GPL");
50MODULE_VERSION(MEGASAS_VERSION); 59MODULE_VERSION(MEGASAS_VERSION);
51MODULE_AUTHOR("megaraidlinux@lsi.com"); 60MODULE_AUTHOR("megaraidlinux@lsi.com");
52MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver"); 61MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
53 62
54/* 63/*
55 * PCI ID table for all supported controllers 64 * PCI ID table for all supported controllers
@@ -76,6 +85,10 @@ static DEFINE_MUTEX(megasas_async_queue_mutex);
76 85
77static u32 megasas_dbg_lvl; 86static u32 megasas_dbg_lvl;
78 87
88static void
89megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
90 u8 alt_status);
91
79/** 92/**
80 * megasas_get_cmd - Get a command from the free pool 93 * megasas_get_cmd - Get a command from the free pool
81 * @instance: Adapter soft state 94 * @instance: Adapter soft state
@@ -855,6 +868,12 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
855 atomic_inc(&instance->fw_outstanding); 868 atomic_inc(&instance->fw_outstanding);
856 869
857 instance->instancet->fire_cmd(cmd->frame_phys_addr ,cmd->frame_count-1,instance->reg_set); 870 instance->instancet->fire_cmd(cmd->frame_phys_addr ,cmd->frame_count-1,instance->reg_set);
871 /*
872 * Check if we have pending cmds to be completed
873 */
874 if (poll_mode_io && atomic_read(&instance->fw_outstanding))
875 tasklet_schedule(&instance->isr_tasklet);
876
858 877
859 return 0; 878 return 0;
860 879
@@ -886,6 +905,64 @@ static int megasas_slave_configure(struct scsi_device *sdev)
886} 905}
887 906
888/** 907/**
908 * megasas_complete_cmd_dpc - Completes outstanding commands
909 * @instance_addr: Address of adapter soft state
910 *
911 * Tasklet to complete cmds
912 */
913static void megasas_complete_cmd_dpc(unsigned long instance_addr)
914{
915 u32 producer;
916 u32 consumer;
917 u32 context;
918 struct megasas_cmd *cmd;
919 struct megasas_instance *instance =
920 (struct megasas_instance *)instance_addr;
921 unsigned long flags;
922
923 /* If we have already declared the adapter dead, do not complete cmds */
924 if (instance->hw_crit_error)
925 return;
926
927 spin_lock_irqsave(&instance->completion_lock, flags);
928
929 producer = *instance->producer;
930 consumer = *instance->consumer;
931
932 while (consumer != producer) {
933 context = instance->reply_queue[consumer];
934
935 cmd = instance->cmd_list[context];
936
937 megasas_complete_cmd(instance, cmd, DID_OK);
938
939 consumer++;
940 if (consumer == (instance->max_fw_cmds + 1)) {
941 consumer = 0;
942 }
943 }
944
945 *instance->consumer = producer;
946
947 spin_unlock_irqrestore(&instance->completion_lock, flags);
948
949 /*
950 * Check if we can restore can_queue
951 */
952 if (instance->flag & MEGASAS_FW_BUSY
953 && time_after(jiffies, instance->last_time + 5 * HZ)
954 && atomic_read(&instance->fw_outstanding) < 17) {
955
956 spin_lock_irqsave(instance->host->host_lock, flags);
957 instance->flag &= ~MEGASAS_FW_BUSY;
958 instance->host->can_queue =
959 instance->max_fw_cmds - MEGASAS_INT_CMDS;
960
961 spin_unlock_irqrestore(instance->host->host_lock, flags);
962 }
963}
964
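[The tail of megasas_complete_cmd_dpc above undoes I/O throttling: once the adapter has been flagged busy, five quiet seconds have elapsed, and fewer than 17 commands remain outstanding, can_queue is restored to max_fw_cmds - MEGASAS_INT_CMDS. The throttling side is not in this hunk; a hedged sketch of what it presumably looks like, with an illustrative low-water value:

    /* Assumed throttle site, entered when the FW reports busy: */
    spin_lock_irqsave(instance->host->host_lock, flags);
    instance->flag |= MEGASAS_FW_BUSY;
    instance->last_time = jiffies;          /* start of the quiet window */
    instance->host->can_queue = 16;         /* illustrative low-water mark */
    spin_unlock_irqrestore(instance->host->host_lock, flags);]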
965/**
889 * megasas_wait_for_outstanding - Wait for all outstanding cmds 966 * megasas_wait_for_outstanding - Wait for all outstanding cmds
890 * @instance: Adapter soft state 967 * @instance: Adapter soft state
891 * 968 *
@@ -908,6 +985,11 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
908 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 985 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
909 printk(KERN_NOTICE "megasas: [%2d]waiting for %d " 986 printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
910 "commands to complete\n",i,outstanding); 987 "commands to complete\n",i,outstanding);
988 /*
989 * Call the cmd completion routine. Cmds are
990 * completed directly without depending on the isr.
991 */
992 megasas_complete_cmd_dpc((unsigned long)instance);
911 } 993 }
912 994
913 msleep(1000); 995 msleep(1000);
@@ -1100,7 +1182,7 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
1100static struct scsi_host_template megasas_template = { 1182static struct scsi_host_template megasas_template = {
1101 1183
1102 .module = THIS_MODULE, 1184 .module = THIS_MODULE,
1103 .name = "LSI Logic SAS based MegaRAID driver", 1185 .name = "LSI SAS based MegaRAID driver",
1104 .proc_name = "megaraid_sas", 1186 .proc_name = "megaraid_sas",
1105 .slave_configure = megasas_slave_configure, 1187 .slave_configure = megasas_slave_configure,
1106 .queuecommand = megasas_queue_command, 1188 .queuecommand = megasas_queue_command,
@@ -1749,57 +1831,119 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
1749} 1831}
1750 1832
1751/** 1833/**
1752 * megasas_complete_cmd_dpc - Returns FW's controller structure 1834 * megasas_issue_init_mfi - Initializes the FW
1753 * @instance_addr: Address of adapter soft state 1835 * @instance: Adapter soft state
1754 * 1836 *
1755 * Tasklet to complete cmds 1837 * Issues the INIT MFI cmd
1756 */ 1838 */
1757static void megasas_complete_cmd_dpc(unsigned long instance_addr) 1839static int
1840megasas_issue_init_mfi(struct megasas_instance *instance)
1758{ 1841{
1759 u32 producer;
1760 u32 consumer;
1761 u32 context; 1842 u32 context;
1843
1762 struct megasas_cmd *cmd; 1844 struct megasas_cmd *cmd;
1763 struct megasas_instance *instance = (struct megasas_instance *)instance_addr;
1764 unsigned long flags;
1765 1845
1766 /* If we have already declared adapter dead, donot complete cmds */ 1846 struct megasas_init_frame *init_frame;
1767 if (instance->hw_crit_error) 1847 struct megasas_init_queue_info *initq_info;
1768 return; 1848 dma_addr_t init_frame_h;
1849 dma_addr_t initq_info_h;
1769 1850
1770 producer = *instance->producer; 1851 /*
1771 consumer = *instance->consumer; 1852 * Prepare an init frame. Note the init frame points to queue info
1853 * structure. Each frame has SGL allocated after first 64 bytes. For
1854 * this frame - since we don't need any SGL - we use SGL's space as
1855 * queue info structure
1856 *
1857 * We will not get a NULL command below. We just created the pool.
1858 */
1859 cmd = megasas_get_cmd(instance);
1772 1860
1773 while (consumer != producer) { 1861 init_frame = (struct megasas_init_frame *)cmd->frame;
1774 context = instance->reply_queue[consumer]; 1862 initq_info = (struct megasas_init_queue_info *)
1863 ((unsigned long)init_frame + 64);
1775 1864
1776 cmd = instance->cmd_list[context]; 1865 init_frame_h = cmd->frame_phys_addr;
1866 initq_info_h = init_frame_h + 64;
1777 1867
1778 megasas_complete_cmd(instance, cmd, DID_OK); 1868 context = init_frame->context;
1869 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
1870 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
1871 init_frame->context = context;
1779 1872
1780 consumer++; 1873 initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
1781 if (consumer == (instance->max_fw_cmds + 1)) { 1874 initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
1782 consumer = 0;
1783 }
1784 }
1785 1875
1786 *instance->consumer = producer; 1876 initq_info->producer_index_phys_addr_lo = instance->producer_h;
1877 initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
1878
1879 init_frame->cmd = MFI_CMD_INIT;
1880 init_frame->cmd_status = 0xFF;
1881 init_frame->queue_info_new_phys_addr_lo = initq_info_h;
1882
1883 init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
1787 1884
1788 /* 1885 /*
1789 * Check if we can restore can_queue 1886 * disable the intr before firing the init frame to FW
1790 */ 1887 */
1791 if (instance->flag & MEGASAS_FW_BUSY 1888 instance->instancet->disable_intr(instance->reg_set);
1792 && time_after(jiffies, instance->last_time + 5 * HZ)
1793 && atomic_read(&instance->fw_outstanding) < 17) {
1794 1889
1795 spin_lock_irqsave(instance->host->host_lock, flags); 1890 /*
1796 instance->flag &= ~MEGASAS_FW_BUSY; 1891 * Issue the init frame in polled mode
1797 instance->host->can_queue = 1892 */
1798 instance->max_fw_cmds - MEGASAS_INT_CMDS;
1799 1893
1800 spin_unlock_irqrestore(instance->host->host_lock, flags); 1894 if (megasas_issue_polled(instance, cmd)) {
1895 printk(KERN_ERR "megasas: Failed to init firmware\n");
1896 megasas_return_cmd(instance, cmd);
1897 goto fail_fw_init;
1801 } 1898 }
1802 1899
1900 megasas_return_cmd(instance, cmd);
1901
1902 return 0;
1903
1904fail_fw_init:
1905 return -EINVAL;
1906}
1907
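[megasas_issue_init_mfi above reuses the SGL area of an ordinary MFI frame as the INIT queue-info block, so the INIT path needs no extra DMA allocation. The layout it relies on, restated as a sketch:

    /*
     * One MEGAMFI_FRAME_SIZE DMA buffer, as carved up by the INIT path:
     *
     *   frame_phys_addr + 0  : struct megasas_init_frame      (64-byte header)
     *   frame_phys_addr + 64 : struct megasas_init_queue_info (in SGL space)
     */
    init_frame = (struct megasas_init_frame *)cmd->frame;
    initq_info = (struct megasas_init_queue_info *)((u8 *)init_frame + 64);
    init_frame->queue_info_new_phys_addr_lo = cmd->frame_phys_addr + 64;

Note the context field is saved and restored around the memset, since it is what identifies the command when it shows up in the reply queue.]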
1908/**
1909 * megasas_start_timer - Initializes a timer object
1910 * @instance: Adapter soft state
1911 * @timer: timer object to be initialized
1912 * @fn: timer function
1913 * @interval: time interval between timer function calls
1914 */
1915static inline void
1916megasas_start_timer(struct megasas_instance *instance,
1917 struct timer_list *timer,
1918 void *fn, unsigned long interval)
1919{
1920 init_timer(timer);
1921 timer->expires = jiffies + interval;
1922 timer->data = (unsigned long)instance;
1923 timer->function = fn;
1924 add_timer(timer);
1925}
1926
1927/**
1928 * megasas_io_completion_timer - Timer fn
1929 * @instance_addr: Address of adapter soft state
1930 *
1931 * Schedules tasklet for cmd completion
1932 * if poll_mode_io is set
1933 */
1934static void
1935megasas_io_completion_timer(unsigned long instance_addr)
1936{
1937 struct megasas_instance *instance =
1938 (struct megasas_instance *)instance_addr;
1939
1940 if (atomic_read(&instance->fw_outstanding))
1941 tasklet_schedule(&instance->isr_tasklet);
1942
1943 /* Restart timer */
1944 if (poll_mode_io)
1945 mod_timer(&instance->io_completion_timer,
1946 jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
1803} 1947}
1804 1948
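[The completion timer above re-arms itself only while poll_mode_io is still set; every teardown path in this patch (suspend, detach, and the sysfs store) stops it with del_timer_sync(), which also waits out a concurrently running handler. Lifecycle sketch, using only calls from this patch:

    /* Start (probe/resume, when poll_mode_io is set): */
    megasas_start_timer(instance, &instance->io_completion_timer,
                        megasas_io_completion_timer,
                        MEGASAS_COMPLETION_TIMER_INTERVAL);

    /* Stop (suspend/detach, or poll_mode_io written back to 0): */
    del_timer_sync(&instance->io_completion_timer);]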
1805/** 1949/**
@@ -1814,22 +1958,15 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1814 u32 reply_q_sz; 1958 u32 reply_q_sz;
1815 u32 max_sectors_1; 1959 u32 max_sectors_1;
1816 u32 max_sectors_2; 1960 u32 max_sectors_2;
1961 u32 tmp_sectors;
1817 struct megasas_register_set __iomem *reg_set; 1962 struct megasas_register_set __iomem *reg_set;
1818
1819 struct megasas_cmd *cmd;
1820 struct megasas_ctrl_info *ctrl_info; 1963 struct megasas_ctrl_info *ctrl_info;
1821
1822 struct megasas_init_frame *init_frame;
1823 struct megasas_init_queue_info *initq_info;
1824 dma_addr_t init_frame_h;
1825 dma_addr_t initq_info_h;
1826
1827 /* 1964 /*
1828 * Map the message registers 1965 * Map the message registers
1829 */ 1966 */
1830 instance->base_addr = pci_resource_start(instance->pdev, 0); 1967 instance->base_addr = pci_resource_start(instance->pdev, 0);
1831 1968
1832 if (pci_request_regions(instance->pdev, "megasas: LSI Logic")) { 1969 if (pci_request_regions(instance->pdev, "megasas: LSI")) {
1833 printk(KERN_DEBUG "megasas: IO memory region busy!\n"); 1970 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
1834 return -EBUSY; 1971 return -EBUSY;
1835 } 1972 }
@@ -1900,52 +2037,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1900 goto fail_reply_queue; 2037 goto fail_reply_queue;
1901 } 2038 }
1902 2039
1903 /* 2040 if (megasas_issue_init_mfi(instance))
1904 * Prepare a init frame. Note the init frame points to queue info
1905 * structure. Each frame has SGL allocated after first 64 bytes. For
1906 * this frame - since we don't need any SGL - we use SGL's space as
1907 * queue info structure
1908 *
1909 * We will not get a NULL command below. We just created the pool.
1910 */
1911 cmd = megasas_get_cmd(instance);
1912
1913 init_frame = (struct megasas_init_frame *)cmd->frame;
1914 initq_info = (struct megasas_init_queue_info *)
1915 ((unsigned long)init_frame + 64);
1916
1917 init_frame_h = cmd->frame_phys_addr;
1918 initq_info_h = init_frame_h + 64;
1919
1920 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
1921 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
1922
1923 initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
1924 initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
1925
1926 initq_info->producer_index_phys_addr_lo = instance->producer_h;
1927 initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
1928
1929 init_frame->cmd = MFI_CMD_INIT;
1930 init_frame->cmd_status = 0xFF;
1931 init_frame->queue_info_new_phys_addr_lo = initq_info_h;
1932
1933 init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
1934
1935 /*
1936 * disable the intr before firing the init frame to FW
1937 */
1938 instance->instancet->disable_intr(instance->reg_set);
1939
1940 /*
1941 * Issue the init frame in polled mode
1942 */
1943 if (megasas_issue_polled(instance, cmd)) {
1944 printk(KERN_DEBUG "megasas: Failed to init firmware\n");
1945 goto fail_fw_init; 2041 goto fail_fw_init;
1946 }
1947
1948 megasas_return_cmd(instance, cmd);
1949 2042
1950 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); 2043 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
1951 2044
@@ -1958,17 +2051,20 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1958 * Note that older firmwares ( < FW ver 30) didn't report information 2051 * Note that older firmwares ( < FW ver 30) didn't report information
1959 * to calculate max_sectors_1. So the number ended up as zero always. 2052 * to calculate max_sectors_1. So the number ended up as zero always.
1960 */ 2053 */
2054 tmp_sectors = 0;
1961 if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { 2055 if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
1962 2056
1963 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 2057 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
1964 ctrl_info->max_strips_per_io; 2058 ctrl_info->max_strips_per_io;
1965 max_sectors_2 = ctrl_info->max_request_size; 2059 max_sectors_2 = ctrl_info->max_request_size;
1966 2060
1967 instance->max_sectors_per_req = (max_sectors_1 < max_sectors_2) 2061 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
1968 ? max_sectors_1 : max_sectors_2; 2062 }
1969 } else 2063
1970 instance->max_sectors_per_req = instance->max_num_sge * 2064 instance->max_sectors_per_req = instance->max_num_sge *
1971 PAGE_SIZE / 512; 2065 PAGE_SIZE / 512;
2066 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
2067 instance->max_sectors_per_req = tmp_sectors;
1972 2068
1973 kfree(ctrl_info); 2069 kfree(ctrl_info);
1974 2070
@@ -1976,12 +2072,17 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1976 * Setup tasklet for cmd completion 2072 * Setup tasklet for cmd completion
1977 */ 2073 */
1978 2074
1979 tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc, 2075 tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
1980 (unsigned long)instance); 2076 (unsigned long)instance);
2077
2078 /* Initialize the cmd completion timer */
2079 if (poll_mode_io)
2080 megasas_start_timer(instance, &instance->io_completion_timer,
2081 megasas_io_completion_timer,
2082 MEGASAS_COMPLETION_TIMER_INTERVAL);
1981 return 0; 2083 return 0;
1982 2084
1983 fail_fw_init: 2085 fail_fw_init:
1984 megasas_return_cmd(instance, cmd);
1985 2086
1986 pci_free_consistent(instance->pdev, reply_q_sz, 2087 pci_free_consistent(instance->pdev, reply_q_sz,
1987 instance->reply_queue, instance->reply_queue_h); 2088 instance->reply_queue, instance->reply_queue_h);
@@ -2263,6 +2364,28 @@ static int megasas_io_attach(struct megasas_instance *instance)
2263 return 0; 2364 return 0;
2264} 2365}
2265 2366
2367static int
2368megasas_set_dma_mask(struct pci_dev *pdev)
2369{
2370 /*
2371 * All our contollers are capable of performing 64-bit DMA
2372 */
2373 if (IS_DMA64) {
2374 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
2375
2376 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
2377 goto fail_set_dma_mask;
2378 }
2379 } else {
2380 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
2381 goto fail_set_dma_mask;
2382 }
2383 return 0;
2384
2385fail_set_dma_mask:
2386 return 1;
2387}
2388
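[megasas_set_dma_mask above factors the 64-bit-with-32-bit-fallback dance out of the probe path so resume can reuse it. For reference only, and assuming a much newer kernel than this tree, the same fallback collapses to:

    /* Not part of this patch; dma_set_mask_and_coherent is a later API. */
    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
        dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
            return -ENODEV; /* no usable DMA mask */]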
2266/** 2389/**
2267 * megasas_probe_one - PCI hotplug entry point 2390 * megasas_probe_one - PCI hotplug entry point
2268 * @pdev: PCI device structure 2391 * @pdev: PCI device structure
@@ -2296,19 +2419,8 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2296 2419
2297 pci_set_master(pdev); 2420 pci_set_master(pdev);
2298 2421
2299 /* 2422 if (megasas_set_dma_mask(pdev))
2300 * All our contollers are capable of performing 64-bit DMA 2423 goto fail_set_dma_mask;
2301 */
2302 if (IS_DMA64) {
2303 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
2304
2305 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
2306 goto fail_set_dma_mask;
2307 }
2308 } else {
2309 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
2310 goto fail_set_dma_mask;
2311 }
2312 2424
2313 host = scsi_host_alloc(&megasas_template, 2425 host = scsi_host_alloc(&megasas_template,
2314 sizeof(struct megasas_instance)); 2426 sizeof(struct megasas_instance));
@@ -2357,8 +2469,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2357 init_waitqueue_head(&instance->abort_cmd_wait_q); 2469 init_waitqueue_head(&instance->abort_cmd_wait_q);
2358 2470
2359 spin_lock_init(&instance->cmd_pool_lock); 2471 spin_lock_init(&instance->cmd_pool_lock);
2472 spin_lock_init(&instance->completion_lock);
2360 2473
2361 sema_init(&instance->aen_mutex, 1); 2474 mutex_init(&instance->aen_mutex);
2362 sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); 2475 sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
2363 2476
2364 /* 2477 /*
@@ -2490,8 +2603,10 @@ static void megasas_flush_cache(struct megasas_instance *instance)
2490/** 2603/**
2491 * megasas_shutdown_controller - Instructs FW to shutdown the controller 2604 * megasas_shutdown_controller - Instructs FW to shutdown the controller
2492 * @instance: Adapter soft state 2605 * @instance: Adapter soft state
2606 * @opcode: Shutdown/Hibernate
2493 */ 2607 */
2494static void megasas_shutdown_controller(struct megasas_instance *instance) 2608static void megasas_shutdown_controller(struct megasas_instance *instance,
2609 u32 opcode)
2495{ 2610{
2496 struct megasas_cmd *cmd; 2611 struct megasas_cmd *cmd;
2497 struct megasas_dcmd_frame *dcmd; 2612 struct megasas_dcmd_frame *dcmd;
@@ -2514,7 +2629,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance)
2514 dcmd->flags = MFI_FRAME_DIR_NONE; 2629 dcmd->flags = MFI_FRAME_DIR_NONE;
2515 dcmd->timeout = 0; 2630 dcmd->timeout = 0;
2516 dcmd->data_xfer_len = 0; 2631 dcmd->data_xfer_len = 0;
2517 dcmd->opcode = MR_DCMD_CTRL_SHUTDOWN; 2632 dcmd->opcode = opcode;
2518 2633
2519 megasas_issue_blocked_cmd(instance, cmd); 2634 megasas_issue_blocked_cmd(instance, cmd);
2520 2635
@@ -2524,6 +2639,139 @@ static void megasas_shutdown_controller(struct megasas_instance *instance)
2524} 2639}
2525 2640
2526/** 2641/**
2642 * megasas_suspend - driver suspend entry point
2643 * @pdev: PCI device structure
2644 * @state: PCI power state to suspend routine
2645 */
2646static int __devinit
2647megasas_suspend(struct pci_dev *pdev, pm_message_t state)
2648{
2649 struct Scsi_Host *host;
2650 struct megasas_instance *instance;
2651
2652 instance = pci_get_drvdata(pdev);
2653 host = instance->host;
2654
2655 if (poll_mode_io)
2656 del_timer_sync(&instance->io_completion_timer);
2657
2658 megasas_flush_cache(instance);
2659 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
2660 tasklet_kill(&instance->isr_tasklet);
2661
2662 pci_set_drvdata(instance->pdev, instance);
2663 instance->instancet->disable_intr(instance->reg_set);
2664 free_irq(instance->pdev->irq, instance);
2665
2666 pci_save_state(pdev);
2667 pci_disable_device(pdev);
2668
2669 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2670
2671 return 0;
2672}
2673
2674/**
2675 * megasas_resume - driver resume entry point
2676 * @pdev: PCI device structure
2677 */
2678static int __devinit
2679megasas_resume(struct pci_dev *pdev)
2680{
2681 int rval;
2682 struct Scsi_Host *host;
2683 struct megasas_instance *instance;
2684
2685 instance = pci_get_drvdata(pdev);
2686 host = instance->host;
2687 pci_set_power_state(pdev, PCI_D0);
2688 pci_enable_wake(pdev, PCI_D0, 0);
2689 pci_restore_state(pdev);
2690
2691 /*
2692 * PCI prepping: enable the device, set bus mastering and the dma mask
2693 */
2694 rval = pci_enable_device(pdev);
2695
2696 if (rval) {
2697 printk(KERN_ERR "megasas: Enable device failed\n");
2698 return rval;
2699 }
2700
2701 pci_set_master(pdev);
2702
2703 if (megasas_set_dma_mask(pdev))
2704 goto fail_set_dma_mask;
2705
2706 /*
2707 * Initialize MFI Firmware
2708 */
2709
2710 *instance->producer = 0;
2711 *instance->consumer = 0;
2712
2713 atomic_set(&instance->fw_outstanding, 0);
2714
2715 /*
2716 * We expect the FW state to be READY
2717 */
2718 if (megasas_transition_to_ready(instance))
2719 goto fail_ready_state;
2720
2721 if (megasas_issue_init_mfi(instance))
2722 goto fail_init_mfi;
2723
2724 tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
2725 (unsigned long)instance);
2726
2727 /*
2728 * Register IRQ
2729 */
2730 if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED,
2731 "megasas", instance)) {
2732 printk(KERN_ERR "megasas: Failed to register IRQ\n");
2733 goto fail_irq;
2734 }
2735
2736 instance->instancet->enable_intr(instance->reg_set);
2737
2738 /*
2739 * Initiate AEN (Asynchronous Event Notification)
2740 */
2741 if (megasas_start_aen(instance))
2742 printk(KERN_ERR "megasas: Start AEN failed\n");
2743
2744 /* Initialize the cmd completion timer */
2745 if (poll_mode_io)
2746 megasas_start_timer(instance, &instance->io_completion_timer,
2747 megasas_io_completion_timer,
2748 MEGASAS_COMPLETION_TIMER_INTERVAL);
2749 return 0;
2750
2751fail_irq:
2752fail_init_mfi:
2753 if (instance->evt_detail)
2754 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
2755 instance->evt_detail,
2756 instance->evt_detail_h);
2757
2758 if (instance->producer)
2759 pci_free_consistent(pdev, sizeof(u32), instance->producer,
2760 instance->producer_h);
2761 if (instance->consumer)
2762 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
2763 instance->consumer_h);
2764 scsi_host_put(host);
2765
2766fail_set_dma_mask:
2767fail_ready_state:
2768
2769 pci_disable_device(pdev);
2770
2771 return -ENODEV;
2772}
2773
2774/**
2527 * megasas_detach_one - PCI hot"un"plug entry point 2775 * megasas_detach_one - PCI hot"un"plug entry point
2528 * @pdev: PCI device structure 2776 * @pdev: PCI device structure
2529 */ 2777 */
@@ -2536,9 +2784,12 @@ static void megasas_detach_one(struct pci_dev *pdev)
2536 instance = pci_get_drvdata(pdev); 2784 instance = pci_get_drvdata(pdev);
2537 host = instance->host; 2785 host = instance->host;
2538 2786
2787 if (poll_mode_io)
2788 del_timer_sync(&instance->io_completion_timer);
2789
2539 scsi_remove_host(instance->host); 2790 scsi_remove_host(instance->host);
2540 megasas_flush_cache(instance); 2791 megasas_flush_cache(instance);
2541 megasas_shutdown_controller(instance); 2792 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
2542 tasklet_kill(&instance->isr_tasklet); 2793 tasklet_kill(&instance->isr_tasklet);
2543 2794
2544 /* 2795 /*
@@ -2660,6 +2911,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
2660 void *sense = NULL; 2911 void *sense = NULL;
2661 dma_addr_t sense_handle; 2912 dma_addr_t sense_handle;
2662 u32 *sense_ptr; 2913 u32 *sense_ptr;
2914 unsigned long *sense_buff;
2663 2915
2664 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 2916 memset(kbuff_arr, 0, sizeof(kbuff_arr));
2665 2917
@@ -2764,14 +3016,16 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
2764 */ 3016 */
2765 if (ioc->sense_len) { 3017 if (ioc->sense_len) {
2766 /* 3018 /*
2767 * sense_ptr points to the location that has the user 3019 * sense_buff points to the location that has the user
2768 * sense buffer address 3020 * sense buffer address
2769 */ 3021 */
2770 sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw + 3022 sense_buff = (unsigned long *) ((unsigned long)ioc->frame.raw +
2771 ioc->sense_off); 3023 ioc->sense_off);
2772 3024
2773 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), 3025 if (copy_to_user((void __user *)(unsigned long)(*sense_buff),
2774 sense, ioc->sense_len)) { 3026 sense, ioc->sense_len)) {
3027 printk(KERN_ERR "megasas: Failed to copy out sense data "
3028 "to user\n");
2775 error = -EFAULT; 3029 error = -EFAULT;
2776 goto out; 3030 goto out;
2777 } 3031 }
@@ -2874,10 +3128,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
2874 if (!instance) 3128 if (!instance)
2875 return -ENODEV; 3129 return -ENODEV;
2876 3130
2877 down(&instance->aen_mutex); 3131 mutex_lock(&instance->aen_mutex);
2878 error = megasas_register_aen(instance, aen.seq_num, 3132 error = megasas_register_aen(instance, aen.seq_num,
2879 aen.class_locale_word); 3133 aen.class_locale_word);
2880 up(&instance->aen_mutex); 3134 mutex_unlock(&instance->aen_mutex);
2881 return error; 3135 return error;
2882} 3136}
2883 3137
@@ -2977,6 +3231,8 @@ static struct pci_driver megasas_pci_driver = {
2977 .id_table = megasas_pci_table, 3231 .id_table = megasas_pci_table,
2978 .probe = megasas_probe_one, 3232 .probe = megasas_probe_one,
2979 .remove = __devexit_p(megasas_detach_one), 3233 .remove = __devexit_p(megasas_detach_one),
3234 .suspend = megasas_suspend,
3235 .resume = megasas_resume,
2980 .shutdown = megasas_shutdown, 3236 .shutdown = megasas_shutdown,
2981}; 3237};
2982 3238
@@ -3004,7 +3260,7 @@ static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
3004static ssize_t 3260static ssize_t
3005megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) 3261megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
3006{ 3262{
3007 return sprintf(buf,"%u",megasas_dbg_lvl); 3263 return sprintf(buf, "%u\n", megasas_dbg_lvl);
3008} 3264}
3009 3265
3010static ssize_t 3266static ssize_t
@@ -3019,7 +3275,65 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
3019} 3275}
3020 3276
3021static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUGO, megasas_sysfs_show_dbg_lvl, 3277static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUGO, megasas_sysfs_show_dbg_lvl,
3022 megasas_sysfs_set_dbg_lvl); 3278 megasas_sysfs_set_dbg_lvl);
3279
3280static ssize_t
3281megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
3282{
3283 return sprintf(buf, "%u\n", poll_mode_io);
3284}
3285
3286static ssize_t
3287megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
3288 const char *buf, size_t count)
3289{
3290 int retval = count;
3291 int tmp = poll_mode_io;
3292 int i;
3293 struct megasas_instance *instance;
3294
3295 if (sscanf(buf, "%u", &poll_mode_io) < 1) {
3296 printk(KERN_ERR "megasas: could not set poll_mode_io\n");
3297 retval = -EINVAL;
3298 }
3299
 3300 /*
 3301 * Check if poll_mode_io is already set or is the same as the previous value
 3302 */
3303 if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
3304 goto out;
3305
3306 if (poll_mode_io) {
3307 /*
3308 * Start timers for all adapters
3309 */
3310 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
3311 instance = megasas_mgmt_info.instance[i];
3312 if (instance) {
3313 megasas_start_timer(instance,
3314 &instance->io_completion_timer,
3315 megasas_io_completion_timer,
3316 MEGASAS_COMPLETION_TIMER_INTERVAL);
3317 }
3318 }
3319 } else {
3320 /*
3321 * Delete timers for all adapters
3322 */
3323 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
3324 instance = megasas_mgmt_info.instance[i];
3325 if (instance)
3326 del_timer_sync(&instance->io_completion_timer);
3327 }
3328 }
3329
3330out:
3331 return retval;
3332}
3333
3334static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO,
3335 megasas_sysfs_show_poll_mode_io,
3336 megasas_sysfs_set_poll_mode_io);
3023 3337
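poll_mode_io follows the same driver-attribute pattern as dbg_lvl above: a show/store pair bound by DRIVER_ATTR(), which surfaces the knob under /sys/bus/pci/drivers/megaraid_sas/. A minimal sketch of the shape (hypothetical names; note this variant rejects bad input before acting on it, whereas the store handler above records -EINVAL but still falls through to the state comparison):

	static unsigned int example_knob;	/* hypothetical module state */

	static ssize_t example_show(struct device_driver *dd, char *buf)
	{
		return sprintf(buf, "%u\n", example_knob);
	}

	static ssize_t example_store(struct device_driver *dd,
				     const char *buf, size_t count)
	{
		unsigned int val;

		if (sscanf(buf, "%u", &val) != 1)
			return -EINVAL;
		example_knob = val;
		return count;
	}

	static DRIVER_ATTR(example, S_IRUGO | S_IWUSR,
			   example_show, example_store);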
3024/** 3338/**
3025 * megasas_init - Driver load entry point 3339 * megasas_init - Driver load entry point
@@ -3070,8 +3384,16 @@ static int __init megasas_init(void)
3070 &driver_attr_dbg_lvl); 3384 &driver_attr_dbg_lvl);
3071 if (rval) 3385 if (rval)
3072 goto err_dcf_dbg_lvl; 3386 goto err_dcf_dbg_lvl;
3387 rval = driver_create_file(&megasas_pci_driver.driver,
3388 &driver_attr_poll_mode_io);
3389 if (rval)
3390 goto err_dcf_poll_mode_io;
3073 3391
3074 return rval; 3392 return rval;
3393
3394err_dcf_poll_mode_io:
3395 driver_remove_file(&megasas_pci_driver.driver,
3396 &driver_attr_dbg_lvl);
3075err_dcf_dbg_lvl: 3397err_dcf_dbg_lvl:
3076 driver_remove_file(&megasas_pci_driver.driver, 3398 driver_remove_file(&megasas_pci_driver.driver,
3077 &driver_attr_release_date); 3399 &driver_attr_release_date);
@@ -3090,6 +3412,8 @@ err_pcidrv:
3090static void __exit megasas_exit(void) 3412static void __exit megasas_exit(void)
3091{ 3413{
3092 driver_remove_file(&megasas_pci_driver.driver, 3414 driver_remove_file(&megasas_pci_driver.driver,
3415 &driver_attr_poll_mode_io);
3416 driver_remove_file(&megasas_pci_driver.driver,
3093 &driver_attr_dbg_lvl); 3417 &driver_attr_dbg_lvl);
3094 driver_remove_file(&megasas_pci_driver.driver, 3418 driver_remove_file(&megasas_pci_driver.driver,
3095 &driver_attr_release_date); 3419 &driver_attr_release_date);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 4dffc918a414..6466bdf548c2 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2,7 +2,7 @@
2 * 2 *
3 * Linux MegaRAID driver for SAS based RAID controllers 3 * Linux MegaRAID driver for SAS based RAID controllers
4 * 4 *
5 * Copyright (c) 2003-2005 LSI Logic Corporation. 5 * Copyright (c) 2003-2005 LSI Corporation.
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -18,9 +18,9 @@
18/* 18/*
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.03.10-rc5" 21#define MEGASAS_VERSION "00.00.03.16-rc1"
22#define MEGASAS_RELDATE "May 17, 2007" 22#define MEGASAS_RELDATE "Nov. 07, 2007"
23#define MEGASAS_EXT_VERSION "Thu May 17 10:09:32 PDT 2007" 23#define MEGASAS_EXT_VERSION "Thu. Nov. 07 10:09:32 PDT 2007"
24 24
25/* 25/*
26 * Device IDs 26 * Device IDs
@@ -117,6 +117,7 @@
117#define MR_FLUSH_DISK_CACHE 0x02 117#define MR_FLUSH_DISK_CACHE 0x02
118 118
119#define MR_DCMD_CTRL_SHUTDOWN 0x01050000 119#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
120#define MR_DCMD_HIBERNATE_SHUTDOWN 0x01060000
120#define MR_ENABLE_DRIVE_SPINDOWN 0x01 121#define MR_ENABLE_DRIVE_SPINDOWN 0x01
121 122
122#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100 123#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
@@ -570,7 +571,8 @@ struct megasas_ctrl_info {
570#define IS_DMA64 (sizeof(dma_addr_t) == 8) 571#define IS_DMA64 (sizeof(dma_addr_t) == 8)
571 572
572#define MFI_OB_INTR_STATUS_MASK 0x00000002 573#define MFI_OB_INTR_STATUS_MASK 0x00000002
573#define MFI_POLL_TIMEOUT_SECS 10 574#define MFI_POLL_TIMEOUT_SECS 60
575#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
574 576
575#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 577#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
576 578
@@ -1083,13 +1085,15 @@ struct megasas_instance {
1083 struct megasas_cmd **cmd_list; 1085 struct megasas_cmd **cmd_list;
1084 struct list_head cmd_pool; 1086 struct list_head cmd_pool;
1085 spinlock_t cmd_pool_lock; 1087 spinlock_t cmd_pool_lock;
 1088 /* used to sync producer/consumer ptrs in dpc */
1089 spinlock_t completion_lock;
1086 struct dma_pool *frame_dma_pool; 1090 struct dma_pool *frame_dma_pool;
1087 struct dma_pool *sense_dma_pool; 1091 struct dma_pool *sense_dma_pool;
1088 1092
1089 struct megasas_evt_detail *evt_detail; 1093 struct megasas_evt_detail *evt_detail;
1090 dma_addr_t evt_detail_h; 1094 dma_addr_t evt_detail_h;
1091 struct megasas_cmd *aen_cmd; 1095 struct megasas_cmd *aen_cmd;
1092 struct semaphore aen_mutex; 1096 struct mutex aen_mutex;
1093 struct semaphore ioctl_sem; 1097 struct semaphore ioctl_sem;
1094 1098
1095 struct Scsi_Host *host; 1099 struct Scsi_Host *host;
@@ -1108,6 +1112,8 @@ struct megasas_instance {
1108 1112
1109 u8 flag; 1113 u8 flag;
1110 unsigned long last_time; 1114 unsigned long last_time;
1115
1116 struct timer_list io_completion_timer;
1111}; 1117};
1112 1118
1113#define MEGASAS_IS_LOGICAL(scp) \ 1119#define MEGASAS_IS_LOGICAL(scp) \
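The new io_completion_timer field and MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10 jiffies, i.e. one tenth of a second) back the poll_mode_io machinery added to megaraid_sas.c above. A sketch of the 2.6-era timer arming pattern a helper like megasas_start_timer() would follow (shape assumed, not copied from the driver):

	static void example_start_timer(struct timer_list *timer,
					void (*fn)(unsigned long),
					unsigned long data,
					unsigned long interval)
	{
		init_timer(timer);
		timer->function = fn;
		timer->data = data;	/* typically the instance pointer */
		timer->expires = jiffies + interval;
		add_timer(timer);
	}

del_timer_sync(), used when poll mode is switched off, also waits for a handler that is already running to finish before returning.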
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 016c462bc771..c02771aa6c9b 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4963,7 +4963,8 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
4963 ** Copy back sense data to caller's buffer. 4963 ** Copy back sense data to caller's buffer.
4964 */ 4964 */
4965 memcpy(cmd->sense_buffer, cp->sense_buf, 4965 memcpy(cmd->sense_buffer, cp->sense_buf,
4966 min(sizeof(cmd->sense_buffer), sizeof(cp->sense_buf))); 4966 min_t(size_t, SCSI_SENSE_BUFFERSIZE,
4967 sizeof(cp->sense_buf)));
4967 4968
4968 if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) { 4969 if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
4969 u_char * p = (u_char*) & cmd->sense_buffer; 4970 u_char * p = (u_char*) & cmd->sense_buffer;
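The kernel's min() refuses mismatched operand types at compile time; sizeof(cp->sense_buf) is a size_t while SCSI_SENSE_BUFFERSIZE is a plain integer constant, so the patch uses min_t(), which casts both operands to the named type first:

	size_t n = min_t(size_t, SCSI_SENSE_BUFFERSIZE, sizeof(cp->sense_buf));
	memcpy(cmd->sense_buffer, cp->sense_buf, n);

The move away from sizeof(cmd->sense_buffer) also tracks the mid-layer change that replaced the fixed sense_buffer array size with the SCSI_SENSE_BUFFERSIZE constant, as the qla1280 hunks further down do as well.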
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index fa481b515ead..53857c6b6d4d 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -6,7 +6,8 @@ menuconfig SCSI_LOWLEVEL_PCMCIA
6 bool "PCMCIA SCSI adapter support" 6 bool "PCMCIA SCSI adapter support"
7 depends on SCSI!=n && PCMCIA!=n 7 depends on SCSI!=n && PCMCIA!=n
8 8
9if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA 9# drivers have problems when build in, so require modules
10if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA && m
10 11
11config PCMCIA_AHA152X 12config PCMCIA_AHA152X
12 tristate "Adaptec AHA152X PCMCIA support" 13 tristate "Adaptec AHA152X PCMCIA support"
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index a45d89b14147..5082ca3c6876 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -135,6 +135,11 @@ static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
135 135
136#define NSP_DEBUG_BUF_LEN 150 136#define NSP_DEBUG_BUF_LEN 150
137 137
138static inline void nsp_inc_resid(struct scsi_cmnd *SCpnt, int residInc)
139{
140 scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) + residInc);
141}
142
138static void nsp_cs_message(const char *func, int line, char *type, char *fmt, ...) 143static void nsp_cs_message(const char *func, int line, char *type, char *fmt, ...)
139{ 144{
140 va_list args; 145 va_list args;
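The nsp_inc_resid() helper added in the hunk above wraps the scsi_get_resid()/scsi_set_resid() accessors so the PIO paths below can adjust the residual byte count by a signed delta instead of touching SCpnt->resid directly. Typical use, exactly as the later hunks show:

	nsp_inc_resid(SCpnt, -res);	/* res bytes were transferred */
	nsp_inc_resid(SCpnt, res);	/* put res bytes back after a phase change */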
@@ -192,8 +197,10 @@ static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
192#endif 197#endif
193 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; 198 nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
194 199
195 nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "SCpnt=0x%p target=%d lun=%d buff=0x%p bufflen=%d use_sg=%d", 200 nsp_dbg(NSP_DEBUG_QUEUECOMMAND,
196 SCpnt, target, SCpnt->device->lun, SCpnt->request_buffer, SCpnt->request_bufflen, SCpnt->use_sg); 201 "SCpnt=0x%p target=%d lun=%d sglist=0x%p bufflen=%d sg_count=%d",
202 SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt),
203 scsi_bufflen(SCpnt), scsi_sg_count(SCpnt));
197 //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC); 204 //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC);
198 205
199 SCpnt->scsi_done = done; 206 SCpnt->scsi_done = done;
@@ -225,7 +232,7 @@ static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
225 SCpnt->SCp.have_data_in = IO_UNKNOWN; 232 SCpnt->SCp.have_data_in = IO_UNKNOWN;
226 SCpnt->SCp.sent_command = 0; 233 SCpnt->SCp.sent_command = 0;
227 SCpnt->SCp.phase = PH_UNDETERMINED; 234 SCpnt->SCp.phase = PH_UNDETERMINED;
228 SCpnt->resid = SCpnt->request_bufflen; 235 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
229 236
230 /* setup scratch area 237 /* setup scratch area
231 SCp.ptr : buffer pointer 238 SCp.ptr : buffer pointer
@@ -233,14 +240,14 @@ static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
233 SCp.buffer : next buffer 240 SCp.buffer : next buffer
234 SCp.buffers_residual : left buffers in list 241 SCp.buffers_residual : left buffers in list
235 SCp.phase : current state of the command */ 242 SCp.phase : current state of the command */
236 if (SCpnt->use_sg) { 243 if (scsi_bufflen(SCpnt)) {
237 SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer; 244 SCpnt->SCp.buffer = scsi_sglist(SCpnt);
238 SCpnt->SCp.ptr = BUFFER_ADDR; 245 SCpnt->SCp.ptr = BUFFER_ADDR;
239 SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; 246 SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
240 SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1; 247 SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
241 } else { 248 } else {
242 SCpnt->SCp.ptr = (char *) SCpnt->request_buffer; 249 SCpnt->SCp.ptr = NULL;
243 SCpnt->SCp.this_residual = SCpnt->request_bufflen; 250 SCpnt->SCp.this_residual = 0;
244 SCpnt->SCp.buffer = NULL; 251 SCpnt->SCp.buffer = NULL;
245 SCpnt->SCp.buffers_residual = 0; 252 SCpnt->SCp.buffers_residual = 0;
246 } 253 }
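This hunk is the standard conversion from the removed scsi_cmnd fields (request_buffer, request_bufflen, use_sg) to the accessor API: with the mid-layer now handing every data command to the driver as a scatterlist, scsi_bufflen() tells whether data is expected, scsi_sglist() yields the list head, and scsi_sg_count() its length. The same pattern appears in the ppa hunk further down; reduced to its core (illustrative variable names):

	if (scsi_bufflen(cmd)) {		/* data phase expected */
		cmd->SCp.buffer = scsi_sglist(cmd);
		cmd->SCp.this_residual = cmd->SCp.buffer->length;
		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
	} else {				/* no data transfer */
		cmd->SCp.buffer = NULL;
		cmd->SCp.this_residual = 0;
		cmd->SCp.buffers_residual = 0;
	}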
@@ -721,7 +728,9 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
721 ocount = data->FifoCount; 728 ocount = data->FifoCount;
722 729
723 nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d", 730 nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d",
724 SCpnt, SCpnt->resid, ocount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual); 731 SCpnt, scsi_get_resid(SCpnt), ocount, SCpnt->SCp.ptr,
732 SCpnt->SCp.this_residual, SCpnt->SCp.buffer,
733 SCpnt->SCp.buffers_residual);
725 734
726 time_out = 1000; 735 time_out = 1000;
727 736
@@ -771,7 +780,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
771 return; 780 return;
772 } 781 }
773 782
774 SCpnt->resid -= res; 783 nsp_inc_resid(SCpnt, -res);
775 SCpnt->SCp.ptr += res; 784 SCpnt->SCp.ptr += res;
776 SCpnt->SCp.this_residual -= res; 785 SCpnt->SCp.this_residual -= res;
777 ocount += res; 786 ocount += res;
@@ -795,10 +804,12 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
795 804
796 if (time_out == 0) { 805 if (time_out == 0) {
797 nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d", 806 nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d",
798 SCpnt->resid, SCpnt->SCp.this_residual, SCpnt->SCp.buffers_residual); 807 scsi_get_resid(SCpnt), SCpnt->SCp.this_residual,
808 SCpnt->SCp.buffers_residual);
799 } 809 }
800 nsp_dbg(NSP_DEBUG_DATA_IO, "read ocount=0x%x", ocount); 810 nsp_dbg(NSP_DEBUG_DATA_IO, "read ocount=0x%x", ocount);
801 nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId, SCpnt->resid); 811 nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId,
812 scsi_get_resid(SCpnt));
802} 813}
803 814
804/* 815/*
@@ -816,7 +827,9 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
816 ocount = data->FifoCount; 827 ocount = data->FifoCount;
817 828
818 nsp_dbg(NSP_DEBUG_DATA_IO, "in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x", 829 nsp_dbg(NSP_DEBUG_DATA_IO, "in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x",
819 data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual, SCpnt->resid); 830 data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual,
831 SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual,
832 scsi_get_resid(SCpnt));
820 833
821 time_out = 1000; 834 time_out = 1000;
822 835
@@ -830,7 +843,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
830 843
831 nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x, res=%d\n", stat, res); 844 nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x, res=%d\n", stat, res);
832 /* Put back pointer */ 845 /* Put back pointer */
833 SCpnt->resid += res; 846 nsp_inc_resid(SCpnt, res);
834 SCpnt->SCp.ptr -= res; 847 SCpnt->SCp.ptr -= res;
835 SCpnt->SCp.this_residual += res; 848 SCpnt->SCp.this_residual += res;
836 ocount -= res; 849 ocount -= res;
@@ -866,7 +879,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
866 break; 879 break;
867 } 880 }
868 881
869 SCpnt->resid -= res; 882 nsp_inc_resid(SCpnt, -res);
870 SCpnt->SCp.ptr += res; 883 SCpnt->SCp.ptr += res;
871 SCpnt->SCp.this_residual -= res; 884 SCpnt->SCp.this_residual -= res;
872 ocount += res; 885 ocount += res;
@@ -886,10 +899,12 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
886 data->FifoCount = ocount; 899 data->FifoCount = ocount;
887 900
888 if (time_out == 0) { 901 if (time_out == 0) {
889 nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x", SCpnt->resid); 902 nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x",
903 scsi_get_resid(SCpnt));
890 } 904 }
891 nsp_dbg(NSP_DEBUG_DATA_IO, "write ocount=0x%x", ocount); 905 nsp_dbg(NSP_DEBUG_DATA_IO, "write ocount=0x%x", ocount);
892 nsp_dbg(NSP_DEBUG_DATA_IO, "w cmd=%d resid=0x%x\n", data->CmdId, SCpnt->resid); 906 nsp_dbg(NSP_DEBUG_DATA_IO, "w cmd=%d resid=0x%x\n", data->CmdId,
907 scsi_get_resid(SCpnt));
893} 908}
894#undef RFIFO_CRIT 909#undef RFIFO_CRIT
895#undef WFIFO_CRIT 910#undef WFIFO_CRIT
@@ -911,9 +926,8 @@ static int nsp_nexus(struct scsi_cmnd *SCpnt)
911 nsp_index_write(base, SYNCREG, sync->SyncRegister); 926 nsp_index_write(base, SYNCREG, sync->SyncRegister);
912 nsp_index_write(base, ACKWIDTH, sync->AckWidth); 927 nsp_index_write(base, ACKWIDTH, sync->AckWidth);
913 928
914 if (SCpnt->use_sg == 0 || 929 if (scsi_get_resid(SCpnt) % 4 != 0 ||
915 SCpnt->resid % 4 != 0 || 930 scsi_get_resid(SCpnt) <= PAGE_SIZE ) {
916 SCpnt->resid <= PAGE_SIZE ) {
917 data->TransferMode = MODE_IO8; 931 data->TransferMode = MODE_IO8;
918 } else if (nsp_burst_mode == BURST_MEM32) { 932 } else if (nsp_burst_mode == BURST_MEM32) {
919 data->TransferMode = MODE_MEM32; 933 data->TransferMode = MODE_MEM32;
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 67ee51a3d7e1..f655ae320b48 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -750,18 +750,16 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
750 cmd->SCp.phase++; 750 cmd->SCp.phase++;
751 751
752 case 4: /* Phase 4 - Setup scatter/gather buffers */ 752 case 4: /* Phase 4 - Setup scatter/gather buffers */
753 if (cmd->use_sg) { 753 if (scsi_bufflen(cmd)) {
754 /* if many buffers are available, start filling the first */ 754 cmd->SCp.buffer = scsi_sglist(cmd);
755 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
756 cmd->SCp.this_residual = cmd->SCp.buffer->length; 755 cmd->SCp.this_residual = cmd->SCp.buffer->length;
757 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); 756 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
758 } else { 757 } else {
759 /* else fill the only available buffer */
760 cmd->SCp.buffer = NULL; 758 cmd->SCp.buffer = NULL;
761 cmd->SCp.this_residual = cmd->request_bufflen; 759 cmd->SCp.this_residual = 0;
762 cmd->SCp.ptr = cmd->request_buffer; 760 cmd->SCp.ptr = NULL;
763 } 761 }
764 cmd->SCp.buffers_residual = cmd->use_sg - 1; 762 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
765 cmd->SCp.phase++; 763 cmd->SCp.phase++;
766 764
767 case 5: /* Phase 5 - Data transfer stage */ 765 case 5: /* Phase 5 - Data transfer stage */
diff --git a/drivers/scsi/psi240i.c b/drivers/scsi/psi240i.c
deleted file mode 100644
index 899e89d6fe67..000000000000
--- a/drivers/scsi/psi240i.c
+++ /dev/null
@@ -1,689 +0,0 @@
1/*+M*************************************************************************
2 * Perceptive Solutions, Inc. PSI-240I device driver proc support for Linux.
3 *
4 * Copyright (c) 1997 Perceptive Solutions, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 *
21 * File Name: psi240i.c
22 *
23 * Description: SCSI driver for the PSI240I EIDE interface card.
24 *
25 *-M*************************************************************************/
26
27#include <linux/module.h>
28
29#include <linux/blkdev.h>
30#include <linux/kernel.h>
31#include <linux/types.h>
32#include <linux/string.h>
33#include <linux/ioport.h>
34#include <linux/delay.h>
35#include <linux/interrupt.h>
36#include <linux/proc_fs.h>
37#include <linux/spinlock.h>
38#include <linux/stat.h>
39
40#include <asm/dma.h>
41#include <asm/system.h>
42#include <asm/io.h>
43#include "scsi.h"
44#include <scsi/scsi_host.h>
45
46#include "psi240i.h"
47#include "psi_chip.h"
48
49//#define DEBUG 1
50
51#ifdef DEBUG
52#define DEB(x) x
53#else
54#define DEB(x)
55#endif
56
57#define MAXBOARDS 6 /* Increase this and the sizes of the arrays below, if you need more. */
58
59#define PORT_DATA 0
60#define PORT_ERROR 1
61#define PORT_SECTOR_COUNT 2
62#define PORT_LBA_0 3
63#define PORT_LBA_8 4
64#define PORT_LBA_16 5
65#define PORT_LBA_24 6
66#define PORT_STAT_CMD 7
67#define PORT_SEL_FAIL 8
68#define PORT_IRQ_STATUS 9
69#define PORT_ADDRESS 10
70#define PORT_FAIL 11
71#define PORT_ALT_STAT 12
72
73typedef struct
74 {
75 UCHAR device; // device code
76 UCHAR byte6; // device select register image
77 UCHAR spigot; // spigot number
 78 UCHAR expectingIRQ; // flag for expecting an interrupt
79 USHORT sectors; // number of sectors per track
80 USHORT heads; // number of heads
81 USHORT cylinders; // number of cylinders for this device
82 USHORT spareword; // placeholder
83 ULONG blocks; // number of blocks on device
84 } OUR_DEVICE, *POUR_DEVICE;
85
86typedef struct
87 {
88 USHORT ports[13];
89 OUR_DEVICE device[8];
90 struct scsi_cmnd *pSCmnd;
91 IDE_STRUCT ide;
92 ULONG startSector;
93 USHORT sectorCount;
94 struct scsi_cmnd *SCpnt;
95 VOID *buffer;
96 USHORT expectingIRQ;
97 } ADAPTER240I, *PADAPTER240I;
98
99#define HOSTDATA(host) ((PADAPTER240I)&host->hostdata)
100
101static struct Scsi_Host *PsiHost[6] = {NULL,}; /* One for each IRQ level (10-15) */
102static IDENTIFY_DATA identifyData;
103static SETUP ChipSetup;
104
105static USHORT portAddr[6] = {CHIP_ADRS_0, CHIP_ADRS_1, CHIP_ADRS_2, CHIP_ADRS_3, CHIP_ADRS_4, CHIP_ADRS_5};
106
107/****************************************************************
108 * Name: WriteData :LOCAL
109 *
110 * Description: Write data to device.
111 *
112 * Parameters: padapter - Pointer adapter data structure.
113 *
114 * Returns: TRUE if drive does not assert DRQ in time.
115 *
116 ****************************************************************/
117static int WriteData (PADAPTER240I padapter)
118 {
119 ULONG timer;
120 USHORT *pports = padapter->ports;
121
122 timer = jiffies + TIMEOUT_DRQ; // calculate the timeout value
123 do {
124 if ( inb_p (pports[PORT_STAT_CMD]) & IDE_STATUS_DRQ )
125 {
126 outsw (pports[PORT_DATA], padapter->buffer, (USHORT)padapter->ide.ide.ide[2] * 256);
127 return 0;
128 }
129 } while ( time_after(timer, jiffies) ); // test for timeout
130
131 padapter->ide.ide.ides.cmd = 0; // null out the command byte
132 return 1;
133 }
134/****************************************************************
135 * Name: IdeCmd :LOCAL
136 *
137 * Description: Process a queued command from the SCSI manager.
138 *
139 * Parameters: padapter - Pointer adapter data structure.
140 *
141 * Returns: Zero if no error or status register contents on error.
142 *
143 ****************************************************************/
144static UCHAR IdeCmd (PADAPTER240I padapter)
145 {
146 ULONG timer;
147 USHORT *pports = padapter->ports;
148 UCHAR status;
149
150 outb_p (padapter->ide.ide.ides.spigot, pports[PORT_SEL_FAIL]); // select the spigot
151 outb_p (padapter->ide.ide.ide[6], pports[PORT_LBA_24]); // select the drive
152 timer = jiffies + TIMEOUT_READY; // calculate the timeout value
153 do {
154 status = inb_p (padapter->ports[PORT_STAT_CMD]);
155 if ( status & IDE_STATUS_DRDY )
156 {
157 outb_p (padapter->ide.ide.ide[2], pports[PORT_SECTOR_COUNT]);
158 outb_p (padapter->ide.ide.ide[3], pports[PORT_LBA_0]);
159 outb_p (padapter->ide.ide.ide[4], pports[PORT_LBA_8]);
160 outb_p (padapter->ide.ide.ide[5], pports[PORT_LBA_16]);
161 padapter->expectingIRQ = 1;
162 outb_p (padapter->ide.ide.ide[7], pports[PORT_STAT_CMD]);
163
164 if ( padapter->ide.ide.ides.cmd == IDE_CMD_WRITE_MULTIPLE )
165 return (WriteData (padapter));
166
167 return 0;
168 }
169 } while ( time_after(timer, jiffies) ); // test for timeout
170
171 padapter->ide.ide.ides.cmd = 0; // null out the command byte
172 return status;
173 }
174/****************************************************************
175 * Name: SetupTransfer :LOCAL
176 *
177 * Description: Setup a data transfer command.
178 *
179 * Parameters: padapter - Pointer adapter data structure.
180 * drive - Drive/head register upper nibble only.
181 *
182 * Returns: TRUE if no data to transfer.
183 *
184 ****************************************************************/
185static int SetupTransfer (PADAPTER240I padapter, UCHAR drive)
186 {
187 if ( padapter->sectorCount )
188 {
189 *(ULONG *)padapter->ide.ide.ides.lba = padapter->startSector;
190 padapter->ide.ide.ide[6] |= drive;
191 padapter->ide.ide.ides.sectors = ( padapter->sectorCount > SECTORSXFER ) ? SECTORSXFER : padapter->sectorCount;
192 padapter->sectorCount -= padapter->ide.ide.ides.sectors; // bump the start and count for next xfer
193 padapter->startSector += padapter->ide.ide.ides.sectors;
194 return 0;
195 }
196 else
197 {
198 padapter->ide.ide.ides.cmd = 0; // null out the command byte
199 padapter->SCpnt = NULL;
200 return 1;
201 }
202 }
203/****************************************************************
204 * Name: DecodeError :LOCAL
205 *
206 * Description: Decode and process device errors.
207 *
208 * Parameters: pshost - Pointer to host data block.
209 * status - Status register code.
210 *
211 * Returns: The driver status code.
212 *
213 ****************************************************************/
214static ULONG DecodeError (struct Scsi_Host *pshost, UCHAR status)
215 {
216 PADAPTER240I padapter = HOSTDATA(pshost);
217 UCHAR error;
218
219 padapter->expectingIRQ = 0;
220 padapter->SCpnt = NULL;
221 if ( status & IDE_STATUS_WRITE_FAULT )
222 {
223 return DID_PARITY << 16;
224 }
225 if ( status & IDE_STATUS_BUSY )
226 return DID_BUS_BUSY << 16;
227
228 error = inb_p (padapter->ports[PORT_ERROR]);
229 DEB(printk ("\npsi240i error register: %x", error));
230 switch ( error )
231 {
232 case IDE_ERROR_AMNF:
233 case IDE_ERROR_TKONF:
234 case IDE_ERROR_ABRT:
235 case IDE_ERROR_IDFN:
236 case IDE_ERROR_UNC:
237 case IDE_ERROR_BBK:
238 default:
239 return DID_ERROR << 16;
240 }
241 return DID_ERROR << 16;
242 }
243/****************************************************************
244 * Name: Irq_Handler :LOCAL
245 *
246 * Description: Interrupt handler.
247 *
248 * Parameters: irq - Hardware IRQ number.
 249 * dev_id - Pointer to the adapter's host data block.
 250 *
 251 * Returns: Nothing.
252 *
253 ****************************************************************/
254static void Irq_Handler (int irq, void *dev_id)
255 {
256 struct Scsi_Host *shost; // Pointer to host data block
257 PADAPTER240I padapter; // Pointer to adapter control structure
258 USHORT *pports; // I/O port array
259 struct scsi_cmnd *SCpnt;
260 UCHAR status;
261 int z;
262
263 DEB(printk ("\npsi240i received interrupt\n"));
264
265 shost = PsiHost[irq - 10];
266 if ( !shost )
267 panic ("Splunge!");
268
269 padapter = HOSTDATA(shost);
270 pports = padapter->ports;
271 SCpnt = padapter->SCpnt;
272
273 if ( !padapter->expectingIRQ )
274 {
275 DEB(printk ("\npsi240i Unsolicited interrupt\n"));
276 return;
277 }
278 padapter->expectingIRQ = 0;
279
280 status = inb_p (padapter->ports[PORT_STAT_CMD]); // read the device status
281 if ( status & (IDE_STATUS_ERROR | IDE_STATUS_WRITE_FAULT) )
282 goto irqerror;
283
284 DEB(printk ("\npsi240i processing interrupt"));
285 switch ( padapter->ide.ide.ides.cmd ) // decide how to handle the interrupt
286 {
287 case IDE_CMD_READ_MULTIPLE:
288 if ( status & IDE_STATUS_DRQ )
289 {
290 insw (pports[PORT_DATA], padapter->buffer, (USHORT)padapter->ide.ide.ides.sectors * 256);
291 padapter->buffer += padapter->ide.ide.ides.sectors * 512;
292 if ( SetupTransfer (padapter, padapter->ide.ide.ide[6] & 0xF0) )
293 {
294 SCpnt->result = DID_OK << 16;
295 padapter->SCpnt = NULL;
296 SCpnt->scsi_done (SCpnt);
297 return;
298 }
299 if ( !(status = IdeCmd (padapter)) )
300 return;
301 }
302 break;
303
304 case IDE_CMD_WRITE_MULTIPLE:
305 padapter->buffer += padapter->ide.ide.ides.sectors * 512;
306 if ( SetupTransfer (padapter, padapter->ide.ide.ide[6] & 0xF0) )
307 {
308 SCpnt->result = DID_OK << 16;
309 padapter->SCpnt = NULL;
310 SCpnt->scsi_done (SCpnt);
311 return;
312 }
313 if ( !(status = IdeCmd (padapter)) )
314 return;
315 break;
316
317 case IDE_COMMAND_IDENTIFY:
318 {
319 PINQUIRYDATA pinquiryData = SCpnt->request_buffer;
320
321 if ( status & IDE_STATUS_DRQ )
322 {
323 insw (pports[PORT_DATA], &identifyData, sizeof (identifyData) >> 1);
324
325 memset (pinquiryData, 0, SCpnt->request_bufflen); // Zero INQUIRY data structure.
326 pinquiryData->DeviceType = 0;
327 pinquiryData->Versions = 2;
328 pinquiryData->AdditionalLength = 35 - 4;
329
330 // Fill in vendor identification fields.
331 for ( z = 0; z < 8; z += 2 )
332 {
333 pinquiryData->VendorId[z] = ((UCHAR *)identifyData.ModelNumber)[z + 1];
334 pinquiryData->VendorId[z + 1] = ((UCHAR *)identifyData.ModelNumber)[z];
335 }
336
337 // Initialize unused portion of product id.
338 for ( z = 0; z < 4; z++ )
339 pinquiryData->ProductId[12 + z] = ' ';
340
341 // Move firmware revision from IDENTIFY data to
342 // product revision in INQUIRY data.
343 for ( z = 0; z < 4; z += 2 )
344 {
345 pinquiryData->ProductRevisionLevel[z] = ((UCHAR *)identifyData.FirmwareRevision)[z + 1];
346 pinquiryData->ProductRevisionLevel[z + 1] = ((UCHAR *)identifyData.FirmwareRevision)[z];
347 }
348
349 SCpnt->result = DID_OK << 16;
350 padapter->SCpnt = NULL;
351 SCpnt->scsi_done (SCpnt);
352 return;
353 }
354 break;
355 }
356
357 default:
358 SCpnt->result = DID_OK << 16;
359 padapter->SCpnt = NULL;
360 SCpnt->scsi_done (SCpnt);
361 return;
362 }
363
364irqerror:;
365 DEB(printk ("\npsi240i error Device Status: %X\n", status));
366 SCpnt->result = DecodeError (shost, status);
367 SCpnt->scsi_done (SCpnt);
368 }
369
370static irqreturn_t do_Irq_Handler (int irq, void *dev_id)
371{
372 unsigned long flags;
373 struct Scsi_Host *dev = dev_id;
374
375 spin_lock_irqsave(dev->host_lock, flags);
376 Irq_Handler(irq, dev_id);
377 spin_unlock_irqrestore(dev->host_lock, flags);
378 return IRQ_HANDLED;
379}
380
381/****************************************************************
382 * Name: Psi240i_QueueCommand
383 *
384 * Description: Process a queued command from the SCSI manager.
385 *
386 * Parameters: SCpnt - Pointer to SCSI command structure.
387 * done - Pointer to done function to call.
388 *
389 * Returns: Status code.
390 *
391 ****************************************************************/
392static int Psi240i_QueueCommand(struct scsi_cmnd *SCpnt,
393 void (*done)(struct scsi_cmnd *))
394 {
395 UCHAR *cdb = (UCHAR *)SCpnt->cmnd;
396 // Pointer to SCSI CDB
397 PADAPTER240I padapter = HOSTDATA (SCpnt->device->host);
398 // Pointer to adapter control structure
399 POUR_DEVICE pdev = &padapter->device [SCpnt->device->id];
400 // Pointer to device information
401 UCHAR rc;
402 // command return code
403
404 SCpnt->scsi_done = done;
405 padapter->ide.ide.ides.spigot = pdev->spigot;
406 padapter->buffer = SCpnt->request_buffer;
407 if (done)
408 {
409 if ( !pdev->device )
410 {
411 SCpnt->result = DID_BAD_TARGET << 16;
412 done (SCpnt);
413 return 0;
414 }
415 }
416 else
417 {
418 printk("psi240i_queuecommand: %02X: done can't be NULL\n", *cdb);
419 return 0;
420 }
421
422 switch ( *cdb )
423 {
424 case SCSIOP_INQUIRY: // inquiry CDB
425 {
426 padapter->ide.ide.ide[6] = pdev->byte6;
427 padapter->ide.ide.ides.cmd = IDE_COMMAND_IDENTIFY;
428 break;
429 }
430
431 case SCSIOP_TEST_UNIT_READY: // test unit ready CDB
432 SCpnt->result = DID_OK << 16;
433 done (SCpnt);
434 return 0;
435
 436 case SCSIOP_READ_CAPACITY: // read capacity CDB
437 {
438 PREAD_CAPACITY_DATA pdata = (PREAD_CAPACITY_DATA)SCpnt->request_buffer;
439
440 pdata->blksiz = 0x20000;
441 XANY2SCSI ((UCHAR *)&pdata->blks, pdev->blocks);
442 SCpnt->result = DID_OK << 16;
443 done (SCpnt);
444 return 0;
445 }
446
447 case SCSIOP_VERIFY: // verify CDB
448 *(ULONG *)padapter->ide.ide.ides.lba = XSCSI2LONG (&cdb[2]);
449 padapter->ide.ide.ide[6] |= pdev->byte6;
450 padapter->ide.ide.ide[2] = (UCHAR)((USHORT)cdb[8] | ((USHORT)cdb[7] << 8));
451 padapter->ide.ide.ides.cmd = IDE_COMMAND_VERIFY;
452 break;
453
454 case SCSIOP_READ: // read10 CDB
455 padapter->startSector = XSCSI2LONG (&cdb[2]);
456 padapter->sectorCount = (USHORT)cdb[8] | ((USHORT)cdb[7] << 8);
457 SetupTransfer (padapter, pdev->byte6);
458 padapter->ide.ide.ides.cmd = IDE_CMD_READ_MULTIPLE;
459 break;
460
461 case SCSIOP_READ6: // read6 CDB
462 padapter->startSector = SCSI2LONG (&cdb[1]);
463 padapter->sectorCount = cdb[4];
464 SetupTransfer (padapter, pdev->byte6);
465 padapter->ide.ide.ides.cmd = IDE_CMD_READ_MULTIPLE;
466 break;
467
468 case SCSIOP_WRITE: // write10 CDB
469 padapter->startSector = XSCSI2LONG (&cdb[2]);
470 padapter->sectorCount = (USHORT)cdb[8] | ((USHORT)cdb[7] << 8);
471 SetupTransfer (padapter, pdev->byte6);
472 padapter->ide.ide.ides.cmd = IDE_CMD_WRITE_MULTIPLE;
473 break;
474 case SCSIOP_WRITE6: // write6 CDB
475 padapter->startSector = SCSI2LONG (&cdb[1]);
476 padapter->sectorCount = cdb[4];
477 SetupTransfer (padapter, pdev->byte6);
478 padapter->ide.ide.ides.cmd = IDE_CMD_WRITE_MULTIPLE;
479 break;
480
481 default:
482 DEB (printk ("psi240i_queuecommand: Unsupported command %02X\n", *cdb));
483 SCpnt->result = DID_ERROR << 16;
484 done (SCpnt);
485 return 0;
486 }
487
488 padapter->SCpnt = SCpnt; // Save this command data
489
490 rc = IdeCmd (padapter);
491 if ( rc )
492 {
493 padapter->expectingIRQ = 0;
494 DEB (printk ("psi240i_queuecommand: %02X, %02X: Device failed to respond for command\n", *cdb, padapter->ide.ide.ides.cmd));
495 SCpnt->result = DID_ERROR << 16;
496 done (SCpnt);
497 return 0;
498 }
499 DEB (printk("psi240i_queuecommand: %02X, %02X now waiting for interrupt ", *cdb, padapter->ide.ide.ides.cmd));
500 return 0;
501 }
502
503/***************************************************************************
504 * Name: ReadChipMemory
505 *
506 * Description: Read information from controller memory.
507 *
508 * Parameters: psetup - Pointer to memory image of setup information.
509 * base - base address of memory.
 510 * length - length of data space in bytes.
511 * port - I/O address of data port.
512 *
513 * Returns: Nothing.
514 *
515 **************************************************************************/
516static void ReadChipMemory (void *pdata, USHORT base, USHORT length, USHORT port)
517 {
518 USHORT z, zz;
519 UCHAR *pd = (UCHAR *)pdata;
520 outb_p (SEL_NONE, port + REG_SEL_FAIL); // setup data port
521 zz = 0;
522 while ( zz < length )
523 {
524 outw_p (base, port + REG_ADDRESS); // setup address
525
526 for ( z = 0; z < 8; z++ )
527 {
528 if ( (zz + z) < length )
529 *pd++ = inb_p (port + z); // read data byte
530 }
531 zz += 8;
532 base += 8;
533 }
534 }
535/****************************************************************
536 * Name: Psi240i_Detect
537 *
538 * Description: Detect and initialize our boards.
539 *
540 * Parameters: tpnt - Pointer to SCSI host template structure.
541 *
542 * Returns: Number of adapters found.
543 *
544 ****************************************************************/
545static int Psi240i_Detect (struct scsi_host_template *tpnt)
546 {
547 int board;
548 int count = 0;
549 int unit;
550 int z;
551 USHORT port, port_range = 16;
552 CHIP_CONFIG_N chipConfig;
553 CHIP_DEVICE_N chipDevice[8];
554 struct Scsi_Host *pshost;
555
556 for ( board = 0; board < MAXBOARDS; board++ ) // scan for I/O ports
557 {
558 pshost = NULL;
559 port = portAddr[board]; // get base address to test
560 if ( !request_region (port, port_range, "psi240i") )
561 continue;
 562 if ( inb_p (port + REG_FAIL) != CHIP_ID ) // do the first test for likelihood that it is us
563 goto host_init_failure;
564 outb_p (SEL_NONE, port + REG_SEL_FAIL); // setup EEPROM/RAM access
565 outw (0, port + REG_ADDRESS); // setup EEPROM address zero
566 if ( inb_p (port) != 0x55 ) // test 1st byte
567 goto host_init_failure; // nope
568 if ( inb_p (port + 1) != 0xAA ) // test 2nd byte
569 goto host_init_failure; // nope
570
 571 // at this point our board is found and can be accessed. Now we need to initialize
 572 // our information and register with the kernel.
573
574
575 ReadChipMemory (&chipConfig, CHIP_CONFIG, sizeof (chipConfig), port);
576 ReadChipMemory (&chipDevice, CHIP_DEVICE, sizeof (chipDevice), port);
577 ReadChipMemory (&ChipSetup, CHIP_EEPROM_DATA, sizeof (ChipSetup), port);
578
579 if ( !chipConfig.numDrives ) // if no devices on this board
580 goto host_init_failure;
581
582 pshost = scsi_register (tpnt, sizeof(ADAPTER240I));
583 if(pshost == NULL)
584 goto host_init_failure;
585
586 PsiHost[chipConfig.irq - 10] = pshost;
587 pshost->unique_id = port;
588 pshost->io_port = port;
589 pshost->n_io_port = 16; /* Number of bytes of I/O space used */
590 pshost->irq = chipConfig.irq;
591
592 for ( z = 0; z < 11; z++ ) // build regester address array
593 HOSTDATA(pshost)->ports[z] = port + z;
594 HOSTDATA(pshost)->ports[11] = port + REG_FAIL;
595 HOSTDATA(pshost)->ports[12] = port + REG_ALT_STAT;
596 DEB (printk ("\nPorts ="));
597 DEB (for (z=0;z<13;z++) printk(" %#04X",HOSTDATA(pshost)->ports[z]););
598
599 for ( z = 0; z < chipConfig.numDrives; ++z )
600 {
601 unit = chipDevice[z].channel & 0x0F;
602 HOSTDATA(pshost)->device[unit].device = ChipSetup.setupDevice[unit].device;
603 HOSTDATA(pshost)->device[unit].byte6 = (UCHAR)(((unit & 1) << 4) | 0xE0);
604 HOSTDATA(pshost)->device[unit].spigot = (UCHAR)(1 << (unit >> 1));
605 HOSTDATA(pshost)->device[unit].sectors = ChipSetup.setupDevice[unit].sectors;
606 HOSTDATA(pshost)->device[unit].heads = ChipSetup.setupDevice[unit].heads;
607 HOSTDATA(pshost)->device[unit].cylinders = ChipSetup.setupDevice[unit].cylinders;
608 HOSTDATA(pshost)->device[unit].blocks = ChipSetup.setupDevice[unit].blocks;
609 DEB (printk ("\nHOSTDATA->device = %X", HOSTDATA(pshost)->device[unit].device));
610 DEB (printk ("\n byte6 = %X", HOSTDATA(pshost)->device[unit].byte6));
611 DEB (printk ("\n spigot = %X", HOSTDATA(pshost)->device[unit].spigot));
612 DEB (printk ("\n sectors = %X", HOSTDATA(pshost)->device[unit].sectors));
613 DEB (printk ("\n heads = %X", HOSTDATA(pshost)->device[unit].heads));
614 DEB (printk ("\n cylinders = %X", HOSTDATA(pshost)->device[unit].cylinders));
615 DEB (printk ("\n blocks = %lX", HOSTDATA(pshost)->device[unit].blocks));
616 }
617
618 if ( request_irq (chipConfig.irq, do_Irq_Handler, 0, "psi240i", pshost) == 0 )
619 {
620 printk("\nPSI-240I EIDE CONTROLLER: at I/O = %x IRQ = %d\n", port, chipConfig.irq);
621 printk("(C) 1997 Perceptive Solutions, Inc. All rights reserved\n\n");
622 count++;
623 continue;
624 }
625
626 printk ("Unable to allocate IRQ for PSI-240I controller.\n");
627
628host_init_failure:
629
630 release_region (port, port_range);
631 if (pshost)
632 scsi_unregister (pshost);
633
634 }
635 return count;
636 }
637
638static int Psi240i_Release(struct Scsi_Host *shost)
639{
640 if (shost->irq)
641 free_irq(shost->irq, NULL);
642 if (shost->io_port && shost->n_io_port)
643 release_region(shost->io_port, shost->n_io_port);
644 scsi_unregister(shost);
645 return 0;
646}
647
648/****************************************************************
649 * Name: Psi240i_BiosParam
650 *
651 * Description: Process the biosparam request from the SCSI manager to
652 * return C/H/S data.
653 *
654 * Parameters: disk - Pointer to SCSI disk structure.
655 * dev - Major/minor number from kernel.
656 * geom - Pointer to integer array to place geometry data.
657 *
658 * Returns: zero.
659 *
660 ****************************************************************/
661static int Psi240i_BiosParam (struct scsi_device *sdev, struct block_device *dev,
662 sector_t capacity, int geom[])
663 {
664 POUR_DEVICE pdev;
665
666 pdev = &(HOSTDATA(sdev->host)->device[sdev_id(sdev)]);
667
668 geom[0] = pdev->heads;
669 geom[1] = pdev->sectors;
670 geom[2] = pdev->cylinders;
671 return 0;
672 }
673
674MODULE_LICENSE("GPL");
675
676static struct scsi_host_template driver_template = {
677 .proc_name = "psi240i",
678 .name = "PSI-240I EIDE Disk Controller",
679 .detect = Psi240i_Detect,
680 .release = Psi240i_Release,
681 .queuecommand = Psi240i_QueueCommand,
682 .bios_param = Psi240i_BiosParam,
683 .can_queue = 1,
684 .this_id = -1,
685 .sg_tablesize = SG_NONE,
686 .cmd_per_lun = 1,
687 .use_clustering = DISABLE_CLUSTERING,
688};
689#include "scsi_module.c"
diff --git a/drivers/scsi/psi240i.h b/drivers/scsi/psi240i.h
deleted file mode 100644
index 21ebb9214004..000000000000
--- a/drivers/scsi/psi240i.h
+++ /dev/null
@@ -1,315 +0,0 @@
1/*+M*************************************************************************
2 * Perceptive Solutions, Inc. PSI-240I device driver proc support for Linux.
3 *
4 * Copyright (c) 1997 Perceptive Solutions, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 *
21 * File Name: psi240i.h
22 *
23 * Description: Header file for the SCSI driver for the PSI240I
24 * EIDE interface card.
25 *
26 *-M*************************************************************************/
27#ifndef _PSI240I_H
28#define _PSI240I_H
29
30#include <linux/types.h>
31
32#ifndef PSI_EIDE_SCSIOP
33#define PSI_EIDE_SCSIOP 1
34
35/************************************************/
36/* Some defines that we like */
37/************************************************/
38#define CHAR char
39#define UCHAR unsigned char
40#define SHORT short
41#define USHORT unsigned short
42#define BOOL unsigned short
43#define LONG long
44#define ULONG unsigned long
45#define VOID void
46
47/************************************************/
 48/* Timeout constants */
49/************************************************/
50#define TIMEOUT_READY 10 // 100 mSec
51#define TIMEOUT_DRQ 40 // 400 mSec
52
53/************************************************/
54/* Misc. macros */
55/************************************************/
56#define ANY2SCSI(up, p) \
57((UCHAR *)up)[0] = (((ULONG)(p)) >> 8); \
58((UCHAR *)up)[1] = ((ULONG)(p));
59
60#define SCSI2LONG(up) \
61( (((long)*(((UCHAR *)up))) << 16) \
62+ (((long)(((UCHAR *)up)[1])) << 8) \
63+ ((long)(((UCHAR *)up)[2])) )
64
65#define XANY2SCSI(up, p) \
66((UCHAR *)up)[0] = ((long)(p)) >> 24; \
67((UCHAR *)up)[1] = ((long)(p)) >> 16; \
68((UCHAR *)up)[2] = ((long)(p)) >> 8; \
69((UCHAR *)up)[3] = ((long)(p));
70
71#define XSCSI2LONG(up) \
72( (((long)(((UCHAR *)up)[0])) << 24) \
73+ (((long)(((UCHAR *)up)[1])) << 16) \
74+ (((long)(((UCHAR *)up)[2])) << 8) \
75+ ((long)(((UCHAR *)up)[3])) )
76
77/************************************************/
78/* SCSI CDB operation codes */
79/************************************************/
80#define SCSIOP_TEST_UNIT_READY 0x00
81#define SCSIOP_REZERO_UNIT 0x01
82#define SCSIOP_REWIND 0x01
83#define SCSIOP_REQUEST_BLOCK_ADDR 0x02
84#define SCSIOP_REQUEST_SENSE 0x03
85#define SCSIOP_FORMAT_UNIT 0x04
86#define SCSIOP_READ_BLOCK_LIMITS 0x05
87#define SCSIOP_REASSIGN_BLOCKS 0x07
88#define SCSIOP_READ6 0x08
89#define SCSIOP_RECEIVE 0x08
90#define SCSIOP_WRITE6 0x0A
91#define SCSIOP_PRINT 0x0A
92#define SCSIOP_SEND 0x0A
93#define SCSIOP_SEEK6 0x0B
94#define SCSIOP_TRACK_SELECT 0x0B
95#define SCSIOP_SLEW_PRINT 0x0B
96#define SCSIOP_SEEK_BLOCK 0x0C
97#define SCSIOP_PARTITION 0x0D
98#define SCSIOP_READ_REVERSE 0x0F
99#define SCSIOP_WRITE_FILEMARKS 0x10
100#define SCSIOP_FLUSH_BUFFER 0x10
101#define SCSIOP_SPACE 0x11
102#define SCSIOP_INQUIRY 0x12
103#define SCSIOP_VERIFY6 0x13
104#define SCSIOP_RECOVER_BUF_DATA 0x14
105#define SCSIOP_MODE_SELECT 0x15
106#define SCSIOP_RESERVE_UNIT 0x16
107#define SCSIOP_RELEASE_UNIT 0x17
108#define SCSIOP_COPY 0x18
109#define SCSIOP_ERASE 0x19
110#define SCSIOP_MODE_SENSE 0x1A
111#define SCSIOP_START_STOP_UNIT 0x1B
112#define SCSIOP_STOP_PRINT 0x1B
113#define SCSIOP_LOAD_UNLOAD 0x1B
114#define SCSIOP_RECEIVE_DIAGNOSTIC 0x1C
115#define SCSIOP_SEND_DIAGNOSTIC 0x1D
116#define SCSIOP_MEDIUM_REMOVAL 0x1E
117#define SCSIOP_READ_CAPACITY 0x25
118#define SCSIOP_READ 0x28
119#define SCSIOP_WRITE 0x2A
120#define SCSIOP_SEEK 0x2B
121#define SCSIOP_LOCATE 0x2B
122#define SCSIOP_WRITE_VERIFY 0x2E
123#define SCSIOP_VERIFY 0x2F
124#define SCSIOP_SEARCH_DATA_HIGH 0x30
125#define SCSIOP_SEARCH_DATA_EQUAL 0x31
126#define SCSIOP_SEARCH_DATA_LOW 0x32
127#define SCSIOP_SET_LIMITS 0x33
128#define SCSIOP_READ_POSITION 0x34
129#define SCSIOP_SYNCHRONIZE_CACHE 0x35
130#define SCSIOP_COMPARE 0x39
131#define SCSIOP_COPY_COMPARE 0x3A
132#define SCSIOP_WRITE_DATA_BUFF 0x3B
133#define SCSIOP_READ_DATA_BUFF 0x3C
134#define SCSIOP_CHANGE_DEFINITION 0x40
135#define SCSIOP_READ_SUB_CHANNEL 0x42
136#define SCSIOP_READ_TOC 0x43
137#define SCSIOP_READ_HEADER 0x44
138#define SCSIOP_PLAY_AUDIO 0x45
139#define SCSIOP_PLAY_AUDIO_MSF 0x47
140#define SCSIOP_PLAY_TRACK_INDEX 0x48
141#define SCSIOP_PLAY_TRACK_RELATIVE 0x49
142#define SCSIOP_PAUSE_RESUME 0x4B
143#define SCSIOP_LOG_SELECT 0x4C
144#define SCSIOP_LOG_SENSE 0x4D
145#define SCSIOP_MODE_SELECT10 0x55
146#define SCSIOP_MODE_SENSE10 0x5A
147#define SCSIOP_LOAD_UNLOAD_SLOT 0xA6
148#define SCSIOP_MECHANISM_STATUS 0xBD
149#define SCSIOP_READ_CD 0xBE
150
151// IDE command definitions
152#define IDE_COMMAND_ATAPI_RESET 0x08
153#define IDE_COMMAND_READ 0x20
154#define IDE_COMMAND_WRITE 0x30
155#define IDE_COMMAND_RECALIBRATE 0x10
156#define IDE_COMMAND_SEEK 0x70
157#define IDE_COMMAND_SET_PARAMETERS 0x91
158#define IDE_COMMAND_VERIFY 0x40
159#define IDE_COMMAND_ATAPI_PACKET 0xA0
160#define IDE_COMMAND_ATAPI_IDENTIFY 0xA1
161#define IDE_CMD_READ_MULTIPLE 0xC4
162#define IDE_CMD_WRITE_MULTIPLE 0xC5
163#define IDE_CMD_SET_MULTIPLE 0xC6
164#define IDE_COMMAND_WRITE_DMA 0xCA
165#define IDE_COMMAND_READ_DMA 0xC8
166#define IDE_COMMAND_IDENTIFY 0xEC
167
168// IDE status definitions
169#define IDE_STATUS_ERROR 0x01
170#define IDE_STATUS_INDEX 0x02
171#define IDE_STATUS_CORRECTED_ERROR 0x04
172#define IDE_STATUS_DRQ 0x08
173#define IDE_STATUS_DSC 0x10
174#define IDE_STATUS_WRITE_FAULT 0x20
175#define IDE_STATUS_DRDY 0x40
176#define IDE_STATUS_BUSY 0x80
177
178// IDE error definitions
179#define IDE_ERROR_AMNF 0x01
180#define IDE_ERROR_TKONF 0x02
181#define IDE_ERROR_ABRT 0x04
182#define IDE_ERROR_MCR 0x08
183#define IDE_ERROR_IDFN 0x10
184#define IDE_ERROR_MC 0x20
185#define IDE_ERROR_UNC 0x40
186#define IDE_ERROR_BBK 0x80
187
188// IDE interface structure
189typedef struct _IDE_STRUCT
190 {
191 union
192 {
193 UCHAR ide[9];
194 struct
195 {
196 USHORT data;
197 UCHAR sectors;
198 UCHAR lba[4];
199 UCHAR cmd;
200 UCHAR spigot;
201 } ides;
202 } ide;
203 } IDE_STRUCT;
204
205// SCSI read capacity structure
206typedef struct _READ_CAPACITY_DATA
207 {
208 ULONG blks; /* total blocks (converted to little endian) */
209 ULONG blksiz; /* size of each (converted to little endian) */
210 } READ_CAPACITY_DATA, *PREAD_CAPACITY_DATA;
211
212// SCSI inquiry data
213#ifndef HOSTS_C
214
215typedef struct _INQUIRYDATA
216 {
217 UCHAR DeviceType :5;
218 UCHAR DeviceTypeQualifier :3;
219 UCHAR DeviceTypeModifier :7;
220 UCHAR RemovableMedia :1;
221 UCHAR Versions;
222 UCHAR ResponseDataFormat;
223 UCHAR AdditionalLength;
224 UCHAR Reserved[2];
225 UCHAR SoftReset :1;
226 UCHAR CommandQueue :1;
227 UCHAR Reserved2 :1;
228 UCHAR LinkedCommands :1;
229 UCHAR Synchronous :1;
230 UCHAR Wide16Bit :1;
231 UCHAR Wide32Bit :1;
232 UCHAR RelativeAddressing :1;
233 UCHAR VendorId[8];
234 UCHAR ProductId[16];
235 UCHAR ProductRevisionLevel[4];
236 UCHAR VendorSpecific[20];
237 UCHAR Reserved3[40];
238 } INQUIRYDATA, *PINQUIRYDATA;
239#endif
240
241// IDE IDENTIFY data
242typedef struct _IDENTIFY_DATA
243 {
244 USHORT GeneralConfiguration; // 00
245 USHORT NumberOfCylinders; // 02
246 USHORT Reserved1; // 04
247 USHORT NumberOfHeads; // 06
248 USHORT UnformattedBytesPerTrack; // 08
249 USHORT UnformattedBytesPerSector; // 0A
250 USHORT SectorsPerTrack; // 0C
251 USHORT VendorUnique1[3]; // 0E
252 USHORT SerialNumber[10]; // 14
253 USHORT BufferType; // 28
254 USHORT BufferSectorSize; // 2A
255 USHORT NumberOfEccBytes; // 2C
256 USHORT FirmwareRevision[4]; // 2E
257 USHORT ModelNumber[20]; // 36
258 UCHAR MaximumBlockTransfer; // 5E
259 UCHAR VendorUnique2; // 5F
260 USHORT DoubleWordIo; // 60
261 USHORT Capabilities; // 62
262 USHORT Reserved2; // 64
263 UCHAR VendorUnique3; // 66
264 UCHAR PioCycleTimingMode; // 67
265 UCHAR VendorUnique4; // 68
266 UCHAR DmaCycleTimingMode; // 69
267 USHORT TranslationFieldsValid:1; // 6A
268 USHORT Reserved3:15;
269 USHORT NumberOfCurrentCylinders; // 6C
270 USHORT NumberOfCurrentHeads; // 6E
271 USHORT CurrentSectorsPerTrack; // 70
272 ULONG CurrentSectorCapacity; // 72
273 USHORT Reserved4[197]; // 76
274 } IDENTIFY_DATA, *PIDENTIFY_DATA;
275
276// Identify data without the Reserved4.
277typedef struct _IDENTIFY_DATA2 {
278 USHORT GeneralConfiguration; // 00
279 USHORT NumberOfCylinders; // 02
280 USHORT Reserved1; // 04
281 USHORT NumberOfHeads; // 06
282 USHORT UnformattedBytesPerTrack; // 08
283 USHORT UnformattedBytesPerSector; // 0A
284 USHORT SectorsPerTrack; // 0C
285 USHORT VendorUnique1[3]; // 0E
286 USHORT SerialNumber[10]; // 14
287 USHORT BufferType; // 28
288 USHORT BufferSectorSize; // 2A
289 USHORT NumberOfEccBytes; // 2C
290 USHORT FirmwareRevision[4]; // 2E
291 USHORT ModelNumber[20]; // 36
292 UCHAR MaximumBlockTransfer; // 5E
293 UCHAR VendorUnique2; // 5F
294 USHORT DoubleWordIo; // 60
295 USHORT Capabilities; // 62
296 USHORT Reserved2; // 64
297 UCHAR VendorUnique3; // 66
298 UCHAR PioCycleTimingMode; // 67
299 UCHAR VendorUnique4; // 68
300 UCHAR DmaCycleTimingMode; // 69
301 USHORT TranslationFieldsValid:1; // 6A
302 USHORT Reserved3:15;
303 USHORT NumberOfCurrentCylinders; // 6C
304 USHORT NumberOfCurrentHeads; // 6E
305 USHORT CurrentSectorsPerTrack; // 70
306 ULONG CurrentSectorCapacity; // 72
307 } IDENTIFY_DATA2, *PIDENTIFY_DATA2;
308
309#endif // PSI_EIDE_SCSIOP
310
311// function prototypes
312int Psi240i_Command(struct scsi_cmnd *SCpnt);
313int Psi240i_Abort(struct scsi_cmnd *SCpnt);
314int Psi240i_Reset(struct scsi_cmnd *SCpnt, unsigned int flags);
315#endif
diff --git a/drivers/scsi/psi_chip.h b/drivers/scsi/psi_chip.h
deleted file mode 100644
index 224cf8f64c97..000000000000
--- a/drivers/scsi/psi_chip.h
+++ /dev/null
@@ -1,195 +0,0 @@
1/*+M*************************************************************************
2 * Perceptive Solutions, Inc. PSI-240I device driver proc support for Linux.
3 *
4 * Copyright (c) 1997 Perceptive Solutions, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 *
21 * File Name: psi_chip.h
22 *
23 * Description: This file contains the interface defines and
24 * error codes.
25 *
26 *-M*************************************************************************/
27#ifndef PSI_CHIP
28#define PSI_CHIP
29
30/************************************************/
 31/* Misc constants */
32/************************************************/
33#define CHIP_MAXDRIVES 8
34
35/************************************************/
36/* Chip I/O addresses */
37/************************************************/
38#define CHIP_ADRS_0 0x0130
39#define CHIP_ADRS_1 0x0150
40#define CHIP_ADRS_2 0x0190
41#define CHIP_ADRS_3 0x0210
42#define CHIP_ADRS_4 0x0230
43#define CHIP_ADRS_5 0x0250
44
45/************************************************/
46/* EEPROM locations */
47/************************************************/
48#define CHIP_EEPROM_BIOS 0x0000 // BIOS base address
49#define CHIP_EEPROM_DATA 0x2000 // SETUP data base address
50#define CHIP_EEPROM_FACTORY 0x2400 // FACTORY data base address
51#define CHIP_EEPROM_SETUP 0x3000 // SETUP PROGRAM base address
52
53#define CHIP_EEPROM_SIZE 32768U // size of the entire EEPROM
54#define CHIP_EEPROM_BIOS_SIZE 8192 // size of the BIOS in bytes
55#define CHIP_EEPROM_DATA_SIZE 4096 // size of factory, setup, log data block in bytes
56#define CHIP_EEPROM_SETUP_SIZE 20480U // size of the setup program in bytes
57
58/************************************************/
59/* Chip Interrupts */
60/************************************************/
61#define CHIP_IRQ_10 0x72
62#define CHIP_IRQ_11 0x73
63#define CHIP_IRQ_12 0x74
64
65/************************************************/
66/* Chip Setup addresses */
67/************************************************/
68#define CHIP_SETUP_BASE 0x0000C000L
69
70/************************************************/
71/* Chip Register address offsets */
72/************************************************/
73#define REG_DATA 0x00
74#define REG_ERROR 0x01
75#define REG_SECTOR_COUNT 0x02
76#define REG_LBA_0 0x03
77#define REG_LBA_8 0x04
78#define REG_LBA_16 0x05
79#define REG_LBA_24 0x06
80#define REG_STAT_CMD 0x07
81#define REG_SEL_FAIL 0x08
82#define REG_IRQ_STATUS 0x09
83#define REG_ADDRESS 0x0A
84#define REG_FAIL 0x0C
85#define REG_ALT_STAT 0x0E
86#define REG_DRIVE_ADRS 0x0F
87
88/************************************************/
89/* Chip RAM locations */
90/************************************************/
91#define CHIP_DEVICE 0x8000
92#define CHIP_DEVICE_0 0x8000
93#define CHIP_DEVICE_1 0x8008
94#define CHIP_DEVICE_2 0x8010
95#define CHIP_DEVICE_3 0x8018
96#define CHIP_DEVICE_4 0x8020
97#define CHIP_DEVICE_5 0x8028
98#define CHIP_DEVICE_6 0x8030
99#define CHIP_DEVICE_7 0x8038
100typedef struct
101 {
 102 UCHAR channel; // channel of this device (0-7).
103 UCHAR spt; // Sectors Per Track.
104 ULONG spc; // Sectors Per Cylinder.
105 } CHIP_DEVICE_N;
106
107#define CHIP_CONFIG 0x8100 // address of boards configuration.
108typedef struct
109 {
110 UCHAR irq; // interrupt request channel number
111 UCHAR numDrives; // Number of accessible drives
112 UCHAR fastFormat; // Boolean for fast format enable
113 } CHIP_CONFIG_N;
114
115#define CHIP_MAP 0x8108 // eight byte device type map.
116
117
118#define CHIP_RAID 0x8120 // array of RAID signature structures and LBA
119#define CHIP_RAID_1 0x8120
120#define CHIP_RAID_2 0x8130
121#define CHIP_RAID_3 0x8140
122#define CHIP_RAID_4 0x8150
123
124/************************************************/
125/* Chip Register Masks */
126/************************************************/
127#define CHIP_ID 0x7B
128#define SEL_RAM 0x8000
129#define MASK_FAIL 0x80
130
131/************************************************/
 132/* Sectors per transfer */
133/************************************************/
134#define SECTORSXFER 8
135
136/************************************************/
137/* Chip cable select bits */
138/************************************************/
139#define SEL_NONE 0x00
140#define SEL_1 0x01
141#define SEL_2 0x02
142#define SEL_3 0x04
143#define SEL_4 0x08
144
145/************************************************/
146/* Programmable Interrupt Controller*/
147/************************************************/
148#define PIC1 0x20 // first 8259 base port address
149#define PIC2 0xA0 // second 8259 base port address
150#define INT_OCW1 1 // Operation Control Word 1: IRQ mask
151#define EOI 0x20 // non-specific end-of-interrupt
152
153/************************************************/
154/* Device/Geometry controls */
155/************************************************/
156#define GEOMETRY_NONE 0x0 // No device
157#define GEOMETRY_AUTO 0x1 // Geometry set automatically
158#define GEOMETRY_USER 0x2 // User supplied geometry
159
160#define DEVICE_NONE 0x0 // No device present
161#define DEVICE_INACTIVE 0x1 // device present but not registered active
162#define DEVICE_ATAPI 0x2 // ATAPI device (CD_ROM, Tape, Etc...)
 163#define DEVICE_DASD_NONLBA 0x3 // Non-LBA (CHS-only) device
164#define DEVICE_DASD_LBA 0x4 // LBA compatible device
165
166/************************************************/
167/* Setup Structure Definitions */
168/************************************************/
169typedef struct // device setup parameters
170 {
171 UCHAR geometryControl; // geometry control flags
172 UCHAR device; // device code
173 USHORT sectors; // number of sectors per track
174 USHORT heads; // number of heads
175 USHORT cylinders; // number of cylinders for this device
176 ULONG blocks; // number of blocks on device
177 USHORT spare1;
178 USHORT spare2;
179 } SETUP_DEVICE, *PSETUP_DEVICE;
180
181typedef struct // master setup structure
182 {
183 USHORT startupDelay;
184 USHORT promptBIOS;
185 USHORT fastFormat;
186 USHORT spare2;
187 USHORT spare3;
188 USHORT spare4;
189 USHORT spare5;
190 USHORT spare6;
191 SETUP_DEVICE setupDevice[8];
192 } SETUP, *PSETUP;
193
194#endif
195
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 288640756099..c94906abfee3 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -528,7 +528,7 @@ __setup("qla1280=", qla1280_setup);
528#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len 528#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
529#define CMD_CDBP(Cmnd) Cmnd->cmnd 529#define CMD_CDBP(Cmnd) Cmnd->cmnd
530#define CMD_SNSP(Cmnd) Cmnd->sense_buffer 530#define CMD_SNSP(Cmnd) Cmnd->sense_buffer
531#define CMD_SNSLEN(Cmnd) sizeof(Cmnd->sense_buffer) 531#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
532#define CMD_RESULT(Cmnd) Cmnd->result 532#define CMD_RESULT(Cmnd) Cmnd->result
533#define CMD_HANDLE(Cmnd) Cmnd->host_scribble 533#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
534#define CMD_REQUEST(Cmnd) Cmnd->request->cmd 534#define CMD_REQUEST(Cmnd) Cmnd->request->cmd
@@ -3715,7 +3715,7 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3715 } else 3715 } else
3716 sense_sz = 0; 3716 sense_sz = 0;
3717 memset(cmd->sense_buffer + sense_sz, 0, 3717 memset(cmd->sense_buffer + sense_sz, 0,
3718 sizeof(cmd->sense_buffer) - sense_sz); 3718 SCSI_SENSE_BUFFERSIZE - sense_sz);
3719 3719
3720 dprintk(2, "qla1280_status_entry: Check " 3720 dprintk(2, "qla1280_status_entry: Check "
3721 "condition Sense data, b %i, t %i, " 3721 "condition Sense data, b %i, t %i, "
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 71ddb5db4944..c51fd1f86639 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,4 +1,4 @@
1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o 2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o
3 3
4obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o 4obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fb388b8c07cf..adf97320574b 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -9,7 +9,7 @@
9#include <linux/kthread.h> 9#include <linux/kthread.h>
10#include <linux/vmalloc.h> 10#include <linux/vmalloc.h>
11 11
12int qla24xx_vport_disable(struct fc_vport *, bool); 12static int qla24xx_vport_disable(struct fc_vport *, bool);
13 13
14/* SYSFS attributes --------------------------------------------------------- */ 14/* SYSFS attributes --------------------------------------------------------- */
15 15
@@ -958,7 +958,7 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
958{ 958{
959 scsi_qla_host_t *ha = shost_priv(shost); 959 scsi_qla_host_t *ha = shost_priv(shost);
960 960
961 set_bit(LOOP_RESET_NEEDED, &ha->dpc_flags); 961 qla2x00_loop_reset(ha);
962 return 0; 962 return 0;
963} 963}
964 964
@@ -967,35 +967,51 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
967{ 967{
968 scsi_qla_host_t *ha = shost_priv(shost); 968 scsi_qla_host_t *ha = shost_priv(shost);
969 int rval; 969 int rval;
970 uint16_t mb_stat[1]; 970 struct link_statistics *stats;
971 link_stat_t stat_buf; 971 dma_addr_t stats_dma;
972 struct fc_host_statistics *pfc_host_stat; 972 struct fc_host_statistics *pfc_host_stat;
973 973
974 rval = QLA_FUNCTION_FAILED;
975 pfc_host_stat = &ha->fc_host_stat; 974 pfc_host_stat = &ha->fc_host_stat;
976 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 975 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
977 976
977 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
978 if (stats == NULL) {
979 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
980 __func__, ha->host_no));
981 goto done;
982 }
983 memset(stats, 0, DMA_POOL_SIZE);
984
985 rval = QLA_FUNCTION_FAILED;
978 if (IS_FWI2_CAPABLE(ha)) { 986 if (IS_FWI2_CAPABLE(ha)) {
979 rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf, 987 rval = qla24xx_get_isp_stats(ha, stats, stats_dma);
980 sizeof(stat_buf) / 4, mb_stat);
981 } else if (atomic_read(&ha->loop_state) == LOOP_READY && 988 } else if (atomic_read(&ha->loop_state) == LOOP_READY &&
982 !test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) && 989 !test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) &&
983 !test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) && 990 !test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) &&
984 !ha->dpc_active) { 991 !ha->dpc_active) {
985 /* Must be in a 'READY' state for statistics retrieval. */ 992 /* Must be in a 'READY' state for statistics retrieval. */
986 rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf, 993 rval = qla2x00_get_link_status(ha, ha->loop_id, stats,
987 mb_stat); 994 stats_dma);
988 } 995 }
989 996
990 if (rval != QLA_SUCCESS) 997 if (rval != QLA_SUCCESS)
991 goto done; 998 goto done_free;
999
1000 pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1001 pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1002 pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1003 pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1004 pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1005 pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1006 if (IS_FWI2_CAPABLE(ha)) {
1007 pfc_host_stat->tx_frames = stats->tx_frames;
1008 pfc_host_stat->rx_frames = stats->rx_frames;
1009 pfc_host_stat->dumped_frames = stats->dumped_frames;
1010 pfc_host_stat->nos_count = stats->nos_rcvd;
1011 }
992 1012
993 pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt; 1013done_free:
994 pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt; 1014 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
995 pfc_host_stat->loss_of_signal_count = stat_buf.loss_sig_cnt;
996 pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt;
997 pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt;
998 pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt;
999done: 1015done:
1000 return pfc_host_stat; 1016 return pfc_host_stat;
1001} 1017}
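
Distilled, the new statistics path follows the usual pool-buffer shape:
allocate DMA-able memory, let firmware fill it, copy out, free on every exit
path. A sketch, with fw_fill_stats() standing in for the real mailbox call:

        /* Hypothetical helper showing the allocate/fill/free discipline. */
        static int read_hw_stats(struct dma_pool *pool, struct link_statistics *out,
                                 int (*fw_fill_stats)(dma_addr_t))
        {
                struct link_statistics *buf;
                dma_addr_t buf_dma;
                int ret;

                buf = dma_pool_alloc(pool, GFP_KERNEL, &buf_dma);
                if (!buf)
                        return -ENOMEM;
                memset(buf, 0, sizeof(*buf));

                ret = fw_fill_stats(buf_dma);   /* firmware DMA-writes into buf */
                if (!ret)
                        *out = *buf;            /* copy out before freeing */

                dma_pool_free(pool, buf, buf_dma);
                return ret;
        }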
@@ -1113,7 +1129,7 @@ vport_create_failed_2:
1113 return FC_VPORT_FAILED; 1129 return FC_VPORT_FAILED;
1114} 1130}
1115 1131
1116int 1132static int
1117qla24xx_vport_delete(struct fc_vport *fc_vport) 1133qla24xx_vport_delete(struct fc_vport *fc_vport)
1118{ 1134{
1119 scsi_qla_host_t *ha = shost_priv(fc_vport->shost); 1135 scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
@@ -1124,7 +1140,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1124 1140
1125 down(&ha->vport_sem); 1141 down(&ha->vport_sem);
1126 ha->cur_vport_count--; 1142 ha->cur_vport_count--;
1127 clear_bit(vha->vp_idx, (unsigned long *)ha->vp_idx_map); 1143 clear_bit(vha->vp_idx, ha->vp_idx_map);
1128 up(&ha->vport_sem); 1144 up(&ha->vport_sem);
1129 1145
1130 kfree(vha->node_name); 1146 kfree(vha->node_name);
@@ -1146,7 +1162,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1146 return 0; 1162 return 0;
1147} 1163}
1148 1164
1149int 1165static int
1150qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable) 1166qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1151{ 1167{
1152 scsi_qla_host_t *vha = fc_vport->dd_data; 1168 scsi_qla_host_t *vha = fc_vport->dd_data;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index eaa04dabcdf6..d88e98c476b0 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1051,6 +1051,7 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1051 struct qla25xx_fw_dump *fw; 1051 struct qla25xx_fw_dump *fw;
1052 uint32_t ext_mem_cnt; 1052 uint32_t ext_mem_cnt;
1053 void *nxt; 1053 void *nxt;
1054 struct qla2xxx_fce_chain *fcec;
1054 1055
1055 risc_address = ext_mem_cnt = 0; 1056 risc_address = ext_mem_cnt = 0;
1056 flags = 0; 1057 flags = 0;
@@ -1321,10 +1322,31 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1321 if (rval != QLA_SUCCESS) 1322 if (rval != QLA_SUCCESS)
1322 goto qla25xx_fw_dump_failed_0; 1323 goto qla25xx_fw_dump_failed_0;
1323 1324
1325 /* Fibre Channel Trace Buffer. */
1324 nxt = qla2xxx_copy_queues(ha, nxt); 1326 nxt = qla2xxx_copy_queues(ha, nxt);
1325 if (ha->eft) 1327 if (ha->eft)
1326 memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size)); 1328 memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
1327 1329
1330 /* Fibre Channel Event Buffer. */
1331 if (!ha->fce)
1332 goto qla25xx_fw_dump_failed_0;
1333
1334 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1335
1336 fcec = nxt + ntohl(ha->fw_dump->eft_size);
1337 fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
1338 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
1339 fce_calc_size(ha->fce_bufs));
1340 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
1341 fcec->addr_l = htonl(LSD(ha->fce_dma));
1342 fcec->addr_h = htonl(MSD(ha->fce_dma));
1343
1344 iter_reg = fcec->eregs;
1345 for (cnt = 0; cnt < 8; cnt++)
1346 *iter_reg++ = htonl(ha->fce_mb[cnt]);
1347
1348 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
1349
1328qla25xx_fw_dump_failed_0: 1350qla25xx_fw_dump_failed_0:
1329 if (rval != QLA_SUCCESS) { 1351 if (rval != QLA_SUCCESS) {
1330 qla_printk(KERN_WARNING, ha, 1352 qla_printk(KERN_WARNING, ha,
@@ -1428,21 +1450,6 @@ qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd)
1428 printk(" sp flags=0x%x\n", sp->flags); 1450 printk(" sp flags=0x%x\n", sp->flags);
1429} 1451}
1430 1452
1431void
1432qla2x00_dump_pkt(void *pkt)
1433{
1434 uint32_t i;
1435 uint8_t *data = (uint8_t *) pkt;
1436
1437 for (i = 0; i < 64; i++) {
1438 if (!(i % 4))
1439 printk("\n%02x: ", i);
1440
1441 printk("%02x ", data[i]);
1442 }
1443 printk("\n");
1444}
1445
1446#if defined(QL_DEBUG_ROUTINES) 1453#if defined(QL_DEBUG_ROUTINES)
1447/* 1454/*
1448 * qla2x00_formatted_dump_buffer 1455 * qla2x00_formatted_dump_buffer
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index a50ecf0b7c84..524598afc81c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -256,6 +256,25 @@ struct qla25xx_fw_dump {
256#define EFT_BYTES_PER_BUFFER 0x4000 256#define EFT_BYTES_PER_BUFFER 0x4000
257#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS)) 257#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
258 258
259#define FCE_NUM_BUFFERS 64
260#define FCE_BYTES_PER_BUFFER 0x400
261#define FCE_SIZE ((FCE_BYTES_PER_BUFFER) * (FCE_NUM_BUFFERS))
262#define fce_calc_size(b) ((FCE_BYTES_PER_BUFFER) * (b))
263
264struct qla2xxx_fce_chain {
265 uint32_t type;
266 uint32_t chain_size;
267
268 uint32_t size;
269 uint32_t addr_l;
270 uint32_t addr_h;
271 uint32_t eregs[8];
272};
273
274#define DUMP_CHAIN_VARIANT 0x80000000
275#define DUMP_CHAIN_FCE 0x7FFFFAF0
276#define DUMP_CHAIN_LAST 0x80000000
277
259struct qla2xxx_fw_dump { 278struct qla2xxx_fw_dump {
260 uint8_t signature[4]; 279 uint8_t signature[4];
261 uint32_t version; 280 uint32_t version;
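
The sizes these macros imply, worked out:

        /* FCE_SIZE             = FCE_BYTES_PER_BUFFER * FCE_NUM_BUFFERS
         *                      = 0x400 * 64 = 0x10000 bytes (64 KB)
         * sizeof(struct qla2xxx_fce_chain)
         *                      = 5 * 4 (type..addr_h) + 8 * 4 (eregs) = 52 bytes
         * chain_size (64 bufs) = 52 + 0x10000 bytes, which is exactly what
         *                        qla25xx_fw_dump() stores via htonl() above. */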
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 04e8cbca4c0d..6f129da37589 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -623,9 +623,6 @@ typedef struct {
623#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */ 623#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
624#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */ 624#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
625 625
626#define TC_ENABLE 4
627#define TC_DISABLE 5
628
629/* Firmware return data sizes */ 626/* Firmware return data sizes */
630#define FCAL_MAP_SIZE 128 627#define FCAL_MAP_SIZE 128
631 628
@@ -862,14 +859,20 @@ typedef struct {
862#define GLSO_SEND_RPS BIT_0 859#define GLSO_SEND_RPS BIT_0
863#define GLSO_USE_DID BIT_3 860#define GLSO_USE_DID BIT_3
864 861
865typedef struct { 862struct link_statistics {
866 uint32_t link_fail_cnt; 863 uint32_t link_fail_cnt;
867 uint32_t loss_sync_cnt; 864 uint32_t loss_sync_cnt;
868 uint32_t loss_sig_cnt; 865 uint32_t loss_sig_cnt;
869 uint32_t prim_seq_err_cnt; 866 uint32_t prim_seq_err_cnt;
870 uint32_t inval_xmit_word_cnt; 867 uint32_t inval_xmit_word_cnt;
871 uint32_t inval_crc_cnt; 868 uint32_t inval_crc_cnt;
872} link_stat_t; 869 uint32_t unused1[0x1b];
870 uint32_t tx_frames;
871 uint32_t rx_frames;
872 uint32_t dumped_frames;
873 uint32_t unused2[2];
874 uint32_t nos_rcvd;
875};
873 876
874/* 877/*
875 * NVRAM Command values. 878 * NVRAM Command values.
@@ -2116,14 +2119,6 @@ struct qla_msix_entry {
2116 2119
2117#define WATCH_INTERVAL 1 /* number of seconds */ 2120#define WATCH_INTERVAL 1 /* number of seconds */
2118 2121
2119/* NPIV */
2120#define MAX_MULTI_ID_LOOP 126
2121#define MAX_MULTI_ID_FABRIC 64
2122#define MAX_NUM_VPORT_LOOP (MAX_MULTI_ID_LOOP - 1)
2123#define MAX_NUM_VPORT_FABRIC (MAX_MULTI_ID_FABRIC - 1)
2124#define MAX_NUM_VHBA_LOOP (MAX_MULTI_ID_LOOP - 1)
2125#define MAX_NUM_VHBA_FABRIC (MAX_MULTI_ID_FABRIC - 1)
2126
2127/* 2122/*
2128 * Linux Host Adapter structure 2123 * Linux Host Adapter structure
2129 */ 2124 */
@@ -2161,6 +2156,7 @@ typedef struct scsi_qla_host {
2161 uint32_t gpsc_supported :1; 2156 uint32_t gpsc_supported :1;
2162 uint32_t vsan_enabled :1; 2157 uint32_t vsan_enabled :1;
2163 uint32_t npiv_supported :1; 2158 uint32_t npiv_supported :1;
2159 uint32_t fce_enabled :1;
2164 } flags; 2160 } flags;
2165 2161
2166 atomic_t loop_state; 2162 atomic_t loop_state;
@@ -2273,8 +2269,7 @@ typedef struct scsi_qla_host {
2273 2269
2274 int bars; 2270 int bars;
2275 device_reg_t __iomem *iobase; /* Base I/O address */ 2271 device_reg_t __iomem *iobase; /* Base I/O address */
2276 unsigned long pio_address; 2272 resource_size_t pio_address;
2277 unsigned long pio_length;
2278#define MIN_IOBASE_LEN 0x100 2273#define MIN_IOBASE_LEN 0x100
2279 2274
2280 /* ISP ring lock, rings, and indexes */ 2275 /* ISP ring lock, rings, and indexes */
@@ -2416,9 +2411,9 @@ typedef struct scsi_qla_host {
2416#define MBX_INTR_WAIT 2 2411#define MBX_INTR_WAIT 2
2417#define MBX_UPDATE_FLASH_ACTIVE 3 2412#define MBX_UPDATE_FLASH_ACTIVE 3
2418 2413
2419 struct semaphore mbx_cmd_sem; /* Serialialize mbx access */
2420 struct semaphore vport_sem; /* Virtual port synchronization */ 2414 struct semaphore vport_sem; /* Virtual port synchronization */
2421 struct semaphore mbx_intr_sem; /* Used for completion notification */ 2415 struct completion mbx_cmd_comp; /* Serialize mbx access */
2416 struct completion mbx_intr_comp; /* Used for completion notification */
2422 2417
2423 uint32_t mbx_flags; 2418 uint32_t mbx_flags;
2424#define MBX_IN_PROGRESS BIT_0 2419#define MBX_IN_PROGRESS BIT_0
@@ -2455,6 +2450,15 @@ typedef struct scsi_qla_host {
2455 dma_addr_t eft_dma; 2450 dma_addr_t eft_dma;
2456 void *eft; 2451 void *eft;
2457 2452
2453 struct dentry *dfs_dir;
2454 struct dentry *dfs_fce;
2455 dma_addr_t fce_dma;
2456 void *fce;
2457 uint32_t fce_bufs;
2458 uint16_t fce_mb[8];
2459 uint64_t fce_wr, fce_rd;
2460 struct mutex fce_mutex;
2461
2458 uint8_t host_str[16]; 2462 uint8_t host_str[16];
2459 uint32_t pci_attr; 2463 uint32_t pci_attr;
2460 uint16_t chip_revision; 2464 uint16_t chip_revision;
@@ -2507,7 +2511,7 @@ typedef struct scsi_qla_host {
2507 2511
2508 struct list_head vp_list; /* list of VP */ 2512 struct list_head vp_list; /* list of VP */
2509 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 2513 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
2510 uint8_t vp_idx_map[16]; 2514 unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) / sizeof(unsigned long)];
2511 uint16_t num_vhosts; /* number of vports created */ 2515 uint16_t num_vhosts; /* number of vports created */
2512 uint16_t num_vsans; /* number of vsan created */ 2516 uint16_t num_vsans; /* number of vsan created */
2513 uint16_t vp_idx; /* vport ID */ 2517 uint16_t vp_idx; /* vport ID */
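
The vp_idx_map re-declaration sizes the array in unsigned longs so the bitops
can be used without casts. With MAX_MULTI_ID_FABRIC at 256 (see the qla_fw.h
hunk below), (256 / 8) / sizeof(unsigned long) yields 4 longs on 64-bit hosts
and 8 on 32-bit, a 256-bit map either way. A sketch of the intended use:

        unsigned long vp_idx_map[(256 / 8) / sizeof(unsigned long)] = { 0 };
        int idx;

        set_bit(5, vp_idx_map);                         /* vport 5 allocated */
        idx = find_next_bit(vp_idx_map, 256, 1);        /* -> 5; bit 0 (the
                                                           physical port) is
                                                           skipped by starting
                                                           the search at 1 */
        clear_bit(5, vp_idx_map);                       /* vport 5 released  */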
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
new file mode 100644
index 000000000000..1479c60441c8
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -0,0 +1,175 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2005 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8
9#include <linux/debugfs.h>
10#include <linux/seq_file.h>
11
12static struct dentry *qla2x00_dfs_root;
13static atomic_t qla2x00_dfs_root_count;
14
15static int
16qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
17{
18 scsi_qla_host_t *ha = s->private;
19 uint32_t cnt;
20 uint32_t *fce;
21 uint64_t fce_start;
22
23 mutex_lock(&ha->fce_mutex);
24
25 seq_printf(s, "FCE Trace Buffer\n");
26 seq_printf(s, "In Pointer = %llx\n\n", ha->fce_wr);
27 seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
28 seq_printf(s, "FCE Enable Registers\n");
29 seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
30 ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
31 ha->fce_mb[5], ha->fce_mb[6]);
32
33 fce = (uint32_t *) ha->fce;
34 fce_start = (unsigned long long) ha->fce_dma;
35 for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
36 if (cnt % 8 == 0)
37 seq_printf(s, "\n%llx: ",
38 (unsigned long long)((cnt * 4) + fce_start));
39 else
40 seq_printf(s, " ");
41 seq_printf(s, "%08x", *fce++);
42 }
43
44 seq_printf(s, "\nEnd\n");
45
46 mutex_unlock(&ha->fce_mutex);
47
48 return 0;
49}
50
51static int
52qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
53{
54 scsi_qla_host_t *ha = inode->i_private;
55 int rval;
56
57 if (!ha->flags.fce_enabled)
58 goto out;
59
60 mutex_lock(&ha->fce_mutex);
61
62 /* Pause tracing to flush FCE buffers. */
63 rval = qla2x00_disable_fce_trace(ha, &ha->fce_wr, &ha->fce_rd);
64 if (rval)
65 qla_printk(KERN_WARNING, ha,
66 "DebugFS: Unable to disable FCE (%d).\n", rval);
67
68 ha->flags.fce_enabled = 0;
69
70 mutex_unlock(&ha->fce_mutex);
71out:
72 return single_open(file, qla2x00_dfs_fce_show, ha);
73}
74
75static int
76qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
77{
78 scsi_qla_host_t *ha = inode->i_private;
79 int rval;
80
81 if (ha->flags.fce_enabled)
82 goto out;
83
84 mutex_lock(&ha->fce_mutex);
85
86 /* Re-enable FCE tracing. */
87 ha->flags.fce_enabled = 1;
88 memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
89 rval = qla2x00_enable_fce_trace(ha, ha->fce_dma, ha->fce_bufs,
90 ha->fce_mb, &ha->fce_bufs);
91 if (rval) {
92 qla_printk(KERN_WARNING, ha,
93 "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
94 ha->flags.fce_enabled = 0;
95 }
96
97 mutex_unlock(&ha->fce_mutex);
98out:
99 return single_release(inode, file);
100}
101
102static const struct file_operations dfs_fce_ops = {
103 .open = qla2x00_dfs_fce_open,
104 .read = seq_read,
105 .llseek = seq_lseek,
106 .release = qla2x00_dfs_fce_release,
107};
108
109int
110qla2x00_dfs_setup(scsi_qla_host_t *ha)
111{
112 if (!IS_QLA25XX(ha))
113 goto out;
114 if (!ha->fce)
115 goto out;
116
117 if (qla2x00_dfs_root)
118 goto create_dir;
119
120 atomic_set(&qla2x00_dfs_root_count, 0);
121 qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
122 if (!qla2x00_dfs_root) {
123 qla_printk(KERN_NOTICE, ha,
124 "DebugFS: Unable to create root directory.\n");
125 goto out;
126 }
127
128create_dir:
129 if (ha->dfs_dir)
130 goto create_nodes;
131
132 mutex_init(&ha->fce_mutex);
133 ha->dfs_dir = debugfs_create_dir(ha->host_str, qla2x00_dfs_root);
134 if (!ha->dfs_dir) {
135 qla_printk(KERN_NOTICE, ha,
136 "DebugFS: Unable to create ha directory.\n");
137 goto out;
138 }
139
140 atomic_inc(&qla2x00_dfs_root_count);
141
142create_nodes:
143 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, ha,
144 &dfs_fce_ops);
145 if (!ha->dfs_fce) {
146 qla_printk(KERN_NOTICE, ha,
147 "DebugFS: Unable to fce node.\n");
148 goto out;
149 }
150out:
151 return 0;
152}
153
154int
155qla2x00_dfs_remove(scsi_qla_host_t *ha)
156{
157 if (ha->dfs_fce) {
158 debugfs_remove(ha->dfs_fce);
159 ha->dfs_fce = NULL;
160 }
161
162 if (ha->dfs_dir) {
163 debugfs_remove(ha->dfs_dir);
164 ha->dfs_dir = NULL;
165 atomic_dec(&qla2x00_dfs_root_count);
166 }
167
168 if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
169 qla2x00_dfs_root) {
170 debugfs_remove(qla2x00_dfs_root);
171 qla2x00_dfs_root = NULL;
172 }
173
174 return 0;
175}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 25364b1aaf12..9337e138ed63 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -952,9 +952,31 @@ struct device_reg_24xx {
952 uint32_t iobase_sdata; 952 uint32_t iobase_sdata;
953}; 953};
954 954
955/* Trace Control *************************************************************/
956
957#define TC_AEN_DISABLE 0
958
959#define TC_EFT_ENABLE 4
960#define TC_EFT_DISABLE 5
961
962#define TC_FCE_ENABLE 8
963#define TC_FCE_OPTIONS 0
964#define TC_FCE_DEFAULT_RX_SIZE 2112
965#define TC_FCE_DEFAULT_TX_SIZE 2112
966#define TC_FCE_DISABLE 9
967#define TC_FCE_DISABLE_TRACE BIT_0
968
955/* MID Support ***************************************************************/ 969/* MID Support ***************************************************************/
956 970
957#define MAX_MID_VPS 125 971#define MIN_MULTI_ID_FABRIC 64 /* Must be power-of-2. */
972#define MAX_MULTI_ID_FABRIC 256 /* ... */
973
974#define for_each_mapped_vp_idx(_ha, _idx) \
975 for (_idx = find_next_bit((_ha)->vp_idx_map, \
976 (_ha)->max_npiv_vports + 1, 1); \
977 _idx <= (_ha)->max_npiv_vports; \
978 _idx = find_next_bit((_ha)->vp_idx_map, \
979 (_ha)->max_npiv_vports + 1, _idx + 1)) \
958 980
959struct mid_conf_entry_24xx { 981struct mid_conf_entry_24xx {
960 uint16_t reserved_1; 982 uint16_t reserved_1;
@@ -982,7 +1004,7 @@ struct mid_init_cb_24xx {
982 uint16_t count; 1004 uint16_t count;
983 uint16_t options; 1005 uint16_t options;
984 1006
985 struct mid_conf_entry_24xx entries[MAX_MID_VPS]; 1007 struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
986}; 1008};
987 1009
988 1010
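
A hypothetical caller of the new iterator; the search starts at bit 1, so the
physical port's index 0 is never visited, and iteration is bounded by the
per-HA max_npiv_vports rather than the compile-time maximum:

        uint16_t vid;

        for_each_mapped_vp_idx(ha, vid)
                DEBUG2(printk("scsi(%ld): vp_idx %d is in use.\n",
                    ha->host_no, vid));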
@@ -1002,10 +1024,6 @@ struct mid_db_entry_24xx {
1002 uint8_t reserved_1; 1024 uint8_t reserved_1;
1003}; 1025};
1004 1026
1005struct mid_db_24xx {
1006 struct mid_db_entry_24xx entries[MAX_MID_VPS];
1007};
1008
1009 /* 1027 /*
1010 * Virtual Fabric ID type definition. 1028 * Virtual Fabric ID type definition.
1011 */ 1029 */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 09cb2a908059..ba35fc26ce6b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -65,33 +65,25 @@ extern int ql2xextended_error_logging;
65extern int ql2xqfullrampup; 65extern int ql2xqfullrampup;
66extern int num_hosts; 66extern int num_hosts;
67 67
68extern int qla2x00_loop_reset(scsi_qla_host_t *);
69
68/* 70/*
69 * Global Functions in qla_mid.c source file. 71 * Global Functions in qla_mid.c source file.
70 */ 72 */
71extern struct scsi_host_template qla2x00_driver_template;
72extern struct scsi_host_template qla24xx_driver_template; 73extern struct scsi_host_template qla24xx_driver_template;
73extern struct scsi_transport_template *qla2xxx_transport_vport_template; 74extern struct scsi_transport_template *qla2xxx_transport_vport_template;
74extern uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
75extern void qla2x00_timer(scsi_qla_host_t *); 75extern void qla2x00_timer(scsi_qla_host_t *);
76extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long); 76extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long);
77extern void qla2x00_stop_timer(scsi_qla_host_t *);
78extern uint32_t qla24xx_allocate_vp_id(scsi_qla_host_t *);
79extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *); 77extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *);
80extern int qla24xx_disable_vp (scsi_qla_host_t *); 78extern int qla24xx_disable_vp (scsi_qla_host_t *);
81extern int qla24xx_enable_vp (scsi_qla_host_t *); 79extern int qla24xx_enable_vp (scsi_qla_host_t *);
82extern void qla2x00_mem_free(scsi_qla_host_t *);
83extern int qla24xx_control_vp(scsi_qla_host_t *, int ); 80extern int qla24xx_control_vp(scsi_qla_host_t *, int );
84extern int qla24xx_modify_vp_config(scsi_qla_host_t *); 81extern int qla24xx_modify_vp_config(scsi_qla_host_t *);
85extern int qla2x00_send_change_request(scsi_qla_host_t *, uint16_t, uint16_t); 82extern int qla2x00_send_change_request(scsi_qla_host_t *, uint16_t, uint16_t);
86extern void qla2x00_vp_stop_timer(scsi_qla_host_t *); 83extern void qla2x00_vp_stop_timer(scsi_qla_host_t *);
87extern int qla24xx_configure_vhba (scsi_qla_host_t *); 84extern int qla24xx_configure_vhba (scsi_qla_host_t *);
88extern int qla24xx_get_vp_entry(scsi_qla_host_t *, uint16_t, int);
89extern int qla24xx_get_vp_database(scsi_qla_host_t *, uint16_t);
90extern int qla2x00_do_dpc_vp(scsi_qla_host_t *);
91extern void qla24xx_report_id_acquisition(scsi_qla_host_t *, 85extern void qla24xx_report_id_acquisition(scsi_qla_host_t *,
92 struct vp_rpt_id_entry_24xx *); 86 struct vp_rpt_id_entry_24xx *);
93extern scsi_qla_host_t * qla24xx_find_vhost_by_name(scsi_qla_host_t *,
94 uint8_t *);
95extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *); 87extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
96extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); 88extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
97extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); 89extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
@@ -103,8 +95,6 @@ extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
103extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); 95extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
104extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int); 96extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
105 97
106extern int qla2x00_down_timeout(struct semaphore *, unsigned long);
107
108extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); 98extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
109 99
110extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *); 100extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
@@ -113,7 +103,6 @@ extern void qla2xxx_wake_dpc(scsi_qla_host_t *);
113extern void qla2x00_alert_all_vps(scsi_qla_host_t *, uint16_t *); 103extern void qla2x00_alert_all_vps(scsi_qla_host_t *, uint16_t *);
114extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *); 104extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
115extern void qla2x00_vp_abort_isp(scsi_qla_host_t *); 105extern void qla2x00_vp_abort_isp(scsi_qla_host_t *);
116extern int qla24xx_vport_delete(struct fc_vport *);
117 106
118/* 107/*
119 * Global Function Prototypes in qla_iocb.c source file. 108 * Global Function Prototypes in qla_iocb.c source file.
@@ -222,21 +211,16 @@ extern int
222qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); 211qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
223 212
224extern int 213extern int
225qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, link_stat_t *, 214qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
226 uint16_t *); 215 dma_addr_t);
227 216
228extern int 217extern int
229qla24xx_get_isp_stats(scsi_qla_host_t *, uint32_t *, uint32_t, uint16_t *); 218qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
219 dma_addr_t);
230 220
231extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); 221extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *);
232extern int qla24xx_abort_target(fc_port_t *); 222extern int qla24xx_abort_target(fc_port_t *);
233 223
234extern int qla2x00_system_error(scsi_qla_host_t *);
235
236extern int
237qla2x00_get_serdes_params(scsi_qla_host_t *, uint16_t *, uint16_t *,
238 uint16_t *);
239
240extern int 224extern int
241qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); 225qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
242 226
@@ -244,13 +228,19 @@ extern int
244qla2x00_stop_firmware(scsi_qla_host_t *); 228qla2x00_stop_firmware(scsi_qla_host_t *);
245 229
246extern int 230extern int
247qla2x00_trace_control(scsi_qla_host_t *, uint16_t, dma_addr_t, uint16_t); 231qla2x00_enable_eft_trace(scsi_qla_host_t *, dma_addr_t, uint16_t);
232extern int
233qla2x00_disable_eft_trace(scsi_qla_host_t *);
248 234
249extern int 235extern int
250qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t); 236qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *,
237 uint32_t *);
251 238
252extern int 239extern int
253qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t *, uint16_t *); 240qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
241
242extern int
243qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t);
254 244
255extern int 245extern int
256qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *); 246qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *);
@@ -270,11 +260,7 @@ extern void qla2x00_free_irqs(scsi_qla_host_t *);
270/* 260/*
271 * Global Function Prototypes in qla_sup.c source file. 261 * Global Function Prototypes in qla_sup.c source file.
272 */ 262 */
273extern void qla2x00_lock_nvram_access(scsi_qla_host_t *);
274extern void qla2x00_unlock_nvram_access(scsi_qla_host_t *);
275extern void qla2x00_release_nvram_protection(scsi_qla_host_t *); 263extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
276extern uint16_t qla2x00_get_nvram_word(scsi_qla_host_t *, uint32_t);
277extern void qla2x00_write_nvram_word(scsi_qla_host_t *, uint32_t, uint16_t);
278extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *, 264extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
279 uint32_t, uint32_t); 265 uint32_t, uint32_t);
280extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 266extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
@@ -321,7 +307,6 @@ extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
321extern void qla2x00_dump_regs(scsi_qla_host_t *); 307extern void qla2x00_dump_regs(scsi_qla_host_t *);
322extern void qla2x00_dump_buffer(uint8_t *, uint32_t); 308extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
323extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *); 309extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *);
324extern void qla2x00_dump_pkt(void *);
325 310
326/* 311/*
327 * Global Function Prototypes in qla_gs.c source file. 312 * Global Function Prototypes in qla_gs.c source file.
@@ -356,4 +341,10 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
356extern void qla2x00_init_host_attr(scsi_qla_host_t *); 341extern void qla2x00_init_host_attr(scsi_qla_host_t *);
357extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); 342extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
358extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *); 343extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
344
345/*
346 * Global Function Prototypes in qla_dfs.c source file.
347 */
348extern int qla2x00_dfs_setup(scsi_qla_host_t *);
349extern int qla2x00_dfs_remove(scsi_qla_host_t *);
359#endif /* _QLA_GBL_H */ 350#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 191dafd89be0..d0633ca894be 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -732,9 +732,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
732{ 732{
733 int rval; 733 int rval;
734 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, 734 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
735 eft_size; 735 eft_size, fce_size;
736 dma_addr_t eft_dma; 736 dma_addr_t tc_dma;
737 void *eft; 737 void *tc;
738 738
739 if (ha->fw_dump) { 739 if (ha->fw_dump) {
740 qla_printk(KERN_WARNING, ha, 740 qla_printk(KERN_WARNING, ha,
@@ -743,7 +743,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
743 } 743 }
744 744
745 ha->fw_dumped = 0; 745 ha->fw_dumped = 0;
746 fixed_size = mem_size = eft_size = 0; 746 fixed_size = mem_size = eft_size = fce_size = 0;
747 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 747 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
748 fixed_size = sizeof(struct qla2100_fw_dump); 748 fixed_size = sizeof(struct qla2100_fw_dump);
749 } else if (IS_QLA23XX(ha)) { 749 } else if (IS_QLA23XX(ha)) {
@@ -758,21 +758,21 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
758 sizeof(uint32_t); 758 sizeof(uint32_t);
759 759
760 /* Allocate memory for Extended Trace Buffer. */ 760 /* Allocate memory for Extended Trace Buffer. */
761 eft = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &eft_dma, 761 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
762 GFP_KERNEL); 762 GFP_KERNEL);
763 if (!eft) { 763 if (!tc) {
764 qla_printk(KERN_WARNING, ha, "Unable to allocate " 764 qla_printk(KERN_WARNING, ha, "Unable to allocate "
765 "(%d KB) for EFT.\n", EFT_SIZE / 1024); 765 "(%d KB) for EFT.\n", EFT_SIZE / 1024);
766 goto cont_alloc; 766 goto cont_alloc;
767 } 767 }
768 768
769 rval = qla2x00_trace_control(ha, TC_ENABLE, eft_dma, 769 memset(tc, 0, EFT_SIZE);
770 EFT_NUM_BUFFERS); 770 rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
771 if (rval) { 771 if (rval) {
772 qla_printk(KERN_WARNING, ha, "Unable to initialize " 772 qla_printk(KERN_WARNING, ha, "Unable to initialize "
773 "EFT (%d).\n", rval); 773 "EFT (%d).\n", rval);
774 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, eft, 774 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
775 eft_dma); 775 tc_dma);
776 goto cont_alloc; 776 goto cont_alloc;
777 } 777 }
778 778
@@ -780,9 +780,40 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
780 EFT_SIZE / 1024); 780 EFT_SIZE / 1024);
781 781
782 eft_size = EFT_SIZE; 782 eft_size = EFT_SIZE;
783 memset(eft, 0, eft_size); 783 ha->eft_dma = tc_dma;
784 ha->eft_dma = eft_dma; 784 ha->eft = tc;
785 ha->eft = eft; 785
786 /* Allocate memory for Fibre Channel Event Buffer. */
787 if (!IS_QLA25XX(ha))
788 goto cont_alloc;
789
790 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
791 GFP_KERNEL);
792 if (!tc) {
793 qla_printk(KERN_WARNING, ha, "Unable to allocate "
794 "(%d KB) for FCE.\n", FCE_SIZE / 1024);
795 goto cont_alloc;
796 }
797
798 memset(tc, 0, FCE_SIZE);
799 rval = qla2x00_enable_fce_trace(ha, tc_dma, FCE_NUM_BUFFERS,
800 ha->fce_mb, &ha->fce_bufs);
801 if (rval) {
802 qla_printk(KERN_WARNING, ha, "Unable to initialize "
803 "FCE (%d).\n", rval);
804 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
805 tc_dma);
806 ha->flags.fce_enabled = 0;
807 goto cont_alloc;
808 }
809
810 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
811 FCE_SIZE / 1024);
812
813 fce_size = sizeof(struct qla2xxx_fce_chain) + EFT_SIZE;
814 ha->flags.fce_enabled = 1;
815 ha->fce_dma = tc_dma;
816 ha->fce = tc;
786 } 817 }
787cont_alloc: 818cont_alloc:
788 req_q_size = ha->request_q_length * sizeof(request_t); 819 req_q_size = ha->request_q_length * sizeof(request_t);
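
The FCE branch above repeats the EFT allocate-enable-or-roll-back shape.
Isolated, with enable_trace() standing in for qla2x00_enable_fce_trace():

        void *tc;
        dma_addr_t tc_dma;

        tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, GFP_KERNEL);
        if (!tc)
                goto cont_alloc;                /* dump still usable, just smaller */
        memset(tc, 0, FCE_SIZE);

        if (enable_trace(ha, tc_dma)) {         /* firmware refused the buffer  */
                dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
                goto cont_alloc;                /* ...so nothing is half-armed  */
        }
        ha->fce = tc;                           /* publish only on full success */
        ha->fce_dma = tc_dma;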
@@ -790,7 +821,7 @@ cont_alloc:
790 821
791 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 822 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
792 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 823 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
793 eft_size; 824 eft_size + fce_size;
794 825
795 ha->fw_dump = vmalloc(dump_size); 826 ha->fw_dump = vmalloc(dump_size);
796 if (!ha->fw_dump) { 827 if (!ha->fw_dump) {
@@ -922,9 +953,9 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
922 ha->flags.npiv_supported = 1; 953 ha->flags.npiv_supported = 1;
923 if ((!ha->max_npiv_vports) || 954 if ((!ha->max_npiv_vports) ||
924 ((ha->max_npiv_vports + 1) % 955 ((ha->max_npiv_vports + 1) %
925 MAX_MULTI_ID_FABRIC)) 956 MIN_MULTI_ID_FABRIC))
926 ha->max_npiv_vports = 957 ha->max_npiv_vports =
927 MAX_NUM_VPORT_FABRIC; 958 MIN_MULTI_ID_FABRIC - 1;
928 } 959 }
929 960
930 if (ql2xallocfwdump) 961 if (ql2xallocfwdump)
@@ -1162,7 +1193,10 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1162 1193
1163 DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no)); 1194 DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no));
1164 1195
1165 mid_init_cb->count = ha->max_npiv_vports; 1196 if (ha->flags.npiv_supported)
1197 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1198
1199 mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
1166 1200
1167 rval = qla2x00_init_firmware(ha, ha->init_cb_size); 1201 rval = qla2x00_init_firmware(ha, ha->init_cb_size);
1168 if (rval) { 1202 if (rval) {
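
The cpu_to_le16() in this hunk is not cosmetic: the init control block is
consumed by little-endian firmware, so the count must land in memory as the
bytes 7f 00 (for, say, 127 vports) on any host. A sketch:

        uint16_t vports = 127;

        mid_init_cb->count = cpu_to_le16(vports);
        /* in memory: 0x7f 0x00 on LE and BE hosts alike */
        mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
        /* __constant_ form: the byte swap happens at compile time */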
@@ -2566,14 +2600,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2566 2600
2567 /* Bypass virtual ports of the same host. */ 2601 /* Bypass virtual ports of the same host. */
2568 if (pha->num_vhosts) { 2602 if (pha->num_vhosts) {
2569 vp_index = find_next_bit( 2603 for_each_mapped_vp_idx(pha, vp_index) {
2570 (unsigned long *)pha->vp_idx_map,
2571 MAX_MULTI_ID_FABRIC + 1, 1);
2572
2573 for (;vp_index <= MAX_MULTI_ID_FABRIC;
2574 vp_index = find_next_bit(
2575 (unsigned long *)pha->vp_idx_map,
2576 MAX_MULTI_ID_FABRIC + 1, vp_index + 1)) {
2577 empty_vp_index = 1; 2604 empty_vp_index = 1;
2578 found_vp = 0; 2605 found_vp = 0;
2579 list_for_each_entry(vha, &pha->vp_list, 2606 list_for_each_entry(vha, &pha->vp_list,
@@ -2592,7 +2619,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2592 new_fcport->d_id.b24 == vha->d_id.b24) 2619 new_fcport->d_id.b24 == vha->d_id.b24)
2593 break; 2620 break;
2594 } 2621 }
2595 if (vp_index <= MAX_MULTI_ID_FABRIC) 2622
2623 if (vp_index <= pha->max_npiv_vports)
2596 continue; 2624 continue;
2597 } 2625 }
2598 2626
@@ -3245,7 +3273,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3245 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3273 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
3246 3274
3247 if (ha->eft) { 3275 if (ha->eft) {
3248 rval = qla2x00_trace_control(ha, TC_ENABLE, 3276 rval = qla2x00_enable_eft_trace(ha,
3249 ha->eft_dma, EFT_NUM_BUFFERS); 3277 ha->eft_dma, EFT_NUM_BUFFERS);
3250 if (rval) { 3278 if (rval) {
3251 qla_printk(KERN_WARNING, ha, 3279 qla_printk(KERN_WARNING, ha,
@@ -3253,6 +3281,21 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3253 "(%d).\n", rval); 3281 "(%d).\n", rval);
3254 } 3282 }
3255 } 3283 }
3284
3285 if (ha->fce) {
3286 ha->flags.fce_enabled = 1;
3287 memset(ha->fce, 0,
3288 fce_calc_size(ha->fce_bufs));
3289 rval = qla2x00_enable_fce_trace(ha,
3290 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
3291 &ha->fce_bufs);
3292 if (rval) {
3293 qla_printk(KERN_WARNING, ha,
3294 "Unable to reinitialize FCE "
3295 "(%d).\n", rval);
3296 ha->flags.fce_enabled = 0;
3297 }
3298 }
3256 } else { /* failed the ISP abort */ 3299 } else { /* failed the ISP abort */
3257 ha->flags.online = 1; 3300 ha->flags.online = 1;
3258 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3301 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 1104bd2eed40..642a0c3f09c6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -104,7 +104,7 @@ qla2100_intr_handler(int irq, void *dev_id)
104 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 104 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
105 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 105 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
106 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 106 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
107 up(&ha->mbx_intr_sem); 107 complete(&ha->mbx_intr_comp);
108 } 108 }
109 109
110 return (IRQ_HANDLED); 110 return (IRQ_HANDLED);
@@ -216,7 +216,7 @@ qla2300_intr_handler(int irq, void *dev_id)
216 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 216 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
217 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 217 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
218 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 218 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
219 up(&ha->mbx_intr_sem); 219 complete(&ha->mbx_intr_comp);
220 } 220 }
221 221
222 return (IRQ_HANDLED); 222 return (IRQ_HANDLED);
@@ -347,10 +347,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
347 break; 347 break;
348 348
349 case MBA_SYSTEM_ERR: /* System Error */ 349 case MBA_SYSTEM_ERR: /* System Error */
350 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
351 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
352 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
353
354 qla_printk(KERN_INFO, ha, 350 qla_printk(KERN_INFO, ha,
355 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", 351 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
356 mb[1], mb[2], mb[3]); 352 mb[1], mb[2], mb[3]);
@@ -579,12 +575,15 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
579 /* Check if the Vport has issued a SCR */ 575 /* Check if the Vport has issued a SCR */
580 if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags)) 576 if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
581 break; 577 break;
578 /* Only handle SCNs for our Vport index. */
579 if (ha->flags.npiv_supported && ha->vp_idx != mb[3])
580 break;
582 581
583 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", 582 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
584 ha->host_no)); 583 ha->host_no));
585 DEBUG(printk(KERN_INFO 584 DEBUG(printk(KERN_INFO
586 "scsi(%ld): RSCN database changed -- %04x %04x.\n", 585 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
587 ha->host_no, mb[1], mb[2])); 586 ha->host_no, mb[1], mb[2], mb[3]));
588 587
589 rscn_entry = (mb[1] << 16) | mb[2]; 588 rscn_entry = (mb[1] << 16) | mb[2];
590 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | 589 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
@@ -823,6 +822,35 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
823 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index); 822 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
824} 823}
825 824
825static inline void
826qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
827{
828 struct scsi_cmnd *cp = sp->cmd;
829
830 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
831 sense_len = SCSI_SENSE_BUFFERSIZE;
832
833 CMD_ACTUAL_SNSLEN(cp) = sense_len;
834 sp->request_sense_length = sense_len;
835 sp->request_sense_ptr = cp->sense_buffer;
836 if (sp->request_sense_length > 32)
837 sense_len = 32;
838
839 memcpy(cp->sense_buffer, sense_data, sense_len);
840
841 sp->request_sense_ptr += sense_len;
842 sp->request_sense_length -= sense_len;
843 if (sp->request_sense_length != 0)
844 sp->ha->status_srb = sp;
845
846 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
847 "cmd=%p pid=%ld\n", __func__, sp->ha->host_no, cp->device->channel,
848 cp->device->id, cp->device->lun, cp, cp->serial_number));
849 if (sense_len)
850 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
851 CMD_ACTUAL_SNSLEN(cp)));
852}
853
826/** 854/**
827 * qla2x00_status_entry() - Process a Status IOCB entry. 855 * qla2x00_status_entry() - Process a Status IOCB entry.
828 * @ha: SCSI driver HA context 856 * @ha: SCSI driver HA context
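
Both the CS_COMPLETE and CS_DATA_UNDERRUN cases carried a verbatim copy of
this sense-staging code; the helper removes the duplication. The underlying
mechanism, generically: a status IOCB carries at most 32 bytes of sense data,
so a cursor records what is still owed to later status-continuation entries.
A minimal sketch, copy_chunk() being hypothetical:

        /* Deliver up to 32 bytes now; the caller re-queues the command
         * while the return value is nonzero and more entries arrive. */
        static size_t copy_chunk(u8 *dst, const u8 *src, size_t remaining)
        {
                size_t n = remaining > 32 ? 32 : remaining;

                memcpy(dst, src, n);
                return remaining - n;
        }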
@@ -977,36 +1005,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
977 if (lscsi_status != SS_CHECK_CONDITION) 1005 if (lscsi_status != SS_CHECK_CONDITION)
978 break; 1006 break;
979 1007
980 /* Copy Sense Data into sense buffer. */ 1008 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
981 memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));
982
983 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1009 if (!(scsi_status & SS_SENSE_LEN_VALID))
984 break; 1010 break;
985 1011
986 if (sense_len >= sizeof(cp->sense_buffer)) 1012 qla2x00_handle_sense(sp, sense_data, sense_len);
987 sense_len = sizeof(cp->sense_buffer);
988
989 CMD_ACTUAL_SNSLEN(cp) = sense_len;
990 sp->request_sense_length = sense_len;
991 sp->request_sense_ptr = cp->sense_buffer;
992
993 if (sp->request_sense_length > 32)
994 sense_len = 32;
995
996 memcpy(cp->sense_buffer, sense_data, sense_len);
997
998 sp->request_sense_ptr += sense_len;
999 sp->request_sense_length -= sense_len;
1000 if (sp->request_sense_length != 0)
1001 ha->status_srb = sp;
1002
1003 DEBUG5(printk("%s(): Check condition Sense data, "
1004 "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n", __func__,
1005 ha->host_no, cp->device->channel, cp->device->id,
1006 cp->device->lun, cp, cp->serial_number));
1007 if (sense_len)
1008 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
1009 CMD_ACTUAL_SNSLEN(cp)));
1010 break; 1013 break;
1011 1014
1012 case CS_DATA_UNDERRUN: 1015 case CS_DATA_UNDERRUN:
@@ -1061,34 +1064,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1061 if (lscsi_status != SS_CHECK_CONDITION) 1064 if (lscsi_status != SS_CHECK_CONDITION)
1062 break; 1065 break;
1063 1066
1064 /* Copy Sense Data into sense buffer */ 1067 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1065 memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));
1066
1067 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1068 if (!(scsi_status & SS_SENSE_LEN_VALID))
1068 break; 1069 break;
1069 1070
1070 if (sense_len >= sizeof(cp->sense_buffer)) 1071 qla2x00_handle_sense(sp, sense_data, sense_len);
1071 sense_len = sizeof(cp->sense_buffer);
1072
1073 CMD_ACTUAL_SNSLEN(cp) = sense_len;
1074 sp->request_sense_length = sense_len;
1075 sp->request_sense_ptr = cp->sense_buffer;
1076
1077 if (sp->request_sense_length > 32)
1078 sense_len = 32;
1079
1080 memcpy(cp->sense_buffer, sense_data, sense_len);
1081
1082 sp->request_sense_ptr += sense_len;
1083 sp->request_sense_length -= sense_len;
1084 if (sp->request_sense_length != 0)
1085 ha->status_srb = sp;
1086
1087 DEBUG5(printk("%s(): Check condition Sense data, "
1088 "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n",
1089 __func__, ha->host_no, cp->device->channel,
1090 cp->device->id, cp->device->lun, cp,
1091 cp->serial_number));
1092 1072
1093 /* 1073 /*
1094 * In case of a Underrun condition, set both the lscsi 1074 * In case of a Underrun condition, set both the lscsi
@@ -1108,10 +1088,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1108 1088
1109 cp->result = DID_ERROR << 16 | lscsi_status; 1089 cp->result = DID_ERROR << 16 | lscsi_status;
1110 } 1090 }
1111
1112 if (sense_len)
1113 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
1114 CMD_ACTUAL_SNSLEN(cp)));
1115 } else { 1091 } else {
1116 /* 1092 /*
1117 * If RISC reports underrun and target does not report 1093 * If RISC reports underrun and target does not report
@@ -1621,7 +1597,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1621 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 1597 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1622 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 1598 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1623 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 1599 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1624 up(&ha->mbx_intr_sem); 1600 complete(&ha->mbx_intr_comp);
1625 } 1601 }
1626 1602
1627 return IRQ_HANDLED; 1603 return IRQ_HANDLED;
@@ -1758,7 +1734,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1758 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 1734 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1759 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 1735 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1760 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 1736 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1761 up(&ha->mbx_intr_sem); 1737 complete(&ha->mbx_intr_comp);
1762 } 1738 }
1763 1739
1764 return IRQ_HANDLED; 1740 return IRQ_HANDLED;
@@ -1853,6 +1829,18 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1853 goto skip_msix; 1829 goto skip_msix;
1854 } 1830 }
1855 1831
1832 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1833 (ha->pdev->subsystem_device == 0x7040 ||
1834 ha->pdev->subsystem_device == 0x7041 ||
1835 ha->pdev->subsystem_device == 0x1705)) {
1836 DEBUG2(qla_printk(KERN_WARNING, ha,
1837 "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
1838 ha->pdev->subsystem_vendor,
1839 ha->pdev->subsystem_device));
1840
1841 goto skip_msi;
1842 }
1843
1856 ret = qla24xx_enable_msix(ha); 1844 ret = qla24xx_enable_msix(ha);
1857 if (!ret) { 1845 if (!ret) {
1858 DEBUG2(qla_printk(KERN_INFO, ha, 1846 DEBUG2(qla_printk(KERN_INFO, ha,
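
The quirk above keys on PCI subsystem IDs: three HP-branded ISP2432 variants
(0x7040, 0x7041, 0x1705) get neither MSI-X nor MSI, since the goto lands past
both setup paths and the driver falls back to INTx. A table-driven equivalent
(hypothetical, not what the driver does):

        static const struct pci_quirk {
                unsigned short subvendor, subdevice;
        } msix_denylist[] = {
                { PCI_VENDOR_ID_HP, 0x7040 },
                { PCI_VENDOR_ID_HP, 0x7041 },
                { PCI_VENDOR_ID_HP, 0x1705 },
        };

        static int msix_denied(struct pci_dev *pdev)
        {
                int i;

                for (i = 0; i < ARRAY_SIZE(msix_denylist); i++)
                        if (pdev->subsystem_vendor == msix_denylist[i].subvendor &&
                            pdev->subsystem_device == msix_denylist[i].subdevice)
                                return 1;
                return 0;
        }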
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index ccd662a6f5dc..0c10c0b0fb73 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -8,19 +8,6 @@
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10 10
11static void
12qla2x00_mbx_sem_timeout(unsigned long data)
13{
14 struct semaphore *sem_ptr = (struct semaphore *)data;
15
16 DEBUG11(printk("qla2x00_sem_timeout: entered.\n"));
17
18 if (sem_ptr != NULL) {
19 up(sem_ptr);
20 }
21
22 DEBUG11(printk("qla2x00_mbx_sem_timeout: exiting.\n"));
23}
24 11
25/* 12/*
26 * qla2x00_mailbox_command 13 * qla2x00_mailbox_command
@@ -47,7 +34,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
47 int rval; 34 int rval;
48 unsigned long flags = 0; 35 unsigned long flags = 0;
49 device_reg_t __iomem *reg; 36 device_reg_t __iomem *reg;
50 struct timer_list tmp_intr_timer;
51 uint8_t abort_active; 37 uint8_t abort_active;
52 uint8_t io_lock_on; 38 uint8_t io_lock_on;
53 uint16_t command; 39 uint16_t command;
@@ -72,7 +58,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
72 * non ISP abort time. 58 * non ISP abort time.
73 */ 59 */
74 if (!abort_active) { 60 if (!abort_active) {
75 if (qla2x00_down_timeout(&ha->mbx_cmd_sem, mcp->tov * HZ)) { 61 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp,
62 mcp->tov * HZ)) {
76 /* Timeout occurred. Return error. */ 63 /* Timeout occurred. Return error. */
77 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " 64 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
78 "Exiting.\n", __func__, ha->host_no)); 65 "Exiting.\n", __func__, ha->host_no));
@@ -135,22 +122,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
135 /* Wait for mbx cmd completion until timeout */ 122 /* Wait for mbx cmd completion until timeout */
136 123
137 if (!abort_active && io_lock_on) { 124 if (!abort_active && io_lock_on) {
138 /* sleep on completion semaphore */
139 DEBUG11(printk("%s(%ld): INTERRUPT MODE. Initializing timer.\n",
140 __func__, ha->host_no));
141
142 init_timer(&tmp_intr_timer);
143 tmp_intr_timer.data = (unsigned long)&ha->mbx_intr_sem;
144 tmp_intr_timer.expires = jiffies + mcp->tov * HZ;
145 tmp_intr_timer.function =
146 (void (*)(unsigned long))qla2x00_mbx_sem_timeout;
147
148 DEBUG11(printk("%s(%ld): Adding timer.\n", __func__,
149 ha->host_no));
150 add_timer(&tmp_intr_timer);
151
152 DEBUG11(printk("%s(%ld): going to unlock & sleep. "
153 "time=0x%lx.\n", __func__, ha->host_no, jiffies));
154 125
155 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 126 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
156 127
@@ -160,17 +131,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
160 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); 131 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
161 spin_unlock_irqrestore(&ha->hardware_lock, flags); 132 spin_unlock_irqrestore(&ha->hardware_lock, flags);
162 133
163 /* Wait for either the timer to expire 134 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
164 * or the mbox completion interrupt
165 */
166 down(&ha->mbx_intr_sem);
167 135
168 DEBUG11(printk("%s(%ld): waking up. time=0x%lx\n", __func__,
169 ha->host_no, jiffies));
170 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 136 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
171 137
172 /* delete the timer */
173 del_timer(&tmp_intr_timer);
174 } else { 138 } else {
175 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 139 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
176 ha->host_no, command)); 140 ha->host_no, command));
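
This is the heart of the semaphore-to-completion conversion: the old code
needed a helper kernel timer whose only job was to up() the semaphore on
timeout, because down() cannot time out. wait_for_completion_timeout() folds
both into one call, returning 0 on timeout and nonzero otherwise. Both sides,
distilled:

        /* waiter -- the mailbox issuer */
        if (!wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ)) {
                /* timed out: no mailbox interrupt arrived within tov seconds */
        }

        /* waker -- the interrupt handlers converted earlier in this patch */
        complete(&ha->mbx_intr_comp);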
@@ -299,7 +263,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
299 263
300 /* Allow next mbx cmd to come in. */ 264 /* Allow next mbx cmd to come in. */
301 if (!abort_active) 265 if (!abort_active)
302 up(&ha->mbx_cmd_sem); 266 complete(&ha->mbx_cmd_comp);
303 267
304 if (rval) { 268 if (rval) {
305 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " 269 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
@@ -905,7 +869,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
905 869
906 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 870 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
907 mcp->mb[9] = ha->vp_idx; 871 mcp->mb[9] = ha->vp_idx;
908 mcp->out_mb = MBX_0; 872 mcp->out_mb = MBX_9|MBX_0;
909 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 873 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
910 mcp->tov = 30; 874 mcp->tov = 30;
911 mcp->flags = 0; 875 mcp->flags = 0;
@@ -1016,7 +980,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1016 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 980 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
1017 ha->host_no)); 981 ha->host_no));
1018 982
1019 if (ha->flags.npiv_supported) 983 if (ha->fw_attributes & BIT_2)
1020 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 984 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1021 else 985 else
1022 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; 986 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
@@ -2042,29 +2006,20 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
2042 */ 2006 */
2043int 2007int
2044qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id, 2008qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2045 link_stat_t *ret_buf, uint16_t *status) 2009 struct link_statistics *stats, dma_addr_t stats_dma)
2046{ 2010{
2047 int rval; 2011 int rval;
2048 mbx_cmd_t mc; 2012 mbx_cmd_t mc;
2049 mbx_cmd_t *mcp = &mc; 2013 mbx_cmd_t *mcp = &mc;
2050 link_stat_t *stat_buf; 2014 uint32_t *siter, *diter, dwords;
2051 dma_addr_t stat_buf_dma;
2052 2015
2053 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2016 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2054 2017
2055 stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma);
2056 if (stat_buf == NULL) {
2057 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
2058 __func__, ha->host_no));
2059 return BIT_0;
2060 }
2061 memset(stat_buf, 0, sizeof(link_stat_t));
2062
2063 mcp->mb[0] = MBC_GET_LINK_STATUS; 2018 mcp->mb[0] = MBC_GET_LINK_STATUS;
2064 mcp->mb[2] = MSW(stat_buf_dma); 2019 mcp->mb[2] = MSW(stats_dma);
2065 mcp->mb[3] = LSW(stat_buf_dma); 2020 mcp->mb[3] = LSW(stats_dma);
2066 mcp->mb[6] = MSW(MSD(stat_buf_dma)); 2021 mcp->mb[6] = MSW(MSD(stats_dma));
2067 mcp->mb[7] = LSW(MSD(stat_buf_dma)); 2022 mcp->mb[7] = LSW(MSD(stats_dma));
2068 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2023 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2069 mcp->in_mb = MBX_0; 2024 mcp->in_mb = MBX_0;
2070 if (IS_FWI2_CAPABLE(ha)) { 2025 if (IS_FWI2_CAPABLE(ha)) {
@@ -2089,78 +2044,43 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2089 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2044 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2090 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2045 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
2091 __func__, ha->host_no, mcp->mb[0])); 2046 __func__, ha->host_no, mcp->mb[0]));
2092 status[0] = mcp->mb[0]; 2047 rval = QLA_FUNCTION_FAILED;
2093 rval = BIT_1;
2094 } else { 2048 } else {
2095 /* copy over data -- firmware data is LE. */ 2049 /* Copy over data -- firmware data is LE. */
2096 ret_buf->link_fail_cnt = 2050 dwords = offsetof(struct link_statistics, unused1) / 4;
2097 le32_to_cpu(stat_buf->link_fail_cnt); 2051 siter = diter = &stats->link_fail_cnt;
2098 ret_buf->loss_sync_cnt = 2052 while (dwords--)
2099 le32_to_cpu(stat_buf->loss_sync_cnt); 2053 *diter++ = le32_to_cpu(*siter++);
2100 ret_buf->loss_sig_cnt =
2101 le32_to_cpu(stat_buf->loss_sig_cnt);
2102 ret_buf->prim_seq_err_cnt =
2103 le32_to_cpu(stat_buf->prim_seq_err_cnt);
2104 ret_buf->inval_xmit_word_cnt =
2105 le32_to_cpu(stat_buf->inval_xmit_word_cnt);
2106 ret_buf->inval_crc_cnt =
2107 le32_to_cpu(stat_buf->inval_crc_cnt);
2108
2109 DEBUG11(printk("%s(%ld): stat dump: fail_cnt=%d "
2110 "loss_sync=%d loss_sig=%d seq_err=%d "
2111 "inval_xmt_word=%d inval_crc=%d.\n", __func__,
2112 ha->host_no, stat_buf->link_fail_cnt,
2113 stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt,
2114 stat_buf->prim_seq_err_cnt,
2115 stat_buf->inval_xmit_word_cnt,
2116 stat_buf->inval_crc_cnt));
2117 } 2054 }
2118 } else { 2055 } else {
2119 /* Failed. */ 2056 /* Failed. */
2120 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2057 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2121 ha->host_no, rval)); 2058 ha->host_no, rval));
2122 rval = BIT_1;
2123 } 2059 }
2124 2060
2125 dma_pool_free(ha->s_dma_pool, stat_buf, stat_buf_dma);
2126
2127 return rval; 2061 return rval;
2128} 2062}
2129 2063
2130int 2064int
2131qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords, 2065qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
2132 uint16_t *status) 2066 dma_addr_t stats_dma)
2133{ 2067{
2134 int rval; 2068 int rval;
2135 mbx_cmd_t mc; 2069 mbx_cmd_t mc;
2136 mbx_cmd_t *mcp = &mc; 2070 mbx_cmd_t *mcp = &mc;
2137 uint32_t *sbuf, *siter; 2071 uint32_t *siter, *diter, dwords;
2138 dma_addr_t sbuf_dma;
2139 2072
2140 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2073 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2141 2074
2142 if (dwords > (DMA_POOL_SIZE / 4)) {
2143 DEBUG2_3_11(printk("%s(%ld): Unable to retrieve %d DWORDs "
2144 "(max %d).\n", __func__, ha->host_no, dwords,
2145 DMA_POOL_SIZE / 4));
2146 return BIT_0;
2147 }
2148 sbuf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &sbuf_dma);
2149 if (sbuf == NULL) {
2150 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
2151 __func__, ha->host_no));
2152 return BIT_0;
2153 }
2154 memset(sbuf, 0, DMA_POOL_SIZE);
2155
2156 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; 2075 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2157 mcp->mb[2] = MSW(sbuf_dma); 2076 mcp->mb[2] = MSW(stats_dma);
2158 mcp->mb[3] = LSW(sbuf_dma); 2077 mcp->mb[3] = LSW(stats_dma);
2159 mcp->mb[6] = MSW(MSD(sbuf_dma)); 2078 mcp->mb[6] = MSW(MSD(stats_dma));
2160 mcp->mb[7] = LSW(MSD(sbuf_dma)); 2079 mcp->mb[7] = LSW(MSD(stats_dma));
2161 mcp->mb[8] = dwords; 2080 mcp->mb[8] = sizeof(struct link_statistics) / 4;
2081 mcp->mb[9] = ha->vp_idx;
2162 mcp->mb[10] = 0; 2082 mcp->mb[10] = 0;
2163 mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2083 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2164 mcp->in_mb = MBX_2|MBX_1|MBX_0; 2084 mcp->in_mb = MBX_2|MBX_1|MBX_0;
2165 mcp->tov = 30; 2085 mcp->tov = 30;
2166 mcp->flags = IOCTL_CMD; 2086 mcp->flags = IOCTL_CMD;
@@ -2170,23 +2090,20 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords,
2170 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2090 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2171 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2091 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
2172 __func__, ha->host_no, mcp->mb[0])); 2092 __func__, ha->host_no, mcp->mb[0]));
2173 status[0] = mcp->mb[0]; 2093 rval = QLA_FUNCTION_FAILED;
2174 rval = BIT_1;
2175 } else { 2094 } else {
2176 /* Copy over data -- firmware data is LE. */ 2095 /* Copy over data -- firmware data is LE. */
2177 siter = sbuf; 2096 dwords = sizeof(struct link_statistics) / 4;
2097 siter = diter = &stats->link_fail_cnt;
2178 while (dwords--) 2098 while (dwords--)
2179 *dwbuf++ = le32_to_cpu(*siter++); 2099 *diter++ = le32_to_cpu(*siter++);
2180 } 2100 }
2181 } else { 2101 } else {
2182 /* Failed. */ 2102 /* Failed. */
2183 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2103 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2184 ha->host_no, rval)); 2104 ha->host_no, rval));
2185 rval = BIT_1;
2186 } 2105 }
2187 2106
2188 dma_pool_free(ha->s_dma_pool, sbuf, sbuf_dma);
2189
2190 return rval; 2107 return rval;
2191} 2108}
2192 2109
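Both statistics commands now take a caller-owned DMA buffer and scatter its bus address across 16-bit mailbox registers: mb[2]/mb[3] carry the low dword, mb[6]/mb[7] the high dword. The sketch below checks that split in userspace; the LSW/MSW/MSD macros mirror the driver's (LSD is local shorthand here — the driver applies MSW directly to the 64-bit address, which truncates through the same uint32_t cast):

    #include <stdint.h>
    #include <assert.h>

    #define LSW(x)  ((uint16_t)(x))
    #define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))
    #define LSD(x)  ((uint32_t)(x))
    #define MSD(x)  ((uint32_t)((uint64_t)(x) >> 32))

    static void load_dma_addr(uint16_t mb[16], uint64_t dma)
    {
            mb[2] = MSW(LSD(dma));  /* bits 31..16 */
            mb[3] = LSW(LSD(dma));  /* bits 15..0  */
            mb[6] = MSW(MSD(dma));  /* bits 63..48 */
            mb[7] = LSW(MSD(dma));  /* bits 47..32 */
    }

    int main(void)
    {
            uint16_t mb[16] = { 0 };

            load_dma_addr(mb, 0x1122334455667788ULL);
            assert(mb[2] == 0x5566 && mb[3] == 0x7788);
            assert(mb[6] == 0x1122 && mb[7] == 0x3344);
            return 0;
    }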
@@ -2331,6 +2248,8 @@ atarget_done:
2331 return rval; 2248 return rval;
2332} 2249}
2333 2250
2251#if 0
2252
2334int 2253int
2335qla2x00_system_error(scsi_qla_host_t *ha) 2254qla2x00_system_error(scsi_qla_host_t *ha)
2336{ 2255{
@@ -2360,47 +2279,7 @@ qla2x00_system_error(scsi_qla_host_t *ha)
2360 return rval; 2279 return rval;
2361} 2280}
2362 2281
2363/** 2282#endif /* 0 */
2364 * qla2x00_get_serdes_params() -
2365 * @ha: HA context
2366 *
2367 * Returns
2368 */
2369int
2370qla2x00_get_serdes_params(scsi_qla_host_t *ha, uint16_t *sw_em_1g,
2371 uint16_t *sw_em_2g, uint16_t *sw_em_4g)
2372{
2373 int rval;
2374 mbx_cmd_t mc;
2375 mbx_cmd_t *mcp = &mc;
2376
2377 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2378
2379 mcp->mb[0] = MBC_SERDES_PARAMS;
2380 mcp->mb[1] = 0;
2381 mcp->out_mb = MBX_1|MBX_0;
2382 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_0;
2383 mcp->tov = 30;
2384 mcp->flags = 0;
2385 rval = qla2x00_mailbox_command(ha, mcp);
2386
2387 if (rval != QLA_SUCCESS) {
2388 /*EMPTY*/
2389 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
2390 ha->host_no, rval, mcp->mb[0]));
2391 } else {
2392 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2393
2394 if (sw_em_1g)
2395 *sw_em_1g = mcp->mb[2];
2396 if (sw_em_2g)
2397 *sw_em_2g = mcp->mb[3];
2398 if (sw_em_4g)
2399 *sw_em_4g = mcp->mb[4];
2400 }
2401
2402 return rval;
2403}
2404 2283
2405/** 2284/**
2406 * qla2x00_set_serdes_params() - 2285 * qla2x00_set_serdes_params() -
@@ -2471,7 +2350,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *ha)
2471} 2350}
2472 2351
2473int 2352int
2474qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma, 2353qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma,
2475 uint16_t buffers) 2354 uint16_t buffers)
2476{ 2355{
2477 int rval; 2356 int rval;
@@ -2484,22 +2363,18 @@ qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma,
2484 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2363 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2485 2364
2486 mcp->mb[0] = MBC_TRACE_CONTROL; 2365 mcp->mb[0] = MBC_TRACE_CONTROL;
2487 mcp->mb[1] = ctrl; 2366 mcp->mb[1] = TC_EFT_ENABLE;
2488 mcp->out_mb = MBX_1|MBX_0; 2367 mcp->mb[2] = LSW(eft_dma);
2368 mcp->mb[3] = MSW(eft_dma);
2369 mcp->mb[4] = LSW(MSD(eft_dma));
2370 mcp->mb[5] = MSW(MSD(eft_dma));
2371 mcp->mb[6] = buffers;
2372 mcp->mb[7] = TC_AEN_DISABLE;
2373 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2489 mcp->in_mb = MBX_1|MBX_0; 2374 mcp->in_mb = MBX_1|MBX_0;
2490 if (ctrl == TC_ENABLE) {
2491 mcp->mb[2] = LSW(eft_dma);
2492 mcp->mb[3] = MSW(eft_dma);
2493 mcp->mb[4] = LSW(MSD(eft_dma));
2494 mcp->mb[5] = MSW(MSD(eft_dma));
2495 mcp->mb[6] = buffers;
2496 mcp->mb[7] = 0;
2497 mcp->out_mb |= MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2;
2498 }
2499 mcp->tov = 30; 2375 mcp->tov = 30;
2500 mcp->flags = 0; 2376 mcp->flags = 0;
2501 rval = qla2x00_mailbox_command(ha, mcp); 2377 rval = qla2x00_mailbox_command(ha, mcp);
2502
2503 if (rval != QLA_SUCCESS) { 2378 if (rval != QLA_SUCCESS) {
2504 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2379 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2505 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2380 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
@@ -2511,8 +2386,7 @@ qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma,
2511} 2386}
2512 2387
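Splitting the multiplexed qla2x00_trace_control() into enable/disable variants lets each one build its out_mb mask unconditionally instead of OR-ing bits in under "if (ctrl == TC_ENABLE)". The mask itself is a contract with qla2x00_mailbox_command(): bit n set means "load mb[n] before firing the command". A userspace model of that contract, not the driver's actual dispatcher:

    #include <stdint.h>
    #include <stdio.h>

    /* Walk out_mb and "load" exactly the registers it names; the real
     * dispatcher in qla_mbx.c consumes the mask the same way. */
    static void load_out_mailboxes(const uint16_t mb[32], uint32_t out_mb)
    {
            for (unsigned int i = 0; out_mb; i++, out_mb >>= 1)
                    if (out_mb & 1u)
                            printf("mbx%u <- 0x%04x\n", i, mb[i]);
    }

With a fixed mask like MBX_7|...|MBX_0 above, the set of loaded registers is visible at a glance, which the old conditional construction obscured.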
2513int 2388int
2514qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr, 2389qla2x00_disable_eft_trace(scsi_qla_host_t *ha)
2515 uint16_t off, uint16_t count)
2516{ 2390{
2517 int rval; 2391 int rval;
2518 mbx_cmd_t mc; 2392 mbx_cmd_t mc;
@@ -2523,24 +2397,16 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
2523 2397
2524 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2398 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2525 2399
2526 mcp->mb[0] = MBC_READ_SFP; 2400 mcp->mb[0] = MBC_TRACE_CONTROL;
2527 mcp->mb[1] = addr; 2401 mcp->mb[1] = TC_EFT_DISABLE;
2528 mcp->mb[2] = MSW(sfp_dma); 2402 mcp->out_mb = MBX_1|MBX_0;
2529 mcp->mb[3] = LSW(sfp_dma); 2403 mcp->in_mb = MBX_1|MBX_0;
2530 mcp->mb[6] = MSW(MSD(sfp_dma));
2531 mcp->mb[7] = LSW(MSD(sfp_dma));
2532 mcp->mb[8] = count;
2533 mcp->mb[9] = off;
2534 mcp->mb[10] = 0;
2535 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2536 mcp->in_mb = MBX_0;
2537 mcp->tov = 30; 2404 mcp->tov = 30;
2538 mcp->flags = 0; 2405 mcp->flags = 0;
2539 rval = qla2x00_mailbox_command(ha, mcp); 2406 rval = qla2x00_mailbox_command(ha, mcp);
2540
2541 if (rval != QLA_SUCCESS) { 2407 if (rval != QLA_SUCCESS) {
2542 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 2408 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2543 ha->host_no, rval, mcp->mb[0])); 2409 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2544 } else { 2410 } else {
2545 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2411 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2546 } 2412 }
@@ -2549,176 +2415,168 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
2549} 2415}
2550 2416
2551int 2417int
2552qla2x00_get_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id, 2418qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
2553 uint16_t *port_speed, uint16_t *mb) 2419 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
2554{ 2420{
2555 int rval; 2421 int rval;
2556 mbx_cmd_t mc; 2422 mbx_cmd_t mc;
2557 mbx_cmd_t *mcp = &mc; 2423 mbx_cmd_t *mcp = &mc;
2558 2424
2559 if (!IS_IIDMA_CAPABLE(ha)) 2425 if (!IS_QLA25XX(ha))
2560 return QLA_FUNCTION_FAILED; 2426 return QLA_FUNCTION_FAILED;
2561 2427
2562 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2428 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2563 2429
2564 mcp->mb[0] = MBC_PORT_PARAMS; 2430 mcp->mb[0] = MBC_TRACE_CONTROL;
2565 mcp->mb[1] = loop_id; 2431 mcp->mb[1] = TC_FCE_ENABLE;
2566 mcp->mb[2] = mcp->mb[3] = mcp->mb[4] = mcp->mb[5] = 0; 2432 mcp->mb[2] = LSW(fce_dma);
2567 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2433 mcp->mb[3] = MSW(fce_dma);
2568 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 2434 mcp->mb[4] = LSW(MSD(fce_dma));
2435 mcp->mb[5] = MSW(MSD(fce_dma));
2436 mcp->mb[6] = buffers;
2437 mcp->mb[7] = TC_AEN_DISABLE;
2438 mcp->mb[8] = 0;
2439 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
2440 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
2441 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
2442 MBX_1|MBX_0;
2443 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2569 mcp->tov = 30; 2444 mcp->tov = 30;
2570 mcp->flags = 0; 2445 mcp->flags = 0;
2571 rval = qla2x00_mailbox_command(ha, mcp); 2446 rval = qla2x00_mailbox_command(ha, mcp);
2572
2573 /* Return mailbox statuses. */
2574 if (mb != NULL) {
2575 mb[0] = mcp->mb[0];
2576 mb[1] = mcp->mb[1];
2577 mb[3] = mcp->mb[3];
2578 mb[4] = mcp->mb[4];
2579 mb[5] = mcp->mb[5];
2580 }
2581
2582 if (rval != QLA_SUCCESS) { 2447 if (rval != QLA_SUCCESS) {
2583 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2448 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2584 ha->host_no, rval)); 2449 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2585 } else { 2450 } else {
2586 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2451 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2587 if (port_speed) 2452
2588 *port_speed = mcp->mb[3]; 2453 if (mb)
2454 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
2455 if (dwords)
2456 *dwords = mcp->mb[6];
2589 } 2457 }
2590 2458
2591 return rval; 2459 return rval;
2592} 2460}
2593 2461
2594int 2462int
2595qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id, 2463qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
2596 uint16_t port_speed, uint16_t *mb)
2597{ 2464{
2598 int rval; 2465 int rval;
2599 mbx_cmd_t mc; 2466 mbx_cmd_t mc;
2600 mbx_cmd_t *mcp = &mc; 2467 mbx_cmd_t *mcp = &mc;
2601 2468
2602 if (!IS_IIDMA_CAPABLE(ha)) 2469 if (!IS_FWI2_CAPABLE(ha))
2603 return QLA_FUNCTION_FAILED; 2470 return QLA_FUNCTION_FAILED;
2604 2471
2605 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2472 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2606 2473
2607 mcp->mb[0] = MBC_PORT_PARAMS; 2474 mcp->mb[0] = MBC_TRACE_CONTROL;
2608 mcp->mb[1] = loop_id; 2475 mcp->mb[1] = TC_FCE_DISABLE;
2609 mcp->mb[2] = BIT_0; 2476 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
2610 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); 2477 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2611 mcp->mb[4] = mcp->mb[5] = 0; 2478 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
2612 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2479 MBX_1|MBX_0;
2613 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
2614 mcp->tov = 30; 2480 mcp->tov = 30;
2615 mcp->flags = 0; 2481 mcp->flags = 0;
2616 rval = qla2x00_mailbox_command(ha, mcp); 2482 rval = qla2x00_mailbox_command(ha, mcp);
2617
2618 /* Return mailbox statuses. */
2619 if (mb != NULL) {
2620 mb[0] = mcp->mb[0];
2621 mb[1] = mcp->mb[1];
2622 mb[3] = mcp->mb[3];
2623 mb[4] = mcp->mb[4];
2624 mb[5] = mcp->mb[5];
2625 }
2626
2627 if (rval != QLA_SUCCESS) { 2483 if (rval != QLA_SUCCESS) {
2628 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2484 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2629 ha->host_no, rval)); 2485 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2630 } else { 2486 } else {
2631 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2487 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2488
2489 if (wr)
2490 *wr = (uint64_t) mcp->mb[5] << 48 |
2491 (uint64_t) mcp->mb[4] << 32 |
2492 (uint64_t) mcp->mb[3] << 16 |
2493 (uint64_t) mcp->mb[2];
2494 if (rd)
2495 *rd = (uint64_t) mcp->mb[9] << 48 |
2496 (uint64_t) mcp->mb[8] << 32 |
2497 (uint64_t) mcp->mb[7] << 16 |
2498 (uint64_t) mcp->mb[6];
2632 } 2499 }
2633 2500
2634 return rval; 2501 return rval;
2635} 2502}
2636 2503
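The FCE disable path decodes two 64-bit trace pointers from four consecutive 16-bit mailbox words each, least-significant word in the lowest register: mb[2..5] for the write pointer, mb[6..9] for the read pointer. A self-checking sketch of that reassembly:

    #include <stdint.h>
    #include <assert.h>

    static uint64_t mbx_to_u64(const uint16_t *w)
    {
            return (uint64_t)w[3] << 48 | (uint64_t)w[2] << 32 |
                   (uint64_t)w[1] << 16 | (uint64_t)w[0];
    }

    int main(void)
    {
            uint16_t mb[10] = { 0 };

            mb[2] = 0x7788; mb[3] = 0x5566; mb[4] = 0x3344; mb[5] = 0x1122;
            assert(mbx_to_u64(&mb[2]) == 0x1122334455667788ULL);
            return 0;
    }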
2637/*
2638 * qla24xx_get_vp_database
2639 * Get the VP's database for all configured ports.
2640 *
2641 * Input:
2642 * ha = adapter block pointer.
2643 * size = size of initialization control block.
2644 *
2645 * Returns:
2646 * qla2x00 local function return status code.
2647 *
2648 * Context:
2649 * Kernel context.
2650 */
2651int 2504int
2652qla24xx_get_vp_database(scsi_qla_host_t *ha, uint16_t size) 2505qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
2506 uint16_t off, uint16_t count)
2653{ 2507{
2654 int rval; 2508 int rval;
2655 mbx_cmd_t mc; 2509 mbx_cmd_t mc;
2656 mbx_cmd_t *mcp = &mc; 2510 mbx_cmd_t *mcp = &mc;
2657 2511
2658 DEBUG11(printk("scsi(%ld):%s - entered.\n", 2512 if (!IS_FWI2_CAPABLE(ha))
2659 ha->host_no, __func__)); 2513 return QLA_FUNCTION_FAILED;
2660 2514
2661 mcp->mb[0] = MBC_MID_GET_VP_DATABASE; 2515 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2662 mcp->mb[2] = MSW(ha->init_cb_dma); 2516
2663 mcp->mb[3] = LSW(ha->init_cb_dma); 2517 mcp->mb[0] = MBC_READ_SFP;
2664 mcp->mb[4] = 0; 2518 mcp->mb[1] = addr;
2665 mcp->mb[5] = 0; 2519 mcp->mb[2] = MSW(sfp_dma);
2666 mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); 2520 mcp->mb[3] = LSW(sfp_dma);
2667 mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); 2521 mcp->mb[6] = MSW(MSD(sfp_dma));
2668 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2522 mcp->mb[7] = LSW(MSD(sfp_dma));
2669 mcp->in_mb = MBX_1|MBX_0; 2523 mcp->mb[8] = count;
2670 mcp->buf_size = size; 2524 mcp->mb[9] = off;
2671 mcp->flags = MBX_DMA_OUT; 2525 mcp->mb[10] = 0;
2672 mcp->tov = MBX_TOV_SECONDS; 2526 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2527 mcp->in_mb = MBX_0;
2528 mcp->tov = 30;
2529 mcp->flags = 0;
2673 rval = qla2x00_mailbox_command(ha, mcp); 2530 rval = qla2x00_mailbox_command(ha, mcp);
2674 2531
2675 if (rval != QLA_SUCCESS) { 2532 if (rval != QLA_SUCCESS) {
2676 /*EMPTY*/ 2533 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
2677 DEBUG2_3_11(printk("%s(%ld): failed=%x " 2534 ha->host_no, rval, mcp->mb[0]));
2678 "mb0=%x.\n",
2679 __func__, ha->host_no, rval, mcp->mb[0]));
2680 } else { 2535 } else {
2681 /*EMPTY*/ 2536 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2682 DEBUG11(printk("%s(%ld): done.\n",
2683 __func__, ha->host_no));
2684 } 2537 }
2685 2538
2686 return rval; 2539 return rval;
2687} 2540}
2688 2541
2689int 2542int
2690qla24xx_get_vp_entry(scsi_qla_host_t *ha, uint16_t size, int vp_id) 2543qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2544 uint16_t port_speed, uint16_t *mb)
2691{ 2545{
2692 int rval; 2546 int rval;
2693 mbx_cmd_t mc; 2547 mbx_cmd_t mc;
2694 mbx_cmd_t *mcp = &mc; 2548 mbx_cmd_t *mcp = &mc;
2695 2549
2550 if (!IS_IIDMA_CAPABLE(ha))
2551 return QLA_FUNCTION_FAILED;
2552
2696 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2553 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2697 2554
2698 mcp->mb[0] = MBC_MID_GET_VP_ENTRY; 2555 mcp->mb[0] = MBC_PORT_PARAMS;
2699 mcp->mb[2] = MSW(ha->init_cb_dma); 2556 mcp->mb[1] = loop_id;
2700 mcp->mb[3] = LSW(ha->init_cb_dma); 2557 mcp->mb[2] = BIT_0;
2701 mcp->mb[4] = 0; 2558 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
2702 mcp->mb[5] = 0; 2559 mcp->mb[4] = mcp->mb[5] = 0;
2703 mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); 2560 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2704 mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); 2561 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
2705 mcp->mb[9] = vp_id;
2706 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2707 mcp->in_mb = MBX_0;
2708 mcp->buf_size = size;
2709 mcp->flags = MBX_DMA_OUT;
2710 mcp->tov = 30; 2562 mcp->tov = 30;
2563 mcp->flags = 0;
2711 rval = qla2x00_mailbox_command(ha, mcp); 2564 rval = qla2x00_mailbox_command(ha, mcp);
2712 2565
2566 /* Return mailbox statuses. */
2567 if (mb != NULL) {
2568 mb[0] = mcp->mb[0];
2569 mb[1] = mcp->mb[1];
2570 mb[3] = mcp->mb[3];
2571 mb[4] = mcp->mb[4];
2572 mb[5] = mcp->mb[5];
2573 }
2574
2713 if (rval != QLA_SUCCESS) { 2575 if (rval != QLA_SUCCESS) {
2714 /*EMPTY*/ 2576 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2715 DEBUG2_3_11(printk("qla24xx_get_vp_entry(%ld): failed=%x " 2577 ha->host_no, rval));
2716 "mb0=%x.\n",
2717 ha->host_no, rval, mcp->mb[0]));
2718 } else { 2578 } else {
2719 /*EMPTY*/ 2579 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2720 DEBUG11(printk("qla24xx_get_vp_entry(%ld): done.\n",
2721 ha->host_no));
2722 } 2580 }
2723 2581
2724 return rval; 2582 return rval;
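In the relocated qla2x00_set_idma_speed(), mb[2] = BIT_0 selects the "set" operation of MBC_PORT_PARAMS and only the low three bits of the requested speed travel in mb[3], hence the mask. A sketch of the payload; the exact speed-code-to-Gb/s mapping is firmware-defined and not spelled out in this diff:

    #include <stdint.h>

    #define BIT_0 (1u << 0)
    #define BIT_1 (1u << 1)
    #define BIT_2 (1u << 2)

    static void fill_port_params(uint16_t mb[16], uint16_t loop_id,
                                 uint16_t port_speed)
    {
            mb[1] = loop_id;
            mb[2] = BIT_0;                          /* set, don't just query */
            mb[3] = port_speed & (BIT_2 | BIT_1 | BIT_0);
            mb[4] = mb[5] = 0;
    }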
@@ -2873,7 +2731,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
2873 DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__, 2731 DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
2874 ha->host_no, vp_index)); 2732 ha->host_no, vp_index));
2875 2733
2876 if (vp_index == 0 || vp_index >= MAX_MULTI_ID_LOOP) 2734 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
2877 return QLA_PARAMETER_ERROR; 2735 return QLA_PARAMETER_ERROR;
2878 2736
2879 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma); 2737 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 821ee74aadc6..cf784cdafb01 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -39,7 +39,7 @@ qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
39 } 39 }
40} 40}
41 41
42uint32_t 42static uint32_t
43qla24xx_allocate_vp_id(scsi_qla_host_t *vha) 43qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
44{ 44{
45 uint32_t vp_id; 45 uint32_t vp_id;
@@ -47,16 +47,15 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
47 47
48 /* Find an empty slot and assign an vp_id */ 48 /* Find an empty slot and assign an vp_id */
49 down(&ha->vport_sem); 49 down(&ha->vport_sem);
50 vp_id = find_first_zero_bit((unsigned long *)ha->vp_idx_map, 50 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
51 MAX_MULTI_ID_FABRIC); 51 if (vp_id > ha->max_npiv_vports) {
52 if (vp_id > MAX_MULTI_ID_FABRIC) { 52 DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n",
53 DEBUG15(printk ("vp_id %d is bigger than MAX_MULTI_ID_FABRID\n", 53 vp_id, ha->max_npiv_vports));
54 vp_id));
55 up(&ha->vport_sem); 54 up(&ha->vport_sem);
56 return vp_id; 55 return vp_id;
57 } 56 }
58 57
59 set_bit(vp_id, (unsigned long *)ha->vp_idx_map); 58 set_bit(vp_id, ha->vp_idx_map);
60 ha->num_vhosts++; 59 ha->num_vhosts++;
61 vha->vp_idx = vp_id; 60 vha->vp_idx = vp_id;
62 list_add_tail(&vha->vp_list, &ha->vp_list); 61 list_add_tail(&vha->vp_list, &ha->vp_list);
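The vp_id allocator above is a first-fit bitmap scan bounded by the firmware-reported max_npiv_vports rather than the old compile-time MAX_MULTI_ID_FABRIC, with bit 0 permanently claimed by the physical port. A userspace model (the kernel does this with find_first_zero_bit()/set_bit() while holding ha->vport_sem; locking and the bitmap limit are illustrative here):

    #include <limits.h>

    #define MAX_VPORTS      127     /* illustrative, not the HBA limit */
    #define BITS_PER_LONG   (CHAR_BIT * sizeof(unsigned long))
    #define MAP_WORDS       ((MAX_VPORTS + 1 + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static unsigned long vp_idx_map[MAP_WORDS] = { 1UL };  /* bit 0 = physical port */

    static unsigned int alloc_vp_id(void)
    {
            for (unsigned int id = 1; id <= MAX_VPORTS; id++) {
                    unsigned long *word = &vp_idx_map[id / BITS_PER_LONG];
                    unsigned long bit = 1UL << (id % BITS_PER_LONG);

                    if (!(*word & bit)) {
                            *word |= bit;
                            return id;
                    }
            }
            return MAX_VPORTS + 1;  /* caller checks id > max, as above */
    }

Exhaustion is signalled the same way the driver does it: an id beyond the supported maximum, which the caller rejects.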
@@ -73,12 +72,12 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
73 down(&ha->vport_sem); 72 down(&ha->vport_sem);
74 vp_id = vha->vp_idx; 73 vp_id = vha->vp_idx;
75 ha->num_vhosts--; 74 ha->num_vhosts--;
76 clear_bit(vp_id, (unsigned long *)ha->vp_idx_map); 75 clear_bit(vp_id, ha->vp_idx_map);
77 list_del(&vha->vp_list); 76 list_del(&vha->vp_list);
78 up(&ha->vport_sem); 77 up(&ha->vport_sem);
79} 78}
80 79
81scsi_qla_host_t * 80static scsi_qla_host_t *
82qla24xx_find_vhost_by_name(scsi_qla_host_t *ha, uint8_t *port_name) 81qla24xx_find_vhost_by_name(scsi_qla_host_t *ha, uint8_t *port_name)
83{ 82{
84 scsi_qla_host_t *vha; 83 scsi_qla_host_t *vha;
@@ -216,11 +215,7 @@ qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
216 if (ha->parent) 215 if (ha->parent)
217 return; 216 return;
218 217
219 i = find_next_bit((unsigned long *)ha->vp_idx_map, 218 for_each_mapped_vp_idx(ha, i) {
220 MAX_MULTI_ID_FABRIC + 1, 1);
221 for (;i <= MAX_MULTI_ID_FABRIC;
222 i = find_next_bit((unsigned long *)ha->vp_idx_map,
223 MAX_MULTI_ID_FABRIC + 1, i + 1)) {
224 vp_idx_matched = 0; 219 vp_idx_matched = 0;
225 220
226 list_for_each_entry(vha, &ha->vp_list, vp_list) { 221 list_for_each_entry(vha, &ha->vp_list, vp_list) {
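for_each_mapped_vp_idx() is introduced elsewhere in this series (qla_def.h); its definition is not part of this hunk, but given the two open-coded loops it replaces, it presumably wraps the same find_next_bit() walk, along these lines (an assumption, not the committed text):

    #define for_each_mapped_vp_idx(_ha, _idx)                       \
            for (_idx = find_next_bit((_ha)->vp_idx_map,            \
                    (_ha)->max_npiv_vports + 1, 1);                 \
                _idx <= (_ha)->max_npiv_vports;                     \
                _idx = find_next_bit((_ha)->vp_idx_map,             \
                    (_ha)->max_npiv_vports + 1, _idx + 1))

Hoisting the iteration into one macro keeps the start-at-index-1 convention (skip the physical port) and the off-by-one-prone "+ 1" size argument in a single place.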
@@ -270,7 +265,7 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
270 qla24xx_enable_vp(vha); 265 qla24xx_enable_vp(vha);
271} 266}
272 267
273int 268static int
274qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 269qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
275{ 270{
276 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { 271 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
@@ -311,11 +306,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *ha)
311 306
312 clear_bit(VP_DPC_NEEDED, &ha->dpc_flags); 307 clear_bit(VP_DPC_NEEDED, &ha->dpc_flags);
313 308
314 i = find_next_bit((unsigned long *)ha->vp_idx_map, 309 for_each_mapped_vp_idx(ha, i) {
315 MAX_MULTI_ID_FABRIC + 1, 1);
316 for (;i <= MAX_MULTI_ID_FABRIC;
317 i = find_next_bit((unsigned long *)ha->vp_idx_map,
318 MAX_MULTI_ID_FABRIC + 1, i + 1)) {
319 vp_idx_matched = 0; 310 vp_idx_matched = 0;
320 311
321 list_for_each_entry(vha, &ha->vp_list, vp_list) { 312 list_for_each_entry(vha, &ha->vp_list, vp_list) {
@@ -350,15 +341,17 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
350 341
351 /* Check up unique WWPN */ 342 /* Check up unique WWPN */
352 u64_to_wwn(fc_vport->port_name, port_name); 343 u64_to_wwn(fc_vport->port_name, port_name);
344 if (!memcmp(port_name, ha->port_name, WWN_SIZE))
345 return VPCERR_BAD_WWN;
353 vha = qla24xx_find_vhost_by_name(ha, port_name); 346 vha = qla24xx_find_vhost_by_name(ha, port_name);
354 if (vha) 347 if (vha)
355 return VPCERR_BAD_WWN; 348 return VPCERR_BAD_WWN;
356 349
357 /* Check up max-npiv-supports */ 350 /* Check up max-npiv-supports */
358 if (ha->num_vhosts > ha->max_npiv_vports) { 351 if (ha->num_vhosts > ha->max_npiv_vports) {
359 DEBUG15(printk("scsi(%ld): num_vhosts %d is bigger than " 352 DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
360 "max_npv_vports %d.\n", ha->host_no, 353 "max_npv_vports %ud.\n", ha->host_no,
361 (uint16_t) ha->num_vhosts, (int) ha->max_npiv_vports)); 354 ha->num_vhosts, ha->max_npiv_vports));
362 return VPCERR_UNSUPPORTED; 355 return VPCERR_UNSUPPORTED;
363 } 356 }
364 return 0; 357 return 0;
@@ -412,8 +405,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
412 } 405 }
413 vha->mgmt_svr_loop_id = 10 + vha->vp_idx; 406 vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
414 407
415 init_MUTEX(&vha->mbx_cmd_sem); 408 init_completion(&vha->mbx_cmd_comp);
416 init_MUTEX_LOCKED(&vha->mbx_intr_sem); 409 complete(&vha->mbx_cmd_comp);
410 init_completion(&vha->mbx_intr_comp);
417 411
418 INIT_LIST_HEAD(&vha->list); 412 INIT_LIST_HEAD(&vha->list);
419 INIT_LIST_HEAD(&vha->fcports); 413 INIT_LIST_HEAD(&vha->fcports);
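init_MUTEX() created a semaphore that starts unlocked; to get the same starting state from a completion, the code above posts one token with complete() immediately after init_completion(). A userspace model of the primitive itself (the kernel's is wait-queue based, but the semantics match):

    #include <pthread.h>

    struct completion {
            pthread_mutex_t lock;
            pthread_cond_t wait;
            unsigned int done;      /* available tokens */
    };

    static void init_completion(struct completion *c)
    {
            pthread_mutex_init(&c->lock, NULL);
            pthread_cond_init(&c->wait, NULL);
            c->done = 0;
    }

    static void complete(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done++;
            pthread_cond_signal(&c->wait);
            pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            while (!c->done)
                    pthread_cond_wait(&c->wait, &c->lock);
            c->done--;
            pthread_mutex_unlock(&c->lock);
    }

With the token posted up front, the first wait_for_completion() passes straight through, exactly like down() on an unlocked semaphore; mbx_intr_comp is left empty because the ISR posts it.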
@@ -450,7 +444,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
450 num_hosts++; 444 num_hosts++;
451 445
452 down(&ha->vport_sem); 446 down(&ha->vport_sem);
453 set_bit(vha->vp_idx, (unsigned long *)ha->vp_idx_map); 447 set_bit(vha->vp_idx, ha->vp_idx_map);
454 ha->cur_vport_count++; 448 ha->cur_vport_count++;
455 up(&ha->vport_sem); 449 up(&ha->vport_sem);
456 450
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8ecc0470b8f3..aba1e6d48066 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -105,13 +105,12 @@ static int qla2xxx_eh_abort(struct scsi_cmnd *);
105static int qla2xxx_eh_device_reset(struct scsi_cmnd *); 105static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
106static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); 106static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
107static int qla2xxx_eh_host_reset(struct scsi_cmnd *); 107static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
108static int qla2x00_loop_reset(scsi_qla_host_t *ha);
109static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *); 108static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
110 109
111static int qla2x00_change_queue_depth(struct scsi_device *, int); 110static int qla2x00_change_queue_depth(struct scsi_device *, int);
112static int qla2x00_change_queue_type(struct scsi_device *, int); 111static int qla2x00_change_queue_type(struct scsi_device *, int);
113 112
114struct scsi_host_template qla2x00_driver_template = { 113static struct scsi_host_template qla2x00_driver_template = {
115 .module = THIS_MODULE, 114 .module = THIS_MODULE,
116 .name = QLA2XXX_DRIVER_NAME, 115 .name = QLA2XXX_DRIVER_NAME,
117 .queuecommand = qla2x00_queuecommand, 116 .queuecommand = qla2x00_queuecommand,
@@ -179,13 +178,6 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
179 * Timer routines 178 * Timer routines
180 */ 179 */
181 180
182void qla2x00_timer(scsi_qla_host_t *);
183
184__inline__ void qla2x00_start_timer(scsi_qla_host_t *,
185 void *, unsigned long);
186static __inline__ void qla2x00_restart_timer(scsi_qla_host_t *, unsigned long);
187__inline__ void qla2x00_stop_timer(scsi_qla_host_t *);
188
189__inline__ void 181__inline__ void
190qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval) 182qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval)
191{ 183{
@@ -203,7 +195,7 @@ qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval)
203 mod_timer(&ha->timer, jiffies + interval * HZ); 195 mod_timer(&ha->timer, jiffies + interval * HZ);
204} 196}
205 197
206__inline__ void 198static __inline__ void
207qla2x00_stop_timer(scsi_qla_host_t *ha) 199qla2x00_stop_timer(scsi_qla_host_t *ha)
208{ 200{
209 del_timer_sync(&ha->timer); 201 del_timer_sync(&ha->timer);
@@ -214,12 +206,11 @@ static int qla2x00_do_dpc(void *data);
214 206
215static void qla2x00_rst_aen(scsi_qla_host_t *); 207static void qla2x00_rst_aen(scsi_qla_host_t *);
216 208
217uint8_t qla2x00_mem_alloc(scsi_qla_host_t *); 209static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
218void qla2x00_mem_free(scsi_qla_host_t *ha); 210static void qla2x00_mem_free(scsi_qla_host_t *ha);
219static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha); 211static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
220static void qla2x00_free_sp_pool(scsi_qla_host_t *ha); 212static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
221static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 213static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
222void qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *);
223 214
224/* -------------------------------------------------------------------------- */ 215/* -------------------------------------------------------------------------- */
225 216
@@ -1060,7 +1051,7 @@ eh_host_reset_lock:
1060* Returns: 1051* Returns:
1061* 0 = success 1052* 0 = success
1062*/ 1053*/
1063static int 1054int
1064qla2x00_loop_reset(scsi_qla_host_t *ha) 1055qla2x00_loop_reset(scsi_qla_host_t *ha)
1065{ 1056{
1066 int ret; 1057 int ret;
@@ -1479,8 +1470,7 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
1479static int 1470static int
1480qla2x00_iospace_config(scsi_qla_host_t *ha) 1471qla2x00_iospace_config(scsi_qla_host_t *ha)
1481{ 1472{
1482 unsigned long pio, pio_len, pio_flags; 1473 resource_size_t pio;
1483 unsigned long mmio, mmio_len, mmio_flags;
1484 1474
1485 if (pci_request_selected_regions(ha->pdev, ha->bars, 1475 if (pci_request_selected_regions(ha->pdev, ha->bars,
1486 QLA2XXX_DRIVER_NAME)) { 1476 QLA2XXX_DRIVER_NAME)) {
@@ -1495,10 +1485,8 @@ qla2x00_iospace_config(scsi_qla_host_t *ha)
1495 1485
1496 /* We only need PIO for Flash operations on ISP2312 v2 chips. */ 1486 /* We only need PIO for Flash operations on ISP2312 v2 chips. */
1497 pio = pci_resource_start(ha->pdev, 0); 1487 pio = pci_resource_start(ha->pdev, 0);
1498 pio_len = pci_resource_len(ha->pdev, 0); 1488 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1499 pio_flags = pci_resource_flags(ha->pdev, 0); 1489 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1500 if (pio_flags & IORESOURCE_IO) {
1501 if (pio_len < MIN_IOBASE_LEN) {
1502 qla_printk(KERN_WARNING, ha, 1490 qla_printk(KERN_WARNING, ha,
1503 "Invalid PCI I/O region size (%s)...\n", 1491 "Invalid PCI I/O region size (%s)...\n",
1504 pci_name(ha->pdev)); 1492 pci_name(ha->pdev));
@@ -1511,28 +1499,23 @@ qla2x00_iospace_config(scsi_qla_host_t *ha)
1511 pio = 0; 1499 pio = 0;
1512 } 1500 }
1513 ha->pio_address = pio; 1501 ha->pio_address = pio;
1514 ha->pio_length = pio_len;
1515 1502
1516skip_pio: 1503skip_pio:
1517 /* Use MMIO operations for all accesses. */ 1504 /* Use MMIO operations for all accesses. */
1518 mmio = pci_resource_start(ha->pdev, 1); 1505 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1519 mmio_len = pci_resource_len(ha->pdev, 1);
1520 mmio_flags = pci_resource_flags(ha->pdev, 1);
1521
1522 if (!(mmio_flags & IORESOURCE_MEM)) {
1523 qla_printk(KERN_ERR, ha, 1506 qla_printk(KERN_ERR, ha,
1524 "region #0 not an MMIO resource (%s), aborting\n", 1507 "region #1 not an MMIO resource (%s), aborting\n",
1525 pci_name(ha->pdev)); 1508 pci_name(ha->pdev));
1526 goto iospace_error_exit; 1509 goto iospace_error_exit;
1527 } 1510 }
1528 if (mmio_len < MIN_IOBASE_LEN) { 1511 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1529 qla_printk(KERN_ERR, ha, 1512 qla_printk(KERN_ERR, ha,
1530 "Invalid PCI mem region size (%s), aborting\n", 1513 "Invalid PCI mem region size (%s), aborting\n",
1531 pci_name(ha->pdev)); 1514 pci_name(ha->pdev));
1532 goto iospace_error_exit; 1515 goto iospace_error_exit;
1533 } 1516 }
1534 1517
1535 ha->iobase = ioremap(mmio, MIN_IOBASE_LEN); 1518 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1536 if (!ha->iobase) { 1519 if (!ha->iobase) {
1537 qla_printk(KERN_ERR, ha, 1520 qla_printk(KERN_ERR, ha,
1538 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); 1521 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
@@ -1701,9 +1684,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1701 /* load the F/W, read parameters, and init the H/W */ 1684 /* load the F/W, read parameters, and init the H/W */
1702 ha->instance = num_hosts; 1685 ha->instance = num_hosts;
1703 1686
1704 init_MUTEX(&ha->mbx_cmd_sem);
1705 init_MUTEX(&ha->vport_sem); 1687 init_MUTEX(&ha->vport_sem);
1706 init_MUTEX_LOCKED(&ha->mbx_intr_sem); 1688 init_completion(&ha->mbx_cmd_comp);
1689 complete(&ha->mbx_cmd_comp);
1690 init_completion(&ha->mbx_intr_comp);
1707 1691
1708 INIT_LIST_HEAD(&ha->list); 1692 INIT_LIST_HEAD(&ha->list);
1709 INIT_LIST_HEAD(&ha->fcports); 1693 INIT_LIST_HEAD(&ha->fcports);
@@ -1807,6 +1791,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1807 1791
1808 qla2x00_init_host_attr(ha); 1792 qla2x00_init_host_attr(ha);
1809 1793
1794 qla2x00_dfs_setup(ha);
1795
1810 qla_printk(KERN_INFO, ha, "\n" 1796 qla_printk(KERN_INFO, ha, "\n"
1811 " QLogic Fibre Channel HBA Driver: %s\n" 1797 " QLogic Fibre Channel HBA Driver: %s\n"
1812 " QLogic %s - %s\n" 1798 " QLogic %s - %s\n"
@@ -1838,6 +1824,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
1838 1824
1839 ha = pci_get_drvdata(pdev); 1825 ha = pci_get_drvdata(pdev);
1840 1826
1827 qla2x00_dfs_remove(ha);
1828
1841 qla2x00_free_sysfs_attr(ha); 1829 qla2x00_free_sysfs_attr(ha);
1842 1830
1843 fc_remove_host(ha->host); 1831 fc_remove_host(ha->host);
@@ -1871,8 +1859,11 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1871 kthread_stop(t); 1859 kthread_stop(t);
1872 } 1860 }
1873 1861
1862 if (ha->flags.fce_enabled)
1863 qla2x00_disable_fce_trace(ha, NULL, NULL);
1864
1874 if (ha->eft) 1865 if (ha->eft)
1875 qla2x00_trace_control(ha, TC_DISABLE, 0, 0); 1866 qla2x00_disable_eft_trace(ha);
1876 1867
1877 ha->flags.online = 0; 1868 ha->flags.online = 0;
1878 1869
@@ -2016,7 +2007,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
2016* 0 = success. 2007* 0 = success.
2017* 1 = failure. 2008* 1 = failure.
2018*/ 2009*/
2019uint8_t 2010static uint8_t
2020qla2x00_mem_alloc(scsi_qla_host_t *ha) 2011qla2x00_mem_alloc(scsi_qla_host_t *ha)
2021{ 2012{
2022 char name[16]; 2013 char name[16];
@@ -2213,7 +2204,7 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
2213* Input: 2204* Input:
2214* ha = adapter block pointer. 2205* ha = adapter block pointer.
2215*/ 2206*/
2216void 2207static void
2217qla2x00_mem_free(scsi_qla_host_t *ha) 2208qla2x00_mem_free(scsi_qla_host_t *ha)
2218{ 2209{
2219 struct list_head *fcpl, *fcptemp; 2210 struct list_head *fcpl, *fcptemp;
@@ -2228,6 +2219,10 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2228 /* free sp pool */ 2219 /* free sp pool */
2229 qla2x00_free_sp_pool(ha); 2220 qla2x00_free_sp_pool(ha);
2230 2221
2222 if (ha->fce)
2223 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2224 ha->fce_dma);
2225
2231 if (ha->fw_dump) { 2226 if (ha->fw_dump) {
2232 if (ha->eft) 2227 if (ha->eft)
2233 dma_free_coherent(&ha->pdev->dev, 2228 dma_free_coherent(&ha->pdev->dev,
@@ -2748,23 +2743,6 @@ qla2x00_timer(scsi_qla_host_t *ha)
2748 qla2x00_restart_timer(ha, WATCH_INTERVAL); 2743 qla2x00_restart_timer(ha, WATCH_INTERVAL);
2749} 2744}
2750 2745
2751/* XXX(hch): crude hack to emulate a down_timeout() */
2752int
2753qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
2754{
2755 const unsigned int step = 100; /* msecs */
2756 unsigned int iterations = jiffies_to_msecs(timeout)/100;
2757
2758 do {
2759 if (!down_trylock(sema))
2760 return 0;
2761 if (msleep_interruptible(step))
2762 break;
2763 } while (--iterations > 0);
2764
2765 return -ETIMEDOUT;
2766}
2767
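The "crude hack" above existed because semaphores had no timed down(); once the mailbox paths use completions, the kernel's native wait_for_completion_timeout() covers the case and the polling shim can go. A userspace model of a timed token wait, continuing the pthread completion sketch from the qla_mid.c notes (helper name is hypothetical):

    #include <pthread.h>
    #include <time.h>
    #include <errno.h>

    static int wait_done_timeout(pthread_mutex_t *lock, pthread_cond_t *cv,
                                 unsigned int *done, unsigned int secs)
    {
            struct timespec ts;
            int err = 0;

            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += secs;

            pthread_mutex_lock(lock);
            while (!*done && err == 0)
                    err = pthread_cond_timedwait(cv, lock, &ts);
            if (*done) {            /* token arrived, even if we also timed out */
                    (*done)--;
                    err = 0;
            }
            pthread_mutex_unlock(lock);
            return err ? -ETIMEDOUT : 0;
    }

Unlike the msleep loop it replaces, the waiter sleeps on the condition itself, so wakeup latency is a scheduling delay rather than up to a full 100 ms polling step.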
2768/* Firmware interface routines. */ 2746/* Firmware interface routines. */
2769 2747
2770#define FW_BLOBS 6 2748#define FW_BLOBS 6
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index ad2fa01bd233..b68fb73613ed 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -22,7 +22,7 @@ static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
22 * qla2x00_lock_nvram_access() - 22 * qla2x00_lock_nvram_access() -
23 * @ha: HA context 23 * @ha: HA context
24 */ 24 */
25void 25static void
26qla2x00_lock_nvram_access(scsi_qla_host_t *ha) 26qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
27{ 27{
28 uint16_t data; 28 uint16_t data;
@@ -55,7 +55,7 @@ qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
55 * qla2x00_unlock_nvram_access() - 55 * qla2x00_unlock_nvram_access() -
56 * @ha: HA context 56 * @ha: HA context
57 */ 57 */
58void 58static void
59qla2x00_unlock_nvram_access(scsi_qla_host_t *ha) 59qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
60{ 60{
61 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 61 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -74,7 +74,7 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
74 * 74 *
75 * Returns the word read from nvram @addr. 75 * Returns the word read from nvram @addr.
76 */ 76 */
77uint16_t 77static uint16_t
78qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr) 78qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr)
79{ 79{
80 uint16_t data; 80 uint16_t data;
@@ -93,7 +93,7 @@ qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr)
93 * @addr: Address in NVRAM to write 93 * @addr: Address in NVRAM to write
94 * @data: word to program 94 * @data: word to program
95 */ 95 */
96void 96static void
97qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data) 97qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
98{ 98{
99 int count; 99 int count;
@@ -550,7 +550,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
550 int ret; 550 int ret;
551 uint32_t liter, miter; 551 uint32_t liter, miter;
552 uint32_t sec_mask, rest_addr, conf_addr; 552 uint32_t sec_mask, rest_addr, conf_addr;
553 uint32_t fdata, findex ; 553 uint32_t fdata, findex, cnt;
554 uint8_t man_id, flash_id; 554 uint8_t man_id, flash_id;
555 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 555 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
556 dma_addr_t optrom_dma; 556 dma_addr_t optrom_dma;
@@ -690,8 +690,14 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
690 0xff0000) | ((fdata >> 16) & 0xff)); 690 0xff0000) | ((fdata >> 16) & 0xff));
691 } 691 }
692 692
693 /* Enable flash write-protection. */ 693 /* Enable flash write-protection and wait for completion. */
694 qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0x9c); 694 qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0x9c);
695 for (cnt = 300; cnt &&
696 qla24xx_read_flash_dword(ha,
697 flash_conf_to_access_addr(0x005)) & BIT_0;
698 cnt--) {
699 udelay(10);
700 }
695 701
696 /* Disable flash write. */ 702 /* Disable flash write. */
697 WRT_REG_DWORD(&reg->ctrl_status, 703 WRT_REG_DWORD(&reg->ctrl_status,
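The added loop above bounds a busy-wait on the flash status register (BIT_0 = write in progress) at 300 iterations of 10 us, so the driver no longer disables flash write access while the part is still committing the protection bits. The pattern in isolation, with function pointers standing in for qla24xx_read_flash_dword() and udelay():

    #include <stdint.h>

    static int wait_flash_idle(uint32_t (*read_status)(void),
                               void (*delay_us)(unsigned int))
    {
            unsigned int cnt;

            for (cnt = 300; cnt && (read_status() & 1u /* BIT_0: busy */); cnt--)
                    delay_us(10);

            return cnt ? 0 : -1;    /* -1: still busy after ~3 ms */
    }

The committed hunk falls through silently on timeout; returning a status here is this sketch's addition, not the driver's behavior.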
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ae6f7a2fb19f..2c2f6b4697c7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.00-k5" 10#define QLA2XXX_VERSION "8.02.00-k7"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index d692c713416a..cbe0a17ced5f 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#include <scsi/iscsi_if.h>
8#include "ql4_def.h" 9#include "ql4_def.h"
9#include "ql4_glbl.h" 10#include "ql4_glbl.h"
10#include "ql4_dbg.h" 11#include "ql4_dbg.h"
@@ -1305,7 +1306,8 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
1305 atomic_set(&ddb_entry->relogin_timer, 0); 1306 atomic_set(&ddb_entry->relogin_timer, 0);
1306 clear_bit(DF_RELOGIN, &ddb_entry->flags); 1307 clear_bit(DF_RELOGIN, &ddb_entry->flags);
1307 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); 1308 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
1308 iscsi_if_create_session_done(ddb_entry->conn); 1309 iscsi_session_event(ddb_entry->sess,
1310 ISCSI_KEVENT_CREATE_SESSION);
1309 /* 1311 /*
1310 * Change the lun state to READY in case the lun TIMEOUT before 1312 * Change the lun state to READY in case the lun TIMEOUT before
1311 * the device came back. 1313 * the device came back.
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 4a154beb0d39..0f029d0d7315 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -123,15 +123,14 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
123 break; 123 break;
124 124
125 /* Copy Sense Data into sense buffer. */ 125 /* Copy Sense Data into sense buffer. */
126 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); 126 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
127 127
128 sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt); 128 sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
129 if (sensebytecnt == 0) 129 if (sensebytecnt == 0)
130 break; 130 break;
131 131
132 memcpy(cmd->sense_buffer, sts_entry->senseData, 132 memcpy(cmd->sense_buffer, sts_entry->senseData,
133 min(sensebytecnt, 133 min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));
134 (uint16_t) sizeof(cmd->sense_buffer)));
135 134
136 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, " 135 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
137 "ASC/ASCQ = %02x/%02x\n", ha->host_no, 136 "ASC/ASCQ = %02x/%02x\n", ha->host_no,
@@ -208,8 +207,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
208 break; 207 break;
209 208
210 /* Copy Sense Data into sense buffer. */ 209 /* Copy Sense Data into sense buffer. */
211 memset(cmd->sense_buffer, 0, 210 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
212 sizeof(cmd->sense_buffer));
213 211
214 sensebytecnt = 212 sensebytecnt =
215 le16_to_cpu(sts_entry->senseDataByteCnt); 213 le16_to_cpu(sts_entry->senseDataByteCnt);
@@ -217,8 +215,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
217 break; 215 break;
218 216
219 memcpy(cmd->sense_buffer, sts_entry->senseData, 217 memcpy(cmd->sense_buffer, sts_entry->senseData,
220 min(sensebytecnt, 218 min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));
221 (uint16_t) sizeof(cmd->sense_buffer)));
222 219
223 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, " 220 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
224 "ASC/ASCQ = %02x/%02x\n", ha->host_no, 221 "ASC/ASCQ = %02x/%02x\n", ha->host_no,
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 89460d27c689..f55b9f7d9396 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -298,8 +298,7 @@ void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry)
298 return; 298 return;
299 299
300 if (ddb_entry->conn) { 300 if (ddb_entry->conn) {
301 iscsi_if_destroy_session_done(ddb_entry->conn); 301 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
302 iscsi_destroy_conn(ddb_entry->conn);
303 iscsi_remove_session(ddb_entry->sess); 302 iscsi_remove_session(ddb_entry->sess);
304 } 303 }
305 iscsi_free_session(ddb_entry->sess); 304 iscsi_free_session(ddb_entry->sess);
@@ -309,6 +308,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
309{ 308{
310 int err; 309 int err;
311 310
311 ddb_entry->sess->recovery_tmo = ddb_entry->ha->port_down_retry_count;
312 err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index); 312 err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
313 if (err) { 313 if (err) {
314 DEBUG2(printk(KERN_ERR "Could not add session.\n")); 314 DEBUG2(printk(KERN_ERR "Could not add session.\n"));
@@ -321,9 +321,6 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
321 DEBUG2(printk(KERN_ERR "Could not add connection.\n")); 321 DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
322 return -ENOMEM; 322 return -ENOMEM;
323 } 323 }
324
325 ddb_entry->sess->recovery_tmo = ddb_entry->ha->port_down_retry_count;
326 iscsi_if_create_session_done(ddb_entry->conn);
327 return 0; 324 return 0;
328} 325}
329 326
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 7a2e7986b038..65455ab1f3b9 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -871,11 +871,12 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
871 struct scatterlist *sg, *s; 871 struct scatterlist *sg, *s;
872 int i, n; 872 int i, n;
873 873
874 if (Cmnd->use_sg) { 874 if (scsi_bufflen(Cmnd)) {
875 int sg_count; 875 int sg_count;
876 876
877 sg = (struct scatterlist *) Cmnd->request_buffer; 877 sg = scsi_sglist(Cmnd);
878 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction); 878 sg_count = sbus_map_sg(qpti->sdev, sg, scsi_sg_count(Cmnd),
879 Cmnd->sc_data_direction);
879 880
880 ds = cmd->dataseg; 881 ds = cmd->dataseg;
881 cmd->segment_cnt = sg_count; 882 cmd->segment_cnt = sg_count;
@@ -914,16 +915,6 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
914 } 915 }
915 sg_count -= n; 916 sg_count -= n;
916 } 917 }
917 } else if (Cmnd->request_bufflen) {
918 Cmnd->SCp.ptr = (char *)(unsigned long)
919 sbus_map_single(qpti->sdev,
920 Cmnd->request_buffer,
921 Cmnd->request_bufflen,
922 Cmnd->sc_data_direction);
923
924 cmd->dataseg[0].d_base = (u32) ((unsigned long)Cmnd->SCp.ptr);
925 cmd->dataseg[0].d_count = Cmnd->request_bufflen;
926 cmd->segment_cnt = 1;
927 } else { 918 } else {
928 cmd->dataseg[0].d_base = 0; 919 cmd->dataseg[0].d_base = 0;
929 cmd->dataseg[0].d_count = 0; 920 cmd->dataseg[0].d_count = 0;
@@ -1151,7 +1142,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1151 1142
1152 if (sts->state_flags & SF_GOT_SENSE) 1143 if (sts->state_flags & SF_GOT_SENSE)
1153 memcpy(Cmnd->sense_buffer, sts->req_sense_data, 1144 memcpy(Cmnd->sense_buffer, sts->req_sense_data,
1154 sizeof(Cmnd->sense_buffer)); 1145 SCSI_SENSE_BUFFERSIZE);
1155 1146
1156 if (sts->hdr.entry_type == ENTRY_STATUS) 1147 if (sts->hdr.entry_type == ENTRY_STATUS)
1157 Cmnd->result = 1148 Cmnd->result =
@@ -1159,17 +1150,11 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1159 else 1150 else
1160 Cmnd->result = DID_ERROR << 16; 1151 Cmnd->result = DID_ERROR << 16;
1161 1152
1162 if (Cmnd->use_sg) { 1153 if (scsi_bufflen(Cmnd))
1163 sbus_unmap_sg(qpti->sdev, 1154 sbus_unmap_sg(qpti->sdev,
1164 (struct scatterlist *)Cmnd->request_buffer, 1155 scsi_sglist(Cmnd), scsi_sg_count(Cmnd),
1165 Cmnd->use_sg,
1166 Cmnd->sc_data_direction); 1156 Cmnd->sc_data_direction);
1167 } else if (Cmnd->request_bufflen) { 1157
1168 sbus_unmap_single(qpti->sdev,
1169 (__u32)((unsigned long)Cmnd->SCp.ptr),
1170 Cmnd->request_bufflen,
1171 Cmnd->sc_data_direction);
1172 }
1173 qpti->cmd_count[Cmnd->device->id]--; 1158 qpti->cmd_count[Cmnd->device->id]--;
1174 sbus_writew(out_ptr, qpti->qregs + MBOX5); 1159 sbus_writew(out_ptr, qpti->qregs + MBOX5);
1175 Cmnd->host_scribble = (unsigned char *) done_queue; 1160 Cmnd->host_scribble = (unsigned char *) done_queue;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 0fb1709ce5e3..1a9fba6a9f92 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -122,6 +122,11 @@ static const char *const scsi_device_types[] = {
122 "Automation/Drive ", 122 "Automation/Drive ",
123}; 123};
124 124
125/**
126 * scsi_device_type - Return 17 char string indicating device type.
127 * @type: type number to look up
128 */
129
125const char * scsi_device_type(unsigned type) 130const char * scsi_device_type(unsigned type)
126{ 131{
127 if (type == 0x1e) 132 if (type == 0x1e)
@@ -136,32 +141,45 @@ const char * scsi_device_type(unsigned type)
136EXPORT_SYMBOL(scsi_device_type); 141EXPORT_SYMBOL(scsi_device_type);
137 142
138struct scsi_host_cmd_pool { 143struct scsi_host_cmd_pool {
139 struct kmem_cache *slab; 144 struct kmem_cache *cmd_slab;
140 unsigned int users; 145 struct kmem_cache *sense_slab;
141 char *name; 146 unsigned int users;
142 unsigned int slab_flags; 147 char *cmd_name;
143 gfp_t gfp_mask; 148 char *sense_name;
149 unsigned int slab_flags;
150 gfp_t gfp_mask;
144}; 151};
145 152
146static struct scsi_host_cmd_pool scsi_cmd_pool = { 153static struct scsi_host_cmd_pool scsi_cmd_pool = {
147 .name = "scsi_cmd_cache", 154 .cmd_name = "scsi_cmd_cache",
155 .sense_name = "scsi_sense_cache",
148 .slab_flags = SLAB_HWCACHE_ALIGN, 156 .slab_flags = SLAB_HWCACHE_ALIGN,
149}; 157};
150 158
151static struct scsi_host_cmd_pool scsi_cmd_dma_pool = { 159static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
152 .name = "scsi_cmd_cache(DMA)", 160 .cmd_name = "scsi_cmd_cache(DMA)",
161 .sense_name = "scsi_sense_cache(DMA)",
153 .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA, 162 .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
154 .gfp_mask = __GFP_DMA, 163 .gfp_mask = __GFP_DMA,
155}; 164};
156 165
157static DEFINE_MUTEX(host_cmd_pool_mutex); 166static DEFINE_MUTEX(host_cmd_pool_mutex);
158 167
168/**
169 * __scsi_get_command - Allocate a struct scsi_cmnd
170 * @shost: host to transmit command
171 * @gfp_mask: allocation mask
172 *
173 * Description: allocate a struct scsi_cmd from host's slab, recycling from the
174 * host's free_list if necessary.
175 */
159struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) 176struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
160{ 177{
161 struct scsi_cmnd *cmd; 178 struct scsi_cmnd *cmd;
179 unsigned char *buf;
162 180
163 cmd = kmem_cache_alloc(shost->cmd_pool->slab, 181 cmd = kmem_cache_alloc(shost->cmd_pool->cmd_slab,
164 gfp_mask | shost->cmd_pool->gfp_mask); 182 gfp_mask | shost->cmd_pool->gfp_mask);
165 183
166 if (unlikely(!cmd)) { 184 if (unlikely(!cmd)) {
167 unsigned long flags; 185 unsigned long flags;
@@ -173,19 +191,32 @@ struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
173 list_del_init(&cmd->list); 191 list_del_init(&cmd->list);
174 } 192 }
175 spin_unlock_irqrestore(&shost->free_list_lock, flags); 193 spin_unlock_irqrestore(&shost->free_list_lock, flags);
194
195 if (cmd) {
196 buf = cmd->sense_buffer;
197 memset(cmd, 0, sizeof(*cmd));
198 cmd->sense_buffer = buf;
199 }
200 } else {
201 buf = kmem_cache_alloc(shost->cmd_pool->sense_slab,
202 gfp_mask | shost->cmd_pool->gfp_mask);
203 if (likely(buf)) {
204 memset(cmd, 0, sizeof(*cmd));
205 cmd->sense_buffer = buf;
206 } else {
207 kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
208 cmd = NULL;
209 }
176 } 210 }
177 211
178 return cmd; 212 return cmd;
179} 213}
180EXPORT_SYMBOL_GPL(__scsi_get_command); 214EXPORT_SYMBOL_GPL(__scsi_get_command);
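The new __scsi_get_command() pairs every command with a sense buffer from a second slab, and on the recycle path it must save the sense pointer across the memset() that scrubs the rest of the struct. A userspace model of that allocation dance, with malloc standing in for the two slabs:

    #include <stdlib.h>
    #include <string.h>

    struct cmd_model {
            unsigned char *sense_buffer;
            char payload[512];              /* stands in for the other fields */
    };

    static struct cmd_model *get_command(void)
    {
            struct cmd_model *cmd = malloc(sizeof(*cmd));
            unsigned char *buf;

            if (!cmd)
                    return NULL;

            buf = malloc(96);               /* SCSI_SENSE_BUFFERSIZE */
            if (!buf) {
                    free(cmd);              /* never leak the half-built pair */
                    return NULL;
            }

            memset(cmd, 0, sizeof(*cmd));
            cmd->sense_buffer = buf;        /* re-attach after the scrub */
            return cmd;
    }

The same save/scrub/re-attach sequence appears in the free-list branch above, and __scsi_put_command() below frees to both slabs in the reverse pairing.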
181 215
182/* 216/**
183 * Function: scsi_get_command() 217 * scsi_get_command - Allocate and setup a scsi command block
184 * 218 * @dev: parent scsi device
185 * Purpose: Allocate and setup a scsi command block 219 * @gfp_mask: allocator flags
186 *
187 * Arguments: dev - parent scsi device
188 * gfp_mask- allocator flags
189 * 220 *
190 * Returns: The allocated scsi command structure. 221 * Returns: The allocated scsi command structure.
191 */ 222 */
@@ -202,7 +233,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
202 if (likely(cmd != NULL)) { 233 if (likely(cmd != NULL)) {
203 unsigned long flags; 234 unsigned long flags;
204 235
205 memset(cmd, 0, sizeof(*cmd));
206 cmd->device = dev; 236 cmd->device = dev;
207 init_timer(&cmd->eh_timeout); 237 init_timer(&cmd->eh_timeout);
208 INIT_LIST_HEAD(&cmd->list); 238 INIT_LIST_HEAD(&cmd->list);
@@ -217,6 +247,12 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
217} 247}
218EXPORT_SYMBOL(scsi_get_command); 248EXPORT_SYMBOL(scsi_get_command);
219 249
250/**
251 * __scsi_put_command - Free a struct scsi_cmnd
252 * @shost: dev->host
253 * @cmd: Command to free
254 * @dev: parent scsi device
255 */
220void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd, 256void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
221 struct device *dev) 257 struct device *dev)
222{ 258{
@@ -230,19 +266,19 @@ void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
230 } 266 }
231 spin_unlock_irqrestore(&shost->free_list_lock, flags); 267 spin_unlock_irqrestore(&shost->free_list_lock, flags);
232 268
233 if (likely(cmd != NULL)) 269 if (likely(cmd != NULL)) {
234 kmem_cache_free(shost->cmd_pool->slab, cmd); 270 kmem_cache_free(shost->cmd_pool->sense_slab,
271 cmd->sense_buffer);
272 kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
273 }
235 274
236 put_device(dev); 275 put_device(dev);
237} 276}
238EXPORT_SYMBOL(__scsi_put_command); 277EXPORT_SYMBOL(__scsi_put_command);
239 278
240/* 279/**
241 * Function: scsi_put_command() 280 * scsi_put_command - Free a scsi command block
242 * 281 * @cmd: command block to free
243 * Purpose: Free a scsi command block
244 *
245 * Arguments: cmd - command block to free
246 * 282 *
247 * Returns: Nothing. 283 * Returns: Nothing.
248 * 284 *
@@ -263,12 +299,13 @@ void scsi_put_command(struct scsi_cmnd *cmd)
263} 299}
264EXPORT_SYMBOL(scsi_put_command); 300EXPORT_SYMBOL(scsi_put_command);
265 301
266/* 302/**
267 * Function: scsi_setup_command_freelist() 303 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
268 * 304 * @shost: host to allocate the freelist for.
269 * Purpose: Setup the command freelist for a scsi host.
270 * 305 *
271 * Arguments: shost - host to allocate the freelist for. 306 * Description: The command freelist protects against system-wide out of memory
307 * deadlock by preallocating one SCSI command structure for each host, so the
308 * system can always write to a swap file on a device associated with that host.
272 * 309 *
273 * Returns: Nothing. 310 * Returns: Nothing.
274 */ 311 */
@@ -282,16 +319,24 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
282 319
283 /* 320 /*
284 * Select a command slab for this host and create it if not 321 * Select a command slab for this host and create it if not
285 * yet existant. 322 * yet existent.
286 */ 323 */
287 mutex_lock(&host_cmd_pool_mutex); 324 mutex_lock(&host_cmd_pool_mutex);
288 pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool); 325 pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
289 if (!pool->users) { 326 if (!pool->users) {
290 pool->slab = kmem_cache_create(pool->name, 327 pool->cmd_slab = kmem_cache_create(pool->cmd_name,
291 sizeof(struct scsi_cmnd), 0, 328 sizeof(struct scsi_cmnd), 0,
292 pool->slab_flags, NULL); 329 pool->slab_flags, NULL);
293 if (!pool->slab) 330 if (!pool->cmd_slab)
331 goto fail;
332
333 pool->sense_slab = kmem_cache_create(pool->sense_name,
334 SCSI_SENSE_BUFFERSIZE, 0,
335 pool->slab_flags, NULL);
336 if (!pool->sense_slab) {
337 kmem_cache_destroy(pool->cmd_slab);
294 goto fail; 338 goto fail;
339 }
295 } 340 }
296 341
297 pool->users++; 342 pool->users++;
@@ -301,29 +346,36 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
301 /* 346 /*
302 * Get one backup command for this host. 347 * Get one backup command for this host.
303 */ 348 */
304 cmd = kmem_cache_alloc(shost->cmd_pool->slab, 349 cmd = kmem_cache_alloc(shost->cmd_pool->cmd_slab,
305 GFP_KERNEL | shost->cmd_pool->gfp_mask); 350 GFP_KERNEL | shost->cmd_pool->gfp_mask);
306 if (!cmd) 351 if (!cmd)
307 goto fail2; 352 goto fail2;
308 list_add(&cmd->list, &shost->free_list); 353
354 cmd->sense_buffer = kmem_cache_alloc(shost->cmd_pool->sense_slab,
355 GFP_KERNEL |
356 shost->cmd_pool->gfp_mask);
357 if (!cmd->sense_buffer)
358 goto fail2;
359
360 list_add(&cmd->list, &shost->free_list);
309 return 0; 361 return 0;
310 362
311 fail2: 363 fail2:
312 if (!--pool->users) 364 if (cmd)
313 kmem_cache_destroy(pool->slab); 365 kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
314 return -ENOMEM; 366 mutex_lock(&host_cmd_pool_mutex);
367 if (!--pool->users) {
368 kmem_cache_destroy(pool->cmd_slab);
369 kmem_cache_destroy(pool->sense_slab);
370 }
315 fail: 371 fail:
316 mutex_unlock(&host_cmd_pool_mutex); 372 mutex_unlock(&host_cmd_pool_mutex);
317 return -ENOMEM; 373 return -ENOMEM;
318
319} 374}
320 375
321/* 376/**
322 * Function: scsi_destroy_command_freelist() 377 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
323 * 378 * @shost: host whose freelist is going to be destroyed
324 * Purpose: Release the command freelist for a scsi host.
325 *
326 * Arguments: shost - host that's freelist is going to be destroyed
327 */ 379 */
328void scsi_destroy_command_freelist(struct Scsi_Host *shost) 380void scsi_destroy_command_freelist(struct Scsi_Host *shost)
329{ 381{
@@ -332,12 +384,16 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
332 384
333 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list); 385 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
334 list_del_init(&cmd->list); 386 list_del_init(&cmd->list);
335 kmem_cache_free(shost->cmd_pool->slab, cmd); 387 kmem_cache_free(shost->cmd_pool->sense_slab,
388 cmd->sense_buffer);
389 kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
336 } 390 }
337 391
338 mutex_lock(&host_cmd_pool_mutex); 392 mutex_lock(&host_cmd_pool_mutex);
339 if (!--shost->cmd_pool->users) 393 if (!--shost->cmd_pool->users) {
340 kmem_cache_destroy(shost->cmd_pool->slab); 394 kmem_cache_destroy(shost->cmd_pool->cmd_slab);
395 kmem_cache_destroy(shost->cmd_pool->sense_slab);
396 }
341 mutex_unlock(&host_cmd_pool_mutex); 397 mutex_unlock(&host_cmd_pool_mutex);
342} 398}
343 399
@@ -441,8 +497,12 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
441} 497}
442#endif 498#endif
443 499
444/* 500/**
445 * Assign a serial number to the request for error recovery 501 * scsi_cmd_get_serial - Assign a serial number to a command
502 * @host: the scsi host
503 * @cmd: command to assign serial number to
504 *
505 * Description: a serial number identifies a request for error recovery
446 * and debugging purposes. Protected by the Host_Lock of host. 506 * and debugging purposes. Protected by the Host_Lock of host.
447 */ 507 */
448static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) 508static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
@@ -452,14 +512,12 @@ static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd
452 cmd->serial_number = host->cmd_serial_number++; 512 cmd->serial_number = host->cmd_serial_number++;
453} 513}
454 514
455/* 515/**
456 * Function: scsi_dispatch_command 516 * scsi_dispatch_command - Dispatch a command to the low-level driver.
457 * 517 * @cmd: command block we are dispatching.
458 * Purpose: Dispatch a command to the low-level driver.
459 *
460 * Arguments: cmd - command block we are dispatching.
461 * 518 *
 462 * Notes: 519 * Return: nonzero - request was rejected and the device's queue needs to be
 520 * plugged.
463 */ 521 */
464int scsi_dispatch_cmd(struct scsi_cmnd *cmd) 522int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
465{ 523{
@@ -585,7 +643,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
585 643
586/** 644/**
587 * scsi_req_abort_cmd -- Request command recovery for the specified command 645 * scsi_req_abort_cmd -- Request command recovery for the specified command
588 * cmd: pointer to the SCSI command of interest 646 * @cmd: pointer to the SCSI command of interest
589 * 647 *
590 * This function requests that SCSI Core start recovery for the 648 * This function requests that SCSI Core start recovery for the
591 * command by deleting the timer and adding the command to the eh 649 * command by deleting the timer and adding the command to the eh
@@ -606,9 +664,9 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
606 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 664 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
607 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 665 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
608 * 666 *
609 * This function is the mid-level's (SCSI Core) interrupt routine, which 667 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
610 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues 668 * which regains ownership of the SCSI command (de facto) from a LLDD, and
611 * the command to the done queue for further processing. 669 * enqueues the command to the done queue for further processing.
612 * 670 *
613 * This is the producer of the done queue who enqueues at the tail. 671 * This is the producer of the done queue who enqueues at the tail.
614 * 672 *
@@ -617,7 +675,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
617static void scsi_done(struct scsi_cmnd *cmd) 675static void scsi_done(struct scsi_cmnd *cmd)
618{ 676{
619 /* 677 /*
620 * We don't have to worry about this one timing out any more. 678 * We don't have to worry about this one timing out anymore.
621 * If we are unable to remove the timer, then the command 679 * If we are unable to remove the timer, then the command
622 * has already timed out. In which case, we have no choice but to 680 * has already timed out. In which case, we have no choice but to
623 * let the timeout function run, as we have no idea where in fact 681 * let the timeout function run, as we have no idea where in fact
@@ -660,10 +718,11 @@ static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
660 return *(struct scsi_driver **)cmd->request->rq_disk->private_data; 718 return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
661} 719}
662 720
663/* 721/**
664 * Function: scsi_finish_command 722 * scsi_finish_command - cleanup and pass command back to upper layer
723 * @cmd: the command
665 * 724 *
666 * Purpose: Pass command off to upper layer for finishing of I/O 725 * Description: Pass command off to upper layer for finishing of I/O
667 * request, waking processes that are waiting on results, 726 * request, waking processes that are waiting on results,
668 * etc. 727 * etc.
669 */ 728 */
@@ -708,18 +767,14 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
708} 767}
709EXPORT_SYMBOL(scsi_finish_command); 768EXPORT_SYMBOL(scsi_finish_command);
710 769
711/* 770/**
712 * Function: scsi_adjust_queue_depth() 771 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
713 * 772 * @sdev: SCSI Device in question
714 * Purpose: Allow low level drivers to tell us to change the queue depth 773 * @tagged: Do we use tagged queueing (non-0) or do we treat
715 * on a specific SCSI device 774 * this device as an untagged device (0)
716 * 775 * @tags: Number of tags allowed if tagged queueing enabled,
717 * Arguments: sdev - SCSI Device in question 776 * or number of commands the low level driver can
718 * tagged - Do we use tagged queueing (non-0) or do we treat 777 * queue up in non-tagged mode (as per cmd_per_lun).
719 * this device as an untagged device (0)
720 * tags - Number of tags allowed if tagged queueing enabled,
721 * or number of commands the low level driver can
722 * queue up in non-tagged mode (as per cmd_per_lun).
723 * 778 *
724 * Returns: Nothing 779 * Returns: Nothing
725 * 780 *
@@ -742,8 +797,8 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
742 797
743 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 798 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
744 799
745 /* Check to see if the queue is managed by the block layer 800 /* Check to see if the queue is managed by the block layer.
746 * if it is, and we fail to adjust the depth, exit */ 801 * If it is, and we fail to adjust the depth, exit. */
747 if (blk_queue_tagged(sdev->request_queue) && 802 if (blk_queue_tagged(sdev->request_queue) &&
748 blk_queue_resize_tags(sdev->request_queue, tags) != 0) 803 blk_queue_resize_tags(sdev->request_queue, tags) != 0)
749 goto out; 804 goto out;
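A low-level driver would typically call this from its slave_configure() callback; a hedged sketch (the callback name and depth value are illustrative only):

	static int example_slave_configure(struct scsi_device *sdev)
	{
		/* tagged queueing with depth 32 if supported, else untagged */
		if (sdev->tagged_supported)
			scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 32);
		else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return 0;
	}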
@@ -772,20 +827,17 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
772} 827}
773EXPORT_SYMBOL(scsi_adjust_queue_depth); 828EXPORT_SYMBOL(scsi_adjust_queue_depth);
774 829
775/* 830/**
776 * Function: scsi_track_queue_full() 831 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
832 * @sdev: SCSI Device in question
833 * @depth: Current number of outstanding SCSI commands on this device,
834 * not counting the one returned as QUEUE_FULL.
777 * 835 *
778 * Purpose: This function will track successive QUEUE_FULL events on a 836 * Description: This function will track successive QUEUE_FULL events on a
779 * specific SCSI device to determine if and when there is a 837 * specific SCSI device to determine if and when there is a
780 * need to adjust the queue depth on the device. 838 * need to adjust the queue depth on the device.
781 * 839 *
782 * Arguments: sdev - SCSI Device in question 840 * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth,
783 * depth - Current number of outstanding SCSI commands on
784 * this device, not counting the one returned as
785 * QUEUE_FULL.
786 *
787 * Returns: 0 - No change needed
788 * >0 - Adjust queue depth to this new depth
789 * -1 - Drop back to untagged operation using host->cmd_per_lun 841 * -1 - Drop back to untagged operation using host->cmd_per_lun
790 * as the untagged command depth 842 * as the untagged command depth
791 * 843 *
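Callers typically invoke this from a command completion path when a device returns QUEUE FULL status; a hedged fragment (the outstanding command count is assumed to be tracked by the driver):

	if (status_byte(cmd->result) == QUEUE_FULL) {
		int depth = scsi_track_queue_full(cmd->device, outstanding - 1);

		if (depth == -1)
			printk(KERN_INFO "dropping back to untagged operation\n");
		else if (depth > 0)
			printk(KERN_INFO "queue depth adjusted to %d\n", depth);
	}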
@@ -824,10 +876,10 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
824EXPORT_SYMBOL(scsi_track_queue_full); 876EXPORT_SYMBOL(scsi_track_queue_full);
825 877
826/** 878/**
827 * scsi_device_get - get an addition reference to a scsi_device 879 * scsi_device_get - get an additional reference to a scsi_device
828 * @sdev: device to get a reference to 880 * @sdev: device to get a reference to
829 * 881 *
830 * Gets a reference to the scsi_device and increments the use count 882 * Description: Gets a reference to the scsi_device and increments the use count
831 * of the underlying LLDD module. You must hold host_lock of the 883 * of the underlying LLDD module. You must hold host_lock of the
832 * parent Scsi_Host or already have a reference when calling this. 884 * parent Scsi_Host or already have a reference when calling this.
833 */ 885 */
@@ -849,8 +901,8 @@ EXPORT_SYMBOL(scsi_device_get);
849 * scsi_device_put - release a reference to a scsi_device 901 * scsi_device_put - release a reference to a scsi_device
850 * @sdev: device to release a reference on. 902 * @sdev: device to release a reference on.
851 * 903 *
852 * Release a reference to the scsi_device and decrements the use count 904 * Description: Release a reference to the scsi_device and decrements the use
853 * of the underlying LLDD module. The device is freed once the last 905 * count of the underlying LLDD module. The device is freed once the last
854 * user vanishes. 906 * user vanishes.
855 */ 907 */
856void scsi_device_put(struct scsi_device *sdev) 908void scsi_device_put(struct scsi_device *sdev)
@@ -867,7 +919,7 @@ void scsi_device_put(struct scsi_device *sdev)
867} 919}
868EXPORT_SYMBOL(scsi_device_put); 920EXPORT_SYMBOL(scsi_device_put);
869 921
870/* helper for shost_for_each_device, thus not documented */ 922/* helper for shost_for_each_device, see that for documentation */
871struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost, 923struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
872 struct scsi_device *prev) 924 struct scsi_device *prev)
873{ 925{
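A sketch of the get/put pairing described above (example_use_device() is a placeholder for the caller's own work); iterators such as shost_for_each_device() manage these references automatically:

	/* caller holds host_lock or already owns a reference here */
	if (scsi_device_get(sdev) == 0) {
		/* sdev is now pinned; safe to use after dropping host_lock */
		example_use_device(sdev);
		scsi_device_put(sdev);
	}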
@@ -895,6 +947,8 @@ EXPORT_SYMBOL(__scsi_iterate_devices);
895/** 947/**
896 * starget_for_each_device - helper to walk all devices of a target 948 * starget_for_each_device - helper to walk all devices of a target
897 * @starget: target whose devices we want to iterate over. 949 * @starget: target whose devices we want to iterate over.
 950 * @data: Opaque data passed to each function call.
951 * @fn: Function to call on each device
898 * 952 *
899 * This traverses over each device of @starget. The devices have 953 * This traverses over each device of @starget. The devices have
900 * a reference that must be released by scsi_host_put when breaking 954 * a reference that must be released by scsi_host_put when breaking
@@ -946,13 +1000,13 @@ EXPORT_SYMBOL(__starget_for_each_device);
946 * @starget: SCSI target pointer 1000 * @starget: SCSI target pointer
947 * @lun: SCSI Logical Unit Number 1001 * @lun: SCSI Logical Unit Number
948 * 1002 *
949 * Looks up the scsi_device with the specified @lun for a give 1003 * Description: Looks up the scsi_device with the specified @lun for a given
950 * @starget. The returned scsi_device does not have an additional 1004 * @starget. The returned scsi_device does not have an additional
951 * reference. You must hold the host's host_lock over this call and 1005 * reference. You must hold the host's host_lock over this call and
952 * any access to the returned scsi_device. 1006 * any access to the returned scsi_device.
953 * 1007 *
954 * Note: The only reason why drivers would want to use this is because 1008 * Note: The only reason why drivers should use this is because
955 * they're need to access the device list in irq context. Otherwise you 1009 * they need to access the device list in irq context. Otherwise you
956 * really want to use scsi_device_lookup_by_target instead. 1010 * really want to use scsi_device_lookup_by_target instead.
957 **/ 1011 **/
958struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, 1012struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
@@ -974,9 +1028,9 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target);
974 * @starget: SCSI target pointer 1028 * @starget: SCSI target pointer
975 * @lun: SCSI Logical Unit Number 1029 * @lun: SCSI Logical Unit Number
976 * 1030 *
977 * Looks up the scsi_device with the specified @channel, @id, @lun for a 1031 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
978 * give host. The returned scsi_device has an additional reference that 1032 * for a given host. The returned scsi_device has an additional reference that
979 * needs to be release with scsi_host_put once you're done with it. 1033 * needs to be released with scsi_device_put once you're done with it.
980 **/ 1034 **/
981struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, 1035struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
982 uint lun) 1036 uint lun)
@@ -996,19 +1050,19 @@ struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
996EXPORT_SYMBOL(scsi_device_lookup_by_target); 1050EXPORT_SYMBOL(scsi_device_lookup_by_target);
997 1051
998/** 1052/**
999 * scsi_device_lookup - find a device given the host (UNLOCKED) 1053 * __scsi_device_lookup - find a device given the host (UNLOCKED)
1000 * @shost: SCSI host pointer 1054 * @shost: SCSI host pointer
1001 * @channel: SCSI channel (zero if only one channel) 1055 * @channel: SCSI channel (zero if only one channel)
1002 * @pun: SCSI target number (physical unit number) 1056 * @id: SCSI target number (physical unit number)
1003 * @lun: SCSI Logical Unit Number 1057 * @lun: SCSI Logical Unit Number
1004 * 1058 *
1005 * Looks up the scsi_device with the specified @channel, @id, @lun for a 1059 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1006 * give host. The returned scsi_device does not have an additional reference. 1060 * for a given host. The returned scsi_device does not have an additional
1007 * You must hold the host's host_lock over this call and any access to the 1061 * reference. You must hold the host's host_lock over this call and any access
1008 * returned scsi_device. 1062 * to the returned scsi_device.
1009 * 1063 *
1010 * Note: The only reason why drivers would want to use this is because 1064 * Note: The only reason why drivers would want to use this is because
1011 * they're need to access the device list in irq context. Otherwise you 1065 * they need to access the device list in irq context. Otherwise you
1012 * really want to use scsi_device_lookup instead. 1066 * really want to use scsi_device_lookup instead.
1013 **/ 1067 **/
1014struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, 1068struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
@@ -1033,9 +1087,9 @@ EXPORT_SYMBOL(__scsi_device_lookup);
1033 * @id: SCSI target number (physical unit number) 1087 * @id: SCSI target number (physical unit number)
1034 * @lun: SCSI Logical Unit Number 1088 * @lun: SCSI Logical Unit Number
1035 * 1089 *
1036 * Looks up the scsi_device with the specified @channel, @id, @lun for a 1090 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1037 * give host. The returned scsi_device has an additional reference that 1091 * for a given host. The returned scsi_device has an additional reference that
1038 * needs to be release with scsi_host_put once you're done with it. 1092 * needs to be released with scsi_device_put once you're done with it.
1039 **/ 1093 **/
1040struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost, 1094struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
1041 uint channel, uint id, uint lun) 1095 uint channel, uint id, uint lun)
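Taken together: the locked __scsi_device_lookup* variants require the caller to hold host_lock and return an unreferenced pointer, while the unlocked variants take a reference that must be dropped with scsi_device_put(). A hedged sketch of the common case (the channel/id/lun values are arbitrary):

	struct scsi_device *sdev;

	sdev = scsi_device_lookup(shost, 0, 5, 0);
	if (sdev) {
		dev_info(&sdev->sdev_gendev, "found device\n");
		scsi_device_put(sdev);
	}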
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 46cae5a212de..82c06f0a9d02 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -329,7 +329,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
329 if (done == NULL) 329 if (done == NULL)
330 return 0; /* assume mid level reprocessing command */ 330 return 0; /* assume mid level reprocessing command */
331 331
332 SCpnt->resid = 0; 332 scsi_set_resid(SCpnt, 0);
333 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) { 333 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
334 printk(KERN_INFO "scsi_debug: cmd "); 334 printk(KERN_INFO "scsi_debug: cmd ");
335 for (k = 0, len = SCpnt->cmd_len; k < len; ++k) 335 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
@@ -603,26 +603,16 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
603 void * kaddr_off; 603 void * kaddr_off;
604 struct scatterlist * sg; 604 struct scatterlist * sg;
605 605
606 if (0 == scp->request_bufflen) 606 if (0 == scsi_bufflen(scp))
607 return 0; 607 return 0;
608 if (NULL == scp->request_buffer) 608 if (NULL == scsi_sglist(scp))
609 return (DID_ERROR << 16); 609 return (DID_ERROR << 16);
610 if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) || 610 if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) ||
611 (scp->sc_data_direction == DMA_FROM_DEVICE))) 611 (scp->sc_data_direction == DMA_FROM_DEVICE)))
612 return (DID_ERROR << 16); 612 return (DID_ERROR << 16);
613 if (0 == scp->use_sg) {
614 req_len = scp->request_bufflen;
615 act_len = (req_len < arr_len) ? req_len : arr_len;
616 memcpy(scp->request_buffer, arr, act_len);
617 if (scp->resid)
618 scp->resid -= act_len;
619 else
620 scp->resid = req_len - act_len;
621 return 0;
622 }
623 active = 1; 613 active = 1;
624 req_len = act_len = 0; 614 req_len = act_len = 0;
625 scsi_for_each_sg(scp, sg, scp->use_sg, k) { 615 scsi_for_each_sg(scp, sg, scsi_sg_count(scp), k) {
626 if (active) { 616 if (active) {
627 kaddr = (unsigned char *) 617 kaddr = (unsigned char *)
628 kmap_atomic(sg_page(sg), KM_USER0); 618 kmap_atomic(sg_page(sg), KM_USER0);
@@ -640,10 +630,10 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
640 } 630 }
641 req_len += sg->length; 631 req_len += sg->length;
642 } 632 }
643 if (scp->resid) 633 if (scsi_get_resid(scp))
644 scp->resid -= act_len; 634 scsi_set_resid(scp, scsi_get_resid(scp) - act_len);
645 else 635 else
646 scp->resid = req_len - act_len; 636 scsi_set_resid(scp, req_len - act_len);
647 return 0; 637 return 0;
648} 638}
649 639
@@ -656,22 +646,15 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
656 void * kaddr_off; 646 void * kaddr_off;
657 struct scatterlist * sg; 647 struct scatterlist * sg;
658 648
659 if (0 == scp->request_bufflen) 649 if (0 == scsi_bufflen(scp))
660 return 0; 650 return 0;
661 if (NULL == scp->request_buffer) 651 if (NULL == scsi_sglist(scp))
662 return -1; 652 return -1;
663 if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) || 653 if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) ||
664 (scp->sc_data_direction == DMA_TO_DEVICE))) 654 (scp->sc_data_direction == DMA_TO_DEVICE)))
665 return -1; 655 return -1;
666 if (0 == scp->use_sg) {
667 req_len = scp->request_bufflen;
668 len = (req_len < max_arr_len) ? req_len : max_arr_len;
669 memcpy(arr, scp->request_buffer, len);
670 return len;
671 }
672 sg = scsi_sglist(scp);
673 req_len = fin = 0; 656 req_len = fin = 0;
674 for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) { 657 scsi_for_each_sg(scp, sg, scsi_sg_count(scp), k) {
675 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0); 658 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
676 if (NULL == kaddr) 659 if (NULL == kaddr)
677 return -1; 660 return -1;
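The conversions above drop the non-scatter/gather branch entirely: every command's data buffer is now reached through the scsi_sglist()/scsi_sg_count()/scsi_for_each_sg() accessors. A minimal sketch of the resulting idiom (the function name is hypothetical):

	static void example_copy_from_cmd(struct scsi_cmnd *scp,
					  unsigned char *buf, int buflen)
	{
		struct scatterlist *sg;
		int k, copied = 0;

		scsi_for_each_sg(scp, sg, scsi_sg_count(scp), k) {
			unsigned char *kaddr = kmap_atomic(sg_page(sg), KM_USER0);
			int len = min_t(int, sg->length, buflen - copied);

			memcpy(buf + copied, kaddr + sg->offset, len);
			kunmap_atomic(kaddr, KM_USER0);
			copied += len;
			if (copied >= buflen)
				break;
		}
	}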
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 348cc5a6e3cd..b8de041bc0ae 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -276,11 +276,12 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
276} 276}
277 277
278/** 278/**
279 * scsi_dev_info_list_add: add one dev_info list entry. 279 * scsi_dev_info_list_add - add one dev_info list entry.
280 * @compatible: if true, null terminate short strings. Otherwise space pad.
280 * @vendor: vendor string 281 * @vendor: vendor string
281 * @model: model (product) string 282 * @model: model (product) string
282 * @strflags: integer string 283 * @strflags: integer string
283 * @flag: if strflags NULL, use this flag value 284 * @flags: if strflags NULL, use this flag value
284 * 285 *
285 * Description: 286 * Description:
286 * Create and add one dev_info entry for @vendor, @model, @strflags or 287 * Create and add one dev_info entry for @vendor, @model, @strflags or
@@ -322,8 +323,7 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
322} 323}
323 324
324/** 325/**
325 * scsi_dev_info_list_add_str: parse dev_list and add to the 326 * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
326 * scsi_dev_info_list.
327 * @dev_list: string of device flags to add 327 * @dev_list: string of device flags to add
328 * 328 *
329 * Description: 329 * Description:
@@ -374,15 +374,15 @@ static int scsi_dev_info_list_add_str(char *dev_list)
374} 374}
375 375
376/** 376/**
377 * get_device_flags - get device specific flags from the dynamic device 377 * get_device_flags - get device specific flags from the dynamic device list.
378 * list. Called during scan time. 378 * @sdev: &scsi_device to get flags for
379 * @vendor: vendor name 379 * @vendor: vendor name
380 * @model: model name 380 * @model: model name
381 * 381 *
382 * Description: 382 * Description:
383 * Search the scsi_dev_info_list for an entry matching @vendor and 383 * Search the scsi_dev_info_list for an entry matching @vendor and
384 * @model, if found, return the matching flags value, else return 384 * @model, if found, return the matching flags value, else return
385 * the host or global default settings. 385 * the host or global default settings. Called during scan time.
386 **/ 386 **/
387int scsi_get_device_flags(struct scsi_device *sdev, 387int scsi_get_device_flags(struct scsi_device *sdev,
388 const unsigned char *vendor, 388 const unsigned char *vendor,
@@ -483,13 +483,11 @@ stop_output:
483} 483}
484 484
485/* 485/*
 486 * proc_scsi_dev_info_write: allow additions to the scsi_dev_info_list via 486 * proc_scsi_devinfo_write - allow additions to scsi_dev_info_list via /proc.
487 * /proc.
488 * 487 *
489 * Use: echo "vendor:model:flag" > /proc/scsi/device_info 488 * Description: Adds a black/white list entry for vendor and model with an
490 * 489 * integer value of flag to the scsi device info list.
491 * To add a black/white list entry for vendor and model with an integer 490 * To use, echo "vendor:model:flag" > /proc/scsi/device_info
492 * value of flag to the scsi device info list.
493 */ 491 */
494static int proc_scsi_devinfo_write(struct file *file, const char __user *buf, 492static int proc_scsi_devinfo_write(struct file *file, const char __user *buf,
495 unsigned long length, void *data) 493 unsigned long length, void *data)
@@ -532,8 +530,7 @@ MODULE_PARM_DESC(default_dev_flags,
532 "scsi default device flag integer value"); 530 "scsi default device flag integer value");
533 531
534/** 532/**
535 * scsi_dev_info_list_delete: called from scsi.c:exit_scsi to remove 533 * scsi_dev_info_list_delete - called from scsi.c:exit_scsi to remove the scsi_dev_info_list.
536 * the scsi_dev_info_list.
537 **/ 534 **/
538void scsi_exit_devinfo(void) 535void scsi_exit_devinfo(void)
539{ 536{
@@ -552,13 +549,12 @@ void scsi_exit_devinfo(void)
552} 549}
553 550
554/** 551/**
555 * scsi_dev_list_init: set up the dynamic device list. 552 * scsi_init_devinfo - set up the dynamic device list.
556 * @dev_list: string of device flags to add
557 * 553 *
558 * Description: 554 * Description:
559 * Add command line @dev_list entries, then add 555 * Add command line entries from scsi_dev_flags, then add
560 * scsi_static_device_list entries to the scsi device info list. 556 * scsi_static_device_list entries to the scsi device info list.
561 **/ 557 */
562int __init scsi_init_devinfo(void) 558int __init scsi_init_devinfo(void)
563{ 559{
564#ifdef CONFIG_SCSI_PROC_FS 560#ifdef CONFIG_SCSI_PROC_FS
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ebaca4ca4a13..547e85aa414f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -62,7 +62,7 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
62 * @shost: SCSI host to invoke error handling on. 62 * @shost: SCSI host to invoke error handling on.
63 * 63 *
64 * Schedule SCSI EH without scmd. 64 * Schedule SCSI EH without scmd.
65 **/ 65 */
66void scsi_schedule_eh(struct Scsi_Host *shost) 66void scsi_schedule_eh(struct Scsi_Host *shost)
67{ 67{
68 unsigned long flags; 68 unsigned long flags;
@@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(scsi_schedule_eh);
86 * 86 *
87 * Return value: 87 * Return value:
88 * 0 on failure. 88 * 0 on failure.
89 **/ 89 */
90int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag) 90int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
91{ 91{
92 struct Scsi_Host *shost = scmd->device->host; 92 struct Scsi_Host *shost = scmd->device->host;
@@ -121,7 +121,7 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
121 * This should be turned into an inline function. Each scsi command 121 * This should be turned into an inline function. Each scsi command
122 * has its own timer, and as it is added to the queue, we set up the 122 * has its own timer, and as it is added to the queue, we set up the
123 * timer. When the command completes, we cancel the timer. 123 * timer. When the command completes, we cancel the timer.
124 **/ 124 */
125void scsi_add_timer(struct scsi_cmnd *scmd, int timeout, 125void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
126 void (*complete)(struct scsi_cmnd *)) 126 void (*complete)(struct scsi_cmnd *))
127{ 127{
@@ -155,7 +155,7 @@ void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
155 * Return value: 155 * Return value:
156 * 1 if we were able to detach the timer. 0 if we blew it, and the 156 * 1 if we were able to detach the timer. 0 if we blew it, and the
157 * timer function has already started to run. 157 * timer function has already started to run.
158 **/ 158 */
159int scsi_delete_timer(struct scsi_cmnd *scmd) 159int scsi_delete_timer(struct scsi_cmnd *scmd)
160{ 160{
161 int rtn; 161 int rtn;
@@ -181,7 +181,7 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
181 * only in that the normal completion handling might run, but if the 181 * only in that the normal completion handling might run, but if the
182 * normal completion function determines that the timer has already 182 * normal completion function determines that the timer has already
183 * fired, then it mustn't do anything. 183 * fired, then it mustn't do anything.
184 **/ 184 */
185void scsi_times_out(struct scsi_cmnd *scmd) 185void scsi_times_out(struct scsi_cmnd *scmd)
186{ 186{
187 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); 187 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
@@ -224,7 +224,7 @@ void scsi_times_out(struct scsi_cmnd *scmd)
224 * 224 *
225 * Return value: 225 * Return value:
226 * 0 when dev was taken offline by error recovery. 1 OK to proceed. 226 * 0 when dev was taken offline by error recovery. 1 OK to proceed.
227 **/ 227 */
228int scsi_block_when_processing_errors(struct scsi_device *sdev) 228int scsi_block_when_processing_errors(struct scsi_device *sdev)
229{ 229{
230 int online; 230 int online;
@@ -245,7 +245,7 @@ EXPORT_SYMBOL(scsi_block_when_processing_errors);
245 * scsi_eh_prt_fail_stats - Log info on failures. 245 * scsi_eh_prt_fail_stats - Log info on failures.
246 * @shost: scsi host being recovered. 246 * @shost: scsi host being recovered.
247 * @work_q: Queue of scsi cmds to process. 247 * @work_q: Queue of scsi cmds to process.
248 **/ 248 */
249static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, 249static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
250 struct list_head *work_q) 250 struct list_head *work_q)
251{ 251{
@@ -295,7 +295,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
295 * Notes: 295 * Notes:
296 * When a deferred error is detected the current command has 296 * When a deferred error is detected the current command has
297 * not been executed and needs retrying. 297 * not been executed and needs retrying.
298 **/ 298 */
299static int scsi_check_sense(struct scsi_cmnd *scmd) 299static int scsi_check_sense(struct scsi_cmnd *scmd)
300{ 300{
301 struct scsi_sense_hdr sshdr; 301 struct scsi_sense_hdr sshdr;
@@ -398,7 +398,7 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
 398 * queued during error recovery. The main difference here is that we 398 * queued during error recovery. The main difference here is that we
399 * don't allow for the possibility of retries here, and we are a lot 399 * don't allow for the possibility of retries here, and we are a lot
400 * more restrictive about what we consider acceptable. 400 * more restrictive about what we consider acceptable.
401 **/ 401 */
402static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) 402static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
403{ 403{
404 /* 404 /*
@@ -452,7 +452,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
452/** 452/**
453 * scsi_eh_done - Completion function for error handling. 453 * scsi_eh_done - Completion function for error handling.
454 * @scmd: Cmd that is done. 454 * @scmd: Cmd that is done.
455 **/ 455 */
456static void scsi_eh_done(struct scsi_cmnd *scmd) 456static void scsi_eh_done(struct scsi_cmnd *scmd)
457{ 457{
458 struct completion *eh_action; 458 struct completion *eh_action;
@@ -469,7 +469,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
469/** 469/**
470 * scsi_try_host_reset - ask host adapter to reset itself 470 * scsi_try_host_reset - ask host adapter to reset itself
 471 * @scmd: SCSI cmd to send host reset. 471 * @scmd: SCSI cmd to send host reset.
472 **/ 472 */
473static int scsi_try_host_reset(struct scsi_cmnd *scmd) 473static int scsi_try_host_reset(struct scsi_cmnd *scmd)
474{ 474{
475 unsigned long flags; 475 unsigned long flags;
@@ -498,7 +498,7 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd)
498/** 498/**
499 * scsi_try_bus_reset - ask host to perform a bus reset 499 * scsi_try_bus_reset - ask host to perform a bus reset
500 * @scmd: SCSI cmd to send bus reset. 500 * @scmd: SCSI cmd to send bus reset.
501 **/ 501 */
502static int scsi_try_bus_reset(struct scsi_cmnd *scmd) 502static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
503{ 503{
504 unsigned long flags; 504 unsigned long flags;
@@ -533,7 +533,7 @@ static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
533 * unreliable for a given host, then the host itself needs to put a 533 * unreliable for a given host, then the host itself needs to put a
534 * timer on it, and set the host back to a consistent state prior to 534 * timer on it, and set the host back to a consistent state prior to
535 * returning. 535 * returning.
536 **/ 536 */
537static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) 537static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
538{ 538{
539 int rtn; 539 int rtn;
@@ -568,7 +568,7 @@ static int __scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
568 * author of the low-level driver wishes this operation to be timed, 568 * author of the low-level driver wishes this operation to be timed,
569 * they can provide this facility themselves. helper functions in 569 * they can provide this facility themselves. helper functions in
570 * scsi_error.c can be supplied to make this easier to do. 570 * scsi_error.c can be supplied to make this easier to do.
571 **/ 571 */
572static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) 572static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
573{ 573{
574 /* 574 /*
@@ -601,7 +601,7 @@ static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
601 * sent must be one that does not transfer any data. If @sense_bytes != 0 601 * sent must be one that does not transfer any data. If @sense_bytes != 0
 602 * @cmnd is ignored and this function sets up a REQUEST_SENSE command 602 * @cmnd is ignored and this function sets up a REQUEST_SENSE command
603 * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer. 603 * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer.
604 **/ 604 */
605void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, 605void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
606 unsigned char *cmnd, int cmnd_size, unsigned sense_bytes) 606 unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
607{ 607{
@@ -625,7 +625,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
625 625
626 if (sense_bytes) { 626 if (sense_bytes) {
627 scmd->request_bufflen = min_t(unsigned, 627 scmd->request_bufflen = min_t(unsigned,
628 sizeof(scmd->sense_buffer), sense_bytes); 628 SCSI_SENSE_BUFFERSIZE, sense_bytes);
629 sg_init_one(&ses->sense_sgl, scmd->sense_buffer, 629 sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
630 scmd->request_bufflen); 630 scmd->request_bufflen);
631 scmd->request_buffer = &ses->sense_sgl; 631 scmd->request_buffer = &ses->sense_sgl;
@@ -657,7 +657,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
657 * Zero the sense buffer. The scsi spec mandates that any 657 * Zero the sense buffer. The scsi spec mandates that any
658 * untransferred sense data should be interpreted as being zero. 658 * untransferred sense data should be interpreted as being zero.
659 */ 659 */
660 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); 660 memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
661} 661}
662EXPORT_SYMBOL(scsi_eh_prep_cmnd); 662EXPORT_SYMBOL(scsi_eh_prep_cmnd);
663 663
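The documented contract is a strict save/issue/restore bracket; a hedged sketch of reusing a failed command for sense retrieval, as scsi_send_eh_cmnd() does when @sense_bytes is nonzero:

	struct scsi_eh_save ses;

	scsi_eh_prep_cmnd(scmd, &ses, NULL, 0, SCSI_SENSE_BUFFERSIZE);
	/* ... issue the REQUEST SENSE and wait for scsi_eh_done() ... */
	scsi_eh_restore_cmnd(scmd, &ses);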
@@ -667,7 +667,7 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
 667 * @ses: saved information from a corresponding call to scsi_eh_prep_cmnd 667 * @ses: saved information from a corresponding call to scsi_eh_prep_cmnd
668 * 668 *
 669 * Undo any damage done by the above scsi_eh_prep_cmnd(). 669 * Undo any damage done by the above scsi_eh_prep_cmnd().
670 **/ 670 */
671void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) 671void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
672{ 672{
673 /* 673 /*
@@ -697,7 +697,7 @@ EXPORT_SYMBOL(scsi_eh_restore_cmnd);
697 * 697 *
698 * Return value: 698 * Return value:
699 * SUCCESS or FAILED or NEEDS_RETRY 699 * SUCCESS or FAILED or NEEDS_RETRY
700 **/ 700 */
701static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, 701static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
702 int cmnd_size, int timeout, unsigned sense_bytes) 702 int cmnd_size, int timeout, unsigned sense_bytes)
703{ 703{
@@ -765,7 +765,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
765 * Some hosts automatically obtain this information, others require 765 * Some hosts automatically obtain this information, others require
766 * that we obtain it on our own. This function will *not* return until 766 * that we obtain it on our own. This function will *not* return until
767 * the command either times out, or it completes. 767 * the command either times out, or it completes.
768 **/ 768 */
769static int scsi_request_sense(struct scsi_cmnd *scmd) 769static int scsi_request_sense(struct scsi_cmnd *scmd)
770{ 770{
771 return scsi_send_eh_cmnd(scmd, NULL, 0, SENSE_TIMEOUT, ~0); 771 return scsi_send_eh_cmnd(scmd, NULL, 0, SENSE_TIMEOUT, ~0);
@@ -779,10 +779,10 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
779 * Notes: 779 * Notes:
 780 * We don't want to use the normal command completion while we are 780 * We don't want to use the normal command completion while we are
781 * still handling errors - it may cause other commands to be queued, 781 * still handling errors - it may cause other commands to be queued,
782 * and that would disturb what we are doing. thus we really want to 782 * and that would disturb what we are doing. Thus we really want to
783 * keep a list of pending commands for final completion, and once we 783 * keep a list of pending commands for final completion, and once we
784 * are ready to leave error handling we handle completion for real. 784 * are ready to leave error handling we handle completion for real.
785 **/ 785 */
786void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) 786void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
787{ 787{
788 scmd->device->host->host_failed--; 788 scmd->device->host->host_failed--;
@@ -794,7 +794,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
794/** 794/**
795 * scsi_eh_get_sense - Get device sense data. 795 * scsi_eh_get_sense - Get device sense data.
796 * @work_q: Queue of commands to process. 796 * @work_q: Queue of commands to process.
797 * @done_q: Queue of proccessed commands.. 797 * @done_q: Queue of processed commands.
798 * 798 *
799 * Description: 799 * Description:
 800 * See if we need to request sense information. If so, then get it 800 * See if we need to request sense information. If so, then get it
@@ -802,7 +802,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
802 * 802 *
803 * Notes: 803 * Notes:
804 * This has the unfortunate side effect that if a shost adapter does 804 * This has the unfortunate side effect that if a shost adapter does
805 * not automatically request sense information, that we end up shutting 805 * not automatically request sense information, we end up shutting
806 * it down before we request it. 806 * it down before we request it.
807 * 807 *
808 * All drivers should request sense information internally these days, 808 * All drivers should request sense information internally these days,
@@ -810,7 +810,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
810 * 810 *
811 * XXX: Long term this code should go away, but that needs an audit of 811 * XXX: Long term this code should go away, but that needs an audit of
812 * all LLDDs first. 812 * all LLDDs first.
813 **/ 813 */
814int scsi_eh_get_sense(struct list_head *work_q, 814int scsi_eh_get_sense(struct list_head *work_q,
815 struct list_head *done_q) 815 struct list_head *done_q)
816{ 816{
@@ -858,11 +858,11 @@ EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
858 858
859/** 859/**
860 * scsi_eh_tur - Send TUR to device. 860 * scsi_eh_tur - Send TUR to device.
861 * @scmd: Scsi cmd to send TUR 861 * @scmd: &scsi_cmnd to send TUR
862 * 862 *
863 * Return value: 863 * Return value:
864 * 0 - Device is ready. 1 - Device NOT ready. 864 * 0 - Device is ready. 1 - Device NOT ready.
865 **/ 865 */
866static int scsi_eh_tur(struct scsi_cmnd *scmd) 866static int scsi_eh_tur(struct scsi_cmnd *scmd)
867{ 867{
868 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; 868 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
@@ -887,17 +887,17 @@ retry_tur:
887} 887}
888 888
889/** 889/**
890 * scsi_eh_abort_cmds - abort canceled commands. 890 * scsi_eh_abort_cmds - abort pending commands.
891 * @shost: scsi host being recovered. 891 * @work_q: &list_head for pending commands.
892 * @eh_done_q: list_head for processed commands. 892 * @done_q: &list_head for processed commands.
893 * 893 *
 894 * Description: 894 * Description:
895 * Try and see whether or not it makes sense to try and abort the 895 * Try and see whether or not it makes sense to try and abort the
896 * running command. this only works out to be the case if we have one 896 * running command. This only works out to be the case if we have one
897 * command that has timed out. if the command simply failed, it makes 897 * command that has timed out. If the command simply failed, it makes
898 * no sense to try and abort the command, since as far as the shost 898 * no sense to try and abort the command, since as far as the shost
899 * adapter is concerned, it isn't running. 899 * adapter is concerned, it isn't running.
900 **/ 900 */
901static int scsi_eh_abort_cmds(struct list_head *work_q, 901static int scsi_eh_abort_cmds(struct list_head *work_q,
902 struct list_head *done_q) 902 struct list_head *done_q)
903{ 903{
@@ -931,11 +931,11 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
931 931
932/** 932/**
933 * scsi_eh_try_stu - Send START_UNIT to device. 933 * scsi_eh_try_stu - Send START_UNIT to device.
934 * @scmd: Scsi cmd to send START_UNIT 934 * @scmd: &scsi_cmnd to send START_UNIT
935 * 935 *
936 * Return value: 936 * Return value:
937 * 0 - Device is ready. 1 - Device NOT ready. 937 * 0 - Device is ready. 1 - Device NOT ready.
938 **/ 938 */
939static int scsi_eh_try_stu(struct scsi_cmnd *scmd) 939static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
940{ 940{
941 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; 941 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
@@ -956,13 +956,14 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
956 956
957 /** 957 /**
958 * scsi_eh_stu - send START_UNIT if needed 958 * scsi_eh_stu - send START_UNIT if needed
959 * @shost: scsi host being recovered. 959 * @shost: &scsi host being recovered.
960 * @eh_done_q: list_head for processed commands. 960 * @work_q: &list_head for pending commands.
961 * @done_q: &list_head for processed commands.
961 * 962 *
962 * Notes: 963 * Notes:
963 * If commands are failing due to not ready, initializing command required, 964 * If commands are failing due to not ready, initializing command required,
964 * try revalidating the device, which will end up sending a start unit. 965 * try revalidating the device, which will end up sending a start unit.
965 **/ 966 */
966static int scsi_eh_stu(struct Scsi_Host *shost, 967static int scsi_eh_stu(struct Scsi_Host *shost,
967 struct list_head *work_q, 968 struct list_head *work_q,
968 struct list_head *done_q) 969 struct list_head *done_q)
@@ -1008,14 +1009,15 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
1008/** 1009/**
1009 * scsi_eh_bus_device_reset - send bdr if needed 1010 * scsi_eh_bus_device_reset - send bdr if needed
1010 * @shost: scsi host being recovered. 1011 * @shost: scsi host being recovered.
1011 * @eh_done_q: list_head for processed commands. 1012 * @work_q: &list_head for pending commands.
1013 * @done_q: &list_head for processed commands.
1012 * 1014 *
1013 * Notes: 1015 * Notes:
1014 * Try a bus device reset. still, look to see whether we have multiple 1016 * Try a bus device reset. Still, look to see whether we have multiple
1015 * devices that are jammed or not - if we have multiple devices, it 1017 * devices that are jammed or not - if we have multiple devices, it
1016 * makes no sense to try bus_device_reset - we really would need to try 1018 * makes no sense to try bus_device_reset - we really would need to try
1017 * a bus_reset instead. 1019 * a bus_reset instead.
1018 **/ 1020 */
1019static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, 1021static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1020 struct list_head *work_q, 1022 struct list_head *work_q,
1021 struct list_head *done_q) 1023 struct list_head *done_q)
@@ -1063,9 +1065,10 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1063 1065
1064/** 1066/**
1065 * scsi_eh_bus_reset - send a bus reset 1067 * scsi_eh_bus_reset - send a bus reset
1066 * @shost: scsi host being recovered. 1068 * @shost: &scsi host being recovered.
1067 * @eh_done_q: list_head for processed commands. 1069 * @work_q: &list_head for pending commands.
1068 **/ 1070 * @done_q: &list_head for processed commands.
1071 */
1069static int scsi_eh_bus_reset(struct Scsi_Host *shost, 1072static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1070 struct list_head *work_q, 1073 struct list_head *work_q,
1071 struct list_head *done_q) 1074 struct list_head *done_q)
@@ -1122,7 +1125,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1122 * scsi_eh_host_reset - send a host reset 1125 * scsi_eh_host_reset - send a host reset
1123 * @work_q: list_head for processed commands. 1126 * @work_q: list_head for processed commands.
1124 * @done_q: list_head for processed commands. 1127 * @done_q: list_head for processed commands.
1125 **/ 1128 */
1126static int scsi_eh_host_reset(struct list_head *work_q, 1129static int scsi_eh_host_reset(struct list_head *work_q,
1127 struct list_head *done_q) 1130 struct list_head *done_q)
1128{ 1131{
@@ -1157,8 +1160,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
1157 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover 1160 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
1158 * @work_q: list_head for processed commands. 1161 * @work_q: list_head for processed commands.
1159 * @done_q: list_head for processed commands. 1162 * @done_q: list_head for processed commands.
1160 * 1163 */
1161 **/
1162static void scsi_eh_offline_sdevs(struct list_head *work_q, 1164static void scsi_eh_offline_sdevs(struct list_head *work_q,
1163 struct list_head *done_q) 1165 struct list_head *done_q)
1164{ 1166{
@@ -1191,7 +1193,7 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
1191 * is woken. In cases where the error code indicates an error that 1193 * is woken. In cases where the error code indicates an error that
1192 * doesn't require the error handler read (i.e. we don't need to 1194 * doesn't require the error handler read (i.e. we don't need to
1193 * abort/reset), this function should return SUCCESS. 1195 * abort/reset), this function should return SUCCESS.
1194 **/ 1196 */
1195int scsi_decide_disposition(struct scsi_cmnd *scmd) 1197int scsi_decide_disposition(struct scsi_cmnd *scmd)
1196{ 1198{
1197 int rtn; 1199 int rtn;
@@ -1372,7 +1374,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1372 * 1374 *
1373 * If scsi_allocate_request() fails for what ever reason, we 1375 * If scsi_allocate_request() fails for what ever reason, we
1374 * completely forget to lock the door. 1376 * completely forget to lock the door.
1375 **/ 1377 */
1376static void scsi_eh_lock_door(struct scsi_device *sdev) 1378static void scsi_eh_lock_door(struct scsi_device *sdev)
1377{ 1379{
1378 unsigned char cmnd[MAX_COMMAND_SIZE]; 1380 unsigned char cmnd[MAX_COMMAND_SIZE];
@@ -1396,7 +1398,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
1396 * Notes: 1398 * Notes:
1397 * When we entered the error handler, we blocked all further i/o to 1399 * When we entered the error handler, we blocked all further i/o to
 1398 * this device. We need to 'reverse' this process. 1400 * this device. We need to 'reverse' this process.
1399 **/ 1401 */
1400static void scsi_restart_operations(struct Scsi_Host *shost) 1402static void scsi_restart_operations(struct Scsi_Host *shost)
1401{ 1403{
1402 struct scsi_device *sdev; 1404 struct scsi_device *sdev;
@@ -1440,9 +1442,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
1440/** 1442/**
1441 * scsi_eh_ready_devs - check device ready state and recover if not. 1443 * scsi_eh_ready_devs - check device ready state and recover if not.
1442 * @shost: host to be recovered. 1444 * @shost: host to be recovered.
1443 * @eh_done_q: list_head for processed commands. 1445 * @work_q: &list_head for pending commands.
1444 * 1446 * @done_q: &list_head for processed commands.
1445 **/ 1447 */
1446void scsi_eh_ready_devs(struct Scsi_Host *shost, 1448void scsi_eh_ready_devs(struct Scsi_Host *shost,
1447 struct list_head *work_q, 1449 struct list_head *work_q,
1448 struct list_head *done_q) 1450 struct list_head *done_q)
@@ -1458,8 +1460,7 @@ EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
1458/** 1460/**
1459 * scsi_eh_flush_done_q - finish processed commands or retry them. 1461 * scsi_eh_flush_done_q - finish processed commands or retry them.
1460 * @done_q: list_head of processed commands. 1462 * @done_q: list_head of processed commands.
1461 * 1463 */
1462 **/
1463void scsi_eh_flush_done_q(struct list_head *done_q) 1464void scsi_eh_flush_done_q(struct list_head *done_q)
1464{ 1465{
1465 struct scsi_cmnd *scmd, *next; 1466 struct scsi_cmnd *scmd, *next;
@@ -1513,7 +1514,7 @@ EXPORT_SYMBOL(scsi_eh_flush_done_q);
 1513 * scsi_finish_cmd() called for it. We do all of the retry stuff 1514 * scsi_finish_cmd() called for it. We do all of the retry stuff
1514 * here, so when we restart the host after we return it should have an 1515 * here, so when we restart the host after we return it should have an
1515 * empty queue. 1516 * empty queue.
1516 **/ 1517 */
1517static void scsi_unjam_host(struct Scsi_Host *shost) 1518static void scsi_unjam_host(struct Scsi_Host *shost)
1518{ 1519{
1519 unsigned long flags; 1520 unsigned long flags;
@@ -1540,7 +1541,7 @@ static void scsi_unjam_host(struct Scsi_Host *shost)
1540 * Notes: 1541 * Notes:
1541 * This is the main error handling loop. This is run as a kernel thread 1542 * This is the main error handling loop. This is run as a kernel thread
1542 * for every SCSI host and handles all error handling activity. 1543 * for every SCSI host and handles all error handling activity.
1543 **/ 1544 */
1544int scsi_error_handler(void *data) 1545int scsi_error_handler(void *data)
1545{ 1546{
1546 struct Scsi_Host *shost = data; 1547 struct Scsi_Host *shost = data;
@@ -1769,7 +1770,7 @@ EXPORT_SYMBOL(scsi_reset_provider);
1769 * 1770 *
1770 * Return value: 1771 * Return value:
1771 * 1 if valid sense data information found, else 0; 1772 * 1 if valid sense data information found, else 0;
1772 **/ 1773 */
1773int scsi_normalize_sense(const u8 *sense_buffer, int sb_len, 1774int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
1774 struct scsi_sense_hdr *sshdr) 1775 struct scsi_sense_hdr *sshdr)
1775{ 1776{
@@ -1819,14 +1820,12 @@ int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
1819 struct scsi_sense_hdr *sshdr) 1820 struct scsi_sense_hdr *sshdr)
1820{ 1821{
1821 return scsi_normalize_sense(cmd->sense_buffer, 1822 return scsi_normalize_sense(cmd->sense_buffer,
1822 sizeof(cmd->sense_buffer), sshdr); 1823 SCSI_SENSE_BUFFERSIZE, sshdr);
1823} 1824}
1824EXPORT_SYMBOL(scsi_command_normalize_sense); 1825EXPORT_SYMBOL(scsi_command_normalize_sense);
1825 1826
1826/** 1827/**
1827 * scsi_sense_desc_find - search for a given descriptor type in 1828 * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format.
1828 * descriptor sense data format.
1829 *
1830 * @sense_buffer: byte array of descriptor format sense data 1829 * @sense_buffer: byte array of descriptor format sense data
1831 * @sb_len: number of valid bytes in sense_buffer 1830 * @sb_len: number of valid bytes in sense_buffer
1832 * @desc_type: value of descriptor type to find 1831 * @desc_type: value of descriptor type to find
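For the sense normalization helpers above, a hedged sketch of a caller decoding a completed command's sense data and branching on the key:

	struct scsi_sense_hdr sshdr;

	if (scsi_command_normalize_sense(cmd, &sshdr) &&
	    sshdr.sense_key == UNIT_ATTENTION)
		printk(KERN_INFO "unit attention: asc 0x%x ascq 0x%x\n",
		       sshdr.asc, sshdr.ascq);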
@@ -1837,7 +1836,7 @@ EXPORT_SYMBOL(scsi_command_normalize_sense);
1837 * 1836 *
1838 * Return value: 1837 * Return value:
1839 * pointer to start of (first) descriptor if found else NULL 1838 * pointer to start of (first) descriptor if found else NULL
1840 **/ 1839 */
1841const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len, 1840const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
1842 int desc_type) 1841 int desc_type)
1843{ 1842{
@@ -1865,9 +1864,7 @@ const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
1865EXPORT_SYMBOL(scsi_sense_desc_find); 1864EXPORT_SYMBOL(scsi_sense_desc_find);
1866 1865
1867/** 1866/**
1868 * scsi_get_sense_info_fld - attempts to get information field from 1867 * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format)
1869 * sense data (either fixed or descriptor format)
1870 *
1871 * @sense_buffer: byte array of sense data 1868 * @sense_buffer: byte array of sense data
1872 * @sb_len: number of valid bytes in sense_buffer 1869 * @sb_len: number of valid bytes in sense_buffer
 1873 * @info_out: pointer to a 64 bit integer where the 8 or 4 byte information 1870 * @info_out: pointer to a 64 bit integer where the 8 or 4 byte information
@@ -1875,7 +1872,7 @@ EXPORT_SYMBOL(scsi_sense_desc_find);
1875 * 1872 *
1876 * Return value: 1873 * Return value:
1877 * 1 if information field found, 0 if not found. 1874 * 1 if information field found, 0 if not found.
1878 **/ 1875 */
1879int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, 1876int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
1880 u64 * info_out) 1877 u64 * info_out)
1881{ 1878{
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 32293f451669..28b19ef26309 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -174,10 +174,15 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
174} 174}
175 175
176 176
177/* 177/**
178 * the scsi_ioctl() function differs from most ioctls in that it does 178 * scsi_ioctl - Dispatch ioctl to scsi device
179 * not take a major/minor number as the dev field. Rather, it takes 179 * @sdev: scsi device receiving ioctl
 180 a pointer to a scsi_devices[] element, a structure. 180 * @cmd: ioctl command number
181 * @arg: data associated with ioctl
182 *
183 * Description: The scsi_ioctl() function differs from most ioctls in that it
184 * does not take a major/minor number as the dev field. Rather, it takes
185 * a pointer to a &struct scsi_device.
181 */ 186 */
182int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 187int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
183{ 188{
@@ -239,7 +244,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
239 return scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); 244 return scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
240 case SCSI_IOCTL_TEST_UNIT_READY: 245 case SCSI_IOCTL_TEST_UNIT_READY:
241 return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT, 246 return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT,
242 NORMAL_RETRIES); 247 NORMAL_RETRIES, NULL);
243 case SCSI_IOCTL_START_UNIT: 248 case SCSI_IOCTL_START_UNIT:
244 scsi_cmd[0] = START_STOP; 249 scsi_cmd[0] = START_STOP;
245 scsi_cmd[1] = 0; 250 scsi_cmd[1] = 0;
@@ -264,9 +269,12 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
264} 269}
265EXPORT_SYMBOL(scsi_ioctl); 270EXPORT_SYMBOL(scsi_ioctl);
266 271
267/* 272/**
268 * the scsi_nonblock_ioctl() function is designed for ioctls which may 273 * scsi_nonblock_ioctl() - Handle SG_SCSI_RESET
269 * be executed even if the device is in recovery. 274 * @sdev: scsi device receiving ioctl
275 * @cmd: Must be SC_SCSI_RESET
276 * @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST}
277 * @filp: either NULL or a &struct file which must have the O_NONBLOCK flag.
270 */ 278 */
271int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, 279int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
272 void __user *arg, struct file *filp) 280 void __user *arg, struct file *filp)
@@ -276,7 +284,7 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
 276 /* The first set of ioctls may be executed even if we're doing 284 /* The first set of ioctls may be executed even if we're doing
277 * error processing, as long as the device was opened 285 * error processing, as long as the device was opened
278 * non-blocking */ 286 * non-blocking */
279 if (filp && filp->f_flags & O_NONBLOCK) { 287 if (filp && (filp->f_flags & O_NONBLOCK)) {
280 if (scsi_host_in_recovery(sdev->host)) 288 if (scsi_host_in_recovery(sdev->host))
281 return -ENODEV; 289 return -ENODEV;
282 } else if (!scsi_block_when_processing_errors(sdev)) 290 } else if (!scsi_block_when_processing_errors(sdev))
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a9ac5b1b1667..4cf902efbdbf 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -175,7 +175,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
175 * 175 *
176 * returns the req->errors value which is the scsi_cmnd result 176 * returns the req->errors value which is the scsi_cmnd result
177 * field. 177 * field.
178 **/ 178 */
179int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 179int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
180 int data_direction, void *buffer, unsigned bufflen, 180 int data_direction, void *buffer, unsigned bufflen,
181 unsigned char *sense, int timeout, int retries, int flags) 181 unsigned char *sense, int timeout, int retries, int flags)
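A hedged sketch of a synchronous caller of the signature shown above (the timeout and retry values are arbitrary):

	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
	int result;

	result = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, sense,
			      10 * HZ, 3, 0);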
@@ -274,7 +274,7 @@ static void scsi_bi_endio(struct bio *bio, int error)
274/** 274/**
275 * scsi_req_map_sg - map a scatterlist into a request 275 * scsi_req_map_sg - map a scatterlist into a request
276 * @rq: request to fill 276 * @rq: request to fill
277 * @sg: scatterlist 277 * @sgl: scatterlist
278 * @nsegs: number of elements 278 * @nsegs: number of elements
279 * @bufflen: len of buffer 279 * @bufflen: len of buffer
280 * @gfp: memory allocation flags 280 * @gfp: memory allocation flags
@@ -365,14 +365,16 @@ free_bios:
365 * @sdev: scsi device 365 * @sdev: scsi device
366 * @cmd: scsi command 366 * @cmd: scsi command
367 * @cmd_len: length of scsi cdb 367 * @cmd_len: length of scsi cdb
368 * @data_direction: data direction 368 * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
369 * @buffer: data buffer (this can be a kernel buffer or scatterlist) 369 * @buffer: data buffer (this can be a kernel buffer or scatterlist)
370 * @bufflen: len of buffer 370 * @bufflen: len of buffer
371 * @use_sg: if buffer is a scatterlist this is the number of elements 371 * @use_sg: if buffer is a scatterlist this is the number of elements
372 * @timeout: request timeout in seconds 372 * @timeout: request timeout in seconds
373 * @retries: number of times to retry request 373 * @retries: number of times to retry request
374 * @flags: or into request flags 374 * @privdata: data passed to done()
375 **/ 375 * @done: callback function when done
376 * @gfp: memory allocation flags
377 */
376int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd, 378int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
377 int cmd_len, int data_direction, void *buffer, unsigned bufflen, 379 int cmd_len, int data_direction, void *buffer, unsigned bufflen,
378 int use_sg, int timeout, int retries, void *privdata, 380 int use_sg, int timeout, int retries, void *privdata,
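A hedged caller sketch for the signature documented above; the completion-based privdata payload and my_done are illustrative, and the timeout is passed in jiffies here on the assumption that it lands in req->timeout unchanged:

	static void my_done(void *privdata, char *sense, int result, int resid)
	{
		struct completion *waiting = privdata;	/* illustrative payload */

		complete(waiting);			/* sense/result now valid */
	}

		/* caller side */
		DECLARE_COMPLETION_ONSTACK(wait);
		unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };

		if (scsi_execute_async(sdev, cdb, COMMAND_SIZE(cdb[0]), DMA_NONE,
				       NULL, 0, 0, 30 * HZ, 3, &wait, my_done,
				       GFP_KERNEL))
			return -EIO;
		wait_for_completion(&wait);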
@@ -439,7 +441,7 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
439{ 441{
440 cmd->serial_number = 0; 442 cmd->serial_number = 0;
441 cmd->resid = 0; 443 cmd->resid = 0;
442 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer); 444 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
443 if (cmd->cmd_len == 0) 445 if (cmd->cmd_len == 0)
444 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); 446 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
445} 447}
@@ -524,7 +526,7 @@ static void scsi_run_queue(struct request_queue *q)
524 struct Scsi_Host *shost = sdev->host; 526 struct Scsi_Host *shost = sdev->host;
525 unsigned long flags; 527 unsigned long flags;
526 528
527 if (sdev->single_lun) 529 if (scsi_target(sdev)->single_lun)
528 scsi_single_lun_run(sdev); 530 scsi_single_lun_run(sdev);
529 531
530 spin_lock_irqsave(shost->host_lock, flags); 532 spin_lock_irqsave(shost->host_lock, flags);
@@ -1102,7 +1104,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1102 * 1104 *
1103 * Returns: 0 on success 1105 * Returns: 0 on success
1104 * BLKPREP_DEFER if the failure is retryable 1106 * BLKPREP_DEFER if the failure is retryable
1105 * BLKPREP_KILL if the failure is fatal
1106 */ 1107 */
1107static int scsi_init_io(struct scsi_cmnd *cmd) 1108static int scsi_init_io(struct scsi_cmnd *cmd)
1108{ 1109{
@@ -1136,17 +1137,9 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
1136 * each segment. 1137 * each segment.
1137 */ 1138 */
1138 count = blk_rq_map_sg(req->q, req, cmd->request_buffer); 1139 count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
1139 if (likely(count <= cmd->use_sg)) { 1140 BUG_ON(count > cmd->use_sg);
1140 cmd->use_sg = count; 1141 cmd->use_sg = count;
1141 return BLKPREP_OK; 1142 return BLKPREP_OK;
1142 }
1143
1144 printk(KERN_ERR "Incorrect number of segments after building list\n");
1145 printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
1146 printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
1147 req->current_nr_sectors);
1148
1149 return BLKPREP_KILL;
1150} 1143}
1151 1144
1152static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, 1145static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
@@ -1557,7 +1550,7 @@ static void scsi_request_fn(struct request_queue *q)
1557 1550
1558 if (!scsi_host_queue_ready(q, shost, sdev)) 1551 if (!scsi_host_queue_ready(q, shost, sdev))
1559 goto not_ready; 1552 goto not_ready;
1560 if (sdev->single_lun) { 1553 if (scsi_target(sdev)->single_lun) {
1561 if (scsi_target(sdev)->starget_sdev_user && 1554 if (scsi_target(sdev)->starget_sdev_user &&
1562 scsi_target(sdev)->starget_sdev_user != sdev) 1555 scsi_target(sdev)->starget_sdev_user != sdev)
1563 goto not_ready; 1556 goto not_ready;
@@ -1675,6 +1668,14 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1675 1668
1676 if (!shost->use_clustering) 1669 if (!shost->use_clustering)
1677 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 1670 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1671
1672 /*
1673 * set a reasonable default alignment on word boundaries: the
1674 * host and device may alter it using
1675 * blk_queue_update_dma_alignment() later.
1676 */
1677 blk_queue_dma_alignment(q, 0x03);
1678
1678 return q; 1679 return q;
1679} 1680}
1680EXPORT_SYMBOL(__scsi_alloc_queue); 1681EXPORT_SYMBOL(__scsi_alloc_queue);
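The comment added above leaves room for stricter needs; a minimal sketch, assuming a hypothetical LLD whose DMA engine requires 512-byte alignment of user buffers:

	static int my_slave_configure(struct scsi_device *sdev)
	{
		/* mask is alignment minus one, as with blk_queue_dma_alignment() */
		blk_queue_update_dma_alignment(sdev->request_queue, 512 - 1);
		return 0;
	}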
@@ -1804,7 +1805,7 @@ void scsi_exit_queue(void)
1804 * @timeout: command timeout 1805 * @timeout: command timeout
1805 * @retries: number of retries before failing 1806 * @retries: number of retries before failing
1806 * @data: returns a structure abstracting the mode header data 1807 * @data: returns a structure abstracting the mode header data
1807 * @sense: place to put sense data (or NULL if no sense to be collected). 1808 * @sshdr: place to put sense data (or NULL if no sense to be collected).
1808 * must be SCSI_SENSE_BUFFERSIZE big. 1809 * must be SCSI_SENSE_BUFFERSIZE big.
1809 * 1810 *
1810 * Returns zero if successful; negative error number or scsi 1811 * Returns zero if successful; negative error number or scsi
@@ -1871,8 +1872,7 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1871EXPORT_SYMBOL_GPL(scsi_mode_select); 1872EXPORT_SYMBOL_GPL(scsi_mode_select);
1872 1873
1873/** 1874/**
1874 * scsi_mode_sense - issue a mode sense, falling back from 10 to 1875 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
1875 * six bytes if necessary.
1876 * @sdev: SCSI device to be queried 1876 * @sdev: SCSI device to be queried
1877 * @dbd: set if mode sense will allow block descriptors to be returned 1877 * @dbd: set if mode sense will allow block descriptors to be returned
1878 * @modepage: mode page being requested 1878 * @modepage: mode page being requested
@@ -1881,13 +1881,13 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
1881 * @timeout: command timeout 1881 * @timeout: command timeout
1882 * @retries: number of retries before failing 1882 * @retries: number of retries before failing
1883 * @data: returns a structure abstracting the mode header data 1883 * @data: returns a structure abstracting the mode header data
1884 * @sense: place to put sense data (or NULL if no sense to be collected). 1884 * @sshdr: place to put sense data (or NULL if no sense to be collected).
1885 * must be SCSI_SENSE_BUFFERSIZE big. 1885 * must be SCSI_SENSE_BUFFERSIZE big.
1886 * 1886 *
1887 * Returns zero if unsuccessful, or the header offset (either 4 1887 * Returns zero if unsuccessful, or the header offset (either 4
1888 * or 8 depending on whether a six or ten byte command was 1888 * or 8 depending on whether a six or ten byte command was
1889 * issued) if successful. 1889 * issued) if successful.
1890 **/ 1890 */
1891int 1891int
1892scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 1892scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1893 unsigned char *buffer, int len, int timeout, int retries, 1893 unsigned char *buffer, int len, int timeout, int retries,
@@ -1981,40 +1981,69 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1981} 1981}
1982EXPORT_SYMBOL(scsi_mode_sense); 1982EXPORT_SYMBOL(scsi_mode_sense);
1983 1983
1984/**
1985 * scsi_test_unit_ready - test if unit is ready
1986 * @sdev: scsi device to change the state of.
1987 * @timeout: command timeout
1988 * @retries: number of retries before failing
1989 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
1990 * returning sense. Make sure that this is cleared before passing
1991 * in.
1992 *
 1993 * Returns zero if successful or an error if TUR failed. For
1994 * removable media, a return of NOT_READY or UNIT_ATTENTION is
1995 * translated to success, with the ->changed flag updated.
1996 **/
1984int 1997int
1985scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries) 1998scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
1999 struct scsi_sense_hdr *sshdr_external)
1986{ 2000{
1987 char cmd[] = { 2001 char cmd[] = {
1988 TEST_UNIT_READY, 0, 0, 0, 0, 0, 2002 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1989 }; 2003 };
1990 struct scsi_sense_hdr sshdr; 2004 struct scsi_sense_hdr *sshdr;
1991 int result; 2005 int result;
1992 2006
1993 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, 2007 if (!sshdr_external)
1994 timeout, retries); 2008 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2009 else
2010 sshdr = sshdr_external;
2011
2012 /* try to eat the UNIT_ATTENTION if there are enough retries */
2013 do {
2014 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2015 timeout, retries);
2016 } while ((driver_byte(result) & DRIVER_SENSE) &&
2017 sshdr && sshdr->sense_key == UNIT_ATTENTION &&
2018 --retries);
2019
2020 if (!sshdr)
2021 /* could not allocate sense buffer, so can't process it */
2022 return result;
1995 2023
1996 if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) { 2024 if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1997 2025
1998 if ((scsi_sense_valid(&sshdr)) && 2026 if ((scsi_sense_valid(sshdr)) &&
1999 ((sshdr.sense_key == UNIT_ATTENTION) || 2027 ((sshdr->sense_key == UNIT_ATTENTION) ||
2000 (sshdr.sense_key == NOT_READY))) { 2028 (sshdr->sense_key == NOT_READY))) {
2001 sdev->changed = 1; 2029 sdev->changed = 1;
2002 result = 0; 2030 result = 0;
2003 } 2031 }
2004 } 2032 }
2033 if (!sshdr_external)
2034 kfree(sshdr);
2005 return result; 2035 return result;
2006} 2036}
2007EXPORT_SYMBOL(scsi_test_unit_ready); 2037EXPORT_SYMBOL(scsi_test_unit_ready);
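A hedged caller sketch for the reworked helper, supplying the external sense header so the final sense survives the retry loop; the timeout and retry values are illustrative:

	struct scsi_sense_hdr sshdr;
	int result;

	memset(&sshdr, 0, sizeof(sshdr));	/* must be cleared, per above */
	result = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
	if (result && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == NOT_READY)
		/* the medium really is not ready, as opposed to a stale UA */
		dev_printk(KERN_INFO, &sdev->sdev_gendev, "device not ready\n");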
2008 2038
2009/** 2039/**
2010 * scsi_device_set_state - Take the given device through the device 2040 * scsi_device_set_state - Take the given device through the device state model.
2011 * state model.
2012 * @sdev: scsi device to change the state of. 2041 * @sdev: scsi device to change the state of.
2013 * @state: state to change to. 2042 * @state: state to change to.
2014 * 2043 *
 2015 * Returns zero if successful or an error if the requested 2044 * Returns zero if successful or an error if the requested
2016 * transition is illegal. 2045 * transition is illegal.
2017 **/ 2046 */
2018int 2047int
2019scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) 2048scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2020{ 2049{
@@ -2264,7 +2293,7 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
2264 * Must be called with user context, may sleep. 2293 * Must be called with user context, may sleep.
2265 * 2294 *
 2266 * Returns zero if successful or an error if not. 2295 * Returns zero if successful or an error if not.
2267 **/ 2296 */
2268int 2297int
2269scsi_device_quiesce(struct scsi_device *sdev) 2298scsi_device_quiesce(struct scsi_device *sdev)
2270{ 2299{
@@ -2289,7 +2318,7 @@ EXPORT_SYMBOL(scsi_device_quiesce);
2289 * queues. 2318 * queues.
2290 * 2319 *
2291 * Must be called with user context, may sleep. 2320 * Must be called with user context, may sleep.
2292 **/ 2321 */
2293void 2322void
2294scsi_device_resume(struct scsi_device *sdev) 2323scsi_device_resume(struct scsi_device *sdev)
2295{ 2324{
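The quiesce/resume pair documented above is typically wrapped around privileged maintenance I/O; a hedged sketch (scsi_execute() requests are the "special" ones that still run while quiesced):

	int err;

	err = scsi_device_quiesce(sdev);	/* only special requests pass */
	if (err)
		return err;
	/* ... issue maintenance commands via scsi_execute() here ... */
	scsi_device_resume(sdev);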
@@ -2326,8 +2355,7 @@ scsi_target_resume(struct scsi_target *starget)
2326EXPORT_SYMBOL(scsi_target_resume); 2355EXPORT_SYMBOL(scsi_target_resume);
2327 2356
2328/** 2357/**
2329 * scsi_internal_device_block - internal function to put a device 2358 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2330 * temporarily into the SDEV_BLOCK state
2331 * @sdev: device to block 2359 * @sdev: device to block
2332 * 2360 *
2333 * Block request made by scsi lld's to temporarily stop all 2361 * Block request made by scsi lld's to temporarily stop all
@@ -2342,7 +2370,7 @@ EXPORT_SYMBOL(scsi_target_resume);
2342 * state, all commands are deferred until the scsi lld reenables 2370 * state, all commands are deferred until the scsi lld reenables
2343 * the device with scsi_device_unblock or device_block_tmo fires. 2371 * the device with scsi_device_unblock or device_block_tmo fires.
2344 * This routine assumes the host_lock is held on entry. 2372 * This routine assumes the host_lock is held on entry.
2345 **/ 2373 */
2346int 2374int
2347scsi_internal_device_block(struct scsi_device *sdev) 2375scsi_internal_device_block(struct scsi_device *sdev)
2348{ 2376{
@@ -2382,7 +2410,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2382 * (which must be a legal transition) allowing the midlayer to 2410 * (which must be a legal transition) allowing the midlayer to
2383 * goose the queue for this device. This routine assumes the 2411 * goose the queue for this device. This routine assumes the
2384 * host_lock is held upon entry. 2412 * host_lock is held upon entry.
2385 **/ 2413 */
2386int 2414int
2387scsi_internal_device_unblock(struct scsi_device *sdev) 2415scsi_internal_device_unblock(struct scsi_device *sdev)
2388{ 2416{
@@ -2460,7 +2488,7 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock);
2460 2488
2461/** 2489/**
 2462 * scsi_kmap_atomic_sg - find and atomically map an sg-element 2490 * scsi_kmap_atomic_sg - find and atomically map an sg-element
2463 * @sg: scatter-gather list 2491 * @sgl: scatter-gather list
2464 * @sg_count: number of segments in sg 2492 * @sg_count: number of segments in sg
2465 * @offset: offset in bytes into sg, on return offset into the mapped area 2493 * @offset: offset in bytes into sg, on return offset into the mapped area
2466 * @len: bytes to map, on return number of bytes mapped 2494 * @len: bytes to map, on return number of bytes mapped
@@ -2509,8 +2537,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2509EXPORT_SYMBOL(scsi_kmap_atomic_sg); 2537EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2510 2538
2511/** 2539/**
2512 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously 2540 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2513 * mapped with scsi_kmap_atomic_sg
2514 * @virt: virtual address to be unmapped 2541 * @virt: virtual address to be unmapped
2515 */ 2542 */
2516void scsi_kunmap_atomic_sg(void *virt) 2543void scsi_kunmap_atomic_sg(void *virt)
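A hedged sketch of the map/unmap pairing, peeking at the head of a command's data; buf is illustrative, and the accessors are the same scsi_sglist()/scsi_sg_count() used elsewhere in this series:

	unsigned char buf[8];
	size_t offset = 0, len = sizeof(buf);
	void *virt;

	virt = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				   &offset, &len);
	/* on return, offset/len describe the mapped window */
	memcpy(buf, virt + offset, min_t(size_t, len, sizeof(buf)));
	scsi_kunmap_atomic_sg(virt);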
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 40579edca101..3e1591828171 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -32,11 +32,12 @@ EXPORT_SYMBOL_GPL(scsi_nl_sock);
32 32
33 33
34/** 34/**
35 * scsi_nl_rcv_msg - 35 * scsi_nl_rcv_msg - Receive message handler.
36 * Receive message handler. Extracts message from a receive buffer. 36 * @skb: socket receive buffer
37 *
38 * Description: Extracts message from a receive buffer.
37 * Validates message header and calls appropriate transport message handler 39 * Validates message header and calls appropriate transport message handler
38 * 40 *
39 * @skb: socket receive buffer
40 * 41 *
41 **/ 42 **/
42static void 43static void
@@ -99,9 +100,7 @@ next_msg:
99 100
100 101
101/** 102/**
102 * scsi_nl_rcv_event - 103 * scsi_nl_rcv_event - Event handler for a netlink socket.
103 * Event handler for a netlink socket.
104 *
105 * @this: event notifier block 104 * @this: event notifier block
106 * @event: event type 105 * @event: event type
107 * @ptr: event payload 106 * @ptr: event payload
@@ -129,9 +128,7 @@ static struct notifier_block scsi_netlink_notifier = {
129 128
130 129
131/** 130/**
 132 * scsi_netlink_init - 131 * scsi_netlink_init - Called by SCSI subsystem to initialize the SCSI transport netlink interface
 133 * Called by SCSI subsystem to initialize the SCSI transport netlink
134 * interface
135 * 132 *
136 **/ 133 **/
137void 134void
@@ -160,9 +157,7 @@ scsi_netlink_init(void)
160 157
161 158
162/** 159/**
163 * scsi_netlink_exit - 160 * scsi_netlink_exit - Called by SCSI subsystem to disable the SCSI transport netlink interface
164 * Called by SCSI subsystem to disable the SCSI transport netlink
165 * interface
166 * 161 *
167 **/ 162 **/
168void 163void
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index bb6f051beda8..ed395154a5b1 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -45,6 +45,16 @@ static struct proc_dir_entry *proc_scsi;
45/* Protect sht->present and sht->proc_dir */ 45/* Protect sht->present and sht->proc_dir */
46static DEFINE_MUTEX(global_host_template_mutex); 46static DEFINE_MUTEX(global_host_template_mutex);
47 47
48/**
49 * proc_scsi_read - handle read from /proc by calling host's proc_info() command
50 * @buffer: passed to proc_info
51 * @start: passed to proc_info
52 * @offset: passed to proc_info
53 * @length: passed to proc_info
54 * @eof: returns whether length read was less than requested
55 * @data: pointer to a &struct Scsi_Host
56 */
57
48static int proc_scsi_read(char *buffer, char **start, off_t offset, 58static int proc_scsi_read(char *buffer, char **start, off_t offset,
49 int length, int *eof, void *data) 59 int length, int *eof, void *data)
50{ 60{
@@ -57,6 +67,13 @@ static int proc_scsi_read(char *buffer, char **start, off_t offset,
57 return n; 67 return n;
58} 68}
59 69
70/**
71 * proc_scsi_write_proc - Handle write to /proc by calling host's proc_info()
72 * @file: not used
73 * @buf: source of data to write.
74 * @count: number of bytes (at most PROC_BLOCK_SIZE) to write.
75 * @data: pointer to &struct Scsi_Host
76 */
60static int proc_scsi_write_proc(struct file *file, const char __user *buf, 77static int proc_scsi_write_proc(struct file *file, const char __user *buf,
61 unsigned long count, void *data) 78 unsigned long count, void *data)
62{ 79{
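Both handlers bottom out in the host template's proc_info() hook; a much-simplified, hypothetical implementation (output assumed to fit in one page, so the offset/start bookkeeping is omitted):

	static int my_proc_info(struct Scsi_Host *shost, char *buffer,
				char **start, off_t offset, int length, int inout)
	{
		if (inout)		/* write: parse 'length' bytes of buffer */
			return length;	/* accept and ignore in this sketch */

		/* read: fill buffer; adequate while output < PAGE_SIZE */
		return sprintf(buffer, "my_driver bound to host %d\n",
			       shost->host_no);
	}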
@@ -80,6 +97,13 @@ out:
80 return ret; 97 return ret;
81} 98}
82 99
100/**
101 * scsi_proc_hostdir_add - Create directory in /proc for a scsi host
102 * @sht: owner of this directory
103 *
104 * Sets sht->proc_dir to the new directory.
105 */
106
83void scsi_proc_hostdir_add(struct scsi_host_template *sht) 107void scsi_proc_hostdir_add(struct scsi_host_template *sht)
84{ 108{
85 if (!sht->proc_info) 109 if (!sht->proc_info)
@@ -97,6 +121,10 @@ void scsi_proc_hostdir_add(struct scsi_host_template *sht)
97 mutex_unlock(&global_host_template_mutex); 121 mutex_unlock(&global_host_template_mutex);
98} 122}
99 123
124/**
125 * scsi_proc_hostdir_rm - remove directory in /proc for a scsi host
126 * @sht: owner of directory
127 */
100void scsi_proc_hostdir_rm(struct scsi_host_template *sht) 128void scsi_proc_hostdir_rm(struct scsi_host_template *sht)
101{ 129{
102 if (!sht->proc_info) 130 if (!sht->proc_info)
@@ -110,6 +138,11 @@ void scsi_proc_hostdir_rm(struct scsi_host_template *sht)
110 mutex_unlock(&global_host_template_mutex); 138 mutex_unlock(&global_host_template_mutex);
111} 139}
112 140
141
142/**
143 * scsi_proc_host_add - Add entry for this host to appropriate /proc dir
144 * @shost: host to add
145 */
113void scsi_proc_host_add(struct Scsi_Host *shost) 146void scsi_proc_host_add(struct Scsi_Host *shost)
114{ 147{
115 struct scsi_host_template *sht = shost->hostt; 148 struct scsi_host_template *sht = shost->hostt;
@@ -133,6 +166,10 @@ void scsi_proc_host_add(struct Scsi_Host *shost)
133 p->owner = sht->module; 166 p->owner = sht->module;
134} 167}
135 168
169/**
170 * scsi_proc_host_rm - remove this host's entry from /proc
171 * @shost: which host
172 */
136void scsi_proc_host_rm(struct Scsi_Host *shost) 173void scsi_proc_host_rm(struct Scsi_Host *shost)
137{ 174{
138 char name[10]; 175 char name[10];
@@ -143,7 +180,14 @@ void scsi_proc_host_rm(struct Scsi_Host *shost)
143 sprintf(name,"%d", shost->host_no); 180 sprintf(name,"%d", shost->host_no);
144 remove_proc_entry(name, shost->hostt->proc_dir); 181 remove_proc_entry(name, shost->hostt->proc_dir);
145} 182}
146 183/**
184 * proc_print_scsidevice - return data about this host
185 * @dev: A scsi device
186 * @data: &struct seq_file to output to.
187 *
188 * Description: prints Host, Channel, Id, Lun, Vendor, Model, Rev, Type,
 189 * and ANSI SCSI revision.
190 */
147static int proc_print_scsidevice(struct device *dev, void *data) 191static int proc_print_scsidevice(struct device *dev, void *data)
148{ 192{
149 struct scsi_device *sdev = to_scsi_device(dev); 193 struct scsi_device *sdev = to_scsi_device(dev);
@@ -189,6 +233,21 @@ static int proc_print_scsidevice(struct device *dev, void *data)
189 return 0; 233 return 0;
190} 234}
191 235
236/**
237 * scsi_add_single_device - Respond to user request to probe for/add device
238 * @host: user-supplied decimal integer
239 * @channel: user-supplied decimal integer
240 * @id: user-supplied decimal integer
241 * @lun: user-supplied decimal integer
242 *
243 * Description: called by writing "scsi add-single-device" to /proc/scsi/scsi.
244 *
 245 * Does a scsi_host_lookup() and then either user_scan(), if the
 246 * transport type supports it, or scsi_scan_host_selected().
247 *
248 * Note: this seems to be aimed exclusively at SCSI parallel busses.
249 */
250
192static int scsi_add_single_device(uint host, uint channel, uint id, uint lun) 251static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
193{ 252{
194 struct Scsi_Host *shost; 253 struct Scsi_Host *shost;
@@ -206,6 +265,16 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
206 return error; 265 return error;
207} 266}
208 267
268/**
269 * scsi_remove_single_device - Respond to user request to remove a device
270 * @host: user-supplied decimal integer
271 * @channel: user-supplied decimal integer
272 * @id: user-supplied decimal integer
273 * @lun: user-supplied decimal integer
274 *
275 * Description: called by writing "scsi remove-single-device" to
276 * /proc/scsi/scsi. Does a scsi_device_lookup() and scsi_remove_device()
277 */
209static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun) 278static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
210{ 279{
211 struct scsi_device *sdev; 280 struct scsi_device *sdev;
@@ -226,6 +295,25 @@ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
226 return error; 295 return error;
227} 296}
228 297
298/**
299 * proc_scsi_write - handle writes to /proc/scsi/scsi
300 * @file: not used
301 * @buf: buffer to write
302 * @length: length of buf, at most PAGE_SIZE
303 * @ppos: not used
304 *
305 * Description: this provides a legacy mechanism to add or remove devices by
306 * Host, Channel, ID, and Lun. To use,
307 * "echo 'scsi add-single-device 0 1 2 3' > /proc/scsi/scsi" or
308 * "echo 'scsi remove-single-device 0 1 2 3' > /proc/scsi/scsi" with
309 * "0 1 2 3" replaced by the Host, Channel, Id, and Lun.
310 *
311 * Note: this seems to be aimed at parallel SCSI. Most modern busses (USB,
312 * SATA, Firewire, Fibre Channel, etc) dynamically assign these values to
313 * provide a unique identifier and nothing more.
314 */
315
316
229static ssize_t proc_scsi_write(struct file *file, const char __user *buf, 317static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
230 size_t length, loff_t *ppos) 318 size_t length, loff_t *ppos)
231{ 319{
@@ -291,6 +379,11 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
291 return err; 379 return err;
292} 380}
293 381
382/**
383 * proc_scsi_show - show contents of /proc/scsi/scsi (attached devices)
384 * @s: output goes here
385 * @p: not used
386 */
294static int proc_scsi_show(struct seq_file *s, void *p) 387static int proc_scsi_show(struct seq_file *s, void *p)
295{ 388{
296 seq_printf(s, "Attached devices:\n"); 389 seq_printf(s, "Attached devices:\n");
@@ -298,10 +391,17 @@ static int proc_scsi_show(struct seq_file *s, void *p)
298 return 0; 391 return 0;
299} 392}
300 393
394/**
395 * proc_scsi_open - glue function
396 * @inode: not used
397 * @file: passed to single_open()
398 *
399 * Associates proc_scsi_show with this file
400 */
301static int proc_scsi_open(struct inode *inode, struct file *file) 401static int proc_scsi_open(struct inode *inode, struct file *file)
302{ 402{
303 /* 403 /*
304 * We don't really needs this for the write case but it doesn't 404 * We don't really need this for the write case but it doesn't
305 * harm either. 405 * harm either.
306 */ 406 */
307 return single_open(file, proc_scsi_show, NULL); 407 return single_open(file, proc_scsi_show, NULL);
@@ -315,6 +415,9 @@ static const struct file_operations proc_scsi_operations = {
315 .release = single_release, 415 .release = single_release,
316}; 416};
317 417
418/**
419 * scsi_init_procfs - create scsi and scsi/scsi in procfs
420 */
318int __init scsi_init_procfs(void) 421int __init scsi_init_procfs(void)
319{ 422{
320 struct proc_dir_entry *pde; 423 struct proc_dir_entry *pde;
@@ -336,6 +439,9 @@ err1:
336 return -ENOMEM; 439 return -ENOMEM;
337} 440}
338 441
442/**
443 * scsi_exit_procfs - Remove scsi/scsi and scsi from procfs
444 */
339void scsi_exit_procfs(void) 445void scsi_exit_procfs(void)
340{ 446{
341 remove_proc_entry("scsi/scsi", NULL); 447 remove_proc_entry("scsi/scsi", NULL);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 40ea71cd2ca6..1dc165ad17fb 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -221,6 +221,9 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
221 221
222/** 222/**
223 * scsi_alloc_sdev - allocate and setup a scsi_Device 223 * scsi_alloc_sdev - allocate and setup a scsi_Device
224 * @starget: which target to allocate a &scsi_device for
225 * @lun: which lun
226 * @hostdata: usually NULL and set by ->slave_alloc instead
224 * 227 *
225 * Description: 228 * Description:
226 * Allocate, initialize for io, and return a pointer to a scsi_Device. 229 * Allocate, initialize for io, and return a pointer to a scsi_Device.
@@ -472,7 +475,6 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
472 475
473/** 476/**
474 * scsi_target_reap - check to see if target is in use and destroy if not 477 * scsi_target_reap - check to see if target is in use and destroy if not
475 *
476 * @starget: target to be checked 478 * @starget: target to be checked
477 * 479 *
478 * This is used after removing a LUN or doing a last put of the target 480 * This is used after removing a LUN or doing a last put of the target
@@ -863,7 +865,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
863 sdev->no_start_on_add = 1; 865 sdev->no_start_on_add = 1;
864 866
865 if (*bflags & BLIST_SINGLELUN) 867 if (*bflags & BLIST_SINGLELUN)
866 sdev->single_lun = 1; 868 scsi_target(sdev)->single_lun = 1;
867 869
868 sdev->use_10_for_rw = 1; 870 sdev->use_10_for_rw = 1;
869 871
@@ -928,8 +930,7 @@ static inline void scsi_destroy_sdev(struct scsi_device *sdev)
928 930
929#ifdef CONFIG_SCSI_LOGGING 931#ifdef CONFIG_SCSI_LOGGING
930/** 932/**
931 * scsi_inq_str - print INQUIRY data from min to max index, 933 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
932 * strip trailing whitespace
933 * @buf: Output buffer with at least end-first+1 bytes of space 934 * @buf: Output buffer with at least end-first+1 bytes of space
934 * @inq: Inquiry buffer (input) 935 * @inq: Inquiry buffer (input)
935 * @first: Offset of string into inq 936 * @first: Offset of string into inq
@@ -957,9 +958,10 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
957 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it 958 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
958 * @starget: pointer to target device structure 959 * @starget: pointer to target device structure
959 * @lun: LUN of target device 960 * @lun: LUN of target device
960 * @sdevscan: probe the LUN corresponding to this scsi_device
961 * @sdevnew: store the value of any new scsi_device allocated
962 * @bflagsp: store bflags here if not NULL 961 * @bflagsp: store bflags here if not NULL
962 * @sdevp: probe the LUN corresponding to this scsi_device
963 * @rescan: if nonzero skip some code only needed on first scan
964 * @hostdata: passed to scsi_alloc_sdev()
963 * 965 *
964 * Description: 966 * Description:
965 * Call scsi_probe_lun, if a LUN with an attached device is found, 967 * Call scsi_probe_lun, if a LUN with an attached device is found,
@@ -1110,6 +1112,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
1110 * scsi_sequential_lun_scan - sequentially scan a SCSI target 1112 * scsi_sequential_lun_scan - sequentially scan a SCSI target
1111 * @starget: pointer to target structure to scan 1113 * @starget: pointer to target structure to scan
1112 * @bflags: black/white list flag for LUN 0 1114 * @bflags: black/white list flag for LUN 0
1115 * @scsi_level: Which version of the standard does this device adhere to
1116 * @rescan: passed to scsi_probe_add_lun()
1113 * 1117 *
1114 * Description: 1118 * Description:
1115 * Generally, scan from LUN 1 (LUN 0 is assumed to already have been 1119 * Generally, scan from LUN 1 (LUN 0 is assumed to already have been
@@ -1220,7 +1224,7 @@ EXPORT_SYMBOL(scsilun_to_int);
1220 1224
1221/** 1225/**
 1222 * int_to_scsilun: converts an int into a scsi_lun 1226 * int_to_scsilun: converts an int into a scsi_lun
 1223 * @int: integer to be converted 1227 * @lun: integer to be converted
1224 * @scsilun: struct scsi_lun to be set. 1228 * @scsilun: struct scsi_lun to be set.
1225 * 1229 *
1226 * Description: 1230 * Description:
@@ -1252,18 +1256,22 @@ EXPORT_SYMBOL(int_to_scsilun);
1252 1256
1253/** 1257/**
1254 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results 1258 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1255 * @sdevscan: scan the host, channel, and id of this scsi_device 1259 * @starget: which target
1260 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
1261 * @rescan: nonzero if we can skip code only needed on first scan
1256 * 1262 *
1257 * Description: 1263 * Description:
1258 * If @sdevscan is for a SCSI-3 or up device, send a REPORT LUN 1264 * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
1259 * command, and scan the resulting list of LUNs by calling 1265 * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
1260 * scsi_probe_and_add_lun.
1261 * 1266 *
 1262 * Modifies sdevscan->lun. 1267 * If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
1268 * LUNs even if it's older than SCSI-3.
1269 * If BLIST_NOREPORTLUN is set, return 1 always.
1270 * If BLIST_NOLUN is set, return 0 always.
1263 * 1271 *
1264 * Return: 1272 * Return:
1265 * 0: scan completed (or no memory, so further scanning is futile) 1273 * 0: scan completed (or no memory, so further scanning is futile)
1266 * 1: no report lun scan, or not configured 1274 * 1: could not scan with REPORT LUN
1267 **/ 1275 **/
1268static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, 1276static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1269 int rescan) 1277 int rescan)
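REPORT LUNS returns 8-byte LUN structures that this path converts with scsilun_to_int(); a hedged round-trip illustration of the two helpers documented earlier:

	struct scsi_lun wire;

	int_to_scsilun(256, &wire);	/* wire.scsi_lun[] = 01 00 00 00 ... */
	BUG_ON(scsilun_to_int(&wire) != 256);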
@@ -1481,6 +1489,7 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1481 if (scsi_host_scan_allowed(shost)) 1489 if (scsi_host_scan_allowed(shost))
1482 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); 1490 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
1483 mutex_unlock(&shost->scan_mutex); 1491 mutex_unlock(&shost->scan_mutex);
1492 transport_configure_device(&starget->dev);
1484 scsi_target_reap(starget); 1493 scsi_target_reap(starget);
1485 put_device(&starget->dev); 1494 put_device(&starget->dev);
1486 1495
@@ -1561,6 +1570,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1561 out_reap: 1570 out_reap:
1562 /* now determine if the target has any children at all 1571 /* now determine if the target has any children at all
1563 * and if not, nuke it */ 1572 * and if not, nuke it */
1573 transport_configure_device(&starget->dev);
1564 scsi_target_reap(starget); 1574 scsi_target_reap(starget);
1565 1575
1566 put_device(&starget->dev); 1576 put_device(&starget->dev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 00b386677392..ed83cdb6e67d 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1018,6 +1018,7 @@ int scsi_sysfs_add_host(struct Scsi_Host *shost)
1018 } 1018 }
1019 1019
1020 transport_register_device(&shost->shost_gendev); 1020 transport_register_device(&shost->shost_gendev);
1021 transport_configure_device(&shost->shost_gendev);
1021 return 0; 1022 return 0;
1022} 1023}
1023 1024
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
index 9815a1a2db24..d2557dbc2dc1 100644
--- a/drivers/scsi/scsi_tgt_if.c
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -112,7 +112,7 @@ int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, u64 itn_id,
112 memset(&ev, 0, sizeof(ev)); 112 memset(&ev, 0, sizeof(ev));
113 ev.p.cmd_req.host_no = shost->host_no; 113 ev.p.cmd_req.host_no = shost->host_no;
114 ev.p.cmd_req.itn_id = itn_id; 114 ev.p.cmd_req.itn_id = itn_id;
115 ev.p.cmd_req.data_len = cmd->request_bufflen; 115 ev.p.cmd_req.data_len = scsi_bufflen(cmd);
116 memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb)); 116 memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
117 memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun)); 117 memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
118 ev.p.cmd_req.attribute = cmd->tag; 118 ev.p.cmd_req.attribute = cmd->tag;
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index a91761c3645f..93ece8f4e5de 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -180,7 +180,7 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work)
180 container_of(work, struct scsi_tgt_cmd, work); 180 container_of(work, struct scsi_tgt_cmd, work);
181 struct scsi_cmnd *cmd = tcmd->rq->special; 181 struct scsi_cmnd *cmd = tcmd->rq->special;
182 182
183 dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction, 183 dprintk("cmd %p %d %u\n", cmd, cmd->sc_data_direction,
184 rq_data_dir(cmd->request)); 184 rq_data_dir(cmd->request));
185 scsi_unmap_user_pages(tcmd); 185 scsi_unmap_user_pages(tcmd);
186 scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd); 186 scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
@@ -327,11 +327,11 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
327{ 327{
328 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data; 328 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
329 329
330 dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request)); 330 dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request));
331 331
332 scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag); 332 scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag);
333 333
334 if (cmd->request_buffer) 334 if (scsi_sglist(cmd))
335 scsi_free_sgtable(cmd); 335 scsi_free_sgtable(cmd);
336 336
337 queue_work(scsi_tgtd, &tcmd->work); 337 queue_work(scsi_tgtd, &tcmd->work);
@@ -342,7 +342,7 @@ static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
342 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd); 342 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
343 int err; 343 int err;
344 344
345 dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request)); 345 dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request));
346 346
347 err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done); 347 err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
348 switch (err) { 348 switch (err) {
@@ -365,16 +365,12 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
365 365
366 cmd->request_bufflen = rq->data_len; 366 cmd->request_bufflen = rq->data_len;
367 367
368 dprintk("cmd %p cnt %d %lu\n", cmd, cmd->use_sg, rq_data_dir(rq)); 368 dprintk("cmd %p cnt %d %lu\n", cmd, scsi_sg_count(cmd),
369 count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer); 369 rq_data_dir(rq));
370 if (likely(count <= cmd->use_sg)) { 370 count = blk_rq_map_sg(rq->q, rq, scsi_sglist(cmd));
371 cmd->use_sg = count; 371 BUG_ON(count > cmd->use_sg);
372 return 0; 372 cmd->use_sg = count;
373 } 373 return 0;
374
375 eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg);
376 scsi_free_sgtable(cmd);
377 return -EINVAL;
378} 374}
379 375
380/* TODO: test this crap and replace bio_map_user with new interface maybe */ 376/* TODO: test this crap and replace bio_map_user with new interface maybe */
@@ -496,8 +492,8 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
496 } 492 }
497 cmd = rq->special; 493 cmd = rq->special;
498 494
499 dprintk("cmd %p scb %x result %d len %d bufflen %u %lu %x\n", 495 dprintk("cmd %p scb %x result %d len %d bufflen %u %u %x\n",
500 cmd, cmd->cmnd[0], result, len, cmd->request_bufflen, 496 cmd, cmd->cmnd[0], result, len, scsi_bufflen(cmd),
501 rq_data_dir(rq), cmd->cmnd[0]); 497 rq_data_dir(rq), cmd->cmnd[0]);
502 498
503 if (result == TASK_ABORTED) { 499 if (result == TASK_ABORTED) {
@@ -617,7 +613,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
617 struct Scsi_Host *shost; 613 struct Scsi_Host *shost;
618 int err = -EINVAL; 614 int err = -EINVAL;
619 615
 620 dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid); 616 dprintk("%d %d %llx\n", host_no, result, (unsigned long long)itn_id);
621 617
622 shost = scsi_host_lookup(host_no); 618 shost = scsi_host_lookup(host_no);
623 if (IS_ERR(shost)) { 619 if (IS_ERR(shost)) {
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 7a7cfe583b2a..b1119da6e88c 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -481,9 +481,9 @@ MODULE_PARM_DESC(dev_loss_tmo,
481 " exceeded, the scsi target is removed. Value should be" 481 " exceeded, the scsi target is removed. Value should be"
482 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT."); 482 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT.");
483 483
484/** 484/*
485 * Netlink Infrastructure 485 * Netlink Infrastructure
486 **/ 486 */
487 487
488static atomic_t fc_event_seq; 488static atomic_t fc_event_seq;
489 489
@@ -491,10 +491,10 @@ static atomic_t fc_event_seq;
491 * fc_get_event_number - Obtain the next sequential FC event number 491 * fc_get_event_number - Obtain the next sequential FC event number
492 * 492 *
493 * Notes: 493 * Notes:
494 * We could have inline'd this, but it would have required fc_event_seq to 494 * We could have inlined this, but it would have required fc_event_seq to
495 * be exposed. For now, live with the subroutine call. 495 * be exposed. For now, live with the subroutine call.
496 * Atomic used to avoid lock/unlock... 496 * Atomic used to avoid lock/unlock...
497 **/ 497 */
498u32 498u32
499fc_get_event_number(void) 499fc_get_event_number(void)
500{ 500{
@@ -505,7 +505,6 @@ EXPORT_SYMBOL(fc_get_event_number);
505 505
506/** 506/**
 507 * fc_host_post_event - called to post an event on an fc_host. 507 * fc_host_post_event - called to post an event on an fc_host.
508 *
509 * @shost: host the event occurred on 508 * @shost: host the event occurred on
510 * @event_number: fc event number obtained from get_fc_event_number() 509 * @event_number: fc event number obtained from get_fc_event_number()
511 * @event_code: fc_host event being posted 510 * @event_code: fc_host event being posted
@@ -513,7 +512,7 @@ EXPORT_SYMBOL(fc_get_event_number);
513 * 512 *
514 * Notes: 513 * Notes:
515 * This routine assumes no locks are held on entry. 514 * This routine assumes no locks are held on entry.
516 **/ 515 */
517void 516void
518fc_host_post_event(struct Scsi_Host *shost, u32 event_number, 517fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
519 enum fc_host_event_code event_code, u32 event_data) 518 enum fc_host_event_code event_code, u32 event_data)
@@ -579,17 +578,16 @@ EXPORT_SYMBOL(fc_host_post_event);
579 578
580 579
581/** 580/**
582 * fc_host_post_vendor_event - called to post a vendor unique event on 581 * fc_host_post_vendor_event - called to post a vendor unique event on an fc_host
583 * a fc_host
584 *
585 * @shost: host the event occurred on 582 * @shost: host the event occurred on
586 * @event_number: fc event number obtained from get_fc_event_number() 583 * @event_number: fc event number obtained from get_fc_event_number()
587 * @data_len: amount, in bytes, of vendor unique data 584 * @data_len: amount, in bytes, of vendor unique data
588 * @data_buf: pointer to vendor unique data 585 * @data_buf: pointer to vendor unique data
586 * @vendor_id: Vendor id
589 * 587 *
590 * Notes: 588 * Notes:
591 * This routine assumes no locks are held on entry. 589 * This routine assumes no locks are held on entry.
592 **/ 590 */
593void 591void
594fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, 592fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
595 u32 data_len, char * data_buf, u64 vendor_id) 593 u32 data_len, char * data_buf, u64 vendor_id)
@@ -1900,7 +1898,6 @@ static int fc_vport_match(struct attribute_container *cont,
1900 1898
1901/** 1899/**
1902 * fc_timed_out - FC Transport I/O timeout intercept handler 1900 * fc_timed_out - FC Transport I/O timeout intercept handler
1903 *
1904 * @scmd: The SCSI command which timed out 1901 * @scmd: The SCSI command which timed out
1905 * 1902 *
1906 * This routine protects against error handlers getting invoked while a 1903 * This routine protects against error handlers getting invoked while a
@@ -1920,7 +1917,7 @@ static int fc_vport_match(struct attribute_container *cont,
1920 * 1917 *
1921 * Notes: 1918 * Notes:
1922 * This routine assumes no locks are held on entry. 1919 * This routine assumes no locks are held on entry.
1923 **/ 1920 */
1924static enum scsi_eh_timer_return 1921static enum scsi_eh_timer_return
1925fc_timed_out(struct scsi_cmnd *scmd) 1922fc_timed_out(struct scsi_cmnd *scmd)
1926{ 1923{
@@ -2133,7 +2130,7 @@ EXPORT_SYMBOL(fc_release_transport);
2133 * 1 - work queued for execution 2130 * 1 - work queued for execution
2134 * 0 - work is already queued 2131 * 0 - work is already queued
2135 * -EINVAL - work queue doesn't exist 2132 * -EINVAL - work queue doesn't exist
2136 **/ 2133 */
2137static int 2134static int
2138fc_queue_work(struct Scsi_Host *shost, struct work_struct *work) 2135fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
2139{ 2136{
@@ -2152,7 +2149,7 @@ fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
2152/** 2149/**
2153 * fc_flush_work - Flush a fc_host's workqueue. 2150 * fc_flush_work - Flush a fc_host's workqueue.
2154 * @shost: Pointer to Scsi_Host bound to fc_host. 2151 * @shost: Pointer to Scsi_Host bound to fc_host.
2155 **/ 2152 */
2156static void 2153static void
2157fc_flush_work(struct Scsi_Host *shost) 2154fc_flush_work(struct Scsi_Host *shost)
2158{ 2155{
@@ -2175,7 +2172,7 @@ fc_flush_work(struct Scsi_Host *shost)
2175 * 2172 *
2176 * Return value: 2173 * Return value:
2177 * 1 on success / 0 already queued / < 0 for error 2174 * 1 on success / 0 already queued / < 0 for error
2178 **/ 2175 */
2179static int 2176static int
2180fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, 2177fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
2181 unsigned long delay) 2178 unsigned long delay)
@@ -2195,7 +2192,7 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
2195/** 2192/**
2196 * fc_flush_devloss - Flush a fc_host's devloss workqueue. 2193 * fc_flush_devloss - Flush a fc_host's devloss workqueue.
2197 * @shost: Pointer to Scsi_Host bound to fc_host. 2194 * @shost: Pointer to Scsi_Host bound to fc_host.
2198 **/ 2195 */
2199static void 2196static void
2200fc_flush_devloss(struct Scsi_Host *shost) 2197fc_flush_devloss(struct Scsi_Host *shost)
2201{ 2198{
@@ -2212,21 +2209,20 @@ fc_flush_devloss(struct Scsi_Host *shost)
2212 2209
2213 2210
2214/** 2211/**
2215 * fc_remove_host - called to terminate any fc_transport-related elements 2212 * fc_remove_host - called to terminate any fc_transport-related elements for a scsi host.
2216 * for a scsi host. 2213 * @shost: Which &Scsi_Host
2217 * @rport: remote port to be unblocked.
2218 * 2214 *
 2219 * This routine is expected to be called immediately preceding 2215 * This routine is expected to be called immediately preceding
2220 * a driver's call to scsi_remove_host(). 2216 * a driver's call to scsi_remove_host().
2221 * 2217 *
2222 * WARNING: A driver utilizing the fc_transport, which fails to call 2218 * WARNING: A driver utilizing the fc_transport, which fails to call
2223 * this routine prior to scsi_remote_host(), will leave dangling 2219 * this routine prior to scsi_remove_host(), will leave dangling
2224 * objects in /sys/class/fc_remote_ports. Access to any of these 2220 * objects in /sys/class/fc_remote_ports. Access to any of these
2225 * objects can result in a system crash !!! 2221 * objects can result in a system crash !!!
2226 * 2222 *
2227 * Notes: 2223 * Notes:
2228 * This routine assumes no locks are held on entry. 2224 * This routine assumes no locks are held on entry.
2229 **/ 2225 */
2230void 2226void
2231fc_remove_host(struct Scsi_Host *shost) 2227fc_remove_host(struct Scsi_Host *shost)
2232{ 2228{
@@ -2281,10 +2277,10 @@ EXPORT_SYMBOL(fc_remove_host);
2281 2277
2282/** 2278/**
 2283 * fc_starget_delete - called to delete the scsi descendants of an rport 2279 * fc_starget_delete - called to delete the scsi descendants of an rport
2284 * (target and all sdevs)
2285 *
2286 * @work: remote port to be operated on. 2280 * @work: remote port to be operated on.
2287 **/ 2281 *
2282 * Deletes target and all sdevs.
2283 */
2288static void 2284static void
2289fc_starget_delete(struct work_struct *work) 2285fc_starget_delete(struct work_struct *work)
2290{ 2286{
@@ -2303,9 +2299,8 @@ fc_starget_delete(struct work_struct *work)
2303 2299
2304/** 2300/**
2305 * fc_rport_final_delete - finish rport termination and delete it. 2301 * fc_rport_final_delete - finish rport termination and delete it.
2306 *
2307 * @work: remote port to be deleted. 2302 * @work: remote port to be deleted.
2308 **/ 2303 */
2309static void 2304static void
2310fc_rport_final_delete(struct work_struct *work) 2305fc_rport_final_delete(struct work_struct *work)
2311{ 2306{
@@ -2375,7 +2370,7 @@ fc_rport_final_delete(struct work_struct *work)
2375 * 2370 *
2376 * Notes: 2371 * Notes:
2377 * This routine assumes no locks are held on entry. 2372 * This routine assumes no locks are held on entry.
2378 **/ 2373 */
2379static struct fc_rport * 2374static struct fc_rport *
2380fc_rport_create(struct Scsi_Host *shost, int channel, 2375fc_rport_create(struct Scsi_Host *shost, int channel,
2381 struct fc_rport_identifiers *ids) 2376 struct fc_rport_identifiers *ids)
@@ -2462,8 +2457,7 @@ delete_rport:
2462} 2457}
2463 2458
2464/** 2459/**
2465 * fc_remote_port_add - notifies the fc transport of the existence 2460 * fc_remote_port_add - notify fc transport of the existence of a remote FC port.
2466 * of a remote FC port.
2467 * @shost: scsi host the remote port is connected to. 2461 * @shost: scsi host the remote port is connected to.
2468 * @channel: Channel on shost port connected to. 2462 * @channel: Channel on shost port connected to.
2469 * @ids: The world wide names, fc address, and FC4 port 2463 * @ids: The world wide names, fc address, and FC4 port
@@ -2499,7 +2493,7 @@ delete_rport:
2499 * 2493 *
2500 * Notes: 2494 * Notes:
2501 * This routine assumes no locks are held on entry. 2495 * This routine assumes no locks are held on entry.
2502 **/ 2496 */
2503struct fc_rport * 2497struct fc_rport *
2504fc_remote_port_add(struct Scsi_Host *shost, int channel, 2498fc_remote_port_add(struct Scsi_Host *shost, int channel,
2505 struct fc_rport_identifiers *ids) 2499 struct fc_rport_identifiers *ids)
@@ -2683,19 +2677,18 @@ EXPORT_SYMBOL(fc_remote_port_add);
2683 2677
2684 2678
2685/** 2679/**
2686 * fc_remote_port_delete - notifies the fc transport that a remote 2680 * fc_remote_port_delete - notifies the fc transport that a remote port is no longer in existence.
2687 * port is no longer in existence.
2688 * @rport: The remote port that no longer exists 2681 * @rport: The remote port that no longer exists
2689 * 2682 *
2690 * The LLDD calls this routine to notify the transport that a remote 2683 * The LLDD calls this routine to notify the transport that a remote
2691 * port is no longer part of the topology. Note: Although a port 2684 * port is no longer part of the topology. Note: Although a port
2692 * may no longer be part of the topology, it may persist in the remote 2685 * may no longer be part of the topology, it may persist in the remote
2693 * ports displayed by the fc_host. We do this under 2 conditions: 2686 * ports displayed by the fc_host. We do this under 2 conditions:
2694 * - If the port was a scsi target, we delay its deletion by "blocking" it. 2687 * 1) If the port was a scsi target, we delay its deletion by "blocking" it.
2695 * This allows the port to temporarily disappear, then reappear without 2688 * This allows the port to temporarily disappear, then reappear without
2696 * disrupting the SCSI device tree attached to it. During the "blocked" 2689 * disrupting the SCSI device tree attached to it. During the "blocked"
2697 * period the port will still exist. 2690 * period the port will still exist.
2698 * - If the port was a scsi target and disappears for longer than we 2691 * 2) If the port was a scsi target and disappears for longer than we
2699 * expect, we'll delete the port and the tear down the SCSI device tree 2692 * expect, we'll delete the port and the tear down the SCSI device tree
2700 * attached to it. However, we want to semi-persist the target id assigned 2693 * attached to it. However, we want to semi-persist the target id assigned
2701 * to that port if it eventually does exist. The port structure will 2694 * to that port if it eventually does exist. The port structure will
@@ -2709,7 +2702,8 @@ EXPORT_SYMBOL(fc_remote_port_add);
2709 * temporary blocked state. From the LLDD's perspective, the rport no 2702 * temporary blocked state. From the LLDD's perspective, the rport no
2710 * longer exists. From the SCSI midlayer's perspective, the SCSI target 2703 * longer exists. From the SCSI midlayer's perspective, the SCSI target
2711 * exists, but all sdevs on it are blocked from further I/O. The following 2704 * exists, but all sdevs on it are blocked from further I/O. The following
2712 * is then expected: 2705 * is then expected.
2706 *
2713 * If the remote port does not return (signaled by a LLDD call to 2707 * If the remote port does not return (signaled by a LLDD call to
2714 * fc_remote_port_add()) within the dev_loss_tmo timeout, then the 2708 * fc_remote_port_add()) within the dev_loss_tmo timeout, then the
2715 * scsi target is removed - killing all outstanding i/o and removing the 2709 * scsi target is removed - killing all outstanding i/o and removing the
@@ -2731,7 +2725,7 @@ EXPORT_SYMBOL(fc_remote_port_add);
2731 * 2725 *
2732 * Notes: 2726 * Notes:
2733 * This routine assumes no locks are held on entry. 2727 * This routine assumes no locks are held on entry.
2734 **/ 2728 */
2735void 2729void
2736fc_remote_port_delete(struct fc_rport *rport) 2730fc_remote_port_delete(struct fc_rport *rport)
2737{ 2731{
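A hedged sketch of the LLDD-side flow described above; wwnn, wwpn, and port_id stand in for values the driver learned from the fabric, and the FC_PORT_ROLE_* constant names assume a tree of this vintage:

	struct fc_rport_identifiers ids;

	/* connectivity lost: block the rport via the transport */
	fc_remote_port_delete(rport);

	/* later: the same port logs back in before dev_loss_tmo fires */
	ids.node_name = wwnn;
	ids.port_name = wwpn;
	ids.port_id   = port_id;
	ids.roles     = FC_PORT_ROLE_UNKNOWN;
	rport = fc_remote_port_add(shost, 0, &ids);
	fc_remote_port_rolechg(rport, FC_PORT_ROLE_FCP_TARGET);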
@@ -2792,12 +2786,12 @@ fc_remote_port_delete(struct fc_rport *rport)
2792EXPORT_SYMBOL(fc_remote_port_delete); 2786EXPORT_SYMBOL(fc_remote_port_delete);
2793 2787
2794/** 2788/**
2795 * fc_remote_port_rolechg - notifies the fc transport that the roles 2789 * fc_remote_port_rolechg - notifies the fc transport that the roles on a remote may have changed.
2796 * on a remote may have changed.
2797 * @rport: The remote port that changed. 2790 * @rport: The remote port that changed.
2791 * @roles: New roles for this port.
2798 * 2792 *
2799 * The LLDD calls this routine to notify the transport that the roles 2793 * Description: The LLDD calls this routine to notify the transport that the
2800 * on a remote port may have changed. The largest effect of this is 2794 * roles on a remote port may have changed. The largest effect of this is
2801 * if a port now becomes a FCP Target, it must be allocated a 2795 * if a port now becomes a FCP Target, it must be allocated a
2802 * scsi target id. If the port is no longer a FCP target, any 2796 * scsi target id. If the port is no longer a FCP target, any
2803 * scsi target id value assigned to it will persist in case the 2797 * scsi target id value assigned to it will persist in case the
@@ -2810,7 +2804,7 @@ EXPORT_SYMBOL(fc_remote_port_delete);
2810 * 2804 *
2811 * Notes: 2805 * Notes:
2812 * This routine assumes no locks are held on entry. 2806 * This routine assumes no locks are held on entry.
2813 **/ 2807 */
2814void 2808void
2815fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) 2809fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
2816{ 2810{
@@ -2875,12 +2869,12 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
2875EXPORT_SYMBOL(fc_remote_port_rolechg); 2869EXPORT_SYMBOL(fc_remote_port_rolechg);
2876 2870
2877/** 2871/**
2878 * fc_timeout_deleted_rport - Timeout handler for a deleted remote port, 2872 * fc_timeout_deleted_rport - Timeout handler for a deleted remote port.
2879 * which we blocked, and has now failed to return
2880 * in the allotted time.
2881 *
2882 * @work: rport target that failed to reappear in the allotted time. 2873 * @work: rport target that failed to reappear in the allotted time.
2883 **/ 2874 *
2875 * Description: An attempt to delete a remote port blocks, and if it fails
2876 * to return in the allotted time this gets called.
2877 */
2884static void 2878static void
2885fc_timeout_deleted_rport(struct work_struct *work) 2879fc_timeout_deleted_rport(struct work_struct *work)
2886{ 2880{
@@ -2984,14 +2978,12 @@ fc_timeout_deleted_rport(struct work_struct *work)
2984} 2978}
2985 2979
2986/** 2980/**
2987 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a 2981 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
2988 * disconnected SCSI target.
2989 *
2990 * @work: rport to terminate io on. 2982 * @work: rport to terminate io on.
2991 * 2983 *
2992 * Notes: Only requests the failure of the io, not that all are flushed 2984 * Notes: Only requests the failure of the io, not that all are flushed
2993 * prior to returning. 2985 * prior to returning.
2994 **/ 2986 */
2995static void 2987static void
2996fc_timeout_fail_rport_io(struct work_struct *work) 2988fc_timeout_fail_rport_io(struct work_struct *work)
2997{ 2989{
@@ -3008,9 +3000,8 @@ fc_timeout_fail_rport_io(struct work_struct *work)
3008 3000
3009/** 3001/**
3010 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. 3002 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
3011 *
3012 * @work: remote port to be scanned. 3003 * @work: remote port to be scanned.
3013 **/ 3004 */
3014static void 3005static void
3015fc_scsi_scan_rport(struct work_struct *work) 3006fc_scsi_scan_rport(struct work_struct *work)
3016{ 3007{
@@ -3047,7 +3038,7 @@ fc_scsi_scan_rport(struct work_struct *work)
3047 * 3038 *
3048 * Notes: 3039 * Notes:
3049 * This routine assumes no locks are held on entry. 3040 * This routine assumes no locks are held on entry.
3050 **/ 3041 */
3051static int 3042static int
3052fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev, 3043fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
3053 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport) 3044 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
@@ -3172,7 +3163,7 @@ delete_vport:
3172 * 3163 *
3173 * Notes: 3164 * Notes:
3174 * This routine assumes no locks are held on entry. 3165 * This routine assumes no locks are held on entry.
3175 **/ 3166 */
3176int 3167int
3177fc_vport_terminate(struct fc_vport *vport) 3168fc_vport_terminate(struct fc_vport *vport)
3178{ 3169{
@@ -3232,9 +3223,8 @@ EXPORT_SYMBOL(fc_vport_terminate);
3232 3223
3233/** 3224/**
3234 * fc_vport_sched_delete - workq-based delete request for a vport 3225 * fc_vport_sched_delete - workq-based delete request for a vport
3235 *
3236 * @work: vport to be deleted. 3226 * @work: vport to be deleted.
3237 **/ 3227 */
3238static void 3228static void
3239fc_vport_sched_delete(struct work_struct *work) 3229fc_vport_sched_delete(struct work_struct *work)
3240{ 3230{
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 5428d15f23c6..ef0e74264880 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,10 +30,10 @@
30#include <scsi/scsi_transport_iscsi.h> 30#include <scsi/scsi_transport_iscsi.h>
31#include <scsi/iscsi_if.h> 31#include <scsi/iscsi_if.h>
32 32
33#define ISCSI_SESSION_ATTRS 15 33#define ISCSI_SESSION_ATTRS 18
34#define ISCSI_CONN_ATTRS 11 34#define ISCSI_CONN_ATTRS 11
35#define ISCSI_HOST_ATTRS 4 35#define ISCSI_HOST_ATTRS 4
36#define ISCSI_TRANSPORT_VERSION "2.0-724" 36#define ISCSI_TRANSPORT_VERSION "2.0-867"
37 37
38struct iscsi_internal { 38struct iscsi_internal {
39 int daemon_pid; 39 int daemon_pid;
@@ -50,6 +50,7 @@ struct iscsi_internal {
50}; 50};
51 51
52static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ 52static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
53static struct workqueue_struct *iscsi_eh_timer_workq;
53 54
54/* 55/*
55 * list of registered transports and lock that must 56 * list of registered transports and lock that must
@@ -115,6 +116,8 @@ static struct attribute_group iscsi_transport_group = {
 	.attrs = iscsi_transport_attrs,
 };
 
+
+
 static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
 			    struct class_device *cdev)
 {
@@ -124,13 +127,30 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
 	memset(ihost, 0, sizeof(*ihost));
 	INIT_LIST_HEAD(&ihost->sessions);
 	mutex_init(&ihost->mutex);
+
+	snprintf(ihost->unbind_workq_name, KOBJ_NAME_LEN, "iscsi_unbind_%d",
+		 shost->host_no);
+	ihost->unbind_workq = create_singlethread_workqueue(
+						ihost->unbind_workq_name);
+	if (!ihost->unbind_workq)
+		return -ENOMEM;
+	return 0;
+}
+
+static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+			     struct class_device *cdev)
+{
+	struct Scsi_Host *shost = dev_to_shost(dev);
+	struct iscsi_host *ihost = shost->shost_data;
+
+	destroy_workqueue(ihost->unbind_workq);
 	return 0;
 }
 
 static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
			       "iscsi_host",
			       iscsi_setup_host,
-			       NULL,
+			       iscsi_remove_host,
			       NULL);
 
 static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
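
[Editor's note: the hunk above gives each iSCSI host a private single-threaded workqueue so that session unbinding can sleep without stalling the shared kernel events thread. A minimal sketch of the same lifecycle, with illustrative my_* names that are not from this patch:

	#include <linux/workqueue.h>

	struct my_host {
		struct workqueue_struct *wq;	/* one queue per host */
		struct work_struct unbind_work;	/* queued per request */
	};

	static void my_unbind_fn(struct work_struct *work)
	{
		struct my_host *h = container_of(work, struct my_host,
						 unbind_work);
		/* runs in process context and may sleep */
	}

	static int my_host_setup(struct my_host *h)
	{
		h->wq = create_singlethread_workqueue("my_unbind");
		if (!h->wq)
			return -ENOMEM;
		INIT_WORK(&h->unbind_work, my_unbind_fn);
		return 0;
	}

	static void my_host_teardown(struct my_host *h)
	{
		/* destroy_workqueue() flushes pending work first */
		destroy_workqueue(h->wq);
	}

queue_work(h->wq, &h->unbind_work) then defers the actual unbind, exactly as iscsi_unbind_session() does further down.]
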
@@ -252,7 +272,7 @@ static void session_recovery_timedout(struct work_struct *work)
 void iscsi_unblock_session(struct iscsi_cls_session *session)
 {
 	if (!cancel_delayed_work(&session->recovery_work))
-		flush_scheduled_work();
+		flush_workqueue(iscsi_eh_timer_workq);
 	scsi_target_unblock(&session->dev);
 }
 EXPORT_SYMBOL_GPL(iscsi_unblock_session);
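
[Editor's note: cancel_delayed_work() returns nonzero only when it pulls the work off the timer before it runs; on a miss the handler may already be executing, so the only safe fallback is to wait for the queue it was put on. The idiom, sketched with illustrative wq/dwork names:

	if (!cancel_delayed_work(&dwork))	/* 0: handler may be running */
		flush_workqueue(wq);		/* wait for it to finish */

Flushing a private queue instead of calling flush_scheduled_work() avoids blocking on unrelated work items, which is the point of the new iscsi_eh_timer_workq.]
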
@@ -260,11 +280,40 @@ EXPORT_SYMBOL_GPL(iscsi_unblock_session);
 void iscsi_block_session(struct iscsi_cls_session *session)
 {
 	scsi_target_block(&session->dev);
-	schedule_delayed_work(&session->recovery_work,
-			      session->recovery_tmo * HZ);
+	queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
+			   session->recovery_tmo * HZ);
 }
 EXPORT_SYMBOL_GPL(iscsi_block_session);
 
+static void __iscsi_unbind_session(struct work_struct *work)
+{
+	struct iscsi_cls_session *session =
+			container_of(work, struct iscsi_cls_session,
+				     unbind_work);
+	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+	struct iscsi_host *ihost = shost->shost_data;
+
+	/* Prevent new scans and make sure scanning is not in progress */
+	mutex_lock(&ihost->mutex);
+	if (list_empty(&session->host_list)) {
+		mutex_unlock(&ihost->mutex);
+		return;
+	}
+	list_del_init(&session->host_list);
+	mutex_unlock(&ihost->mutex);
+
+	scsi_remove_target(&session->dev);
+	iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+}
+
+static int iscsi_unbind_session(struct iscsi_cls_session *session)
+{
+	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+	struct iscsi_host *ihost = shost->shost_data;
+
+	return queue_work(ihost->unbind_workq, &session->unbind_work);
+}
+
 struct iscsi_cls_session *
 iscsi_alloc_session(struct Scsi_Host *shost,
 		    struct iscsi_transport *transport)
@@ -281,6 +330,7 @@ iscsi_alloc_session(struct Scsi_Host *shost,
 	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
 	INIT_LIST_HEAD(&session->host_list);
 	INIT_LIST_HEAD(&session->sess_list);
+	INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
 
 	/* this is released in the dev's release function */
 	scsi_host_get(shost);
@@ -297,6 +347,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
 	struct iscsi_host *ihost;
+	unsigned long flags;
 	int err;
 
 	ihost = shost->shost_data;
@@ -313,9 +364,15 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 	}
 	transport_register_device(&session->dev);
 
+	spin_lock_irqsave(&sesslock, flags);
+	list_add(&session->sess_list, &sesslist);
+	spin_unlock_irqrestore(&sesslock, flags);
+
 	mutex_lock(&ihost->mutex);
 	list_add(&session->host_list, &ihost->sessions);
 	mutex_unlock(&ihost->mutex);
+
+	iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
 	return 0;
 
 release_host:
@@ -328,9 +385,10 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
  * iscsi_create_session - create iscsi class session
  * @shost: scsi host
  * @transport: iscsi transport
+ * @target_id: which target
  *
  * This can be called from a LLD or iscsi_transport.
- **/
+ */
 struct iscsi_cls_session *
 iscsi_create_session(struct Scsi_Host *shost,
 		     struct iscsi_transport *transport,
@@ -350,19 +408,58 @@ iscsi_create_session(struct Scsi_Host *shost,
 }
 EXPORT_SYMBOL_GPL(iscsi_create_session);
 
+static void iscsi_conn_release(struct device *dev)
+{
+	struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
+	struct device *parent = conn->dev.parent;
+
+	kfree(conn);
+	put_device(parent);
+}
+
+static int iscsi_is_conn_dev(const struct device *dev)
+{
+	return dev->release == iscsi_conn_release;
+}
+
+static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+{
+	if (!iscsi_is_conn_dev(dev))
+		return 0;
+	return iscsi_destroy_conn(iscsi_dev_to_conn(dev));
+}
+
 void iscsi_remove_session(struct iscsi_cls_session *session)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
 	struct iscsi_host *ihost = shost->shost_data;
+	unsigned long flags;
+	int err;
 
-	if (!cancel_delayed_work(&session->recovery_work))
-		flush_scheduled_work();
+	spin_lock_irqsave(&sesslock, flags);
+	list_del(&session->sess_list);
+	spin_unlock_irqrestore(&sesslock, flags);
 
-	mutex_lock(&ihost->mutex);
-	list_del(&session->host_list);
-	mutex_unlock(&ihost->mutex);
+	/*
+	 * If we are blocked let commands flow again. The lld or iscsi
+	 * layer should set up the queuecommand to fail commands.
+	 */
+	iscsi_unblock_session(session);
+	iscsi_unbind_session(session);
+	/*
+	 * If the session dropped while removing devices then we need to make
+	 * sure it is not blocked
+	 */
+	if (!cancel_delayed_work(&session->recovery_work))
+		flush_workqueue(iscsi_eh_timer_workq);
+	flush_workqueue(ihost->unbind_workq);
 
-	scsi_remove_target(&session->dev);
+	/* hw iscsi may not have removed all connections from session */
+	err = device_for_each_child(&session->dev, NULL,
+				    iscsi_iter_destroy_conn_fn);
+	if (err)
+		dev_printk(KERN_ERR, &session->dev, "iscsi: Could not delete "
+			   "all connections for session. Error %d.\n", err);
 
 	transport_unregister_device(&session->dev);
 	device_del(&session->dev);
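
[Editor's note: iscsi_remove_session() sweeps leftover connections with the driver-core child iterator; the release-function comparison in iscsi_is_conn_dev() is how the class tells its own children apart from other devices under the same parent. A condensed sketch of that iterator pattern, with illustrative my_* names:

	static void my_dev_release(struct device *dev)
	{
		kfree(dev);		/* assumes dev was kmalloc()ed */
	}

	/* a device is "ours" iff it uses our release function */
	static int my_is_child(const struct device *dev)
	{
		return dev->release == my_dev_release;
	}

	static int my_destroy_fn(struct device *dev, void *data)
	{
		if (!my_is_child(dev))
			return 0;	/* skip foreign children */
		return my_destroy(dev);	/* nonzero return aborts the walk */
	}

	/* walk every child of parent, destroying the ones we own */
	err = device_for_each_child(parent, NULL, my_destroy_fn);

device_for_each_child() stops and returns the first nonzero value from the callback, which is why the caller above can report a single error code.]
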
@@ -371,9 +468,9 @@ EXPORT_SYMBOL_GPL(iscsi_remove_session);
 
 void iscsi_free_session(struct iscsi_cls_session *session)
 {
+	iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION);
 	put_device(&session->dev);
 }
-
 EXPORT_SYMBOL_GPL(iscsi_free_session);
 
 /**
@@ -382,7 +479,7 @@ EXPORT_SYMBOL_GPL(iscsi_free_session);
  *
  * Can be called by a LLD or iscsi_transport. There must not be
  * any running connections.
- **/
+ */
 int iscsi_destroy_session(struct iscsi_cls_session *session)
 {
 	iscsi_remove_session(session);
@@ -391,20 +488,6 @@ int iscsi_destroy_session(struct iscsi_cls_session *session)
 }
 EXPORT_SYMBOL_GPL(iscsi_destroy_session);
 
-static void iscsi_conn_release(struct device *dev)
-{
-	struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
-	struct device *parent = conn->dev.parent;
-
-	kfree(conn);
-	put_device(parent);
-}
-
-static int iscsi_is_conn_dev(const struct device *dev)
-{
-	return dev->release == iscsi_conn_release;
-}
-
 /**
  * iscsi_create_conn - create iscsi class connection
  * @session: iscsi cls session
@@ -418,12 +501,13 @@ static int iscsi_is_conn_dev(const struct device *dev)
  * for software iscsi we could be trying to preallocate a connection struct
  * in which case there could be two connection structs and cid would be
  * non-zero.
- **/
+ */
 struct iscsi_cls_conn *
 iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
 {
 	struct iscsi_transport *transport = session->transport;
 	struct iscsi_cls_conn *conn;
+	unsigned long flags;
 	int err;
 
 	conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
@@ -452,6 +536,11 @@ iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
 		goto release_parent_ref;
 	}
 	transport_register_device(&conn->dev);
+
+	spin_lock_irqsave(&connlock, flags);
+	list_add(&conn->conn_list, &connlist);
+	conn->active = 1;
+	spin_unlock_irqrestore(&connlock, flags);
 	return conn;
 
 release_parent_ref:
@@ -465,17 +554,23 @@ EXPORT_SYMBOL_GPL(iscsi_create_conn);
 
 /**
  * iscsi_destroy_conn - destroy iscsi class connection
- * @session: iscsi cls session
+ * @conn: iscsi cls connection
  *
 * This can be called from a LLD or iscsi_transport.
- **/
+ */
 int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&connlock, flags);
+	conn->active = 0;
+	list_del(&conn->conn_list);
+	spin_unlock_irqrestore(&connlock, flags);
+
 	transport_unregister_device(&conn->dev);
 	device_unregister(&conn->dev);
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
 
 /*
@@ -685,132 +780,74 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
 }
 
 /**
- * iscsi_if_destroy_session_done - send session destr. completion event
- * @conn: last connection for session
- *
- * This is called by HW iscsi LLDs to notify userspace that its HW has
- * removed a session.
- **/
-int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
+ * iscsi_session_event - send a session event to userspace
+ * @session: iscsi class session
+ * @event: type of event
+ */
+int iscsi_session_event(struct iscsi_cls_session *session,
+			enum iscsi_uevent_e event)
 {
 	struct iscsi_internal *priv;
-	struct iscsi_cls_session *session;
 	struct Scsi_Host *shost;
 	struct iscsi_uevent *ev;
 	struct sk_buff *skb;
 	struct nlmsghdr *nlh;
-	unsigned long flags;
 	int rc, len = NLMSG_SPACE(sizeof(*ev));
 
-	priv = iscsi_if_transport_lookup(conn->transport);
+	priv = iscsi_if_transport_lookup(session->transport);
 	if (!priv)
 		return -EINVAL;
-
-	session = iscsi_dev_to_session(conn->dev.parent);
 	shost = iscsi_session_to_shost(session);
 
 	skb = alloc_skb(len, GFP_KERNEL);
 	if (!skb) {
-		dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
-			   "session creation event\n");
+		dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace "
			   "of session event %u\n", event);
 		return -ENOMEM;
 	}
 
 	nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
 	ev = NLMSG_DATA(nlh);
-	ev->transport_handle = iscsi_handle(conn->transport);
-	ev->type = ISCSI_KEVENT_DESTROY_SESSION;
-	ev->r.d_session.host_no = shost->host_no;
-	ev->r.d_session.sid = session->sid;
-
-	/*
-	 * this will occur if the daemon is not up, so we just warn
-	 * the user and when the daemon is restarted it will handle it
-	 */
-	rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
-	if (rc < 0)
-		dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
-			   "session destruction event. Check iscsi daemon\n");
-
-	spin_lock_irqsave(&sesslock, flags);
-	list_del(&session->sess_list);
-	spin_unlock_irqrestore(&sesslock, flags);
+	ev->transport_handle = iscsi_handle(session->transport);
 
-	spin_lock_irqsave(&connlock, flags);
-	conn->active = 0;
-	list_del(&conn->conn_list);
-	spin_unlock_irqrestore(&connlock, flags);
-
-	return rc;
-}
-EXPORT_SYMBOL_GPL(iscsi_if_destroy_session_done);
-
-/**
- * iscsi_if_create_session_done - send session creation completion event
- * @conn: leading connection for session
- *
- * This is called by HW iscsi LLDs to notify userspace that its HW has
- * created a session or an existing session is back in the logged in state.
- **/
-int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
-{
-	struct iscsi_internal *priv;
-	struct iscsi_cls_session *session;
-	struct Scsi_Host *shost;
-	struct iscsi_uevent *ev;
-	struct sk_buff *skb;
-	struct nlmsghdr *nlh;
-	unsigned long flags;
-	int rc, len = NLMSG_SPACE(sizeof(*ev));
-
-	priv = iscsi_if_transport_lookup(conn->transport);
-	if (!priv)
+	ev->type = event;
+	switch (event) {
+	case ISCSI_KEVENT_DESTROY_SESSION:
+		ev->r.d_session.host_no = shost->host_no;
+		ev->r.d_session.sid = session->sid;
+		break;
+	case ISCSI_KEVENT_CREATE_SESSION:
+		ev->r.c_session_ret.host_no = shost->host_no;
+		ev->r.c_session_ret.sid = session->sid;
+		break;
+	case ISCSI_KEVENT_UNBIND_SESSION:
+		ev->r.unbind_session.host_no = shost->host_no;
+		ev->r.unbind_session.sid = session->sid;
+		break;
+	default:
+		dev_printk(KERN_ERR, &session->dev, "Invalid event %u.\n",
+			   event);
+		kfree_skb(skb);
 		return -EINVAL;
-
-	session = iscsi_dev_to_session(conn->dev.parent);
-	shost = iscsi_session_to_shost(session);
-
-	skb = alloc_skb(len, GFP_KERNEL);
-	if (!skb) {
-		dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
-			   "session creation event\n");
-		return -ENOMEM;
 	}
 
-	nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
-	ev = NLMSG_DATA(nlh);
-	ev->transport_handle = iscsi_handle(conn->transport);
-	ev->type = ISCSI_UEVENT_CREATE_SESSION;
-	ev->r.c_session_ret.host_no = shost->host_no;
-	ev->r.c_session_ret.sid = session->sid;
-
 	/*
 	 * this will occur if the daemon is not up, so we just warn
 	 * the user and when the daemon is restarted it will handle it
 	 */
 	rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
 	if (rc < 0)
-		dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
-			   "session creation event. Check iscsi daemon\n");
-
-	spin_lock_irqsave(&sesslock, flags);
-	list_add(&session->sess_list, &sesslist);
-	spin_unlock_irqrestore(&sesslock, flags);
-
-	spin_lock_irqsave(&connlock, flags);
-	list_add(&conn->conn_list, &connlist);
-	conn->active = 1;
-	spin_unlock_irqrestore(&connlock, flags);
+		dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace "
			   "of session event %u. Check iscsi daemon\n", event);
 	return rc;
 }
-EXPORT_SYMBOL_GPL(iscsi_if_create_session_done);
+EXPORT_SYMBOL_GPL(iscsi_session_event);
 
 static int
 iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
 {
 	struct iscsi_transport *transport = priv->iscsi_transport;
 	struct iscsi_cls_session *session;
-	unsigned long flags;
 	uint32_t hostno;
 
 	session = transport->create_session(transport, &priv->t,
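
[Editor's note: with the two *_done() notifiers folded into one event sender, a hardware-iscsi LLD reports any lifecycle transition through a single call. A hypothetical caller, not part of this patch, now reduces to:

	if (iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION) < 0)
		dev_printk(KERN_WARNING, &session->dev,
			   "event dropped; is the iscsi daemon running?\n");

The return value is iscsi_broadcast_skb()'s, so a negative code only means userspace missed the notification, not that teardown itself failed.]
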
@@ -821,10 +858,6 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
 	if (!session)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&sesslock, flags);
-	list_add(&session->sess_list, &sesslist);
-	spin_unlock_irqrestore(&sesslock, flags);
-
 	ev->r.c_session_ret.host_no = hostno;
 	ev->r.c_session_ret.sid = session->sid;
 	return 0;
@@ -835,7 +868,6 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
 {
 	struct iscsi_cls_conn *conn;
 	struct iscsi_cls_session *session;
-	unsigned long flags;
 
 	session = iscsi_session_lookup(ev->u.c_conn.sid);
 	if (!session) {
@@ -854,28 +886,17 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
 
 	ev->r.c_conn_ret.sid = session->sid;
 	ev->r.c_conn_ret.cid = conn->cid;
-
-	spin_lock_irqsave(&connlock, flags);
-	list_add(&conn->conn_list, &connlist);
-	conn->active = 1;
-	spin_unlock_irqrestore(&connlock, flags);
-
 	return 0;
 }
 
 static int
 iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
 {
-	unsigned long flags;
 	struct iscsi_cls_conn *conn;
 
 	conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
 	if (!conn)
 		return -EINVAL;
-	spin_lock_irqsave(&connlock, flags);
-	conn->active = 0;
-	list_del(&conn->conn_list);
-	spin_unlock_irqrestore(&connlock, flags);
 
 	if (transport->destroy_conn)
 		transport->destroy_conn(conn);
@@ -1002,7 +1023,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	struct iscsi_internal *priv;
 	struct iscsi_cls_session *session;
 	struct iscsi_cls_conn *conn;
-	unsigned long flags;
 
 	priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
 	if (!priv)
@@ -1020,13 +1040,16 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		break;
 	case ISCSI_UEVENT_DESTROY_SESSION:
 		session = iscsi_session_lookup(ev->u.d_session.sid);
-		if (session) {
-			spin_lock_irqsave(&sesslock, flags);
-			list_del(&session->sess_list);
-			spin_unlock_irqrestore(&sesslock, flags);
-
+		if (session)
 			transport->destroy_session(session);
-		} else
+		else
+			err = -EINVAL;
+		break;
+	case ISCSI_UEVENT_UNBIND_SESSION:
+		session = iscsi_session_lookup(ev->u.d_session.sid);
+		if (session)
+			iscsi_unbind_session(session);
+		else
 			err = -EINVAL;
 		break;
 	case ISCSI_UEVENT_CREATE_CONN:
@@ -1179,6 +1202,8 @@ iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
 iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
 iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
 iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
 
 #define iscsi_cdev_to_session(_cdev) \
 	iscsi_dev_to_session(_cdev->dev)
@@ -1217,6 +1242,9 @@ iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
 iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
 iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
 iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
 
 #define iscsi_priv_session_attr_show(field, format) \
 static ssize_t \
@@ -1413,6 +1441,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN);
 	SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
 	SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
+	SETUP_CONN_RD_ATTR(ping_tmo, ISCSI_PING_TMO);
+	SETUP_CONN_RD_ATTR(recv_tmo, ISCSI_RECV_TMO);
 
 	BUG_ON(count > ISCSI_CONN_ATTRS);
 	priv->conn_attrs[count] = NULL;
@@ -1438,6 +1468,9 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	SETUP_SESSION_RD_ATTR(password_in, ISCSI_USERNAME_IN);
 	SETUP_SESSION_RD_ATTR(username, ISCSI_PASSWORD);
 	SETUP_SESSION_RD_ATTR(username_in, ISCSI_PASSWORD_IN);
+	SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+	SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+	SETUP_SESSION_RD_ATTR(lu_reset_tmo, ISCSI_LU_RESET_TMO);
 	SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
 
 	BUG_ON(count > ISCSI_SESSION_ATTRS);
@@ -1518,8 +1551,14 @@ static __init int iscsi_transport_init(void)
 		goto unregister_session_class;
 	}
 
+	iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
+	if (!iscsi_eh_timer_workq)
+		goto release_nls;
+
 	return 0;
 
+release_nls:
+	sock_release(nls->sk_socket);
 unregister_session_class:
 	transport_class_unregister(&iscsi_session_class);
 unregister_conn_class:
@@ -1533,6 +1572,7 @@ unregister_transport_class:
 
 static void __exit iscsi_transport_exit(void)
 {
+	destroy_workqueue(iscsi_eh_timer_workq);
 	sock_release(nls->sk_socket);
 	transport_class_unregister(&iscsi_connection_class);
 	transport_class_unregister(&iscsi_session_class);
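
[Editor's note: the new release_nls label keeps the module-init error unwinding in strict reverse order of setup. The same goto-ladder idiom, reduced to a sketch with illustrative setup_a/setup_b/teardown_* helpers:

	static int __init my_init(void)
	{
		int err;

		err = setup_a();	/* e.g. register transport classes */
		if (err)
			return err;
		err = setup_b();	/* e.g. bind the netlink socket */
		if (err)
			goto undo_a;
		wq = create_singlethread_workqueue("my_wq");
		if (!wq) {
			err = -ENOMEM;
			goto undo_b;
		}
		return 0;

	undo_b:
		teardown_b();		/* mirror of setup_b */
	undo_a:
		teardown_a();		/* mirror of setup_a */
		return err;
	}

The exit path then repeats the same teardown calls unconditionally, as iscsi_transport_exit() does above.]
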
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 3120f4b3a11a..f2149d0bb999 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -173,6 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
 
 	handler = to_sas_internal(shost->transportt)->f->smp_handler;
 	ret = handler(shost, rphy, req);
+	req->errors = ret;
 
 	spin_lock_irq(q->queue_lock);
 
@@ -323,7 +324,7 @@ static int do_sas_phy_delete(struct device *dev, void *data)
 }
 
 /**
- * sas_remove_children -- tear down a devices SAS data structures
+ * sas_remove_children - tear down a device's SAS data structures
  * @dev: device belonging to the sas object
 *
 * Removes all SAS PHYs and remote PHYs for a given object
@@ -336,7 +337,7 @@ void sas_remove_children(struct device *dev)
 EXPORT_SYMBOL(sas_remove_children);
 
 /**
- * sas_remove_host -- tear down a Scsi_Host's SAS data structures
+ * sas_remove_host - tear down a Scsi_Host's SAS data structures
  * @shost: Scsi Host that is torn down
 *
 * Removes all SAS PHYs and remote PHYs for a given Scsi_Host.
@@ -577,7 +578,7 @@ static void sas_phy_release(struct device *dev)
 }
 
 /**
- * sas_phy_alloc -- allocates and initialize a SAS PHY structure
+ * sas_phy_alloc - allocates and initialize a SAS PHY structure
  * @parent: Parent device
  * @number: Phy index
 *
@@ -618,7 +619,7 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number)
 EXPORT_SYMBOL(sas_phy_alloc);
 
 /**
- * sas_phy_add -- add a SAS PHY to the device hierarchy
+ * sas_phy_add - add a SAS PHY to the device hierarchy
  * @phy: The PHY to be added
 *
 * Publishes a SAS PHY to the rest of the system.
@@ -638,7 +639,7 @@ int sas_phy_add(struct sas_phy *phy)
 EXPORT_SYMBOL(sas_phy_add);
 
 /**
- * sas_phy_free -- free a SAS PHY
+ * sas_phy_free - free a SAS PHY
  * @phy: SAS PHY to free
 *
 * Frees the specified SAS PHY.
@@ -655,7 +656,7 @@ void sas_phy_free(struct sas_phy *phy)
 EXPORT_SYMBOL(sas_phy_free);
 
 /**
- * sas_phy_delete -- remove SAS PHY
+ * sas_phy_delete - remove SAS PHY
  * @phy: SAS PHY to remove
 *
 * Removes the specified SAS PHY. If the SAS PHY has an
@@ -677,7 +678,7 @@ sas_phy_delete(struct sas_phy *phy)
 EXPORT_SYMBOL(sas_phy_delete);
 
 /**
- * scsi_is_sas_phy -- check if a struct device represents a SAS PHY
+ * scsi_is_sas_phy - check if a struct device represents a SAS PHY
  * @dev: device to check
 *
 * Returns:
@@ -843,7 +844,6 @@ EXPORT_SYMBOL(sas_port_alloc_num);
 
 /**
  * sas_port_add - add a SAS port to the device hierarchy
- *
  * @port: port to be added
 *
 * publishes a port to the rest of the system
@@ -868,7 +868,7 @@ int sas_port_add(struct sas_port *port)
 EXPORT_SYMBOL(sas_port_add);
 
 /**
- * sas_port_free -- free a SAS PORT
+ * sas_port_free - free a SAS PORT
  * @port: SAS PORT to free
 *
 * Frees the specified SAS PORT.
@@ -885,7 +885,7 @@ void sas_port_free(struct sas_port *port)
 EXPORT_SYMBOL(sas_port_free);
 
 /**
- * sas_port_delete -- remove SAS PORT
+ * sas_port_delete - remove SAS PORT
  * @port: SAS PORT to remove
 *
 * Removes the specified SAS PORT. If the SAS PORT has an
@@ -924,7 +924,7 @@ void sas_port_delete(struct sas_port *port)
 EXPORT_SYMBOL(sas_port_delete);
 
 /**
- * scsi_is_sas_port -- check if a struct device represents a SAS port
+ * scsi_is_sas_port - check if a struct device represents a SAS port
  * @dev: device to check
 *
 * Returns:
@@ -1309,6 +1309,7 @@ static void sas_rphy_initialize(struct sas_rphy *rphy)
 
 /**
  * sas_end_device_alloc - allocate an rphy for an end device
+ * @parent: which port
 *
 * Allocates an SAS remote PHY structure, connected to @parent.
 *
@@ -1345,6 +1346,8 @@ EXPORT_SYMBOL(sas_end_device_alloc);
 
 /**
  * sas_expander_alloc - allocate an rphy for an end device
+ * @parent: which port
+ * @type: SAS_EDGE_EXPANDER_DEVICE or SAS_FANOUT_EXPANDER_DEVICE
 *
 * Allocates an SAS remote PHY structure, connected to @parent.
 *
@@ -1383,7 +1386,7 @@ struct sas_rphy *sas_expander_alloc(struct sas_port *parent,
 EXPORT_SYMBOL(sas_expander_alloc);
 
 /**
- * sas_rphy_add -- add a SAS remote PHY to the device hierarchy
+ * sas_rphy_add - add a SAS remote PHY to the device hierarchy
  * @rphy: The remote PHY to be added
 *
 * Publishes a SAS remote PHY to the rest of the system.
@@ -1430,8 +1433,8 @@ int sas_rphy_add(struct sas_rphy *rphy)
 EXPORT_SYMBOL(sas_rphy_add);
 
 /**
- * sas_rphy_free -- free a SAS remote PHY
- * @rphy SAS remote PHY to free
+ * sas_rphy_free - free a SAS remote PHY
+ * @rphy: SAS remote PHY to free
 *
 * Frees the specified SAS remote PHY.
 *
@@ -1459,7 +1462,7 @@ void sas_rphy_free(struct sas_rphy *rphy)
 EXPORT_SYMBOL(sas_rphy_free);
 
 /**
- * sas_rphy_delete -- remove and free SAS remote PHY
+ * sas_rphy_delete - remove and free SAS remote PHY
  * @rphy: SAS remote PHY to remove and free
 *
 * Removes the specified SAS remote PHY and frees it.
@@ -1473,7 +1476,7 @@ sas_rphy_delete(struct sas_rphy *rphy)
 EXPORT_SYMBOL(sas_rphy_delete);
 
 /**
- * sas_rphy_remove -- remove SAS remote PHY
+ * sas_rphy_remove - remove SAS remote PHY
  * @rphy: SAS remote phy to remove
 *
 * Removes the specified SAS remote PHY.
@@ -1504,7 +1507,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
 EXPORT_SYMBOL(sas_rphy_remove);
 
 /**
- * scsi_is_sas_rphy -- check if a struct device represents a SAS remote PHY
+ * scsi_is_sas_rphy - check if a struct device represents a SAS remote PHY
  * @dev: device to check
 *
 * Returns:
@@ -1604,7 +1607,7 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
 	SETUP_TEMPLATE(expander_attrs, expander_##field, S_IRUGO, 1)
 
 /**
- * sas_attach_transport -- instantiate SAS transport template
+ * sas_attach_transport - instantiate SAS transport template
  * @ft: SAS transport class function template
 */
 struct scsi_transport_template *
@@ -1715,7 +1718,7 @@ sas_attach_transport(struct sas_function_template *ft)
 EXPORT_SYMBOL(sas_attach_transport);
 
 /**
- * sas_release_transport -- release SAS transport template instance
+ * sas_release_transport - release SAS transport template instance
  * @t: transport template instance
 */
 void sas_release_transport(struct scsi_transport_template *t)
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 4df21c92ff1e..1fb60313a516 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -52,13 +52,6 @@
 struct spi_internal {
 	struct scsi_transport_template t;
 	struct spi_function_template *f;
-	/* The actual attributes */
-	struct class_device_attribute private_attrs[SPI_NUM_ATTRS];
-	/* The array of null terminated pointers to attributes
-	 * needed by scsi_sysfs.c */
-	struct class_device_attribute *attrs[SPI_NUM_ATTRS + SPI_OTHER_ATTRS + 1];
-	struct class_device_attribute private_host_attrs[SPI_HOST_ATTRS];
-	struct class_device_attribute *host_attrs[SPI_HOST_ATTRS + 1];
 };
 
 #define to_spi_internal(tmpl)	container_of(tmpl, struct spi_internal, t)
@@ -174,17 +167,20 @@ static int spi_host_setup(struct transport_container *tc, struct device *dev,
 	return 0;
 }
 
+static int spi_host_configure(struct transport_container *tc,
+			      struct device *dev,
+			      struct class_device *cdev);
+
 static DECLARE_TRANSPORT_CLASS(spi_host_class,
			       "spi_host",
			       spi_host_setup,
			       NULL,
-			       NULL);
+			       spi_host_configure);
 
 static int spi_host_match(struct attribute_container *cont,
			  struct device *dev)
 {
 	struct Scsi_Host *shost;
-	struct spi_internal *i;
 
 	if (!scsi_is_host_device(dev))
 		return 0;
@@ -194,11 +190,13 @@ static int spi_host_match(struct attribute_container *cont,
 	    != &spi_host_class.class)
 		return 0;
 
-	i = to_spi_internal(shost->transportt);
-
-	return &i->t.host_attrs.ac == cont;
+	return &shost->transportt->host_attrs.ac == cont;
 }
 
+static int spi_target_configure(struct transport_container *tc,
+				struct device *dev,
+				struct class_device *cdev);
+
 static int spi_device_configure(struct transport_container *tc,
				struct device *dev,
				struct class_device *cdev)
@@ -300,8 +298,10 @@ store_spi_transport_##field(struct class_device *cdev, const char *buf, \
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);	\
 	struct spi_internal *i = to_spi_internal(shost->transportt);	\
									\
+	if (!i->f->set_##field)						\
+		return -EINVAL;						\
 	val = simple_strtoul(buf, NULL, 0);				\
 	i->f->set_##field(starget, val);				\
 	return count;							\
 }
 
@@ -317,6 +317,8 @@ store_spi_transport_##field(struct class_device *cdev, const char *buf, \
 	struct spi_transport_attrs *tp					\
 		= (struct spi_transport_attrs *)&starget->starget_data; \
									\
+	if (i->f->set_##field)						\
+		return -EINVAL;						\
 	val = simple_strtoul(buf, NULL, 0);				\
 	if (val > tp->max_##field)					\
 		val = tp->max_##field;					\
@@ -327,14 +329,14 @@ store_spi_transport_##field(struct class_device *cdev, const char *buf, \
 #define spi_transport_rd_attr(field, format_string)			\
	spi_transport_show_function(field, format_string)		\
	spi_transport_store_function(field, format_string)		\
-static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR,			\
+static CLASS_DEVICE_ATTR(field, S_IRUGO,				\
			 show_spi_transport_##field,			\
			 store_spi_transport_##field);
 
 #define spi_transport_simple_attr(field, format_string)		\
	spi_transport_show_simple(field, format_string)			\
	spi_transport_store_simple(field, format_string)		\
-static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR,			\
+static CLASS_DEVICE_ATTR(field, S_IRUGO,				\
			 show_spi_transport_##field,			\
			 store_spi_transport_##field);
 
@@ -342,7 +344,7 @@ static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \
	spi_transport_show_function(field, format_string)		\
	spi_transport_store_max(field, format_string)			\
	spi_transport_simple_attr(max_##field, format_string)		\
-static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR,			\
+static CLASS_DEVICE_ATTR(field, S_IRUGO,				\
			 show_spi_transport_##field,			\
			 store_spi_transport_##field);
 
@@ -472,6 +474,9 @@ store_spi_transport_period(struct class_device *cdev, const char *buf,
 		(struct spi_transport_attrs *)&starget->starget_data;
 	int period, retval;
 
+	if (!i->f->set_period)
+		return -EINVAL;
+
 	retval = store_spi_transport_period_helper(cdev, buf, count, &period);
 
 	if (period < tp->min_period)
@@ -482,7 +487,7 @@ store_spi_transport_period(struct class_device *cdev, const char *buf,
 	return retval;
 }
 
-static CLASS_DEVICE_ATTR(period, S_IRUGO | S_IWUSR,
+static CLASS_DEVICE_ATTR(period, S_IRUGO,
			 show_spi_transport_period,
			 store_spi_transport_period);
 
@@ -490,9 +495,14 @@ static ssize_t
 show_spi_transport_min_period(struct class_device *cdev, char *buf)
 {
 	struct scsi_target *starget = transport_class_to_starget(cdev);
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct spi_internal *i = to_spi_internal(shost->transportt);
 	struct spi_transport_attrs *tp =
 		(struct spi_transport_attrs *)&starget->starget_data;
 
+	if (!i->f->set_period)
+		return -EINVAL;
+
 	return show_spi_transport_period_helper(buf, tp->min_period);
 }
 
@@ -509,7 +519,7 @@ store_spi_transport_min_period(struct class_device *cdev, const char *buf,
 }
 
 
-static CLASS_DEVICE_ATTR(min_period, S_IRUGO | S_IWUSR,
+static CLASS_DEVICE_ATTR(min_period, S_IRUGO,
		    show_spi_transport_min_period,
		    store_spi_transport_min_period);
 
@@ -531,12 +541,15 @@ static ssize_t store_spi_host_signalling(struct class_device *cdev,
 	struct spi_internal *i = to_spi_internal(shost->transportt);
 	enum spi_signal_type type = spi_signal_to_value(buf);
 
+	if (!i->f->set_signalling)
+		return -EINVAL;
+
 	if (type != SPI_SIGNAL_UNKNOWN)
 		i->f->set_signalling(shost, type);
 
 	return count;
 }
-static CLASS_DEVICE_ATTR(signalling, S_IRUGO | S_IWUSR,
+static CLASS_DEVICE_ATTR(signalling, S_IRUGO,
			 show_spi_host_signalling,
			 store_spi_host_signalling);
 
@@ -1262,35 +1275,6 @@ int spi_print_msg(const unsigned char *msg)
 EXPORT_SYMBOL(spi_print_msg);
 #endif /* ! CONFIG_SCSI_CONSTANTS */
 
-#define SETUP_ATTRIBUTE(field)						\
-	i->private_attrs[count] = class_device_attr_##field;		\
-	if (!i->f->set_##field) {					\
-		i->private_attrs[count].attr.mode = S_IRUGO;		\
-		i->private_attrs[count].store = NULL;			\
-	}								\
-	i->attrs[count] = &i->private_attrs[count];			\
-	if (i->f->show_##field)						\
-		count++
-
-#define SETUP_RELATED_ATTRIBUTE(field, rel_field)			\
-	i->private_attrs[count] = class_device_attr_##field;		\
-	if (!i->f->set_##rel_field) {					\
-		i->private_attrs[count].attr.mode = S_IRUGO;		\
-		i->private_attrs[count].store = NULL;			\
-	}								\
-	i->attrs[count] = &i->private_attrs[count];			\
-	if (i->f->show_##rel_field)					\
-		count++
-
-#define SETUP_HOST_ATTRIBUTE(field)					\
-	i->private_host_attrs[count] = class_device_attr_##field;	\
-	if (!i->f->set_##field) {					\
-		i->private_host_attrs[count].attr.mode = S_IRUGO;	\
-		i->private_host_attrs[count].store = NULL;		\
-	}								\
-	i->host_attrs[count] = &i->private_host_attrs[count];		\
-	count++
-
 static int spi_device_match(struct attribute_container *cont,
			    struct device *dev)
 {
@@ -1343,16 +1327,156 @@ static DECLARE_TRANSPORT_CLASS(spi_transport_class,
			       "spi_transport",
			       spi_setup_transport_attrs,
			       NULL,
-			       NULL);
+			       spi_target_configure);
 
 static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
				    spi_device_match,
				    spi_device_configure);
 
+static struct attribute *host_attributes[] = {
+	&class_device_attr_signalling.attr,
+	NULL
+};
+
+static struct attribute_group host_attribute_group = {
+	.attrs = host_attributes,
+};
+
+static int spi_host_configure(struct transport_container *tc,
+			      struct device *dev,
+			      struct class_device *cdev)
+{
+	struct kobject *kobj = &cdev->kobj;
+	struct Scsi_Host *shost = transport_class_to_shost(cdev);
+	struct spi_internal *si = to_spi_internal(shost->transportt);
+	struct attribute *attr = &class_device_attr_signalling.attr;
+	int rc = 0;
+
+	if (si->f->set_signalling)
+		rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);
+
+	return rc;
+}
+
+/* returns true if we should be showing the variable.  Also
+ * overloads the return by setting 1<<1 if the attribute should
+ * be writeable */
+#define TARGET_ATTRIBUTE_HELPER(name) \
+	(si->f->show_##name ? 1 : 0) + \
+	(si->f->set_##name ? 2 : 0)
+
+static int target_attribute_is_visible(struct kobject *kobj,
+				       struct attribute *attr, int i)
+{
+	struct class_device *cdev =
+		container_of(kobj, struct class_device, kobj);
+	struct scsi_target *starget = transport_class_to_starget(cdev);
+	struct Scsi_Host *shost = transport_class_to_shost(cdev);
+	struct spi_internal *si = to_spi_internal(shost->transportt);
+
+	if (attr == &class_device_attr_period.attr &&
+	    spi_support_sync(starget))
+		return TARGET_ATTRIBUTE_HELPER(period);
+	else if (attr == &class_device_attr_min_period.attr &&
+		 spi_support_sync(starget))
+		return TARGET_ATTRIBUTE_HELPER(period);
+	else if (attr == &class_device_attr_offset.attr &&
+		 spi_support_sync(starget))
+		return TARGET_ATTRIBUTE_HELPER(offset);
+	else if (attr == &class_device_attr_max_offset.attr &&
+		 spi_support_sync(starget))
+		return TARGET_ATTRIBUTE_HELPER(offset);
+	else if (attr == &class_device_attr_width.attr &&
+		 spi_support_wide(starget))
+		return TARGET_ATTRIBUTE_HELPER(width);
+	else if (attr == &class_device_attr_max_width.attr &&
+		 spi_support_wide(starget))
+		return TARGET_ATTRIBUTE_HELPER(width);
+	else if (attr == &class_device_attr_iu.attr &&
+		 spi_support_ius(starget))
+		return TARGET_ATTRIBUTE_HELPER(iu);
+	else if (attr == &class_device_attr_dt.attr &&
+		 spi_support_dt(starget))
+		return TARGET_ATTRIBUTE_HELPER(dt);
+	else if (attr == &class_device_attr_qas.attr &&
+		 spi_support_qas(starget))
+		return TARGET_ATTRIBUTE_HELPER(qas);
+	else if (attr == &class_device_attr_wr_flow.attr &&
+		 spi_support_ius(starget))
+		return TARGET_ATTRIBUTE_HELPER(wr_flow);
+	else if (attr == &class_device_attr_rd_strm.attr &&
+		 spi_support_ius(starget))
+		return TARGET_ATTRIBUTE_HELPER(rd_strm);
+	else if (attr == &class_device_attr_rti.attr &&
+		 spi_support_ius(starget))
+		return TARGET_ATTRIBUTE_HELPER(rti);
+	else if (attr == &class_device_attr_pcomp_en.attr &&
+		 spi_support_ius(starget))
+		return TARGET_ATTRIBUTE_HELPER(pcomp_en);
+	else if (attr == &class_device_attr_hold_mcs.attr &&
+		 spi_support_ius(starget))
+		return TARGET_ATTRIBUTE_HELPER(hold_mcs);
+	else if (attr == &class_device_attr_revalidate.attr)
+		return 1;
+
+	return 0;
+}
+
+static struct attribute *target_attributes[] = {
+	&class_device_attr_period.attr,
+	&class_device_attr_min_period.attr,
+	&class_device_attr_offset.attr,
+	&class_device_attr_max_offset.attr,
+	&class_device_attr_width.attr,
+	&class_device_attr_max_width.attr,
+	&class_device_attr_iu.attr,
+	&class_device_attr_dt.attr,
+	&class_device_attr_qas.attr,
+	&class_device_attr_wr_flow.attr,
+	&class_device_attr_rd_strm.attr,
+	&class_device_attr_rti.attr,
+	&class_device_attr_pcomp_en.attr,
+	&class_device_attr_hold_mcs.attr,
+	&class_device_attr_revalidate.attr,
+	NULL
+};
+
+static struct attribute_group target_attribute_group = {
+	.attrs = target_attributes,
+	.is_visible = target_attribute_is_visible,
+};
+
+static int spi_target_configure(struct transport_container *tc,
+				struct device *dev,
+				struct class_device *cdev)
+{
+	struct kobject *kobj = &cdev->kobj;
+	int i;
+	struct attribute *attr;
+	int rc;
+
+	for (i = 0; (attr = target_attributes[i]) != NULL; i++) {
+		int j = target_attribute_group.is_visible(kobj, attr, i);
+
+		/* FIXME: as well as returning -EEXIST, which we'd like
+		 * to ignore, sysfs also does a WARN_ON and dumps a trace,
+		 * which is bad, so temporarily, skip attributes that are
+		 * already visible (the revalidate one) */
+		if (j && attr != &class_device_attr_revalidate.attr)
+			rc = sysfs_add_file_to_group(kobj, attr,
						target_attribute_group.name);
+		/* and make the attribute writeable if we have a set
+		 * function */
+		if ((j & 1))
+			rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);
+	}
+
+	return 0;
+}
+
 struct scsi_transport_template *
 spi_attach_transport(struct spi_function_template *ft)
 {
-	int count = 0;
 	struct spi_internal *i = kzalloc(sizeof(struct spi_internal),
					 GFP_KERNEL);
 
@@ -1360,47 +1484,17 @@ spi_attach_transport(struct spi_function_template *ft)
 		return NULL;
 
 	i->t.target_attrs.ac.class = &spi_transport_class.class;
-	i->t.target_attrs.ac.attrs = &i->attrs[0];
+	i->t.target_attrs.ac.grp = &target_attribute_group;
 	i->t.target_attrs.ac.match = spi_target_match;
 	transport_container_register(&i->t.target_attrs);
 	i->t.target_size = sizeof(struct spi_transport_attrs);
 	i->t.host_attrs.ac.class = &spi_host_class.class;
-	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
+	i->t.host_attrs.ac.grp = &host_attribute_group;
 	i->t.host_attrs.ac.match = spi_host_match;
 	transport_container_register(&i->t.host_attrs);
 	i->t.host_size = sizeof(struct spi_host_attrs);
 	i->f = ft;
 
-	SETUP_ATTRIBUTE(period);
-	SETUP_RELATED_ATTRIBUTE(min_period, period);
-	SETUP_ATTRIBUTE(offset);
-	SETUP_RELATED_ATTRIBUTE(max_offset, offset);
-	SETUP_ATTRIBUTE(width);
-	SETUP_RELATED_ATTRIBUTE(max_width, width);
-	SETUP_ATTRIBUTE(iu);
-	SETUP_ATTRIBUTE(dt);
-	SETUP_ATTRIBUTE(qas);
-	SETUP_ATTRIBUTE(wr_flow);
-	SETUP_ATTRIBUTE(rd_strm);
-	SETUP_ATTRIBUTE(rti);
-	SETUP_ATTRIBUTE(pcomp_en);
-	SETUP_ATTRIBUTE(hold_mcs);
-
-	/* if you add an attribute but forget to increase SPI_NUM_ATTRS
-	 * this bug will trigger */
-	BUG_ON(count > SPI_NUM_ATTRS);
-
-	i->attrs[count++] = &class_device_attr_revalidate;
-
-	i->attrs[count] = NULL;
-
-	count = 0;
-	SETUP_HOST_ATTRIBUTE(signalling);
-
-	BUG_ON(count > SPI_HOST_ATTRS);
-
-	i->host_attrs[count] = NULL;
-
 	return &i->t;
 }
 EXPORT_SYMBOL(spi_attach_transport);
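
[Editor's note: the conversion above replaces per-instance attribute arrays with one static attribute_group whose is_visible callback decides, per target, which sysfs files appear. A condensed sketch of the mechanism as this tree defines it (is_visible here returns an int, with bit 1 reused by TARGET_ATTRIBUTE_HELPER to flag writability; my_* names are illustrative):

	static struct attribute *my_attrs[] = {
		&class_device_attr_period.attr,
		&class_device_attr_width.attr,
		NULL
	};

	static int my_is_visible(struct kobject *kobj,
				 struct attribute *attr, int i)
	{
		struct class_device *cdev =
			container_of(kobj, struct class_device, kobj);
		struct scsi_target *starget = transport_class_to_starget(cdev);

		/* hide files the target cannot support */
		if (attr == &class_device_attr_width.attr &&
		    !spi_support_wide(starget))
			return 0;
		return 1;
	}

	static struct attribute_group my_group = {
		.attrs = my_attrs,
		.is_visible = my_is_visible,
	};

The group is then hung off the attribute container (ac.grp) instead of an attrs array, so no per-transport copies of the attribute structs are needed.]
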
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 65c584db33bd..2445c98ae95e 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -185,11 +185,10 @@ static int srp_host_match(struct attribute_container *cont, struct device *dev)
 
 /**
  * srp_rport_add - add a SRP remote port to the device hierarchy
- *
  * @shost:	scsi host the remote port is connected to.
  * @ids:	The port id for the remote port.
 *
- * publishes a port to the rest of the system
+ * Publishes a port to the rest of the system.
 */
 struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
			       struct srp_rport_identifiers *ids)
@@ -242,8 +241,8 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
 EXPORT_SYMBOL_GPL(srp_rport_add);
 
 /**
- * srp_rport_del -- remove a SRP remote port
- * @port: SRP remote port to remove
+ * srp_rport_del - remove a SRP remote port
+ * @rport: SRP remote port to remove
 *
 * Removes the specified SRP remote port.
 */
@@ -271,7 +270,7 @@ static int do_srp_rport_del(struct device *dev, void *data)
 }
 
 /**
- * srp_remove_host -- tear down a Scsi_Host's SRP data structures
+ * srp_remove_host - tear down a Scsi_Host's SRP data structures
  * @shost: Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
@@ -297,7 +296,7 @@ static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
297} 296}
298 297
299/** 298/**
300 * srp_attach_transport -- instantiate SRP transport template 299 * srp_attach_transport - instantiate SRP transport template
301 * @ft: SRP transport class function template 300 * @ft: SRP transport class function template
302 */ 301 */
303struct scsi_transport_template * 302struct scsi_transport_template *
@@ -337,7 +336,7 @@ srp_attach_transport(struct srp_function_template *ft)
337EXPORT_SYMBOL_GPL(srp_attach_transport); 336EXPORT_SYMBOL_GPL(srp_attach_transport);
338 337
339/** 338/**
340 * srp_release_transport -- release SRP transport template instance 339 * srp_release_transport - release SRP transport template instance
341 * @t: transport template instance 340 * @t: transport template instance
342 */ 341 */
343void srp_release_transport(struct scsi_transport_template *t) 342void srp_release_transport(struct scsi_transport_template *t)
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index cd68a66c7bb3..3f21bc65e8c6 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -24,6 +24,14 @@
24static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds, 24static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds,
25 unsigned int *secs); 25 unsigned int *secs);
26 26
27/**
28 * scsi_bios_ptable - Read PC partition table out of first sector of device.
29 * @dev: from this device
30 *
31 * Description: Reads the first sector from the device and returns %0x42 bytes
32 * starting at offset %0x1be.
33 * Returns: partition table in kmalloc(GFP_KERNEL) memory, or NULL on error.
34 */
27unsigned char *scsi_bios_ptable(struct block_device *dev) 35unsigned char *scsi_bios_ptable(struct block_device *dev)
28{ 36{
29 unsigned char *res = kmalloc(66, GFP_KERNEL); 37 unsigned char *res = kmalloc(66, GFP_KERNEL);
@@ -43,15 +51,17 @@ unsigned char *scsi_bios_ptable(struct block_device *dev)
43} 51}
44EXPORT_SYMBOL(scsi_bios_ptable); 52EXPORT_SYMBOL(scsi_bios_ptable);
45 53
46/* 54/**
47 * Function : int scsicam_bios_param (struct block_device *bdev, ector_t capacity, int *ip) 55 * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors.
56 * @bdev: which device
57 * @capacity: size of the disk in sectors
58 * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders
48 * 59 *
49 * Purpose : to determine the BIOS mapping used for a drive in a 60 * Description : determine the BIOS mapping/geometry used for a drive in a
50 * SCSI-CAM system, storing the results in ip as required 61 * SCSI-CAM system, storing the results in ip as required
51 * by the HDIO_GETGEO ioctl(). 62 * by the HDIO_GETGEO ioctl().
52 * 63 *
53 * Returns : -1 on failure, 0 on success. 64 * Returns : -1 on failure, 0 on success.
54 *
55 */ 65 */
56 66
57int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip) 67int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
@@ -98,15 +108,18 @@ int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
98} 108}
99EXPORT_SYMBOL(scsicam_bios_param); 109EXPORT_SYMBOL(scsicam_bios_param);
100 110
101/* 111/**
102 * Function : static int scsi_partsize(unsigned char *buf, unsigned long 112 * scsi_partsize - Parse cylinders/heads/sectors from PC partition table
103 * capacity,unsigned int *cyls, unsigned int *hds, unsigned int *secs); 113 * @buf: partition table, see scsi_bios_ptable()
114 * @capacity: size of the disk in sectors
115 * @cyls: put cylinders here
116 * @hds: put heads here
117 * @secs: put sectors here
104 * 118 *
105 * Purpose : to determine the BIOS mapping used to create the partition 119 * Description: determine the BIOS mapping/geometry used to create the partition
106 * table, storing the results in *cyls, *hds, and *secs 120 * table, storing the results in *cyls, *hds, and *secs
107 * 121 *
108 * Returns : -1 on failure, 0 on success. 122 * Returns: -1 on failure, 0 on success.
109 *
110 */ 123 */
111 124
112int scsi_partsize(unsigned char *buf, unsigned long capacity, 125int scsi_partsize(unsigned char *buf, unsigned long capacity,
@@ -194,7 +207,7 @@ EXPORT_SYMBOL(scsi_partsize);
194 * 207 *
195 * WORKING X3T9.2 208 * WORKING X3T9.2
196 * DRAFT 792D 209 * DRAFT 792D
197 * 210 * see http://www.t10.org/ftp/t10/drafts/cam/cam-r12b.pdf
198 * 211 *
199 * Revision 6 212 * Revision 6
200 * 10-MAR-94 213 * 10-MAR-94
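Taken together, the kernel-doc comments added above imply a simple usage pattern: read the table with scsi_bios_ptable(), hand it to scsi_partsize(), then free the buffer. A minimal kernel-context sketch assuming only the two exported helpers documented above (the wrapper name is hypothetical, and the header is assumed):

#include <linux/slab.h>		/* kfree() */
#include <scsi/scsicam.h>	/* scsi_bios_ptable(), scsi_partsize() (assumed header) */

/* Hypothetical wrapper, not from the patch. */
static int example_geometry(struct block_device *bdev, unsigned long capacity,
			    unsigned int *cyls, unsigned int *hds,
			    unsigned int *secs)
{
	unsigned char *p = scsi_bios_ptable(bdev); /* 0x42 bytes from offset 0x1be */
	int ret = -1;

	if (p) {
		ret = scsi_partsize(p, capacity, cyls, hds, secs);
		kfree(p);
	}
	return ret;	/* -1 on failure, 0 on success, as documented above */
}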
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a69b155f39a2..24eba3118b5a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -395,6 +395,15 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
395 goto out; 395 goto out;
396 } 396 }
397 397
398 /*
 399	 * Some devices (some SD cards, for one) don't like it if the
 400	 * last sector gets read in a read larger than 1 sector.
401 */
402 if (unlikely(sdp->last_sector_bug &&
403 rq->nr_sectors > sdp->sector_size / 512 &&
404 block + this_count == get_capacity(disk)))
405 this_count -= sdp->sector_size / 512;
406
398 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n", 407 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
399 (unsigned long long)block)); 408 (unsigned long long)block));
400 409
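For clarity, the capping in the hunk above works in 512-byte block-layer units: one logical sector is sector_size / 512 of them, and a request that ends exactly at the disk's capacity is shortened by that amount, so the final logical sector is read on its own. A standalone sketch with illustrative values (not taken from a real device):

#include <stdio.h>

int main(void)
{
	unsigned sector_size = 4096;		/* device logical sector size */
	unsigned long long capacity = 1024;	/* disk size, 512-byte units */
	unsigned long long block = 1008;	/* request start, 512-byte units */
	unsigned this_count = 16;		/* request length, 512-byte units */

	if (this_count > sector_size / 512 &&
	    block + this_count == capacity)
		this_count -= sector_size / 512; /* drop the final logical sector */

	printf("trimmed request length: %u units\n", this_count);	/* prints 8 */
	return 0;
}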
@@ -736,6 +745,7 @@ static int sd_media_changed(struct gendisk *disk)
736{ 745{
737 struct scsi_disk *sdkp = scsi_disk(disk); 746 struct scsi_disk *sdkp = scsi_disk(disk);
738 struct scsi_device *sdp = sdkp->device; 747 struct scsi_device *sdp = sdkp->device;
748 struct scsi_sense_hdr *sshdr = NULL;
739 int retval; 749 int retval;
740 750
741 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n")); 751 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
@@ -749,8 +759,11 @@ static int sd_media_changed(struct gendisk *disk)
749 * can deal with it then. It is only because of unrecoverable errors 759 * can deal with it then. It is only because of unrecoverable errors
750 * that we would ever take a device offline in the first place. 760 * that we would ever take a device offline in the first place.
751 */ 761 */
752 if (!scsi_device_online(sdp)) 762 if (!scsi_device_online(sdp)) {
753 goto not_present; 763 set_media_not_present(sdkp);
764 retval = 1;
765 goto out;
766 }
754 767
755 /* 768 /*
756 * Using TEST_UNIT_READY enables differentiation between drive with 769 * Using TEST_UNIT_READY enables differentiation between drive with
@@ -762,8 +775,12 @@ static int sd_media_changed(struct gendisk *disk)
762 * sd_revalidate() is called. 775 * sd_revalidate() is called.
763 */ 776 */
764 retval = -ENODEV; 777 retval = -ENODEV;
765 if (scsi_block_when_processing_errors(sdp)) 778
766 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES); 779 if (scsi_block_when_processing_errors(sdp)) {
780 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
781 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
782 sshdr);
783 }
767 784
768 /* 785 /*
769 * Unable to test, unit probably not ready. This usually 786 * Unable to test, unit probably not ready. This usually
@@ -771,8 +788,13 @@ static int sd_media_changed(struct gendisk *disk)
771 * and we will figure it out later once the drive is 788 * and we will figure it out later once the drive is
772 * available again. 789 * available again.
773 */ 790 */
774 if (retval) 791 if (retval || (scsi_sense_valid(sshdr) &&
775 goto not_present; 792 /* 0x3a is medium not present */
793 sshdr->asc == 0x3a)) {
794 set_media_not_present(sdkp);
795 retval = 1;
796 goto out;
797 }
776 798
777 /* 799 /*
778 * For removable scsi disk we have to recognise the presence 800 * For removable scsi disk we have to recognise the presence
@@ -783,12 +805,12 @@ static int sd_media_changed(struct gendisk *disk)
783 805
784 retval = sdp->changed; 806 retval = sdp->changed;
785 sdp->changed = 0; 807 sdp->changed = 0;
786 808out:
809 if (retval != sdkp->previous_state)
810 sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
811 sdkp->previous_state = retval;
812 kfree(sshdr);
787 return retval; 813 return retval;
788
789not_present:
790 set_media_not_present(sdkp);
791 return 1;
792} 814}
793 815
794static int sd_sync_cache(struct scsi_disk *sdkp) 816static int sd_sync_cache(struct scsi_disk *sdkp)
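The reworked sd_media_changed() above folds three "media gone" signals into one path: device offline, TEST UNIT READY failure, or valid sense data with ASC 0x3a (medium not present); it then emits SDEV_EVT_MEDIA_CHANGE only when the computed state differs from the previous one. A condensed, hypothetical model of that decision flow (not the actual sd.c code):

#include <stdio.h>

static int previous_state;

static int media_changed_sketch(int online, int tur_failed,
				int sense_valid, int asc, int changed)
{
	int retval;

	if (!online || tur_failed || (sense_valid && asc == 0x3a))
		retval = 1;		/* treat as media not present */
	else
		retval = changed;	/* consume the device's changed flag */

	if (retval != previous_state)	/* event only on a state flip */
		printf("emit SDEV_EVT_MEDIA_CHANGE\n");
	previous_state = retval;
	return retval;
}

int main(void)
{
	media_changed_sketch(1, 0, 0, 0, 0);	/* present, unchanged: quiet */
	media_changed_sketch(1, 1, 1, 0x3a, 0);	/* tray opened: event fires */
	return 0;
}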
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
deleted file mode 100644
index b11324479b5b..000000000000
--- a/drivers/scsi/seagate.c
+++ /dev/null
@@ -1,1667 +0,0 @@
1/*
2 * seagate.c Copyright (C) 1992, 1993 Drew Eckhardt
3 * low level scsi driver for ST01/ST02, Future Domain TMC-885,
4 * TMC-950 by Drew Eckhardt <drew@colorado.edu>
5 *
6 * Note : TMC-880 boards don't work because they have two bits in
7 * the status register flipped, I'll fix this "RSN"
 8 * [why do I have a strong feeling that the above message is from 1993? :-)
9 * pavel@ucw.cz]
10 *
11 * This card does all the I/O via memory mapped I/O, so there is no need
12 * to check or allocate a region of the I/O address space.
13 */
14
15/* 1996 - to use new read{b,w,l}, write{b,w,l}, and phys_to_virt
16 * macros, replaced assembler routines with C. There's probably a
17 * performance hit, but I only have a cdrom and can't tell. Define
18 * SEAGATE_USE_ASM if you want the old assembler code -- SJT
19 *
20 * 1998-jul-29 - created DPRINTK macros and made it work under
21 * linux 2.1.112, simplified some #defines etc. <pavel@ucw.cz>
22 *
23 * Aug 2000 - aeb - deleted seagate_st0x_biosparam(). It would try to
24 * read the physical disk geometry, a bad mistake. Of course it doesn't
25 * matter much what geometry one invents, but on large disks it
26 * returned 256 (or more) heads, causing all kind of failures.
27 * Of course this means that people might see a different geometry now,
28 * so boot parameters may be necessary in some cases.
29 */
30
31/*
32 * Configuration :
 33 * To use without a BIOS: -DOVERRIDE=base_address -DCONTROLLER=FD or SEAGATE.
 34 * -DIRQ=n will override the default IRQ of 5.
35 * Note: You can now set these options from the kernel's "command line".
36 * The syntax is:
37 *
38 * st0x=ADDRESS,IRQ (for a Seagate controller)
39 * or:
40 * tmc8xx=ADDRESS,IRQ (for a TMC-8xx or TMC-950 controller)
41 * eg:
42 * tmc8xx=0xC8000,15
43 *
44 * will configure the driver for a TMC-8xx style controller using IRQ 15
45 * with a base address of 0xC8000.
46 *
47 * -DARBITRATE
48 * Will cause the host adapter to arbitrate for the
49 * bus for better SCSI-II compatibility, rather than just
50 * waiting for BUS FREE and then doing its thing. Should
51 * let us do one command per Lun when I integrate my
52 * reorganization changes into the distribution sources.
53 *
54 * -DDEBUG=65535
55 * Will activate debug code.
56 *
57 * -DFAST or -DFAST32
58 * Will use blind transfers where possible
59 *
60 * -DPARITY
61 * This will enable parity.
62 *
63 * -DSEAGATE_USE_ASM
64 * Will use older seagate assembly code. should be (very small amount)
65 * Faster.
66 *
67 * -DSLOW_RATE=50
68 * Will allow compatibility with broken devices that don't
69 * handshake fast enough (ie, some CD ROM's) for the Seagate
70 * code.
71 *
72 * 50 is some number, It will let you specify a default
73 * transfer rate if handshaking isn't working correctly.
74 *
 75 * -DOLDCNTDATASCEME There is a new scheme to set the CONTROL
 76 * and DATA registers which complies more closely
 77 * with the SCSI2 standard. This hopefully eliminates
 78 * the need to swap the order these registers are
 79 * 'messed' with. It makes the following two options
 80 * obsolete. To re-enable the old scheme, define this.
81 *
 82 * The following two options are patches from the SCSI.HOWTO
83 *
84 * -DSWAPSTAT This will swap the definitions for STAT_MSG and STAT_CD.
85 *
86 * -DSWAPCNTDATA This will swap the order that seagate.c messes with
 87 * the CONTROL and DATA registers.
88 */
89
90#include <linux/module.h>
91#include <linux/interrupt.h>
92#include <linux/spinlock.h>
93#include <linux/signal.h>
94#include <linux/string.h>
95#include <linux/proc_fs.h>
96#include <linux/init.h>
97#include <linux/blkdev.h>
98#include <linux/stat.h>
99#include <linux/delay.h>
100#include <linux/io.h>
101
102#include <asm/system.h>
103#include <asm/uaccess.h>
104
105#include <scsi/scsi_cmnd.h>
106#include <scsi/scsi_device.h>
107#include <scsi/scsi.h>
108
109#include <scsi/scsi_dbg.h>
110#include <scsi/scsi_host.h>
111
112
113#ifdef DEBUG
114#define DPRINTK( when, msg... ) do { if ( (DEBUG & (when)) == (when) ) printk( msg ); } while (0)
115#else
116#define DPRINTK( when, msg... ) do { } while (0)
117#define DEBUG 0
118#endif
119#define DANY( msg... ) DPRINTK( 0xffff, msg );
120
121#ifndef IRQ
122#define IRQ 5
123#endif
124
125#ifdef FAST32
126#define FAST
127#endif
128
129#undef LINKED /* Linked commands are currently broken! */
130
131#if defined(OVERRIDE) && !defined(CONTROLLER)
132#error Please use -DCONTROLLER=SEAGATE or -DCONTROLLER=FD to override controller type
133#endif
134
135#ifndef __i386__
136#undef SEAGATE_USE_ASM
137#endif
138
139/*
140 Thanks to Brian Antoine for the example code in his Messy-Loss ST-01
141 driver, and Mitsugu Suzuki for information on the ST-01
142 SCSI host.
143*/
144
145/*
146 CONTROL defines
147*/
148
149#define CMD_RST 0x01
150#define CMD_SEL 0x02
151#define CMD_BSY 0x04
152#define CMD_ATTN 0x08
153#define CMD_START_ARB 0x10
154#define CMD_EN_PARITY 0x20
155#define CMD_INTR 0x40
156#define CMD_DRVR_ENABLE 0x80
157
158/*
159 STATUS
160*/
161#ifdef SWAPSTAT
162#define STAT_MSG 0x08
163#define STAT_CD 0x02
164#else
165#define STAT_MSG 0x02
166#define STAT_CD 0x08
167#endif
168
169#define STAT_BSY 0x01
170#define STAT_IO 0x04
171#define STAT_REQ 0x10
172#define STAT_SEL 0x20
173#define STAT_PARITY 0x40
174#define STAT_ARB_CMPL 0x80
175
176/*
177 REQUESTS
178*/
179
180#define REQ_MASK (STAT_CD | STAT_IO | STAT_MSG)
181#define REQ_DATAOUT 0
182#define REQ_DATAIN STAT_IO
183#define REQ_CMDOUT STAT_CD
184#define REQ_STATIN (STAT_CD | STAT_IO)
185#define REQ_MSGOUT (STAT_MSG | STAT_CD)
186#define REQ_MSGIN (STAT_MSG | STAT_CD | STAT_IO)
187
188extern volatile int seagate_st0x_timeout;
189
190#ifdef PARITY
191#define BASE_CMD CMD_EN_PARITY
192#else
193#define BASE_CMD 0
194#endif
195
196/*
197 Debugging code
198*/
199
200#define PHASE_BUS_FREE 1
201#define PHASE_ARBITRATION 2
202#define PHASE_SELECTION 4
203#define PHASE_DATAIN 8
204#define PHASE_DATAOUT 0x10
205#define PHASE_CMDOUT 0x20
206#define PHASE_MSGIN 0x40
207#define PHASE_MSGOUT 0x80
208#define PHASE_STATUSIN 0x100
209#define PHASE_ETC (PHASE_DATAIN | PHASE_DATAOUT | PHASE_CMDOUT | PHASE_MSGIN | PHASE_MSGOUT | PHASE_STATUSIN)
210#define PRINT_COMMAND 0x200
211#define PHASE_EXIT 0x400
212#define PHASE_RESELECT 0x800
213#define DEBUG_FAST 0x1000
214#define DEBUG_SG 0x2000
215#define DEBUG_LINKED 0x4000
216#define DEBUG_BORKEN 0x8000
217
218/*
219 * Control options - these are timeouts specified in .01 seconds.
220 */
221
222/* 30, 20 work */
223#define ST0X_BUS_FREE_DELAY 25
224#define ST0X_SELECTION_DELAY 25
225
226#define SEAGATE 1 /* these determine the type of the controller */
227#define FD 2
228
229#define ST0X_ID_STR "Seagate ST-01/ST-02"
230#define FD_ID_STR "TMC-8XX/TMC-950"
231
232static int internal_command (unsigned char target, unsigned char lun,
233 const void *cmnd,
234 void *buff, int bufflen, int reselect);
235
236static int incommand; /* set if arbitration has finished
237 and we are in some command phase. */
238
239static unsigned int base_address = 0; /* Where the card ROM starts, used to
240 calculate memory mapped register
241 location. */
242
243static void __iomem *st0x_cr_sr; /* control register write, status
244 register read. 256 bytes in
245 length.
246 Read is status of SCSI BUS, as per
247 STAT masks. */
248
249static void __iomem *st0x_dr; /* data register, read write 256
250 bytes in length. */
251
252static volatile int st0x_aborted = 0; /* set when we are aborted, ie by a
253 time out, etc. */
254
255static unsigned char controller_type = 0; /* set to SEAGATE for ST0x
256 boards or FD for TMC-8xx
257 boards */
258static int irq = IRQ;
259
260module_param(base_address, uint, 0);
261module_param(controller_type, byte, 0);
262module_param(irq, int, 0);
263MODULE_LICENSE("GPL");
264
265
266#define retcode(result) (((result) << 16) | (message << 8) | status)
267#define STATUS ((u8) readb(st0x_cr_sr))
268#define DATA ((u8) readb(st0x_dr))
269#define WRITE_CONTROL(d) { writeb((d), st0x_cr_sr); }
270#define WRITE_DATA(d) { writeb((d), st0x_dr); }
271
272#ifndef OVERRIDE
273static unsigned int seagate_bases[] = {
274 0xc8000, 0xca000, 0xcc000,
275 0xce000, 0xdc000, 0xde000
276};
277
278typedef struct {
279 const unsigned char *signature;
280 unsigned offset;
281 unsigned length;
282 unsigned char type;
283} Signature;
284
285static Signature __initdata signatures[] = {
286 {"ST01 v1.7 (C) Copyright 1987 Seagate", 15, 37, SEAGATE},
287 {"SCSI BIOS 2.00 (C) Copyright 1987 Seagate", 15, 40, SEAGATE},
288
289/*
290 * The following two lines are NOT mistakes. One detects ROM revision
291 * 3.0.0, the other 3.2. Since seagate has only one type of SCSI adapter,
292 * and this is not going to change, the "SEAGATE" and "SCSI" together
293 * are probably "good enough"
294 */
295
296 {"SEAGATE SCSI BIOS ", 16, 17, SEAGATE},
297 {"SEAGATE SCSI BIOS ", 17, 17, SEAGATE},
298
299/*
300 * However, future domain makes several incompatible SCSI boards, so specific
301 * signatures must be used.
302 */
303
304 {"FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89", 5, 46, FD},
305 {"FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89", 5, 46, FD},
306 {"FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90", 5, 47, FD},
307 {"FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90", 5, 47, FD},
308 {"FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90", 5, 46, FD},
309 {"FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92", 5, 44, FD},
310 {"IBM F1 BIOS V1.1004/30/92", 5, 25, FD},
311 {"FUTURE DOMAIN TMC-950", 5, 21, FD},
312 /* Added for 2.2.16 by Matthias_Heidbrink@b.maus.de */
313 {"IBM F1 V1.2009/22/93", 5, 25, FD},
314};
315
316#define NUM_SIGNATURES ARRAY_SIZE(signatures)
317#endif /* n OVERRIDE */
318
319/*
320 * hostno stores the hostnumber, as told to us by the init routine.
321 */
322
323static int hostno = -1;
324static void seagate_reconnect_intr (int, void *);
325static irqreturn_t do_seagate_reconnect_intr (int, void *);
326static int seagate_st0x_bus_reset(struct scsi_cmnd *);
327
328#ifdef FAST
329static int fast = 1;
330#else
331#define fast 0
332#endif
333
334#ifdef SLOW_RATE
335/*
336 * Support for broken devices :
337 * The Seagate board has a handshaking problem. Namely, a lack
338 * thereof for slow devices. You can blast 600K/second through
339 * it if you are polling for each byte, more if you do a blind
340 * transfer. In the first case, with a fast device, REQ will
341 * transition high-low or high-low-high before your loop restarts
342 * and you'll have no problems. In the second case, the board
343 * will insert wait states for up to 13.2 usecs for REQ to
344 * transition low->high, and everything will work.
345 *
346 * However, there's nothing in the state machine that says
347 * you *HAVE* to see a high-low-high set of transitions before
348 * sending the next byte, and slow things like the Trantor CD ROMS
349 * will break because of this.
350 *
351 * So, we need to slow things down, which isn't as simple as it
352 * seems. We can't slow things down period, because then people
353 * who don't recompile their kernels will shoot me for ruining
354 * their performance. We need to do it on a case per case basis.
355 *
 356 * The best approach for performance will be, only for borken devices
 357 * (this is stored on a per-target basis in the scsi_devices array),
 358 *
 359 * to wait for a low->high transition before continuing with that
 360 * transfer. If we time out, continue anyway. We don't need
361 * a long timeout, because REQ should only be asserted until the
362 * corresponding ACK is received and processed.
363 *
364 * Note that we can't use the system timer for this, because of
365 * resolution, and we *really* can't use the timer chip since
366 * gettimeofday() and the beeper routines use that. So,
367 * the best thing for us to do will be to calibrate a timing
368 * loop in the initialization code using the timer chip before
369 * gettimeofday() can screw with it.
370 *
371 * FIXME: this is broken (not borken :-). Empty loop costs less than
372 * loop with ISA access in it! -- pavel@ucw.cz
373 */
374
375static int borken_calibration = 0;
376
377static void __init borken_init (void)
378{
379 register int count = 0, start = jiffies + 1, stop = start + 25;
380
381 /* FIXME: There may be a better approach, this is a straight port for
382 now */
383 preempt_disable();
384 while (time_before (jiffies, start))
385 cpu_relax();
386 for (; time_before (jiffies, stop); ++count)
387 cpu_relax();
388 preempt_enable();
389
390/*
391 * Ok, we now have a count for .25 seconds. Convert to a
392 * count per second and divide by transfer rate in K. */
393
394 borken_calibration = (count * 4) / (SLOW_RATE * 1024);
395
396 if (borken_calibration < 1)
397 borken_calibration = 1;
398}
399
400static inline void borken_wait (void)
401{
402 register int count;
403
404 for (count = borken_calibration; count && (STATUS & STAT_REQ); --count)
405 cpu_relax();
406
407#if (DEBUG & DEBUG_BORKEN)
408 if (count)
409 printk ("scsi%d : borken timeout\n", hostno);
410#endif
411}
412
413#endif /* def SLOW_RATE */
414
415/* These beasts only live on ISA, and ISA means 8MHz. Each ULOOP()
416 * contains at least one ISA access, which takes more than 0.125
 417 * usec. So if we loop 8 times the time in usec, we are safe.
418 */
419
420#define ULOOP( i ) for (clock = i*8;;)
421#define TIMEOUT (!(clock--))
422
423static int __init seagate_st0x_detect (struct scsi_host_template * tpnt)
424{
425 struct Scsi_Host *instance;
426 int i, j;
427 unsigned long cr, dr;
428
429 tpnt->proc_name = "seagate";
430/*
431 * First, we try for the manual override.
432 */
433 DANY ("Autodetecting ST0x / TMC-8xx\n");
434
435 if (hostno != -1) {
436 printk (KERN_ERR "seagate_st0x_detect() called twice?!\n");
437 return 0;
438 }
439
440/* If the user specified the controller type from the command line,
441 controller_type will be non-zero, so don't try to detect one */
442
443 if (!controller_type) {
444#ifdef OVERRIDE
445 base_address = OVERRIDE;
446 controller_type = CONTROLLER;
447
448 DANY ("Base address overridden to %x, controller type is %s\n",
449 base_address,
450 controller_type == SEAGATE ? "SEAGATE" : "FD");
451#else /* OVERRIDE */
452/*
453 * To detect this card, we simply look for the signature
454 * from the BIOS version notice in all the possible locations
455 * of the ROM's. This has a nice side effect of not trashing
456 * any register locations that might be used by something else.
457 *
458 * XXX - note that we probably should be probing the address
459 * space for the on-board RAM instead.
460 */
461
462 for (i = 0; i < ARRAY_SIZE(seagate_bases); ++i) {
463 void __iomem *p = ioremap(seagate_bases[i], 0x2000);
464 if (!p)
465 continue;
466 for (j = 0; j < NUM_SIGNATURES; ++j)
467 if (check_signature(p + signatures[j].offset, signatures[j].signature, signatures[j].length)) {
468 base_address = seagate_bases[i];
469 controller_type = signatures[j].type;
470 break;
471 }
472 iounmap(p);
473 }
474#endif /* OVERRIDE */
475 }
476 /* (! controller_type) */
477 tpnt->this_id = (controller_type == SEAGATE) ? 7 : 6;
478 tpnt->name = (controller_type == SEAGATE) ? ST0X_ID_STR : FD_ID_STR;
479
480 if (!base_address) {
481 printk(KERN_INFO "seagate: ST0x/TMC-8xx not detected.\n");
482 return 0;
483 }
484
485 cr = base_address + (controller_type == SEAGATE ? 0x1a00 : 0x1c00);
486 dr = cr + 0x200;
487 st0x_cr_sr = ioremap(cr, 0x100);
488 st0x_dr = ioremap(dr, 0x100);
489
490 DANY("%s detected. Base address = %x, cr = %x, dr = %x\n",
491 tpnt->name, base_address, cr, dr);
492
493 /*
494 * At all times, we will use IRQ 5. Should also check for IRQ3
495 * if we lose our first interrupt.
496 */
497 instance = scsi_register (tpnt, 0);
498 if (instance == NULL)
499 return 0;
500
501 hostno = instance->host_no;
502 if (request_irq (irq, do_seagate_reconnect_intr, IRQF_DISABLED, (controller_type == SEAGATE) ? "seagate" : "tmc-8xx", instance)) {
503 printk(KERN_ERR "scsi%d : unable to allocate IRQ%d\n", hostno, irq);
504 return 0;
505 }
506 instance->irq = irq;
507 instance->io_port = base_address;
508#ifdef SLOW_RATE
509 printk(KERN_INFO "Calibrating borken timer... ");
510 borken_init();
511 printk(" %d cycles per transfer\n", borken_calibration);
512#endif
513 printk (KERN_INFO "This is one second... ");
514 {
515 int clock;
516 ULOOP (1 * 1000 * 1000) {
517 STATUS;
518 if (TIMEOUT)
519 break;
520 }
521 }
522
523 printk ("done, %s options:"
524#ifdef ARBITRATE
525 " ARBITRATE"
526#endif
527#if DEBUG
528 " DEBUG"
529#endif
530#ifdef FAST
531 " FAST"
532#ifdef FAST32
533 "32"
534#endif
535#endif
536#ifdef LINKED
537 " LINKED"
538#endif
539#ifdef PARITY
540 " PARITY"
541#endif
542#ifdef SEAGATE_USE_ASM
543 " SEAGATE_USE_ASM"
544#endif
545#ifdef SLOW_RATE
546 " SLOW_RATE"
547#endif
548#ifdef SWAPSTAT
549 " SWAPSTAT"
550#endif
551#ifdef SWAPCNTDATA
552 " SWAPCNTDATA"
553#endif
554 "\n", tpnt->name);
555 return 1;
556}
557
558static const char *seagate_st0x_info (struct Scsi_Host *shpnt)
559{
560 static char buffer[64];
561
562 snprintf(buffer, 64, "%s at irq %d, address 0x%05X",
563 (controller_type == SEAGATE) ? ST0X_ID_STR : FD_ID_STR,
564 irq, base_address);
565 return buffer;
566}
567
568/*
569 * These are our saved pointers for the outstanding command that is
570 * waiting for a reconnect
571 */
572
573static unsigned char current_target, current_lun;
574static unsigned char *current_cmnd, *current_data;
575static int current_nobuffs;
576static struct scatterlist *current_buffer;
577static int current_bufflen;
578
579#ifdef LINKED
580/*
581 * linked_connected indicates whether or not we are currently connected to
582 * linked_target, linked_lun and in an INFORMATION TRANSFER phase,
583 * using linked commands.
584 */
585
586static int linked_connected = 0;
587static unsigned char linked_target, linked_lun;
588#endif
589
590static void (*done_fn) (struct scsi_cmnd *) = NULL;
591static struct scsi_cmnd *SCint = NULL;
592
593/*
594 * These control whether or not disconnect / reconnect will be attempted,
595 * or are being attempted.
596 */
597
598#define NO_RECONNECT 0
599#define RECONNECT_NOW 1
600#define CAN_RECONNECT 2
601
602/*
603 * LINKED_RIGHT indicates that we are currently connected to the correct target
604 * for this command, LINKED_WRONG indicates that we are connected to the wrong
605 * target. Note that these imply CAN_RECONNECT and require defined(LINKED).
606 */
607
608#define LINKED_RIGHT 3
609#define LINKED_WRONG 4
610
611/*
612 * This determines if we are expecting to reconnect or not.
613 */
614
615static int should_reconnect = 0;
616
617/*
618 * The seagate_reconnect_intr routine is called when a target reselects the
619 * host adapter. This occurs on the interrupt triggered by the target
620 * asserting SEL.
621 */
622
623static irqreturn_t do_seagate_reconnect_intr(int irq, void *dev_id)
624{
625 unsigned long flags;
626 struct Scsi_Host *dev = dev_id;
627
628 spin_lock_irqsave (dev->host_lock, flags);
629 seagate_reconnect_intr (irq, dev_id);
630 spin_unlock_irqrestore (dev->host_lock, flags);
631 return IRQ_HANDLED;
632}
633
634static void seagate_reconnect_intr (int irq, void *dev_id)
635{
636 int temp;
637 struct scsi_cmnd *SCtmp;
638
639 DPRINTK (PHASE_RESELECT, "scsi%d : seagate_reconnect_intr() called\n", hostno);
640
641 if (!should_reconnect)
642 printk(KERN_WARNING "scsi%d: unexpected interrupt.\n", hostno);
643 else {
644 should_reconnect = 0;
645
646 DPRINTK (PHASE_RESELECT, "scsi%d : internal_command(%d, %08x, %08x, RECONNECT_NOW\n",
647 hostno, current_target, current_data, current_bufflen);
648
649 temp = internal_command (current_target, current_lun, current_cmnd, current_data, current_bufflen, RECONNECT_NOW);
650
651 if (msg_byte(temp) != DISCONNECT) {
652 if (done_fn) {
653 DPRINTK(PHASE_RESELECT, "scsi%d : done_fn(%d,%08x)", hostno, hostno, temp);
654 if (!SCint)
655 panic ("SCint == NULL in seagate");
656 SCtmp = SCint;
657 SCint = NULL;
658 SCtmp->result = temp;
659 done_fn(SCtmp);
660 } else
661 printk(KERN_ERR "done_fn() not defined.\n");
662 }
663 }
664}
665
666/*
667 * The seagate_st0x_queue_command() function provides a queued interface
668 * to the seagate SCSI driver. Basically, it just passes control onto the
669 * seagate_command() function, after fixing it so that the done_fn()
670 * is set to the one passed to the function. We have to be very careful,
671 * because there are some commands on some devices that do not disconnect,
672 * and if we simply call the done_fn when the command is done then another
673 * command is started and queue_command is called again... We end up
674 * overflowing the kernel stack, and this tends not to be such a good idea.
675 */
676
677static int recursion_depth = 0;
678
679static int seagate_st0x_queue_command(struct scsi_cmnd * SCpnt,
680 void (*done) (struct scsi_cmnd *))
681{
682 int result, reconnect;
683 struct scsi_cmnd *SCtmp;
684
685 DANY ("seagate: que_command");
686 done_fn = done;
687 current_target = SCpnt->device->id;
688 current_lun = SCpnt->device->lun;
689 current_cmnd = SCpnt->cmnd;
690 current_data = (unsigned char *) SCpnt->request_buffer;
691 current_bufflen = SCpnt->request_bufflen;
692 SCint = SCpnt;
693 if (recursion_depth)
694 return 1;
695 recursion_depth++;
696 do {
697#ifdef LINKED
698 /*
699 * Set linked command bit in control field of SCSI command.
700 */
701
702 current_cmnd[SCpnt->cmd_len] |= 0x01;
703 if (linked_connected) {
704 DPRINTK (DEBUG_LINKED, "scsi%d : using linked commands, current I_T_L nexus is ", hostno);
705 if (linked_target == current_target && linked_lun == current_lun)
706 {
707 DPRINTK(DEBUG_LINKED, "correct\n");
708 reconnect = LINKED_RIGHT;
709 } else {
710 DPRINTK(DEBUG_LINKED, "incorrect\n");
711 reconnect = LINKED_WRONG;
712 }
713 } else
714#endif /* LINKED */
715 reconnect = CAN_RECONNECT;
716
717 result = internal_command(SCint->device->id, SCint->device->lun, SCint->cmnd,
718 SCint->request_buffer, SCint->request_bufflen, reconnect);
719 if (msg_byte(result) == DISCONNECT)
720 break;
721 SCtmp = SCint;
722 SCint = NULL;
723 SCtmp->result = result;
724 done_fn(SCtmp);
725 }
726 while (SCint);
727 recursion_depth--;
728 return 0;
729}
730
731static int internal_command (unsigned char target, unsigned char lun,
732 const void *cmnd, void *buff, int bufflen, int reselect)
733{
734 unsigned char *data = NULL;
735 struct scatterlist *buffer = NULL;
736 int clock, temp, nobuffs = 0, done = 0, len = 0;
737#if DEBUG
738 int transfered = 0, phase = 0, newphase;
739#endif
740 register unsigned char status_read;
741 unsigned char tmp_data, tmp_control, status = 0, message = 0;
742 unsigned transfersize = 0, underflow = 0;
743#ifdef SLOW_RATE
744 int borken = (int) SCint->device->borken; /* Does the current target require
745 Very Slow I/O ? */
746#endif
747
748 incommand = 0;
749 st0x_aborted = 0;
750
751#if (DEBUG & PRINT_COMMAND)
752 printk("scsi%d : target = %d, command = ", hostno, target);
753 __scsi_print_command((unsigned char *) cmnd);
754#endif
755
756#if (DEBUG & PHASE_RESELECT)
757 switch (reselect) {
758 case RECONNECT_NOW:
759 printk("scsi%d : reconnecting\n", hostno);
760 break;
761#ifdef LINKED
762 case LINKED_RIGHT:
763 printk("scsi%d : connected, can reconnect\n", hostno);
764 break;
765 case LINKED_WRONG:
766 printk("scsi%d : connected to wrong target, can reconnect\n",
767 hostno);
768 break;
769#endif
770 case CAN_RECONNECT:
771 printk("scsi%d : allowed to reconnect\n", hostno);
772 break;
773 default:
774 printk("scsi%d : not allowed to reconnect\n", hostno);
775 }
776#endif
777
778 if (target == (controller_type == SEAGATE ? 7 : 6))
779 return DID_BAD_TARGET;
780
781 /*
 782	 * We work it differently depending on whether this is "the first time,"
783 * or a reconnect. If this is a reselect phase, then SEL will
784 * be asserted, and we must skip selection / arbitration phases.
785 */
786
787 switch (reselect) {
788 case RECONNECT_NOW:
789 DPRINTK (PHASE_RESELECT, "scsi%d : phase RESELECT \n", hostno);
790 /*
791 * At this point, we should find the logical or of our ID
792 * and the original target's ID on the BUS, with BSY, SEL,
793 * and I/O signals asserted.
794 *
795 * After ARBITRATION phase is completed, only SEL, BSY,
796 * and the target ID are asserted. A valid initiator ID
797 * is not on the bus until IO is asserted, so we must wait
798 * for that.
799 */
800 ULOOP (100 * 1000) {
801 temp = STATUS;
802 if ((temp & STAT_IO) && !(temp & STAT_BSY))
803 break;
804 if (TIMEOUT) {
 805	 DPRINTK (PHASE_RESELECT, "scsi%d : RESELECT timed out while waiting for IO.\n", hostno);
806 return (DID_BAD_INTR << 16);
807 }
808 }
809
810 /*
811 * After I/O is asserted by the target, we can read our ID
812 * and its ID off of the BUS.
813 */
814
815 if (!((temp = DATA) & (controller_type == SEAGATE ? 0x80 : 0x40))) {
816 DPRINTK (PHASE_RESELECT, "scsi%d : detected reconnect request to different target.\n\tData bus = %d\n", hostno, temp);
817 return (DID_BAD_INTR << 16);
818 }
819
820 if (!(temp & (1 << current_target))) {
821 printk(KERN_WARNING "scsi%d : Unexpected reselect interrupt. Data bus = %d\n", hostno, temp);
822 return (DID_BAD_INTR << 16);
823 }
824
825 buffer = current_buffer;
826 cmnd = current_cmnd; /* WDE add */
827 data = current_data; /* WDE add */
828 len = current_bufflen; /* WDE add */
829 nobuffs = current_nobuffs;
830
831 /*
832 * We have determined that we have been selected. At this
833 * point, we must respond to the reselection by asserting
834 * BSY ourselves
835 */
836
837#if 1
838 WRITE_CONTROL (BASE_CMD | CMD_DRVR_ENABLE | CMD_BSY);
839#else
840 WRITE_CONTROL (BASE_CMD | CMD_BSY);
841#endif
842
843 /*
844 * The target will drop SEL, and raise BSY, at which time
845 * we must drop BSY.
846 */
847
848 ULOOP (100 * 1000) {
849 if (!(STATUS & STAT_SEL))
850 break;
851 if (TIMEOUT) {
852 WRITE_CONTROL (BASE_CMD | CMD_INTR);
853 DPRINTK (PHASE_RESELECT, "scsi%d : RESELECT timed out while waiting for SEL.\n", hostno);
854 return (DID_BAD_INTR << 16);
855 }
856 }
857 WRITE_CONTROL (BASE_CMD);
858 /*
859 * At this point, we have connected with the target
860 * and can get on with our lives.
861 */
862 break;
863 case CAN_RECONNECT:
864#ifdef LINKED
865 /*
866 * This is a bletcherous hack, just as bad as the Unix #!
867 * interpreter stuff. If it turns out we are using the wrong
868 * I_T_L nexus, the easiest way to deal with it is to go into
 869	 * our INFORMATION TRANSFER PHASE code, send an ABORT
870 * message on MESSAGE OUT phase, and then loop back to here.
871 */
872connect_loop:
873#endif
874 DPRINTK (PHASE_BUS_FREE, "scsi%d : phase = BUS FREE \n", hostno);
875
876 /*
877 * BUS FREE PHASE
878 *
879 * On entry, we make sure that the BUS is in a BUS FREE
 880	 * phase, by ensuring that both BSY and SEL are low for
881 * at least one bus settle delay. Several reads help
882 * eliminate wire glitch.
883 */
884
885#ifndef ARBITRATE
886#error FIXME: this is broken: we may not use jiffies here - we are under cli(). It will hardlock.
887 clock = jiffies + ST0X_BUS_FREE_DELAY;
888
889 while (((STATUS | STATUS | STATUS) & (STAT_BSY | STAT_SEL)) && (!st0x_aborted) && time_before (jiffies, clock))
890 cpu_relax();
891
892 if (time_after (jiffies, clock))
893 return retcode (DID_BUS_BUSY);
894 else if (st0x_aborted)
895 return retcode (st0x_aborted);
896#endif
897 DPRINTK (PHASE_SELECTION, "scsi%d : phase = SELECTION\n", hostno);
898
899 clock = jiffies + ST0X_SELECTION_DELAY;
900
901 /*
902 * Arbitration/selection procedure :
903 * 1. Disable drivers
904 * 2. Write HOST adapter address bit
905 * 3. Set start arbitration.
906 * 4. We get either ARBITRATION COMPLETE or SELECT at this
907 * point.
908 * 5. OR our ID and targets on bus.
 909	 * 6. Enable SCSI drivers and assert SEL and ATTN
910 */
911
912#ifdef ARBITRATE
913 /* FIXME: verify host lock is always held here */
914 WRITE_CONTROL(0);
915 WRITE_DATA((controller_type == SEAGATE) ? 0x80 : 0x40);
916 WRITE_CONTROL(CMD_START_ARB);
917
918 ULOOP (ST0X_SELECTION_DELAY * 10000) {
919 status_read = STATUS;
920 if (status_read & STAT_ARB_CMPL)
921 break;
922 if (st0x_aborted) /* FIXME: What? We are going to do something even after abort? */
923 break;
924 if (TIMEOUT || (status_read & STAT_SEL)) {
925 printk(KERN_WARNING "scsi%d : arbitration lost or timeout.\n", hostno);
926 WRITE_CONTROL (BASE_CMD);
927 return retcode (DID_NO_CONNECT);
928 }
929 }
930 DPRINTK (PHASE_SELECTION, "scsi%d : arbitration complete\n", hostno);
931#endif
932
933 /*
934 * When the SCSI device decides that we're gawking at it,
935 * it will respond by asserting BUSY on the bus.
936 *
937 * Note : the Seagate ST-01/02 product manual says that we
938 * should twiddle the DATA register before the control
939 * register. However, this does not work reliably so we do
940 * it the other way around.
941 *
942 * Probably could be a problem with arbitration too, we
943 * really should try this with a SCSI protocol or logic
944 * analyzer to see what is going on.
945 */
946 tmp_data = (unsigned char) ((1 << target) | (controller_type == SEAGATE ? 0x80 : 0x40));
947 tmp_control = BASE_CMD | CMD_DRVR_ENABLE | CMD_SEL | (reselect ? CMD_ATTN : 0);
948
949 /* FIXME: verify host lock is always held here */
950#ifdef OLDCNTDATASCEME
951#ifdef SWAPCNTDATA
952 WRITE_CONTROL (tmp_control);
953 WRITE_DATA (tmp_data);
954#else
955 WRITE_DATA (tmp_data);
956 WRITE_CONTROL (tmp_control);
957#endif
958#else
959 tmp_control ^= CMD_BSY; /* This is guesswork. What used to be in driver */
960 WRITE_CONTROL (tmp_control); /* could never work: it sent data into control */
961 WRITE_DATA (tmp_data); /* register and control info into data. Hopefully */
962 tmp_control ^= CMD_BSY; /* fixed, but order of first two may be wrong. */
963 WRITE_CONTROL (tmp_control); /* -- pavel@ucw.cz */
964#endif
965
966 ULOOP (250 * 1000) {
967 if (st0x_aborted) {
968 /*
969 * If we have been aborted, and we have a
970 * command in progress, IE the target
971 * still has BSY asserted, then we will
972 * reset the bus, and notify the midlevel
973 * driver to expect sense.
974 */
975
976 WRITE_CONTROL (BASE_CMD);
977 if (STATUS & STAT_BSY) {
 978	 printk(KERN_WARNING "scsi%d : BSY asserted after we've been aborted.\n", hostno);
979 seagate_st0x_bus_reset(NULL);
980 return retcode (DID_RESET);
981 }
982 return retcode (st0x_aborted);
983 }
984 if (STATUS & STAT_BSY)
985 break;
986 if (TIMEOUT) {
987 DPRINTK (PHASE_SELECTION, "scsi%d : NO CONNECT with target %d, stat = %x \n", hostno, target, STATUS);
988 return retcode (DID_NO_CONNECT);
989 }
990 }
991
992 /* Establish current pointers. Take into account scatter / gather */
993
994 if ((nobuffs = SCint->use_sg)) {
995#if (DEBUG & DEBUG_SG)
996 {
997 int i;
998 printk("scsi%d : scatter gather requested, using %d buffers.\n", hostno, nobuffs);
999 for (i = 0; i < nobuffs; ++i)
1000 printk("scsi%d : buffer %d address = %p length = %d\n",
1001 hostno, i,
1002 sg_virt(&buffer[i]),
1003 buffer[i].length);
1004 }
1005#endif
1006
1007 buffer = (struct scatterlist *) SCint->request_buffer;
1008 len = buffer->length;
1009 data = sg_virt(buffer);
1010 } else {
1011 DPRINTK (DEBUG_SG, "scsi%d : scatter gather not requested.\n", hostno);
1012 buffer = NULL;
1013 len = SCint->request_bufflen;
1014 data = (unsigned char *) SCint->request_buffer;
1015 }
1016
1017 DPRINTK (PHASE_DATAIN | PHASE_DATAOUT, "scsi%d : len = %d\n",
1018 hostno, len);
1019
1020 break;
1021#ifdef LINKED
1022 case LINKED_RIGHT:
1023 break;
1024 case LINKED_WRONG:
1025 break;
1026#endif
1027 } /* end of switch(reselect) */
1028
1029 /*
1030 * There are several conditions under which we wish to send a message :
1031 * 1. When we are allowing disconnect / reconnect, and need to
1032 * establish the I_T_L nexus via an IDENTIFY with the DiscPriv bit
1033 * set.
1034 *
 1035	 * 2. When we are doing linked commands, have the wrong I_T_L
1036 * nexus established and want to send an ABORT message.
1037 */
1038
1039 /* GCC does not like an ifdef inside a macro, so do it the hard way. */
1040#ifdef LINKED
1041 WRITE_CONTROL (BASE_CMD | CMD_DRVR_ENABLE | (((reselect == CAN_RECONNECT)|| (reselect == LINKED_WRONG))? CMD_ATTN : 0));
1042#else
1043 WRITE_CONTROL (BASE_CMD | CMD_DRVR_ENABLE | (((reselect == CAN_RECONNECT))? CMD_ATTN : 0));
1044#endif
1045
1046 /*
1047 * INFORMATION TRANSFER PHASE
1048 *
1049 * The nasty looking read / write inline assembler loops we use for
1050 * DATAIN and DATAOUT phases are approximately 4-5 times as fast as
1051 * the 'C' versions - since we're moving 1024 bytes of data, this
1052 * really adds up.
1053 *
1054 * SJT: The nasty-looking assembler is gone, so it's slower.
1055 *
1056 */
1057
1058 DPRINTK (PHASE_ETC, "scsi%d : phase = INFORMATION TRANSFER\n", hostno);
1059
1060 incommand = 1;
1061 transfersize = SCint->transfersize;
1062 underflow = SCint->underflow;
1063
1064 /*
1065 * Now, we poll the device for status information,
1066 * and handle any requests it makes. Note that since we are unsure
1067 * of how much data will be flowing across the system, etc and
1068 * cannot make reasonable timeouts, that we will instead have the
1069 * midlevel driver handle any timeouts that occur in this phase.
1070 */
1071
1072 while (((status_read = STATUS) & STAT_BSY) && !st0x_aborted && !done) {
1073#ifdef PARITY
1074 if (status_read & STAT_PARITY) {
1075 printk(KERN_ERR "scsi%d : got parity error\n", hostno);
1076 st0x_aborted = DID_PARITY;
1077 }
1078#endif
1079 if (status_read & STAT_REQ) {
1080#if ((DEBUG & PHASE_ETC) == PHASE_ETC)
1081 if ((newphase = (status_read & REQ_MASK)) != phase) {
1082 phase = newphase;
1083 switch (phase) {
1084 case REQ_DATAOUT:
1085 printk ("scsi%d : phase = DATA OUT\n", hostno);
1086 break;
1087 case REQ_DATAIN:
1088 printk ("scsi%d : phase = DATA IN\n", hostno);
1089 break;
1090 case REQ_CMDOUT:
1091 printk
1092 ("scsi%d : phase = COMMAND OUT\n", hostno);
1093 break;
1094 case REQ_STATIN:
1095 printk ("scsi%d : phase = STATUS IN\n", hostno);
1096 break;
1097 case REQ_MSGOUT:
1098 printk
1099 ("scsi%d : phase = MESSAGE OUT\n", hostno);
1100 break;
1101 case REQ_MSGIN:
1102 printk ("scsi%d : phase = MESSAGE IN\n", hostno);
1103 break;
1104 default:
1105 printk ("scsi%d : phase = UNKNOWN\n", hostno);
1106 st0x_aborted = DID_ERROR;
1107 }
1108 }
1109#endif
1110 switch (status_read & REQ_MASK) {
1111 case REQ_DATAOUT:
1112 /*
1113 * If we are in fast mode, then we simply splat
1114 * the data out in word-sized chunks as fast as
1115 * we can.
1116 */
1117
1118 if (!len) {
1119#if 0
1120 printk("scsi%d: underflow to target %d lun %d \n", hostno, target, lun);
1121 st0x_aborted = DID_ERROR;
1122 fast = 0;
1123#endif
1124 break;
1125 }
1126
1127 if (fast && transfersize
1128 && !(len % transfersize)
1129 && (len >= transfersize)
1130#ifdef FAST32
1131 && !(transfersize % 4)
1132#endif
1133 ) {
1134 DPRINTK (DEBUG_FAST,
1135 "scsi%d : FAST transfer, underflow = %d, transfersize = %d\n"
1136 " len = %d, data = %08x\n",
1137 hostno, SCint->underflow,
1138 SCint->transfersize, len,
1139 data);
1140
1141 /* SJT: Start. Fast Write */
1142#ifdef SEAGATE_USE_ASM
1143 __asm__ ("cld\n\t"
1144#ifdef FAST32
1145 "shr $2, %%ecx\n\t"
1146 "1:\t"
1147 "lodsl\n\t"
1148 "movl %%eax, (%%edi)\n\t"
1149#else
1150 "1:\t"
1151 "lodsb\n\t"
1152 "movb %%al, (%%edi)\n\t"
1153#endif
1154 "loop 1b;"
1155 /* output */ :
1156 /* input */ :"D" (st0x_dr),
1157 "S"
1158 (data),
1159 "c" (SCint->transfersize)
1160/* clobbered */
1161 : "eax", "ecx",
1162 "esi");
1163#else /* SEAGATE_USE_ASM */
1164 memcpy_toio(st0x_dr, data, transfersize);
1165#endif /* SEAGATE_USE_ASM */
1166/* SJT: End */
1167 len -= transfersize;
1168 data += transfersize;
1169 DPRINTK (DEBUG_FAST, "scsi%d : FAST transfer complete len = %d data = %08x\n", hostno, len, data);
1170 } else {
1171 /*
1172 * We loop as long as we are in a
1173 * data out phase, there is data to
1174 * send, and BSY is still active.
1175 */
1176
1177/* SJT: Start. Slow Write. */
1178#ifdef SEAGATE_USE_ASM
1179
1180 int __dummy_1, __dummy_2;
1181
1182/*
1183 * We loop as long as we are in a data out phase, there is data to send,
1184 * and BSY is still active.
1185 */
1186/* Local variables : len = ecx , data = esi,
1187 st0x_cr_sr = ebx, st0x_dr = edi
1188*/
1189 __asm__ (
1190 /* Test for any data here at all. */
1191 "orl %%ecx, %%ecx\n\t"
1192 "jz 2f\n\t" "cld\n\t"
1193/* "movl st0x_cr_sr, %%ebx\n\t" */
1194/* "movl st0x_dr, %%edi\n\t" */
1195 "1:\t"
1196 "movb (%%ebx), %%al\n\t"
1197 /* Test for BSY */
1198 "test $1, %%al\n\t"
1199 "jz 2f\n\t"
1200 /* Test for data out phase - STATUS & REQ_MASK should be
1201 REQ_DATAOUT, which is 0. */
1202 "test $0xe, %%al\n\t"
1203 "jnz 2f\n\t"
1204 /* Test for REQ */
1205 "test $0x10, %%al\n\t"
1206 "jz 1b\n\t"
1207 "lodsb\n\t"
1208 "movb %%al, (%%edi)\n\t"
1209 "loop 1b\n\t" "2:\n"
1210 /* output */ :"=S" (data), "=c" (len),
1211 "=b"
1212 (__dummy_1),
1213 "=D" (__dummy_2)
1214/* input */
1215 : "0" (data), "1" (len),
1216 "2" (st0x_cr_sr),
1217 "3" (st0x_dr)
1218/* clobbered */
1219 : "eax");
1220#else /* SEAGATE_USE_ASM */
1221 while (len) {
1222 unsigned char stat;
1223
1224 stat = STATUS;
1225 if (!(stat & STAT_BSY)
1226 || ((stat & REQ_MASK) !=
1227 REQ_DATAOUT))
1228 break;
1229 if (stat & STAT_REQ) {
1230 WRITE_DATA (*data++);
1231 --len;
1232 }
1233 }
1234#endif /* SEAGATE_USE_ASM */
1235/* SJT: End. */
1236 }
1237
1238 if (!len && nobuffs) {
1239 --nobuffs;
1240 ++buffer;
1241 len = buffer->length;
1242 data = sg_virt(buffer);
1243 DPRINTK (DEBUG_SG,
1244 "scsi%d : next scatter-gather buffer len = %d address = %08x\n",
1245 hostno, len, data);
1246 }
1247 break;
1248
1249 case REQ_DATAIN:
1250#ifdef SLOW_RATE
1251 if (borken) {
1252#if (DEBUG & (PHASE_DATAIN))
1253 transfered += len;
1254#endif
1255 for (; len && (STATUS & (REQ_MASK | STAT_REQ)) == (REQ_DATAIN | STAT_REQ); --len) {
1256 *data++ = DATA;
1257 borken_wait();
1258 }
1259#if (DEBUG & (PHASE_DATAIN))
1260 transfered -= len;
1261#endif
1262 } else
1263#endif
1264
1265 if (fast && transfersize
1266 && !(len % transfersize)
1267 && (len >= transfersize)
1268#ifdef FAST32
1269 && !(transfersize % 4)
1270#endif
1271 ) {
1272 DPRINTK (DEBUG_FAST,
1273 "scsi%d : FAST transfer, underflow = %d, transfersize = %d\n"
1274 " len = %d, data = %08x\n",
1275 hostno, SCint->underflow,
1276 SCint->transfersize, len,
1277 data);
1278
1279/* SJT: Start. Fast Read */
1280#ifdef SEAGATE_USE_ASM
1281 __asm__ ("cld\n\t"
1282#ifdef FAST32
1283 "shr $2, %%ecx\n\t"
1284 "1:\t"
1285 "movl (%%esi), %%eax\n\t"
1286 "stosl\n\t"
1287#else
1288 "1:\t"
1289 "movb (%%esi), %%al\n\t"
1290 "stosb\n\t"
1291#endif
1292 "loop 1b\n\t"
1293 /* output */ :
1294 /* input */ :"S" (st0x_dr),
1295 "D"
1296 (data),
1297 "c" (SCint->transfersize)
1298/* clobbered */
1299 : "eax", "ecx",
1300 "edi");
1301#else /* SEAGATE_USE_ASM */
1302 memcpy_fromio(data, st0x_dr, len);
1303#endif /* SEAGATE_USE_ASM */
1304/* SJT: End */
1305 len -= transfersize;
1306 data += transfersize;
1307#if (DEBUG & PHASE_DATAIN)
1308 printk ("scsi%d: transfered += %d\n", hostno, transfersize);
1309 transfered += transfersize;
1310#endif
1311
1312 DPRINTK (DEBUG_FAST, "scsi%d : FAST transfer complete len = %d data = %08x\n", hostno, len, data);
1313 } else {
1314
1315#if (DEBUG & PHASE_DATAIN)
1316 printk ("scsi%d: transfered += %d\n", hostno, len);
1317 transfered += len; /* Assume we'll transfer it all, then
1318 subtract what we *didn't* transfer */
1319#endif
1320
1321/*
1322 * We loop as long as we are in a data in phase, there is room to read,
1323 * and BSY is still active
1324 */
1325
1326/* SJT: Start. */
1327#ifdef SEAGATE_USE_ASM
1328
1329 int __dummy_3, __dummy_4;
1330
1331/* Dummy clobbering variables for the new gcc-2.95 */
1332
1333/*
1334 * We loop as long as we are in a data in phase, there is room to read,
1335 * and BSY is still active
1336 */
1337 /* Local variables : ecx = len, edi = data
1338 esi = st0x_cr_sr, ebx = st0x_dr */
1339 __asm__ (
1340 /* Test for room to read */
1341 "orl %%ecx, %%ecx\n\t"
1342 "jz 2f\n\t" "cld\n\t"
1343/* "movl st0x_cr_sr, %%esi\n\t" */
1344/* "movl st0x_dr, %%ebx\n\t" */
1345 "1:\t"
1346 "movb (%%esi), %%al\n\t"
1347 /* Test for BSY */
1348 "test $1, %%al\n\t"
1349 "jz 2f\n\t"
1350 /* Test for data in phase - STATUS & REQ_MASK should be REQ_DATAIN,
1351 = STAT_IO, which is 4. */
1352 "movb $0xe, %%ah\n\t"
1353 "andb %%al, %%ah\n\t"
1354 "cmpb $0x04, %%ah\n\t"
1355 "jne 2f\n\t"
1356 /* Test for REQ */
1357 "test $0x10, %%al\n\t"
1358 "jz 1b\n\t"
1359 "movb (%%ebx), %%al\n\t"
1360 "stosb\n\t"
1361 "loop 1b\n\t" "2:\n"
1362 /* output */ :"=D" (data), "=c" (len),
1363 "=S"
1364 (__dummy_3),
1365 "=b" (__dummy_4)
1366/* input */
1367 : "0" (data), "1" (len),
1368 "2" (st0x_cr_sr),
1369 "3" (st0x_dr)
1370/* clobbered */
1371 : "eax");
1372#else /* SEAGATE_USE_ASM */
1373 while (len) {
1374 unsigned char stat;
1375
1376 stat = STATUS;
1377 if (!(stat & STAT_BSY)
1378 || ((stat & REQ_MASK) !=
1379 REQ_DATAIN))
1380 break;
1381 if (stat & STAT_REQ) {
1382 *data++ = DATA;
1383 --len;
1384 }
1385 }
1386#endif /* SEAGATE_USE_ASM */
1387/* SJT: End. */
1388#if (DEBUG & PHASE_DATAIN)
1389 printk ("scsi%d: transfered -= %d\n", hostno, len);
 1390	 transfered -= len;	/* Since we assumed all of len got
 1391	 * transferred, correct our mistake */
1392#endif
1393 }
1394
1395 if (!len && nobuffs) {
1396 --nobuffs;
1397 ++buffer;
1398 len = buffer->length;
1399 data = sg_virt(buffer);
1400 DPRINTK (DEBUG_SG, "scsi%d : next scatter-gather buffer len = %d address = %08x\n", hostno, len, data);
1401 }
1402 break;
1403
1404 case REQ_CMDOUT:
1405 while (((status_read = STATUS) & STAT_BSY) &&
1406 ((status_read & REQ_MASK) == REQ_CMDOUT))
1407 if (status_read & STAT_REQ) {
1408 WRITE_DATA (*(const unsigned char *) cmnd);
1409 cmnd = 1 + (const unsigned char *)cmnd;
1410#ifdef SLOW_RATE
1411 if (borken)
1412 borken_wait ();
1413#endif
1414 }
1415 break;
1416
1417 case REQ_STATIN:
1418 status = DATA;
1419 break;
1420
1421 case REQ_MSGOUT:
1422 /*
1423 * We can only have sent a MSG OUT if we
1424 * requested to do this by raising ATTN.
1425 * So, we must drop ATTN.
1426 */
1427 WRITE_CONTROL (BASE_CMD | CMD_DRVR_ENABLE);
1428 /*
1429 * If we are reconnecting, then we must
1430 * send an IDENTIFY message in response
1431 * to MSGOUT.
1432 */
1433 switch (reselect) {
1434 case CAN_RECONNECT:
1435 WRITE_DATA (IDENTIFY (1, lun));
1436 DPRINTK (PHASE_RESELECT | PHASE_MSGOUT, "scsi%d : sent IDENTIFY message.\n", hostno);
1437 break;
1438#ifdef LINKED
1439 case LINKED_WRONG:
1440 WRITE_DATA (ABORT);
1441 linked_connected = 0;
1442 reselect = CAN_RECONNECT;
1443 goto connect_loop;
1444 DPRINTK (PHASE_MSGOUT | DEBUG_LINKED, "scsi%d : sent ABORT message to cancel incorrect I_T_L nexus.\n", hostno);
1445#endif /* LINKED */
1446 DPRINTK (DEBUG_LINKED, "correct\n");
1447 default:
1448 WRITE_DATA (NOP);
1449 printk("scsi%d : target %d requested MSGOUT, sent NOP message.\n", hostno, target);
1450 }
1451 break;
1452
1453 case REQ_MSGIN:
1454 switch (message = DATA) {
1455 case DISCONNECT:
1456 DANY("seagate: deciding to disconnect\n");
1457 should_reconnect = 1;
1458 current_data = data; /* WDE add */
1459 current_buffer = buffer;
1460 current_bufflen = len; /* WDE add */
1461 current_nobuffs = nobuffs;
1462#ifdef LINKED
1463 linked_connected = 0;
1464#endif
1465 done = 1;
1466 DPRINTK ((PHASE_RESELECT | PHASE_MSGIN), "scsi%d : disconnected.\n", hostno);
1467 break;
1468
1469#ifdef LINKED
1470 case LINKED_CMD_COMPLETE:
1471 case LINKED_FLG_CMD_COMPLETE:
1472#endif
1473 case COMMAND_COMPLETE:
1474 /*
1475 * Note : we should check for underflow here.
1476 */
1477 DPRINTK(PHASE_MSGIN, "scsi%d : command complete.\n", hostno);
1478 done = 1;
1479 break;
1480 case ABORT:
1481 DPRINTK(PHASE_MSGIN, "scsi%d : abort message.\n", hostno);
1482 done = 1;
1483 break;
1484 case SAVE_POINTERS:
1485 current_buffer = buffer;
1486 current_bufflen = len; /* WDE add */
1487 current_data = data; /* WDE mod */
1488 current_nobuffs = nobuffs;
1489 DPRINTK (PHASE_MSGIN, "scsi%d : pointers saved.\n", hostno);
1490 break;
1491 case RESTORE_POINTERS:
1492 buffer = current_buffer;
1493 cmnd = current_cmnd;
1494 data = current_data; /* WDE mod */
1495 len = current_bufflen;
1496 nobuffs = current_nobuffs;
1497 DPRINTK(PHASE_MSGIN, "scsi%d : pointers restored.\n", hostno);
1498 break;
1499 default:
1500
1501 /*
1502 * IDENTIFY distinguishes itself
1503 * from the other messages by
1504 * setting the high bit.
1505 *
1506 * Note : we need to handle at
1507 * least one outstanding command
1508 * per LUN, and need to hash the
1509 * SCSI command for that I_T_L
1510 * nexus based on the known ID
1511 * (at this point) and LUN.
1512 */
1513
1514 if (message & 0x80) {
1515 DPRINTK (PHASE_MSGIN, "scsi%d : IDENTIFY message received from id %d, lun %d.\n", hostno, target, message & 7);
1516 } else {
1517 /*
1518 * We should go into a
1519 * MESSAGE OUT phase, and
1520 * send a MESSAGE_REJECT
1521 * if we run into a message
1522 * that we don't like. The
1523 * seagate driver needs
1524 * some serious
1525 * restructuring first
1526 * though.
1527 */
1528 DPRINTK (PHASE_MSGIN, "scsi%d : unknown message %d from target %d.\n", hostno, message, target);
1529 }
1530 }
1531 break;
1532 default:
1533 printk(KERN_ERR "scsi%d : unknown phase.\n", hostno);
1534 st0x_aborted = DID_ERROR;
1535 } /* end of switch (status_read & REQ_MASK) */
1536#ifdef SLOW_RATE
1537 /*
1538 * I really don't care to deal with borken devices in
1539 * each single byte transfer case (ie, message in,
1540 * message out, status), so I'll do the wait here if
1541 * necessary.
1542 */
1543 if(borken)
1544 borken_wait();
1545#endif
1546
1547 } /* if(status_read & STAT_REQ) ends */
1548 } /* while(((status_read = STATUS)...) ends */
1549
 1550	 DPRINTK(PHASE_DATAIN | PHASE_DATAOUT | PHASE_EXIT, "scsi%d : Transferred %d bytes\n", hostno, transfered);
1551
1552#if (DEBUG & PHASE_EXIT)
1553#if 0 /* Doesn't work for scatter/gather */
1554 printk("Buffer : \n");
1555 for(i = 0; i < 20; ++i)
1556 printk("%02x ", ((unsigned char *) data)[i]); /* WDE mod */
1557 printk("\n");
1558#endif
1559 printk("scsi%d : status = ", hostno);
1560 scsi_print_status(status);
1561 printk(" message = %02x\n", message);
1562#endif
1563
1564 /* We shouldn't reach this until *after* BSY has been deasserted */
1565
1566#ifdef LINKED
1567 else
1568 {
1569 /*
1570 * Fix the message byte so that unsuspecting high level drivers
1571 * don't puke when they see a LINKED COMMAND message in place of
1572 * the COMMAND COMPLETE they may be expecting. Shouldn't be
1573 * necessary, but it's better to be on the safe side.
1574 *
1575 * A non LINKED* message byte will indicate that the command
1576 * completed, and we are now disconnected.
1577 */
1578
1579 switch (message) {
1580 case LINKED_CMD_COMPLETE:
1581 case LINKED_FLG_CMD_COMPLETE:
1582 message = COMMAND_COMPLETE;
1583 linked_target = current_target;
1584 linked_lun = current_lun;
1585 linked_connected = 1;
1586 DPRINTK (DEBUG_LINKED, "scsi%d : keeping I_T_L nexus established for linked command.\n", hostno);
1587 /* We also will need to adjust status to accommodate intermediate
1588 conditions. */
1589 if ((status == INTERMEDIATE_GOOD) || (status == INTERMEDIATE_C_GOOD))
1590 status = GOOD;
1591 break;
1592 /*
1593 * We should also handle what are "normal" termination
1594 * messages here (ABORT, BUS_DEVICE_RESET?, and
 1594	 * COMMAND_COMPLETE) individually, and flake if things
1596 * aren't right.
1597 */
1598 default:
1599 DPRINTK (DEBUG_LINKED, "scsi%d : closing I_T_L nexus.\n", hostno);
1600 linked_connected = 0;
1601 }
1602 }
1603#endif /* LINKED */
1604
1605 if (should_reconnect) {
1606 DPRINTK (PHASE_RESELECT, "scsi%d : exiting seagate_st0x_queue_command() with reconnect enabled.\n", hostno);
1607 WRITE_CONTROL (BASE_CMD | CMD_INTR);
1608 } else
1609 WRITE_CONTROL (BASE_CMD);
1610
1611 return retcode (st0x_aborted);
1612} /* end of internal_command */
1613
1614static int seagate_st0x_abort(struct scsi_cmnd * SCpnt)
1615{
1616 st0x_aborted = DID_ABORT;
1617 return SUCCESS;
1618}
1619
1620#undef ULOOP
1621#undef TIMEOUT
1622
1623/*
1624 * the seagate_st0x_reset function resets the SCSI bus
1625 *
1626 * May be called with SCpnt = NULL
1627 */
1628
1629static int seagate_st0x_bus_reset(struct scsi_cmnd * SCpnt)
1630{
1631 /* No timeouts - this command is going to fail because it was reset. */
 1632 DANY ("scsi%d: Resetting bus... ", hostno);
1633
1634 /* assert RESET signal on SCSI bus. */
1635 WRITE_CONTROL (BASE_CMD | CMD_RST);
1636
1637 mdelay (20);
1638
1639 WRITE_CONTROL (BASE_CMD);
1640 st0x_aborted = DID_RESET;
1641
1642 DANY ("done.\n");
1643 return SUCCESS;
1644}
1645
1646static int seagate_st0x_release(struct Scsi_Host *shost)
1647{
1648 if (shost->irq)
1649 free_irq(shost->irq, shost);
1650 release_region(shost->io_port, shost->n_io_port);
1651 return 0;
1652}
1653
1654static struct scsi_host_template driver_template = {
1655 .detect = seagate_st0x_detect,
1656 .release = seagate_st0x_release,
1657 .info = seagate_st0x_info,
1658 .queuecommand = seagate_st0x_queue_command,
1659 .eh_abort_handler = seagate_st0x_abort,
1660 .eh_bus_reset_handler = seagate_st0x_bus_reset,
1661 .can_queue = 1,
1662 .this_id = 7,
1663 .sg_tablesize = SG_ALL,
1664 .cmd_per_lun = 1,
1665 .use_clustering = DISABLE_CLUSTERING,
1666};
1667#include "scsi_module.c"
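
The final return retcode(st0x_aborted) folds the driver's DID_* code into the midlayer's packed result word. A minimal sketch of that packing convention, assuming retcode() is the usual shift-into-the-host-byte helper (PACK_SCSI_RESULT below is a hypothetical name, not a kernel macro):

    /* Classic SCSI result layout: driver | host | msg | status bytes.
     * The midlayer unpacks these with host_byte() and friends. */
    #define PACK_SCSI_RESULT(did, msg, status) \
            (((did) << 16) | ((msg) << 8) | (status))

    /* e.g. a command terminated by seagate_st0x_bus_reset():
     *   cmd->result = PACK_SCSI_RESULT(DID_RESET, message, status);
     */
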
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index f1871ea04045..17216b76efdc 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -602,8 +602,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
602 * but it is possible that the app intended SG_DXFER_TO_DEV, because there 602 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
603 * is a non-zero input_size, so emit a warning. 603 * is a non-zero input_size, so emit a warning.
604 */ 604 */
605 if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) 605 if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
606 if (printk_ratelimit()) 606 static char cmd[TASK_COMM_LEN];
607 if (strcmp(current->comm, cmd) && printk_ratelimit()) {
607 printk(KERN_WARNING 608 printk(KERN_WARNING
608 "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--" 609 "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
609 "guessing data in;\n" KERN_WARNING " " 610 "guessing data in;\n" KERN_WARNING " "
@@ -611,6 +612,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
611 old_hdr.reply_len - (int)SZ_SG_HEADER, 612 old_hdr.reply_len - (int)SZ_SG_HEADER,
612 input_size, (unsigned int) cmnd[0], 613 input_size, (unsigned int) cmnd[0],
613 current->comm); 614 current->comm);
615 strcpy(cmd, current->comm);
616 }
617 }
614 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); 618 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
615 return (k < 0) ? k : count; 619 return (k < 0) ? k : count;
616} 620}
@@ -1418,7 +1422,6 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1418 goto out; 1422 goto out;
1419 } 1423 }
1420 1424
1421 class_set_devdata(cl_dev, sdp);
1422 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1); 1425 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
1423 if (error) 1426 if (error)
1424 goto cdev_add_err; 1427 goto cdev_add_err;
@@ -1431,11 +1434,14 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1431 MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1434 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
1432 cl_dev->dev, "%s", 1435 cl_dev->dev, "%s",
1433 disk->disk_name); 1436 disk->disk_name);
1434 if (IS_ERR(sg_class_member)) 1437 if (IS_ERR(sg_class_member)) {
1435 printk(KERN_WARNING "sg_add: " 1438 printk(KERN_ERR "sg_add: "
1436 "class_device_create failed\n"); 1439 "class_device_create failed\n");
1440 error = PTR_ERR(sg_class_member);
1441 goto cdev_add_err;
1442 }
1437 class_set_devdata(sg_class_member, sdp); 1443 class_set_devdata(sg_class_member, sdp);
1438 error = sysfs_create_link(&scsidp->sdev_gendev.kobj, 1444 error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
1439 &sg_class_member->kobj, "generic"); 1445 &sg_class_member->kobj, "generic");
1440 if (error) 1446 if (error)
1441 printk(KERN_ERR "sg_add: unable to make symlink " 1447 printk(KERN_ERR "sg_add: unable to make symlink "
@@ -1447,6 +1453,8 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1447 "Attached scsi generic sg%d type %d\n", sdp->index, 1453 "Attached scsi generic sg%d type %d\n", sdp->index,
1448 scsidp->type); 1454 scsidp->type);
1449 1455
1456 class_set_devdata(cl_dev, sdp);
1457
1450 return 0; 1458 return 0;
1451 1459
1452cdev_add_err: 1460cdev_add_err:
@@ -2521,7 +2529,7 @@ sg_idr_max_id(int id, void *p, void *data)
2521static int 2529static int
2522sg_last_dev(void) 2530sg_last_dev(void)
2523{ 2531{
2524 int k = 0; 2532 int k = -1;
2525 unsigned long iflags; 2533 unsigned long iflags;
2526 2534
2527 read_lock_irqsave(&sg_index_lock, iflags); 2535 read_lock_irqsave(&sg_index_lock, iflags);
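
The sg_write() change above does two things: it rate-limits the "guessing data in" warning, and it caches the name of the last process that triggered it, so a single misbehaving application cannot flood the log even within the rate limit. A compilable userspace sketch of the same suppress-repeats idea (helper name hypothetical):

    #include <stdio.h>
    #include <string.h>

    #define TASK_COMM_LEN 16

    /* Warn only when the offending command name changes, mirroring the
     * static cmd[] cache added to sg_write(). */
    static void warn_once_per_comm(const char *comm)
    {
            static char last[TASK_COMM_LEN];

            if (strcmp(comm, last) == 0)
                    return;         /* same offender as last time: stay quiet */
            fprintf(stderr, "sg_write-style warning from %s\n", comm);
            strncpy(last, comm, sizeof(last) - 1);
            last[sizeof(last) - 1] = '\0';
    }

    int main(void)
    {
            warn_once_per_comm("dd");   /* prints */
            warn_once_per_comm("dd");   /* suppressed */
            warn_once_per_comm("tar");  /* prints */
            return 0;
    }
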
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index eef82758d047..d4ebe8c67ba9 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -159,6 +159,7 @@ void sgiwd93_reset(unsigned long base)
159 udelay(50); 159 udelay(50);
160 hregs->ctrl = 0; 160 hregs->ctrl = 0;
161} 161}
162EXPORT_SYMBOL_GPL(sgiwd93_reset);
162 163
163static inline void init_hpc_chain(struct hpc_data *hd) 164static inline void init_hpc_chain(struct hpc_data *hd)
164{ 165{
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index c61999031141..1fcee16fa36d 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -67,8 +67,6 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
67 67
68#define SR_DISKS 256 68#define SR_DISKS 256
69 69
70#define MAX_RETRIES 3
71#define SR_TIMEOUT (30 * HZ)
72#define SR_CAPABILITIES \ 70#define SR_CAPABILITIES \
73 (CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|CDC_SELECT_SPEED| \ 71 (CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|CDC_SELECT_SPEED| \
74 CDC_SELECT_DISC|CDC_MULTI_SESSION|CDC_MCN|CDC_MEDIA_CHANGED| \ 72 CDC_SELECT_DISC|CDC_MULTI_SESSION|CDC_MCN|CDC_MEDIA_CHANGED| \
@@ -179,21 +177,28 @@ static int sr_media_change(struct cdrom_device_info *cdi, int slot)
179{ 177{
180 struct scsi_cd *cd = cdi->handle; 178 struct scsi_cd *cd = cdi->handle;
181 int retval; 179 int retval;
180 struct scsi_sense_hdr *sshdr;
182 181
183 if (CDSL_CURRENT != slot) { 182 if (CDSL_CURRENT != slot) {
184 /* no changer support */ 183 /* no changer support */
185 return -EINVAL; 184 return -EINVAL;
186 } 185 }
187 186
188 retval = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES); 187 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
189 if (retval) { 188 retval = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES,
190 /* Unable to test, unit probably not ready. This usually 189 sshdr);
191 * means there is no disc in the drive. Mark as changed, 190 if (retval || (scsi_sense_valid(sshdr) &&
192 * and we will figure it out later once the drive is 191 /* 0x3a is medium not present */
193 * available again. */ 192 sshdr->asc == 0x3a)) {
193 /* Media not present or unable to test, unit probably not
194 * ready. This usually means there is no disc in the drive.
195 * Mark as changed, and we will figure it out later once
196 * the drive is available again.
197 */
194 cd->device->changed = 1; 198 cd->device->changed = 1;
195 return 1; /* This will force a flush, if called from 199 /* This will force a flush, if called from check_disk_change */
196 * check_disk_change */ 200 retval = 1;
201 goto out;
197 }; 202 };
198 203
199 retval = cd->device->changed; 204 retval = cd->device->changed;
@@ -203,9 +208,17 @@ static int sr_media_change(struct cdrom_device_info *cdi, int slot)
203 if (retval) { 208 if (retval) {
204 /* check multisession offset etc */ 209 /* check multisession offset etc */
205 sr_cd_check(cdi); 210 sr_cd_check(cdi);
206
207 get_sectorsize(cd); 211 get_sectorsize(cd);
208 } 212 }
213
214out:
 215 /* Notify userspace that the media has changed. */
216 if (retval != cd->previous_state)
217 sdev_evt_send_simple(cd->device, SDEV_EVT_MEDIA_CHANGE,
218 GFP_KERNEL);
219 cd->previous_state = retval;
220 kfree(sshdr);
221
209 return retval; 222 return retval;
210} 223}
211 224
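
The reworked sr_media_change() treats sense data with ASC 0x3a (medium not present) like a failed TEST UNIT READY, and the new out: path emits SDEV_EVT_MEDIA_CHANGE only when the answer differs from the previous poll, recorded in cd->previous_state. A standalone sketch of that edge-triggered notification pattern (names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    /* Report only on state *transitions*, as sr_media_change() now does
     * with cd->previous_state; steady-state polls stay silent. */
    struct cd_state { bool previous_changed; };

    static void poll_media(struct cd_state *cd, bool changed_now)
    {
            if (changed_now != cd->previous_changed)
                    printf("emit MEDIA_CHANGE event (changed=%d)\n", changed_now);
            cd->previous_changed = changed_now;
    }

    int main(void)
    {
            struct cd_state cd = { false };
            poll_media(&cd, true);   /* transition: event */
            poll_media(&cd, true);   /* steady state: silent */
            poll_media(&cd, false);  /* transition: event */
            return 0;
    }
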
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index d65de9621b27..81fbc0b78a52 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -20,6 +20,9 @@
20#include <linux/genhd.h> 20#include <linux/genhd.h>
21#include <linux/kref.h> 21#include <linux/kref.h>
22 22
23#define MAX_RETRIES 3
24#define SR_TIMEOUT (30 * HZ)
25
23struct scsi_device; 26struct scsi_device;
24 27
25/* The CDROM is fairly slow, so we need a little extra time */ 28/* The CDROM is fairly slow, so we need a little extra time */
@@ -37,6 +40,7 @@ typedef struct scsi_cd {
37 unsigned xa_flag:1; /* CD has XA sectors ? */ 40 unsigned xa_flag:1; /* CD has XA sectors ? */
38 unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ 41 unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */
39 unsigned readcd_cdda:1; /* reading audio data using READ_CD */ 42 unsigned readcd_cdda:1; /* reading audio data using READ_CD */
 43 unsigned previous_state:1; /* media state as of the last check */
40 struct cdrom_device_info cdi; 44 struct cdrom_device_info cdi;
41 /* We hold gendisk and scsi_device references on probe and use 45 /* We hold gendisk and scsi_device references on probe and use
42 * the refs on this kref to decide when to release them */ 46 * the refs on this kref to decide when to release them */
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index e1589f91706a..d5cebff1d646 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -275,18 +275,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
275/* ---------------------------------------------------------------------- */ 275/* ---------------------------------------------------------------------- */
276/* interface to cdrom.c */ 276/* interface to cdrom.c */
277 277
278static int test_unit_ready(Scsi_CD *cd)
279{
280 struct packet_command cgc;
281
282 memset(&cgc, 0, sizeof(struct packet_command));
283 cgc.cmd[0] = GPCMD_TEST_UNIT_READY;
284 cgc.quiet = 1;
285 cgc.data_direction = DMA_NONE;
286 cgc.timeout = IOCTL_TIMEOUT;
287 return sr_do_ioctl(cd, &cgc);
288}
289
290int sr_tray_move(struct cdrom_device_info *cdi, int pos) 278int sr_tray_move(struct cdrom_device_info *cdi, int pos)
291{ 279{
292 Scsi_CD *cd = cdi->handle; 280 Scsi_CD *cd = cdi->handle;
@@ -310,14 +298,46 @@ int sr_lock_door(struct cdrom_device_info *cdi, int lock)
310 298
311int sr_drive_status(struct cdrom_device_info *cdi, int slot) 299int sr_drive_status(struct cdrom_device_info *cdi, int slot)
312{ 300{
301 struct scsi_cd *cd = cdi->handle;
302 struct scsi_sense_hdr sshdr;
303 struct media_event_desc med;
304
313 if (CDSL_CURRENT != slot) { 305 if (CDSL_CURRENT != slot) {
314 /* we have no changer support */ 306 /* we have no changer support */
315 return -EINVAL; 307 return -EINVAL;
316 } 308 }
317 if (0 == test_unit_ready(cdi->handle)) 309 if (0 == scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES,
310 &sshdr))
318 return CDS_DISC_OK; 311 return CDS_DISC_OK;
319 312
320 return CDS_TRAY_OPEN; 313 if (!cdrom_get_media_event(cdi, &med)) {
314 if (med.media_present)
315 return CDS_DISC_OK;
316 else if (med.door_open)
317 return CDS_TRAY_OPEN;
318 else
319 return CDS_NO_DISC;
320 }
321
322 /*
323 * 0x04 is format in progress .. but there must be a disc present!
324 */
325 if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04)
326 return CDS_DISC_OK;
327
328 /*
329 * If not using Mt Fuji extended media tray reports,
330 * just return TRAY_OPEN since ATAPI doesn't provide
331 * any other way to detect this...
332 */
333 if (scsi_sense_valid(&sshdr) &&
334 /* 0x3a is medium not present */
335 sshdr.asc == 0x3a)
336 return CDS_NO_DISC;
337 else
338 return CDS_TRAY_OPEN;
339
340 return CDS_DRIVE_NOT_READY;
321} 341}
322 342
323int sr_disk_status(struct cdrom_device_info *cdi) 343int sr_disk_status(struct cdrom_device_info *cdi)
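
The new sr_drive_status() prefers Mt Fuji GET EVENT STATUS NOTIFICATION data and falls back to decoding sense data only when no event data is available. The priority order is easier to see flattened into one function (sketch only, constants abbreviated; note that the patch's trailing return CDS_DRIVE_NOT_READY is unreachable, since the sense-data branch above it always returns):

    enum cds { CDS_DISC_OK, CDS_TRAY_OPEN, CDS_NO_DISC };

    /* Decision ladder of the new sr_drive_status(), with flags standing in
     * for the TUR result, media-event data, and decoded sense data. */
    static enum cds drive_status(int tur_ok, int have_med, int media_present,
                                 int door_open, int sense_valid,
                                 int sense_key, int asc)
    {
            if (tur_ok)
                    return CDS_DISC_OK;
            if (have_med)                     /* Mt Fuji event data available */
                    return media_present ? CDS_DISC_OK :
                           door_open     ? CDS_TRAY_OPEN : CDS_NO_DISC;
            if (sense_key == 0x02 /* NOT_READY */ && asc == 0x04)
                    return CDS_DISC_OK;       /* format in progress: disc present */
            if (sense_valid && asc == 0x3a)   /* medium not present */
                    return CDS_NO_DISC;
            return CDS_TRAY_OPEN;             /* ATAPI offers nothing better */
    }
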
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 328c47c6aeb1..71952703125a 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -9,7 +9,7 @@
9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky, 9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
10 Michael Schaefer, J"org Weule, and Eric Youngdale. 10 Michael Schaefer, J"org Weule, and Eric Youngdale.
11 11
12 Copyright 1992 - 2007 Kai Makisara 12 Copyright 1992 - 2008 Kai Makisara
13 email Kai.Makisara@kolumbus.fi 13 email Kai.Makisara@kolumbus.fi
14 14
15 Some small formal changes - aeb, 950809 15 Some small formal changes - aeb, 950809
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20070203"; 20static const char *verstr = "20080117";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -3214,8 +3214,7 @@ static int partition_tape(struct scsi_tape *STp, int size)
3214 3214
3215 3215
3216/* The ioctl command */ 3216/* The ioctl command */
3217static int st_ioctl(struct inode *inode, struct file *file, 3217static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
3218 unsigned int cmd_in, unsigned long arg)
3219{ 3218{
3220 int i, cmd_nr, cmd_type, bt; 3219 int i, cmd_nr, cmd_type, bt;
3221 int retval = 0; 3220 int retval = 0;
@@ -3870,7 +3869,7 @@ static const struct file_operations st_fops =
3870 .owner = THIS_MODULE, 3869 .owner = THIS_MODULE,
3871 .read = st_read, 3870 .read = st_read,
3872 .write = st_write, 3871 .write = st_write,
3873 .ioctl = st_ioctl, 3872 .unlocked_ioctl = st_ioctl,
3874#ifdef CONFIG_COMPAT 3873#ifdef CONFIG_COMPAT
3875 .compat_ioctl = st_compat_ioctl, 3874 .compat_ioctl = st_compat_ioctl,
3876#endif 3875#endif
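
Switching st.c from .ioctl to .unlocked_ioctl changes both the signature (no inode argument, long return) and the locking contract: the handler is no longer entered under the Big Kernel Lock and must serialize itself. A hedged sketch of the conversion shape follows; the mutex is an assumption for illustration, since st.c's actual serialization is not visible in this hunk:

    /* Sketch only: the shape of an unlocked_ioctl conversion, not st_ioctl(). */
    static DEFINE_MUTEX(example_ioctl_mutex);          /* hypothetical */

    static long example_ioctl(struct file *file, unsigned int cmd,
                              unsigned long arg)
    {
            long ret = -ENOTTY;

            mutex_lock(&example_ioctl_mutex);   /* was implicit under the BKL */
            switch (cmd) {
            /* ... per-command handling, as st_ioctl() does ... */
            }
            mutex_unlock(&example_ioctl_mutex);
            return ret;
    }

    static const struct file_operations example_fops = {
            .owner          = THIS_MODULE,
            .unlocked_ioctl = example_ioctl,    /* replaces .ioctl */
    };
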
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 2dcde373b20e..bcaba86060ab 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -515,9 +515,9 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
515 * various queues are valid. 515 * various queues are valid.
516 */ 516 */
517 517
518 if (cmd->use_sg) { 518 if (scsi_bufflen(cmd)) {
519 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; 519 cmd->SCp.buffer = scsi_sglist(cmd);
520 cmd->SCp.buffers_residual = cmd->use_sg - 1; 520 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
521 cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer); 521 cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
522 cmd->SCp.this_residual = cmd->SCp.buffer->length; 522 cmd->SCp.this_residual = cmd->SCp.buffer->length;
523 523
@@ -528,8 +528,8 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
528 } else { 528 } else {
529 cmd->SCp.buffer = NULL; 529 cmd->SCp.buffer = NULL;
530 cmd->SCp.buffers_residual = 0; 530 cmd->SCp.buffers_residual = 0;
531 cmd->SCp.ptr = (char *) cmd->request_buffer; 531 cmd->SCp.ptr = NULL;
532 cmd->SCp.this_residual = cmd->request_bufflen; 532 cmd->SCp.this_residual = 0;
533 } 533 }
534 534
535} 535}
@@ -935,7 +935,7 @@ static int NCR5380_queue_command(struct scsi_cmnd *cmd,
935 } 935 }
936# endif 936# endif
937# ifdef NCR5380_STAT_LIMIT 937# ifdef NCR5380_STAT_LIMIT
938 if (cmd->request_bufflen > NCR5380_STAT_LIMIT) 938 if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
939# endif 939# endif
940 switch (cmd->cmnd[0]) 940 switch (cmd->cmnd[0])
941 { 941 {
@@ -943,14 +943,14 @@ static int NCR5380_queue_command(struct scsi_cmnd *cmd,
943 case WRITE_6: 943 case WRITE_6:
944 case WRITE_10: 944 case WRITE_10:
945 hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase); 945 hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
946 hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen; 946 hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
947 hostdata->pendingw++; 947 hostdata->pendingw++;
948 break; 948 break;
949 case READ: 949 case READ:
950 case READ_6: 950 case READ_6:
951 case READ_10: 951 case READ_10:
952 hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase); 952 hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
953 hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen; 953 hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
954 hostdata->pendingr++; 954 hostdata->pendingr++;
955 break; 955 break;
956 } 956 }
@@ -1345,7 +1345,7 @@ static void collect_stats(struct NCR5380_hostdata *hostdata,
1345 struct scsi_cmnd *cmd) 1345 struct scsi_cmnd *cmd)
1346{ 1346{
1347# ifdef NCR5380_STAT_LIMIT 1347# ifdef NCR5380_STAT_LIMIT
1348 if (cmd->request_bufflen > NCR5380_STAT_LIMIT) 1348 if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
1349# endif 1349# endif
1350 switch (cmd->cmnd[0]) 1350 switch (cmd->cmnd[0])
1351 { 1351 {
@@ -1353,14 +1353,14 @@ static void collect_stats(struct NCR5380_hostdata *hostdata,
1353 case WRITE_6: 1353 case WRITE_6:
1354 case WRITE_10: 1354 case WRITE_10:
1355 hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase); 1355 hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
1356 /*hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;*/ 1356 /*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
1357 hostdata->pendingw--; 1357 hostdata->pendingw--;
1358 break; 1358 break;
1359 case READ: 1359 case READ:
1360 case READ_6: 1360 case READ_6:
1361 case READ_10: 1361 case READ_10:
1362 hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase); 1362 hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
1363 /*hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;*/ 1363 /*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
1364 hostdata->pendingr--; 1364 hostdata->pendingr--;
1365 break; 1365 break;
1366 } 1366 }
@@ -1863,7 +1863,7 @@ static int do_abort (struct Scsi_Host *host)
1863 * the target sees, so we just handshake. 1863 * the target sees, so we just handshake.
1864 */ 1864 */
1865 1865
1866 while (!(tmp = NCR5380_read(STATUS_REG)) & SR_REQ); 1866 while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
1867 1867
1868 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); 1868 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
1869 1869
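
The do_abort() change is a genuine bug fix, not a style tweak: ! binds tighter than &, so the old condition computed (!tmp) & SR_REQ, which is always zero because !tmp is 0 or 1 while SR_REQ is bit 5 (0x20), so the loop never actually waited for REQ. A compilable demonstration:

    #include <stdio.h>

    #define SR_REQ 0x20     /* REQ is bit 5 of the 5380 status register */

    int main(void)
    {
            unsigned tmp;

            for (tmp = 0; tmp <= 0xff; tmp++)
                    if ((!tmp & SR_REQ) != 0)
                            printf("old condition true for tmp=%02x\n", tmp);
            /* Prints nothing: the old while condition was constant false. */

            printf("fixed, REQ set:   %d\n", !(0x20 & SR_REQ));  /* 0: stop */
            printf("fixed, REQ clear: %d\n", !(0x00 & SR_REQ));  /* 1: keep waiting */
            return 0;
    }
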
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 90cee94d9522..1f6fd1680335 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -328,27 +328,13 @@ static __inline__ unsigned int sym53c416_write(int base, unsigned char *buffer,
328static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id) 328static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
329{ 329{
330 struct Scsi_Host *dev = dev_id; 330 struct Scsi_Host *dev = dev_id;
331 int base = 0; 331 int base = dev->io_port;
332 int i; 332 int i;
333 unsigned long flags = 0; 333 unsigned long flags = 0;
334 unsigned char status_reg, pio_int_reg, int_reg; 334 unsigned char status_reg, pio_int_reg, int_reg;
335 struct scatterlist *sg; 335 struct scatterlist *sg;
336 unsigned int tot_trans = 0; 336 unsigned int tot_trans = 0;
337 337
338 /* We search the base address of the host adapter which caused the interrupt */
339 /* FIXME: should pass dev_id sensibly as hosts[i] */
340 for(i = 0; i < host_index && !base; i++)
341 if(irq == hosts[i].irq)
342 base = hosts[i].base;
343 /* If no adapter found, we cannot handle the interrupt. Leave a message */
344 /* and continue. This should never happen... */
345 if(!base)
346 {
347 printk(KERN_ERR "sym53c416: No host adapter defined for interrupt %d\n", irq);
348 return IRQ_NONE;
349 }
350 /* Now we have the base address and we can start handling the interrupt */
351
352 spin_lock_irqsave(dev->host_lock,flags); 338 spin_lock_irqsave(dev->host_lock,flags);
353 status_reg = inb(base + STATUS_REG); 339 status_reg = inb(base + STATUS_REG);
354 pio_int_reg = inb(base + PIO_INT_REG); 340 pio_int_reg = inb(base + PIO_INT_REG);
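
Deleting the IRQ-to-base search works because the dev_id cookie passed to request_irq() already identifies the host; the handler simply casts it back instead of walking a global table. A sketch of the idiom (kernel-style, simplified, names hypothetical):

    static irqreturn_t example_intr(int irq, void *dev_id)
    {
            struct Scsi_Host *dev = dev_id;     /* the cookie we registered */
            int base = dev->io_port;            /* no global table walk */

            /* ... service the chip at 'base' ... */
            return IRQ_HANDLED;
    }

    static int example_attach(struct Scsi_Host *shost)
    {
            /* Pass the host itself as dev_id so the handler can recover it. */
            return request_irq(shost->irq, example_intr, IRQF_SHARED,
                               "example", shost);
    }
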
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 9e0908d1981a..21e926dcdab0 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -207,10 +207,9 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
207 /* 207 /*
208 * Bounce back the sense data to user. 208 * Bounce back the sense data to user.
209 */ 209 */
210 memset(&cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); 210 memset(&cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
211 memcpy(cmd->sense_buffer, cp->sns_bbuf, 211 memcpy(cmd->sense_buffer, cp->sns_bbuf,
212 min(sizeof(cmd->sense_buffer), 212 min(SCSI_SENSE_BUFFERSIZE, SYM_SNS_BBUF_LEN));
213 (size_t)SYM_SNS_BBUF_LEN));
214#if 0 213#if 0
215 /* 214 /*
216 * If the device reports a UNIT ATTENTION condition 215 * If the device reports a UNIT ATTENTION condition
@@ -609,22 +608,24 @@ static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
609 */ 608 */
610#define WAIT_FOR_PCI_RECOVERY 35 609#define WAIT_FOR_PCI_RECOVERY 35
611 if (pci_channel_offline(pdev)) { 610 if (pci_channel_offline(pdev)) {
612 struct completion *io_reset;
613 int finished_reset = 0; 611 int finished_reset = 0;
614 init_completion(&eh_done); 612 init_completion(&eh_done);
615 spin_lock_irq(shost->host_lock); 613 spin_lock_irq(shost->host_lock);
616 /* Make sure we didn't race */ 614 /* Make sure we didn't race */
617 if (pci_channel_offline(pdev)) { 615 if (pci_channel_offline(pdev)) {
618 if (!sym_data->io_reset) 616 BUG_ON(sym_data->io_reset);
619 sym_data->io_reset = &eh_done; 617 sym_data->io_reset = &eh_done;
620 io_reset = sym_data->io_reset;
621 } else { 618 } else {
622 finished_reset = 1; 619 finished_reset = 1;
623 } 620 }
624 spin_unlock_irq(shost->host_lock); 621 spin_unlock_irq(shost->host_lock);
625 if (!finished_reset) 622 if (!finished_reset)
626 finished_reset = wait_for_completion_timeout(io_reset, 623 finished_reset = wait_for_completion_timeout
624 (sym_data->io_reset,
627 WAIT_FOR_PCI_RECOVERY*HZ); 625 WAIT_FOR_PCI_RECOVERY*HZ);
626 spin_lock_irq(shost->host_lock);
627 sym_data->io_reset = NULL;
628 spin_unlock_irq(shost->host_lock);
628 if (!finished_reset) 629 if (!finished_reset)
629 return SCSI_FAILED; 630 return SCSI_FAILED;
630 } 631 }
@@ -1744,7 +1745,7 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
1744 return -ENODEV; 1745 return -ENODEV;
1745} 1746}
1746 1747
1747static void __devexit sym2_remove(struct pci_dev *pdev) 1748static void sym2_remove(struct pci_dev *pdev)
1748{ 1749{
1749 struct Scsi_Host *shost = pci_get_drvdata(pdev); 1750 struct Scsi_Host *shost = pci_get_drvdata(pdev);
1750 1751
@@ -1879,7 +1880,6 @@ static void sym2_io_resume(struct pci_dev *pdev)
1879 spin_lock_irq(shost->host_lock); 1880 spin_lock_irq(shost->host_lock);
1880 if (sym_data->io_reset) 1881 if (sym_data->io_reset)
1881 complete_all(sym_data->io_reset); 1882 complete_all(sym_data->io_reset);
1882 sym_data->io_reset = NULL;
1883 spin_unlock_irq(shost->host_lock); 1883 spin_unlock_irq(shost->host_lock);
1884} 1884}
1885 1885
@@ -2056,7 +2056,7 @@ static struct pci_driver sym2_driver = {
2056 .name = NAME53C8XX, 2056 .name = NAME53C8XX,
2057 .id_table = sym2_id_table, 2057 .id_table = sym2_id_table,
2058 .probe = sym2_probe, 2058 .probe = sym2_probe,
2059 .remove = __devexit_p(sym2_remove), 2059 .remove = sym2_remove,
2060 .err_handler = &sym2_err_handler, 2060 .err_handler = &sym2_err_handler,
2061}; 2061};
2062 2062
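
The eh-handler rework makes the waiter, not sym2_io_resume(), responsible for clearing sym_data->io_reset, closing the window where the resume path could NULL the pointer out from under a waiter; the BUG_ON documents that only one waiter may ever be parked there. The handshake, flattened into one view (a sketch assembled from the hunks above, not verbatim driver code):

    /* EH thread (waiter): */
    spin_lock_irq(shost->host_lock);
    if (pci_channel_offline(pdev)) {
            BUG_ON(sym_data->io_reset);         /* single waiter by design */
            sym_data->io_reset = &eh_done;      /* completion on our stack */
    }
    spin_unlock_irq(shost->host_lock);
    finished_reset = wait_for_completion_timeout(sym_data->io_reset,
                                                 WAIT_FOR_PCI_RECOVERY * HZ);
    spin_lock_irq(shost->host_lock);
    sym_data->io_reset = NULL;                  /* waiter clears it, not resume */
    spin_unlock_irq(shost->host_lock);

    /* Resume path (sym2_io_resume): */
    spin_lock_irq(shost->host_lock);
    if (sym_data->io_reset)
            complete_all(sym_data->io_reset);   /* wake the waiter; leave the
                                                   pointer for it to clear */
    spin_unlock_irq(shost->host_lock);
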
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 44193049c4ae..5b04ddfed26c 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -444,7 +444,7 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
444 444
445 /* Map sense buffer */ 445 /* Map sense buffer */
446 if (pSRB->SRBFlag & AUTO_REQSENSE) { 446 if (pSRB->SRBFlag & AUTO_REQSENSE) {
447 pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->sense_buffer, sizeof(pcmd->sense_buffer)); 447 pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
448 pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1, 448 pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1,
449 DMA_FROM_DEVICE); 449 DMA_FROM_DEVICE);
450 cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList); 450 cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList);
@@ -599,7 +599,7 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
599 DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5); 599 DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);
600 DC390_write8 (ScsiFifo, 0); 600 DC390_write8 (ScsiFifo, 0);
601 DC390_write8 (ScsiFifo, 0); 601 DC390_write8 (ScsiFifo, 0);
602 DC390_write8 (ScsiFifo, sizeof(scmd->sense_buffer)); 602 DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);
603 DC390_write8 (ScsiFifo, 0); 603 DC390_write8 (ScsiFifo, 0);
604 DEBUG1(printk (KERN_DEBUG "DC390: AutoReqSense !\n")); 604 DEBUG1(printk (KERN_DEBUG "DC390: AutoReqSense !\n"));
605 } 605 }
@@ -1389,7 +1389,7 @@ dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus
1389 DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5); 1389 DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);
1390 DC390_write8 (ScsiFifo, 0); 1390 DC390_write8 (ScsiFifo, 0);
1391 DC390_write8 (ScsiFifo, 0); 1391 DC390_write8 (ScsiFifo, 0);
1392 DC390_write8 (ScsiFifo, sizeof(pSRB->pcmd->sense_buffer)); 1392 DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);
1393 DC390_write8 (ScsiFifo, 0); 1393 DC390_write8 (ScsiFifo, 0);
1394 DEBUG0(printk(KERN_DEBUG "DC390: AutoReqSense (CmndPhase)!\n")); 1394 DEBUG0(printk(KERN_DEBUG "DC390: AutoReqSense (CmndPhase)!\n"));
1395 } 1395 }
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 7edd6ceb13b2..4bc5407f9695 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1121,9 +1121,9 @@ static void map_dma(unsigned int i, unsigned int j) {
1121 1121
1122 if (SCpnt->sense_buffer) 1122 if (SCpnt->sense_buffer)
1123 cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer, 1123 cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer,
1124 sizeof SCpnt->sense_buffer, PCI_DMA_FROMDEVICE)); 1124 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));
1125 1125
1126 cpp->sense_len = sizeof SCpnt->sense_buffer; 1126 cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
1127 1127
1128 if (scsi_bufflen(SCpnt)) { 1128 if (scsi_bufflen(SCpnt)) {
1129 count = scsi_dma_map(SCpnt); 1129 count = scsi_dma_map(SCpnt);
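
The sizeof-to-SCSI_SENSE_BUFFERSIZE conversions here and in tmscsim.c and ultrastor.c are not cosmetic. With the sense buffer moving from an embedded array in struct scsi_cmnd to a separately allocated pointer, sizeof SCpnt->sense_buffer silently becomes the size of a pointer. A two-struct demonstration:

    #include <stdio.h>

    #define SCSI_SENSE_BUFFERSIZE 96

    struct old_cmd { unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE]; };
    struct new_cmd { unsigned char *sense_buffer; };

    int main(void)
    {
            struct old_cmd o;
            struct new_cmd n;

            printf("%zu\n", sizeof o.sense_buffer);  /* 96: what drivers meant */
            printf("%zu\n", sizeof n.sense_buffer);  /* 4 or 8: pointer size  */
            return 0;
    }
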
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 6d1f0edd7985..75eca6b22db5 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -298,9 +298,16 @@ static inline int find_and_clear_bit_16(unsigned long *field)
298{ 298{
299 int rv; 299 int rv;
300 300
301 if (*field == 0) panic("No free mscp"); 301 if (*field == 0)
302 asm("xorl %0,%0\n0:\tbsfw %1,%w0\n\tbtr %0,%1\n\tjnc 0b" 302 panic("No free mscp");
303 : "=&r" (rv), "=m" (*field) : "1" (*field)); 303
304 asm volatile (
305 "xorl %0,%0\n\t"
306 "0: bsfw %1,%w0\n\t"
307 "btr %0,%1\n\t"
308 "jnc 0b"
309 : "=&r" (rv), "=m" (*field) :);
310
304 return rv; 311 return rv;
305} 312}
306 313
@@ -741,7 +748,7 @@ static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt,
741 } 748 }
742 my_mscp->command_link = 0; /*???*/ 749 my_mscp->command_link = 0; /*???*/
743 my_mscp->scsi_command_link_id = 0; /*???*/ 750 my_mscp->scsi_command_link_id = 0; /*???*/
744 my_mscp->length_of_sense_byte = sizeof SCpnt->sense_buffer; 751 my_mscp->length_of_sense_byte = SCSI_SENSE_BUFFERSIZE;
745 my_mscp->length_of_scsi_cdbs = SCpnt->cmd_len; 752 my_mscp->length_of_scsi_cdbs = SCpnt->cmd_len;
746 memcpy(my_mscp->scsi_cdbs, SCpnt->cmnd, my_mscp->length_of_scsi_cdbs); 753 memcpy(my_mscp->scsi_cdbs, SCpnt->cmnd, my_mscp->length_of_scsi_cdbs);
747 my_mscp->adapter_status = 0; 754 my_mscp->adapter_status = 0;
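
The rewritten asm is the same find-lowest-set-bit-and-clear-it operation, now marked volatile and with its layout untangled (bsfw/btr operate on 16-bit words here). A portable C equivalent using compiler builtins, for comparison (sketch; __builtin_ctzl is GCC/Clang-specific and, like bsf, undefined for a zero input, which the caller's panic already rules out):

    #include <stdio.h>

    /* Return the index of the lowest set bit in *field and clear it. */
    static int find_and_clear_bit_portable(unsigned long *field)
    {
            int rv = __builtin_ctzl(*field);   /* count trailing zeros */

            *field &= *field - 1;              /* clear the lowest set bit */
            return rv;
    }

    int main(void)
    {
            unsigned long field = 0x28;        /* bits 3 and 5 set */

            printf("%d\n", find_and_clear_bit_portable(&field));  /* 3 */
            printf("%d\n", find_and_clear_bit_portable(&field));  /* 5 */
            printf("%#lx\n", field);                              /* 0 */
            return 0;
    }
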
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index fdbb92d1f722..f286c37da7e0 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -407,16 +407,16 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd,
407 * - SCp.phase records this command's SRCID_ER bit setting 407 * - SCp.phase records this command's SRCID_ER bit setting
408 */ 408 */
409 409
410 if (cmd->use_sg) { 410 if (scsi_bufflen(cmd)) {
411 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; 411 cmd->SCp.buffer = scsi_sglist(cmd);
412 cmd->SCp.buffers_residual = cmd->use_sg - 1; 412 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
413 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); 413 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
414 cmd->SCp.this_residual = cmd->SCp.buffer->length; 414 cmd->SCp.this_residual = cmd->SCp.buffer->length;
415 } else { 415 } else {
416 cmd->SCp.buffer = NULL; 416 cmd->SCp.buffer = NULL;
417 cmd->SCp.buffers_residual = 0; 417 cmd->SCp.buffers_residual = 0;
418 cmd->SCp.ptr = (char *) cmd->request_buffer; 418 cmd->SCp.ptr = NULL;
419 cmd->SCp.this_residual = cmd->request_bufflen; 419 cmd->SCp.this_residual = 0;
420 } 420 }
421 421
422/* WD docs state that at the conclusion of a "LEVEL2" command, the 422/* WD docs state that at the conclusion of a "LEVEL2" command, the
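
This hunk is the same mechanical conversion seen in sun3_NCR5380.c above: with the midlayer now presenting every command's data as a scatterlist, the use_sg/request_buffer pair gives way to the scsi_sglist()/scsi_sg_count()/scsi_bufflen() accessors, and the no-data case leaves SCp empty instead of pointing at a flat buffer. Drivers that walk the whole data area can use the matching iterator; a sketch:

    /* Sketch: iterate a command's data segments with the new accessors
     * instead of dereferencing request_buffer/request_bufflen directly. */
    struct scatterlist *sg;
    int i;

    scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
            void *buf = sg_virt(sg);            /* kernel mapping of segment */
            unsigned int len = sg->length;

            /* ... transfer 'len' bytes at 'buf' ... */
    }
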
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 03cd44f231df..b4304ae78527 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1108,13 +1108,10 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
1108 scb->host = host; 1108 scb->host = host;
1109 1109
1110 nseg = scsi_sg_count(SCpnt); 1110 nseg = scsi_sg_count(SCpnt);
1111 if (nseg) { 1111 if (nseg > 1) {
1112 struct scatterlist *sg; 1112 struct scatterlist *sg;
1113 unsigned i; 1113 unsigned i;
1114 1114
1115 if (SCpnt->device->host->sg_tablesize == SG_NONE) {
1116 panic("wd7000_queuecommand: scatter/gather not supported.\n");
1117 }
1118 dprintk("Using scatter/gather with %d elements.\n", nseg); 1115 dprintk("Using scatter/gather with %d elements.\n", nseg);
1119 1116
1120 sgb = scb->sgb; 1117 sgb = scb->sgb;
@@ -1128,7 +1125,10 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
1128 } 1125 }
1129 } else { 1126 } else {
1130 scb->op = 0; 1127 scb->op = 0;
1131 any2scsi(scb->dataptr, isa_virt_to_bus(scsi_sglist(SCpnt))); 1128 if (nseg) {
1129 struct scatterlist *sg = scsi_sglist(SCpnt);
1130 any2scsi(scb->dataptr, isa_page_to_bus(sg_page(sg)) + sg->offset);
1131 }
1132 any2scsi(scb->maxlen, scsi_bufflen(SCpnt)); 1132 any2scsi(scb->maxlen, scsi_bufflen(SCpnt));
1133 } 1133 }
1134 1134
@@ -1524,7 +1524,7 @@ static __init int wd7000_detect(struct scsi_host_template *tpnt)
1524 * For boards before rev 6.0, scatter/gather isn't supported. 1524 * For boards before rev 6.0, scatter/gather isn't supported.
1525 */ 1525 */
1526 if (host->rev1 < 6) 1526 if (host->rev1 < 6)
1527 sh->sg_tablesize = SG_NONE; 1527 sh->sg_tablesize = 1;
1528 1528
1529 present++; /* count it */ 1529 present++; /* count it */
1530 1530